From b8e873648d30b7c1e9fcd4b88c3f6775e4fed1b8 Mon Sep 17 00:00:00 2001 From: Dan Winship Date: Thu, 2 Apr 2020 14:17:28 -0400 Subject: [PATCH 01/27] Use more net.HardwareAddr and less string Pass MACs around as net.HardwareAddr internally, and only convert to strings when we actually want them as strings. (This is simpler than the alternative because "mac.String()" can't fail, but "net.ParseMAC(macStr)" can.) Signed-off-by: Dan Winship --- .../hybrid-overlay/pkg/controller/master.go | 2 +- .../pkg/controller/node_linux.go | 2 +- go-controller/pkg/node/gateway_init.go | 12 ++--- go-controller/pkg/node/management-port.go | 10 ++-- .../pkg/node/management-port_linux.go | 8 ++-- .../pkg/node/management-port_linux_test.go | 4 +- .../pkg/node/management-port_windows.go | 4 +- go-controller/pkg/ovn/master.go | 14 +++--- go-controller/pkg/ovn/master_test.go | 48 ++++++++++--------- go-controller/pkg/ovn/ovn.go | 7 +-- go-controller/pkg/util/gateway-init.go | 8 ++-- go-controller/pkg/util/net.go | 19 ++++---- go-controller/pkg/util/net_linux.go | 16 +++---- go-controller/pkg/util/node_annotations.go | 25 +++++----- go-controller/pkg/util/util_test.go | 6 +-- 15 files changed, 92 insertions(+), 93 deletions(-) diff --git a/go-controller/hybrid-overlay/pkg/controller/master.go b/go-controller/hybrid-overlay/pkg/controller/master.go index 1e21bca8f3..f6ab7f7f04 100644 --- a/go-controller/hybrid-overlay/pkg/controller/master.go +++ b/go-controller/hybrid-overlay/pkg/controller/master.go @@ -125,7 +125,7 @@ func (m *MasterController) handleOverlayPort(node *kapi.Node, annotator kube.Ann portIP = util.NextIP(second) } if portMAC == nil { - portMAC, _ = net.ParseMAC(util.IPAddrToHWAddr(portIP)) + portMAC = util.IPAddrToHWAddr(portIP) } klog.Infof("creating node %s hybrid overlay port", node.Name) diff --git a/go-controller/hybrid-overlay/pkg/controller/node_linux.go b/go-controller/hybrid-overlay/pkg/controller/node_linux.go index f13a15f383..78bfc1c245 100644 --- 
a/go-controller/hybrid-overlay/pkg/controller/node_linux.go +++ b/go-controller/hybrid-overlay/pkg/controller/node_linux.go @@ -406,7 +406,7 @@ func (n *NodeController) ensureHybridOverlayBridge() error { if err != nil { return err } - stdout, stderr, err := util.RunOVSVsctl("set", "bridge", extBridgeName, "other-config:hwaddr="+macAddress) + stdout, stderr, err := util.RunOVSVsctl("set", "bridge", extBridgeName, "other-config:hwaddr="+macAddress.String()) if err != nil { return fmt.Errorf("Failed to set bridge, stdout: %q, stderr: %q, "+ "error: %v", stdout, stderr, err) diff --git a/go-controller/pkg/node/gateway_init.go b/go-controller/pkg/node/gateway_init.go index 15a2d9677e..43f8c0e55a 100644 --- a/go-controller/pkg/node/gateway_init.go +++ b/go-controller/pkg/node/gateway_init.go @@ -17,20 +17,20 @@ import ( // bridgedGatewayNodeSetup makes the bridge's MAC address permanent (if needed), sets up // the physical network name mappings for the bridge, and returns an ifaceID // created from the bridge name and the node name -func bridgedGatewayNodeSetup(nodeName, bridgeName, bridgeInterface string, syncBridgeMac bool) (string, string, error) { +func bridgedGatewayNodeSetup(nodeName, bridgeName, bridgeInterface string, syncBridgeMAC bool) (string, net.HardwareAddr, error) { // A OVS bridge's mac address can change when ports are added to it. // We cannot let that happen, so make the bridge mac address permanent. 
macAddress, err := util.GetOVSPortMACAddress(bridgeInterface) if err != nil { - return "", "", err + return "", nil, err } - if syncBridgeMac { + if syncBridgeMAC { var err error stdout, stderr, err := util.RunOVSVsctl("set", "bridge", - bridgeName, "other-config:hwaddr="+macAddress) + bridgeName, "other-config:hwaddr="+macAddress.String()) if err != nil { - return "", "", fmt.Errorf("Failed to set bridge, stdout: %q, stderr: %q, "+ + return "", nil, fmt.Errorf("Failed to set bridge, stdout: %q, stderr: %q, "+ "error: %v", stdout, stderr, err) } } @@ -40,7 +40,7 @@ func bridgedGatewayNodeSetup(nodeName, bridgeName, bridgeInterface string, syncB _, stderr, err := util.RunOVSVsctl("set", "Open_vSwitch", ".", fmt.Sprintf("external_ids:ovn-bridge-mappings=%s:%s", util.PhysicalNetworkName, bridgeName)) if err != nil { - return "", "", fmt.Errorf("Failed to set ovn-bridge-mappings for ovs bridge %s"+ + return "", nil, fmt.Errorf("Failed to set ovn-bridge-mappings for ovs bridge %s"+ ", stderr:%s (%v)", bridgeName, stderr, err) } diff --git a/go-controller/pkg/node/management-port.go b/go-controller/pkg/node/management-port.go index 746dac0a3e..1750c32a53 100644 --- a/go-controller/pkg/node/management-port.go +++ b/go-controller/pkg/node/management-port.go @@ -15,7 +15,7 @@ func (n *OvnNode) createManagementPort(localSubnet *net.IPNet, nodeAnnotator kub waiter *startupWaiter) error { // Retrieve the routerIP and mangementPortIP for a given localSubnet routerIP, portIP := util.GetNodeWellKnownAddresses(localSubnet) - routerMac := util.IPAddrToHWAddr(routerIP.IP) + routerMAC := util.IPAddrToHWAddr(routerIP.IP) // Kubernetes emits events when pods are created. The event will contain // only lowercase letters of the hostname even though the kubelet is @@ -54,20 +54,20 @@ func (n *OvnNode) createManagementPort(localSubnet *net.IPNet, nodeAnnotator kub } // persist the MAC address so that upon node reboot we get back the same mac address. 
_, stderr, err = util.RunOVSVsctl("set", "interface", util.K8sMgmtIntfName, - fmt.Sprintf("mac=%s", strings.ReplaceAll(macAddress, ":", "\\:"))) + fmt.Sprintf("mac=%s", strings.ReplaceAll(macAddress.String(), ":", "\\:"))) if err != nil { - klog.Errorf("failed to persist MAC address %q for %q: stderr:%s (%v)", macAddress, + klog.Errorf("failed to persist MAC address %q for %q: stderr:%s (%v)", macAddress.String(), util.K8sMgmtIntfName, stderr, err) return err } err = createPlatformManagementPort(util.K8sMgmtIntfName, portIP.String(), routerIP.IP.String(), - routerMac, n.stopChan) + routerMAC, n.stopChan) if err != nil { return err } - if err := util.SetNodeManagementPortMacAddr(nodeAnnotator, macAddress); err != nil { + if err := util.SetNodeManagementPortMACAddress(nodeAnnotator, macAddress); err != nil { return err } diff --git a/go-controller/pkg/node/management-port_linux.go b/go-controller/pkg/node/management-port_linux.go index b5e929f80e..f9e111dd09 100644 --- a/go-controller/pkg/node/management-port_linux.go +++ b/go-controller/pkg/node/management-port_linux.go @@ -30,10 +30,10 @@ type managementPortConfig struct { ifIPMask string ifIP string routerIP string - routerMAC string + routerMAC net.HardwareAddr } -func newManagementPortConfig(interfaceName, interfaceIP, routerIP, routerMAC string) (*managementPortConfig, error) { +func newManagementPortConfig(interfaceName, interfaceIP, routerIP string, routerMAC net.HardwareAddr) (*managementPortConfig, error) { var err error cfg := &managementPortConfig{} @@ -126,7 +126,7 @@ func setupManagementPortConfig(cfg *managementPortConfig) ([]string, error) { // source protocol address to be in the Logical Switch's subnet. 
if exists, err = util.LinkNeighExists(cfg.link, cfg.routerIP, cfg.routerMAC); err == nil && !exists { warnings = append(warnings, fmt.Sprintf("missing arp entry for MAC/IP binding (%s/%s) on link %s", - cfg.routerMAC, cfg.routerIP, util.K8sMgmtIntfName)) + cfg.routerMAC.String(), cfg.routerIP, util.K8sMgmtIntfName)) err = util.LinkNeighAdd(cfg.link, cfg.routerIP, cfg.routerMAC) } if err != nil { @@ -159,7 +159,7 @@ func setupManagementPortConfig(cfg *managementPortConfig) ([]string, error) { // createPlatformManagementPort creates a management port attached to the node switch // that lets the node access its pods via their private IP address. This is used // for health checking and other management tasks. -func createPlatformManagementPort(interfaceName, interfaceIP, routerIP, routerMAC string, +func createPlatformManagementPort(interfaceName, interfaceIP, routerIP string, routerMAC net.HardwareAddr, stopChan chan struct{}) error { var cfg *managementPortConfig var err error diff --git a/go-controller/pkg/node/management-port_linux_test.go b/go-controller/pkg/node/management-port_linux_test.go index cd01ed3faa..7a3e92d117 100644 --- a/go-controller/pkg/node/management-port_linux_test.go +++ b/go-controller/pkg/node/management-port_linux_test.go @@ -207,9 +207,9 @@ func testManagementPort(ctx *cli.Context, fexec *ovntest.FakeExec, testNS ns.Net updatedNode, err := fakeClient.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) - macFromAnnotation, err := util.ParseNodeManagementPortMacAddr(updatedNode) + macFromAnnotation, err := util.ParseNodeManagementPortMACAddress(updatedNode) Expect(err).NotTo(HaveOccurred()) - Expect(macFromAnnotation).To(Equal(mgtPortMAC)) + Expect(macFromAnnotation.String()).To(Equal(mgtPortMAC)) Expect(fexec.CalledMatchesExpected()).To(BeTrue(), fexec.ErrorDesc) } diff --git a/go-controller/pkg/node/management-port_windows.go b/go-controller/pkg/node/management-port_windows.go index f5bf98ade1..4e0b282f80 
100644 --- a/go-controller/pkg/node/management-port_windows.go +++ b/go-controller/pkg/node/management-port_windows.go @@ -18,8 +18,8 @@ import ( // createPlatformManagementPort creates a management port attached to the node switch // that lets the node access its pods via their private IP address. This is used // for health checking and other management tasks. -func createPlatformManagementPort(interfaceName, interfaceIP, routerIP, routerMAC string, - stopChan chan struct{}) error { +func createPlatformManagementPort(interfaceName, interfaceIP, routerIP string, + routerMAC net.HardwareAddr, stopChan chan struct{}) error { // Up the interface. _, _, err := util.RunPowershell("Enable-NetAdapter", "-IncludeHidden", interfaceName) if err != nil { diff --git a/go-controller/pkg/ovn/master.go b/go-controller/pkg/ovn/master.go index bfa9cee31d..f5da10d8e6 100644 --- a/go-controller/pkg/ovn/master.go +++ b/go-controller/pkg/ovn/master.go @@ -309,12 +309,12 @@ func (oc *Controller) deleteNodeJoinSubnet(nodeName string, subnet *net.IPNet) e } func (oc *Controller) syncNodeManagementPort(node *kapi.Node, subnet *net.IPNet) error { - macAddress, err := util.ParseNodeManagementPortMacAddr(node) + macAddress, err := util.ParseNodeManagementPortMACAddress(node) if err != nil { return err } - if macAddress == "" { + if macAddress == nil { // When macAddress was removed, delete the switch port stdout, stderr, err := util.RunOVNNbctl("--", "--if-exists", "lsp-del", "k8s-"+node.Name) if err != nil { @@ -336,7 +336,7 @@ func (oc *Controller) syncNodeManagementPort(node *kapi.Node, subnet *net.IPNet) // Create this node's management logical port on the node switch stdout, stderr, err := util.RunOVNNbctl( "--", "--may-exist", "lsp-add", node.Name, "k8s-"+node.Name, - "--", "lsp-set-addresses", "k8s-"+node.Name, macAddress+" "+portIP.IP.String()) + "--", "lsp-set-addresses", "k8s-"+node.Name, macAddress.String()+" "+portIP.IP.String()) if err != nil { klog.Errorf("Failed to add logical 
port to switch, stdout: %q, stderr: %q, error: %v", stdout, stderr, err) return err @@ -420,12 +420,12 @@ func addStaticRouteToHost(node *kapi.Node, nicIP string) error { func (oc *Controller) ensureNodeLogicalNetwork(nodeName string, hostsubnet *net.IPNet) error { firstIP, secondIP := util.GetNodeWellKnownAddresses(hostsubnet) - nodeLRPMac := util.IPAddrToHWAddr(firstIP.IP) + nodeLRPMAC := util.IPAddrToHWAddr(firstIP.IP) clusterRouter := util.GetK8sClusterRouter() // Create a router port and provide it the first address on the node's host subnet _, stderr, err := util.RunOVNNbctl("--may-exist", "lrp-add", clusterRouter, "rtos-"+nodeName, - nodeLRPMac, firstIP.String()) + nodeLRPMAC.String(), firstIP.String()) if err != nil { klog.Errorf("Failed to add logical port to router, stderr: %q, error: %v", stderr, err) return err @@ -473,7 +473,7 @@ func (oc *Controller) ensureNodeLogicalNetwork(nodeName string, hostsubnet *net. if !utilnet.IsIPv6(firstIP.IP) { stdout, stderr, err = util.RunOVNNbctl("set", "logical_switch", nodeName, "other-config:mcast_querier=\"true\"", - "other-config:mcast_eth_src=\""+nodeLRPMac+"\"", + "other-config:mcast_eth_src=\""+nodeLRPMAC.String()+"\"", "other-config:mcast_ip4_src=\""+firstIP.IP.String()+"\"") if err != nil { klog.Errorf("Failed to enable IGMP Querier on logical switch %v, stdout: %q, stderr: %q, error: %v", @@ -495,7 +495,7 @@ func (oc *Controller) ensureNodeLogicalNetwork(nodeName string, hostsubnet *net. // Connect the switch to the router. 
stdout, stderr, err = util.RunOVNNbctl("--", "--may-exist", "lsp-add", nodeName, "stor-"+nodeName, - "--", "set", "logical_switch_port", "stor-"+nodeName, "type=router", "options:router-port=rtos-"+nodeName, "addresses="+"\""+nodeLRPMac+"\"") + "--", "set", "logical_switch_port", "stor-"+nodeName, "type=router", "options:router-port=rtos-"+nodeName, "addresses="+"\""+nodeLRPMAC.String()+"\"") if err != nil { klog.Errorf("Failed to add logical port to switch, stdout: %q, stderr: %q, error: %v", stdout, stderr, err) return err diff --git a/go-controller/pkg/ovn/master_test.go b/go-controller/pkg/ovn/master_test.go index 82569bae4f..3882af8ab9 100644 --- a/go-controller/pkg/ovn/master_test.go +++ b/go-controller/pkg/ovn/master_test.go @@ -134,7 +134,7 @@ func defaultFakeExec(nodeSubnet, nodeName string, sctpSupport bool) (*ovntest.Fa // Node-related logical network stuff cidr := ovntest.MustParseIPNet(nodeSubnet) cidr.IP = util.NextIP(cidr.IP) - lrpMAC := util.IPAddrToHWAddr(cidr.IP) + lrpMAC := util.IPAddrToHWAddr(cidr.IP).String() gwCIDR := cidr.String() gwIP := cidr.IP.String() nodeMgmtPortIP := util.NextIP(cidr.IP) @@ -244,7 +244,7 @@ var _ = Describe("Master Operations", func() { nodeAnnotator := kube.NewNodeAnnotator(&kube.Kube{fakeClient}, &testNode) err = util.SetDisabledL3GatewayConfig(nodeAnnotator) Expect(err).NotTo(HaveOccurred()) - err = util.SetNodeManagementPortMacAddr(nodeAnnotator, mgmtMAC) + err = util.SetNodeManagementPortMACAddress(nodeAnnotator, ovntest.MustParseMAC(mgmtMAC)) Expect(err).NotTo(HaveOccurred()) err = nodeAnnotator.Run() Expect(err).NotTo(HaveOccurred()) @@ -274,9 +274,9 @@ var _ = Describe("Master Operations", func() { Expect(err).NotTo(HaveOccurred()) Expect(subnetFromAnnotation.String()).To(Equal(nodeSubnet)) - macFromAnnotation, err := util.ParseNodeManagementPortMacAddr(updatedNode) + macFromAnnotation, err := util.ParseNodeManagementPortMACAddress(updatedNode) Expect(err).NotTo(HaveOccurred()) - 
Expect(macFromAnnotation).To(Equal(mgmtMAC)) + Expect(macFromAnnotation.String()).To(Equal(mgmtMAC)) Eventually(fexec.CalledMatchesExpected, 2).Should(BeTrue(), fexec.ErrorDesc) return nil @@ -326,7 +326,7 @@ var _ = Describe("Master Operations", func() { nodeAnnotator := kube.NewNodeAnnotator(&kube.Kube{fakeClient}, &testNode) err = util.SetDisabledL3GatewayConfig(nodeAnnotator) Expect(err).NotTo(HaveOccurred()) - err = util.SetNodeManagementPortMacAddr(nodeAnnotator, mgmtMAC) + err = util.SetNodeManagementPortMACAddress(nodeAnnotator, ovntest.MustParseMAC(mgmtMAC)) Expect(err).NotTo(HaveOccurred()) err = nodeAnnotator.Run() Expect(err).NotTo(HaveOccurred()) @@ -356,9 +356,9 @@ var _ = Describe("Master Operations", func() { Expect(err).NotTo(HaveOccurred()) Expect(subnetFromAnnotation.String()).To(Equal(nodeSubnet)) - macFromAnnotation, err := util.ParseNodeManagementPortMacAddr(updatedNode) + macFromAnnotation, err := util.ParseNodeManagementPortMACAddress(updatedNode) Expect(err).NotTo(HaveOccurred()) - Expect(macFromAnnotation).To(Equal(mgmtMAC)) + Expect(macFromAnnotation.String()).To(Equal(mgmtMAC)) Eventually(fexec.CalledMatchesExpected, 2).Should(BeTrue(), fexec.ErrorDesc) return nil @@ -407,7 +407,7 @@ var _ = Describe("Master Operations", func() { nodeAnnotator := kube.NewNodeAnnotator(&kube.Kube{fakeClient}, &testNode) err = util.SetDisabledL3GatewayConfig(nodeAnnotator) Expect(err).NotTo(HaveOccurred()) - err = util.SetNodeManagementPortMacAddr(nodeAnnotator, mgmtMAC) + err = util.SetNodeManagementPortMACAddress(nodeAnnotator, ovntest.MustParseMAC(mgmtMAC)) Expect(err).NotTo(HaveOccurred()) err = util.SetNodeHostSubnetAnnotation(nodeAnnotator, nodeSubnet) Expect(err).NotTo(HaveOccurred()) @@ -445,9 +445,9 @@ var _ = Describe("Master Operations", func() { Expect(err).NotTo(HaveOccurred()) Expect(subnetFromAnnotation.String()).To(Equal(nodeSubnet)) - macFromAnnotation, err := util.ParseNodeManagementPortMacAddr(updatedNode) + macFromAnnotation, err := 
util.ParseNodeManagementPortMACAddress(updatedNode) Expect(err).NotTo(HaveOccurred()) - Expect(macFromAnnotation).To(Equal(mgmtMAC)) + Expect(macFromAnnotation.String()).To(Equal(mgmtMAC)) Eventually(fexec.CalledMatchesExpected, 2).Should(BeTrue(), fexec.ErrorDesc) return nil @@ -477,7 +477,7 @@ var _ = Describe("Master Operations", func() { masterSubnet string = "10.128.2.0/24" masterGWCIDR string = "10.128.2.1/24" masterMgmtPortIP string = "10.128.2.2" - lrpMAC string = "0A:58:0A:80:02:01" + lrpMAC string = "0a:58:0a:80:02:01" masterMgmtPortMAC string = "00:00:00:55:66:77" ) @@ -597,7 +597,7 @@ subnet=%s nodeAnnotator := kube.NewNodeAnnotator(&kube.Kube{fakeClient}, &masterNode) err = util.SetDisabledL3GatewayConfig(nodeAnnotator) Expect(err).NotTo(HaveOccurred()) - err = util.SetNodeManagementPortMacAddr(nodeAnnotator, masterMgmtPortMAC) + err = util.SetNodeManagementPortMACAddress(nodeAnnotator, ovntest.MustParseMAC(masterMgmtPortMAC)) Expect(err).NotTo(HaveOccurred()) err = util.SetNodeHostSubnetAnnotation(nodeAnnotator, masterSubnet) Expect(err).NotTo(HaveOccurred()) @@ -657,11 +657,11 @@ var _ = Describe("Gateway Init Operations", func() { app.Action = func(ctx *cli.Context) error { const ( nodeName string = "node1" - nodeLRPMAC string = "0A:58:0A:01:01:01" + nodeLRPMAC string = "0a:58:0a:01:01:01" joinSubnet string = "100.64.0.0/29" - lrpMAC string = "0A:58:64:40:00:01" + lrpMAC string = "0a:58:64:40:00:01" lrpIP string = "100.64.0.1" - drLrpMAC string = "0A:58:64:40:00:02" + drLrpMAC string = "0a:58:64:40:00:02" drLrpIP string = "100.64.0.2" brLocalnetMAC string = "11:22:33:44:55:66" clusterRouter string = util.OvnClusterRouter @@ -706,9 +706,10 @@ var _ = Describe("Gateway Init Operations", func() { nodeAnnotator := kube.NewNodeAnnotator(&kube.Kube{fakeClient}, &testNode) ifaceID := localnetBridgeName + "_" + nodeName err = util.SetLocalL3GatewayConfig(nodeAnnotator, ifaceID, - brLocalnetMAC, localnetGatewayIP, localnetGatewayNextHop, true) + 
ovntest.MustParseMAC(brLocalnetMAC), localnetGatewayIP, localnetGatewayNextHop, + true) Expect(err).NotTo(HaveOccurred()) - err = util.SetNodeManagementPortMacAddr(nodeAnnotator, brLocalnetMAC) + err = util.SetNodeManagementPortMACAddress(nodeAnnotator, ovntest.MustParseMAC(brLocalnetMAC)) Expect(err).NotTo(HaveOccurred()) err = util.SetNodeHostSubnetAnnotation(nodeAnnotator, nodeSubnet) Expect(err).NotTo(HaveOccurred()) @@ -839,11 +840,11 @@ var _ = Describe("Gateway Init Operations", func() { app.Action = func(ctx *cli.Context) error { const ( nodeName string = "node1" - nodeLRPMAC string = "0A:58:0A:01:01:01" + nodeLRPMAC string = "0a:58:0a:01:01:01" joinSubnet string = "100.64.0.0/29" - lrpMAC string = "0A:58:64:40:00:01" + lrpMAC string = "0a:58:64:40:00:01" lrpIP string = "100.64.0.1" - drLrpMAC string = "0A:58:64:40:00:02" + drLrpMAC string = "0a:58:64:40:00:02" drLrpIP string = "100.64.0.2" physicalBridgeMAC string = "11:22:33:44:55:66" lrpCIDR string = lrpIP + "/16" @@ -863,7 +864,7 @@ var _ = Describe("Gateway Init Operations", func() { physicalBridgeName string = "br-eth0" nodeGWIP string = "10.1.1.1/24" nodeMgmtPortIP string = "10.1.1.2" - nodeMgmtPortMAC string = "0A:58:0A:01:01:02" + nodeMgmtPortMAC string = "0a:58:0a:01:01:02" ) testNode := v1.Node{ObjectMeta: metav1.ObjectMeta{ @@ -889,9 +890,10 @@ var _ = Describe("Gateway Init Operations", func() { nodeAnnotator := kube.NewNodeAnnotator(&kube.Kube{fakeClient}, &testNode) ifaceID := physicalBridgeName + "_" + nodeName err = util.SetSharedL3GatewayConfig(nodeAnnotator, ifaceID, - physicalBridgeMAC, physicalGatewayIPMask, physicalGatewayNextHop, + ovntest.MustParseMAC(physicalBridgeMAC), + physicalGatewayIPMask, physicalGatewayNextHop, true, 1024) - err = util.SetNodeManagementPortMacAddr(nodeAnnotator, nodeMgmtPortMAC) + err = util.SetNodeManagementPortMACAddress(nodeAnnotator, ovntest.MustParseMAC(nodeMgmtPortMAC)) Expect(err).NotTo(HaveOccurred()) err = 
util.SetNodeHostSubnetAnnotation(nodeAnnotator, nodeSubnet) Expect(err).NotTo(HaveOccurred()) diff --git a/go-controller/pkg/ovn/ovn.go b/go-controller/pkg/ovn/ovn.go index 8fb8cd4f6d..6d4ded3e68 100644 --- a/go-controller/pkg/ovn/ovn.go +++ b/go-controller/pkg/ovn/ovn.go @@ -1,6 +1,7 @@ package ovn import ( + "bytes" "encoding/json" "errors" "fmt" @@ -757,9 +758,9 @@ func gatewayChanged(oldNode, newNode *kapi.Node) bool { // macAddressChanged() compares old annotations to new and returns true if something has changed. func macAddressChanged(oldNode, node *kapi.Node) bool { - oldMacAddress, _ := util.ParseNodeManagementPortMacAddr(oldNode) - macAddress, _ := util.ParseNodeManagementPortMacAddr(node) - return oldMacAddress != macAddress + oldMacAddress, _ := util.ParseNodeManagementPortMACAddress(oldNode) + macAddress, _ := util.ParseNodeManagementPortMACAddress(node) + return !bytes.Equal(oldMacAddress, macAddress) } // noHostSubnet() compares the no-hostsubenet-nodes flag with node labels to see if the node is manageing its diff --git a/go-controller/pkg/util/gateway-init.go b/go-controller/pkg/util/gateway-init.go index c2095bd01b..74c946f7fe 100644 --- a/go-controller/pkg/util/gateway-init.go +++ b/go-controller/pkg/util/gateway-init.go @@ -125,8 +125,8 @@ func GatewayInit(clusterIPSubnet []string, hostSubnet string, joinSubnet *net.IP prefixLen, _ := joinSubnet.Mask.Size() gwLRPIp := NextIP(joinSubnet.IP) drLRPIp := NextIP(gwLRPIp) - gwLRPMac := IPAddrToHWAddr(gwLRPIp) - drLRPMac := IPAddrToHWAddr(drLRPIp) + gwLRPMAC := IPAddrToHWAddr(gwLRPIp) + drLRPMAC := IPAddrToHWAddr(drLRPIp) joinSwitch := JoinSwitchPrefix + nodeName // create the per-node join switch @@ -148,7 +148,7 @@ func GatewayInit(clusterIPSubnet []string, hostSubnet string, joinSubnet *net.IP } _, stderr, err = RunOVNNbctl( - "--", "--may-exist", "lrp-add", gatewayRouter, gwRouterPort, gwLRPMac, + "--", "--may-exist", "lrp-add", gatewayRouter, gwRouterPort, gwLRPMAC.String(), fmt.Sprintf("%s/%d", 
gwLRPIp.String(), prefixLen)) if err != nil { return fmt.Errorf("Failed to add logical router port %q, stderr: %q, error: %v", gwRouterPort, stderr, err) @@ -169,7 +169,7 @@ func GatewayInit(clusterIPSubnet []string, hostSubnet string, joinSubnet *net.IP } _, stderr, err = RunOVNNbctl( - "--", "--may-exist", "lrp-add", k8sClusterRouter, drRouterPort, drLRPMac, + "--", "--may-exist", "lrp-add", k8sClusterRouter, drRouterPort, drLRPMAC.String(), fmt.Sprintf("%s/%d", drLRPIp.String(), prefixLen)) if err != nil { return fmt.Errorf("Failed to add logical router port %q, stderr: %q, error: %v", drRouterPort, stderr, err) diff --git a/go-controller/pkg/util/net.go b/go-controller/pkg/util/net.go index 105e02da33..d45ade9cfb 100644 --- a/go-controller/pkg/util/net.go +++ b/go-controller/pkg/util/net.go @@ -64,15 +64,15 @@ func GetPortAddresses(portName string) (net.HardwareAddr, net.IP, error) { } // GetOVSPortMACAddress returns the MAC address of a given OVS port -func GetOVSPortMACAddress(portName string) (string, error) { +func GetOVSPortMACAddress(portName string) (net.HardwareAddr, error) { macAddress, stderr, err := RunOVSVsctl("--if-exists", "get", "interface", portName, "mac_in_use") if err != nil { - return "", fmt.Errorf("failed to get MAC address for %q, stderr: %q, error: %v", + return nil, fmt.Errorf("failed to get MAC address for %q, stderr: %q, error: %v", portName, stderr, err) } if macAddress == "[]" { - return "", fmt.Errorf("no mac_address found for %q", portName) + return nil, fmt.Errorf("no mac_address found for %q", portName) } if runtime.GOOS == windowsOS && macAddress == "00:00:00:00:00:00" { // There is a known issue with OVS not correctly picking up the @@ -80,12 +80,11 @@ func GetOVSPortMACAddress(portName string) (string, error) { stdout, stderr, err := RunPowershell("$(Get-NetAdapter", "-IncludeHidden", "-InterfaceAlias", fmt.Sprintf("\"%s\"", portName), ").MacAddress") if err != nil { - return "", fmt.Errorf("failed to get mac address of %q, 
stderr: %q, error: %v", portName, stderr, err) + return nil, fmt.Errorf("failed to get mac address of %q, stderr: %q, error: %v", portName, stderr, err) } - // Windows returns it in 00-00-00-00-00-00 format, we want ':' instead of '-' - macAddress = strings.ToLower(strings.Replace(stdout, "-", ":", -1)) + macAddress = stdout } - return macAddress, nil + return net.ParseMAC(macAddress) } // GetNodeWellKnownAddresses returns routerIP, Management Port IP and prefix len @@ -104,14 +103,14 @@ func JoinHostPortInt32(host string, port int32) string { // IPAddrToHWAddr takes the four octets of IPv4 address (aa.bb.cc.dd, for example) and uses them in creating // a MAC address (0A:58:AA:BB:CC:DD). For IPv6, we'll use the first two bytes and last two bytes and hope // that results in a unique MAC for the scope of where it's used. -func IPAddrToHWAddr(ip net.IP) string { +func IPAddrToHWAddr(ip net.IP) net.HardwareAddr { // Ensure that for IPv4, we are always working with the IP in 4-byte form. ip4 := ip.To4() if ip4 != nil { // safe to use private MAC prefix: 0A:58 - return fmt.Sprintf("0A:58:%02X:%02X:%02X:%02X", ip4[0], ip4[1], ip4[2], ip4[3]) + return net.HardwareAddr{0x0A, 0x58, ip4[0], ip4[1], ip4[2], ip4[3]} } // IPv6 - use the first two and last two bytes. 
- return fmt.Sprintf("0A:58:%02X:%02X:%02X:%02X", ip[0], ip[1], ip[14], ip[15]) + return net.HardwareAddr{0x0A, 0x58, ip[0], ip[1], ip[14], ip[15]} } diff --git a/go-controller/pkg/util/net_linux.go b/go-controller/pkg/util/net_linux.go index 6cdf668acf..4b253ba4ef 100644 --- a/go-controller/pkg/util/net_linux.go +++ b/go-controller/pkg/util/net_linux.go @@ -3,10 +3,10 @@ package util import ( + "bytes" "fmt" "net" "os" - "strings" "github.com/vishvananda/netlink" @@ -159,15 +159,11 @@ func LinkRouteExists(link netlink.Link, gwIPstr, subnet string) (bool, error) { } // LinkNeighAdd adds MAC/IP bindings for the given link -func LinkNeighAdd(link netlink.Link, neighIPstr, neighMacstr string) error { +func LinkNeighAdd(link netlink.Link, neighIPstr string, neighMAC net.HardwareAddr) error { neighIP := net.ParseIP(neighIPstr) if neighIP == nil { return fmt.Errorf("neighbour IP %s is not a valid IPv4 or IPv6 address", neighIPstr) } - hwAddr, err := net.ParseMAC(neighMacstr) - if err != nil { - return fmt.Errorf("neighbour MAC address %s is not valid: %v", neighMacstr, err) - } family := netlink.FAMILY_V4 if utilnet.IsIPv6(neighIP) { @@ -178,9 +174,9 @@ func LinkNeighAdd(link netlink.Link, neighIPstr, neighMacstr string) error { Family: family, State: netlink.NUD_PERMANENT, IP: neighIP, - HardwareAddr: hwAddr, + HardwareAddr: neighMAC, } - err = netlink.NeighSet(neigh) + err := netlink.NeighSet(neigh) if err != nil { return fmt.Errorf("failed to add neighbour entry %+v: %v", neigh, err) } @@ -188,7 +184,7 @@ func LinkNeighAdd(link netlink.Link, neighIPstr, neighMacstr string) error { } // LinkNeighExists checks to see if the given MAC/IP bindings exists -func LinkNeighExists(link netlink.Link, neighIPstr, neighMacstr string) (bool, error) { +func LinkNeighExists(link netlink.Link, neighIPstr string, neighMAC net.HardwareAddr) (bool, error) { neighIP := net.ParseIP(neighIPstr) if neighIP == nil { return false, fmt.Errorf("neighbour IP %s is not a valid IPv4 or IPv6 
address", @@ -208,7 +204,7 @@ func LinkNeighExists(link netlink.Link, neighIPstr, neighMacstr string) (bool, e for _, neigh := range neighs { if neigh.IP.String() == neighIPstr { - if neigh.HardwareAddr.String() == strings.ToLower(neighMacstr) && + if bytes.Equal(neigh.HardwareAddr, neighMAC) && (neigh.State&netlink.NUD_PERMANENT) == netlink.NUD_PERMANENT { return true, nil } diff --git a/go-controller/pkg/util/node_annotations.go b/go-controller/pkg/util/node_annotations.go index 889ced0145..1c72ff3ff7 100644 --- a/go-controller/pkg/util/node_annotations.go +++ b/go-controller/pkg/util/node_annotations.go @@ -101,13 +101,13 @@ func SetDisabledL3GatewayConfig(nodeAnnotator kube.Annotator) error { // SetSharedL3GatewayConfig uses nodeAnnotator set an l3-gateway-config annotation // for the "shared interface" gateway mode. func SetSharedL3GatewayConfig(nodeAnnotator kube.Annotator, - ifaceID, macAddress, gatewayAddress, nextHop string, nodePortEnable bool, - vlanID uint) error { + ifaceID string, macAddress net.HardwareAddr, gatewayAddress, nextHop string, + nodePortEnable bool, vlanID uint) error { return setAnnotations(nodeAnnotator, map[string]string{ ovnNodeGatewayMode: string(config.GatewayModeShared), ovnNodeGatewayVlanID: fmt.Sprintf("%d", vlanID), ovnNodeGatewayIfaceID: ifaceID, - ovnNodeGatewayMacAddress: macAddress, + ovnNodeGatewayMacAddress: macAddress.String(), ovnNodeGatewayIP: gatewayAddress, ovnNodeGatewayNextHop: nextHop, ovnNodePortEnable: fmt.Sprintf("%t", nodePortEnable), @@ -117,11 +117,12 @@ func SetSharedL3GatewayConfig(nodeAnnotator kube.Annotator, // SetSharedL3GatewayConfig uses nodeAnnotator set an l3-gateway-config annotation // for the "localnet" gateway mode. 
func SetLocalL3GatewayConfig(nodeAnnotator kube.Annotator, - ifaceID, macAddress, gatewayAddress, nextHop string, nodePortEnable bool) error { + ifaceID string, macAddress net.HardwareAddr, gatewayAddress, nextHop string, + nodePortEnable bool) error { return setAnnotations(nodeAnnotator, map[string]string{ ovnNodeGatewayMode: string(config.GatewayModeLocal), ovnNodeGatewayIfaceID: ifaceID, - ovnNodeGatewayMacAddress: macAddress, + ovnNodeGatewayMacAddress: macAddress.String(), ovnNodeGatewayIP: gatewayAddress, ovnNodeGatewayNextHop: nextHop, ovnNodePortEnable: fmt.Sprintf("%t", nodePortEnable), @@ -197,20 +198,20 @@ func ParseNodeL3GatewayAnnotation(node *kapi.Node) (*L3GatewayConfig, error) { return l3GatewayConfig, nil } -func SetNodeManagementPortMacAddr(nodeAnnotator kube.Annotator, macAddress string) error { - return nodeAnnotator.Set(ovnNodeManagementPortMacAddress, macAddress) +func SetNodeManagementPortMACAddress(nodeAnnotator kube.Annotator, macAddress net.HardwareAddr) error { + return nodeAnnotator.Set(ovnNodeManagementPortMacAddress, macAddress.String()) } -func ParseNodeManagementPortMacAddr(node *kapi.Node) (string, error) { - macAddress, ok := node.Annotations[ovnNodeManagementPortMacAddress] +func ParseNodeManagementPortMACAddress(node *kapi.Node) (net.HardwareAddr, error) { + macAddrString, ok := node.Annotations[ovnNodeManagementPortMacAddress] if !ok { klog.Errorf("macAddress annotation not found for node %q ", node.Name) - return "", nil + return nil, nil } - _, err := net.ParseMAC(macAddress) + macAddress, err := net.ParseMAC(macAddrString) if err != nil { - return "", fmt.Errorf("Error %v in parsing node %v macAddress %v", err, node.Name, macAddress) + return nil, fmt.Errorf("Error %v in parsing node %v macAddress %v", err, node.Name, macAddrString) } return macAddress, nil diff --git a/go-controller/pkg/util/util_test.go b/go-controller/pkg/util/util_test.go index 59721828bb..796bafbd48 100644 --- a/go-controller/pkg/util/util_test.go +++ 
b/go-controller/pkg/util/util_test.go @@ -34,19 +34,19 @@ var _ = Describe("Util tests", func() { { name: "IPv4 to MAC", IP: "10.1.2.3", - expectedMAC: "0A:58:0A:01:02:03", + expectedMAC: "0a:58:0a:01:02:03", }, { name: "IPv6 to MAC", IP: "fd98::1", - expectedMAC: "0A:58:FD:98:00:01", + expectedMAC: "0a:58:fd:98:00:01", }, } for _, tc := range testcases { ip := ovntest.MustParseIP(tc.IP) mac := IPAddrToHWAddr(ip) - Expect(mac).To(Equal(tc.expectedMAC), " test case \"%s\" returned %s instead of %s from IP %s", tc.name, mac, tc.expectedMAC, ip.String()) + Expect(mac.String()).To(Equal(tc.expectedMAC), " test case \"%s\" returned %s instead of %s from IP %s", tc.name, mac.String(), tc.expectedMAC, ip.String()) } }) }) From 039b522b02f27a37bf0c21d0d2ae67702385e2b7 Mon Sep 17 00:00:00 2001 From: Dan Winship Date: Sat, 4 Apr 2020 12:25:57 -0400 Subject: [PATCH 02/27] Use more net.IP / *net.IPNet and less string As with MAC addresses, pass IPs/CIDRs around internally as the parsed types and only convert to string when needed. 
Signed-off-by: Dan Winship --- .../hybrid-overlay/pkg/controller/master.go | 2 +- .../pkg/controller/master_test.go | 2 +- .../pkg/controller/node_linux_test.go | 2 +- go-controller/pkg/node/gateway_init.go | 26 ++--- .../pkg/node/gateway_init_linux_test.go | 8 +- go-controller/pkg/node/gateway_localnet.go | 46 ++++---- .../pkg/node/gateway_localnet_windows.go | 3 +- go-controller/pkg/node/gateway_shared_intf.go | 5 +- go-controller/pkg/node/helper_linux.go | 9 +- go-controller/pkg/node/helper_windows.go | 5 +- go-controller/pkg/node/management-port.go | 3 +- .../pkg/node/management-port_linux.go | 25 ++--- .../pkg/node/management-port_linux_test.go | 2 +- .../pkg/node/management-port_windows.go | 14 +-- go-controller/pkg/node/node.go | 2 +- go-controller/pkg/ovn/allocator/allocator.go | 8 +- .../pkg/ovn/allocator/allocator_test.go | 18 +-- go-controller/pkg/ovn/master.go | 31 +++--- go-controller/pkg/ovn/master_test.go | 29 ++--- go-controller/pkg/ovn/ovn.go | 2 +- go-controller/pkg/util/gateway-cleanup.go | 24 ++-- go-controller/pkg/util/gateway-init.go | 32 ++---- go-controller/pkg/util/net_linux.go | 103 +++++------------- go-controller/pkg/util/node_annotations.go | 23 ++-- go-controller/pkg/util/subnet_annotations.go | 12 +- 25 files changed, 180 insertions(+), 256 deletions(-) diff --git a/go-controller/hybrid-overlay/pkg/controller/master.go b/go-controller/hybrid-overlay/pkg/controller/master.go index f6ab7f7f04..5b4ef3ee09 100644 --- a/go-controller/hybrid-overlay/pkg/controller/master.go +++ b/go-controller/hybrid-overlay/pkg/controller/master.go @@ -33,7 +33,7 @@ func NewMaster(clientset kubernetes.Interface) (*MasterController, error) { // Add our hybrid overlay CIDRs to the allocator for _, clusterEntry := range config.HybridOverlay.ClusterSubnets { - err := m.allocator.AddNetworkRange(clusterEntry.CIDR.String(), 32-clusterEntry.HostSubnetLength) + err := m.allocator.AddNetworkRange(clusterEntry.CIDR, 32-clusterEntry.HostSubnetLength) if err != nil { 
return nil, err } diff --git a/go-controller/hybrid-overlay/pkg/controller/master_test.go b/go-controller/hybrid-overlay/pkg/controller/master_test.go index 577802148b..37e6c220e9 100644 --- a/go-controller/hybrid-overlay/pkg/controller/master_test.go +++ b/go-controller/hybrid-overlay/pkg/controller/master_test.go @@ -36,7 +36,7 @@ func addGetPortAddressesCmds(fexec *ovntest.FakeExec, nodeName, hybMAC, hybIP st func newTestNode(name, os, ovnHostSubnet, hybridHostSubnet, drMAC string) v1.Node { annotations := make(map[string]string) if ovnHostSubnet != "" { - subnetAnnotations, err := util.CreateNodeHostSubnetAnnotation(ovnHostSubnet) + subnetAnnotations, err := util.CreateNodeHostSubnetAnnotation(ovntest.MustParseIPNet(ovnHostSubnet)) Expect(err).NotTo(HaveOccurred()) for k, v := range subnetAnnotations { annotations[k] = fmt.Sprintf("%s", v) diff --git a/go-controller/hybrid-overlay/pkg/controller/node_linux_test.go b/go-controller/hybrid-overlay/pkg/controller/node_linux_test.go index 392b9c0459..9959209623 100644 --- a/go-controller/hybrid-overlay/pkg/controller/node_linux_test.go +++ b/go-controller/hybrid-overlay/pkg/controller/node_linux_test.go @@ -170,7 +170,7 @@ var _ = Describe("Hybrid Overlay Node Linux Operations", func() { node1IP string = "10.0.0.2" ) - subnetAnnotations, err := util.CreateNodeHostSubnetAnnotation(node1Subnet) + subnetAnnotations, err := util.CreateNodeHostSubnetAnnotation(ovntest.MustParseIPNet(node1Subnet)) Expect(err).NotTo(HaveOccurred()) annotations := make(map[string]string) for k, v := range subnetAnnotations { diff --git a/go-controller/pkg/node/gateway_init.go b/go-controller/pkg/node/gateway_init.go index 43f8c0e55a..1453e39a42 100644 --- a/go-controller/pkg/node/gateway_init.go +++ b/go-controller/pkg/node/gateway_init.go @@ -49,34 +49,28 @@ func bridgedGatewayNodeSetup(nodeName, bridgeName, bridgeInterface string, syncB } // getIPv4Address returns the ipv4 address for the network interface 'iface'. 
-func getIPv4Address(iface string) (string, error) { - var ipAddress string +func getIPv4Address(iface string) (*net.IPNet, error) { intf, err := net.InterfaceByName(iface) if err != nil { - return ipAddress, err + return nil, err } addrs, err := intf.Addrs() if err != nil { - return ipAddress, err + return nil, err } -loop: for _, addr := range addrs { switch ip := addr.(type) { case *net.IPNet: - if !utilnet.IsIPv6(ip.IP) { - ipAddress = ip.String() - } - // get the first ip address - if ipAddress != "" { - break loop + if !utilnet.IsIPv6CIDR(ip) { + return ip, nil } } } - return ipAddress, nil + return nil, nil } -func (n *OvnNode) initGateway(subnet string, nodeAnnotator kube.Annotator, +func (n *OvnNode) initGateway(subnet *net.IPNet, nodeAnnotator kube.Annotator, waiter *startupWaiter) error { if config.Gateway.NodeportEnable { @@ -92,16 +86,16 @@ func (n *OvnNode) initGateway(subnet string, nodeAnnotator kube.Annotator, case config.GatewayModeLocal: err = initLocalnetGateway(n.name, subnet, n.watchFactory, nodeAnnotator) case config.GatewayModeShared: - gatewayNextHop := config.Gateway.NextHop + gatewayNextHop := net.ParseIP(config.Gateway.NextHop) gatewayIntf := config.Gateway.Interface - if gatewayNextHop == "" || gatewayIntf == "" { + if gatewayNextHop == nil || gatewayIntf == "" { // We need to get the interface details from the default gateway. 
defaultGatewayIntf, defaultGatewayNextHop, err := getDefaultGatewayInterfaceDetails() if err != nil { return err } - if gatewayNextHop == "" { + if gatewayNextHop == nil { gatewayNextHop = defaultGatewayNextHop } diff --git a/go-controller/pkg/node/gateway_init_linux_test.go b/go-controller/pkg/node/gateway_init_linux_test.go index 387c3a5655..f2c432ae38 100644 --- a/go-controller/pkg/node/gateway_init_linux_test.go +++ b/go-controller/pkg/node/gateway_init_linux_test.go @@ -182,7 +182,7 @@ cookie=0x0, duration=8366.597s, table=1, n_packets=10641, n_bytes=10370087, prio nodeAnnotator := kube.NewNodeAnnotator(&kube.Kube{fakeClient}, &existingNode) - err = util.SetNodeHostSubnetAnnotation(nodeAnnotator, nodeSubnet) + err = util.SetNodeHostSubnetAnnotation(nodeAnnotator, ovntest.MustParseIPNet(nodeSubnet)) Expect(err).NotTo(HaveOccurred()) err = nodeAnnotator.Run() Expect(err).NotTo(HaveOccurred()) @@ -191,7 +191,7 @@ cookie=0x0, duration=8366.597s, table=1, n_packets=10641, n_bytes=10370087, prio defer GinkgoRecover() waiter := newStartupWaiter() - err = n.initGateway(nodeSubnet, nodeAnnotator, waiter) + err = n.initGateway(ovntest.MustParseIPNet(nodeSubnet), nodeAnnotator, waiter) Expect(err).NotTo(HaveOccurred()) err = nodeAnnotator.Run() @@ -347,7 +347,7 @@ var _ = Describe("Gateway Init Operations", func() { util.SetIPTablesHelper(iptables.ProtocolIPv4, ipt) nodeAnnotator := kube.NewNodeAnnotator(&kube.Kube{fakeClient}, &existingNode) - err = util.SetNodeHostSubnetAnnotation(nodeAnnotator, nodeSubnet) + err = util.SetNodeHostSubnetAnnotation(nodeAnnotator, ovntest.MustParseIPNet(nodeSubnet)) Expect(err).NotTo(HaveOccurred()) err = nodeAnnotator.Run() Expect(err).NotTo(HaveOccurred()) @@ -355,7 +355,7 @@ var _ = Describe("Gateway Init Operations", func() { err = testNS.Do(func(ns.NetNS) error { defer GinkgoRecover() - err = initLocalnetGateway(nodeName, nodeSubnet, wf, nodeAnnotator) + err = initLocalnetGateway(nodeName, ovntest.MustParseIPNet(nodeSubnet), wf, 
nodeAnnotator) Expect(err).NotTo(HaveOccurred()) // Check if IP has been assigned to LocalnetGatewayNextHopPort link, err := netlink.LinkByName(localnetGatewayNextHopPort) diff --git a/go-controller/pkg/node/gateway_localnet.go b/go-controller/pkg/node/gateway_localnet.go index 49f7941787..240360e795 100644 --- a/go-controller/pkg/node/gateway_localnet.go +++ b/go-controller/pkg/node/gateway_localnet.go @@ -23,11 +23,11 @@ import ( const ( v4localnetGatewayIP = "169.254.33.2" v4localnetGatewayNextHop = "169.254.33.1" - v4localnetGatewaySubnetPrefix = "/24" + v4localnetGatewaySubnetPrefix = 24 v6localnetGatewayIP = "fd99::2" v6localnetGatewayNextHop = "fd99::1" - v6localnetGatewaySubnetPrefix = "/64" + v6localnetGatewaySubnetPrefix = 64 // localnetGatewayNextHopPort is the name of the gateway port on the host to which all // the packets leaving the OVN logical topology will be forwarded @@ -87,7 +87,7 @@ func delIptRules(ipt util.IPTablesHelper, rules []iptRule) { } } -func generateGatewayNATRules(ifname string, ip string) []iptRule { +func generateGatewayNATRules(ifname string, ip net.IP) []iptRule { // Allow packets to/from the gateway interface in case defaults deny rules := make([]iptRule, 0) rules = append(rules, iptRule{ @@ -111,17 +111,17 @@ func generateGatewayNATRules(ifname string, ip string) []iptRule { rules = append(rules, iptRule{ table: "nat", chain: "POSTROUTING", - args: []string{"-s", ip, "-j", "MASQUERADE"}, + args: []string{"-s", ip.String(), "-j", "MASQUERADE"}, }) return rules } -func localnetGatewayNAT(ipt util.IPTablesHelper, ifname, ip string) error { +func localnetGatewayNAT(ipt util.IPTablesHelper, ifname string, ip net.IP) error { rules := generateGatewayNATRules(ifname, ip) return addIptRules(ipt, rules) } -func initLocalnetGateway(nodeName string, subnet string, wf *factory.WatchFactory, nodeAnnotator kube.Annotator) error { +func initLocalnetGateway(nodeName string, subnet *net.IPNet, wf *factory.WatchFactory, nodeAnnotator 
kube.Annotator) error { // Create a localnet OVS bridge. localnetBridgeName := "br-local" _, stderr, err := util.RunOVSVsctl("--may-exist", "add-br", @@ -156,33 +156,35 @@ func initLocalnetGateway(nodeName string, subnet string, wf *factory.WatchFactor return err } - var gatewayIP, gatewayNextHop, gatewaySubnetPrefix string - if utilnet.IsIPv6CIDRString(subnet) { - gatewayIP = v6localnetGatewayIP - gatewayNextHop = v6localnetGatewayNextHop - gatewaySubnetPrefix = v6localnetGatewaySubnetPrefix + var gatewayIP, gatewayNextHop net.IP + var gatewaySubnetMask net.IPMask + if utilnet.IsIPv6CIDR(subnet) { + gatewayIP = net.ParseIP(v6localnetGatewayIP) + gatewayNextHop = net.ParseIP(v6localnetGatewayNextHop) + gatewaySubnetMask = net.CIDRMask(v6localnetGatewaySubnetPrefix, 128) } else { - gatewayIP = v4localnetGatewayIP - gatewayNextHop = v4localnetGatewayNextHop - gatewaySubnetPrefix = v4localnetGatewaySubnetPrefix + gatewayIP = net.ParseIP(v4localnetGatewayIP) + gatewayNextHop = net.ParseIP(v4localnetGatewayNextHop) + gatewaySubnetMask = net.CIDRMask(v4localnetGatewaySubnetPrefix, 32) } + gatewayIPCIDR := &net.IPNet{IP: gatewayIP, Mask: gatewaySubnetMask} + gatewayNextHopCIDR := &net.IPNet{IP: gatewayNextHop, Mask: gatewaySubnetMask} // Flush any addresses on localnetBridgeNextHopPort and add the new IP address. if err = util.LinkAddrFlush(link); err == nil { - err = util.LinkAddrAdd(link, gatewayNextHop+gatewaySubnetPrefix) + err = util.LinkAddrAdd(link, gatewayNextHopCIDR) } if err != nil { return err } err = util.SetLocalL3GatewayConfig(nodeAnnotator, ifaceID, macAddress, - gatewayIP+gatewaySubnetPrefix, gatewayNextHop, - config.Gateway.NodeportEnable) + gatewayIPCIDR, gatewayNextHop, config.Gateway.NodeportEnable) if err != nil { return err } - if utilnet.IsIPv6CIDRString(subnet) { + if utilnet.IsIPv6CIDR(subnet) { // TODO - IPv6 hack ... 
for some reason neighbor discovery isn't working here, so hard code a // MAC binding for the gateway IP address for now - need to debug this further err = util.LinkNeighAdd(link, gatewayIP, macAddress) @@ -211,10 +213,10 @@ func initLocalnetGateway(nodeName string, subnet string, wf *factory.WatchFactor } // localnetIPTablesHelper gets an IPTablesHelper for IPv4 or IPv6 as appropriate -func localnetIPTablesHelper(subnet string) (util.IPTablesHelper, error) { +func localnetIPTablesHelper(subnet *net.IPNet) (util.IPTablesHelper, error) { var ipt util.IPTablesHelper var err error - if utilnet.IsIPv6CIDRString(subnet) { + if utilnet.IsIPv6CIDR(subnet) { ipt, err = util.GetIPTablesHelper(iptables.ProtocolIPv6) } else { ipt, err = util.GetIPTablesHelper(iptables.ProtocolIPv4) @@ -279,7 +281,7 @@ func (npw *localnetNodePortWatcherData) deleteService(svc *kapi.Service) error { return nil } -func localnetNodePortWatcher(ipt util.IPTablesHelper, wf *factory.WatchFactory, gatewayIP string) error { +func localnetNodePortWatcher(ipt util.IPTablesHelper, wf *factory.WatchFactory, gatewayIP net.IP) error { // delete all the existing OVN-NODEPORT rules // TODO: Add a localnetSyncService method to remove the stale entries only _ = ipt.ClearChain("nat", iptableNodePortChain) @@ -306,7 +308,7 @@ func localnetNodePortWatcher(ipt util.IPTablesHelper, wf *factory.WatchFactory, return err } - npw := &localnetNodePortWatcherData{ipt: ipt, gatewayIP: gatewayIP} + npw := &localnetNodePortWatcherData{ipt: ipt, gatewayIP: gatewayIP.String()} _, err := wf.AddServiceHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { svc := obj.(*kapi.Service) diff --git a/go-controller/pkg/node/gateway_localnet_windows.go b/go-controller/pkg/node/gateway_localnet_windows.go index 5a9808ec8a..7103f986e8 100644 --- a/go-controller/pkg/node/gateway_localnet_windows.go +++ b/go-controller/pkg/node/gateway_localnet_windows.go @@ -4,12 +4,13 @@ package node import ( "fmt" + "net" 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" ) -func initLocalnetGateway(nodeName string, subnet string, +func initLocalnetGateway(nodeName string, subnet *net.IPNet, wf *factory.WatchFactory, nodeAnnotator kube.Annotator) error { // TODO: Implement this return fmt.Errorf("Not implemented yet on Windows") diff --git a/go-controller/pkg/node/gateway_shared_intf.go b/go-controller/pkg/node/gateway_shared_intf.go index 3183696d6e..6d1fab4398 100644 --- a/go-controller/pkg/node/gateway_shared_intf.go +++ b/go-controller/pkg/node/gateway_shared_intf.go @@ -2,6 +2,7 @@ package node import ( "fmt" + "net" "reflect" "regexp" "strings" @@ -284,7 +285,7 @@ func addDefaultConntrackRules(nodeName, gwBridge, gwIntf string, stopChan chan s return nil } -func (n *OvnNode) initSharedGateway(subnet, gwNextHop, gwIntf string, +func (n *OvnNode) initSharedGateway(subnet *net.IPNet, gwNextHop net.IP, gwIntf string, nodeAnnotator kube.Annotator) (postWaitFunc, error) { var bridgeName string var uplinkName string @@ -324,7 +325,7 @@ func (n *OvnNode) initSharedGateway(subnet, gwNextHop, gwIntf string, return nil, fmt.Errorf("Failed to get interface details for %s (%v)", gwIntf, err) } - if ipAddress == "" { + if ipAddress == nil { return nil, fmt.Errorf("%s does not have a ipv4 address", gwIntf) } diff --git a/go-controller/pkg/node/helper_linux.go b/go-controller/pkg/node/helper_linux.go index 5faaea1cf9..b20ed7ef65 100644 --- a/go-controller/pkg/node/helper_linux.go +++ b/go-controller/pkg/node/helper_linux.go @@ -4,6 +4,7 @@ package node import ( "fmt" + "net" "syscall" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" @@ -14,10 +15,10 @@ import ( // getDefaultGatewayInterfaceDetails returns the interface name on // which the default gateway (for route to 0.0.0.0) is configured. // It also returns the default gateway itself. 
-func getDefaultGatewayInterfaceDetails() (string, string, error) { +func getDefaultGatewayInterfaceDetails() (string, net.IP, error) { routes, err := netlink.RouteList(nil, syscall.AF_INET) if err != nil { - return "", "", fmt.Errorf("Failed to get routing table in node") + return "", nil, fmt.Errorf("Failed to get routing table in node") } for i := range routes { @@ -29,11 +30,11 @@ func getDefaultGatewayInterfaceDetails() (string, string, error) { } intfName := intfLink.Attrs().Name if intfName != "" { - return intfName, route.Gw.String(), nil + return intfName, route.Gw, nil } } } - return "", "", fmt.Errorf("Failed to get default gateway interface") + return "", nil, fmt.Errorf("Failed to get default gateway interface") } func getIntfName(gatewayIntf string) (string, error) { diff --git a/go-controller/pkg/node/helper_windows.go b/go-controller/pkg/node/helper_windows.go index 23644366e6..0d7f2448fe 100644 --- a/go-controller/pkg/node/helper_windows.go +++ b/go-controller/pkg/node/helper_windows.go @@ -4,6 +4,7 @@ package node import ( "fmt" + "net" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) @@ -11,9 +12,9 @@ import ( // getDefaultGatewayInterfaceDetails returns the interface name on // which the default gateway (for route to 0.0.0.0) is configured. // It also returns the default gateway itself. 
-func getDefaultGatewayInterfaceDetails() (string, string, error) { +func getDefaultGatewayInterfaceDetails() (string, net.IP, error) { // TODO: Implement this - return "", "", fmt.Errorf("Not implemented yet on Windows") + return "", nil, fmt.Errorf("Not implemented yet on Windows") } func getIntfName(gatewayIntf string) (string, error) { diff --git a/go-controller/pkg/node/management-port.go b/go-controller/pkg/node/management-port.go index 1750c32a53..498fc4120a 100644 --- a/go-controller/pkg/node/management-port.go +++ b/go-controller/pkg/node/management-port.go @@ -61,8 +61,7 @@ func (n *OvnNode) createManagementPort(localSubnet *net.IPNet, nodeAnnotator kub return err } - err = createPlatformManagementPort(util.K8sMgmtIntfName, portIP.String(), routerIP.IP.String(), - routerMAC, n.stopChan) + err = createPlatformManagementPort(util.K8sMgmtIntfName, portIP, routerIP.IP, routerMAC, n.stopChan) if err != nil { return err } diff --git a/go-controller/pkg/node/management-port_linux.go b/go-controller/pkg/node/management-port_linux.go index f9e111dd09..dd0d1527bc 100644 --- a/go-controller/pkg/node/management-port_linux.go +++ b/go-controller/pkg/node/management-port_linux.go @@ -6,7 +6,6 @@ import ( "fmt" "net" "os" - "strings" "time" "github.com/coreos/go-iptables/iptables" @@ -25,15 +24,14 @@ const ( type managementPortConfig struct { link netlink.Link ipt util.IPTablesHelper - allSubnets []string + allSubnets []*net.IPNet ifName string - ifIPMask string - ifIP string - routerIP string + ifIPMask *net.IPNet + routerIP net.IP routerMAC net.HardwareAddr } -func newManagementPortConfig(interfaceName, interfaceIP, routerIP string, routerMAC net.HardwareAddr) (*managementPortConfig, error) { +func newManagementPortConfig(interfaceName string, interfaceIP *net.IPNet, routerIP net.IP, routerMAC net.HardwareAddr) (*managementPortConfig, error) { var err error cfg := &managementPortConfig{} @@ -47,14 +45,11 @@ func newManagementPortConfig(interfaceName, interfaceIP, 
routerIP string, router // capture all the subnets for which we need to add routes through management port for _, subnet := range config.Default.ClusterSubnets { - cfg.allSubnets = append(cfg.allSubnets, subnet.CIDR.String()) - } - for _, subnet := range config.Kubernetes.ServiceCIDRs { - cfg.allSubnets = append(cfg.allSubnets, subnet.String()) + cfg.allSubnets = append(cfg.allSubnets, subnet.CIDR) } + cfg.allSubnets = append(cfg.allSubnets, config.Kubernetes.ServiceCIDRs...) - cfg.ifIP = strings.Split(cfg.ifIPMask, "/")[0] - if utilnet.IsIPv6(net.ParseIP(cfg.ifIP)) { + if utilnet.IsIPv6CIDR(cfg.ifIPMask) { cfg.ipt, err = util.GetIPTablesHelper(iptables.ProtocolIPv6) } else { cfg.ipt, err = util.GetIPTablesHelper(iptables.ProtocolIPv4) @@ -105,7 +100,7 @@ func setupManagementPortConfig(cfg *managementPortConfig) ([]string, error) { // we need to warn so that it can be debugged as to why routes are disappearing warnings = append(warnings, fmt.Sprintf("missing route entry for subnet %s via gateway %s on link %v", subnet, cfg.routerIP, cfg.ifName)) - err = util.LinkRoutesAdd(cfg.link, cfg.routerIP, []string{subnet}) + err = util.LinkRoutesAdd(cfg.link, cfg.routerIP, []*net.IPNet{subnet}) if err != nil { if os.IsExist(err) { klog.V(5).Infof("Ignoring error %s from 'route add %s via %s' - already added via IPv6 RA?", @@ -142,7 +137,7 @@ func setupManagementPortConfig(cfg *managementPortConfig) ([]string, error) { if err != nil { return warnings, fmt.Errorf("could not set up iptables chain rules for management port: %v", err) } - rule = []string{"-o", cfg.ifName, "-j", "SNAT", "--to-source", cfg.ifIP, + rule = []string{"-o", cfg.ifName, "-j", "SNAT", "--to-source", cfg.ifIPMask.IP.String(), "-m", "comment", "--comment", "OVN SNAT to Management Port"} if exists, err = cfg.ipt.Exists("nat", iptableMgmPortChain, rule...); err == nil && !exists { warnings = append(warnings, fmt.Sprintf("missing management port nat rule in chain %s, adding it", @@ -159,7 +154,7 @@ func 
setupManagementPortConfig(cfg *managementPortConfig) ([]string, error) { // createPlatformManagementPort creates a management port attached to the node switch // that lets the node access its pods via their private IP address. This is used // for health checking and other management tasks. -func createPlatformManagementPort(interfaceName, interfaceIP, routerIP string, routerMAC net.HardwareAddr, +func createPlatformManagementPort(interfaceName string, interfaceIP *net.IPNet, routerIP net.IP, routerMAC net.HardwareAddr, stopChan chan struct{}) error { var cfg *managementPortConfig var err error diff --git a/go-controller/pkg/node/management-port_linux_test.go b/go-controller/pkg/node/management-port_linux_test.go index 7a3e92d117..a4a3393d50 100644 --- a/go-controller/pkg/node/management-port_linux_test.go +++ b/go-controller/pkg/node/management-port_linux_test.go @@ -116,7 +116,7 @@ func testManagementPort(ctx *cli.Context, fexec *ovntest.FakeExec, testNS ns.Net Expect(err).NotTo(HaveOccurred()) nodeAnnotator := kube.NewNodeAnnotator(&kube.Kube{fakeClient}, &existingNode) - err = util.SetNodeHostSubnetAnnotation(nodeAnnotator, nodeSubnet) + err = util.SetNodeHostSubnetAnnotation(nodeAnnotator, ovntest.MustParseIPNet(nodeSubnet)) Expect(err).NotTo(HaveOccurred()) err = nodeAnnotator.Run() Expect(err).NotTo(HaveOccurred()) diff --git a/go-controller/pkg/node/management-port_windows.go b/go-controller/pkg/node/management-port_windows.go index 4e0b282f80..f56ee156a7 100644 --- a/go-controller/pkg/node/management-port_windows.go +++ b/go-controller/pkg/node/management-port_windows.go @@ -18,7 +18,7 @@ import ( // createPlatformManagementPort creates a management port attached to the node switch // that lets the node access its pods via their private IP address. This is used // for health checking and other management tasks. 
-func createPlatformManagementPort(interfaceName, interfaceIP, routerIP string, +func createPlatformManagementPort(interfaceName string, interfaceIP *net.IPNet, routerIP net.IP, routerMAC net.HardwareAddr, stopChan chan struct{}) error { // Up the interface. _, _, err := util.RunPowershell("Enable-NetAdapter", "-IncludeHidden", interfaceName) @@ -39,13 +39,9 @@ func createPlatformManagementPort(interfaceName, interfaceIP, routerIP string, } // Assign IP address to the internal interface. - portIP, interfaceIPNet, err := net.ParseCIDR(interfaceIP) - if err != nil { - return fmt.Errorf("Failed to parse interfaceIP %v : %v", interfaceIP, err) - } - portPrefix, _ := interfaceIPNet.Mask.Size() + portPrefix, _ := interfaceIP.Mask.Size() _, _, err = util.RunPowershell("New-NetIPAddress", - fmt.Sprintf("-IPAddress %s", portIP), + fmt.Sprintf("-IPAddress %s", interfaceIP.IP), fmt.Sprintf("-PrefixLength %d", portPrefix), ifAlias) if err != nil { @@ -89,7 +85,7 @@ func createPlatformManagementPort(interfaceName, interfaceIP, routerIP string, return nil } -func addRoute(subnet *net.IPNet, routerIP, interfaceIndex string) error { +func addRoute(subnet *net.IPNet, routerIP net.IP, interfaceIndex string) error { var familyFlag string if utilnet.IsIPv6CIDR(subnet) { familyFlag = "-6" @@ -112,7 +108,7 @@ func addRoute(subnet *net.IPNet, routerIP, interfaceIndex string) error { // Create a route for the entire subnet. 
_, stderr, err = util.RunRoute("-p", "add", subnet.IP.String(), "mask", subnetMask, - routerIP, "METRIC", "2", "IF", interfaceIndex) + routerIP.String(), "METRIC", "2", "IF", interfaceIndex) if err != nil { return fmt.Errorf("failed to run route add, stderr: %q, error: %v", stderr, err) } diff --git a/go-controller/pkg/node/node.go b/go-controller/pkg/node/node.go index 87c005d7b7..2a31efcb07 100644 --- a/go-controller/pkg/node/node.go +++ b/go-controller/pkg/node/node.go @@ -201,7 +201,7 @@ func (n *OvnNode) Start() error { waiter := newStartupWaiter() // Initialize gateway resources on the node - if err := n.initGateway(subnet.String(), nodeAnnotator, waiter); err != nil { + if err := n.initGateway(subnet, nodeAnnotator, waiter); err != nil { return err } diff --git a/go-controller/pkg/ovn/allocator/allocator.go b/go-controller/pkg/ovn/allocator/allocator.go index 55f0477156..7718c0842a 100644 --- a/go-controller/pkg/ovn/allocator/allocator.go +++ b/go-controller/pkg/ovn/allocator/allocator.go @@ -21,15 +21,11 @@ func NewSubnetAllocator() *SubnetAllocator { return &SubnetAllocator{} } -func (sna *SubnetAllocator) AddNetworkRange(network string, hostBits uint32) error { +func (sna *SubnetAllocator) AddNetworkRange(network *net.IPNet, hostBits uint32) error { sna.Lock() defer sna.Unlock() - _, ipnet, err := net.ParseCIDR(network) - if err != nil { - return err - } - snr, err := newSubnetAllocatorRange(ipnet, hostBits) + snr, err := newSubnetAllocatorRange(network, hostBits) if err != nil { return err } diff --git a/go-controller/pkg/ovn/allocator/allocator_test.go b/go-controller/pkg/ovn/allocator/allocator_test.go index 1c56a856fe..5c9ecbb130 100644 --- a/go-controller/pkg/ovn/allocator/allocator_test.go +++ b/go-controller/pkg/ovn/allocator/allocator_test.go @@ -10,7 +10,7 @@ import ( func newSubnetAllocator(clusterCIDR string, hostBits uint32) (*SubnetAllocator, error) { sna := NewSubnetAllocator() - err := sna.AddNetworkRange(clusterCIDR, hostBits) + err := 
sna.AddNetworkRange(ovntest.MustParseIPNet(clusterCIDR), hostBits) return sna, err } @@ -279,11 +279,6 @@ func TestAllocateSubnetInvalidHostBitsOrCIDR(t *testing.T) { t.Fatal("Unexpectedly succeeded in initializing subnet allocator") } - _, err = newSubnetAllocator("10.1.0.0/33", 16) - if err == nil { - t.Fatal("Unexpectedly succeeded in initializing subnet allocator") - } - _, err = newSubnetAllocator("fd01::/64", 66) if err == nil { t.Fatal("Unexpectedly succeeded in initializing subnet allocator") @@ -293,11 +288,6 @@ func TestAllocateSubnetInvalidHostBitsOrCIDR(t *testing.T) { if err == nil { t.Fatal("Unexpectedly succeeded in initializing subnet allocator") } - - _, err = newSubnetAllocator("fd01::/129", 64) - if err == nil { - t.Fatal("Unexpectedly succeeded in initializing subnet allocator") - } } func TestMarkAllocatedNetwork(t *testing.T) { @@ -384,7 +374,7 @@ func TestMultipleSubnets(t *testing.T) { if err != nil { t.Fatal("Failed to initialize IP allocator: ", err) } - err = sna.AddNetworkRange("10.2.0.0/16", 14) + err = sna.AddNetworkRange(ovntest.MustParseIPNet("10.2.0.0/16"), 14) if err != nil { t.Fatal("Failed to add network range: ", err) } @@ -431,11 +421,11 @@ func TestDualStack(t *testing.T) { if err != nil { t.Fatal("Failed to initialize IP allocator: ", err) } - err = sna.AddNetworkRange("10.2.0.0/16", 14) + err = sna.AddNetworkRange(ovntest.MustParseIPNet("10.2.0.0/16"), 14) if err != nil { t.Fatal("Failed to add network range: ", err) } - err = sna.AddNetworkRange("fd01::/48", 64) + err = sna.AddNetworkRange(ovntest.MustParseIPNet("fd01::/48"), 64) if err != nil { t.Fatal("Failed to add network range: ", err) } diff --git a/go-controller/pkg/ovn/master.go b/go-controller/pkg/ovn/master.go index f5da10d8e6..49c286cfd7 100644 --- a/go-controller/pkg/ovn/master.go +++ b/go-controller/pkg/ovn/master.go @@ -111,7 +111,8 @@ func (oc *Controller) StartClusterMaster(masterNodeName string) error { if config.IPv6Mode { joinSubnet = config.V6JoinSubnet 
} - _ = oc.joinSubnetAllocator.AddNetworkRange(joinSubnet, 3) + _, joinSubnetCIDR, _ := net.ParseCIDR(joinSubnet) + _ = oc.joinSubnetAllocator.AddNetworkRange(joinSubnetCIDR, 3) existingNodes, err := oc.kube.GetNodes() if err != nil { @@ -119,7 +120,7 @@ func (oc *Controller) StartClusterMaster(masterNodeName string) error { return err } for _, clusterEntry := range config.Default.ClusterSubnets { - err := oc.masterSubnetAllocator.AddNetworkRange(clusterEntry.CIDR.String(), clusterEntry.HostBits()) + err := oc.masterSubnetAllocator.AddNetworkRange(clusterEntry.CIDR, clusterEntry.HostBits()) if err != nil { return err } @@ -252,11 +253,11 @@ func (oc *Controller) SetupMaster(masterNodeName string) error { return nil } -func (oc *Controller) addNodeJoinSubnetAnnotations(node *kapi.Node, subnet string) error { +func (oc *Controller) addNodeJoinSubnetAnnotations(node *kapi.Node, subnet *net.IPNet) error { nodeAnnotations, err := util.CreateNodeJoinSubnetAnnotation(subnet) if err != nil { return fmt.Errorf("failed to marshal node %q join subnets annotation for subnet %s", - node.Name, subnet) + node.Name, subnet.String()) } err = oc.kube.SetAnnotationsOnNode(node, nodeAnnotations) if err != nil { @@ -290,7 +291,7 @@ func (oc *Controller) allocateJoinSubnet(node *kapi.Node) (*net.IPNet, error) { }() // Set annotation on the node - err = oc.addNodeJoinSubnetAnnotations(node, joinSubnet.String()) + err = oc.addNodeJoinSubnetAnnotations(node, joinSubnet) if err != nil { return nil, err } @@ -353,11 +354,11 @@ func (oc *Controller) syncNodeManagementPort(node *kapi.Node, subnet *net.IPNet) return nil } -func (oc *Controller) syncGatewayLogicalNetwork(node *kapi.Node, l3GatewayConfig *util.L3GatewayConfig, subnet string) error { +func (oc *Controller) syncGatewayLogicalNetwork(node *kapi.Node, l3GatewayConfig *util.L3GatewayConfig, subnet *net.IPNet) error { var err error - var clusterSubnets []string + var clusterSubnets []*net.IPNet for _, clusterSubnet := range 
config.Default.ClusterSubnets { - clusterSubnets = append(clusterSubnets, clusterSubnet.CIDR.String()) + clusterSubnets = append(clusterSubnets, clusterSubnet.CIDR) } // get a subnet for the per-node join switch @@ -374,7 +375,7 @@ func (oc *Controller) syncGatewayLogicalNetwork(node *kapi.Node, l3GatewayConfig if l3GatewayConfig.Mode == config.GatewayModeShared { // Add static routes to OVN Cluster Router to enable pods on this Node to // reach the host IP - err = addStaticRouteToHost(node, l3GatewayConfig.IPAddress.String()) + err = addStaticRouteToHost(node, l3GatewayConfig.IPAddress) if err != nil { return err } @@ -399,7 +400,7 @@ func (oc *Controller) syncGatewayLogicalNetwork(node *kapi.Node, l3GatewayConfig return err } -func addStaticRouteToHost(node *kapi.Node, nicIP string) error { +func addStaticRouteToHost(node *kapi.Node, nicIP *net.IPNet) error { k8sClusterRouter := util.GetK8sClusterRouter() subnet, err := util.ParseNodeHostSubnetAnnotation(node) if err != nil { @@ -407,8 +408,8 @@ func addStaticRouteToHost(node *kapi.Node, nicIP string) error { util.K8sMgmtIntfName, err) } _, secondIP := util.GetNodeWellKnownAddresses(subnet) - prefix := strings.Split(nicIP, "/")[0] + "/32" - nexthop := strings.Split(secondIP.String(), "/")[0] + prefix := nicIP.IP.String() + "/32" + nexthop := secondIP.IP.String() _, stderr, err := util.RunOVNNbctl("--may-exist", "lr-route-add", k8sClusterRouter, prefix, nexthop) if err != nil { return fmt.Errorf("failed to add static route '%s via %s' for host %q on %s "+ @@ -568,11 +569,11 @@ func (oc *Controller) ensureNodeLogicalNetwork(nodeName string, hostsubnet *net. 
return nil } -func (oc *Controller) addNodeAnnotations(node *kapi.Node, subnet string) error { +func (oc *Controller) addNodeAnnotations(node *kapi.Node, subnet *net.IPNet) error { nodeAnnotations, err := util.CreateNodeHostSubnetAnnotation(subnet) if err != nil { return fmt.Errorf("failed to marshal node %q annotation for subnet %s", - node.Name, subnet) + node.Name, subnet.String()) } err = oc.kube.SetAnnotationsOnNode(node, nodeAnnotations) if err != nil { @@ -618,7 +619,7 @@ func (oc *Controller) addNode(node *kapi.Node) (hostsubnet *net.IPNet, err error // Set the HostSubnet annotation on the node object to signal // to nodes that their logical infrastructure is set up and they can // proceed with their initialization - err = oc.addNodeAnnotations(node, hostsubnet.String()) + err = oc.addNodeAnnotations(node, hostsubnet) if err != nil { return nil, err } diff --git a/go-controller/pkg/ovn/master_test.go b/go-controller/pkg/ovn/master_test.go index 3882af8ab9..02e2f25972 100644 --- a/go-controller/pkg/ovn/master_test.go +++ b/go-controller/pkg/ovn/master_test.go @@ -409,7 +409,7 @@ var _ = Describe("Master Operations", func() { Expect(err).NotTo(HaveOccurred()) err = util.SetNodeManagementPortMACAddress(nodeAnnotator, ovntest.MustParseMAC(mgmtMAC)) Expect(err).NotTo(HaveOccurred()) - err = util.SetNodeHostSubnetAnnotation(nodeAnnotator, nodeSubnet) + err = util.SetNodeHostSubnetAnnotation(nodeAnnotator, ovntest.MustParseIPNet(nodeSubnet)) Expect(err).NotTo(HaveOccurred()) err = nodeAnnotator.Run() Expect(err).NotTo(HaveOccurred()) @@ -599,7 +599,7 @@ subnet=%s Expect(err).NotTo(HaveOccurred()) err = util.SetNodeManagementPortMACAddress(nodeAnnotator, ovntest.MustParseMAC(masterMgmtPortMAC)) Expect(err).NotTo(HaveOccurred()) - err = util.SetNodeHostSubnetAnnotation(nodeAnnotator, masterSubnet) + err = util.SetNodeHostSubnetAnnotation(nodeAnnotator, ovntest.MustParseIPNet(masterSubnet)) Expect(err).NotTo(HaveOccurred()) err = nodeAnnotator.Run() 
Expect(err).NotTo(HaveOccurred()) @@ -615,7 +615,7 @@ subnet=%s clusterController.UDPLoadBalancerUUID = udpLBUUID clusterController.SCTPLoadBalancerUUID = sctpLBUUID clusterController.SCTPSupport = true - _ = clusterController.joinSubnetAllocator.AddNetworkRange("100.64.0.0/16", 3) + _ = clusterController.joinSubnetAllocator.AddNetworkRange(ovntest.MustParseIPNet("100.64.0.0/16"), 3) // Let the real code run and ensure OVN database sync err = clusterController.WatchNodes() @@ -706,14 +706,16 @@ var _ = Describe("Gateway Init Operations", func() { nodeAnnotator := kube.NewNodeAnnotator(&kube.Kube{fakeClient}, &testNode) ifaceID := localnetBridgeName + "_" + nodeName err = util.SetLocalL3GatewayConfig(nodeAnnotator, ifaceID, - ovntest.MustParseMAC(brLocalnetMAC), localnetGatewayIP, localnetGatewayNextHop, + ovntest.MustParseMAC(brLocalnetMAC), + ovntest.MustParseIPNet(localnetGatewayIP), + ovntest.MustParseIP(localnetGatewayNextHop), true) Expect(err).NotTo(HaveOccurred()) err = util.SetNodeManagementPortMACAddress(nodeAnnotator, ovntest.MustParseMAC(brLocalnetMAC)) Expect(err).NotTo(HaveOccurred()) - err = util.SetNodeHostSubnetAnnotation(nodeAnnotator, nodeSubnet) + err = util.SetNodeHostSubnetAnnotation(nodeAnnotator, ovntest.MustParseIPNet(nodeSubnet)) Expect(err).NotTo(HaveOccurred()) - err = util.SetNodeJoinSubnetAnnotation(nodeAnnotator, joinSubnet) + err = util.SetNodeJoinSubnetAnnotation(nodeAnnotator, ovntest.MustParseIPNet(joinSubnet)) Expect(err).NotTo(HaveOccurred()) err = nodeAnnotator.Run() Expect(err).NotTo(HaveOccurred()) @@ -812,14 +814,14 @@ var _ = Describe("Gateway Init Operations", func() { clusterController.UDPLoadBalancerUUID = udpLBUUID clusterController.SCTPLoadBalancerUUID = sctpLBUUID clusterController.SCTPSupport = true - _ = clusterController.joinSubnetAllocator.AddNetworkRange("100.64.0.0/16", 3) + _ = clusterController.joinSubnetAllocator.AddNetworkRange(ovntest.MustParseIPNet("100.64.0.0/16"), 3) // Let the real code run and ensure 
OVN database sync err = clusterController.WatchNodes() Expect(err).NotTo(HaveOccurred()) subnet := ovntest.MustParseIPNet(nodeSubnet) - err = clusterController.syncGatewayLogicalNetwork(updatedNode, l3GatewayConfig, subnet.String()) + err = clusterController.syncGatewayLogicalNetwork(updatedNode, l3GatewayConfig, subnet) Expect(err).NotTo(HaveOccurred()) Expect(fexec.CalledMatchesExpected()).To(BeTrue(), fexec.ErrorDesc) @@ -891,13 +893,14 @@ var _ = Describe("Gateway Init Operations", func() { ifaceID := physicalBridgeName + "_" + nodeName err = util.SetSharedL3GatewayConfig(nodeAnnotator, ifaceID, ovntest.MustParseMAC(physicalBridgeMAC), - physicalGatewayIPMask, physicalGatewayNextHop, + ovntest.MustParseIPNet(physicalGatewayIPMask), + ovntest.MustParseIP(physicalGatewayNextHop), true, 1024) err = util.SetNodeManagementPortMACAddress(nodeAnnotator, ovntest.MustParseMAC(nodeMgmtPortMAC)) Expect(err).NotTo(HaveOccurred()) - err = util.SetNodeHostSubnetAnnotation(nodeAnnotator, nodeSubnet) + err = util.SetNodeHostSubnetAnnotation(nodeAnnotator, ovntest.MustParseIPNet(nodeSubnet)) Expect(err).NotTo(HaveOccurred()) - err = util.SetNodeJoinSubnetAnnotation(nodeAnnotator, joinSubnet) + err = util.SetNodeJoinSubnetAnnotation(nodeAnnotator, ovntest.MustParseIPNet(joinSubnet)) Expect(err).NotTo(HaveOccurred()) err = nodeAnnotator.Run() Expect(err).NotTo(HaveOccurred()) @@ -1005,14 +1008,14 @@ var _ = Describe("Gateway Init Operations", func() { clusterController.UDPLoadBalancerUUID = udpLBUUID clusterController.SCTPLoadBalancerUUID = sctpLBUUID clusterController.SCTPSupport = true - _ = clusterController.joinSubnetAllocator.AddNetworkRange("100.64.0.0/16", 3) + _ = clusterController.joinSubnetAllocator.AddNetworkRange(ovntest.MustParseIPNet("100.64.0.0/16"), 3) // Let the real code run and ensure OVN database sync err = clusterController.WatchNodes() Expect(err).NotTo(HaveOccurred()) subnet := ovntest.MustParseIPNet(nodeSubnet) - err = 
clusterController.syncGatewayLogicalNetwork(updatedNode, l3GatewayConfig, subnet.String()) + err = clusterController.syncGatewayLogicalNetwork(updatedNode, l3GatewayConfig, subnet) Expect(err).NotTo(HaveOccurred()) Expect(fexec.CalledMatchesExpected()).To(BeTrue(), fexec.ErrorDesc) diff --git a/go-controller/pkg/ovn/ovn.go b/go-controller/pkg/ovn/ovn.go index 6d4ded3e68..1118d362ae 100644 --- a/go-controller/pkg/ovn/ovn.go +++ b/go-controller/pkg/ovn/ovn.go @@ -539,7 +539,7 @@ func (oc *Controller) syncNodeGateway(node *kapi.Node, subnet *net.IPNet) error return fmt.Errorf("error cleaning up gateway for node %s: %v", node.Name, err) } } else if subnet != nil { - if err := oc.syncGatewayLogicalNetwork(node, l3GatewayConfig, subnet.String()); err != nil { + if err := oc.syncGatewayLogicalNetwork(node, l3GatewayConfig, subnet); err != nil { return fmt.Errorf("error creating gateway for node %s: %v", node.Name, err) } } diff --git a/go-controller/pkg/util/gateway-cleanup.go b/go-controller/pkg/util/gateway-cleanup.go index 6be8cc0fc8..4736bb6651 100644 --- a/go-controller/pkg/util/gateway-cleanup.go +++ b/go-controller/pkg/util/gateway-cleanup.go @@ -15,8 +15,8 @@ func GatewayCleanup(nodeName string, nodeSubnet *net.IPNet) error { gatewayRouter := GWRouterPrefix + nodeName // Get the gateway router port's IP address (connected to join switch) - var routerIP string - var nextHops []string + var routerIP net.IP + var nextHops []net.IP routerIPNetwork, stderr, err := RunOVNNbctl("--if-exist", "get", "logical_router_port", "rtoj-"+gatewayRouter, "networks") if err != nil { @@ -24,21 +24,21 @@ func GatewayCleanup(nodeName string, nodeSubnet *net.IPNet) error { "error: %v", stderr, err) } + routerIPNetwork = strings.Trim(routerIPNetwork, "[]\"") if routerIPNetwork != "" { - routerIPNetwork = strings.Trim(routerIPNetwork, "[]\"") - if routerIPNetwork != "" { - routerIP = strings.Split(routerIPNetwork, "/")[0] + routerIP, _, err = net.ParseCIDR(routerIPNetwork) + if err != nil 
{ + return fmt.Errorf("could not parse logical router port %q: %v", + routerIPNetwork, err) } } - if routerIP != "" { + if routerIP != nil { nextHops = append(nextHops, routerIP) } if nodeSubnet != nil { _, mgtPortIP := GetNodeWellKnownAddresses(nodeSubnet) - if mgtPortIP.IP.String() != "" { - nextHops = append(nextHops, mgtPortIP.IP.String()) - } + nextHops = append(nextHops, mgtPortIP.IP) } staticRouteCleanup(clusterRouter, nextHops) @@ -102,17 +102,17 @@ func GatewayCleanup(nodeName string, nodeSubnet *net.IPNet) error { return nil } -func staticRouteCleanup(clusterRouter string, nextHops []string) { +func staticRouteCleanup(clusterRouter string, nextHops []net.IP) { for _, nextHop := range nextHops { // Get a list of all the routes in cluster router with the next hop IP. var uuids string uuids, stderr, err := RunOVNNbctl("--data=bare", "--no-heading", "--columns=_uuid", "find", "logical_router_static_route", - "nexthop=\""+nextHop+"\"") + "nexthop=\""+nextHop.String()+"\"") if err != nil { klog.Errorf("Failed to fetch all routes with "+ "IP %s as nexthop, stderr: %q, "+ - "error: %v", nextHop, stderr, err) + "error: %v", nextHop.String(), stderr, err) continue } diff --git a/go-controller/pkg/util/gateway-init.go b/go-controller/pkg/util/gateway-init.go index 74c946f7fe..04a08423b3 100644 --- a/go-controller/pkg/util/gateway-init.go +++ b/go-controller/pkg/util/gateway-init.go @@ -109,7 +109,7 @@ func getGatewayLoadBalancers(gatewayRouter string) (string, string, string, erro } // GatewayInit creates a gateway router for the local chassis. -func GatewayInit(clusterIPSubnet []string, hostSubnet string, joinSubnet *net.IPNet, nodeName string, l3GatewayConfig *L3GatewayConfig, sctpSupport bool) error { +func GatewayInit(clusterIPSubnet []*net.IPNet, hostSubnet *net.IPNet, joinSubnet *net.IPNet, nodeName string, l3GatewayConfig *L3GatewayConfig, sctpSupport bool) error { k8sClusterRouter := GetK8sClusterRouter() // Create a gateway router. 
gatewayRouter := GWRouterPrefix + nodeName @@ -189,7 +189,7 @@ func GatewayInit(clusterIPSubnet []string, hostSubnet string, joinSubnet *net.IP for _, entry := range clusterIPSubnet { // Add a static route in GR with distributed router as the nexthop. stdout, stderr, err = RunOVNNbctl("--may-exist", "lr-route-add", - gatewayRouter, entry, drLRPIp.String()) + gatewayRouter, entry.String(), drLRPIp.String()) if err != nil { return fmt.Errorf("Failed to add a static route in GR with distributed "+ "router as the nexthop, stdout: %q, stderr: %q, error: %v", @@ -314,29 +314,21 @@ func GatewayInit(clusterIPSubnet []string, hostSubnet string, joinSubnet *net.IP "stderr: %q, error: %v", stdout, stderr, err) } - rampoutIPSubnets := strings.Split(hostSubnet, ",") - for _, rampoutIPSubnet := range rampoutIPSubnets { - _, _, err = net.ParseCIDR(rampoutIPSubnet) - if err != nil { - continue - } - - // Add source IP address based routes in distributed router - // for this gateway router. - stdout, stderr, err = RunOVNNbctl("--may-exist", - "--policy=src-ip", "lr-route-add", k8sClusterRouter, - rampoutIPSubnet, gwLRPIp.String()) - if err != nil { - return fmt.Errorf("Failed to add source IP address based "+ - "routes in distributed router, stdout: %q, "+ - "stderr: %q, error: %v", stdout, stderr, err) - } + // Add source IP address based routes in distributed router + // for this gateway router. + stdout, stderr, err = RunOVNNbctl("--may-exist", + "--policy=src-ip", "lr-route-add", k8sClusterRouter, + hostSubnet.String(), gwLRPIp.String()) + if err != nil { + return fmt.Errorf("Failed to add source IP address based "+ + "routes in distributed router, stdout: %q, "+ + "stderr: %q, error: %v", stdout, stderr, err) } // Default SNAT rules. 
for _, entry := range clusterIPSubnet { stdout, stderr, err = RunOVNNbctl("--may-exist", "lr-nat-add", - gatewayRouter, "snat", l3GatewayConfig.IPAddress.IP.String(), entry) + gatewayRouter, "snat", l3GatewayConfig.IPAddress.IP.String(), entry.String()) if err != nil { return fmt.Errorf("Failed to create default SNAT rules, stdout: %q, "+ "stderr: %q, error: %v", stdout, stderr, err) diff --git a/go-controller/pkg/util/net_linux.go b/go-controller/pkg/util/net_linux.go index 4b253ba4ef..f43441417f 100644 --- a/go-controller/pkg/util/net_linux.go +++ b/go-controller/pkg/util/net_linux.go @@ -13,6 +13,14 @@ import ( utilnet "k8s.io/utils/net" ) +func getFamily(ip net.IP) int { + if utilnet.IsIPv6(ip) { + return netlink.FAMILY_V6 + } else { + return netlink.FAMILY_V4 + } +} + // LinkSetUp returns the netlink device with its state marked up func LinkSetUp(interfaceName string) (netlink.Link, error) { link, err := netlink.LinkByName(interfaceName) @@ -43,22 +51,14 @@ func LinkAddrFlush(link netlink.Link) error { } // LinkAddrExist returns true if the given address is present on the link -func LinkAddrExist(link netlink.Link, address string) (bool, error) { - ipnet, err := netlink.ParseIPNet(address) - if err != nil { - return false, fmt.Errorf("failed to parse ip %s :%v\n", address, err) - } - family := netlink.FAMILY_V4 - if ipnet.IP.To4() == nil { - family = netlink.FAMILY_V6 - } - addrs, err := netlink.AddrList(link, family) +func LinkAddrExist(link netlink.Link, address *net.IPNet) (bool, error) { + addrs, err := netlink.AddrList(link, getFamily(address.IP)) if err != nil { return false, fmt.Errorf("failed to list addresses for the link %s: %v", link.Attrs().Name, err) } for _, addr := range addrs { - if addr.IPNet.String() == address { + if addr.IPNet.String() == address.String() { return true, nil } } @@ -66,12 +66,8 @@ func LinkAddrExist(link netlink.Link, address string) (bool, error) { } // LinkAddrAdd removes existing addresses on the link and adds the new 
address -func LinkAddrAdd(link netlink.Link, address string) error { - ipnet, err := netlink.ParseIPNet(address) - if err != nil { - return fmt.Errorf("failed to parse ip %s :%v\n", address, err) - } - err = netlink.AddrAdd(link, &netlink.Addr{IPNet: ipnet}) +func LinkAddrAdd(link netlink.Link, address *net.IPNet) error { + err := netlink.AddrAdd(link, &netlink.Addr{IPNet: address}) if err != nil { return fmt.Errorf("failed to add address %s on link %s: %v", address, link.Attrs().Name, err) } @@ -79,7 +75,7 @@ func LinkAddrAdd(link netlink.Link, address string) error { } // LinkRoutesDel deletes all the routes for the given subnets via the link -func LinkRoutesDel(link netlink.Link, subnets []string) error { +func LinkRoutesDel(link netlink.Link, subnets []*net.IPNet) error { routes, err := netlink.RouteList(link, netlink.FAMILY_ALL) if err != nil { return fmt.Errorf("failed to get all the routes for link %s: %v", @@ -87,7 +83,7 @@ func LinkRoutesDel(link netlink.Link, subnets []string) error { } for _, subnet := range subnets { for _, route := range routes { - if route.Dst.String() == subnet { + if route.Dst.String() == subnet.String() { err = netlink.RouteDel(&route) if err != nil { return fmt.Errorf("failed to delete route '%s via %s' for link %s : %v\n", @@ -101,57 +97,36 @@ func LinkRoutesDel(link netlink.Link, subnets []string) error { } // LinkRoutesAdd adds a new route for given subnets through the gwIPstr -func LinkRoutesAdd(link netlink.Link, gwIPstr string, subnets []string) error { - gwIP := net.ParseIP(gwIPstr) - if gwIP == nil { - return fmt.Errorf("gateway IP %s is not a valid IPv4 or IPv6 address", gwIPstr) - } +func LinkRoutesAdd(link netlink.Link, gwIP net.IP, subnets []*net.IPNet) error { for _, subnet := range subnets { - dstIPnet, err := netlink.ParseIPNet(subnet) - if err != nil { - return fmt.Errorf("failed to parse subnet %s :%v\n", subnet, err) - } route := &netlink.Route{ - Dst: dstIPnet, + Dst: subnet, LinkIndex: link.Attrs().Index, 
Scope: netlink.SCOPE_UNIVERSE, Gw: gwIP, } - err = netlink.RouteAdd(route) + err := netlink.RouteAdd(route) if err != nil { if os.IsExist(err) { return err } return fmt.Errorf("failed to add route for subnet %s via gateway %s: %v", - subnet, gwIPstr, err) + subnet.String(), gwIP.String(), err) } } return nil } // LinkRouteExists checks for existence of routes for the given subnet through gwIPStr -func LinkRouteExists(link netlink.Link, gwIPstr, subnet string) (bool, error) { - gwIP := net.ParseIP(gwIPstr) - if gwIP == nil { - return false, fmt.Errorf("gateway IP %s is not a valid IPv4 or IPv6 address", gwIPstr) - } - family := netlink.FAMILY_V4 - if utilnet.IsIPv6(gwIP) { - family = netlink.FAMILY_V6 - } - - dstIPnet, err := netlink.ParseIPNet(subnet) - if err != nil { - return false, fmt.Errorf("failed to parse subnet %s :%v\n", subnet, err) - } - routeFilter := &netlink.Route{Dst: dstIPnet} +func LinkRouteExists(link netlink.Link, gwIP net.IP, subnet *net.IPNet) (bool, error) { + routeFilter := &netlink.Route{Dst: subnet} filterMask := netlink.RT_FILTER_DST - routes, err := netlink.RouteListFiltered(family, routeFilter, filterMask) + routes, err := netlink.RouteListFiltered(getFamily(gwIP), routeFilter, filterMask) if err != nil { - return false, fmt.Errorf("failed to get routes for subnet %s", subnet) + return false, fmt.Errorf("failed to get routes for subnet %s", subnet.String()) } for _, route := range routes { - if route.Gw.String() == gwIPstr { + if route.Gw.Equal(gwIP) { return true, nil } } @@ -159,19 +134,10 @@ func LinkRouteExists(link netlink.Link, gwIPstr, subnet string) (bool, error) { } // LinkNeighAdd adds MAC/IP bindings for the given link -func LinkNeighAdd(link netlink.Link, neighIPstr string, neighMAC net.HardwareAddr) error { - neighIP := net.ParseIP(neighIPstr) - if neighIP == nil { - return fmt.Errorf("neighbour IP %s is not a valid IPv4 or IPv6 address", neighIPstr) - } - - family := netlink.FAMILY_V4 - if utilnet.IsIPv6(neighIP) { - family 
= netlink.FAMILY_V6 - } +func LinkNeighAdd(link netlink.Link, neighIP net.IP, neighMAC net.HardwareAddr) error { neigh := &netlink.Neigh{ LinkIndex: link.Attrs().Index, - Family: family, + Family: getFamily(neighIP), State: netlink.NUD_PERMANENT, IP: neighIP, HardwareAddr: neighMAC, @@ -184,26 +150,15 @@ func LinkNeighAdd(link netlink.Link, neighIPstr string, neighMAC net.HardwareAdd } // LinkNeighExists checks to see if the given MAC/IP bindings exists -func LinkNeighExists(link netlink.Link, neighIPstr string, neighMAC net.HardwareAddr) (bool, error) { - neighIP := net.ParseIP(neighIPstr) - if neighIP == nil { - return false, fmt.Errorf("neighbour IP %s is not a valid IPv4 or IPv6 address", - neighIPstr) - } - - family := netlink.FAMILY_V4 - if utilnet.IsIPv6(neighIP) { - family = netlink.FAMILY_V6 - } - - neighs, err := netlink.NeighList(link.Attrs().Index, family) +func LinkNeighExists(link netlink.Link, neighIP net.IP, neighMAC net.HardwareAddr) (bool, error) { + neighs, err := netlink.NeighList(link.Attrs().Index, getFamily(neighIP)) if err != nil { return false, fmt.Errorf("failed to get the list of neighbour entries for link %s", link.Attrs().Name) } for _, neigh := range neighs { - if neigh.IP.String() == neighIPstr { + if neigh.IP.Equal(neighIP) { if bytes.Equal(neigh.HardwareAddr, neighMAC) && (neigh.State&netlink.NUD_PERMANENT) == netlink.NUD_PERMANENT { return true, nil diff --git a/go-controller/pkg/util/node_annotations.go b/go-controller/pkg/util/node_annotations.go index 1c72ff3ff7..d3d1517cb8 100644 --- a/go-controller/pkg/util/node_annotations.go +++ b/go-controller/pkg/util/node_annotations.go @@ -101,15 +101,16 @@ func SetDisabledL3GatewayConfig(nodeAnnotator kube.Annotator) error { // SetSharedL3GatewayConfig uses nodeAnnotator set an l3-gateway-config annotation // for the "shared interface" gateway mode. 
func SetSharedL3GatewayConfig(nodeAnnotator kube.Annotator, - ifaceID string, macAddress net.HardwareAddr, gatewayAddress, nextHop string, + ifaceID string, macAddress net.HardwareAddr, + gatewayAddress *net.IPNet, nextHop net.IP, nodePortEnable bool, vlanID uint) error { return setAnnotations(nodeAnnotator, map[string]string{ ovnNodeGatewayMode: string(config.GatewayModeShared), ovnNodeGatewayVlanID: fmt.Sprintf("%d", vlanID), ovnNodeGatewayIfaceID: ifaceID, ovnNodeGatewayMacAddress: macAddress.String(), - ovnNodeGatewayIP: gatewayAddress, - ovnNodeGatewayNextHop: nextHop, + ovnNodeGatewayIP: gatewayAddress.String(), + ovnNodeGatewayNextHop: nextHop.String(), ovnNodePortEnable: fmt.Sprintf("%t", nodePortEnable), }) } @@ -117,14 +118,15 @@ func SetSharedL3GatewayConfig(nodeAnnotator kube.Annotator, // SetSharedL3GatewayConfig uses nodeAnnotator set an l3-gateway-config annotation // for the "localnet" gateway mode. func SetLocalL3GatewayConfig(nodeAnnotator kube.Annotator, - ifaceID string, macAddress net.HardwareAddr, gatewayAddress, nextHop string, + ifaceID string, macAddress net.HardwareAddr, + gatewayAddress *net.IPNet, nextHop net.IP, nodePortEnable bool) error { return setAnnotations(nodeAnnotator, map[string]string{ ovnNodeGatewayMode: string(config.GatewayModeLocal), ovnNodeGatewayIfaceID: ifaceID, ovnNodeGatewayMacAddress: macAddress.String(), - ovnNodeGatewayIP: gatewayAddress, - ovnNodeGatewayNextHop: nextHop, + ovnNodeGatewayIP: gatewayAddress.String(), + ovnNodeGatewayNextHop: nextHop.String(), ovnNodePortEnable: fmt.Sprintf("%t", nodePortEnable), }) } @@ -203,16 +205,11 @@ func SetNodeManagementPortMACAddress(nodeAnnotator kube.Annotator, macAddress ne } func ParseNodeManagementPortMACAddress(node *kapi.Node) (net.HardwareAddr, error) { - macAddrString, ok := node.Annotations[ovnNodeManagementPortMacAddress] + macAddress, ok := node.Annotations[ovnNodeManagementPortMacAddress] if !ok { klog.Errorf("macAddress annotation not found for node %q ", 
node.Name) return nil, nil } - macAddress, err := net.ParseMAC(macAddrString) - if err != nil { - return nil, fmt.Errorf("Error %v in parsing node %v macAddress %v", err, node.Name, macAddrString) - } - - return macAddress, nil + return net.ParseMAC(macAddress) } diff --git a/go-controller/pkg/util/subnet_annotations.go b/go-controller/pkg/util/subnet_annotations.go index 83f1620f6f..da54a33c6b 100644 --- a/go-controller/pkg/util/subnet_annotations.go +++ b/go-controller/pkg/util/subnet_annotations.go @@ -35,9 +35,9 @@ const ( // CreateNodeHostSubnetAnnotation creates a "k8s.ovn.org/node-subnets" annotation, // with a single "default" network, suitable for passing to kube.SetAnnotationsOnNode -func CreateNodeHostSubnetAnnotation(defaultSubnet string) (map[string]interface{}, error) { +func CreateNodeHostSubnetAnnotation(defaultSubnet *net.IPNet) (map[string]interface{}, error) { bytes, err := json.Marshal(map[string]string{ - "default": defaultSubnet, + "default": defaultSubnet.String(), }) if err != nil { return nil, err @@ -49,7 +49,7 @@ func CreateNodeHostSubnetAnnotation(defaultSubnet string) (map[string]interface{ // SetNodeHostSubnetAnnotation sets a "k8s.ovn.org/node-subnets" annotation // using a kube.Annotator -func SetNodeHostSubnetAnnotation(nodeAnnotator kube.Annotator, defaultSubnet string) error { +func SetNodeHostSubnetAnnotation(nodeAnnotator kube.Annotator, defaultSubnet *net.IPNet) error { annotation, err := CreateNodeHostSubnetAnnotation(defaultSubnet) if err != nil { return err @@ -88,9 +88,9 @@ func ParseNodeHostSubnetAnnotation(node *kapi.Node) (*net.IPNet, error) { // CreateNodeJoinSubnetAnnotation creates a "k8s.ovn.org/node-join-subnets" annotation // with a single "default" network, suitable for passing to kube.SetAnnotationsOnNode -func CreateNodeJoinSubnetAnnotation(defaultSubnet string) (map[string]interface{}, error) { +func CreateNodeJoinSubnetAnnotation(defaultSubnet *net.IPNet) (map[string]interface{}, error) { bytes, err := 
json.Marshal(map[string]string{ - "default": defaultSubnet, + "default": defaultSubnet.String(), }) if err != nil { return nil, err @@ -102,7 +102,7 @@ func CreateNodeJoinSubnetAnnotation(defaultSubnet string) (map[string]interface{ // SetNodeJoinSubnetAnnotation sets a "k8s.ovn.org/node-join-subnets" annotation // using a kube.Annotator -func SetNodeJoinSubnetAnnotation(nodeAnnotator kube.Annotator, defaultSubnet string) error { +func SetNodeJoinSubnetAnnotation(nodeAnnotator kube.Annotator, defaultSubnet *net.IPNet) error { annotation, err := CreateNodeJoinSubnetAnnotation(defaultSubnet) if err != nil { return err From afc1fb1b8f39e1a7cbc744ee1a0609ac96a75590 Mon Sep 17 00:00:00 2001 From: Andrew Sun Date: Mon, 6 Apr 2020 17:22:56 -0400 Subject: [PATCH 03/27] Add test that kills the master pod, and fix test for multiple nodes Signed-off-by: Andrew Sun --- test/e2e/e2e_test.go | 55 ++++++++++++++++++++++++++++++++++++++------ test/e2e/go.mod | 1 - 2 files changed, 48 insertions(+), 8 deletions(-) diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 9589861af2..95cde644ed 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -17,7 +17,7 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" ) -func checkContinuousConnectivity(f *framework.Framework, nodeName, podName, host string, port, timeout int, readyChan chan bool, errChan chan error) { +func checkContinuousConnectivity(f *framework.Framework, nodeName, podName, host string, port, timeout int, podChan chan *v1.Pod, errChan chan error) { contName := fmt.Sprintf("%s-container", podName) command := []string{ @@ -48,7 +48,19 @@ func checkContinuousConnectivity(f *framework.Framework, nodeName, podName, host return } - close(readyChan) + err = e2epod.WaitForPodNotPending(f.ClientSet, f.Namespace.Name, podName) + if err != nil { + errChan <- err + return + } + + podGet, err := podClient.Get(podName, metav1.GetOptions{}) + if err != nil { + errChan <- err + return + } + + podChan <- podGet 
err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, podName, f.Namespace.Name) @@ -86,11 +98,11 @@ var _ = Describe("e2e control plane", func() { ginkgo.It("should provide Internet connection continuously when ovn-k8s pod is killed", func() { ginkgo.By("Running container which tries to connect to 8.8.8.8 in a loop") - readyChan, errChan := make(chan bool), make(chan error) - go checkContinuousConnectivity(f, "", "connectivity-test-continuous", "8.8.8.8", 53, 30, readyChan, errChan) + podChan, errChan := make(chan *v1.Pod), make(chan error) + go checkContinuousConnectivity(f, "", "connectivity-test-continuous", "8.8.8.8", 53, 30, podChan, errChan) - <-readyChan - framework.Logf("Container is ready, waiting a few seconds") + testPod := <-podChan + framework.Logf("Test pod running on %q, waiting a few seconds", testPod.Spec.NodeName) time.Sleep(10 * time.Second) @@ -99,7 +111,7 @@ var _ = Describe("e2e control plane", func() { podList, _ := podClient.List(metav1.ListOptions{}) podName := "" for _, pod := range podList.Items { - if strings.HasPrefix(pod.Name, "ovnkube-node") { + if strings.HasPrefix(pod.Name, "ovnkube-node") && pod.Spec.NodeName == testPod.Spec.NodeName { podName = pod.Name break } @@ -111,4 +123,33 @@ var _ = Describe("e2e control plane", func() { framework.ExpectNoError(<-errChan) }) + + ginkgo.It("should provide Internet connection continuously when master is killed", func() { + ginkgo.By("Running container which tries to connect to 8.8.8.8 in a loop") + + podChan, errChan := make(chan *v1.Pod), make(chan error) + go checkContinuousConnectivity(f, "", "connectivity-test-continuous", "8.8.8.8", 53, 30, podChan, errChan) + + testPod := <-podChan + framework.Logf("Test pod running on %q, waiting a few seconds", testPod.Spec.NodeName) + + time.Sleep(10 * time.Second) + + podClient := f.ClientSet.CoreV1().Pods("ovn-kubernetes") + + podList, _ := podClient.List(metav1.ListOptions{}) + podName := "" + for _, pod := range podList.Items { + if 
strings.HasPrefix(pod.Name, "ovnkube-master") { + podName = pod.Name + break + } + } + + err := podClient.Delete(podName, metav1.NewDeleteOptions(0)) + framework.ExpectNoError(err, "should delete ovnkube-master pod") + framework.Logf("Deleted ovnkube-master %q", podName) + + framework.ExpectNoError(<-errChan) + }) }) diff --git a/test/e2e/go.mod b/test/e2e/go.mod index ec9e494f3c..63d2297fb7 100644 --- a/test/e2e/go.mod +++ b/test/e2e/go.mod @@ -33,5 +33,4 @@ require ( k8s.io/apimachinery v0.16.4 k8s.io/kubectl v0.0.0 k8s.io/kubernetes v1.16.4 -// k8s.io/kubernetes v0.17.4 ) From 8f1edff7dd2a284c731dd3b8f379d540b4818dc4 Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Tue, 7 Apr 2020 10:23:34 -0400 Subject: [PATCH 04/27] Fix golang linter timeouts Changes-Include: - Use verbose print resource usage - Use vendor folder to avoid having to download deps - Bump Timeout to 15 min Using vendor for download saves about a minute; we see that the entire execution is around 9-10 minutes, while our timeout was 10 min. Closes #1222 Signed-off-by: Tim Rozet --- go-controller/hack/lint.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/go-controller/hack/lint.sh b/go-controller/hack/lint.sh index 23e95f8fe7..8d216ae320 100755 --- a/go-controller/hack/lint.sh +++ b/go-controller/hack/lint.sh @@ -1,6 +1,5 @@ #!/usr/bin/env bash - GO111MODULE=on ${GOPATH}/bin/golangci-lint run \ --tests=false --enable gofmt \ - --timeout=10m0s \ + --timeout=15m0s --verbose --print-resources-usage --modules-download-mode=vendor \ && echo "lint OK!" From 9cce542dc647ee7a68f14bbb4861ca27888f336b Mon Sep 17 00:00:00 2001 From: Dave Tucker Date: Wed, 1 Apr 2020 20:32:07 +0100 Subject: [PATCH 05/27] ci: Remove Ruby Dependency This commit: 1. Removes the template YAML file and use of Ruby to handle YAML anchors 2. Squashes the Verify and Build Job as they have identical node requirements 3. Uses a matrix for the sharded KIND e2e jobs 4. 
Moves the Golang Version to an env variable to avoid duplication 5. Adds a Makefile to the test directory CI can then simply `make -C test $TARGET` to run tests. This is preferable to having the logic inside the CI workflow as it can be used for local testing Signed-off-by: Dave Tucker --- .github/workflow-templates/render.rb | 19 - .github/workflow-templates/test.yml.erb | 303 -------------- .github/workflows/test.yml | 173 ++++++++ .github/workflows/test_generated.yml | 526 ------------------------ test/Makefile | 11 + test/scripts/e2e-cp.sh | 19 + test/scripts/e2e-kind.sh | 42 ++ test/scripts/install-kind.sh | 17 + 8 files changed, 262 insertions(+), 848 deletions(-) delete mode 100644 .github/workflow-templates/render.rb delete mode 100644 .github/workflow-templates/test.yml.erb create mode 100644 .github/workflows/test.yml delete mode 100644 .github/workflows/test_generated.yml create mode 100644 test/Makefile create mode 100755 test/scripts/e2e-cp.sh create mode 100755 test/scripts/e2e-kind.sh create mode 100755 test/scripts/install-kind.sh diff --git a/.github/workflow-templates/render.rb b/.github/workflow-templates/render.rb deleted file mode 100644 index 6c2090007b..0000000000 --- a/.github/workflow-templates/render.rb +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env ruby - -# https://github.community/t5/GitHub-Actions/Support-for-YAML-anchors/m-p/42517/highlight/true#M5024 - -require "yaml" -require "json" -require "erb" - -header = """# THIS FILE IS AUTOMATICALLY GENERATED -# DO NOT EDIT -""" - 
-ginkgo_skip='--ginkgo.skip=Networking\sIPerf\sIPv[46]|\[Feature:PerformanceDNS\]|\[Feature:IPv6DualStackAlphaFeature\]|NetworkPolicy\sbetween\sserver\sand\sclient.+(ingress\saccess|multiple\segress\spolicies|allow\segress\saccess)|\[Feature:NoSNAT\]|Services.+(ESIPP|cleanup\sfinalizer|session\saffinity)|\[Feature:Networking-IPv6\]|\[Feature:Federation\]|configMap\snameserver|ClusterDns\s\[Feature:Example\]|(Namespace|Pod)Selector\s\[Feature:NetworkPolicy\]|kube-proxy|should\sset\sTCP\sCLOSE_WAIT\stimeout' - -rendered_out = ERB.new(File.read(File.expand_path("test.yml.erb", __dir__))).result() -yaml_out = YAML.load(rendered_out) -puts "rendered yaml is: " -puts YAML.dump(yaml_out) -File.write(File.expand_path("../workflows/test_generated.yml", __dir__), header + YAML.load(yaml_out.to_json).to_yaml()) diff --git a/.github/workflow-templates/test.yml.erb b/.github/workflow-templates/test.yml.erb deleted file mode 100644 index a43f3c04a5..0000000000 --- a/.github/workflow-templates/test.yml.erb +++ /dev/null @@ -1,303 +0,0 @@ -# THIS FILE IS A TEMPLATE: be sure to run render.rb before committing - -name: Go -'on': [push, pull_request] -env: - K8S_VERSION: v1.16.4 - KIND_CLUSTER_NAME: ovn - KIND_INSTALL_INGRESS: true -jobs: - build: - name: Build - runs-on: ubuntu-latest - steps: - - &step_cleanup - name: Free up disk space - run: sudo eatmydata apt-get remove --auto-remove -y aspnetcore-* dotnet-* libmono-* mono-* msbuild php-* php7* ghc-* zulu-* - - - &step_gosetup - name: Set up Go 1.12 - uses: actions/setup-go@v1 - with: - go-version: 1.12 - id: go - - - &step_checkout - name: Check out code into the Go module directory - uses: actions/checkout@v2 - - - &step_environment - name: Set up environment - run: | - export GOPATH=$(go env GOPATH) - echo "::set-env name=GOPATH::$GOPATH" - export PATH=$GOPATH/bin:$PATH - echo "::add-path::$GOPATH/bin" - - - name: Build - run: | - set -x - go get golang.org/x/tools/cmd/cover - pushd go-controller - make - make windows - 
COVERALLS=1 make check - popd - pushd dist/images - if [ -n "$(git diff --stat origin/master.. | grep dist/images/Dockerfile)" ]; then make all; fi - popd - - # Combine separate code coverage profiles into one - go get github.com/modocache/gover - gover go-controller/ gover.coverprofile - - # Convert coverage profile to LCOV format for coveralls github action - go get github.com/jandelgado/gcov2lcov - mkdir -p src/github.com/ovn-org - ln -sf $(pwd) src/github.com/ovn-org/ovn-kubernetes - GOPATH=$(pwd) gcov2lcov -infile gover.coverprofile -outfile coverage.lcov - - - name: Submit code coverage to Coveralls - uses: coverallsapp/github-action@master - with: - github-token: ${{ secrets.GITHUB_TOKEN }} - path-to-lcov: coverage.lcov - - verify: - name: Verify - runs-on: ubuntu-latest - steps: - - *step_cleanup - - *step_gosetup - - *step_checkout - - *step_environment - - - name: Verify - run: | - pushd go-controller - make gofmt - make install.tools - make lint - popd - - k8s: - name: Build k8s - runs-on: ubuntu-latest - steps: - - *step_environment - - - &step_k8s_cache - name: Cache Kubernetes - id: cache-k8s - uses: actions/cache@v1 - with: - path: ${{ env.GOPATH }}/src/k8s.io/kubernetes/ - key: k8s-go-2-${{ env.K8S_VERSION }} - - - << : *step_gosetup - if: steps.cache-k8s.outputs.cache-hit != 'true' - - - - &step_k8s_cache_miss - name: Build and install Kubernetes - if: steps.cache-k8s.outputs.cache-hit != 'true' - run: | - set -x - git clone --single-branch --branch $K8S_VERSION https://github.com/kubernetes/kubernetes.git $GOPATH/src/k8s.io/kubernetes/ - pushd $GOPATH/src/k8s.io/kubernetes/ - make WHAT="test/e2e/e2e.test vendor/github.com/onsi/ginkgo/ginkgo cmd/kubectl" - rm -rf .git - - - e2e-shard-n: - name: e2e-kind-ovn-shard-n-${{ matrix.config.name }} - runs-on: ubuntu-latest - needs: k8s - strategy: &e2e_strategy - matrix: - config: - - {ha: "false", name: "noHA"} - - {ha: "true", name: "HA"} - env: - KIND_HA: ${{matrix.config.ha}} - JOB_NAME: 
e2e-shard-n-${{ matrix.config.name }} - steps: - - *step_cleanup - - *step_gosetup - - *step_checkout - - *step_environment - - *step_k8s_cache - - *step_k8s_cache_miss - - - &step_kind_setup - name: kind setup - run: | - set -x - - export GO111MODULE="on" - mkdir -p $GOPATH/bin - curl -fs https://chunk.io/trozet/ba750701d0af4e2b94b249ab9de27b50 -o $GOPATH/bin/kubetest - chmod +x $GOPATH/bin/kubetest - - pushd $GOPATH/src/k8s.io/kubernetes/ - sudo ln ./_output/local/go/bin/kubectl /usr/local/bin/kubectl - popd - - GO111MODULE="on" go get sigs.k8s.io/kind@v0.7.0 - pushd contrib - ./kind.sh - popd - - - name: e2e-kind-ovn-shard-n - run: | - pushd $GOPATH/src/k8s.io/kubernetes/ - export KUBERNETES_CONFORMANCE_TEST=y - export KUBECONFIG=${HOME}/admin.conf - export MASTER_NAME=${KIND_CLUSTER_NAME}-control-plane - export NODE_NAMES=${MASTER_NAME} - - # all tests that don't have P as their sixth letter after the N - kubetest --provider=local --deployment=kind --kind-cluster-name=${KIND_CLUSTER_NAME} --test --test_args='--num-nodes=3 --ginkgo.focus=\[sig-network\]\s[Nn](.{6}[^Pp].*|.{0,6}$) --disable-log-dump=false <%= ginkgo_skip %>' - - - &step_export_logs - name: Export logs - if: always() - run: | - mkdir -p /tmp/kind/logs - kind export logs --name ${KIND_CLUSTER_NAME} /tmp/kind/logs - - - &step_upload_logs - name: Upload logs - if: always() - uses: actions/upload-artifact@v1 - with: - name: kind-logs-${{ github.run_id }}-${{ env.JOB_NAME }} - path: /tmp/kind/logs - - e2e-shard-np: - name: e2e-kind-ovn-shard-np-${{ matrix.config.name }} - runs-on: ubuntu-latest - needs: k8s - strategy: *e2e_strategy - env: - JOB_NAME: e2e-shard-np-${{ matrix.config.name }} - KIND_HA: ${{matrix.config.ha}} - steps: - - *step_cleanup - - *step_gosetup - - *step_checkout - - *step_environment - - *step_k8s_cache - - *step_k8s_cache_miss - - - *step_kind_setup - - - name: e2e-kind-ovn-shard-np - run: | - pushd $GOPATH/src/k8s.io/kubernetes/ - export KUBERNETES_CONFORMANCE_TEST=y - export 
KUBECONFIG=${HOME}/admin.conf - export MASTER_NAME=${KIND_CLUSTER_NAME}-control-plane - export NODE_NAMES=${MASTER_NAME} - # all tests that have P as the sixth letter after the N - kubetest --provider=local --deployment=kind --kind-cluster-name=${KIND_CLUSTER_NAME} --test --test_args='--num-nodes=3 --ginkgo.focus=\[sig-network\]\s[Nn].{6}[Pp].*$ --disable-log-dump=false <%= ginkgo_skip %>' - - - *step_export_logs - - *step_upload_logs - - e2e-shard-s: - name: e2e-kind-ovn-shard-s-${{ matrix.config.name }} - runs-on: ubuntu-latest - needs: k8s - strategy: *e2e_strategy - env: - JOB_NAME: e2e-shard-s-${{ matrix.config.name }} - KIND_HA: ${{matrix.config.ha}} - steps: - - *step_cleanup - - *step_gosetup - - *step_checkout - - *step_environment - - *step_k8s_cache - - *step_k8s_cache_miss - - - *step_kind_setup - - - name: e2e-kind-ovn-shard-s - run: | - pushd $GOPATH/src/k8s.io/kubernetes/ - export KUBERNETES_CONFORMANCE_TEST=y - export KUBECONFIG=${HOME}/admin.conf - export MASTER_NAME=${KIND_CLUSTER_NAME}-control-plane - export NODE_NAMES=${MASTER_NAME} - kubetest --provider=local --deployment=kind --kind-cluster-name=${KIND_CLUSTER_NAME} --test --test_args='--num-nodes=3 --ginkgo.focus=\[sig-network\]\s[Ss].* --disable-log-dump=false <%= ginkgo_skip %>' - - - *step_export_logs - - *step_upload_logs - - e2e-shard-other: - name: e2e-kind-ovn-shard-other-${{ matrix.config.name }} - runs-on: ubuntu-latest - needs: k8s - strategy: *e2e_strategy - env: - JOB_NAME: e2e-shard-other-${{ matrix.config.name }} - KIND_HA: ${{matrix.config.ha}} - steps: - - *step_cleanup - - *step_gosetup - - *step_checkout - - *step_environment - - *step_k8s_cache - - *step_k8s_cache_miss - - - *step_kind_setup - - - name: e2e-kind-ovn-shard-other - run: | - pushd $GOPATH/src/k8s.io/kubernetes/ - export KUBERNETES_CONFORMANCE_TEST=y - export KUBECONFIG=${HOME}/admin.conf - export MASTER_NAME=${KIND_CLUSTER_NAME}-control-plane - export NODE_NAMES=${MASTER_NAME} - kubetest --provider=local 
--deployment=kind --kind-cluster-name=${KIND_CLUSTER_NAME} --test --test_args='--num-nodes=3 --ginkgo.focus=\[sig-network\]\s[^NnSs].* --disable-log-dump=false <%= ginkgo_skip %>' - - - *step_export_logs - - *step_upload_logs - - e2e-control-plane: - name: e2e-control-plane-${{ matrix.config.name }} - runs-on: ubuntu-latest - needs: k8s - strategy: *e2e_strategy - env: - JOB_NAME: e2e-control-plane-${{ matrix.config.name }} - KIND_HA: ${{matrix.config.ha}} - steps: - - *step_cleanup - - *step_gosetup - - *step_checkout - - *step_environment - - *step_k8s_cache - - *step_k8s_cache_miss - - - *step_kind_setup - - - name: e2e-control-plane - run: | - set -x - pushd test/e2e - go mod download - popd - pushd $GOPATH/src/k8s.io/kubernetes/ - export KUBERNETES_CONFORMANCE_TEST=y - export KUBECONFIG=${HOME}/admin.conf - export MASTER_NAME=${KIND_CLUSTER_NAME}-control-plane - export NODE_NAMES=${MASTER_NAME} - - sed -E -i 's/"\$\{ginkgo\}" "\$\{ginkgo_args\[\@\]\:\+\$\{ginkgo_args\[\@\]\}\}" "\$\{e2e_test\}"/pushd \$GITHUB_WORKSPACE\/test\/e2e\nGO111MODULE=on "\$\{ginkgo\}" "\$\{ginkgo_args\[\@\]\:\+\$\{ginkgo_args\[\@\]\}\}"/' hack/ginkgo-e2e.sh - - kubetest --provider=local --deployment=kind --kind-cluster-name=kind-ovn --test --test_args='--num-nodes=3 --disable-log-dump=false' - - - *step_export_logs - - *step_upload_logs diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000000..1a508304e2 --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,173 @@ +name: ovn-ci + +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +env: + GO_VERSION: 1.12 + K8S_VERSION: v1.16.4 + KIND_CLUSTER_NAME: ovn + KIND_INSTALL_INGRESS: true + +jobs: + build: + name: Build + runs-on: ubuntu-latest + steps: + - name: Free up disk space + run: sudo eatmydata apt-get remove --auto-remove -y aspnetcore-* dotnet-* libmono-* mono-* msbuild php-* php7* ghc-* zulu-* + + - name: Set up Go + uses: actions/setup-go@v1 + with: + 
go-version: ${{ env.GO_VERSION }} + id: go + + - name: Check out code into the Go module directory + uses: actions/checkout@v2 + + - name: Set up environment + run: | + export GOPATH=$(go env GOPATH) + echo "::set-env name=GOPATH::$GOPATH" + export PATH=$GOPATH/bin:$PATH + echo "::add-path::$GOPATH/bin" + + - name: Verify + run: | + pushd go-controller + make gofmt + make install.tools + make lint + popd + + - name: Build + run: | + set -x + go get golang.org/x/tools/cmd/cover + pushd go-controller + make + make windows + COVERALLS=1 make check + popd + pushd dist/images + if [ -n "$(git diff --stat origin/master.. | grep dist/images/Dockerfile)" ]; then make all; fi + popd + + # Combine separate code coverage profiles into one + go get github.com/modocache/gover + gover go-controller/ gover.coverprofile + + # Convert coverage profile to LCOV format for coveralls github action + go get github.com/jandelgado/gcov2lcov + mkdir -p src/github.com/ovn-org + ln -sf $(pwd) src/github.com/ovn-org/ovn-kubernetes + GOPATH=$(pwd) gcov2lcov -infile gover.coverprofile -outfile coverage.lcov + + - name: Submit code coverage to Coveralls + uses: coverallsapp/github-action@master + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + path-to-lcov: coverage.lcov + + k8s: + name: Build k8s + runs-on: ubuntu-latest + steps: + + - name: Set up environment + run: | + export GOPATH=$(go env GOPATH) + echo "::set-env name=GOPATH::$GOPATH" + export PATH=$GOPATH/bin:$PATH + echo "::add-path::$GOPATH/bin" + + - name: Cache Kubernetes + id: cache-k8s + uses: actions/cache@v1 + with: + path: ${{ env.GOPATH }}/src/k8s.io/kubernetes/ + key: k8s-go-2-${{ env.K8S_VERSION }} + + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: ${{ env.GO_VERSION }} + id: go + if: steps.cache-k8s.outputs.cache-hit != 'true' + + - name: Build and install Kubernetes + if: steps.cache-k8s.outputs.cache-hit != 'true' + run: | + set -x + git clone --single-branch --branch $K8S_VERSION 
https://github.com/kubernetes/kubernetes.git $GOPATH/src/k8s.io/kubernetes/ + pushd $GOPATH/src/k8s.io/kubernetes/ + make WHAT="test/e2e/e2e.test vendor/github.com/onsi/ginkgo/ginkgo cmd/kubectl" + rm -rf .git + + e2e: + name: End-To-End Tests + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + target: + - e2e-kind-ovn-shard-n + - e2e-kind-ovn-shard-np + - e2e-kind-ovn-shard-s + - e2e-kind-ovn-shard-other + - e2e-control-plane + needs: k8s + env: + JOB_NAME: "${{ matrix.target }}" + steps: + + - name: Free up disk space + run: sudo eatmydata apt-get remove --auto-remove -y aspnetcore-* dotnet-* libmono-* mono-* msbuild php-* php7* ghc-* zulu-* + + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: ${{ env.GO_VERSION }} + id: go + + - name: Check out code into the Go module directory + uses: actions/checkout@v2 + + - name: Set up environment + run: | + export GOPATH=$(go env GOPATH) + echo "::set-env name=GOPATH::$GOPATH" + export PATH=$GOPATH/bin:$PATH + echo "::add-path::$GOPATH/bin" + + - name: Restore Kubernetes from cache + id: cache-k8s + uses: actions/cache@v1 + with: + path: "${{ env.GOPATH }}/src/k8s.io/kubernetes/" + key: k8s-go-2-${{ env.K8S_VERSION }} + + - name: kind setup + run: | + make -C test install-kind + + - name: Run Tests + run: | + make -C test ${{ matrix.target }} + + - name: Export logs + if: always() + run: | + mkdir -p /tmp/kind/logs + kind export logs --name ${KIND_CLUSTER_NAME} /tmp/kind/logs + + - name: Upload logs + if: always() + uses: actions/upload-artifact@v1 + with: + name: kind-logs-${{ github.run_id }}-${{ env.JOB_NAME }} + path: /tmp/kind/logs diff --git a/.github/workflows/test_generated.yml b/.github/workflows/test_generated.yml deleted file mode 100644 index 92c3e9b276..0000000000 --- a/.github/workflows/test_generated.yml +++ /dev/null @@ -1,526 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED -# DO NOT EDIT ---- -name: Go -'on': -- push -- pull_request -env: - K8S_VERSION: v1.16.4 - 
KIND_CLUSTER_NAME: ovn - KIND_INSTALL_INGRESS: true -jobs: - build: - name: Build - runs-on: ubuntu-latest - steps: - - name: Free up disk space - run: sudo eatmydata apt-get remove --auto-remove -y aspnetcore-* dotnet-* libmono-* - mono-* msbuild php-* php7* ghc-* zulu-* - - name: Set up Go 1.12 - uses: actions/setup-go@v1 - with: - go-version: 1.12 - id: go - - name: Check out code into the Go module directory - uses: actions/checkout@v2 - - name: Set up environment - run: | - export GOPATH=$(go env GOPATH) - echo "::set-env name=GOPATH::$GOPATH" - export PATH=$GOPATH/bin:$PATH - echo "::add-path::$GOPATH/bin" - - name: Build - run: | - set -x - go get golang.org/x/tools/cmd/cover - pushd go-controller - make - make windows - COVERALLS=1 make check - popd - pushd dist/images - if [ -n "$(git diff --stat origin/master.. | grep dist/images/Dockerfile)" ]; then make all; fi - popd - - # Combine separate code coverage profiles into one - go get github.com/modocache/gover - gover go-controller/ gover.coverprofile - - # Convert coverage profile to LCOV format for coveralls github action - go get github.com/jandelgado/gcov2lcov - mkdir -p src/github.com/ovn-org - ln -sf $(pwd) src/github.com/ovn-org/ovn-kubernetes - GOPATH=$(pwd) gcov2lcov -infile gover.coverprofile -outfile coverage.lcov - - name: Submit code coverage to Coveralls - uses: coverallsapp/github-action@master - with: - github-token: "${{ secrets.GITHUB_TOKEN }}" - path-to-lcov: coverage.lcov - verify: - name: Verify - runs-on: ubuntu-latest - steps: - - name: Free up disk space - run: sudo eatmydata apt-get remove --auto-remove -y aspnetcore-* dotnet-* libmono-* - mono-* msbuild php-* php7* ghc-* zulu-* - - name: Set up Go 1.12 - uses: actions/setup-go@v1 - with: - go-version: 1.12 - id: go - - name: Check out code into the Go module directory - uses: actions/checkout@v2 - - name: Set up environment - run: | - export GOPATH=$(go env GOPATH) - echo "::set-env name=GOPATH::$GOPATH" - export 
PATH=$GOPATH/bin:$PATH - echo "::add-path::$GOPATH/bin" - - name: Verify - run: | - pushd go-controller - make gofmt - make install.tools - make lint - popd - k8s: - name: Build k8s - runs-on: ubuntu-latest - steps: - - name: Set up environment - run: | - export GOPATH=$(go env GOPATH) - echo "::set-env name=GOPATH::$GOPATH" - export PATH=$GOPATH/bin:$PATH - echo "::add-path::$GOPATH/bin" - - name: Cache Kubernetes - id: cache-k8s - uses: actions/cache@v1 - with: - path: "${{ env.GOPATH }}/src/k8s.io/kubernetes/" - key: k8s-go-2-${{ env.K8S_VERSION }} - - name: Set up Go 1.12 - uses: actions/setup-go@v1 - with: - go-version: 1.12 - id: go - if: steps.cache-k8s.outputs.cache-hit != 'true' - - name: Build and install Kubernetes - if: steps.cache-k8s.outputs.cache-hit != 'true' - run: | - set -x - git clone --single-branch --branch $K8S_VERSION https://github.com/kubernetes/kubernetes.git $GOPATH/src/k8s.io/kubernetes/ - pushd $GOPATH/src/k8s.io/kubernetes/ - make WHAT="test/e2e/e2e.test vendor/github.com/onsi/ginkgo/ginkgo cmd/kubectl" - rm -rf .git - e2e-shard-n: - name: e2e-kind-ovn-shard-n-${{ matrix.config.name }} - runs-on: ubuntu-latest - needs: k8s - strategy: - matrix: - config: - - ha: 'false' - name: noHA - - ha: 'true' - name: HA - env: - KIND_HA: "${{matrix.config.ha}}" - JOB_NAME: e2e-shard-n-${{ matrix.config.name }} - steps: - - name: Free up disk space - run: sudo eatmydata apt-get remove --auto-remove -y aspnetcore-* dotnet-* libmono-* - mono-* msbuild php-* php7* ghc-* zulu-* - - name: Set up Go 1.12 - uses: actions/setup-go@v1 - with: - go-version: 1.12 - id: go - - name: Check out code into the Go module directory - uses: actions/checkout@v2 - - name: Set up environment - run: | - export GOPATH=$(go env GOPATH) - echo "::set-env name=GOPATH::$GOPATH" - export PATH=$GOPATH/bin:$PATH - echo "::add-path::$GOPATH/bin" - - name: Cache Kubernetes - id: cache-k8s - uses: actions/cache@v1 - with: - path: "${{ env.GOPATH }}/src/k8s.io/kubernetes/" - key: 
k8s-go-2-${{ env.K8S_VERSION }} - - name: Build and install Kubernetes - if: steps.cache-k8s.outputs.cache-hit != 'true' - run: | - set -x - git clone --single-branch --branch $K8S_VERSION https://github.com/kubernetes/kubernetes.git $GOPATH/src/k8s.io/kubernetes/ - pushd $GOPATH/src/k8s.io/kubernetes/ - make WHAT="test/e2e/e2e.test vendor/github.com/onsi/ginkgo/ginkgo cmd/kubectl" - rm -rf .git - - name: kind setup - run: | - set -x - - export GO111MODULE="on" - mkdir -p $GOPATH/bin - curl -fs https://chunk.io/trozet/ba750701d0af4e2b94b249ab9de27b50 -o $GOPATH/bin/kubetest - chmod +x $GOPATH/bin/kubetest - - pushd $GOPATH/src/k8s.io/kubernetes/ - sudo ln ./_output/local/go/bin/kubectl /usr/local/bin/kubectl - popd - - GO111MODULE="on" go get sigs.k8s.io/kind@v0.7.0 - pushd contrib - ./kind.sh - popd - - name: e2e-kind-ovn-shard-n - run: | - pushd $GOPATH/src/k8s.io/kubernetes/ - export KUBERNETES_CONFORMANCE_TEST=y - export KUBECONFIG=${HOME}/admin.conf - export MASTER_NAME=${KIND_CLUSTER_NAME}-control-plane - export NODE_NAMES=${MASTER_NAME} - - # all tests that don't have P as their sixth letter after the N - kubetest --provider=local --deployment=kind --kind-cluster-name=${KIND_CLUSTER_NAME} --test --test_args='--num-nodes=3 --ginkgo.focus=\[sig-network\]\s[Nn](.{6}[^Pp].*|.{0,6}$) --disable-log-dump=false --ginkgo.skip=Networking\sIPerf\sIPv[46]|\[Feature:PerformanceDNS\]|\[Feature:IPv6DualStackAlphaFeature\]|NetworkPolicy\sbetween\sserver\sand\sclient.+(ingress\saccess|multiple\segress\spolicies|allow\segress\saccess)|\[Feature:NoSNAT\]|Services.+(ESIPP|cleanup\sfinalizer|session\saffinity)|\[Feature:Networking-IPv6\]|\[Feature:Federation\]|configMap\snameserver|ClusterDns\s\[Feature:Example\]|(Namespace|Pod)Selector\s\[Feature:NetworkPolicy\]|kube-proxy|should\sset\sTCP\sCLOSE_WAIT\stimeout' - - name: Export logs - if: always() - run: "mkdir -p /tmp/kind/logs \nkind export logs --name ${KIND_CLUSTER_NAME} - /tmp/kind/logs\n" - - name: Upload logs - if: 
always() - uses: actions/upload-artifact@v1 - with: - name: kind-logs-${{ github.run_id }}-${{ env.JOB_NAME }} - path: "/tmp/kind/logs" - e2e-shard-np: - name: e2e-kind-ovn-shard-np-${{ matrix.config.name }} - runs-on: ubuntu-latest - needs: k8s - strategy: - matrix: - config: - - ha: 'false' - name: noHA - - ha: 'true' - name: HA - env: - JOB_NAME: e2e-shard-np-${{ matrix.config.name }} - KIND_HA: "${{matrix.config.ha}}" - steps: - - name: Free up disk space - run: sudo eatmydata apt-get remove --auto-remove -y aspnetcore-* dotnet-* libmono-* - mono-* msbuild php-* php7* ghc-* zulu-* - - name: Set up Go 1.12 - uses: actions/setup-go@v1 - with: - go-version: 1.12 - id: go - - name: Check out code into the Go module directory - uses: actions/checkout@v2 - - name: Set up environment - run: | - export GOPATH=$(go env GOPATH) - echo "::set-env name=GOPATH::$GOPATH" - export PATH=$GOPATH/bin:$PATH - echo "::add-path::$GOPATH/bin" - - name: Cache Kubernetes - id: cache-k8s - uses: actions/cache@v1 - with: - path: "${{ env.GOPATH }}/src/k8s.io/kubernetes/" - key: k8s-go-2-${{ env.K8S_VERSION }} - - name: Build and install Kubernetes - if: steps.cache-k8s.outputs.cache-hit != 'true' - run: | - set -x - git clone --single-branch --branch $K8S_VERSION https://github.com/kubernetes/kubernetes.git $GOPATH/src/k8s.io/kubernetes/ - pushd $GOPATH/src/k8s.io/kubernetes/ - make WHAT="test/e2e/e2e.test vendor/github.com/onsi/ginkgo/ginkgo cmd/kubectl" - rm -rf .git - - name: kind setup - run: | - set -x - - export GO111MODULE="on" - mkdir -p $GOPATH/bin - curl -fs https://chunk.io/trozet/ba750701d0af4e2b94b249ab9de27b50 -o $GOPATH/bin/kubetest - chmod +x $GOPATH/bin/kubetest - - pushd $GOPATH/src/k8s.io/kubernetes/ - sudo ln ./_output/local/go/bin/kubectl /usr/local/bin/kubectl - popd - - GO111MODULE="on" go get sigs.k8s.io/kind@v0.7.0 - pushd contrib - ./kind.sh - popd - - name: e2e-kind-ovn-shard-np - run: | - pushd $GOPATH/src/k8s.io/kubernetes/ - export 
KUBERNETES_CONFORMANCE_TEST=y - export KUBECONFIG=${HOME}/admin.conf - export MASTER_NAME=${KIND_CLUSTER_NAME}-control-plane - export NODE_NAMES=${MASTER_NAME} - # all tests that have P as the sixth letter after the N - kubetest --provider=local --deployment=kind --kind-cluster-name=${KIND_CLUSTER_NAME} --test --test_args='--num-nodes=3 --ginkgo.focus=\[sig-network\]\s[Nn].{6}[Pp].*$ --disable-log-dump=false --ginkgo.skip=Networking\sIPerf\sIPv[46]|\[Feature:PerformanceDNS\]|\[Feature:IPv6DualStackAlphaFeature\]|NetworkPolicy\sbetween\sserver\sand\sclient.+(ingress\saccess|multiple\segress\spolicies|allow\segress\saccess)|\[Feature:NoSNAT\]|Services.+(ESIPP|cleanup\sfinalizer|session\saffinity)|\[Feature:Networking-IPv6\]|\[Feature:Federation\]|configMap\snameserver|ClusterDns\s\[Feature:Example\]|(Namespace|Pod)Selector\s\[Feature:NetworkPolicy\]|kube-proxy|should\sset\sTCP\sCLOSE_WAIT\stimeout' - - name: Export logs - if: always() - run: "mkdir -p /tmp/kind/logs \nkind export logs --name ${KIND_CLUSTER_NAME} - /tmp/kind/logs\n" - - name: Upload logs - if: always() - uses: actions/upload-artifact@v1 - with: - name: kind-logs-${{ github.run_id }}-${{ env.JOB_NAME }} - path: "/tmp/kind/logs" - e2e-shard-s: - name: e2e-kind-ovn-shard-s-${{ matrix.config.name }} - runs-on: ubuntu-latest - needs: k8s - strategy: - matrix: - config: - - ha: 'false' - name: noHA - - ha: 'true' - name: HA - env: - JOB_NAME: e2e-shard-s-${{ matrix.config.name }} - KIND_HA: "${{matrix.config.ha}}" - steps: - - name: Free up disk space - run: sudo eatmydata apt-get remove --auto-remove -y aspnetcore-* dotnet-* libmono-* - mono-* msbuild php-* php7* ghc-* zulu-* - - name: Set up Go 1.12 - uses: actions/setup-go@v1 - with: - go-version: 1.12 - id: go - - name: Check out code into the Go module directory - uses: actions/checkout@v2 - - name: Set up environment - run: | - export GOPATH=$(go env GOPATH) - echo "::set-env name=GOPATH::$GOPATH" - export PATH=$GOPATH/bin:$PATH - echo 
"::add-path::$GOPATH/bin" - - name: Cache Kubernetes - id: cache-k8s - uses: actions/cache@v1 - with: - path: "${{ env.GOPATH }}/src/k8s.io/kubernetes/" - key: k8s-go-2-${{ env.K8S_VERSION }} - - name: Build and install Kubernetes - if: steps.cache-k8s.outputs.cache-hit != 'true' - run: | - set -x - git clone --single-branch --branch $K8S_VERSION https://github.com/kubernetes/kubernetes.git $GOPATH/src/k8s.io/kubernetes/ - pushd $GOPATH/src/k8s.io/kubernetes/ - make WHAT="test/e2e/e2e.test vendor/github.com/onsi/ginkgo/ginkgo cmd/kubectl" - rm -rf .git - - name: kind setup - run: | - set -x - - export GO111MODULE="on" - mkdir -p $GOPATH/bin - curl -fs https://chunk.io/trozet/ba750701d0af4e2b94b249ab9de27b50 -o $GOPATH/bin/kubetest - chmod +x $GOPATH/bin/kubetest - - pushd $GOPATH/src/k8s.io/kubernetes/ - sudo ln ./_output/local/go/bin/kubectl /usr/local/bin/kubectl - popd - - GO111MODULE="on" go get sigs.k8s.io/kind@v0.7.0 - pushd contrib - ./kind.sh - popd - - name: e2e-kind-ovn-shard-s - run: | - pushd $GOPATH/src/k8s.io/kubernetes/ - export KUBERNETES_CONFORMANCE_TEST=y - export KUBECONFIG=${HOME}/admin.conf - export MASTER_NAME=${KIND_CLUSTER_NAME}-control-plane - export NODE_NAMES=${MASTER_NAME} - kubetest --provider=local --deployment=kind --kind-cluster-name=${KIND_CLUSTER_NAME} --test --test_args='--num-nodes=3 --ginkgo.focus=\[sig-network\]\s[Ss].* --disable-log-dump=false --ginkgo.skip=Networking\sIPerf\sIPv[46]|\[Feature:PerformanceDNS\]|\[Feature:IPv6DualStackAlphaFeature\]|NetworkPolicy\sbetween\sserver\sand\sclient.+(ingress\saccess|multiple\segress\spolicies|allow\segress\saccess)|\[Feature:NoSNAT\]|Services.+(ESIPP|cleanup\sfinalizer|session\saffinity)|\[Feature:Networking-IPv6\]|\[Feature:Federation\]|configMap\snameserver|ClusterDns\s\[Feature:Example\]|(Namespace|Pod)Selector\s\[Feature:NetworkPolicy\]|kube-proxy|should\sset\sTCP\sCLOSE_WAIT\stimeout' - - name: Export logs - if: always() - run: "mkdir -p /tmp/kind/logs \nkind export logs --name 
${KIND_CLUSTER_NAME} - /tmp/kind/logs\n" - - name: Upload logs - if: always() - uses: actions/upload-artifact@v1 - with: - name: kind-logs-${{ github.run_id }}-${{ env.JOB_NAME }} - path: "/tmp/kind/logs" - e2e-shard-other: - name: e2e-kind-ovn-shard-other-${{ matrix.config.name }} - runs-on: ubuntu-latest - needs: k8s - strategy: - matrix: - config: - - ha: 'false' - name: noHA - - ha: 'true' - name: HA - env: - JOB_NAME: e2e-shard-other-${{ matrix.config.name }} - KIND_HA: "${{matrix.config.ha}}" - steps: - - name: Free up disk space - run: sudo eatmydata apt-get remove --auto-remove -y aspnetcore-* dotnet-* libmono-* - mono-* msbuild php-* php7* ghc-* zulu-* - - name: Set up Go 1.12 - uses: actions/setup-go@v1 - with: - go-version: 1.12 - id: go - - name: Check out code into the Go module directory - uses: actions/checkout@v2 - - name: Set up environment - run: | - export GOPATH=$(go env GOPATH) - echo "::set-env name=GOPATH::$GOPATH" - export PATH=$GOPATH/bin:$PATH - echo "::add-path::$GOPATH/bin" - - name: Cache Kubernetes - id: cache-k8s - uses: actions/cache@v1 - with: - path: "${{ env.GOPATH }}/src/k8s.io/kubernetes/" - key: k8s-go-2-${{ env.K8S_VERSION }} - - name: Build and install Kubernetes - if: steps.cache-k8s.outputs.cache-hit != 'true' - run: | - set -x - git clone --single-branch --branch $K8S_VERSION https://github.com/kubernetes/kubernetes.git $GOPATH/src/k8s.io/kubernetes/ - pushd $GOPATH/src/k8s.io/kubernetes/ - make WHAT="test/e2e/e2e.test vendor/github.com/onsi/ginkgo/ginkgo cmd/kubectl" - rm -rf .git - - name: kind setup - run: | - set -x - - export GO111MODULE="on" - mkdir -p $GOPATH/bin - curl -fs https://chunk.io/trozet/ba750701d0af4e2b94b249ab9de27b50 -o $GOPATH/bin/kubetest - chmod +x $GOPATH/bin/kubetest - - pushd $GOPATH/src/k8s.io/kubernetes/ - sudo ln ./_output/local/go/bin/kubectl /usr/local/bin/kubectl - popd - - GO111MODULE="on" go get sigs.k8s.io/kind@v0.7.0 - pushd contrib - ./kind.sh - popd - - name: e2e-kind-ovn-shard-other - 
run: | - pushd $GOPATH/src/k8s.io/kubernetes/ - export KUBERNETES_CONFORMANCE_TEST=y - export KUBECONFIG=${HOME}/admin.conf - export MASTER_NAME=${KIND_CLUSTER_NAME}-control-plane - export NODE_NAMES=${MASTER_NAME} - kubetest --provider=local --deployment=kind --kind-cluster-name=${KIND_CLUSTER_NAME} --test --test_args='--num-nodes=3 --ginkgo.focus=\[sig-network\]\s[^NnSs].* --disable-log-dump=false --ginkgo.skip=Networking\sIPerf\sIPv[46]|\[Feature:PerformanceDNS\]|\[Feature:IPv6DualStackAlphaFeature\]|NetworkPolicy\sbetween\sserver\sand\sclient.+(ingress\saccess|multiple\segress\spolicies|allow\segress\saccess)|\[Feature:NoSNAT\]|Services.+(ESIPP|cleanup\sfinalizer|session\saffinity)|\[Feature:Networking-IPv6\]|\[Feature:Federation\]|configMap\snameserver|ClusterDns\s\[Feature:Example\]|(Namespace|Pod)Selector\s\[Feature:NetworkPolicy\]|kube-proxy|should\sset\sTCP\sCLOSE_WAIT\stimeout' - - name: Export logs - if: always() - run: "mkdir -p /tmp/kind/logs \nkind export logs --name ${KIND_CLUSTER_NAME} - /tmp/kind/logs\n" - - name: Upload logs - if: always() - uses: actions/upload-artifact@v1 - with: - name: kind-logs-${{ github.run_id }}-${{ env.JOB_NAME }} - path: "/tmp/kind/logs" - e2e-control-plane: - name: e2e-control-plane-${{ matrix.config.name }} - runs-on: ubuntu-latest - needs: k8s - strategy: - matrix: - config: - - ha: 'false' - name: noHA - - ha: 'true' - name: HA - env: - JOB_NAME: e2e-control-plane-${{ matrix.config.name }} - KIND_HA: "${{matrix.config.ha}}" - steps: - - name: Free up disk space - run: sudo eatmydata apt-get remove --auto-remove -y aspnetcore-* dotnet-* libmono-* - mono-* msbuild php-* php7* ghc-* zulu-* - - name: Set up Go 1.12 - uses: actions/setup-go@v1 - with: - go-version: 1.12 - id: go - - name: Check out code into the Go module directory - uses: actions/checkout@v2 - - name: Set up environment - run: | - export GOPATH=$(go env GOPATH) - echo "::set-env name=GOPATH::$GOPATH" - export PATH=$GOPATH/bin:$PATH - echo 
"::add-path::$GOPATH/bin" - - name: Cache Kubernetes - id: cache-k8s - uses: actions/cache@v1 - with: - path: "${{ env.GOPATH }}/src/k8s.io/kubernetes/" - key: k8s-go-2-${{ env.K8S_VERSION }} - - name: Build and install Kubernetes - if: steps.cache-k8s.outputs.cache-hit != 'true' - run: | - set -x - git clone --single-branch --branch $K8S_VERSION https://github.com/kubernetes/kubernetes.git $GOPATH/src/k8s.io/kubernetes/ - pushd $GOPATH/src/k8s.io/kubernetes/ - make WHAT="test/e2e/e2e.test vendor/github.com/onsi/ginkgo/ginkgo cmd/kubectl" - rm -rf .git - - name: kind setup - run: | - set -x - - export GO111MODULE="on" - mkdir -p $GOPATH/bin - curl -fs https://chunk.io/trozet/ba750701d0af4e2b94b249ab9de27b50 -o $GOPATH/bin/kubetest - chmod +x $GOPATH/bin/kubetest - - pushd $GOPATH/src/k8s.io/kubernetes/ - sudo ln ./_output/local/go/bin/kubectl /usr/local/bin/kubectl - popd - - GO111MODULE="on" go get sigs.k8s.io/kind@v0.7.0 - pushd contrib - ./kind.sh - popd - - name: e2e-control-plane - run: | - set -x - pushd test/e2e - go mod download - popd - pushd $GOPATH/src/k8s.io/kubernetes/ - export KUBERNETES_CONFORMANCE_TEST=y - export KUBECONFIG=${HOME}/admin.conf - export MASTER_NAME=${KIND_CLUSTER_NAME}-control-plane - export NODE_NAMES=${MASTER_NAME} - - sed -E -i 's/"\$\{ginkgo\}" "\$\{ginkgo_args\[\@\]\:\+\$\{ginkgo_args\[\@\]\}\}" "\$\{e2e_test\}"/pushd \$GITHUB_WORKSPACE\/test\/e2e\nGO111MODULE=on "\$\{ginkgo\}" "\$\{ginkgo_args\[\@\]\:\+\$\{ginkgo_args\[\@\]\}\}"/' hack/ginkgo-e2e.sh - - kubetest --provider=local --deployment=kind --kind-cluster-name=kind-ovn --test --test_args='--num-nodes=3 --disable-log-dump=false' - - name: Export logs - if: always() - run: "mkdir -p /tmp/kind/logs \nkind export logs --name ${KIND_CLUSTER_NAME} - /tmp/kind/logs\n" - - name: Upload logs - if: always() - uses: actions/upload-artifact@v1 - with: - name: kind-logs-${{ github.run_id }}-${{ env.JOB_NAME }} - path: "/tmp/kind/logs" diff --git a/test/Makefile b/test/Makefile new file 
mode 100644 index 0000000000..5bdcae579b --- /dev/null +++ b/test/Makefile @@ -0,0 +1,11 @@ +.PHONY: install-kind +install-kind: + ./scripts/install-kind.sh + +.PHONY: e2e-kind-ovn-shard-% +e2e-kind-ovn-shard-%: + ./scripts/e2e-kind.sh $@ + +.PHONY: e2e-control-plane +e2e-control-plane: + ./scripts/e2e-cp.sh diff --git a/test/scripts/e2e-cp.sh b/test/scripts/e2e-cp.sh new file mode 100755 index 0000000000..30924740f7 --- /dev/null +++ b/test/scripts/e2e-cp.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +set -ex + +pushd e2e +go mod download +popd + +export KUBERNETES_CONFORMANCE_TEST=y +export KUBECONFIG=${HOME}/admin.conf +export MASTER_NAME=${KIND_CLUSTER_NAME}-control-plane +export NODE_NAMES=${MASTER_NAME} +export KIND_INSTALL_INGRESS=${KIND_INSTALL_INGRESS} + +sed -E -i 's/"\$\{ginkgo\}" "\$\{ginkgo_args\[\@\]\:\+\$\{ginkgo_args\[\@\]\}\}" "\$\{e2e_test\}"/pushd \$GITHUB_WORKSPACE\/test\/e2e\nGO111MODULE=on "\$\{ginkgo\}" "\$\{ginkgo_args\[\@\]\:\+\$\{ginkgo_args\[\@\]\}\}"/' ${GOPATH}/src/k8s.io/kubernetes/hack/ginkgo-e2e.sh + +pushd ${GOPATH}/src/k8s.io/kubernetes +kubetest --provider=local --deployment=kind --kind-cluster-name=kind-ovn --test --test_args='--disable-log-dump=false' +popd diff --git a/test/scripts/e2e-kind.sh b/test/scripts/e2e-kind.sh new file mode 100755 index 0000000000..cae9881f80 --- /dev/null +++ b/test/scripts/e2e-kind.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash + +set -ex + +SHARD=$1 + +pushd $GOPATH/src/k8s.io/kubernetes/ +export KUBERNETES_CONFORMANCE_TEST=y +export KUBECONFIG=${HOME}/admin.conf +export MASTER_NAME=${KIND_CLUSTER_NAME}-control-plane +export NODE_NAMES=${MASTER_NAME} + 
+SKIPPED_TESTS='Networking\sIPerf\sIPv[46]|\[Feature:PerformanceDNS\]|\[Feature:IPv6DualStackAlphaFeature\]|NetworkPolicy\sbetween\sserver\sand\sclient.+(ingress\saccess|multiple\segress\spolicies|allow\segress\saccess)|\[Feature:NoSNAT\]|Services.+(ESIPP|cleanup\sfinalizer|session\saffinity)|\[Feature:Networking-IPv6\]|\[Feature:Federation\]|configMap\snameserver|ClusterDns\s\[Feature:Example\]|(Namespace|Pod)Selector\s\[Feature:NetworkPolicy\]|kube-proxy|should\sset\sTCP\sCLOSE_WAIT\stimeout' +GINKGO_ARGS="--ginkgo.skip=${SKIPPED_TESTS} --disable-log-dump=false" + +case "$SHARD" in + e2e-kind-ovn-shard-n) + # all tests that don't have P as their sixth letter after the N + GINKGO_ARGS="${GINKGO_ARGS} "'--ginkgo.focus=\[sig-network\]\s[Nn](.{6}[^Pp].*|.{0,6}$)' + ;; + e2e-kind-ovn-shard-np) + # all tests that have P as the sixth letter after the N + GINKGO_ARGS="${GINKGO_ARGS} "'--ginkgo.focus=\[sig-network\]\s[Nn].{6}[Pp].*$' + ;; + e2e-kind-ovn-shard-s) + GINKGO_ARGS="${GINKGO_ARGS} "'--ginkgo.focus=\[sig-network\]\s[Ss].*' + ;; + e2e-kind-ovn-shard-other) + GINKGO_ARGS="${GINKGO_ARGS} "'--ginkgo.focus=\[sig-network\]\s[^NnSs].*' + ;; + *) + echo "unknown shard" + exit 1 + ;; +esac +kubetest \ + --provider=local \ + --deployment=kind \ + --kind-cluster-name=${KIND_CLUSTER_NAME} \ + --test \ + --test_args="${GINKGO_ARGS}" + diff --git a/test/scripts/install-kind.sh b/test/scripts/install-kind.sh new file mode 100755 index 0000000000..efa7ac82da --- /dev/null +++ b/test/scripts/install-kind.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +set -ex + +export GO111MODULE="on" +mkdir -p $GOPATH/bin +curl -fs https://chunk.io/trozet/ba750701d0af4e2b94b249ab9de27b50 -o $GOPATH/bin/kubetest +chmod +x $GOPATH/bin/kubetest + +pushd $GOPATH/src/k8s.io/kubernetes/ +sudo ln ./_output/local/go/bin/kubectl /usr/local/bin/kubectl +popd + +GO111MODULE="on" go get sigs.k8s.io/kind@v0.7.0 +pushd ../contrib +./kind.sh +popd From d6706143715fcd2f8a7fbd4d52ac43e8c070d816 Mon Sep 17 00:00:00 
2001 From: Dave Tucker Date: Mon, 6 Apr 2020 17:46:30 +0100 Subject: [PATCH 06/27] Add HA testing back in to the matrix This commit adds the HA testing back in to the matrix. It was easier to add this in after the ruby was removed than to rebase. Signed-off-by: Dave Tucker --- .github/workflows/test.yml | 18 ++++++++++++------ test/scripts/e2e-kind.sh | 2 +- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 1a508304e2..d63e120a95 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -115,14 +115,20 @@ jobs: fail-fast: false matrix: target: - - e2e-kind-ovn-shard-n - - e2e-kind-ovn-shard-np - - e2e-kind-ovn-shard-s - - e2e-kind-ovn-shard-other - - e2e-control-plane + - e2e-kind-ovn-shard-n + - e2e-kind-ovn-shard-np + - e2e-kind-ovn-shard-s + - e2e-kind-ovn-shard-other + - e2e-control-plane + ha: + - enabled: "true" + name: "HA" + - enabled: "false" + name: "noHA" needs: k8s env: - JOB_NAME: "${{ matrix.target }}" + JOB_NAME: "${{ matrix.target }}-${{ matrix.ha.name }}" + KIND_HA: "${{ matrix.ha.enabled }}" steps: - name: Free up disk space diff --git a/test/scripts/e2e-kind.sh b/test/scripts/e2e-kind.sh index cae9881f80..b73074dd55 100755 --- a/test/scripts/e2e-kind.sh +++ b/test/scripts/e2e-kind.sh @@ -11,7 +11,7 @@ export MASTER_NAME=${KIND_CLUSTER_NAME}-control-plane export NODE_NAMES=${MASTER_NAME} SKIPPED_TESTS='Networking\sIPerf\sIPv[46]|\[Feature:PerformanceDNS\]|\[Feature:IPv6DualStackAlphaFeature\]|NetworkPolicy\sbetween\sserver\sand\sclient.+(ingress\saccess|multiple\segress\spolicies|allow\segress\saccess)|\[Feature:NoSNAT\]|Services.+(ESIPP|cleanup\sfinalizer|session\saffinity)|\[Feature:Networking-IPv6\]|\[Feature:Federation\]|configMap\snameserver|ClusterDns\s\[Feature:Example\]|(Namespace|Pod)Selector\s\[Feature:NetworkPolicy\]|kube-proxy|should\sset\sTCP\sCLOSE_WAIT\stimeout' -GINKGO_ARGS="--ginkgo.skip=${SKIPPED_TESTS} --disable-log-dump=false" 
+GINKGO_ARGS="--num-nodes=3 --ginkgo.skip=${SKIPPED_TESTS} --disable-log-dump=false" case "$SHARD" in e2e-kind-ovn-shard-n) From 6dccf3a079d96c1c4a8ed1cfcb0660e19599cf51 Mon Sep 17 00:00:00 2001 From: Brent Salisbury Date: Tue, 7 Apr 2020 14:05:09 -0400 Subject: [PATCH 07/27] Add hybrid-sdn support to KIND Run using: OVN_HYBRID_OVERLAY_ENABLE=true OVN_HYBRID_OVERLAY_NET_CIDR= pushd contrib/; ./kind.sh Signed-off-by: Brent Salisbury --- dist/images/daemonset.sh | 8 ++++++++ dist/templates/ovnkube-master.yaml.j2 | 4 ++++ dist/templates/ovnkube-node.yaml.j2 | 4 ++++ 3 files changed, 16 insertions(+) diff --git a/dist/images/daemonset.sh b/dist/images/daemonset.sh index 5462e2624a..c2e0a1d996 100755 --- a/dist/images/daemonset.sh +++ b/dist/images/daemonset.sh @@ -157,6 +157,10 @@ ovn_loglevel_controller=${OVN_LOGLEVEL_CONTROLLER:-"-vconsole:info"} echo "ovn_loglevel_controller: ${ovn_loglevel_controller}" ovn_loglevel_nbctld=${OVN_LOGLEVEL_NBCTLD:-"-vconsole:info"} echo "ovn_loglevel_nbctld: ${ovn_loglevel_nbctld}" +ovn_hybrid_overlay_enable=${OVN_HYBRID_OVERLAY_ENABLE} +echo "ovn_hybrid_overlay_enable: ${ovn_hybrid_overlay_enable}" +ovn_hybrid_overlay_net_cidr=${OVN_HYBRID_OVERLAY_NET_CIDR} +echo "ovn_hybrid_overlay_net_cidr: ${ovn_hybrid_overlay_net_cidr}" ovn_image=${image} \ ovn_image_pull_policy=${policy} \ @@ -165,6 +169,8 @@ ovn_image=${image} \ ovn_gateway_opts=${ovn_gateway_opts} \ ovnkube_node_loglevel=${node_loglevel} \ ovn_loglevel_controller=${ovn_loglevel_controller} \ + ovn_hybrid_overlay_net_cidr=${ovn_hybrid_overlay_net_cidr} \ + ovn_hybrid_overlay_enable=${ovn_hybrid_overlay_enable} \ j2 ../templates/ovnkube-node.yaml.j2 -o ../yaml/ovnkube-node.yaml ovn_image=${image} \ @@ -172,6 +178,8 @@ ovn_image=${image} \ ovnkube_master_loglevel=${master_loglevel} \ ovn_loglevel_northd=${ovn_loglevel_northd} \ ovn_loglevel_nbctld=${ovn_loglevel_nbctld} \ + ovn_hybrid_overlay_net_cidr=${ovn_hybrid_overlay_net_cidr} \ + 
ovn_hybrid_overlay_enable=${ovn_hybrid_overlay_enable} \ j2 ../templates/ovnkube-master.yaml.j2 -o ../yaml/ovnkube-master.yaml ovn_image=${image} \ diff --git a/dist/templates/ovnkube-master.yaml.j2 b/dist/templates/ovnkube-master.yaml.j2 index e855429fd6..f1f5fd2a19 100644 --- a/dist/templates/ovnkube-master.yaml.j2 +++ b/dist/templates/ovnkube-master.yaml.j2 @@ -214,6 +214,10 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: OVN_HYBRID_OVERLAY_ENABLE + value: "{{ ovn_hybrid_overlay_enable }}" + - name: OVN_HYBRID_OVERLAY_NET_CIDR + value: "{{ ovn_hybrid_overlay_net_cidr }}" # end of container volumes: diff --git a/dist/templates/ovnkube-node.yaml.j2 b/dist/templates/ovnkube-node.yaml.j2 index 02f26da7b8..891259af96 100644 --- a/dist/templates/ovnkube-node.yaml.j2 +++ b/dist/templates/ovnkube-node.yaml.j2 @@ -228,6 +228,10 @@ spec: value: "{{ ovn_gateway_mode }}" - name: OVN_GATEWAY_OPTS value: "{{ ovn_gateway_opts }}" + - name: OVN_HYBRID_OVERLAY_ENABLE + value: "{{ ovn_hybrid_overlay_enable }}" + - name: OVN_HYBRID_OVERLAY_NET_CIDR + value: "{{ ovn_hybrid_overlay_net_cidr }}" lifecycle: preStop: From 8b5356c5ba30d074e603fe853c48bd27ad9d201a Mon Sep 17 00:00:00 2001 From: Andrew Sun Date: Tue, 7 Apr 2020 15:38:21 -0400 Subject: [PATCH 08/27] Remove timer Signed-off-by: Andrew Sun --- test/e2e/e2e_test.go | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 95cde644ed..f7a08f4404 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -5,7 +5,6 @@ import ( "net/http" "strconv" "strings" - "time" "github.com/onsi/ginkgo" . 
"github.com/onsi/ginkgo" @@ -102,9 +101,7 @@ var _ = Describe("e2e control plane", func() { go checkContinuousConnectivity(f, "", "connectivity-test-continuous", "8.8.8.8", 53, 30, podChan, errChan) testPod := <-podChan - framework.Logf("Test pod running on %q, waiting a few seconds", testPod.Spec.NodeName) - - time.Sleep(10 * time.Second) + framework.Logf("Test pod running on %q", testPod.Spec.NodeName) podClient := f.ClientSet.CoreV1().Pods("ovn-kubernetes") @@ -131,9 +128,7 @@ var _ = Describe("e2e control plane", func() { go checkContinuousConnectivity(f, "", "connectivity-test-continuous", "8.8.8.8", 53, 30, podChan, errChan) testPod := <-podChan - framework.Logf("Test pod running on %q, waiting a few seconds", testPod.Spec.NodeName) - - time.Sleep(10 * time.Second) + framework.Logf("Test pod running on %q", testPod.Spec.NodeName) podClient := f.ClientSet.CoreV1().Pods("ovn-kubernetes") From 3baf3bea450b6c128db7532c721886c495e93b44 Mon Sep 17 00:00:00 2001 From: Pardhakeswar Pacha Date: Tue, 7 Apr 2020 13:23:12 -0400 Subject: [PATCH 09/27] Obtaining Node IP's from status.Host IP instead of using DNS In Ovnkube.sh, currently Node IP's are obtained using DNS. Instead, Pod spec (status.HOST IP) is used to obtain the Node IP's. Signed-off-by: Pardhakeswar Pacha --- dist/images/ovndb-raft-functions.sh | 7 +++---- dist/images/ovnkube.sh | 4 ++-- dist/templates/ovnkube-db-raft.yaml.j2 | 8 ++++++++ dist/templates/ovnkube-db-vip.yaml.j2 | 4 ++++ dist/templates/ovnkube-db.yaml.j2 | 8 ++++++++ 5 files changed, 25 insertions(+), 6 deletions(-) diff --git a/dist/images/ovndb-raft-functions.sh b/dist/images/ovndb-raft-functions.sh index 20e215cc19..66f3d8c4b0 100644 --- a/dist/images/ovndb-raft-functions.sh +++ b/dist/images/ovndb-raft-functions.sh @@ -194,8 +194,7 @@ ovsdb-raft() { rm -f ${ovn_db_pidfile} verify-ovsdb-raft - local_ip=$(getent ahostsv4 $(hostname) | grep -v "^127\." 
| head -1 | awk '{ print $1 }') - if [[ ${local_ip} == "" ]]; then + if [[ ${ovn_db_host} == "" ]] ; then echo "failed to retrieve the IP address of the host $(hostname). Exiting..." exit 1 fi @@ -207,7 +206,7 @@ ovsdb-raft() { if [[ "${POD_NAME}" == "ovnkube-db-0" ]]; then run_as_ovs_user_if_needed \ ${OVNCTL_PATH} run_${db}_ovsdb --no-monitor \ - --db-${db}-cluster-local-addr=${local_ip} \ + --db-${db}-cluster-local-addr=${ovn_db_host} \ --db-${db}-cluster-local-port=${raft_port} \ --ovn-${db}-log="${ovn_log_db}" & else @@ -217,7 +216,7 @@ ovsdb-raft() { fi run_as_ovs_user_if_needed \ ${OVNCTL_PATH} run_${db}_ovsdb --no-monitor \ - --db-${db}-cluster-local-addr=${local_ip} --db-${db}-cluster-remote-addr=${init_ip} \ + --db-${db}-cluster-local-addr=${ovn_db_host} --db-${db}-cluster-remote-addr=${init_ip} \ --db-${db}-cluster-local-port=${raft_port} --db-${db}-cluster-remote-port=${raft_port} \ --ovn-${db}-log="${ovn_log_db}" & fi diff --git a/dist/images/ovnkube.sh b/dist/images/ovnkube.sh index 4c848d24ce..978df331fd 100755 --- a/dist/images/ovnkube.sh +++ b/dist/images/ovnkube.sh @@ -123,8 +123,8 @@ mtu=${OVN_MTU:-1400} ovn_kubernetes_namespace=${OVN_KUBERNETES_NAMESPACE:-ovn-kubernetes} # host on which ovnkube-db POD is running and this POD contains both -# OVN NB and SB DB running in their own container. Ignore IPs in loopback range (127.0.0.0/8) -ovn_db_host=$(getent ahostsv4 $(hostname) | grep -v "^127\." | head -1 | awk '{ print $1 }') +# OVN NB and SB DB running in their own container. 
+ovn_db_host=${K8S_NODE_IP:-""} # OVN_NB_PORT - ovn north db port (default 6641) ovn_nb_port=${OVN_NB_PORT:-6641} diff --git a/dist/templates/ovnkube-db-raft.yaml.j2 b/dist/templates/ovnkube-db-raft.yaml.j2 index 40820a2d57..fdc137d490 100644 --- a/dist/templates/ovnkube-db-raft.yaml.j2 +++ b/dist/templates/ovnkube-db-raft.yaml.j2 @@ -151,6 +151,10 @@ spec: valueFrom: fieldRef: fieldPath: metadata.name + - name: K8S_NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP # end of container # sb-ovsdb - v3 @@ -211,6 +215,10 @@ spec: valueFrom: fieldRef: fieldPath: metadata.name + - name: K8S_NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP # end of container # db-metrics-exporter - v3 diff --git a/dist/templates/ovnkube-db-vip.yaml.j2 b/dist/templates/ovnkube-db-vip.yaml.j2 index 9331e37dc5..aac0ae2065 100644 --- a/dist/templates/ovnkube-db-vip.yaml.j2 +++ b/dist/templates/ovnkube-db-vip.yaml.j2 @@ -129,6 +129,10 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: K8S_NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP - name: OVN_DB_VIP value: "{{ ovn_db_vip }}" # end of container diff --git a/dist/templates/ovnkube-db.yaml.j2 b/dist/templates/ovnkube-db.yaml.j2 index 0c218fa75b..e5328d8e8d 100644 --- a/dist/templates/ovnkube-db.yaml.j2 +++ b/dist/templates/ovnkube-db.yaml.j2 @@ -112,6 +112,10 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: K8S_NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP readinessProbe: exec: command: ["/usr/bin/ovn-kube-util", "readiness-probe", "-t", "ovnnb-db"] @@ -168,6 +172,10 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: K8S_NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP readinessProbe: exec: command: ["/usr/bin/ovn-kube-util", "readiness-probe", "-t", "ovnsb-db"] From 02ed57dcf713506343116b796be9a27eb785602d Mon Sep 17 00:00:00 2001 From: Pardhakeswar Pacha Date: Tue, 7 Apr 2020 20:11:52 -0400 Subject: [PATCH 10/27] Renaming 
loglevel capture variable names in ovnkube.sh & yaml files Currently, ovn_log_northd variable captures the loglevel which should capture the log location. Renaming ovn_log_northd -> ovn_loglevel_northd and similar renaming is used for other variable names (ovn_log_nb, ovn_log_sb, ovn_log_controller). Similarly in the YAML files, OVN_LOG_NB and other log level variables(OVN_LOG_SB, OVN_LOG_NBCTLD) is used for for capturing the loglevel. Renaming them to OVN_LOGLEVEL_NB and others to similar nomenclature. Signed-off-by: Pardhakeswar Pacha --- dist/images/ovnkube.sh | 36 +++++++++++++------------- dist/templates/ovnkube-db-raft.yaml.j2 | 4 +-- dist/templates/ovnkube-db-vip.yaml.j2 | 2 +- dist/templates/ovnkube-db.yaml.j2 | 4 +-- dist/templates/ovnkube-master.yaml.j2 | 4 +-- 5 files changed, 25 insertions(+), 25 deletions(-) diff --git a/dist/images/ovnkube.sh b/dist/images/ovnkube.sh index 978df331fd..93ded3ff02 100755 --- a/dist/images/ovnkube.sh +++ b/dist/images/ovnkube.sh @@ -50,11 +50,11 @@ fi # OVN_GATEWAY_MODE - the gateway mode (shared or local) - v3 # OVN_GATEWAY_OPTS - the options for the ovn gateway # OVNKUBE_LOGLEVEL - log level for ovnkube (0..5, default 4) - v3 -# OVN_LOG_NORTHD - log level (ovn-ctl default: -vconsole:emer -vsyslog:err -vfile:info) - v3 -# OVN_LOG_NB - log level (ovn-ctl default: -vconsole:off -vfile:info) - v3 -# OVN_LOG_SB - log level (ovn-ctl default: -vconsole:off -vfile:info) - v3 -# OVN_LOG_CONTROLLER - log level (ovn-ctl default: -vconsole:off -vfile:info) - v3 -# OVN_LOG_NBCTLD - log level (ovn-ctl default: -vconsole:off -vfile:info) +# OVN_LOGLEVEL_NORTHD - log level (ovn-ctl default: -vconsole:emer -vsyslog:err -vfile:info) - v3 +# OVN_LOGLEVEL_NB - log level (ovn-ctl default: -vconsole:off -vfile:info) - v3 +# OVN_LOGLEVEL_SB - log level (ovn-ctl default: -vconsole:off -vfile:info) - v3 +# OVN_LOGLEVEL_CONTROLLER - log level (ovn-ctl default: -vconsole:off -vfile:info) - v3 +# OVN_LOGLEVEL_NBCTLD - log level (ovn-ctl default: 
-vconsole:off -vfile:info) - v3 # OVN_NB_PORT - ovn north db port (default 6641) # OVN_SB_PORT - ovn south db port (default 6642) # OVN_NB_RAFT_PORT - ovn north db raft port (default 6643) @@ -68,11 +68,11 @@ fi cmd=${1:-""} # ovn daemon log levels -ovn_log_northd=${OVN_LOG_NORTHD:-"-vconsole:info"} -ovn_log_nb=${OVN_LOG_NB:-"-vconsole:info"} -ovn_log_sb=${OVN_LOG_SB:-"-vconsole:info"} -ovn_log_controller=${OVN_LOG_CONTROLLER:-"-vconsole:info"} -ovn_log_nbctld=${OVN_LOG_NBCTLD:-"-vfile:info"} +ovn_loglevel_northd=${OVN_LOGLEVEL_NORTHD:-"-vconsole:info"} +ovn_loglevel_nb=${OVN_LOGLEVEL_NB:-"-vconsole:info"} +ovn_loglevel_sb=${OVN_LOGLEVEL_SB:-"-vconsole:info"} +ovn_loglevel_controller=${OVN_LOGLEVEL_CONTROLLER:-"-vconsole:info"} +ovn_loglevel_nbctld=${OVN_LOGLEVEL_NBCTLD:-"-vconsole:info"} ovnkubelogdir=/var/log/ovn-kubernetes @@ -430,7 +430,7 @@ display_env() { echo OVN_NORTHD_OPTS ${ovn_northd_opts} echo OVN_SOUTH ${ovn_sbdb} echo OVN_CONTROLLER_OPTS ${ovn_controller_opts} - echo OVN_LOG_CONTROLLER ${ovn_log_controller} + echo OVN_LOGLEVEL_CONTROLLER ${ovn_loglevel_controller} echo OVN_GATEWAY_MODE ${ovn_gateway_mode} echo OVN_GATEWAY_OPTS ${ovn_gateway_opts} echo OVN_NET_CIDR ${net_cidr} @@ -610,7 +610,7 @@ nb-ovsdb() { echo "=============== run nb_ovsdb ========== MASTER ONLY" run_as_ovs_user_if_needed \ ${OVNCTL_PATH} run_nb_ovsdb --no-monitor \ - --ovn-nb-log="${ovn_log_nb}" & + --ovn-nb-log="${ovn_loglevel_nb}" & wait_for_event attempts=3 process_ready ovnnb_db echo "=============== nb-ovsdb ========== RUNNING" @@ -641,7 +641,7 @@ sb-ovsdb() { echo "=============== run sb_ovsdb ========== MASTER ONLY" run_as_ovs_user_if_needed \ ${OVNCTL_PATH} run_sb_ovsdb --no-monitor \ - --ovn-sb-log="${ovn_log_sb}" & + --ovn-sb-log="${ovn_loglevel_sb}" & wait_for_event attempts=3 process_ready ovnsb_db echo "=============== sb-ovsdb ========== RUNNING" @@ -670,7 +670,7 @@ run-ovn-northd() { echo "=============== run_ovn_northd ========== MASTER ONLY" echo "ovn_nbdb
${ovn_nbdb} ovn_sbdb ${ovn_sbdb}" echo "ovn_northd_opts=${ovn_northd_opts}" - echo "ovn_log_northd=${ovn_log_northd}" + echo "ovn_loglevel_northd=${ovn_loglevel_northd}" # no monitor (and no detach), start northd which connects to the # ovnkube-db service @@ -680,7 +680,7 @@ run-ovn-northd() { ${OVNCTL_PATH} start_northd \ --no-monitor --ovn-manage-ovsdb=no \ --ovn-northd-nb-db=${ovn_nbdb_i} --ovn-northd-sb-db=${ovn_sbdb_i} \ - --ovn-northd-log="${ovn_log_northd}" \ + --ovn-northd-log="${ovn_loglevel_northd}" \ ${ovn_northd_opts} wait_for_event attempts=3 process_ready ovn-northd @@ -772,7 +772,7 @@ ovn-controller() { run_as_ovs_user_if_needed \ ${OVNCTL_PATH} --no-monitor start_controller \ - --ovn-controller-log="${ovn_log_controller}" \ + --ovn-controller-log="${ovn_loglevel_controller}" \ ${ovn_controller_opts} wait_for_event attempts=3 process_ready ovn-controller @@ -877,10 +877,10 @@ run-nbctld() { wait_for_event ready_to_start_node echo "ovn_nbdb ${ovn_nbdb} ovn_sbdb ${ovn_sbdb} ovn_nbdb_test ${ovn_nbdb_test}" - echo "ovn_log_nbctld=${ovn_log_nbctld}" + echo "ovn_loglevel_nbctld=${ovn_loglevel_nbctld}" # use unix socket - /usr/bin/ovn-nbctl ${ovn_log_nbctld} --pidfile --db=${ovn_nbdb_test} --log-file=${OVN_LOGDIR}/ovn-nbctl.log --detach + /usr/bin/ovn-nbctl ${ovn_loglevel_nbctld} --pidfile --db=${ovn_nbdb_test} --log-file=${OVN_LOGDIR}/ovn-nbctl.log --detach wait_for_event attempts=3 process_ready ovn-nbctl echo "=============== run_ovn_nbctl ========== RUNNING" diff --git a/dist/templates/ovnkube-db-raft.yaml.j2 b/dist/templates/ovnkube-db-raft.yaml.j2 index fdc137d490..001997f25c 100644 --- a/dist/templates/ovnkube-db-raft.yaml.j2 +++ b/dist/templates/ovnkube-db-raft.yaml.j2 @@ -136,7 +136,7 @@ spec: env: - name: OVN_DAEMONSET_VERSION value: "3" - - name: OVN_LOG_NB + - name: OVN_LOGLEVEL_NB value: "{{ ovn_loglevel_nb }}" - name: K8S_APISERVER valueFrom: @@ -200,7 +200,7 @@ spec: env: - name: OVN_DAEMONSET_VERSION value: "3" - - name: OVN_LOG_SB + - name: 
OVN_LOGLEVEL_SB value: "{{ ovn_loglevel_sb }}" - name: K8S_APISERVER valueFrom: diff --git a/dist/templates/ovnkube-db-vip.yaml.j2 b/dist/templates/ovnkube-db-vip.yaml.j2 index aac0ae2065..556d9ebf50 100644 --- a/dist/templates/ovnkube-db-vip.yaml.j2 +++ b/dist/templates/ovnkube-db-vip.yaml.j2 @@ -118,7 +118,7 @@ spec: env: - name: OVN_DAEMONSET_VERSION value: "3" - - name: OVN_LOG_NB + - name: OVN_LOGLEVEL_NB value: "{{ ovn_loglevel_nb }}" - name: K8S_APISERVER valueFrom: diff --git a/dist/templates/ovnkube-db.yaml.j2 b/dist/templates/ovnkube-db.yaml.j2 index e5328d8e8d..b12b0dca61 100644 --- a/dist/templates/ovnkube-db.yaml.j2 +++ b/dist/templates/ovnkube-db.yaml.j2 @@ -101,7 +101,7 @@ spec: env: - name: OVN_DAEMONSET_VERSION value: "3" - - name: OVN_LOG_NB + - name: OVN_LOGLEVEL_NB value: "{{ ovn_loglevel_nb }}" - name: K8S_APISERVER valueFrom: @@ -161,7 +161,7 @@ spec: env: - name: OVN_DAEMONSET_VERSION value: "3" - - name: OVN_LOG_SB + - name: OVN_LOGLEVEL_SB value: "{{ ovn_loglevel_sb }}" - name: K8S_APISERVER valueFrom: diff --git a/dist/templates/ovnkube-master.yaml.j2 b/dist/templates/ovnkube-master.yaml.j2 index e855429fd6..29ed767e28 100644 --- a/dist/templates/ovnkube-master.yaml.j2 +++ b/dist/templates/ovnkube-master.yaml.j2 @@ -98,7 +98,7 @@ spec: env: - name: OVN_DAEMONSET_VERSION value: "3" - - name: OVN_LOG_NORTHD + - name: OVN_LOGLEVEL_NORTHD value: "{{ ovn_loglevel_northd }}" - name: K8S_APISERVER valueFrom: @@ -144,7 +144,7 @@ spec: env: - name: OVN_DAEMONSET_VERSION value: "3" - - name: OVN_LOG_NBCTLD + - name: OVN_LOGLEVEL_NBCTLD value: "{{ ovn_loglevel_nbctld }}" - name: K8S_APISERVER valueFrom: From f5d93f285c3f197f933d163f6e2f83f4404d2711 Mon Sep 17 00:00:00 2001 From: Dan Winship Date: Mon, 16 Mar 2020 19:27:15 -0400 Subject: [PATCH 11/27] util: move pod annotation code into its own file, add tests Signed-off-by: Dan Winship --- go-controller/pkg/util/pod_annotation.go | 140 ++++++++++++++++++ go-controller/pkg/util/pod_annotation_test.go 
| 98 ++++++++++++ go-controller/pkg/util/util.go | 133 +---------------- 3 files changed, 239 insertions(+), 132 deletions(-) create mode 100644 go-controller/pkg/util/pod_annotation.go create mode 100644 go-controller/pkg/util/pod_annotation_test.go diff --git a/go-controller/pkg/util/pod_annotation.go b/go-controller/pkg/util/pod_annotation.go new file mode 100644 index 0000000000..db3dd068ad --- /dev/null +++ b/go-controller/pkg/util/pod_annotation.go @@ -0,0 +1,140 @@ +package util + +import ( + "encoding/json" + "fmt" + "net" + + "k8s.io/klog" +) + +const ( + // OvnPodAnnotationName is the constant string representing the POD annotation key + OvnPodAnnotationName = "k8s.ovn.org/pod-networks" + // OvnPodDefaultNetwork is the constant string representing the first OVN interface to the Pod + OvnPodDefaultNetwork = "default" +) + +// PodAnnotation describes the pod's assigned network details +type PodAnnotation struct { + // IP is the pod's assigned IP address and prefix + IP *net.IPNet + // MAC is the pod's assigned MAC address + MAC net.HardwareAddr + // GW is the pod's gateway IP address + GW net.IP + // Routes are routes to add to the pod's network namespace + Routes []PodRoute +} + +// PodRoute describes any routes to be added to the pod's network namespace +type PodRoute struct { + // Dest is the route destination + Dest *net.IPNet + // NextHop is the IP address of the next hop for traffic destined for Dest + NextHop net.IP +} + +// Internal struct used to correctly marshal IPs to JSON +type podAnnotation struct { + IP string `json:"ip_address"` + MAC string `json:"mac_address"` + GW string `json:"gateway_ip"` + Routes []podRoute `json:"routes,omitempty"` +} + +// Internal struct used to correctly marshal IPs to JSON +type podRoute struct { + Dest string `json:"dest"` + NextHop string `json:"nextHop"` +} + +// MarshalPodAnnotation returns a JSON-formatted annotation describing the pod's +// network details +func MarshalPodAnnotation(podInfo *PodAnnotation) 
(map[string]string, error) { + var gw string + if podInfo.GW != nil { + gw = podInfo.GW.String() + } + pa := podAnnotation{ + IP: podInfo.IP.String(), + MAC: podInfo.MAC.String(), + GW: gw, + } + for _, r := range podInfo.Routes { + var nh string + if r.NextHop != nil { + nh = r.NextHop.String() + } + pa.Routes = append(pa.Routes, podRoute{ + Dest: r.Dest.String(), + NextHop: nh, + }) + } + + podNetworks := map[string]podAnnotation{ + OvnPodDefaultNetwork: pa, + } + bytes, err := json.Marshal(podNetworks) + if err != nil { + klog.Errorf("failed marshaling podNetworks map %v", podNetworks) + return nil, err + } + return map[string]string{ + OvnPodAnnotationName: string(bytes), + }, nil +} + +// UnmarshalPodAnnotation returns a the unmarshalled pod annotation +func UnmarshalPodAnnotation(annotations map[string]string) (*PodAnnotation, error) { + ovnAnnotation, ok := annotations[OvnPodAnnotationName] + if !ok { + return nil, fmt.Errorf("could not find OVN pod annotation in %v", annotations) + } + + podNetworks := make(map[string]podAnnotation) + if err := json.Unmarshal([]byte(ovnAnnotation), &podNetworks); err != nil { + return nil, fmt.Errorf("failed to unmarshal ovn pod annotation %q: %v", + ovnAnnotation, err) + } + tempA := podNetworks[OvnPodDefaultNetwork] + a := &tempA + + podAnnotation := &PodAnnotation{} + // Minimal validation + ip, ipnet, err := net.ParseCIDR(a.IP) + if err != nil { + return nil, fmt.Errorf("failed to parse pod IP %q: %v", a.IP, err) + } + ipnet.IP = ip + podAnnotation.IP = ipnet + + podAnnotation.MAC, err = net.ParseMAC(a.MAC) + if err != nil { + return nil, fmt.Errorf("failed to parse pod MAC %q: %v", a.MAC, err) + } + + if a.GW != "" { + podAnnotation.GW = net.ParseIP(a.GW) + if podAnnotation.GW == nil { + return nil, fmt.Errorf("failed to parse pod gateway %q", a.GW) + } + } + + for _, r := range a.Routes { + route := PodRoute{} + _, route.Dest, err = net.ParseCIDR(r.Dest) + if err != nil { + return nil, fmt.Errorf("failed to parse pod 
route dest %q: %v", r.Dest, err) + } + if r.NextHop != "" { + route.NextHop = net.ParseIP(r.NextHop) + if route.NextHop == nil { + return nil, fmt.Errorf("failed to parse pod route next hop %q", r.NextHop) + } + } + podAnnotation.Routes = append(podAnnotation.Routes, route) + } + + return podAnnotation, nil +} diff --git a/go-controller/pkg/util/pod_annotation_test.go b/go-controller/pkg/util/pod_annotation_test.go new file mode 100644 index 0000000000..f4ad16d7f8 --- /dev/null +++ b/go-controller/pkg/util/pod_annotation_test.go @@ -0,0 +1,98 @@ +package util + +import ( + "net" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("Pod annotation tests", func() { + It("marshals network info to pod annotations", func() { + type testcase struct { + name string + in *PodAnnotation + out map[string]string + } + + testcases := []testcase{ + { + name: "Single-stack IPv4", + in: &PodAnnotation{ + IP: mustParseCIDRAddress("192.168.0.5/24"), + MAC: mustParseMAC("0A:58:FD:98:00:01"), + GW: net.ParseIP("192.168.0.1"), + }, + out: map[string]string{ + "k8s.ovn.org/pod-networks": `{"default":{"ip_address":"192.168.0.5/24","mac_address":"0a:58:fd:98:00:01","gateway_ip":"192.168.0.1"}}`, + }, + }, + { + name: "No GW", + in: &PodAnnotation{ + IP: mustParseCIDRAddress("192.168.0.5/24"), + MAC: mustParseMAC("0A:58:FD:98:00:01"), + }, + out: map[string]string{ + "k8s.ovn.org/pod-networks": `{"default":{"ip_address":"192.168.0.5/24","mac_address":"0a:58:fd:98:00:01","gateway_ip":""}}`, + }, + }, + { + name: "Routes", + in: &PodAnnotation{ + IP: mustParseCIDRAddress("192.168.0.5/24"), + MAC: mustParseMAC("0A:58:FD:98:00:01"), + GW: net.ParseIP("192.168.0.1"), + Routes: []PodRoute{ + { + Dest: mustParseCIDR("192.168.1.0/24"), + NextHop: net.ParseIP("192.168.1.1"), + }, + }, + }, + out: map[string]string{ + "k8s.ovn.org/pod-networks": 
`{"default":{"ip_address":"192.168.0.5/24","mac_address":"0a:58:fd:98:00:01","gateway_ip":"192.168.0.1","routes":[{"dest":"192.168.1.0/24","nextHop":"192.168.1.1"}]}}`, + }, + }, + { + name: "Single-stack IPv6", + in: &PodAnnotation{ + IP: mustParseCIDRAddress("fd01::1234/64"), + MAC: mustParseMAC("0A:58:FD:98:00:01"), + GW: net.ParseIP("fd01::1"), + }, + out: map[string]string{ + "k8s.ovn.org/pod-networks": `{"default":{"ip_address":"fd01::1234/64","mac_address":"0a:58:fd:98:00:01","gateway_ip":"fd01::1"}}`, + }, + }, + } + + for _, tc := range testcases { + marshalled, err := MarshalPodAnnotation(tc.in) + Expect(err).NotTo(HaveOccurred(), "test case %q got unexpected marshalling error", tc.name) + Expect(marshalled).To(Equal(tc.out), "test case %q marshalled to wrong value", tc.name) + unmarshalled, err := UnmarshalPodAnnotation(marshalled) + Expect(err).NotTo(HaveOccurred(), "test case %q got unexpected unmarshalling error", tc.name) + Expect(unmarshalled).To(Equal(tc.in), "test case %q unmarshalled to wrong value", tc.name) + } + }) +}) + +func mustParseCIDRAddress(addr string) *net.IPNet { + ip, subnet, err := net.ParseCIDR(addr) + Expect(err).NotTo(HaveOccurred()) + subnet.IP = ip + return subnet +} + +func mustParseCIDR(cidr string) *net.IPNet { + _, subnet, err := net.ParseCIDR(cidr) + Expect(err).NotTo(HaveOccurred()) + return subnet +} + +func mustParseMAC(mac string) net.HardwareAddr { + parsed, err := net.ParseMAC(mac) + Expect(err).NotTo(HaveOccurred()) + return parsed +} diff --git a/go-controller/pkg/util/util.go b/go-controller/pkg/util/util.go index 4f477fed77..f79d494be8 100644 --- a/go-controller/pkg/util/util.go +++ b/go-controller/pkg/util/util.go @@ -1,7 +1,6 @@ package util import ( - "encoding/json" "fmt" "net" "strings" @@ -11,6 +10,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/urfave/cli" + "k8s.io/klog" ) @@ -117,134 +117,3 @@ func UpdateNodeSwitchExcludeIPs(nodeName string, subnet *net.IPNet) 
error { } return nil } - -const ( - // OvnPodAnnotationName is the constant string representing the POD annotation key - OvnPodAnnotationName = "k8s.ovn.org/pod-networks" - // OvnPodDefaultNetwork is the constant string representing the first OVN interface to the Pod - OvnPodDefaultNetwork = "default" -) - -// PodAnnotation describes the pod's assigned network details -type PodAnnotation struct { - // IP is the pod's assigned IP address and prefix - IP *net.IPNet - // MAC is the pod's assigned MAC address - MAC net.HardwareAddr - // GW is the pod's gateway IP address - GW net.IP - // Routes are routes to add to the pod's network namespace - Routes []PodRoute -} - -// PodRoute describes any routes to be added to the pod's network namespace -type PodRoute struct { - // Dest is the route destination - Dest *net.IPNet - // NextHop is the IP address of the next hop for traffic destined for Dest - NextHop net.IP -} - -// Internal struct used to correctly marshal IPs to JSON -type podAnnotation struct { - IP string `json:"ip_address"` - MAC string `json:"mac_address"` - GW string `json:"gateway_ip"` - Routes []podRoute `json:"routes,omitempty"` -} - -// Internal struct used to correctly marshal IPs to JSON -type podRoute struct { - Dest string `json:"dest"` - NextHop string `json:"nextHop"` -} - -// MarshalPodAnnotation returns a JSON-formatted annotation describing the pod's -// network details -func MarshalPodAnnotation(podInfo *PodAnnotation) (map[string]string, error) { - var gw string - if podInfo.GW != nil { - gw = podInfo.GW.String() - } - pa := podAnnotation{ - IP: podInfo.IP.String(), - MAC: podInfo.MAC.String(), - GW: gw, - } - for _, r := range podInfo.Routes { - var nh string - if r.NextHop != nil { - nh = r.NextHop.String() - } - pa.Routes = append(pa.Routes, podRoute{ - Dest: r.Dest.String(), - NextHop: nh, - }) - } - - podNetworks := map[string]podAnnotation{ - OvnPodDefaultNetwork: pa, - } - bytes, err := json.Marshal(podNetworks) - if err != nil { - 
klog.Errorf("failed marshaling podNetworks map %v", podNetworks) - return nil, err - } - return map[string]string{ - OvnPodAnnotationName: string(bytes), - }, nil -} - -// UnmarshalPodAnnotation returns a the unmarshalled pod annotation -func UnmarshalPodAnnotation(annotations map[string]string) (*PodAnnotation, error) { - ovnAnnotation, ok := annotations[OvnPodAnnotationName] - if !ok { - return nil, fmt.Errorf("could not find OVN pod annotation in %v", annotations) - } - - podNetworks := make(map[string]podAnnotation) - if err := json.Unmarshal([]byte(ovnAnnotation), &podNetworks); err != nil { - return nil, fmt.Errorf("failed to unmarshal ovn pod annotation %q: %v", - ovnAnnotation, err) - } - tempA := podNetworks[OvnPodDefaultNetwork] - a := &tempA - - podAnnotation := &PodAnnotation{} - // Minimal validation - ip, ipnet, err := net.ParseCIDR(a.IP) - if err != nil { - return nil, fmt.Errorf("failed to parse pod IP %q: %v", a.IP, err) - } - ipnet.IP = ip - podAnnotation.IP = ipnet - - podAnnotation.MAC, err = net.ParseMAC(a.MAC) - if err != nil { - return nil, fmt.Errorf("failed to parse pod MAC %q: %v", a.MAC, err) - } - - if a.GW != "" { - podAnnotation.GW = net.ParseIP(a.GW) - if podAnnotation.GW == nil { - return nil, fmt.Errorf("failed to parse pod gateway %q", a.GW) - } - } - - for _, r := range a.Routes { - route := PodRoute{} - _, route.Dest, err = net.ParseCIDR(r.Dest) - if err != nil { - return nil, fmt.Errorf("failed to parse pod route dest %q: %v", r.Dest, err) - } - if r.NextHop != "" { - route.NextHop = net.ParseIP(r.NextHop) - if route.NextHop == nil { - return nil, fmt.Errorf("failed to parse pod route next hop %q", a.GW) - } - } - podAnnotation.Routes = append(podAnnotation.Routes, route) - } - - return podAnnotation, nil -} From d50a16caa1ca610711a7be7b0337c7031ba37de2 Mon Sep 17 00:00:00 2001 From: Dan Winship Date: Thu, 19 Mar 2020 12:10:57 -0400 Subject: [PATCH 12/27] Remove some unused pod routing code The CNI code had code to check if 
there was a default route specified in the "routes" field of the pod annotation and ignore the "gateway_ip" field in that case, but we never actually used that functionality. Remove the code, and sanity-check the routes when marshalling/unmarshalling the pod annotation. Signed-off-by: Dan Winship --- go-controller/pkg/cni/helper_linux.go | 22 ++++++---------------- go-controller/pkg/ovn/pods.go | 5 +---- go-controller/pkg/util/pod_annotation.go | 6 ++++++ 3 files changed, 13 insertions(+), 20 deletions(-) diff --git a/go-controller/pkg/cni/helper_linux.go b/go-controller/pkg/cni/helper_linux.go index 82e8969d13..738e2c90ee 100644 --- a/go-controller/pkg/cni/helper_linux.go +++ b/go-controller/pkg/cni/helper_linux.go @@ -65,25 +65,15 @@ func setupNetwork(link netlink.Link, ifInfo *PodInterfaceInfo) error { return fmt.Errorf("failed to add IP addr %s to %s: %v", ifInfo.IP, link.Attrs().Name, err) } - var foundDefault bool - for _, route := range ifInfo.Routes { - if err := ip.AddRoute(route.Dest, route.NextHop, link); err != nil { - return fmt.Errorf("failed to add pod route %v via %v: %v", route.Dest, route.NextHop, err) - } - - if ones, _ := route.Dest.Mask.Size(); ones == 0 { - foundDefault = true + if ifInfo.GW != nil { + if err := ip.AddRoute(nil, ifInfo.GW, link); err != nil { + return fmt.Errorf("failed to add gateway route: %v", err) } } - if !foundDefault { - // If the pod routes did not include a default route, - // add a "default" default route via the pod's gateway, if - // one exists - if ifInfo.GW != nil { - if err := ip.AddRoute(nil, ifInfo.GW, link); err != nil { - return fmt.Errorf("failed to add gateway route: %v", err) - } + for _, route := range ifInfo.Routes { + if err := ip.AddRoute(route.Dest, route.NextHop, link); err != nil { + return fmt.Errorf("failed to add pod route %v via %v: %v", route.Dest, route.NextHop, err) } } diff --git a/go-controller/pkg/ovn/pods.go b/go-controller/pkg/ovn/pods.go index 325dcdd214..763206e40b 100644 --- 
a/go-controller/pkg/ovn/pods.go +++ b/go-controller/pkg/ovn/pods.go @@ -229,14 +229,12 @@ func (oc *Controller) addLogicalPort(pod *kapi.Pod) error { var podMac net.HardwareAddr var podCIDR *net.IPNet - var gatewayCIDR *net.IPNet var args []string annotation, err := util.UnmarshalPodAnnotation(pod.Annotations) if err == nil { podMac = annotation.MAC podCIDR = annotation.IP - gatewayCIDR = &net.IPNet{IP: annotation.GW, Mask: annotation.IP.Mask} // Check if the pod's logical switch port already exists. If it // does don't re-add the port to OVN as this will change its @@ -255,8 +253,6 @@ func (oc *Controller) addLogicalPort(pod *kapi.Pod) error { "--", "--if-exists", "clear", "logical_switch_port", portName, "dynamic_addresses", ) } else { - gatewayCIDR, _ = util.GetNodeWellKnownAddresses(nodeSubnet) - addresses := "dynamic" networks, err := util.GetPodNetSelAnnotation(pod, util.DefNetworkAnnotation) if err != nil || (networks != nil && len(networks) != 1) { @@ -329,6 +325,7 @@ func (oc *Controller) addLogicalPort(pod *kapi.Pod) error { } if annotation == nil { + gatewayCIDR, _ := util.GetNodeWellKnownAddresses(nodeSubnet) routes, gwIP, err := getRoutesGatewayIP(pod, gatewayCIDR) if err != nil { return err diff --git a/go-controller/pkg/util/pod_annotation.go b/go-controller/pkg/util/pod_annotation.go index db3dd068ad..a167369397 100644 --- a/go-controller/pkg/util/pod_annotation.go +++ b/go-controller/pkg/util/pod_annotation.go @@ -62,6 +62,9 @@ func MarshalPodAnnotation(podInfo *PodAnnotation) (map[string]string, error) { GW: gw, } for _, r := range podInfo.Routes { + if r.Dest.IP.IsUnspecified() { + return nil, fmt.Errorf("bad podNetwork data: default route %v should be specified as gateway", r) + } var nh string if r.NextHop != nil { nh = r.NextHop.String() @@ -127,6 +130,9 @@ func UnmarshalPodAnnotation(annotations map[string]string) (*PodAnnotation, erro if err != nil { return nil, fmt.Errorf("failed to parse pod route dest %q: %v", r.Dest, err) } + if 
route.Dest.IP.IsUnspecified() { + return nil, fmt.Errorf("bad podNetwork data: default route %v should be specified as gateway", route) + } if r.NextHop != "" { route.NextHop = net.ParseIP(r.NextHop) if route.NextHop == nil { From 0303efa4de5a134a06b6701679fc185b7efc0ed4 Mon Sep 17 00:00:00 2001 From: Dan Winship Date: Mon, 16 Mar 2020 16:34:46 -0400 Subject: [PATCH 13/27] Update pod annotation and CNI shim to allow dual-stack Signed-off-by: Dan Winship --- .../pkg/controller/node_linux.go | 31 +++-- go-controller/pkg/cni/cni.go | 38 ++++-- go-controller/pkg/cni/helper_linux.go | 21 +-- go-controller/pkg/cni/helper_windows.go | 18 ++- go-controller/pkg/ovn/pods.go | 13 +- go-controller/pkg/ovn/pods_test.go | 20 +-- go-controller/pkg/ovn/policy.go | 6 +- go-controller/pkg/util/pod_annotation.go | 126 +++++++++++++----- go-controller/pkg/util/pod_annotation_test.go | 43 ++++-- 9 files changed, 217 insertions(+), 99 deletions(-) diff --git a/go-controller/hybrid-overlay/pkg/controller/node_linux.go b/go-controller/hybrid-overlay/pkg/controller/node_linux.go index 78bfc1c245..573d4e76cc 100644 --- a/go-controller/hybrid-overlay/pkg/controller/node_linux.go +++ b/go-controller/hybrid-overlay/pkg/controller/node_linux.go @@ -55,17 +55,19 @@ func podToCookie(pod *kapi.Pod) string { } func (n *NodeController) addOrUpdatePod(pod *kapi.Pod) error { - podIP, podMAC, err := getPodDetails(pod, n.nodeName) + podIPs, podMAC, err := getPodDetails(pod, n.nodeName) if err != nil { klog.V(5).Infof("cleaning up hybrid overlay pod %s/%s because %v", pod.Namespace, pod.Name, err) return n.deletePod(pod) } cookie := podToCookie(pod) - _, _, err = util.RunOVSOfctl("add-flow", extBridgeName, - fmt.Sprintf("table=10,cookie=0x%s,priority=100,ip,nw_dst=%s,actions=set_field:%s->eth_src,set_field:%s->eth_dst,output:ext", cookie, podIP.IP, n.drMAC, podMAC)) - if err != nil { - return fmt.Errorf("failed to add flows for pod %s/%s: %v", pod.Namespace, pod.Name, err) + for _, podIP := range podIPs 
{ + _, _, err = util.RunOVSOfctl("add-flow", extBridgeName, + fmt.Sprintf("table=10,cookie=0x%s,priority=100,ip,nw_dst=%s,actions=set_field:%s->eth_src,set_field:%s->eth_dst,output:ext", cookie, podIP.IP, n.drMAC, podMAC)) + if err != nil { + return fmt.Errorf("failed to add flows for pod %s/%s: %v", pod.Namespace, pod.Name, err) + } } return nil } @@ -79,7 +81,7 @@ func (n *NodeController) deletePod(pod *kapi.Pod) error { return nil } -func getPodDetails(pod *kapi.Pod, nodeName string) (*net.IPNet, net.HardwareAddr, error) { +func getPodDetails(pod *kapi.Pod, nodeName string) ([]*net.IPNet, net.HardwareAddr, error) { if pod.Spec.NodeName != nodeName { return nil, nil, fmt.Errorf("not scheduled") } @@ -88,14 +90,23 @@ func getPodDetails(pod *kapi.Pod, nodeName string) (*net.IPNet, net.HardwareAddr if err != nil { return nil, nil, err } - return podInfo.IP, podInfo.MAC, nil + return podInfo.IPs, podInfo.MAC, nil } // podChanged returns true if any relevant pod attributes changed func podChanged(pod1 *kapi.Pod, pod2 *kapi.Pod, nodeName string) bool { - podIP1, mac1, _ := getPodDetails(pod1, nodeName) - podIP2, mac2, _ := getPodDetails(pod2, nodeName) - return !reflect.DeepEqual(podIP1, podIP2) || !reflect.DeepEqual(mac1, mac2) + podIPs1, mac1, _ := getPodDetails(pod1, nodeName) + podIPs2, mac2, _ := getPodDetails(pod2, nodeName) + + if len(podIPs1) != len(podIPs2) || !reflect.DeepEqual(mac1, mac2) { + return false + } + for i := range podIPs1 { + if podIPs1[i].String() != podIPs2[i].String() { + return false + } + } + return true } func (n *NodeController) syncPods(pods []interface{}) { diff --git a/go-controller/pkg/cni/cni.go b/go-controller/pkg/cni/cni.go index d775795a7f..492e848ac6 100644 --- a/go-controller/pkg/cni/cni.go +++ b/go-controller/pkg/cni/cni.go @@ -3,6 +3,7 @@ package cni import ( "encoding/json" "fmt" + "net" "net/http" "time" @@ -174,22 +175,33 @@ func (pr *PodRequest) getCNIResult(podInterfaceInfo *PodInterfaceInfo) (*current return nil, 
fmt.Errorf("failed to configure pod interface: %v", err) } + gateways := map[string]net.IP{} + for _, gw := range podInterfaceInfo.Gateways { + if gw.To4() != nil && gateways["4"] == nil { + gateways["4"] = gw + } else if gw.To4() == nil && gateways["6"] == nil { + gateways["6"] = gw + } + } + // Build the result structure to pass back to the runtime - var ipVersion string - if utilnet.IsIPv6(podInterfaceInfo.IP.IP) { - ipVersion = "6" - } else { - ipVersion = "4" + ips := []*current.IPConfig{} + for _, ipcidr := range podInterfaceInfo.IPs { + ip := ¤t.IPConfig{ + Interface: current.Int(1), + Address: *ipcidr, + } + if utilnet.IsIPv6CIDR(ipcidr) { + ip.Version = "6" + } else { + ip.Version = "4" + } + ip.Gateway = gateways[ip.Version] + ips = append(ips, ip) } + return ¤t.Result{ Interfaces: interfacesArray, - IPs: []*current.IPConfig{ - { - Version: ipVersion, - Interface: current.Int(1), - Address: *podInterfaceInfo.IP, - Gateway: podInterfaceInfo.GW, - }, - }, + IPs: ips, }, nil } diff --git a/go-controller/pkg/cni/helper_linux.go b/go-controller/pkg/cni/helper_linux.go index 738e2c90ee..f54d6454fb 100644 --- a/go-controller/pkg/cni/helper_linux.go +++ b/go-controller/pkg/cni/helper_linux.go @@ -60,17 +60,17 @@ func setupNetwork(link netlink.Link, ifInfo *PodInterfaceInfo) error { if err := netlink.LinkSetHardwareAddr(link, ifInfo.MAC); err != nil { return fmt.Errorf("failed to add mac address %s to %s: %v", ifInfo.MAC, link.Attrs().Name, err) } - addr := &netlink.Addr{IPNet: ifInfo.IP} - if err := netlink.AddrAdd(link, addr); err != nil { - return fmt.Errorf("failed to add IP addr %s to %s: %v", ifInfo.IP, link.Attrs().Name, err) + for _, ip := range ifInfo.IPs { + addr := &netlink.Addr{IPNet: ip} + if err := netlink.AddrAdd(link, addr); err != nil { + return fmt.Errorf("failed to add IP addr %s to %s: %v", ip, link.Attrs().Name, err) + } } - - if ifInfo.GW != nil { - if err := ip.AddRoute(nil, ifInfo.GW, link); err != nil { + for _, gw := range ifInfo.Gateways 
{ + if err := ip.AddRoute(nil, gw, link); err != nil { return fmt.Errorf("failed to add gateway route: %v", err) } } - for _, route := range ifInfo.Routes { if err := ip.AddRoute(route.Dest, route.NextHop, link); err != nil { return fmt.Errorf("failed to add pod route %v via %v: %v", route.Dest, route.NextHop, err) @@ -253,13 +253,18 @@ func (pr *PodRequest) ConfigureInterface(namespace string, podName string, ifInf } } + ipStrs := make([]string, len(ifInfo.IPs)) + for i, ip := range ifInfo.IPs { + ipStrs[i] = ip.String() + } + // Add the new sandbox's OVS port ovsArgs := []string{ "add-port", "br-int", hostIface.Name, "--", "set", "interface", hostIface.Name, fmt.Sprintf("external_ids:attached_mac=%s", ifInfo.MAC), fmt.Sprintf("external_ids:iface-id=%s", ifaceID), - fmt.Sprintf("external_ids:ip_address=%s", ifInfo.IP), + fmt.Sprintf("external_ids:ip_addresses=%s", strings.Join(ipStrs, ",")), fmt.Sprintf("external_ids:sandbox=%s", pr.SandboxID), } diff --git a/go-controller/pkg/cni/helper_windows.go b/go-controller/pkg/cni/helper_windows.go index 307703bab3..9865199e2e 100644 --- a/go-controller/pkg/cni/helper_windows.go +++ b/go-controller/pkg/cni/helper_windows.go @@ -23,12 +23,12 @@ import ( // attach it to the desired network. This function finds the HNS Id of the // network based on the gatewayIP. If more than one suitable network it's found, // return an error asking to give the HNS Network Id in config. 
-func getHNSIdFromConfigOrByGatewayIP(gatewayIP net.IP) (string, error) { +func getHNSIdFromConfigOrByGatewayIP(gatewayIPs []net.IP) (string, error) { if config.CNI.WinHNSNetworkID != "" { klog.Infof("Using HNS Network Id from config: %v", config.CNI.WinHNSNetworkID) return config.CNI.WinHNSNetworkID, nil } - if gatewayIP == nil { + if len(gatewayIPs) == 0 { return "", fmt.Errorf("no gateway IP and no HNS Network ID given") } hnsNetworkId := "" @@ -38,7 +38,7 @@ func getHNSIdFromConfigOrByGatewayIP(gatewayIP net.IP) (string, error) { } for _, hnsNW := range hnsNetworks { for _, hnsNWSubnet := range hnsNW.Subnets { - if strings.Compare(gatewayIP.String(), hnsNWSubnet.GatewayAddress) == 0 { + if strings.Compare(gatewayIPs[0].String(), hnsNWSubnet.GatewayAddress) == 0 { if len(hnsNetworkId) == 0 { hnsNetworkId = hnsNW.Id } else { @@ -118,7 +118,11 @@ func (pr *PodRequest) ConfigureInterface(namespace string, podName string, ifInf if conf.DeviceID != "" { return nil, fmt.Errorf("failure OVS-Offload is not supported in Windows") } - ipMaskSize, _ := ifInfo.IP.Mask.Size() + if len(ifInfo.IPs) != 1 { + return nil, fmt.Errorf("dual-stack is not supported in Windows") + } + + ipMaskSize, _ := ifInfo.IPs[0].Mask.Size() // NOTE(abalutoiu): The endpoint name should not depend on the container ID. // This is for backwards compatibility in kubernetes which calls the CNI // even for containers that are not the infra container. 
@@ -137,7 +141,7 @@ func (pr *PodRequest) ConfigureInterface(namespace string, podName string, ifInf }() var hnsNetworkId string - hnsNetworkId, err = getHNSIdFromConfigOrByGatewayIP(ifInfo.GW) + hnsNetworkId, err = getHNSIdFromConfigOrByGatewayIP(ifInfo.Gateways) if err != nil { klog.Infof("Error when detecting the HNS Network Id: %q", err) return nil, err @@ -156,7 +160,7 @@ func (pr *PodRequest) ConfigureInterface(namespace string, podName string, ifInf hnsEndpoint := &hcsshim.HNSEndpoint{ Name: endpointName, VirtualNetwork: hnsNetworkId, - IPAddress: ifInfo.IP.IP, + IPAddress: ifInfo.IPs[0].IP, MacAddress: macAddressIpFormat, PrefixLength: uint8(ipMaskSize), DNSServerList: strings.Join(conf.DNS.Nameservers, ","), @@ -196,7 +200,7 @@ func (pr *PodRequest) ConfigureInterface(namespace string, podName string, ifInf "interface", endpointName, fmt.Sprintf("external_ids:attached_mac=%s", ifInfo.MAC), fmt.Sprintf("external_ids:iface-id=%s", ifaceID), - fmt.Sprintf("external_ids:ip_address=%s", ifInfo.IP), + fmt.Sprintf("external_ids:ip_address=%s", ifInfo.IPs[0]), } var out []byte out, err = exec.Command("ovs-vsctl", ovsArgs...).CombinedOutput() diff --git a/go-controller/pkg/ovn/pods.go b/go-controller/pkg/ovn/pods.go index 763206e40b..5dff265aa8 100644 --- a/go-controller/pkg/ovn/pods.go +++ b/go-controller/pkg/ovn/pods.go @@ -234,7 +234,8 @@ func (oc *Controller) addLogicalPort(pod *kapi.Pod) error { annotation, err := util.UnmarshalPodAnnotation(pod.Annotations) if err == nil { podMac = annotation.MAC - podCIDR = annotation.IP + // DUAL-STACK FIXME: handle multiple IPs + podCIDR = annotation.IPs[0] // Check if the pod's logical switch port already exists. If it // does don't re-add the port to OVN as this will change its @@ -249,7 +250,7 @@ func (oc *Controller) addLogicalPort(pod *kapi.Pod) error { // If the pod already has annotations use the existing static // IP/MAC from the annotation. 
args = append(args, - "--", "lsp-set-addresses", portName, fmt.Sprintf("%s %s", podMac, annotation.IP.IP), + "--", "lsp-set-addresses", portName, fmt.Sprintf("%s %s", podMac, podCIDR.IP), "--", "--if-exists", "clear", "logical_switch_port", portName, "dynamic_addresses", ) } else { @@ -331,10 +332,10 @@ func (oc *Controller) addLogicalPort(pod *kapi.Pod) error { return err } marshalledAnnotation, err := util.MarshalPodAnnotation(&util.PodAnnotation{ - IP: podCIDR, - MAC: podMac, - GW: gwIP, - Routes: routes, + IPs: []*net.IPNet{podCIDR}, + MAC: podMac, + Gateways: []net.IP{gwIP}, + Routes: routes, }) if err != nil { return fmt.Errorf("error creating pod network annotation: %v", err) diff --git a/go-controller/pkg/ovn/pods_test.go b/go-controller/pkg/ovn/pods_test.go index c52bb59962..389a1b4bcb 100644 --- a/go-controller/pkg/ovn/pods_test.go +++ b/go-controller/pkg/ovn/pods_test.go @@ -244,7 +244,7 @@ var _ = Describe("OVN Pod Operations", func() { podAnnotation, ok := pod.Annotations[util.OvnPodAnnotationName] Expect(ok).To(BeTrue()) - Expect(podAnnotation).To(MatchJSON(`{"default": {"ip_address":"` + t.podIP + `/24", "mac_address":"` + t.podMAC + `", "gateway_ip": "` + t.nodeGWIP + `"}}`)) + Expect(podAnnotation).To(MatchJSON(`{"default": {"ip_addresses":["` + t.podIP + `/24"], "mac_address":"` + t.podMAC + `", "gateway_ips": ["` + t.nodeGWIP + `"], "ip_address":"` + t.podIP + `/24", "gateway_ip": "` + t.nodeGWIP + `"}}`)) return nil } @@ -291,7 +291,7 @@ var _ = Describe("OVN Pod Operations", func() { podAnnotation, ok := pod.Annotations[util.OvnPodAnnotationName] Expect(ok).To(BeTrue()) - Expect(podAnnotation).To(MatchJSON(`{"default": {"ip_address":"` + t.podIP + `/24", "mac_address":"` + t.podMAC + `", "gateway_ip": "` + t.nodeGWIP + `"}}`)) + Expect(podAnnotation).To(MatchJSON(`{"default": {"ip_addresses":["` + t.podIP + `/24"], "mac_address":"` + t.podMAC + `", "gateway_ips": ["` + t.nodeGWIP + `"], "ip_address":"` + t.podIP + `/24", "gateway_ip": "` + 
t.nodeGWIP + `"}}`)) return nil } @@ -335,7 +335,7 @@ var _ = Describe("OVN Pod Operations", func() { podAnnotation, ok := pod.Annotations[util.OvnPodAnnotationName] Expect(ok).To(BeTrue()) - Expect(podAnnotation).To(MatchJSON(`{"default": {"ip_address":"` + t.podIP + `/24", "mac_address":"` + t.podMAC + `", "gateway_ip": "` + t.nodeGWIP + `"}}`)) + Expect(podAnnotation).To(MatchJSON(`{"default": {"ip_addresses":["` + t.podIP + `/24"], "mac_address":"` + t.podMAC + `", "gateway_ips": ["` + t.nodeGWIP + `"], "ip_address":"` + t.podIP + `/24", "gateway_ip": "` + t.nodeGWIP + `"}}`)) Eventually(fExec.CalledMatchesExpected).Should(BeTrue(), fExec.ErrorDesc) // Delete it @@ -399,7 +399,7 @@ var _ = Describe("OVN Pod Operations", func() { podAnnotation, ok := pod.Annotations[util.OvnPodAnnotationName] Expect(ok).To(BeTrue()) - Expect(podAnnotation).To(MatchJSON(`{"default": {"ip_address":"` + t.podIP + `/24", "mac_address":"` + t.podMAC + `", "gateway_ip": "` + t.nodeGWIP + `"}}`)) + Expect(podAnnotation).To(MatchJSON(`{"default": {"ip_addresses":["` + t.podIP + `/24"], "mac_address":"` + t.podMAC + `", "gateway_ips": ["` + t.nodeGWIP + `"], "ip_address":"` + t.podIP + `/24", "gateway_ip": "` + t.nodeGWIP + `"}}`)) return nil } @@ -456,7 +456,7 @@ var _ = Describe("OVN Pod Operations", func() { podAnnotation, ok := pod.Annotations[util.OvnPodAnnotationName] Expect(ok).To(BeTrue()) - Expect(podAnnotation).To(MatchJSON(`{"default": {"ip_address":"` + t.podIP + `/24", "mac_address":"` + t.podMAC + `", "gateway_ip": "` + t.nodeGWIP + `"}}`)) + Expect(podAnnotation).To(MatchJSON(`{"default": {"ip_addresses":["` + t.podIP + `/24"], "mac_address":"` + t.podMAC + `", "gateway_ips": ["` + t.nodeGWIP + `"], "ip_address":"` + t.podIP + `/24", "gateway_ip": "` + t.nodeGWIP + `"}}`)) return nil } @@ -533,7 +533,7 @@ var _ = Describe("OVN Pod Operations", func() { podAnnotation, ok := pod.Annotations[util.OvnPodAnnotationName] Expect(ok).To(BeTrue()) - 
Expect(podAnnotation).To(MatchJSON(`{"default": {"ip_address":"` + t.podIP + `/24", "mac_address":"` + t.podMAC + `", "gateway_ip": "` + t.nodeGWIP + `"}}`)) + Expect(podAnnotation).To(MatchJSON(`{"default": {"ip_addresses":["` + t.podIP + `/24"], "mac_address":"` + t.podMAC + `", "gateway_ips": ["` + t.nodeGWIP + `"], "ip_address":"` + t.podIP + `/24", "gateway_ip": "` + t.nodeGWIP + `"}}`)) return nil } @@ -580,7 +580,7 @@ var _ = Describe("OVN Pod Operations", func() { podAnnotation, ok := pod.Annotations[util.OvnPodAnnotationName] Expect(ok).To(BeTrue()) - Expect(podAnnotation).To(MatchJSON(`{"default": {"ip_address":"` + t.podIP + `/24", "mac_address":"` + t.podMAC + `", "gateway_ip": "` + t.nodeGWIP + `"}}`)) + Expect(podAnnotation).To(MatchJSON(`{"default": {"ip_addresses":["` + t.podIP + `/24"], "mac_address":"` + t.podMAC + `", "gateway_ips": ["` + t.nodeGWIP + `"], "ip_address":"` + t.podIP + `/24", "gateway_ip": "` + t.nodeGWIP + `"}}`)) // Simulate an OVN restart with a new IP assignment and verify that the pod annotation is updated. 
fExec.AddFakeCmd(&ovntest.ExpectedCmd{ @@ -606,7 +606,7 @@ var _ = Describe("OVN Pod Operations", func() { // Check that pod annotations have been re-written to correct values podAnnotation, ok = pod.Annotations[util.OvnPodAnnotationName] Expect(ok).To(BeTrue()) - Expect(podAnnotation).To(MatchJSON(`{"default": {"ip_address":"` + t.podIP + `/24", "mac_address":"` + t.podMAC + `", "gateway_ip": "` + t.nodeGWIP + `"}}`)) + Expect(podAnnotation).To(MatchJSON(`{"default": {"ip_addresses":["` + t.podIP + `/24"], "mac_address":"` + t.podMAC + `", "gateway_ips": ["` + t.nodeGWIP + `"], "ip_address":"` + t.podIP + `/24", "gateway_ip": "` + t.nodeGWIP + `"}}`)) return nil } @@ -650,7 +650,7 @@ var _ = Describe("OVN Pod Operations", func() { podAnnotation, ok := pod.Annotations[util.OvnPodAnnotationName] Expect(ok).To(BeTrue()) - Expect(podAnnotation).To(MatchJSON(`{"default": {"ip_address":"` + t.podIP + `/24", "mac_address":"` + t.podMAC + `", "gateway_ip": "` + t.nodeGWIP + `"}}`)) + Expect(podAnnotation).To(MatchJSON(`{"default": {"ip_addresses":["` + t.podIP + `/24"], "mac_address":"` + t.podMAC + `", "gateway_ips": ["` + t.nodeGWIP + `"], "ip_address":"` + t.podIP + `/24", "gateway_ip": "` + t.nodeGWIP + `"}}`)) // Simulate an OVN restart with a new IP assignment and verify that the pod annotation is updated. 
fExec.AddFakeCmd(&ovntest.ExpectedCmd{ @@ -676,7 +676,7 @@ var _ = Describe("OVN Pod Operations", func() { // Check that pod annotations have been re-written to correct values podAnnotation, ok = pod.Annotations[util.OvnPodAnnotationName] Expect(ok).To(BeTrue()) - Expect(podAnnotation).To(MatchJSON(`{"default": {"ip_address":"` + t.podIP + `/24", "mac_address":"` + t.podMAC + `", "gateway_ip": "` + t.nodeGWIP + `"}}`)) + Expect(podAnnotation).To(MatchJSON(`{"default": {"ip_addresses":["` + t.podIP + `/24"], "mac_address":"` + t.podMAC + `", "gateway_ips": ["` + t.nodeGWIP + `"], "ip_address":"` + t.podIP + `/24", "gateway_ip": "` + t.nodeGWIP + `"}}`)) return nil } diff --git a/go-controller/pkg/ovn/policy.go b/go-controller/pkg/ovn/policy.go index d79bfe3741..37f6f03191 100644 --- a/go-controller/pkg/ovn/policy.go +++ b/go-controller/pkg/ovn/policy.go @@ -1145,7 +1145,8 @@ func (oc *Controller) handlePeerPodSelectorAddUpdate(np *namespacePolicy, if err != nil { return } - ipAddress := podAnnotation.IP.IP.String() + // DUAL-STACK FIXME: handle multiple IPs + ipAddress := podAnnotation.IPs[0].IP.String() if addressMap[ipAddress] { return } @@ -1181,7 +1182,8 @@ func (oc *Controller) handlePeerPodSelectorDelete(np *namespacePolicy, if err != nil { return } - ipAddress := podAnnotation.IP.IP.String() + // DUAL-STACK FIXME: handle multiple IPs + ipAddress := podAnnotation.IPs[0].IP.String() np.Lock() defer np.Unlock() diff --git a/go-controller/pkg/util/pod_annotation.go b/go-controller/pkg/util/pod_annotation.go index a167369397..3902cfbbe1 100644 --- a/go-controller/pkg/util/pod_annotation.go +++ b/go-controller/pkg/util/pod_annotation.go @@ -6,8 +6,38 @@ import ( "net" "k8s.io/klog" + utilnet "k8s.io/utils/net" ) +// This handles the "k8s.ovn.org/pod-networks" annotation on Pods, used to pass +// information about networking from the master to the nodes. 
(The util.PodAnnotation +// struct is also embedded in the cni.PodInterfaceInfo type that is passed from the +// cniserver to the CNI shim.) +// +// The annotation looks like: +// +// annotations: +// k8s.ovn.org/pod-networks: | +// { +// "default": { +// "ip_addresses": ["192.168.0.5/24"], +// "mac_address": "0a:58:fd:98:00:01", +// "gateway_ips": ["192.168.0.1"] +// +// # for backward compatibility +// "ip_address": "192.168.0.5/24", +// "gateway_ip": "192.168.0.1" +// } +// } +// +// (With optional additional "routes" also indicated; in particular, if a pod has an +// additional network attachment that claims the default route, then the "default" network +// will have explicit routes to the cluster and service subnets.) +// +// The "ip_address" and "gateway_ip" fields are deprecated and will eventually go away. +// (And they are not output when "ip_addresses" or "gateway_ips" contains multiple +// values.) + const ( // OvnPodAnnotationName is the constant string representing the POD annotation key OvnPodAnnotationName = "k8s.ovn.org/pod-networks" @@ -15,15 +45,17 @@ const ( OvnPodDefaultNetwork = "default" ) -// PodAnnotation describes the pod's assigned network details +// PodAnnotation describes the assigned network details for a single pod network. (The +// actual annotation may include the equivalent of multiple PodAnnotations.) type PodAnnotation struct { - // IP is the pod's assigned IP address and prefix - IP *net.IPNet + // IPs are the pod's assigned IP addresses/prefixes + IPs []*net.IPNet // MAC is the pod's assigned MAC address MAC net.HardwareAddr - // GW is the pod's gateway IP address - GW net.IP - // Routes are routes to add to the pod's network namespace + // Gateways are the pod's gateway IP addresses; note that there may be + // fewer Gateways than IPs. 
+ Gateways []net.IP + // Routes are additional routes to add to the pod's network namespace Routes []PodRoute } @@ -35,15 +67,18 @@ type PodRoute struct { NextHop net.IP } -// Internal struct used to correctly marshal IPs to JSON +// Internal struct used to marshal PodAnnotation to the pod annotation type podAnnotation struct { - IP string `json:"ip_address"` - MAC string `json:"mac_address"` - GW string `json:"gateway_ip"` - Routes []podRoute `json:"routes,omitempty"` + IPs []string `json:"ip_addresses"` + MAC string `json:"mac_address"` + Gateways []string `json:"gateway_ips,omitempty"` + Routes []podRoute `json:"routes,omitempty"` + + IP string `json:"ip_address,omitempty"` + Gateway string `json:"gateway_ip,omitempty"` } -// Internal struct used to correctly marshal IPs to JSON +// Internal struct used to marshal PodRoute to the pod annotation type podRoute struct { Dest string `json:"dest"` NextHop string `json:"nextHop"` @@ -52,15 +87,25 @@ type podRoute struct { // MarshalPodAnnotation returns a JSON-formatted annotation describing the pod's // network details func MarshalPodAnnotation(podInfo *PodAnnotation) (map[string]string, error) { - var gw string - if podInfo.GW != nil { - gw = podInfo.GW.String() - } pa := podAnnotation{ - IP: podInfo.IP.String(), MAC: podInfo.MAC.String(), - GW: gw, } + + if len(podInfo.IPs) == 1 { + pa.IP = podInfo.IPs[0].String() + if len(podInfo.Gateways) == 1 { + pa.Gateway = podInfo.Gateways[0].String() + } else if len(podInfo.Gateways) > 1 { + return nil, fmt.Errorf("bad podNetwork data: single-stack network can only have a single gateway") + } + } + for _, ip := range podInfo.IPs { + pa.IPs = append(pa.IPs, ip.String()) + } + for _, gw := range podInfo.Gateways { + pa.Gateways = append(pa.Gateways, gw.String()) + } + for _, r := range podInfo.Routes { if r.Dest.IP.IsUnspecified() { return nil, fmt.Errorf("bad podNetwork data: default route %v should be specified as gateway", r) @@ -88,7 +133,7 @@ func 
MarshalPodAnnotation(podInfo *PodAnnotation) (map[string]string, error) { }, nil } -// UnmarshalPodAnnotation returns a the unmarshalled pod annotation +// UnmarshalPodAnnotation returns the default network info from pod.Annotations func UnmarshalPodAnnotation(annotations map[string]string) (*PodAnnotation, error) { ovnAnnotation, ok := annotations[OvnPodAnnotationName] if !ok { @@ -104,24 +149,43 @@ func UnmarshalPodAnnotation(annotations map[string]string) (*PodAnnotation, erro a := &tempA podAnnotation := &PodAnnotation{} - // Minimal validation - ip, ipnet, err := net.ParseCIDR(a.IP) - if err != nil { - return nil, fmt.Errorf("failed to parse pod IP %q: %v", a.IP, err) - } - ipnet.IP = ip - podAnnotation.IP = ipnet + var err error podAnnotation.MAC, err = net.ParseMAC(a.MAC) if err != nil { return nil, fmt.Errorf("failed to parse pod MAC %q: %v", a.MAC, err) } - if a.GW != "" { - podAnnotation.GW = net.ParseIP(a.GW) - if podAnnotation.GW == nil { - return nil, fmt.Errorf("failed to parse pod gateway %q", a.GW) + if len(a.IPs) == 0 { + if a.IP == "" { + return nil, fmt.Errorf("bad annotation data (neither ip_address nor ip_addresses is set)") + } + a.IPs = append(a.IPs, a.IP) + } else if a.IP != "" && a.IP != a.IPs[0] { + return nil, fmt.Errorf("bad annotation data (ip_address and ip_addresses conflict)") + } + for _, ipstr := range a.IPs { + ip, ipnet, err := net.ParseCIDR(ipstr) + if err != nil { + return nil, fmt.Errorf("failed to parse pod IP %q: %v", ipstr, err) + } + ipnet.IP = ip + podAnnotation.IPs = append(podAnnotation.IPs, ipnet) + } + + if len(a.Gateways) == 0 { + if a.Gateway != "" { + a.Gateways = append(a.Gateways, a.Gateway) + } + } else if a.Gateway != "" && a.Gateway != a.Gateways[0] { + return nil, fmt.Errorf("bad annotation data (gateway_ip and gateway_ips conflict)") + } + for _, gwstr := range a.Gateways { + gw := net.ParseIP(gwstr) + if err != nil { + return nil, fmt.Errorf("failed to parse pod gateway %q", gwstr) } + 
podAnnotation.Gateways = append(podAnnotation.Gateways, gw) } for _, r := range a.Routes { @@ -137,6 +201,8 @@ func UnmarshalPodAnnotation(annotations map[string]string) (*PodAnnotation, erro route.NextHop = net.ParseIP(r.NextHop) if route.NextHop == nil { return nil, fmt.Errorf("failed to parse pod route next hop %q", r.NextHop) + } else if utilnet.IsIPv6(route.NextHop) != utilnet.IsIPv6CIDR(route.Dest) { + return nil, fmt.Errorf("pod route %s has next hop %s of different family", r.Dest, r.NextHop) } } podAnnotation.Routes = append(podAnnotation.Routes, route) diff --git a/go-controller/pkg/util/pod_annotation_test.go b/go-controller/pkg/util/pod_annotation_test.go index f4ad16d7f8..f1b2fc98bd 100644 --- a/go-controller/pkg/util/pod_annotation_test.go +++ b/go-controller/pkg/util/pod_annotation_test.go @@ -19,30 +19,30 @@ var _ = Describe("Pod annotation tests", func() { { name: "Single-stack IPv4", in: &PodAnnotation{ - IP: mustParseCIDRAddress("192.168.0.5/24"), - MAC: mustParseMAC("0A:58:FD:98:00:01"), - GW: net.ParseIP("192.168.0.1"), + IPs: []*net.IPNet{mustParseCIDRAddress("192.168.0.5/24")}, + MAC: mustParseMAC("0A:58:FD:98:00:01"), + Gateways: []net.IP{net.ParseIP("192.168.0.1")}, }, out: map[string]string{ - "k8s.ovn.org/pod-networks": `{"default":{"ip_address":"192.168.0.5/24","mac_address":"0a:58:fd:98:00:01","gateway_ip":"192.168.0.1"}}`, + "k8s.ovn.org/pod-networks": `{"default":{"ip_addresses":["192.168.0.5/24"],"mac_address":"0a:58:fd:98:00:01","gateway_ips":["192.168.0.1"],"ip_address":"192.168.0.5/24","gateway_ip":"192.168.0.1"}}`, }, }, { name: "No GW", in: &PodAnnotation{ - IP: mustParseCIDRAddress("192.168.0.5/24"), + IPs: []*net.IPNet{mustParseCIDRAddress("192.168.0.5/24")}, MAC: mustParseMAC("0A:58:FD:98:00:01"), }, out: map[string]string{ - "k8s.ovn.org/pod-networks": `{"default":{"ip_address":"192.168.0.5/24","mac_address":"0a:58:fd:98:00:01","gateway_ip":""}}`, + "k8s.ovn.org/pod-networks": 
`{"default":{"ip_addresses":["192.168.0.5/24"],"mac_address":"0a:58:fd:98:00:01","ip_address":"192.168.0.5/24"}}`, }, }, { name: "Routes", in: &PodAnnotation{ - IP: mustParseCIDRAddress("192.168.0.5/24"), - MAC: mustParseMAC("0A:58:FD:98:00:01"), - GW: net.ParseIP("192.168.0.1"), + IPs: []*net.IPNet{mustParseCIDRAddress("192.168.0.5/24")}, + MAC: mustParseMAC("0A:58:FD:98:00:01"), + Gateways: []net.IP{net.ParseIP("192.168.0.1")}, Routes: []PodRoute{ { Dest: mustParseCIDR("192.168.1.0/24"), @@ -51,18 +51,35 @@ var _ = Describe("Pod annotation tests", func() { }, }, out: map[string]string{ - "k8s.ovn.org/pod-networks": `{"default":{"ip_address":"192.168.0.5/24","mac_address":"0a:58:fd:98:00:01","gateway_ip":"192.168.0.1","routes":[{"dest":"192.168.1.0/24","nextHop":"192.168.1.1"}]}}`, + "k8s.ovn.org/pod-networks": `{"default":{"ip_addresses":["192.168.0.5/24"],"mac_address":"0a:58:fd:98:00:01","gateway_ips":["192.168.0.1"],"routes":[{"dest":"192.168.1.0/24","nextHop":"192.168.1.1"}],"ip_address":"192.168.0.5/24","gateway_ip":"192.168.0.1"}}`, }, }, { name: "Single-stack IPv6", in: &PodAnnotation{ - IP: mustParseCIDRAddress("fd01::1234/64"), + IPs: []*net.IPNet{mustParseCIDRAddress("fd01::1234/64")}, + MAC: mustParseMAC("0A:58:FD:98:00:01"), + Gateways: []net.IP{net.ParseIP("fd01::1")}, + }, + out: map[string]string{ + "k8s.ovn.org/pod-networks": `{"default":{"ip_addresses":["fd01::1234/64"],"mac_address":"0a:58:fd:98:00:01","gateway_ips":["fd01::1"],"ip_address":"fd01::1234/64","gateway_ip":"fd01::1"}}`, + }, + }, + { + name: "Dual-stack", + in: &PodAnnotation{ + IPs: []*net.IPNet{ + mustParseCIDRAddress("192.168.0.5/24"), + mustParseCIDRAddress("fd01::1234/64"), + }, MAC: mustParseMAC("0A:58:FD:98:00:01"), - GW: net.ParseIP("fd01::1"), + Gateways: []net.IP{ + net.ParseIP("192.168.1.0"), + net.ParseIP("fd01::1"), + }, }, out: map[string]string{ - "k8s.ovn.org/pod-networks": 
`{"default":{"ip_address":"fd01::1234/64","mac_address":"0a:58:fd:98:00:01","gateway_ip":"fd01::1"}}`, + "k8s.ovn.org/pod-networks": `{"default":{"ip_addresses":["192.168.0.5/24","fd01::1234/64"],"mac_address":"0a:58:fd:98:00:01","gateway_ips":["192.168.1.0","fd01::1"]}}`, }, }, } From 6c94d5a34d6728eeb08426e724e54fa99c59ec2e Mon Sep 17 00:00:00 2001 From: Girish Moodalbail Date: Tue, 7 Apr 2020 18:17:32 -0700 Subject: [PATCH 14/27] nit: change ovsdb server connection method variable name - currently the variable is named with _test suffix, as in, ovn_nbdb_test. change it to ovn_nbdb_conn - add similar variable for SB DB - use the above two variables everywhere in the script Signed-off-by: Girish Moodalbail --- dist/images/ovnkube.sh | 47 +++++++++++++++++++++--------------------- 1 file changed, 23 insertions(+), 24 deletions(-) diff --git a/dist/images/ovnkube.sh b/dist/images/ovnkube.sh index 93ded3ff02..82b183d2c1 100755 --- a/dist/images/ovnkube.sh +++ b/dist/images/ovnkube.sh @@ -239,7 +239,7 @@ ready_to_start_node() { # cannot use ovsdb-client in the case of raft, since it will succeed even if one of the # instance of DB is up and running. HOwever, ovn-nbctl always connects to the leader in the clustered # database, so use it. - ovn-nbctl --db=${ovn_nbdb_test} list NB_Global >/dev/null 2>&1 + ovn-nbctl --db=${ovn_nbdb_conn} list NB_Global >/dev/null 2>&1 if [[ $? != 0 ]]; then return 1 fi @@ -277,7 +277,9 @@ get_ovn_db_vars() { echo ovn_nbdb=$ovn_nbdb echo ovn_sbdb=$ovn_sbdb - ovn_nbdb_test=$(echo ${ovn_nbdb} | sed 's;//;;g') + # ovsdb server connection method :: + ovn_nbdb_conn=$(echo ${ovn_nbdb} | sed 's;//;;g') + ovn_sbdb_conn=$(echo ${ovn_sbdb} | sed 's;//;;g') } # OVS must be up before OVN comes up. 
@@ -446,18 +448,19 @@ display_env() { ovn_debug() { wait_for_event attempts=3 ready_to_start_node echo "ovn_nbdb ${ovn_nbdb} ovn_sbdb ${ovn_sbdb}" - echo "ovn_nbdb_test ${ovn_nbdb_test}" + echo "ovn_nbdb_conn ${ovn_nbdb_conn}" + echo "ovn_sbdb_conn ${ovn_sbdb_conn}" # get ovs/ovn info from the node for debug purposes echo "=========== ovn_debug hostname: ${ovn_pod_host} =============" - echo "=========== ovn-nbctl --db=${ovn_nbdb_test} show =============" - ovn-nbctl --db=${ovn_nbdb_test} show + echo "=========== ovn-nbctl --db=${ovn_nbdb_conn} show =============" + ovn-nbctl --db=${ovn_nbdb_conn} show echo " " echo "=========== ovn-nbctl list ACL =============" - ovn-nbctl --db=${ovn_nbdb_test} list ACL + ovn-nbctl --db=${ovn_nbdb_conn} list ACL echo " " echo "=========== ovn-nbctl list address_set =============" - ovn-nbctl --db=${ovn_nbdb_test} list address_set + ovn-nbctl --db=${ovn_nbdb_conn} list address_set echo " " echo "=========== ovs-vsctl show =============" ovs-vsctl show @@ -471,19 +474,17 @@ ovn_debug() { echo "=========== ovs-ofctl dump-flows br-int =============" ovs-ofctl dump-flows br-int echo " " - echo "=========== ovn-sbctl show =============" - ovn_sbdb_test=$(echo ${ovn_sbdb} | sed 's;//;;g') - echo "=========== ovn-sbctl --db=${ovn_sbdb_test} show =============" - ovn-sbctl --db=${ovn_sbdb_test} show + echo "=========== ovn-sbctl --db=${ovn_sbdb_conn} show =============" + ovn-sbctl --db=${ovn_sbdb_conn} show echo " " - echo "=========== ovn-sbctl --db=${ovn_sbdb_test} lflow-list =============" - ovn-sbctl --db=${ovn_sbdb_test} lflow-list + echo "=========== ovn-sbctl --db=${ovn_sbdb_conn} lflow-list =============" + ovn-sbctl --db=${ovn_sbdb_conn} lflow-list echo " " - echo "=========== ovn-sbctl --db=${ovn_sbdb_test} list datapath =============" - ovn-sbctl --db=${ovn_sbdb_test} list datapath + echo "=========== ovn-sbctl --db=${ovn_sbdb_conn} list datapath =============" + ovn-sbctl --db=${ovn_sbdb_conn} list datapath echo " " - echo 
"=========== ovn-sbctl --db=${ovn_sbdb_test} list port_binding =============" - ovn-sbctl --db=${ovn_sbdb_test} list port_binding + echo "=========== ovn-sbctl --db=${ovn_sbdb_conn} list port_binding =============" + ovn-sbctl --db=${ovn_sbdb_conn} list port_binding } ovs-server() { @@ -674,12 +675,10 @@ run-ovn-northd() { # no monitor (and no detach), start northd which connects to the # ovnkube-db service - ovn_nbdb_i=$(echo ${ovn_nbdb} | sed 's;//;;g') - ovn_sbdb_i=$(echo ${ovn_sbdb} | sed 's;//;;g') run_as_ovs_user_if_needed \ ${OVNCTL_PATH} start_northd \ --no-monitor --ovn-manage-ovsdb=no \ - --ovn-northd-nb-db=${ovn_nbdb_i} --ovn-northd-sb-db=${ovn_sbdb_i} \ + --ovn-northd-nb-db=${ovn_nbdb_conn} --ovn-northd-sb-db=${ovn_sbdb_conn} \ --ovn-northd-log="${ovn_loglevel_northd}" \ ${ovn_northd_opts} @@ -760,7 +759,7 @@ ovn-controller() { wait_for_event ready_to_start_node echo "ovn_nbdb ${ovn_nbdb} ovn_sbdb ${ovn_sbdb}" - echo "ovn_nbdb_test ${ovn_nbdb_test}" + echo "ovn_nbdb_conn ${ovn_nbdb_conn}" # cleanup any stale ovn-nb and ovn-remote keys in Open_vSwitch table ovs-vsctl remove Open_vSwitch . 
external_ids ovn-remote @@ -797,7 +796,7 @@ ovn-node() { echo "=============== ovn-node - (wait for ready_to_start_node)" wait_for_event ready_to_start_node - echo "ovn_nbdb ${ovn_nbdb} ovn_sbdb ${ovn_sbdb} ovn_nbdb_test ${ovn_nbdb_test}" + echo "ovn_nbdb ${ovn_nbdb} ovn_sbdb ${ovn_sbdb} ovn_nbdb_conn ${ovn_nbdb_conn}" echo "=============== ovn-node - (ovn-node wait for ovn-controller.pid)" wait_for_event process_ready ovn-controller @@ -876,11 +875,11 @@ run-nbctld() { echo "=============== run-nbctld - (wait for ready_to_start_node)" wait_for_event ready_to_start_node - echo "ovn_nbdb ${ovn_nbdb} ovn_sbdb ${ovn_sbdb} ovn_nbdb_test ${ovn_nbdb_test}" + echo "ovn_nbdb ${ovn_nbdb} ovn_sbdb ${ovn_sbdb} ovn_nbdb_conn ${ovn_nbdb_conn}" echo "ovn_loglevel_nbctld=${ovn_loglevel_nbctld}" # use unix socket - /usr/bin/ovn-nbctl ${ovn_loglevel_nbctld} --pidfile --db=${ovn_nbdb_test} --log-file=${OVN_LOGDIR}/ovn-nbctl.log --detach + /usr/bin/ovn-nbctl ${ovn_loglevel_nbctld} --pidfile --db=${ovn_nbdb_conn} --log-file=${OVN_LOGDIR}/ovn-nbctl.log --detach wait_for_event attempts=3 process_ready ovn-nbctl echo "=============== run_ovn_nbctl ========== RUNNING" From a9fec6c006c855ebe04b0602a3151459ecfd3333 Mon Sep 17 00:00:00 2001 From: Pardhakeswar Pacha Date: Wed, 8 Apr 2020 04:30:07 -0400 Subject: [PATCH 15/27] ovnkube.sh: Adding iptable rules to open up the OVN NB/SB DB raft ports Signed-off-by: Pardhakeswar Pacha --- dist/images/ovndb-raft-functions.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/dist/images/ovndb-raft-functions.sh b/dist/images/ovndb-raft-functions.sh index 66f3d8c4b0..6653248fbc 100644 --- a/dist/images/ovndb-raft-functions.sh +++ b/dist/images/ovndb-raft-functions.sh @@ -198,6 +198,8 @@ ovsdb-raft() { echo "failed to retrieve the IP address of the host $(hostname). Exiting..." exit 1 fi + iptables-rules ${raft_port} + echo "=============== run ${db}-ovsdb-raft pod ${POD_NAME} ==========" if [[ ! 
-e ${ovn_db_file} ]] || ovsdb-tool db-is-standalone ${ovn_db_file}; then From 72e4c22c45756746cb5f416b33f4112db75ea148 Mon Sep 17 00:00:00 2001 From: Andrew Sun Date: Wed, 8 Apr 2020 15:17:23 -0400 Subject: [PATCH 16/27] Also run CI on non-master branches Signed-off-by: Andrew Sun --- .github/workflows/test.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index d63e120a95..67a13494d8 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -2,7 +2,6 @@ name: ovn-ci on: push: - branches: [ master ] pull_request: branches: [ master ] From b744abc57e4769f9d2b5d3c775cc7eed6cf9f475 Mon Sep 17 00:00:00 2001 From: Andrew Sun Date: Wed, 8 Apr 2020 15:34:31 -0400 Subject: [PATCH 17/27] Download kind in CI instead of go getting it Signed-off-by: Andrew Sun --- test/scripts/install-kind.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/scripts/install-kind.sh b/test/scripts/install-kind.sh index efa7ac82da..becdf4095d 100755 --- a/test/scripts/install-kind.sh +++ b/test/scripts/install-kind.sh @@ -11,7 +11,8 @@ pushd $GOPATH/src/k8s.io/kubernetes/ sudo ln ./_output/local/go/bin/kubectl /usr/local/bin/kubectl popd -GO111MODULE="on" go get sigs.k8s.io/kind@v0.7.0 +wget -O $GOPATH/bin/kind https://github.com/kubernetes-sigs/kind/releases/download/v0.7.0/kind-linux-amd64 +chmod +x $GOPATH/bin/kind pushd ../contrib ./kind.sh popd From 17776fa2627316e9a4c4d1f38c1b9d6081e6f272 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 8 Apr 2020 16:36:36 -0500 Subject: [PATCH 18/27] ci: shorter GitHub action job names Instead of "End-To-End Tests (e2e-k..." in the Github show more of the relevant bits eg "e2e (shard-n,...". Since all the tests use KIND, and all the tests use OVN, "kind-ovn-" seems less relevant too. 
Signed-off-by: Dan Williams --- .github/workflows/test.yml | 12 ++++++------ test/Makefile | 8 ++++---- test/scripts/e2e-kind.sh | 8 ++++---- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index d63e120a95..6aed361d79 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -109,17 +109,17 @@ jobs: rm -rf .git e2e: - name: End-To-End Tests + name: e2e runs-on: ubuntu-latest strategy: fail-fast: false matrix: target: - - e2e-kind-ovn-shard-n - - e2e-kind-ovn-shard-np - - e2e-kind-ovn-shard-s - - e2e-kind-ovn-shard-other - - e2e-control-plane + - shard-n + - shard-np + - shard-s + - shard-other + - control-plane ha: - enabled: "true" name: "HA" diff --git a/test/Makefile b/test/Makefile index 5bdcae579b..e68bf9ddc6 100644 --- a/test/Makefile +++ b/test/Makefile @@ -2,10 +2,10 @@ install-kind: ./scripts/install-kind.sh -.PHONY: e2e-kind-ovn-shard-% -e2e-kind-ovn-shard-%: +.PHONY: shard-% +shard-%: ./scripts/e2e-kind.sh $@ -.PHONY: e2e-control-plane -e2e-control-plane: +.PHONY: control-plane +control-plane: ./scripts/e2e-cp.sh diff --git a/test/scripts/e2e-kind.sh b/test/scripts/e2e-kind.sh index b73074dd55..5e7d1426f3 100755 --- a/test/scripts/e2e-kind.sh +++ b/test/scripts/e2e-kind.sh @@ -14,18 +14,18 @@ SKIPPED_TESTS='Networking\sIPerf\sIPv[46]|\[Feature:PerformanceDNS\]|\[Feature:I GINKGO_ARGS="--num-nodes=3 --ginkgo.skip=${SKIPPED_TESTS} --disable-log-dump=false" case "$SHARD" in - e2e-kind-ovn-shard-n) + shard-n) # all tests that don't have P as their sixth letter after the N GINKGO_ARGS="${GINKGO_ARGS} "'--ginkgo.focus=\[sig-network\]\s[Nn](.{6}[^Pp].*|.{0,6}$)' ;; - e2e-kind-ovn-shard-np) + shard-np) # all tests that have P as the sixth letter after the N GINKGO_ARGS="${GINKGO_ARGS} "'--ginkgo.focus=\[sig-network\]\s[Nn].{6}[Pp].*$' ;; - e2e-kind-ovn-shard-s) + shard-s) GINKGO_ARGS="${GINKGO_ARGS} "'--ginkgo.focus=\[sig-network\]\s[Ss].*' ;; - e2e-kind-ovn-shard-other) + 
shard-other) GINKGO_ARGS="${GINKGO_ARGS} "'--ginkgo.focus=\[sig-network\]\s[^NnSs].*' ;; *) From 8ca49c0e1bc6caf32851e3cf44dc1fc3ad66eb90 Mon Sep 17 00:00:00 2001 From: Dan Winship Date: Sun, 5 Apr 2020 09:34:14 -0400 Subject: [PATCH 19/27] ovn: add comments, rename a few things related to load balancers In particular, the loadbalancer functions are used for all types of services (ClusterIP, NodePort, ExternalIP) so it's confusing to refer to "service IP" when the IP might actually be, eg, a node IP. So say "source IP" instead. Signed-off-by: Dan Winship --- go-controller/pkg/ovn/endpoints.go | 2 +- go-controller/pkg/ovn/gateway.go | 28 ++++++------ go-controller/pkg/ovn/loadbalancer.go | 64 ++++++++++++++------------- go-controller/pkg/ovn/service.go | 14 +++--- 4 files changed, 55 insertions(+), 53 deletions(-) diff --git a/go-controller/pkg/ovn/endpoints.go b/go-controller/pkg/ovn/endpoints.go index b28c6e9a69..016c2cae2b 100644 --- a/go-controller/pkg/ovn/endpoints.go +++ b/go-controller/pkg/ovn/endpoints.go @@ -76,7 +76,7 @@ func (ovn *Controller) AddEndpoints(ep *kapi.Endpoints) error { } if util.ServiceTypeHasNodePort(svc) { klog.V(5).Infof("Creating Gateways IP for NodePort: %d, %v", svcPort.NodePort, ips) - err = ovn.createGatewaysVIP(svcPort.Protocol, svcPort.NodePort, targetPort, ips) + err = ovn.createGatewayVIPs(svcPort.Protocol, svcPort.NodePort, ips, targetPort) if err != nil { klog.Errorf("Error in creating Node Port for svc %s, node port: %d - %v\n", svc.Name, svcPort.NodePort, err) continue diff --git a/go-controller/pkg/ovn/gateway.go b/go-controller/pkg/ovn/gateway.go index fa8935afe1..8f5b388444 100644 --- a/go-controller/pkg/ovn/gateway.go +++ b/go-controller/pkg/ovn/gateway.go @@ -40,9 +40,8 @@ func (ovn *Controller) getGatewayLoadBalancer(physicalGateway string, protocol k return loadBalancer, nil } -func (ovn *Controller) createGatewaysVIP(protocol kapi.Protocol, port, targetPort int32, ips []string) error { - - klog.V(5).Infof("Creating 
Gateway VIP - %s, %d, %d, %v", protocol, port, targetPort, ips) +func (ovn *Controller) createGatewayVIPs(protocol kapi.Protocol, sourcePort int32, targetIPs []string, targetPort int32) error { + klog.V(5).Infof("Creating Gateway VIPs - %s, %d, [%v], %d", protocol, sourcePort, targetIPs, targetPort) // Each gateway has a separate load-balancer for N/S traffic @@ -52,11 +51,10 @@ func (ovn *Controller) createGatewaysVIP(protocol kapi.Protocol, port, targetPor } for _, physicalGateway := range physicalGateways { - loadBalancer, err := ovn.getGatewayLoadBalancer(physicalGateway, - protocol) + loadBalancer, err := ovn.getGatewayLoadBalancer(physicalGateway, protocol) if err != nil { - klog.Errorf("physical gateway %s does not have load_balancer "+ - "(%v)", physicalGateway, err) + klog.Errorf("physical gateway %s does not have load_balancer (%v)", + physicalGateway, err) continue } if loadBalancer == "" { @@ -68,9 +66,9 @@ func (ovn *Controller) createGatewaysVIP(protocol kapi.Protocol, port, targetPor physicalGateway, err) continue } - // With the physical_ip:port as the VIP, add an entry in + // With the physical_ip:sourcePort as the VIP, add an entry in // 'load_balancer'. 
- err = ovn.createLoadBalancerVIP(loadBalancer, physicalIP, port, ips, targetPort) + err = ovn.createLoadBalancerVIP(loadBalancer, physicalIP, sourcePort, targetIPs, targetPort) if err != nil { klog.Errorf("Failed to create VIP in load balancer %s - %v", loadBalancer, err) continue @@ -79,8 +77,8 @@ func (ovn *Controller) createGatewaysVIP(protocol kapi.Protocol, port, targetPor return nil } -func (ovn *Controller) deleteGatewaysVIP(protocol kapi.Protocol, port int32) { - klog.V(5).Infof("Searching to remove Gateway VIP - %s, %d", protocol, port) +func (ovn *Controller) deleteGatewayVIPs(protocol kapi.Protocol, sourcePort int32) { + klog.V(5).Infof("Searching to remove Gateway VIPs - %s, %d", protocol, sourcePort) physicalGateways, _, err := ovn.getOvnGateways() if err != nil { klog.Errorf("Error while searching for gateways: %v", err) @@ -90,8 +88,8 @@ func (ovn *Controller) deleteGatewaysVIP(protocol kapi.Protocol, port int32) { for _, physicalGateway := range physicalGateways { loadBalancer, err := ovn.getGatewayLoadBalancer(physicalGateway, protocol) if err != nil { - klog.Errorf("physical gateway %s does not have load_balancer "+ - "(%v)", physicalGateway, err) + klog.Errorf("physical gateway %s does not have load_balancer (%v)", + physicalGateway, err) continue } if loadBalancer == "" { @@ -103,8 +101,8 @@ func (ovn *Controller) deleteGatewaysVIP(protocol kapi.Protocol, port int32) { physicalGateway, err) continue } - // With the physical_ip:port as the VIP, delete an entry in 'load_balancer'. - vip := util.JoinHostPortInt32(physicalIP, port) + // With the physical_ip:sourcePort as the VIP, delete an entry in 'load_balancer'. 
+ vip := util.JoinHostPortInt32(physicalIP, sourcePort) klog.V(5).Infof("Removing gateway VIP: %s from loadbalancer: %s", vip, loadBalancer) ovn.deleteLoadBalancerVIP(loadBalancer, vip) } diff --git a/go-controller/pkg/ovn/loadbalancer.go b/go-controller/pkg/ovn/loadbalancer.go index eab6fe8e83..361e0328e0 100644 --- a/go-controller/pkg/ovn/loadbalancer.go +++ b/go-controller/pkg/ovn/loadbalancer.go @@ -13,8 +13,7 @@ import ( utilnet "k8s.io/utils/net" ) -func (ovn *Controller) getLoadBalancer(protocol kapi.Protocol) (string, - error) { +func (ovn *Controller) getLoadBalancer(protocol kapi.Protocol) (string, error) { if outStr, ok := ovn.loadbalancerClusterCache[protocol]; ok { return outStr, nil } @@ -44,6 +43,8 @@ func (ovn *Controller) getLoadBalancer(protocol kapi.Protocol) (string, return out, nil } +// getDefaultGatewayLoadBalancer returns the load balancer for the node with the lowest gateway IP. +// This is used in the implementation of ExternalIPs func (ovn *Controller) getDefaultGatewayLoadBalancer(protocol kapi.Protocol) string { if outStr, ok := ovn.loadbalancerGWCache[protocol]; ok { return outStr @@ -66,8 +67,8 @@ func (ovn *Controller) getDefaultGatewayLoadBalancer(protocol kapi.Protocol) str return lb } -func (ovn *Controller) getLoadBalancerVIPS( - loadBalancer string) (map[string]interface{}, error) { +// getLoadBalancerVIPs returns a map whose keys are VIPs (IP:port) on loadBalancer +func (ovn *Controller) getLoadBalancerVIPs(loadBalancer string) (map[string]interface{}, error) { outStr, _, err := util.RunOVNNbctl("--data=bare", "--no-heading", "get", "load_balancer", loadBalancer, "vips") if err != nil { @@ -105,37 +106,40 @@ func (ovn *Controller) deleteLoadBalancerVIP(loadBalancer, vip string) { ovn.removeServiceLB(loadBalancer, vip) } -func (ovn *Controller) configureLoadBalancer(lb, serviceIP string, port int32, endpoints []string) error { +// configureLoadBalancer updates the VIP for sourceIP:sourcePort to point to targets (an +// array of 
IP:port strings) +func (ovn *Controller) configureLoadBalancer(lb, sourceIP string, sourcePort int32, targets []string) error { ovn.serviceLBLock.Lock() defer ovn.serviceLBLock.Unlock() - commaSeparatedEps := strings.Join(endpoints, ",") - target := fmt.Sprintf(`vips:"%s"="%s"`, util.JoinHostPortInt32(serviceIP, port), commaSeparatedEps) - out, stderr, err := util.RunOVNNbctl("set", "load_balancer", lb, target) + vip := util.JoinHostPortInt32(sourceIP, sourcePort) + lbTarget := fmt.Sprintf(`vips:"%s"="%s"`, vip, strings.Join(targets, ",")) + + out, stderr, err := util.RunOVNNbctl("set", "load_balancer", lb, lbTarget) if err != nil { return fmt.Errorf("error in configuring load balancer: %s "+ "stdout: %q, stderr: %q, error: %v", lb, out, stderr, err) } - ovn.setServiceEndpointsToLB(lb, util.JoinHostPortInt32(serviceIP, port), endpoints) - klog.V(5).Infof("lb entry set for %s, %s, %v", lb, target, - ovn.serviceLBMap[lb][util.JoinHostPortInt32(serviceIP, port)]) + ovn.setServiceEndpointsToLB(lb, vip, targets) + klog.V(5).Infof("lb entry set for %s, %s, %v", lb, lbTarget, + ovn.serviceLBMap[lb][vip]) return nil } -// createLoadBalancerVIP either creates or updates a load balancer VIP -// Calls to this method assume that if ips are passed that those endpoints actually exist -// and thus the reject ACL is removed -func (ovn *Controller) createLoadBalancerVIP(lb, serviceIP string, port int32, ips []string, targetPort int32) error { - klog.V(5).Infof("Creating lb with %s, %s, %d, [%v], %d", lb, serviceIP, port, ips, targetPort) +// createLoadBalancerVIP either creates or updates a load balancer VIP mapping from +// sourceIP:sourcePort to targetIP:targetPort for each IP in targetIPs. If targetIPs +// is non-empty then the reject ACL for the service is removed. 
+func (ovn *Controller) createLoadBalancerVIP(lb, sourceIP string, sourcePort int32, targetIPs []string, targetPort int32) error { + klog.V(5).Infof("Creating lb with %s, %s, %d, [%v], %d", lb, sourceIP, sourcePort, targetIPs, targetPort) - var endpoints []string - for _, ip := range ips { - endpoints = append(endpoints, util.JoinHostPortInt32(ip, targetPort)) + var targets []string + for _, targetIP := range targetIPs { + targets = append(targets, util.JoinHostPortInt32(targetIP, targetPort)) } - err := ovn.configureLoadBalancer(lb, serviceIP, port, endpoints) - if len(ips) > 0 { + err := ovn.configureLoadBalancer(lb, sourceIP, sourcePort, targets) + if len(targets) > 0 { // ensure the ACL is removed if it exists - ovn.deleteLoadBalancerRejectACL(lb, util.JoinHostPortInt32(serviceIP, port)) + ovn.deleteLoadBalancerRejectACL(lb, util.JoinHostPortInt32(sourceIP, sourcePort)) } return err } @@ -169,7 +173,7 @@ func (ovn *Controller) getLogicalSwitchesForLoadBalancer(lb string) ([]string, e return nil, fmt.Errorf("router detected with load balancer that is not a GR") } -func (ovn *Controller) createLoadBalancerRejectACL(lb string, serviceIP string, port int32, proto kapi.Protocol) (string, error) { +func (ovn *Controller) createLoadBalancerRejectACL(lb string, sourceIP string, sourcePort int32, proto kapi.Protocol) (string, error) { ovn.serviceLBLock.Lock() defer ovn.serviceLBLock.Unlock() switches, err := ovn.getLogicalSwitchesForLoadBalancer(lb) @@ -182,9 +186,9 @@ func (ovn *Controller) createLoadBalancerRejectACL(lb string, serviceIP string, return "", nil } - ip := net.ParseIP(serviceIP) + ip := net.ParseIP(sourceIP) if ip == nil { - return "", fmt.Errorf("cannot create reject ACL, invalid cluster IP: %s", serviceIP) + return "", fmt.Errorf("cannot create reject ACL, invalid source IP: %s", sourceIP) } var aclMatch string var l3Prefix string @@ -193,7 +197,7 @@ func (ovn *Controller) createLoadBalancerRejectACL(lb string, serviceIP string, } else { l3Prefix = 
"ip4" } - vip := util.JoinHostPortInt32(serviceIP, port) + vip := util.JoinHostPortInt32(sourceIP, sourcePort) aclName := fmt.Sprintf("%s-%s", lb, vip) // If ovn-k8s was restarted, we lost the cache, and an ACL may already exist in OVN. In that case we need to check // using ACL name @@ -210,12 +214,12 @@ func (ovn *Controller) createLoadBalancerRejectACL(lb string, serviceIP string, klog.Warningf("Unable to add reject ACL: %s for switch: %s", aclUUID, ls) } } - ovn.setServiceACLToLB(lb, util.JoinHostPortInt32(serviceIP, port), aclUUID) + ovn.setServiceACLToLB(lb, vip, aclUUID) return aclUUID, nil } - aclMatch = fmt.Sprintf("match=\"%s.dst==%s && %s && %s.dst==%d\"", l3Prefix, serviceIP, - strings.ToLower(string(proto)), strings.ToLower(string(proto)), port) + aclMatch = fmt.Sprintf("match=\"%s.dst==%s && %s && %s.dst==%d\"", l3Prefix, sourceIP, + strings.ToLower(string(proto)), strings.ToLower(string(proto)), sourcePort) cmd := []string{"--id=@acl", "create", "acl", "direction=from-lport", "priority=1000", aclMatch, "action=reject", fmt.Sprintf("name=%s", strings.ReplaceAll(aclName, ":", "\\:"))} @@ -230,7 +234,7 @@ func (ovn *Controller) createLoadBalancerRejectACL(lb string, serviceIP string, } else { // Associate ACL UUID with load balancer and ip+port so we can remove this ACL if // backends are re-added. 
- ovn.setServiceACLToLB(lb, util.JoinHostPortInt32(serviceIP, port), aclUUID) + ovn.setServiceACLToLB(lb, vip, aclUUID) } return aclUUID, nil } diff --git a/go-controller/pkg/ovn/service.go b/go-controller/pkg/ovn/service.go index dde52f67e9..f485a7aaaf 100644 --- a/go-controller/pkg/ovn/service.go +++ b/go-controller/pkg/ovn/service.go @@ -88,17 +88,17 @@ func (ovn *Controller) syncServices(services []interface{}) { continue } - loadBalancerVIPS, err := ovn.getLoadBalancerVIPS(loadBalancer) + loadBalancerVIPs, err := ovn.getLoadBalancerVIPs(loadBalancer) if err != nil { klog.Errorf("failed to get load-balancer vips for %s (%v)", loadBalancer, err) continue } - if loadBalancerVIPS == nil { + if loadBalancerVIPs == nil { continue } - for vip := range loadBalancerVIPS { + for vip := range loadBalancerVIPs { if !stringSliceMembership(clusterServices[protocol], vip) { klog.V(5).Infof("Deleting stale cluster vip %s in "+ "loadbalancer %s", vip, loadBalancer) @@ -128,17 +128,17 @@ func (ovn *Controller) syncServices(services []interface{}) { continue } - loadBalancerVIPS, err := ovn.getLoadBalancerVIPS(loadBalancer) + loadBalancerVIPs, err := ovn.getLoadBalancerVIPs(loadBalancer) if err != nil { klog.Errorf("failed to get load-balancer vips for %s (%v)", loadBalancer, err) continue } - if loadBalancerVIPS == nil { + if loadBalancerVIPs == nil { continue } - for vip := range loadBalancerVIPS { + for vip := range loadBalancerVIPs { _, port, err := net.SplitHostPort(vip) if err != nil { // In a OVN load-balancer, we should always have vip:port. @@ -319,7 +319,7 @@ func (ovn *Controller) deleteService(service *kapi.Service) { var targetPort int32 if util.ServiceTypeHasNodePort(service) { // Delete the 'NodePort' service from a load-balancer instantiated in gateways. 
- ovn.deleteGatewaysVIP(protocol, port) + ovn.deleteGatewayVIPs(protocol, port) } if util.ServiceTypeHasClusterIP(service) { loadBalancer, err := ovn.getLoadBalancer(protocol) From f2dbc63c5efbb23743a31c1a46db589d9a5bb6dd Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 8 Apr 2020 16:42:49 -0500 Subject: [PATCH 20/27] install: remove unsupported install methods Going forward officially supported install methods will be KIND (Kubernetes-in-Docker) and daemonset YAML files. Signed-off-by: Dan Williams --- .gitignore | 9 - README.md | 9 - README_MANUAL.md | 4 - contrib/README.md | 83 ---- contrib/ansible.cfg | 2 - contrib/inventory/README.md | 12 - contrib/inventory/group_vars/all | 23 -- contrib/inventory/group_vars/kube-master | 7 - .../inventory/group_vars/kube-minions-linux | 6 - .../inventory/group_vars/kube-minions-windows | 22 -- contrib/inventory/host_vars/README.md | 33 -- contrib/inventory/hosts | 10 - contrib/ovn-kubernetes-cluster.yml | 158 -------- contrib/roles/linux/common/tasks/golang.yml | 58 --- .../linux/common/tasks/k8s_client_certs.yml | 127 ------- .../linux/common/tasks/kubectl_client.yml | 45 --- contrib/roles/linux/common/vars/ubuntu.yml | 7 - contrib/roles/linux/docker/tasks/main.yml | 40 -- .../linux/docker/tasks/restart_docker.yml | 25 -- contrib/roles/linux/docker/vars/ubuntu.yml | 20 - .../linux/kubernetes/tasks/deploy_coredns.yml | 19 - .../kubernetes/tasks/distribute_binaries.yml | 24 -- .../tasks/download_k8s_binaries.yml | 98 ----- .../kubernetes/tasks/fetch_k8s_binaries.yml | 20 - .../tasks/generate_certs_master.yml | 98 ----- .../kubernetes/tasks/generate_global_vars.yml | 39 -- .../linux/kubernetes/tasks/init_gateway.yml | 49 --- contrib/roles/linux/kubernetes/tasks/main.yml | 64 ---- .../linux/kubernetes/tasks/prepare_master.yml | 346 ----------------- .../linux/kubernetes/tasks/prepare_minion.yml | 164 -------- .../linux/kubernetes/tasks/set_ip_facts.yml | 37 -- .../roles/linux/kubernetes/vars/ubuntu.yml | 41 -- 
.../tasks/build_install_release_ovs.yml | 108 ------ .../tasks/download_install_packages.yml | 83 ---- .../roles/linux/openvswitch/tasks/main.yml | 22 -- .../roles/linux/openvswitch/vars/ubuntu.yml | 40 -- .../tasks/build_install_bins.yml | 74 ---- .../ovn-kubernetes/tasks/distribute_bins.yml | 30 -- .../linux/ovn-kubernetes/tasks/fetch_bins.yml | 18 - .../roles/linux/ovn-kubernetes/tasks/main.yml | 42 -- .../linux/ovn-kubernetes/vars/ubuntu.yml | 16 - contrib/roles/linux/validation/tasks/main.yml | 56 --- .../validation/tasks/validate_service.yml | 25 -- .../roles/linux/validation/vars/ubuntu.yml | 11 - .../roles/linux/version_check/tasks/main.yml | 10 - .../windows/docker/tasks/install_docker.yml | 101 ----- contrib/roles/windows/docker/tasks/main.yml | 14 - contrib/roles/windows/docker/vars/windows.yml | 4 - .../tasks/create_infracontainer.yml | 24 -- .../kubernetes/tasks/get_ovn_subnet.yml | 31 -- .../roles/windows/kubernetes/tasks/main.yml | 115 ------ .../kubernetes/tasks/run_minion_init.yml | 114 ------ .../windows/kubernetes/tasks/set_ip_facts.yml | 93 ----- .../kubernetes/tasks/setup_ovs_hns.yml | 74 ---- .../windows/kubernetes/tasks/setup_sdn.yml | 48 --- .../kubernetes/tasks/start_kubelet.yml | 108 ------ .../roles/windows/kubernetes/vars/windows.yml | 54 --- .../openvswitch/tasks/install_custom_ovs.yml | 54 --- .../windows/openvswitch/tasks/install_ovs.yml | 13 - .../roles/windows/openvswitch/tasks/main.yml | 19 - .../windows/openvswitch/vars/windows.yml | 4 - .../tasks/distribute_binaries.yml | 24 -- .../windows/ovn-kubernetes/tasks/main.yml | 6 - .../windows/ovn-kubernetes/vars/windows.yml | 3 - .../requirements/tasks/fetch_utils.yml | 66 ---- .../roles/windows/requirements/tasks/main.yml | 52 --- .../windows/requirements/vars/windows.yml | 7 - .../roles/windows/validation/tasks/main.yml | 7 - .../validation/tasks/validate_service.yml | 20 - .../roles/windows/validation/vars/windows.yml | 8 - .../windows/version_check/tasks/main.yml | 8 - 
.../windows/version_check/vars/windows.yml | 5 - dist/Makefile | 54 --- dist/READMEcontainer.md | 20 - dist/READMEopenshiftdevpreview.md | 132 ------- dist/READMEopenshifttechpreview.md | 341 ----------------- dist/ansible/hosts | 43 --- dist/ansible/ovn-playbook.yaml | 84 ---- dist/ansible/ovn-uninstall.yaml | 75 ---- dist/ansible/run-playbook | 27 -- dist/ansible/scripts/ovn-debug | 53 --- dist/ansible/scripts/ovn-display | 55 --- dist/ansible/scripts/ovn-logs | 55 --- dist/files/ovn-kubernetes-master.service | 15 - dist/files/ovn-kubernetes-master.sh | 16 - dist/files/ovn-kubernetes-node.service | 15 - dist/files/ovn-kubernetes-node.sh | 17 - dist/files/ovn-kubernetes.sysconfig | 15 - dist/images/Makefile | 2 +- dist/images/daemonset.sh | 78 +--- dist/openvswitch-ovn-kubernetes.spec | 145 ------- dist/templates/ovn-setup.yaml.j2 | 6 +- test/integration/README.md | 19 - test/integration/ansible.cfg | 359 ------------------ test/integration/build/openshift.yml | 16 - test/integration/build/ovnkube.yml | 39 -- test/integration/golang.yml | 49 --- test/integration/hosts | 2 - test/integration/main.yml | 28 -- test/integration/openshift-dind-test.yml | 23 -- test/integration/system.yml | 79 ---- test/integration/vars.yml | 7 - vagrant/README.md | 80 ---- vagrant/Vagrantfile | 123 ------ vagrant/ovnkube-rbac.yaml | 46 --- vagrant/provisioning/setup-hostnames.sh | 29 -- vagrant/provisioning/setup-master.sh | 316 --------------- vagrant/provisioning/setup-minion.sh | 185 --------- vagrant/provisioning/vm_config.conf.yml | 21 - 109 files changed, 22 insertions(+), 5957 deletions(-) delete mode 100644 contrib/README.md delete mode 100644 contrib/ansible.cfg delete mode 100644 contrib/inventory/README.md delete mode 100644 contrib/inventory/group_vars/all delete mode 100644 contrib/inventory/group_vars/kube-master delete mode 100644 contrib/inventory/group_vars/kube-minions-linux delete mode 100644 contrib/inventory/group_vars/kube-minions-windows delete mode 100644 
contrib/inventory/host_vars/README.md delete mode 100644 contrib/inventory/hosts delete mode 100644 contrib/ovn-kubernetes-cluster.yml delete mode 100644 contrib/roles/linux/common/tasks/golang.yml delete mode 100644 contrib/roles/linux/common/tasks/k8s_client_certs.yml delete mode 100644 contrib/roles/linux/common/tasks/kubectl_client.yml delete mode 100644 contrib/roles/linux/common/vars/ubuntu.yml delete mode 100644 contrib/roles/linux/docker/tasks/main.yml delete mode 100644 contrib/roles/linux/docker/tasks/restart_docker.yml delete mode 100644 contrib/roles/linux/docker/vars/ubuntu.yml delete mode 100644 contrib/roles/linux/kubernetes/tasks/deploy_coredns.yml delete mode 100644 contrib/roles/linux/kubernetes/tasks/distribute_binaries.yml delete mode 100644 contrib/roles/linux/kubernetes/tasks/download_k8s_binaries.yml delete mode 100644 contrib/roles/linux/kubernetes/tasks/fetch_k8s_binaries.yml delete mode 100644 contrib/roles/linux/kubernetes/tasks/generate_certs_master.yml delete mode 100644 contrib/roles/linux/kubernetes/tasks/generate_global_vars.yml delete mode 100644 contrib/roles/linux/kubernetes/tasks/init_gateway.yml delete mode 100644 contrib/roles/linux/kubernetes/tasks/main.yml delete mode 100644 contrib/roles/linux/kubernetes/tasks/prepare_master.yml delete mode 100644 contrib/roles/linux/kubernetes/tasks/prepare_minion.yml delete mode 100644 contrib/roles/linux/kubernetes/tasks/set_ip_facts.yml delete mode 100644 contrib/roles/linux/kubernetes/vars/ubuntu.yml delete mode 100644 contrib/roles/linux/openvswitch/tasks/build_install_release_ovs.yml delete mode 100644 contrib/roles/linux/openvswitch/tasks/download_install_packages.yml delete mode 100644 contrib/roles/linux/openvswitch/tasks/main.yml delete mode 100644 contrib/roles/linux/openvswitch/vars/ubuntu.yml delete mode 100644 contrib/roles/linux/ovn-kubernetes/tasks/build_install_bins.yml delete mode 100644 contrib/roles/linux/ovn-kubernetes/tasks/distribute_bins.yml delete mode 100644 
contrib/roles/linux/ovn-kubernetes/tasks/fetch_bins.yml delete mode 100644 contrib/roles/linux/ovn-kubernetes/tasks/main.yml delete mode 100644 contrib/roles/linux/ovn-kubernetes/vars/ubuntu.yml delete mode 100644 contrib/roles/linux/validation/tasks/main.yml delete mode 100644 contrib/roles/linux/validation/tasks/validate_service.yml delete mode 100644 contrib/roles/linux/validation/vars/ubuntu.yml delete mode 100644 contrib/roles/linux/version_check/tasks/main.yml delete mode 100644 contrib/roles/windows/docker/tasks/install_docker.yml delete mode 100644 contrib/roles/windows/docker/tasks/main.yml delete mode 100644 contrib/roles/windows/docker/vars/windows.yml delete mode 100644 contrib/roles/windows/kubernetes/tasks/create_infracontainer.yml delete mode 100644 contrib/roles/windows/kubernetes/tasks/get_ovn_subnet.yml delete mode 100644 contrib/roles/windows/kubernetes/tasks/main.yml delete mode 100644 contrib/roles/windows/kubernetes/tasks/run_minion_init.yml delete mode 100644 contrib/roles/windows/kubernetes/tasks/set_ip_facts.yml delete mode 100644 contrib/roles/windows/kubernetes/tasks/setup_ovs_hns.yml delete mode 100644 contrib/roles/windows/kubernetes/tasks/setup_sdn.yml delete mode 100644 contrib/roles/windows/kubernetes/tasks/start_kubelet.yml delete mode 100644 contrib/roles/windows/kubernetes/vars/windows.yml delete mode 100644 contrib/roles/windows/openvswitch/tasks/install_custom_ovs.yml delete mode 100644 contrib/roles/windows/openvswitch/tasks/install_ovs.yml delete mode 100644 contrib/roles/windows/openvswitch/tasks/main.yml delete mode 100644 contrib/roles/windows/openvswitch/vars/windows.yml delete mode 100644 contrib/roles/windows/ovn-kubernetes/tasks/distribute_binaries.yml delete mode 100644 contrib/roles/windows/ovn-kubernetes/tasks/main.yml delete mode 100644 contrib/roles/windows/ovn-kubernetes/vars/windows.yml delete mode 100644 contrib/roles/windows/requirements/tasks/fetch_utils.yml delete mode 100644 
contrib/roles/windows/requirements/tasks/main.yml delete mode 100644 contrib/roles/windows/requirements/vars/windows.yml delete mode 100644 contrib/roles/windows/validation/tasks/main.yml delete mode 100644 contrib/roles/windows/validation/tasks/validate_service.yml delete mode 100644 contrib/roles/windows/validation/vars/windows.yml delete mode 100644 contrib/roles/windows/version_check/tasks/main.yml delete mode 100644 contrib/roles/windows/version_check/vars/windows.yml delete mode 100644 dist/Makefile delete mode 100644 dist/READMEopenshiftdevpreview.md delete mode 100644 dist/READMEopenshifttechpreview.md delete mode 100644 dist/ansible/hosts delete mode 100644 dist/ansible/ovn-playbook.yaml delete mode 100644 dist/ansible/ovn-uninstall.yaml delete mode 100755 dist/ansible/run-playbook delete mode 100755 dist/ansible/scripts/ovn-debug delete mode 100755 dist/ansible/scripts/ovn-display delete mode 100755 dist/ansible/scripts/ovn-logs delete mode 100644 dist/files/ovn-kubernetes-master.service delete mode 100644 dist/files/ovn-kubernetes-master.sh delete mode 100644 dist/files/ovn-kubernetes-node.service delete mode 100644 dist/files/ovn-kubernetes-node.sh delete mode 100644 dist/files/ovn-kubernetes.sysconfig delete mode 100644 dist/openvswitch-ovn-kubernetes.spec delete mode 100644 test/integration/README.md delete mode 100644 test/integration/ansible.cfg delete mode 100644 test/integration/build/openshift.yml delete mode 100644 test/integration/build/ovnkube.yml delete mode 100644 test/integration/golang.yml delete mode 100644 test/integration/hosts delete mode 100644 test/integration/main.yml delete mode 100644 test/integration/openshift-dind-test.yml delete mode 100644 test/integration/system.yml delete mode 100644 test/integration/vars.yml delete mode 100644 vagrant/README.md delete mode 100644 vagrant/Vagrantfile delete mode 100644 vagrant/ovnkube-rbac.yaml delete mode 100755 vagrant/provisioning/setup-hostnames.sh delete mode 100755 
vagrant/provisioning/setup-master.sh delete mode 100755 vagrant/provisioning/setup-minion.sh delete mode 100644 vagrant/provisioning/vm_config.conf.yml diff --git a/.gitignore b/.gitignore index c5d5559e84..dc039cb2e6 100644 --- a/.gitignore +++ b/.gitignore @@ -1,11 +1,2 @@ -vagrant/pki/ -vagrant/.vagrant/ -vagrant/ubuntu-xenial-16.04-cloudimg-console.log -vagrant/kubeadm.log - -# Ansible specific -contrib/*.retry -contrib/tmp - #IDE (GoLand) specific .idea/ diff --git a/README.md b/README.md index a993365a93..1671b0a3ff 100644 --- a/README.md +++ b/README.md @@ -85,12 +85,3 @@ kubectl create -f $HOME/work/src/github.com/ovn-org/ovn-kubernetes/dist/yaml/ovn NOTE: You don't need kube-proxy for OVN to work. You can delete that from your cluster. - -## Manual installation and Vagrant - -For Windows, (and to understand what daemonsets run internally), please read -[MANUAL.md]. For more advanced use cases too (like SSL, HA of databases, and various -gateway modes supported), please read [MANUAL.md]. - -[INSTALL.rst]: http://docs.openvswitch.org/en/latest/intro/install -[MANUAL.md]: README_MANUAL.md diff --git a/README_MANUAL.md b/README_MANUAL.md index 6636a3a4f1..2b6642cb25 100644 --- a/README_MANUAL.md +++ b/README_MANUAL.md @@ -217,10 +217,6 @@ For more control on the options to ovnkube, please read [config.md] Please read [debugging.md]. -## Vagrant - -There is a vagrant available to bring up a simple cluster at [vagrant]. - ### Overlay mode architecture diagram: The following digaram represents the internal architecture details diff --git a/contrib/README.md b/contrib/README.md deleted file mode 100644 index c508cb22c5..0000000000 --- a/contrib/README.md +++ /dev/null @@ -1,83 +0,0 @@ -# Ansible playbooks to deploy ovn-kubernetes - -The ansible playbooks are able to deploy a kubernetes cluster with -Linux and Windows minion nodes. - -## Ansible requirements - -Minimum required ansible version is `2.4.2.0`. The recommended version is `2.7.2`. 
- -For Linux: Make sure that you are able to SSH into the target nodes without being -asked for the password. You can read more [here](http://docs.ansible.com/ansible/latest/user_guide/intro_getting_started.html). - -For Windows: Follow [this guide](https://docs.ansible.com/ansible/devel/user_guide/windows_setup.html) -to setup the node to be used with ansible. - -#### Verifying the setup - -To verify the setup and that ansible has been successfully configured you can run the following: - -``` -ansible -m setup all -``` - -This will connect to the target hosts and will gather host facts. -If the command succeeds and everything is green, you're good to go with running the playbook. - -## How to use - -Make sure to update first the [inventory](/contrib/inventory) with -details about the nodes. - -To start the playbook, please run the following: -``` -ansible-playbook ovn-kubernetes-cluster.yml -``` - -Currently supported Linux nodes: -- Ubuntu 16.04 and 18.04 - -Currently supported Windows nodes: -- Windows Server 2016 build version 1709 (OS Version 10.0.16299.0) -- Windows Server 2016 build version 1803 (OS Version 10.0.17134.0) -- Windows Server 2019 LTSC and build version 1809 (OS Version 10.0.17763.0) - -## Ports that have to be opened on public clouds when using the playbooks - -The following ports need to be opened if we access the cluster machines via the public address. 
- -#### Kubernetes ports - -- Kubernetes service ports (deployment specific): UDP and TCP `30000 - 32767` -- Kubelet (default port): TCP `10250` -- Kubernetes API: TCP `8080` for HTTP and TCP `443` for HTTPS - -#### Ansible related ports - -- WinRM via HTTPS: TCP `5986` (for HTTP also TCP `5985`) -- SSH: TCP `22` - -### OVN related ports - -- OVN Northbound (NB): TCP `6641` -- OVN Southbound (SB): TCP `6642` - -### OVS related encapsulation ports - -- GENEVE encapsulation (used by default): UDP `6081` -- STT encapsulation (optional encapsulation type, [no special NIC required](https://networkheresy.com/2012/03/04/network-virtualization-encapsulation-and-stateless-tcp-transport-stt/)): TCP `7471` - -### Further useful ports/types - -- Windows RDP Port: 3389 (TCP) -- ICMP: useful for debugging - -## Work in progress - -- Support for hybrid cluster with master/minion nodes on different cloud providers. - -- Different Linux versions support (currently only Ubuntu 16.04 and 18.04 supported) - -### Known issues - -- Windows containers do not support IPv6 at the moment. You can read more [here](https://docs.microsoft.com/en-us/virtualization/windowscontainers/container-networking/architecture#unsupported-features-and-network-options) diff --git a/contrib/ansible.cfg b/contrib/ansible.cfg deleted file mode 100644 index f8fc6cdba7..0000000000 --- a/contrib/ansible.cfg +++ /dev/null @@ -1,2 +0,0 @@ -[defaults] -inventory = inventory diff --git a/contrib/inventory/README.md b/contrib/inventory/README.md deleted file mode 100644 index 8eed8f1630..0000000000 --- a/contrib/inventory/README.md +++ /dev/null @@ -1,12 +0,0 @@ -# Ansible inventory - -Make sure to update the [hosts](/contrib/inventory/hosts) file with the nodes details. - -Ensure that [group_vars](/contrib/inventory/group_vars) contain the right details to connect to the target servers. 
- -For Linux nodes make sure that the username is set up correctly in variable ``ansible_ssh_user`` inside file [kube-master](/contrib/inventory/group_vars/kube-master). -Check the same for [kube-minions-linux](/contrib/inventory/group_vars/kube-minions-linux). - -For Windows nodes, check the login credentials stored in [kube-minions-windows](/contrib/inventory/group_vars/kube-minions-windows). Make sure that ``ansible_user`` and ``ansible_password`` match to your target Windows servers. - -Optional, in case you want to define different ssh users or login credentials for different minion nodes, please look into [host_vars](/contrib/inventory/host_vars). diff --git a/contrib/inventory/group_vars/all b/contrib/inventory/group_vars/all deleted file mode 100644 index 1b3a5dbb88..0000000000 --- a/contrib/inventory/group_vars/all +++ /dev/null @@ -1,23 +0,0 @@ -# the master node will overwrite this value -master: false -# the minion nodes will overwrite this value -minion: false - -# Timeout variable for OVS/OVN commands in seconds -TIMEOUT : 15 - -CLUSTER_SUBNET: "10.0.0.0/16" -MASTER_INTERNAL_IP: "10.0.0.2" # Should always be the second IP of CLUSTER_SUBNET - -SERVICE_CLUSTER_IP_RANGE: "10.1.0.0/24" -K8S_DNS_DOMAIN: "cluster.local" -K8S_DNS_SERVICE_IP: "10.1.0.10" -K8S_API_SERVICE_IP: "10.1.0.1" - -# set this to true to build ovn-kubernetes only on master and then -# distribute on the minion nodes. 
Set this to false if you have different -# Linux versions -distribute_binaries: true - -# the place where the temporary binaries and files are stored -ansible_tmp_dir: "{{playbook_dir}}/tmp" diff --git a/contrib/inventory/group_vars/kube-master b/contrib/inventory/group_vars/kube-master deleted file mode 100644 index 8a49460701..0000000000 --- a/contrib/inventory/group_vars/kube-master +++ /dev/null @@ -1,7 +0,0 @@ -ansible_ssh_user: ubuntu -docker_bin_dir: /usr/bin - -# this will be the master node -master: true -minion: false -controller: true diff --git a/contrib/inventory/group_vars/kube-minions-linux b/contrib/inventory/group_vars/kube-minions-linux deleted file mode 100644 index 64146098ea..0000000000 --- a/contrib/inventory/group_vars/kube-minions-linux +++ /dev/null @@ -1,6 +0,0 @@ -ansible_ssh_user: ubuntu -docker_bin_dir: /usr/bin - -master: false -minion: true -controller: false diff --git a/contrib/inventory/group_vars/kube-minions-windows b/contrib/inventory/group_vars/kube-minions-windows deleted file mode 100644 index 6f400e1cf7..0000000000 --- a/contrib/inventory/group_vars/kube-minions-windows +++ /dev/null @@ -1,22 +0,0 @@ -# username and password of the nodes (if the nodes have different passwords, please look into host_vars folder) -ansible_user: Administrator -ansible_password: Passw0rd -# This is the default port for HTTPS used by winrm -ansible_port: 5986 -ansible_connection: winrm -# The following is necessary for Python 2.7.9+ (or any older Python that has backported SSLContext, eg, Python 2.7.5 on RHEL7) when using default WinRM self-signed certificates: -ansible_winrm_server_cert_validation: ignore - -master: false -minion: true -controller: false - -docker_version: 18.03.1-ee-3 -# install_path: where kubernetes binaries and CNI plugin will be installed -install_path: C:\kubernetes -install_beta_ovs: true -service_wrapper_link: https://github.com/cloudbase/OpenStackService/releases/download/v0.1/service-wrapper-sdk10-x64.zip - 
-windows1709: 10.0.16299.0 -windows1803: 10.0.17134.0 -windows2019: 10.0.17763.0 diff --git a/contrib/inventory/host_vars/README.md b/contrib/inventory/host_vars/README.md deleted file mode 100644 index 934eb506bf..0000000000 --- a/contrib/inventory/host_vars/README.md +++ /dev/null @@ -1,33 +0,0 @@ -# host variables - -To assign variables only for some specific groups use this directory. -Create a file here with the node name and then put the vars inside. - -Example: create the file "node1" (is the node name from hosts file) -``` -GCE: true -``` - -This place is the most useful when using different Windows nodes with -different passwords. For example 2 Windows Server nodes can have the -same username and different passwords. - -Just create a file for the nodes that have different password from the -one present in group_vars. Example for "node5": -``` -ansible_user: Administrator -ansible_password: different_password -``` - -If you want a preferred network interface for the SDN setup, use the -following configuration option: -``` -sdn_preferred_nic_name: "Ethernet 2" -``` - -By default, all the Kubernetes minions will be configured as gateway nodes. -If you don't want a particular node to be a gateway, use the following -configuration option: -``` -init_gateway: false -``` diff --git a/contrib/inventory/hosts b/contrib/inventory/hosts deleted file mode 100644 index 7de95b9ff8..0000000000 --- a/contrib/inventory/hosts +++ /dev/null @@ -1,10 +0,0 @@ -[kube-master] -node1 - -[kube-minions-linux] -node2 -node3 - -[kube-minions-windows] -node4 -node5 diff --git a/contrib/ovn-kubernetes-cluster.yml b/contrib/ovn-kubernetes-cluster.yml deleted file mode 100644 index fe7bd854a9..0000000000 --- a/contrib/ovn-kubernetes-cluster.yml +++ /dev/null @@ -1,158 +0,0 @@ ---- -- hosts: localhost - any_errors_fatal: true - tasks: - # This directory is used only for caching files on the Ansible host - # in order to speed up the deployment time on subsequent playbook runs. 
- - name: ovn-kubernetes | Create local Ansible temporary directory - file: - path: "{{ ansible_tmp_dir }}" - state: directory - -- hosts: kube-master - any_errors_fatal: true - gather_facts: true - become: true - tasks: - - import_role: - name: linux/version_check - - import_role: - name: linux/docker - - import_role: - name: linux/openvswitch - vars: - force_ovs_reinstall: false # clean reinstall OVS if it exists - # make sure to give the link to prebuilt OVS packages in - # roles/linux/openvswitch/vars/ubuntu.yml if this var is set to true - ovs_install_prebuilt_packages: false - - import_role: - name: linux/ovn-kubernetes - - import_role: - name: linux/kubernetes - -- hosts: kube-minions-linux - any_errors_fatal: true - gather_facts: true - become: true - tasks: - - import_role: - name: linux/version_check - - import_role: - name: linux/docker - - import_role: - name: linux/openvswitch - vars: - force_ovs_reinstall: false # clean reinstall OVS if it exists - # make sure to give the link to prebuilt OVS packages in - # roles/linux/openvswitch/vars/ubuntu.yml if this var is set to true - ovs_install_prebuilt_packages: false - - import_role: - name: linux/ovn-kubernetes - - import_role: - name: linux/kubernetes - -# TODO(alinbalutoiu): Move the hosts file update above once ansible fixes -# lineinfile concurrency issues https://github.com/ansible/ansible/issues/30413 -- hosts: kube-minions-linux - any_errors_fatal: true - gather_facts: true - become: true - serial: 1 - tasks: - - include_tasks: roles/linux/kubernetes/tasks/set_ip_facts.yml - - name: Ensure /etc/hosts is updated on kube-master - lineinfile: - path: /etc/hosts - regexp: ' {{ ansible_hostname | lower }}$' - line: '{{ host_public_ip }} {{ ansible_hostname | lower }}' - delegate_to: "{{ item }}" - with_items: "{{ groups['kube-master'] }}" - -- hosts: kube-minions-windows - remote_user: Administrator - gather_facts: true - become_method: runas - any_errors_fatal: true - tasks: - - set_fact: - 
windows_container_tag: 1709 - when: ansible_kernel == windows1709 - - set_fact: - windows_container_tag: 1803 - when: ansible_kernel == windows1803 - - set_fact: - windows_container_tag: 1809 - when: ansible_kernel == windows2019 - - import_role: - name: windows/version_check - - import_role: - name: windows/requirements - - import_role: - name: windows/docker - - import_role: - name: windows/openvswitch - vars: - # This is useful when using custom OVS MSI, it should also provide the link - # to the certificates which need to be added in certstore in order to be able - # to install the MSI in unattended mode - # Setting install_beta_ovs to false will install the latest stable OVS - install_custom_ovs: true - custom_ovs_link: "https://cloudbase.it/downloads/openvswitch-hyperv-installer-beta-2.10.msi" - # This is useful for dev purposes and when using custom MSI which is not signed - bcdedit_needed: false - # ovs_certs_link: "" # link to certificates for OVS if required - - import_role: - name: windows/ovn-kubernetes - - import_role: - name: windows/kubernetes - -# TODO(alinbalutoiu): Move the hosts file update above once ansible fixes -# lineinfile concurrency issues https://github.com/ansible/ansible/issues/30413 -- hosts: kube-minions-windows - remote_user: Administrator - gather_facts: true - become_method: runas - any_errors_fatal: true - serial: 1 - tasks: - - include_vars: roles/windows/kubernetes/vars/{{ ansible_os_family|lower }}.yml - - include_tasks: roles/windows/kubernetes/tasks/set_ip_facts.yml - - name: Ensure /etc/hosts is updated on kube-master - become: true - become_method: sudo - lineinfile: - path: /etc/hosts - regexp: ' {{ ansible_hostname | lower }}$' - line: '{{ host_public_ip }} {{ ansible_hostname | lower }}' - delegate_to: "{{ item }}" - with_items: "{{ groups['kube-master'] }}" - -# TODO(ionutbalutoiu): -# Remove below workaround when this is fixed: https://github.com/ovn-org/ovn-kubernetes/issues/531 -# Right now, the CoreDNS pods will 
never reach "running" state. We need to kill the existing pods and -# new ones will be successfully created. -- hosts: kube-master - any_errors_fatal: true - gather_facts: true - become: true - tasks: - - name: Kill the kube-dns pods - run_once: true - shell: kubectl delete pod --force --grace-period=0 --namespace kube-system --selector k8s-app=kube-dns - -- hosts: kube-master:kube-minions-linux - any_errors_fatal: true - gather_facts: true - become: true - tasks: - - import_role: - name: linux/validation - -- hosts: kube-minions-windows - remote_user: Administrator - gather_facts: true - become_method: runas - any_errors_fatal: true - tasks: - - import_role: - name: windows/validation diff --git a/contrib/roles/linux/common/tasks/golang.yml b/contrib/roles/linux/common/tasks/golang.yml deleted file mode 100644 index c9a97d009d..0000000000 --- a/contrib/roles/linux/common/tasks/golang.yml +++ /dev/null @@ -1,58 +0,0 @@ ---- -- name: golang | include vars - include_vars: "{{ ansible_distribution | lower }}.yml" - -- name: golang | Detecting golang architecture - set_fact: - golang_arch: "amd64" - when: ansible_architecture == "x86_64" - -- name: golang | Detecting golang architecture - set_fact: - golang_arch: "386" - when: ansible_architecture != "x86_64" - -- name: golang | Assume golang needs installation - set_fact: - install_golang: true - -- name: golang | Get the golang bin file stat - stat: - path: /usr/local/bin/go - register: golang_bin - -- name: golang | Get the golang version (if golang is installed) - block: - - name: golang | Get the golang version - shell: /usr/local/bin/go version - register: cmd_output - - - name: golang | Do not require golang installation if already installed with the proper version - set_fact: - install_golang: false - when: (cmd_output.stdout | trim) == ("go version go" ~ golang.version ~ " linux/" ~ golang_arch) - when: golang_bin.stat.exists - -- name: golang | Install golang {{ golang.version }} - block: - - name: golang | 
get go{{ golang.version }}.{{ansible_system | lower}}-{{golang_arch}}.tar.gz - get_url: - url: "https://dl.google.com/go/go{{ golang.version }}.{{ ansible_system | lower }}-{{ golang_arch }}.tar.gz" - dest: "/tmp/go{{ golang.version }}.tar.gz" - force_basic_auth: yes - mode: 0755 - timeout: 30 - retries: 3 - - - name: golang | Install golang - unarchive: - src: "/tmp/go{{ golang.version }}.tar.gz" - dest: "/usr/local/" - remote_src: yes - - - name: golang | Create symlink in /usr/local/bin - file: - state: link - src: /usr/local/go/bin/go - dest: /usr/local/bin/go - when: install_golang diff --git a/contrib/roles/linux/common/tasks/k8s_client_certs.yml b/contrib/roles/linux/common/tasks/k8s_client_certs.yml deleted file mode 100644 index fcd8ef1d3f..0000000000 --- a/contrib/roles/linux/common/tasks/k8s_client_certs.yml +++ /dev/null @@ -1,127 +0,0 @@ ---- -- name: Kubectl | Fail if playbook is executed on K8s master - fail: - msg: "The k8s_client_certs.yml playbook is not meant to run on K8s master" - when: master - -- name: K8s Client Certificates | Include vars - include_vars: "{{ ansible_distribution | lower }}.yml" - -- name: K8s Client Certificates | Include global vars - include_vars: "{{ ansible_tmp_dir }}/generated_global_vars.yml" - -- name: K8s Client Certificates | Assume client certs are not missing - set_fact: - k8s_client_certs_missing: false - -- name: K8s Client Certificates | Verify if all the client certs are present - block: - - name: K8s Client Certificates | Get the client cert file stat - stat: - path: "{{ k8s_client_certs.directory }}/node.pem" - register: client_cert - - - name: K8s Client Certificates | Get the client cert key file stat - stat: - path: "{{ k8s_client_certs.directory }}/node-key.pem" - register: client_cert_key - - - name: K8s Client Certificates | Set the "k8s_client_certs_missing" flag if either cert or cert key is missing - set_fact: - k8s_client_certs_missing: true - when: not client_cert.stat.exists or not 
client_cert_key.stat.exists - -- name: K8s Client Certificates | Generate client certificates - block: - - name: K8s Client Certificates | Create k8s certs directory - file: - path: "{{ k8s_client_certs.directory }}" - state: directory - mode: "u=rwx,g=rx,o=rx" - - - name: K8s Client Certificates | Copy the CA cert and key - copy: - src: "{{ansible_tmp_dir}}/k8s_{{item}}" - dest: "{{ k8s_client_certs.directory }}/{{item}}" - with_items: - - ca.pem - - ca-key.pem - - - name: K8s Client Certificates | Remove the tmp certs directory if exists - file: - path: "{{ k8s_client_certs.tmp_generate_path }}" - state: absent - - - name: K8s Client Certificates | Create the tmp certs directory - file: - path: "{{ k8s_client_certs.tmp_generate_path }}" - state: directory - mode: "u=rwx,g=rx,o=rx" - - - name: K8s Client Certificates | Create the make-certs bash script - lineinfile: - path: "{{ k8s_client_certs.tmp_generate_path }}/make-certs" - mode: "u=rwx,g=rx,o=rx" - create: yes - line: | - #!/bin/bash - - - set -o errexit - set -o nounset - set -o pipefail - - cert_group=kube-cert - cert_dir="{{ k8s_client_certs.directory }}" - - pem_ca=$cert_dir/ca.pem - pem_ca_key=$cert_dir/ca-key.pem - - pem_node=$cert_dir/node.pem - pem_node_key=$cert_dir/node-key.pem - pem_node_csr=$cert_dir/node-csr.pem - - # Make sure cert group exists - [ $(getent group $cert_group) ] || groupadd -r $cert_group - - # Make sure perms are right - chgrp $cert_group $pem_ca $pem_ca_key - chmod 600 $pem_ca_key - chmod 660 $pem_ca - - # Generate TLS artifacts - openssl genrsa -out $pem_node_key 2048 - openssl req -new -key $pem_node_key -out $pem_node_csr -subj "/CN={{ ansible_hostname }}" -config openssl.cnf - openssl x509 -req -in $pem_node_csr -CA $pem_ca -CAkey $pem_ca_key -CAcreateserial -out $pem_node -days 365 -extensions v3_req -extfile openssl.cnf - - # Make server certs accessible to apiserver. 
- chgrp $cert_group $pem_node $pem_node_key - chmod 600 $pem_node_key - chmod 660 $pem_node $pem_ca - - - name: K8s Client Certificates | Create the openssl conf for certs generation - lineinfile: - path: "{{ k8s_client_certs.tmp_generate_path }}/openssl.cnf" - create: yes - line: | - [req] - req_extensions = v3_req - distinguished_name = req_distinguished_name - [req_distinguished_name] - [ v3_req ] - basicConstraints = CA:FALSE - keyUsage = nonRepudiation, digitalSignature, keyEncipherment - subjectAltName = @alt_names - [alt_names] - IP.1 = {{ ansible_default_ipv4.address }} - - - name: K8s Client Certificates | Generate k8s client certificates for the current machine - shell: ./make-certs - args: - executable: /bin/bash - chdir: "{{ k8s_client_certs.tmp_generate_path }}" - - - name: K8s Client Certificates | Remove tmp dir for certs generation - file: - path: "{{ k8s_client_certs.tmp_generate_path }}" - state: absent - when: k8s_client_certs_missing or certs_generated diff --git a/contrib/roles/linux/common/tasks/kubectl_client.yml b/contrib/roles/linux/common/tasks/kubectl_client.yml deleted file mode 100644 index b25ecbd3c0..0000000000 --- a/contrib/roles/linux/common/tasks/kubectl_client.yml +++ /dev/null @@ -1,45 +0,0 @@ ---- -- name: Kubectl | Fail if playbook is executed on K8s master - fail: - msg: "The kubectl_client.yml playbook is not meant to run on K8s master" - when: master - -- name: Kubectl | Generate K8s client certs for kubectl - import_tasks: ./k8s_client_certs.yml - -- name: Kubectl | Include global vars - include_vars: "{{ ansible_tmp_dir }}/generated_global_vars.yml" - -- name: Kubectl | Fail if MASTER_IP global var is not set - fail: - msg: "The global config MASTER_IP is not set" - when: MASTER_IP is not defined - -- name: Kubectl | Get the Kubectl file stat - stat: - path: /usr/bin/kubectl - register: kubectl_bin - -- name: Kubectl | Setup kubectl binary - block: - - name: Kubectl | Get the tmp Kubectl file stat - stat: - path: "{{ 
ansible_tmp_dir }}/kubectl" - register: tmp_kubectl_bin - - - name: Kubectl | Copy the binary from the Ansible machine - copy: - src: "{{ ansible_tmp_dir }}/kubectl" - dest: /usr/bin/kubectl - owner: root - group: root - mode: "u=rwx,g=rx,o=rx" - when: not kubectl_bin.stat.exists - -- name: Kubectl | Setting kubectl context - shell: | - set -o errexit - kubectl config set-cluster default-cluster --server=https://{{ MASTER_IP }} --certificate-authority=/etc/kubernetes/tls/ca.pem - kubectl config set-credentials default-admin --certificate-authority=/etc/kubernetes/tls/ca.pem --client-key=/etc/kubernetes/tls/node-key.pem --client-certificate=/etc/kubernetes/tls/node.pem - kubectl config set-context local --cluster=default-cluster --user=default-admin - kubectl config use-context local diff --git a/contrib/roles/linux/common/vars/ubuntu.yml b/contrib/roles/linux/common/vars/ubuntu.yml deleted file mode 100644 index 201649c6c4..0000000000 --- a/contrib/roles/linux/common/vars/ubuntu.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -golang: - version: "{{ golang_version | default('1.11.4') }}" - -k8s_client_certs: - tmp_generate_path: /tmp/k8s_certs - directory: /etc/kubernetes/tls diff --git a/contrib/roles/linux/docker/tasks/main.yml b/contrib/roles/linux/docker/tasks/main.yml deleted file mode 100644 index a11316b5e9..0000000000 --- a/contrib/roles/linux/docker/tasks/main.yml +++ /dev/null @@ -1,40 +0,0 @@ ---- -- name: Docker | include global vars for minions - include_vars: "{{ansible_tmp_dir}}/generated_global_vars.yml" - when: not master - -- name: Docker | include vars - include_vars: "{{ ansible_distribution|lower }}.yml" - -- name: Docker | ensure docker repository public key is installed - action: "{{ docker_repo_key_info.pkg_key }}" - args: - id: "{{item}}" - url: "{{docker_repo_key_info.url}}" - state: present - register: keyserver_task_result - until: keyserver_task_result|succeeded - retries: 3 - with_items: "{{ docker_repo_key_info.repo_keys }}" - -- name: Docker 
| ensure docker repository is enabled - action: "{{ docker_repo_info.pkg_repo }}" - args: - repo: "{{item}}" - state: present - with_items: "{{ docker_repo_info.repos }}" - -- name: Docker | ensure docker packages are installed - action: "{{ docker_package_info.pkg_mgr }}" - args: - pkg: "{{item.name}}" - force: "{{item.force|default(omit)}}" - state: present - register: docker_task_result - until: docker_task_result|succeeded - retries: 3 - with_items: "{{ docker_package_info.pkgs }}" - -- name: Docker | restart docker - include_tasks: ./restart_docker.yml - when: docker_task_result.changed diff --git a/contrib/roles/linux/docker/tasks/restart_docker.yml b/contrib/roles/linux/docker/tasks/restart_docker.yml deleted file mode 100644 index bcf48bd0df..0000000000 --- a/contrib/roles/linux/docker/tasks/restart_docker.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- name: Docker | reload systemd - shell: systemctl daemon-reload - -- name: Docker | reload docker.socket - service: - name: docker.socket - state: restarted - -- name: Docker | reload docker - service: - name: docker - state: restarted - -- name: Docker | pause while Docker restarts - pause: - seconds: 10 - prompt: "Waiting for docker restart" - -- name: Docker | wait for docker - command: "{{ docker_bin_dir }}/docker images" - register: docker_ready - retries: 10 - delay: 5 - until: docker_ready.rc == 0 diff --git a/contrib/roles/linux/docker/vars/ubuntu.yml b/contrib/roles/linux/docker/vars/ubuntu.yml deleted file mode 100644 index 30b3650203..0000000000 --- a/contrib/roles/linux/docker/vars/ubuntu.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -docker_package_info: - pkg_mgr: apt - pkgs: - - name: "docker-ce" - force: yes - -docker_repo_key_info: - pkg_key: apt_key - url: https://download.docker.com/linux/ubuntu/gpg - repo_keys: - - 9DC858229FC7DD38854AE2D88D81803C0EBFCD88 - -docker_repo_info: - pkg_repo: apt_repository - repos: - - > - deb https://download.docker.com/linux/{{ ansible_distribution|lower }} - {{ 
ansible_distribution_release|lower }} - stable diff --git a/contrib/roles/linux/kubernetes/tasks/deploy_coredns.yml b/contrib/roles/linux/kubernetes/tasks/deploy_coredns.yml deleted file mode 100644 index c1a2bb8675..0000000000 --- a/contrib/roles/linux/kubernetes/tasks/deploy_coredns.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- name: CoreDNS | check if CoreDNS is cloned - stat: - path: "{{ kubernetes_dns.tmp_path }}/coredns-checkout" - register: coredns_repo - -- name: CoreDNS | git clone - git: - repo: "{{ kubernetes_dns.coredns_git_url }}" - dest: "{{ kubernetes_dns.tmp_path }}/coredns-checkout" - version: "{{ kubernetes_dns.branch }}" - when: not coredns_repo.stat.exists - -- name: CoreDNS | generate yaml and apply it - shell: | - set -o errexit - cd {{ kubernetes_dns.tmp_path }}/coredns-checkout/kubernetes - kubectl delete --namespace=kube-system deployment coredns --wait --ignore-not-found=true - ./deploy.sh -i {{K8S_DNS_SERVICE_IP}} -d {{K8S_DNS_DOMAIN}} -s coredns.yaml.sed | kubectl apply --wait -f - diff --git a/contrib/roles/linux/kubernetes/tasks/distribute_binaries.yml b/contrib/roles/linux/kubernetes/tasks/distribute_binaries.yml deleted file mode 100644 index 03cafb988e..0000000000 --- a/contrib/roles/linux/kubernetes/tasks/distribute_binaries.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -- name: Kubernetes bins | distribute Linux minion binaries - copy: - src: "{{ansible_tmp_dir}}/{{item}}" - dest: "{{ kubernetes_binaries_info.install_path }}/{{item}}" - owner: root - group: root - mode: 0755 - with_items: - - "{{kubernetes_binaries.linux_common}}" - - "{{kubernetes_binaries.linux_minion}}" - when: minion - -- name: Kubernetes bins | distribute Linux master binaries - copy: - src: "{{ansible_tmp_dir}}/{{item}}" - dest: "{{ kubernetes_binaries_info.install_path }}/{{item}}" - owner: root - group: root - mode: 0755 - with_items: - - "{{kubernetes_binaries.linux_common}}" - - "{{kubernetes_binaries.linux_master}}" - when: master diff --git 
a/contrib/roles/linux/kubernetes/tasks/download_k8s_binaries.yml b/contrib/roles/linux/kubernetes/tasks/download_k8s_binaries.yml deleted file mode 100644 index 35ead79752..0000000000 --- a/contrib/roles/linux/kubernetes/tasks/download_k8s_binaries.yml +++ /dev/null @@ -1,98 +0,0 @@ ---- -- name: Kubernetes bins | expecting all bins to be downloaded already - set_fact: - linux_kube_bins_missing: false - windows_kube_bins_missing: false - -- name: Kubernetes bins | checking if linux kubernetes bins have been downloaded - action: stat path="{{ kubernetes_binaries_info.tmp_download_path }}/kubernetes/server/kubernetes/server/bin/{{item}}" - register: linux_kube_stat_bins_exists - with_items: - - "{{kubernetes_binaries.linux_common}}" - - "{{kubernetes_binaries.linux_master}}" - - "{{kubernetes_binaries.linux_minion}}" - -- name: Kubernetes bins | checking linux bins - set_fact: - linux_kube_bins_missing: true - with_items: - - "{{linux_kube_stat_bins_exists.results}}" - loop_control: - label: "{{item.item}}" - when: not item.stat.exists - -- name: Kubernetes bins | Download linux bins - block: - - name: Kubernetes bins | create temp directory - file: - path: "{{ kubernetes_binaries_info.tmp_download_path }}" - state: directory - mode: 0755 - - - name: Kubernetes bins | get kubernetes.tar.gz archive - get_url: - url: "{{ kubernetes_binaries_info.kubernetes_targz_link }}" - dest: "{{ kubernetes_binaries_info.tmp_download_path }}/kubernetes.tar.gz" - force_basic_auth: yes - timeout: 30 - retries: 3 - - - name: Kubernetes bins | unarchive tar.gz - unarchive: - src: "{{ kubernetes_binaries_info.tmp_download_path }}/kubernetes.tar.gz" - dest: "{{ kubernetes_binaries_info.tmp_download_path }}" - remote_src: yes - - - name: Kubernetes bins | download kubernetes binaries - action: shell yes | "{{ kubernetes_binaries_info.tmp_download_path }}/kubernetes/cluster/get-kube-binaries.sh" - - - name: Kubernetes bins | unarchive binaries - unarchive: - src: "{{ 
kubernetes_binaries_info.tmp_download_path }}/kubernetes/server/kubernetes-server-linux-amd64.tar.gz" - dest: "{{ kubernetes_binaries_info.tmp_download_path }}/kubernetes/server" - remote_src: yes - - - name: Kubernetes bins | install Linux master binaries - copy: - src: "{{ kubernetes_binaries_info.tmp_download_path }}/kubernetes/server/kubernetes/server/bin/{{item}}" - dest: "{{ kubernetes_binaries_info.install_path }}/{{item}}" - owner: root - group: root - mode: 0755 - remote_src: yes - with_items: - - "{{kubernetes_binaries.linux_common}}" - - "{{kubernetes_binaries.linux_master}}" - when: linux_kube_bins_missing - -- name: Kubernetes bins | checking if windows kubernetes bins have been downloaded - action: stat path="{{ kubernetes_binaries_info.tmp_download_path }}/kubernetes/node/bin/{{item}}" - register: windows_kube_stat_bins_exists - with_items: - - "{{kubernetes_binaries.windows}}" - -- name: Kubernetes bins | checking windows bins - set_fact: - windows_kube_bins_missing: true - with_items: - - "{{windows_kube_stat_bins_exists.results}}" - loop_control: - label: "{{item.item}}" - when: not item.stat.exists - -- name: Kubernetes bins | Download Windows bins - block: - - name: Kubernetes bins | downloading Windows binaries - get_url: - url: "{{ kubernetes_binaries_info.windows_bins_link }}" - dest: "{{ kubernetes_binaries_info.tmp_download_path }}/winbins.tar.gz" - force_basic_auth: yes - timeout: 30 - retries: 3 - - - name: Kubernetes bins | unarchive Windows binaries - unarchive: - src: "{{ kubernetes_binaries_info.tmp_download_path }}/winbins.tar.gz" - dest: "{{ kubernetes_binaries_info.tmp_download_path }}/" - remote_src: yes - when: windows_kube_bins_missing diff --git a/contrib/roles/linux/kubernetes/tasks/fetch_k8s_binaries.yml b/contrib/roles/linux/kubernetes/tasks/fetch_k8s_binaries.yml deleted file mode 100644 index a563e521bd..0000000000 --- a/contrib/roles/linux/kubernetes/tasks/fetch_k8s_binaries.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- 
name: Kubernetes bins | fetch Linux binaries to the ansible host - synchronize: - mode: pull - src: "{{kubernetes_binaries_info.tmp_download_path}}/kubernetes/server/kubernetes/server/bin/{{item}}" - dest: "{{ansible_tmp_dir}}/{{item}}" - use_ssh_args: yes - with_items: - - "{{kubernetes_binaries.linux_common}}" - - "{{kubernetes_binaries.linux_master}}" - - "{{kubernetes_binaries.linux_minion}}" - -- name: Kubernetes bins | fetch Windows binaries to the ansible host - synchronize: - mode: pull - src: "{{kubernetes_binaries_info.tmp_download_path}}/kubernetes/node/bin/{{item}}" - dest: "{{ansible_tmp_dir}}/{{item}}" - use_ssh_args: yes - with_items: - - "{{kubernetes_binaries.windows}}" diff --git a/contrib/roles/linux/kubernetes/tasks/generate_certs_master.yml b/contrib/roles/linux/kubernetes/tasks/generate_certs_master.yml deleted file mode 100644 index 0e6e413634..0000000000 --- a/contrib/roles/linux/kubernetes/tasks/generate_certs_master.yml +++ /dev/null @@ -1,98 +0,0 @@ ---- -- name: Kubernetes Certs | create temp folder - file: - path: "{{ kubernetes_certificates.tmp_generate_path }}" - state: directory - mode: 0755 - -- name: Kubernetes Certs | create make-certs - lineinfile: - path: "{{ kubernetes_certificates.tmp_generate_path }}/make-certs" - create: yes - mode: 0755 - line: | - #!/bin/bash - - - set -o errexit - set -o nounset - set -o pipefail - - cert_group=kube-cert - cert_dir={{ kubernetes_certificates.directory }} - - pem_ca=$cert_dir/ca.pem - pem_ca_key=$cert_dir/ca-key.pem - pem_server=$cert_dir/apiserver.pem - pem_server_key=$cert_dir/apiserver-key.pem - pem_server_csr=$cert_dir/apiserver-csr.pem - - pem_admin=$cert_dir/admin.pem - pem_admin_key=$cert_dir/admin-key.pem - pem_admin_csr=$cert_dir/admin-csr.pem - - # Make sure cert group exists - [ $(getent group $cert_group) ] || groupadd -r $cert_group - - # Generate TLS artifacts - rm -rf $cert_dir - mkdir -p $cert_dir - - openssl genrsa -out $pem_ca_key 2048 - openssl req -x509 -new -nodes -key 
$pem_ca_key -days 10000 -out $pem_ca -subj "/CN=kube-ca" - - openssl genrsa -out $pem_server_key 2048 - openssl req -new -key $pem_server_key -out $pem_server_csr -subj "/CN={{ansible_hostname}}" -config openssl.cnf - openssl x509 -req -in $pem_server_csr -CA $pem_ca -CAkey $pem_ca_key -CAcreateserial -out $pem_server -days 365 -extensions v3_req -extfile openssl.cnf - - # Make server certs accessible to apiserver. - chgrp $cert_group $pem_ca $pem_ca_key $pem_server $pem_server_key - chmod 600 $pem_ca_key $pem_server_key - chmod 660 $pem_ca $pem_server - - # Generate admin - openssl genrsa -out $pem_admin_key 2048 - openssl req -new -key $pem_admin_key -out $pem_admin_csr -subj "/CN=kube-admin" - openssl x509 -req -in $pem_admin_csr -CA $pem_ca -CAkey $pem_ca_key -CAcreateserial -out $pem_admin -days 365 - -- name: Kubernetes Certs | create openssl.cnf - lineinfile: - path: "{{ kubernetes_certificates.tmp_generate_path }}/openssl.cnf" - create: yes - line: | - [req] - req_extensions = v3_req - distinguished_name = req_distinguished_name - [req_distinguished_name] - [ v3_req ] - basicConstraints = CA:FALSE - keyUsage = nonRepudiation, digitalSignature, keyEncipherment - subjectAltName = @alt_names - [alt_names] - DNS.1 = kubernetes - DNS.2 = kubernetes.default - DNS.3 = kubernetes.default.svc - DNS.4 = kubernetes.default.svc.{{ kubernetes_cluster_info.K8S_DNS_DOMAIN }} - IP.1 = {{ kubernetes_cluster_info.K8S_API_SERVICE_IP }} - IP.2 = {{ host_internal_ip }} - -- name: Kubernetes Certs | remove existing kubernetes certs - file: - path: /etc/kubernetes - state: absent - -- name: Kubernetes Certs | create kubernetes certs folder - file: - path: /etc/kubernetes - state: directory - mode: 0755 - -- name: Kubernetes Certs | Generating Certificates - shell: | - set -o errexit - cd {{ kubernetes_certificates.tmp_generate_path }} - ./make-certs - -- name: Kubernetes Certs | Removing temp directory - file: - path: "{{ kubernetes_certificates.tmp_generate_path }}" - state: 
absent diff --git a/contrib/roles/linux/kubernetes/tasks/generate_global_vars.yml b/contrib/roles/linux/kubernetes/tasks/generate_global_vars.yml deleted file mode 100644 index f9639563c3..0000000000 --- a/contrib/roles/linux/kubernetes/tasks/generate_global_vars.yml +++ /dev/null @@ -1,39 +0,0 @@ ---- -- name: Kubernetes global vars | Fetch Token - shell: kubectl describe secret $(kubectl get secrets | grep default | cut -f1 -d ' ') | grep -E '^token' | cut -f2 -d':' | tr -d '\t' - register: fetch_token - changed_when: false - -- name: Kubernetes global vars | Register Token - set_fact: - TOKEN: "{{fetch_token.stdout | trim}}" - -- name: Kubernetes global vars | Checking Token - fail: - msg: Could not fetch Token, make sure kubectl can run without any errors on the node {{ansible_hostname}} - when: TOKEN == "" - -- name: Kubernetes global vars | Fetch global variables - blockinfile: - path: "/tmp/generated_global_vars.yml" - create: yes - block: | - --- - MASTER_IP: {{host_public_ip}} - MASTER_INTERNAL_IP_OVN: {{MASTER_INTERNAL_IP}} - MASTER_INTERNAL_IP: {{host_internal_ip}} - TOKEN: {{TOKEN}} - CLUSTER_SUBNET: {{CLUSTER_SUBNET}} - SERVICE_CLUSTER_IP_RANGE: {{SERVICE_CLUSTER_IP_RANGE}} - K8S_DNS_DOMAIN: {{K8S_DNS_DOMAIN}} - K8S_DNS_SERVICE_IP: {{K8S_DNS_SERVICE_IP}} - K8S_API_SERVICE_IP: {{K8S_API_SERVICE_IP}} - certs_generated: {{certs_generated}} - changed_when: false - -- name: Kubernetes global vars | Fetch vars - synchronize: - mode: pull - src: /tmp/generated_global_vars.yml - dest: "{{ansible_tmp_dir}}/generated_global_vars.yml" - use_ssh_args: yes diff --git a/contrib/roles/linux/kubernetes/tasks/init_gateway.yml b/contrib/roles/linux/kubernetes/tasks/init_gateway.yml deleted file mode 100644 index bddb1f0e8d..0000000000 --- a/contrib/roles/linux/kubernetes/tasks/init_gateway.yml +++ /dev/null @@ -1,49 +0,0 @@ ---- -# The following makes sure that the second time we enter init_gateway we actually -# fetch the physical interface name and not the bridge name 
-# TODO: interfaces that start with 'br' are not supported -- name: OVN Gateway | Physical interface - set_fact: - physical_interface_name: "{{ interface_name }}" - when: interface_name[0:2] != "br" - -- name: OVN Gateway | Physical interface - set_fact: - physical_interface_name: "{{ interface_name[2:] }}" - when: interface_name[0:2] == "br" - -- name: OVN Gateway | Create service OVN Kube Gateway - blockinfile: - path: /etc/systemd/system/ovn-kubernetes-node.service - create: yes - block: | - [Unit] - Description=OVN Gateway Helper - Documentation=https://github.com/openvswitch/ovn-kubernetes - [Service] - ExecStart=/usr/bin/ovnkube \ - -init-node "{{ ansible_hostname }}" \ - -cluster-subnets "{{ kubernetes_cluster_info.CLUSTER_SUBNET }}" \ - -k8s-token {{TOKEN}} \ - -k8s-cacert /etc/openvswitch/k8s-ca.crt \ - -k8s-apiserver "http://{{ kubernetes_cluster_info.MASTER_IP }}:8080" \ - -k8s-service-cidr "{{ kubernetes_cluster_info.SERVICE_CLUSTER_IP_RANGE }}" \ - -nodeport \ - -init-gateways \ - -gateway-interface={{physical_interface_name}} \ - -gateway-nexthop="{{gateway_next_hop}}" \ - -nb-address tcp://{{ kubernetes_cluster_info.MASTER_IP }}:6641 \ - -sb-address tcp://{{ kubernetes_cluster_info.MASTER_IP }}:6642 - Restart=on-failure - RestartSec=10 - WorkingDirectory=/root/ - [Install] - WantedBy=multi-user.target - -- name: OVN Gateway | start OVN Kube Gateway Helper - service: - name: "ovn-kubernetes-node" - enabled: yes - state: restarted - daemon_reload: yes - changed_when: false diff --git a/contrib/roles/linux/kubernetes/tasks/main.yml b/contrib/roles/linux/kubernetes/tasks/main.yml deleted file mode 100644 index eaffbb3523..0000000000 --- a/contrib/roles/linux/kubernetes/tasks/main.yml +++ /dev/null @@ -1,64 +0,0 @@ ---- -- name: Kubernetes | include global vars for minions - include_vars: "{{ansible_tmp_dir}}/generated_global_vars.yml" - when: not master - -- name: Kubernetes | include vars - include_vars: "{{ ansible_distribution|lower }}.yml" - -- name: 
Kubernetes | fetch network details - include_tasks: set_ip_facts.yml - -- debug: - msg: "{{ansible_hostname}}" - -- name: Kubernetes | Expecting all binaries to be already present - set_fact: - binaries_missing: false - -- name: Kubernetes | Checking if binaries are already present on ansible machine - local_action: stat path="{{ansible_tmp_dir}}/{{item}}" - register: stat_bins_exists - with_items: - - "{{kubernetes_binaries.linux_common}}" - - "{{kubernetes_binaries.linux_master}}" - - "{{kubernetes_binaries.linux_minion}}" - - "{{kubernetes_binaries.windows}}" - -- name: Kubernetes | Checking all binaries - set_fact: - binaries_missing: true - with_items: - - "{{stat_bins_exists.results}}" - loop_control: - label: "{{item.item}}" - when: not item.stat.exists - -- debug: - msg: "Binaries are missing: {{binaries_missing}}" - -- name: Kubernetes | Get Kubernetes binaries - include_tasks: ./download_k8s_binaries.yml - when: binaries_missing - -- name: Kubernetes | Fetch Kubernetes binaries to the ansible host - include_tasks: ./fetch_k8s_binaries.yml - when: binaries_missing - -- name: Kubernetes | Distribute binaries - include_tasks: ./distribute_binaries.yml - -- name: Kubernetes | Prepare master - include_tasks: ./prepare_master.yml - when: master - -- name: Kubernetes | Prepare minion - include_tasks: ./prepare_minion.yml - when: minion - -- name: Kubernetes | Deploy CoreDNS - include_tasks: ./deploy_coredns.yml - when: master - -- include_tasks: generate_global_vars.yml - when: master diff --git a/contrib/roles/linux/kubernetes/tasks/prepare_master.yml b/contrib/roles/linux/kubernetes/tasks/prepare_master.yml deleted file mode 100644 index 9ef925495e..0000000000 --- a/contrib/roles/linux/kubernetes/tasks/prepare_master.yml +++ /dev/null @@ -1,346 +0,0 @@ ---- -- name: Kubernetes Master | register kube-apiserver - blockinfile: - path: /etc/systemd/system/kube-apiserver.service - create: yes - block: | - [Unit] - Description=Kubernetes API Server - 
Documentation=https://github.com/GoogleCloudPlatform/kubernetes - [Service] - ExecStart=/usr/bin/kube-apiserver \ - --bind-address=0.0.0.0 \ - --service-cluster-ip-range={{ kubernetes_cluster_info.SERVICE_CLUSTER_IP_RANGE }} \ - --address=0.0.0.0 \ - --etcd-servers=http://127.0.0.1:2379 \ - --v=2 \ - --insecure-bind-address={{ host_public_ip }} \ - --allow-privileged=true \ - --anonymous-auth=false \ - --secure-port=443 \ - --advertise-address={{ host_internal_ip }} \ - --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \ - --tls-cert-file={{ kubernetes_certificates.directory }}/apiserver.pem \ - --tls-private-key-file={{ kubernetes_certificates.directory }}/apiserver-key.pem \ - --client-ca-file={{ kubernetes_certificates.directory }}/ca.pem \ - --service-account-key-file={{ kubernetes_certificates.directory }}/apiserver-key.pem \ - --runtime-config=extensions/v1beta1=true,extensions/v1beta1/networkpolicies=true,batch/v2alpha1=true - Restart=on-failure - RestartSec=10 - WorkingDirectory=/root/ - [Install] - WantedBy=multi-user.target - -- name: Kubernetes Master | register kube-controller-manager - blockinfile: - path: /etc/systemd/system/kube-controller-manager.service - create: yes - block: | - [Unit] - Description=Kubernetes Controller Manager - Documentation=https://github.com/GoogleCloudPlatform/kubernetes - [Service] - ExecStart=/usr/bin/kube-controller-manager \ - --master={{ host_public_ip }}:8080 \ - --v=2 \ - --cluster-cidr={{ kubernetes_cluster_info.CLUSTER_SUBNET }} \ - --service-account-private-key-file={{ kubernetes_certificates.directory }}/apiserver-key.pem \ - --root-ca-file={{ kubernetes_certificates.directory }}/ca.pem \ - --cluster-signing-cert-file={{ kubernetes_certificates.directory }}/ca.pem \ - --cluster-signing-key-file={{ kubernetes_certificates.directory }}/ca-key.pem - Restart=on-failure - RestartSec=10 - [Install] - WantedBy=multi-user.target - -- name: Kubernetes Master | register 
kube-scheduler - blockinfile: - path: /etc/systemd/system/kube-scheduler.service - create: yes - block: | - [Unit] - Description=Kubernetes Scheduler - Documentation=https://github.com/GoogleCloudPlatform/kubernetes - [Service] - ExecStart=/usr/bin/kube-scheduler \ - --master={{ host_public_ip }}:8080 \ - --v=2 \ - --leader-elect=true - Restart=on-failure - RestartSec=10 - [Install] - WantedBy=multi-user.target - -- name: Kubernetes Master | register etcd3 - blockinfile: - path: /etc/systemd/system/etcd3.service - create: yes - block: | - [Unit] - Description=Etcd store - Documentation=https://github.com/coreos/etcd - [Service] - ExecStartPre=-/usr/bin/docker rm -f etcd - ExecStart=/usr/bin/docker run --name etcd \ - --net=host \ - --volume /var/etcd:/var/etcd \ - quay.io/coreos/etcd:v{{ kubernetes_cluster_info.ETCD_VERSION }} \ - /usr/local/bin/etcd \ - --data-dir /var/etcd/data - Restart=on-failure - RestartSec=10 - [Install] - WantedBy=multi-user.target - -- name: Kubernetes Master | Assume that the certs were not generated now - set_fact: - certs_generated: false - -- name: Kubernetes Master | Check if all the certs are present - block: - - stat: - path: "{{ kubernetes_certificates.directory }}/ca.pem" - register: ca_cert - - - stat: - path: "{{ kubernetes_certificates.directory }}/ca-key.pem" - register: ca_cert_key - - - stat: - path: "{{ kubernetes_certificates.directory }}/apiserver.pem" - register: apiserver_cert - - - stat: - path: "{{ kubernetes_certificates.directory }}/apiserver-key.pem" - register: apiserver_cert_key - - - stat: - path: "{{ kubernetes_certificates.directory }}/admin.pem" - register: admin_cert - - - stat: - path: "{{ kubernetes_certificates.directory }}/admin-key.pem" - register: admin_cert_key - -- name: Kubernetes Master | Generate certs if needed - block: - - include_tasks: ./generate_certs_master.yml - - - set_fact: - certs_generated: true - when: (not ca_cert.stat.exists or - not ca_cert_key.stat.exists or - not 
apiserver_cert.stat.exists or - not apiserver_cert_key.stat.exists or - not admin_cert.stat.exists or - not admin_cert_key.stat.exists) - -- name: Kubernetes Master | Fetching certificates on the ansible host - synchronize: - mode: pull - src: "{{ kubernetes_certificates.directory }}/{{ item }}" - dest: "{{ ansible_tmp_dir }}/k8s_{{ item }}" - use_ssh_args: yes - with_items: - - ca.pem - - ca-key.pem - -- name: Kubernetes Master | restart all services - service: - name: "{{item}}" - enabled: yes - state: restarted - daemon_reload: yes - with_items: - - etcd3 - - kube-apiserver - - kube-controller-manager - - kube-scheduler - changed_when: false - -- name: Kubernetes Master | Setting kubectl context - shell: | - set -o errexit - kubectl config set-cluster default-cluster --server=https://{{ host_public_ip }} --certificate-authority={{ kubernetes_certificates.directory }}/ca.pem - kubectl config set-credentials default-admin --certificate-authority={{ kubernetes_certificates.directory }}/ca.pem --client-key={{ kubernetes_certificates.directory }}/admin-key.pem --client-certificate={{ kubernetes_certificates.directory }}/admin.pem - kubectl config set-context local --cluster=default-cluster --user=default-admin - kubectl config use-context local - changed_when: false - -- name: Kubernetes Master | Waiting for kube-apiserver to start - block: - - name: Kubernetes Master | Running kubectl version - shell: kubectl version - changed_when: false - register: kubeapiserver_ready - until: kubeapiserver_ready.rc == 0 - retries: 30 - delay: 3 - rescue: - - fail: - msg: Kubernetes API server could not reach ready status, check the service log - -- name: Kubernetes Master | Fetch Token - shell: kubectl describe secret $(kubectl get secrets | grep default | cut -f1 -d ' ') | grep -E '^token' | cut -f2 -d':' | tr -d '\t' - changed_when: false - register: fetch_token - until: fetch_token.stdout | trim != "" - retries: 10 - delay: 3 - -- name: Kubernetes Master | Register Token - 
set_fact: - TOKEN: "{{fetch_token.stdout | trim}}" - -- name: Kubernetes Master | Checking Token - fail: - msg: Could not fetch Token, make sure kubectl can connect to the API server - when: TOKEN == "" - -- name: Kubernetes Master | Create service OVN Kube - blockinfile: - path: /etc/systemd/system/ovn-kubernetes-master.service - create: yes - block: | - [Unit] - Description=OVN watches the Kubernetes API - Documentation=https://github.com/ovn-org/ovn-kubernetes#watchers-on-master-node - [Service] - ExecStart=/usr/bin/ovnkube \ - -init-master "{{ ansible_hostname }}" \ - -cluster-subnets "{{ kubernetes_cluster_info.CLUSTER_SUBNET }}" \ - -k8s-service-cidr "{{ kubernetes_cluster_info.SERVICE_CLUSTER_IP_RANGE }}" \ - -nodeport \ - -k8s-token {{TOKEN}} \ - -k8s-cacert /etc/openvswitch/k8s-ca.crt \ - -k8s-apiserver "https://{{ host_public_ip }}" \ - -nb-address tcp://127.0.0.1:6641 \ - -sb-address tcp://127.0.0.1:6642 - Restart=on-failure - RestartSec=10 - WorkingDirectory=/root/ - [Install] - WantedBy=multi-user.target - -- name: Kubernetes Master | Ensure /etc/hosts is updated - lineinfile: - path: /etc/hosts - line: "{{ host_public_ip }} {{ ansible_hostname }}" - -- name: Kubernetes Master | Enabling OVN to listen on 6641 and 6642 - shell: | - set -o errexit - ovn-nbctl --timeout={{TIMEOUT}} set-connection ptcp:6641 - ovn-sbctl --timeout={{TIMEOUT}} set-connection ptcp:6642 - changed_when: false - -- name: Kubernetes Master | Setting OVN external_ids - shell: | - ovs-vsctl --timeout={{TIMEOUT}} set Open_vSwitch . external_ids:ovn-remote="tcp:{{ host_public_ip }}:6642" \ - external_ids:ovn-nb="tcp:{{ host_public_ip }}:6641" \ - external_ids:ovn-encap-ip="{{ host_internal_ip }}" \ - external_ids:ovn-encap-type="geneve" - changed_when: false - -# TODO: enable this for future GCE support in the playbooks -# - name: Kubernetes Master | Setting OVN external_ids when GCE -# shell: | -# ovs-vsctl set Open_vSwitch . 
external_ids:ovn-encap-ip="{{ host_public_ip }}" -# when: GCE - -- name: Kubernetes Master | Prepare OVN certs - shell: | - set -o errexit - ovs-vsctl --timeout={{TIMEOUT}} set Open_vSwitch . \ - external_ids:k8s-api-server="https://{{ host_public_ip }}" \ - external_ids:k8s-api-token="{{TOKEN}}" - ln -fs {{ kubernetes_certificates.directory }}/ca.pem /etc/openvswitch/k8s-ca.crt - changed_when: false - -- name: Kubernetes Master | restart ovn-kubernetes-master - service: - name: "ovn-kubernetes-master" - enabled: yes - state: restarted - daemon_reload: yes - changed_when: false - -- name: Kubernetes Master | Create test yamls - blockinfile: - path: /root/apache-pod.yaml - create: yes - marker: "# {mark} Ansible automatic example generation" - block: | - apiVersion: v1 - kind: Pod - metadata: - name: apachetwin - labels: - name: webserver - spec: - containers: - - name: apachetwin - image: fedora/apache - nodeSelector: - kubernetes.io/os: linux -- name: Create test yamls - blockinfile: - path: /root/apache-e-w.yaml - create: yes - marker: "# {mark} Ansible automatic example generation" - block: | - apiVersion: v1 - kind: Service - metadata: - labels: - name: apacheservice - role: service - name: apacheservice - spec: - ports: - - port: 8800 - targetPort: 80 - protocol: TCP - name: tcp - selector: - name: webserver -- name: Create test yamls - blockinfile: - path: /root/apache-n-s.yaml - create: yes - marker: "# {mark} Ansible automatic example generation" - block: | - apiVersion: v1 - kind: Service - metadata: - labels: - name: apacheexternal - role: service - name: apacheexternal - spec: - ports: - - port: 8800 - targetPort: 80 - protocol: TCP - name: tcp - selector: - name: webserver - type: NodePort -- name: Create test yamls - blockinfile: - path: /root/nginx-pod.yaml - create: yes - marker: "# {mark} Ansible automatic example generation" - block: | - apiVersion: v1 - kind: Pod - metadata: - name: nginxtwin - labels: - name: webserver - spec: - containers: - - 
name: nginxtwin - image: nginx - nodeSelector: - kubernetes.io/os: linux diff --git a/contrib/roles/linux/kubernetes/tasks/prepare_minion.yml b/contrib/roles/linux/kubernetes/tasks/prepare_minion.yml deleted file mode 100644 index d483f2f1d7..0000000000 --- a/contrib/roles/linux/kubernetes/tasks/prepare_minion.yml +++ /dev/null @@ -1,164 +0,0 @@ ---- -- name: Kubernetes Minion | Setting OVN external_ids - shell: | - ovs-vsctl --timeout={{TIMEOUT}} set Open_vSwitch . external_ids:ovn-remote="tcp:{{ kubernetes_cluster_info.MASTER_IP }}:6642" \ - external_ids:ovn-nb="tcp:{{ kubernetes_cluster_info.MASTER_IP }}:6641" \ - external_ids:ovn-encap-ip="{{ host_public_ip }}" \ - external_ids:ovn-encap-type="geneve" - changed_when: false - -- name: Kubernetes Minion | Generate the client certificates for the current minion - import_role: - name: linux/common - tasks_from: k8s_client_certs - -- name: Kubernetes Minion | Creating kubeconfig - blockinfile: - path: /etc/kubernetes/kubeconfig.yaml - create: yes - block: | - apiVersion: v1 - kind: Config - clusters: - - cluster: - certificate-authority: {{ kubernetes_certificates.directory }}/ca.pem - server: https://{{ kubernetes_cluster_info.MASTER_IP }} - name: local - users: - - name: kubelet - user: - client-certificate: {{ kubernetes_certificates.directory }}/node.pem - client-key: {{ kubernetes_certificates.directory }}/node-key.pem - contexts: - - context: - cluster: local - user: kubelet - name: kubelet-context - current-context: kubelet-context - -- name: Kubernetes Minion | Create service kubelet - blockinfile: - path: /etc/systemd/system/kubelet.service - create: yes - block: | - [Unit] - Description=Kubelet Server - Documentation=https://github.com/GoogleCloudPlatform/kubernetes - [Service] - ExecStart=/usr/bin/kubelet \ - --network-plugin=cni \ - --allow-privileged=true \ - --hostname-override={{ ansible_hostname }} \ - --cluster-dns={{ kubernetes_cluster_info.K8S_DNS_SERVICE_IP }} \ - --cluster-domain={{ 
kubernetes_cluster_info.K8S_DNS_DOMAIN }} \ - --cni-bin-dir=/opt/cni/bin \ - --cni-conf-dir=/etc/cni/net.d \ - --kubeconfig=/etc/kubernetes/kubeconfig.yaml \ - --tls-cert-file={{ kubernetes_certificates.directory }}/node.pem \ - --tls-private-key-file={{ kubernetes_certificates.directory }}/node-key.pem \ - --fail-swap-on=false - Restart=on-failure - RestartSec=10 - WorkingDirectory=/root/ - [Install] - WantedBy=multi-user.target - -- name: Kubernetes Minion | start kubelet - service: - name: "kubelet" - enabled: yes - state: restarted - daemon_reload: yes - changed_when: false - -- name: Kubernetes Minion | Setup kubectl - import_role: - name: linux/common - tasks_from: kubectl_client - -- name: Kubernetes Minion | Prepare OVN certs - shell: | - set -o errexit - ovs-vsctl --timeout={{TIMEOUT}} set Open_vSwitch . \ - external_ids:k8s-api-server="http://{{ host_public_ip }}:8080" \ - external_ids:k8s-api-token="{{TOKEN}}" - ln -fs {{ kubernetes_certificates.directory }}/ca.pem /etc/openvswitch/k8s-ca.crt - changed_when: false - -- name: Kubernetes Minion | Ensure /etc/hosts is updated - lineinfile: - path: /etc/hosts - regexp: ' {{ ansible_hostname | lower }}$' - line: '{{ host_public_ip }} {{ ansible_hostname | lower }}' - -- name: Kubernetes Minion | Create CNI binaries folder - file: - path: "/opt/cni/bin" - state: directory - mode: 0755 - -- name: Kubernetes Minion | get cni archive - get_url: - url: "{{kubernetes_binaries_info.cni_linux_download_link}}" - dest: "/opt/cni/bin/cni.tgz" - force_basic_auth: yes - timeout: 30 - retries: 3 - -- name: Kubernetes Minion | unarchive tar.gz - unarchive: - src: "/opt/cni/bin/cni.tgz" - dest: "/opt/cni/bin/" - remote_src: yes - -- name: Kubernetes Minion | Set default "init_gateway" fact if not already defined - block: - - set_fact: - init_gateway: true - when: ansible_distribution_version == '16.04' - - set_fact: - init_gateway: false - when: ansible_distribution_version == '18.04' - when: init_gateway is not defined - -- 
fail: - msg: "Gateway not supported yet on Ubuntu 18.04" - when: init_gateway == true and ansible_distribution_version == '18.04' - -- name: Kubernetes Minion | Setup OVN Kube service - block: - - name: Kubernetes Minion | Create OVN Kube systemd service file - blockinfile: - path: /etc/systemd/system/ovn-kubernetes-node.service - create: yes - block: | - [Unit] - Description=OVN Kube Systemd Daemon - Documentation=https://github.com/ovn-org/ovn-kubernetes - [Service] - ExecStart=/usr/bin/ovnkube \ - --init-node "{{ ansible_hostname }}" \ - --cluster-subnets "{{ kubernetes_cluster_info.CLUSTER_SUBNET }}" \ - --k8s-token {{TOKEN}} \ - --k8s-cacert /etc/openvswitch/k8s-ca.crt \ - --k8s-apiserver "http://{{ kubernetes_cluster_info.MASTER_IP }}:8080" \ - --k8s-service-cidr "{{ kubernetes_cluster_info.SERVICE_CLUSTER_IP_RANGE }}" \ - --nb-address tcp://{{ kubernetes_cluster_info.MASTER_IP }}:6641 \ - --sb-address tcp://{{ kubernetes_cluster_info.MASTER_IP }}:6642 - Restart=on-failure - RestartSec=10 - WorkingDirectory=/root/ - [Install] - WantedBy=multi-user.target - - - name: Kubernetes Minion | Start OVN Kube service - service: - name: "ovn-kubernetes-node" - enabled: yes - state: restarted - daemon_reload: yes - when: not init_gateway - -- name: Kubernetes Minion | Minion init with gateway - include_tasks: ./init_gateway.yml - when: init_gateway diff --git a/contrib/roles/linux/kubernetes/tasks/set_ip_facts.yml b/contrib/roles/linux/kubernetes/tasks/set_ip_facts.yml deleted file mode 100644 index c396691f78..0000000000 --- a/contrib/roles/linux/kubernetes/tasks/set_ip_facts.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -# Gather IP facts from ipify.org -- name: get my public IP - ipify_facts: - retries: 3 - -- name: set internal IP - set_fact: - host_internal_ip: "{{ ansible_default_ipv4.address }}" - -# TODO: enable this for future GCE support in the playbooks -# - name: set public IP -# set_fact: -# host_public_ip: "{{ ipify_public_ip }}" -# when: GCE - -- name: set 
public IP - set_fact: - host_public_ip: "{{ host_internal_ip }}" - # when: not GCE - -- name: set interface name - set_fact: - interface_name: "{{ ansible_default_ipv4.alias }}" - -- name: set interface gateway - set_fact: - interface_gateway: "{{ ansible_default_ipv4.gateway }}" - -- name: set gateway next hop - set_fact: - gateway_next_hop: "{{ ansible_default_ipv4.gateway }}" - -- debug: var=ansible_all_ipv4_addresses -- debug: var=host_internal_ip -- debug: var=host_public_ip -- debug: var=gateway_next_hop diff --git a/contrib/roles/linux/kubernetes/vars/ubuntu.yml b/contrib/roles/linux/kubernetes/vars/ubuntu.yml deleted file mode 100644 index e520e1a134..0000000000 --- a/contrib/roles/linux/kubernetes/vars/ubuntu.yml +++ /dev/null @@ -1,41 +0,0 @@ ---- -kubernetes_binaries_info: - install_path: /usr/bin - tmp_download_path: /tmp/k8s_bins - # Download link from here https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.10.md#downloads-for-v1100 - kubernetes_targz_link: https://dl.k8s.io/v1.12.0/kubernetes.tar.gz - # Download link from here https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.10.md#node-binaries - windows_bins_link: https://dl.k8s.io/v1.12.0/kubernetes-node-windows-amd64.tar.gz - cni_linux_download_link: https://github.com/containernetworking/cni/releases/download/v0.3.0/cni-v0.3.0.tgz - -kubernetes_cluster_info: - MASTER_IP: "{{MASTER_IP | default('the master node does not need this')}}" - CLUSTER_SUBNET: "{{CLUSTER_SUBNET}}" - MASTER_INTERNAL_IP: "{{MASTER_INTERNAL_IP}}" - SERVICE_CLUSTER_IP_RANGE: "{{SERVICE_CLUSTER_IP_RANGE}}" - K8S_DNS_DOMAIN: "{{K8S_DNS_DOMAIN}}" - K8S_DNS_SERVICE_IP: "{{K8S_DNS_SERVICE_IP}}" - K8S_API_SERVICE_IP: "{{K8S_API_SERVICE_IP}}" - ETCD_VERSION: "3.1.2" - -kubernetes_certificates: - tmp_generate_path: /tmp/k8s_certs - directory: /etc/kubernetes/tls - -kubernetes_binaries: - linux_common: - - kubectl - linux_master: - - kube-apiserver - - kube-scheduler - - kube-controller-manager - 
linux_minion: - - kubelet - windows: - - kubelet.exe - - kubectl.exe - -kubernetes_dns: - coredns_git_url: https://github.com/coredns/deployment - tmp_path: /tmp - branch: master diff --git a/contrib/roles/linux/openvswitch/tasks/build_install_release_ovs.yml b/contrib/roles/linux/openvswitch/tasks/build_install_release_ovs.yml deleted file mode 100644 index 4724e2f81d..0000000000 --- a/contrib/roles/linux/openvswitch/tasks/build_install_release_ovs.yml +++ /dev/null @@ -1,108 +0,0 @@ ---- -- name: OVS | ensure requirements are installed - action: "{{ ovs_package_info.pkg_mgr }}" - args: - pkg: "{{item.name}}" - force: "{{item.force|default(omit)}}" - state: present - register: ovs_task_result - until: ovs_task_result|succeeded - retries: 3 - with_items: "{{ ovs_package_info.pkgs }}" - -- name: OVS | get release build - get_url: - url: "{{ ovs_info.release_link }}" - dest: "{{ ovs_info.build_path }}/{{ ovs_info.release_name }}.tar.gz" - force_basic_auth: yes - mode: 0755 - timeout: 30 - retries: 3 - -- name: OVS | Remove directory if it exists - file: - path: "{{ ovs_info.build_path }}/{{ ovs_info.release_name }}" - state: absent - -- name: OVS | unarchive tar.gz - unarchive: - src: "{{ ovs_info.build_path }}/{{ ovs_info.release_name }}.tar.gz" - dest: "{{ ovs_info.build_path }}" - remote_src: yes - -- name: OVS | Build deb packages - shell: | - set -e - dpkg-checkbuilddeps - fakeroot debian/rules clean - DEB_BUILD_OPTIONS='nocheck parallel=4' fakeroot debian/rules binary - args: - chdir: "{{ ovs_info.build_path }}/{{ ovs_info.release_name }}" - -- name: OVS | install datapath debs - apt: deb="{{ ovs_info.build_path }}/{{item}}_{{ ovs_info.pkg_build_version }}_all.deb" - with_items: - - openvswitch-datapath-dkms - - python-openvswitch - -- name: OVS | add modules - lineinfile: - path: "{{ ovs_info.modules_file_path }}" - create: yes - line: | - openvswitch - vport_stt - vport_geneve - -- name: OVS | reload modules - action: shell depmod -a - -- name: OVS | remove 
modprobe - modprobe: - name: "{{item}}" - state: absent - with_items: - - openvswitch - - vport-geneve - - vport-stt - -- name: OVS | add modprobe - modprobe: - name: "{{item}}" - state: present - with_items: - - openvswitch - - vport-geneve - - vport-stt - -- name: OVS | install debs - apt: deb="{{ ovs_info.build_path }}/{{item}}_{{ ovs_info.pkg_build_version }}_amd64.deb" - with_items: - - libopenvswitch - - openvswitch-common - - openvswitch-switch - - ovn-common - - ovn-host - -- name: OVS | install controller deb if applicable - apt: deb="{{ ovs_info.build_path }}/ovn-central_{{ ovs_info.pkg_build_version }}_amd64.deb" - when: controller - -# TODO: .deb packages are not creating services, we need this for ovnkube -# when it tries to start/restart services -- name: OVS | Mock OVS services - lineinfile: - path: "{{item}}" - create: yes - line: | - [Unit] - Description=openvswitch - Documentation=https://github.com/openvswitch/ovs - [Service] - ExecStart=/bin/bash sleep 1 - [Install] - WantedBy=multi-user.target - with_items: - - /etc/systemd/system/openvswitch.service - - /etc/systemd/system/ovn-controller.service - - /etc/systemd/system/ovn-northd.service diff --git a/contrib/roles/linux/openvswitch/tasks/download_install_packages.yml b/contrib/roles/linux/openvswitch/tasks/download_install_packages.yml deleted file mode 100644 index f0cee0ab3c..0000000000 --- a/contrib/roles/linux/openvswitch/tasks/download_install_packages.yml +++ /dev/null @@ -1,83 +0,0 @@ ---- -# Download debs: -- name: OVS | downloading debs - get_url: - url: "{{ ovs_info.debs_targz_link }}" - dest: "{{ ovs_info.prebuilt_packages_download_path }}/ovs_debs.tar.gz" - force_basic_auth: yes - mode: 0755 - timeout: 30 - retries: 3 - -- name: OVS | unarchive ovs_debs.tar.gz - unarchive: - src: "{{ ovs_info.prebuilt_packages_download_path }}/ovs_debs.tar.gz" - dest: "{{ ovs_info.prebuilt_packages_download_path }}" - remote_src: yes - -- name: OVS | install datapath debs - apt: deb="{{ 
ovs_info.prebuilt_packages_download_path }}/{{item}}_{{ ovs_info.prebuilt_version }}_all.deb" - with_items: - - openvswitch-datapath-dkms - - python-openvswitch - -- name: OVS | add modules - lineinfile: - path: "{{ ovs_info.modules_file_path }}" - create: yes - line: | - openvswitch - vport_stt - vport_geneve - -- name: OVS | reload modules - action: shell depmod -a - -- name: OVS | remove modprobe - modprobe: - name: "{{item}}" - state: absent - with_items: - - openvswitch - - vport-geneve - - vport-stt - -- name: OVS | add modprobe - modprobe: - name: "{{item}}" - state: present - with_items: - - openvswitch - - vport-geneve - - vport-stt - -- name: OVS | install debs - apt: deb="{{ ovs_info.prebuilt_packages_download_path }}/{{item}}_{{ ovs_info.prebuilt_version }}_amd64.deb" - with_items: - - libopenvswitch - - openvswitch-common - - openvswitch-switch - - ovn-common - - ovn-host - -- name: OVS | install controller deb if applicable - apt: deb="{{ ovs_info.prebuilt_packages_download_path }}/ovn-central_{{ ovs_info.prebuilt_version }}_amd64.deb" - when: controller - -# TODO: make this better -- name: OVS | Mock OVS services - lineinfile: - path: "{{item}}" - create: yes - line: | - [Unit] - Description=openvswitch - Documentation=https://github.com/openvswitch/ovs - [Service] - ExecStart=/bin/bash sleep 1 - [Install] - WantedBy=multi-user.target - with_items: - - /etc/systemd/system/openvswitch.service - - /etc/systemd/system/ovn-controller.service - - /etc/systemd/system/ovn-northd.service diff --git a/contrib/roles/linux/openvswitch/tasks/main.yml b/contrib/roles/linux/openvswitch/tasks/main.yml deleted file mode 100644 index 4e36300bdc..0000000000 --- a/contrib/roles/linux/openvswitch/tasks/main.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- name: OVS | include global vars for minions - include_vars: "{{ansible_tmp_dir}}/generated_global_vars.yml" - when: not master - -- name: OVS | include vars - include_vars: "{{ ansible_distribution|lower }}.yml" - -- stat: - 
path: "{{ ovs_info.service_path }}" - register: ovs_service - -- name: OVS | Install OVS - block: - - name: OVS | build and install release ovs - include_tasks: ./build_install_release_ovs.yml - when: not ovs_info.install_prebuilt_packages - - - name: OVS | Install OVS from prebuilt packages - include_tasks: ./download_install_packages.yml - when: ovs_info.install_prebuilt_packages - when: not ovs_service.stat.exists diff --git a/contrib/roles/linux/openvswitch/vars/ubuntu.yml b/contrib/roles/linux/openvswitch/vars/ubuntu.yml deleted file mode 100644 index f6050c2f7d..0000000000 --- a/contrib/roles/linux/openvswitch/vars/ubuntu.yml +++ /dev/null @@ -1,40 +0,0 @@ ---- -ovs_package_info: - pkg_mgr: apt - pkgs: - - name: "build-essential" - - name: "fakeroot" - - name: "debhelper" - - name: "autoconf" - - name: "automake" - - name: "bzip2" - - name: "libssl-dev" - - name: "libunbound-dev" - - name: "openssl" - - name: "graphviz" - - name: "python-all" - - name: "procps" - - name: "python-dev" - - name: "python-setuptools" - - name: "python-twisted-conch" - - name: "libtool" - - name: "git" - - name: "dh-autoreconf" - - name: "dkms" - - name: "unzip" - - name: "linux-headers-{{ ansible_kernel }}" - -ovs_info: - git_url: https://github.com/openvswitch/ovs.git - build_path: /tmp - modules_file_path: /etc/modules-load.d/modules.conf - branch: branch-2.10 - service_path: /etc/systemd/system/openvswitch.service - release_link: https://www.openvswitch.org/releases/openvswitch-2.10.2.tar.gz - release_name: openvswitch-2.10.2 - pkg_build_version: 2.10.2-1 - # Prebuilt packages info and download link - install_prebuilt_packages: "{{ovs_install_prebuilt_packages | default(false)}}" - prebuilt_packages_download_path: /tmp - prebuilt_version: "2.10.2-1" - debs_targz_link: "replace_me" diff --git a/contrib/roles/linux/ovn-kubernetes/tasks/build_install_bins.yml b/contrib/roles/linux/ovn-kubernetes/tasks/build_install_bins.yml deleted file mode 100644 index e884af389d..0000000000 
--- a/contrib/roles/linux/ovn-kubernetes/tasks/build_install_bins.yml +++ /dev/null @@ -1,74 +0,0 @@ ---- -- name: ovn-kubernetes | check if ovn-kubernetes is cloned - stat: - path: "{{ ovn_kubernetes_info.build_path }}/ovn-kubernetes-checkout" - register: ovnkubernetes_repo - -- name: ovn-kubernetes | git clone - git: - repo: "{{ ovn_kubernetes_info.git_url }}" - dest: "{{ ovn_kubernetes_info.build_path }}/ovn-kubernetes-checkout" - version: "{{ ovn_kubernetes_info.branch }}" - when: not ovnkubernetes_repo.stat.exists - -- name: ovn-kubernetes | expecting all bins to be built already - set_fact: - linux_bins_missing: false - windows_bins_missing: false - -- name: ovn-kubernetes | checking if linux bins have been built - action: stat path="{{ovn_kubernetes_info.build_path}}/ovn-kubernetes-checkout/go-controller/_output/go/bin/{{item}}" - register: linux_stat_bins_exists - with_items: - - "{{ovn_kubernetes_binaries.linux}}" - -- name: ovn-kubernetes | checking linux bins - set_fact: - linux_bins_missing: true - with_items: - - "{{linux_stat_bins_exists.results}}" - loop_control: - label: "{{item.item}}" - when: not item.stat.exists - -- name: ovn-kubernetes | checking if windows bins have been built - action: stat path="{{ovn_kubernetes_info.build_path}}/ovn-kubernetes-checkout/go-controller/_output/go/windows/{{item}}" - register: windows_stat_bins_exists - with_items: - - "{{ovn_kubernetes_binaries.windows}}" - -- name: ovn-kubernetes | checking windows bins - set_fact: - windows_bins_missing: true - with_items: - - "{{windows_stat_bins_exists.results}}" - loop_control: - label: "{{item.item}}" - when: not item.stat.exists - -- name: ovn-kubernetes | build binaries - block: - - name: ovn-kubernetes | Install golang dependency - import_role: - name: linux/common - tasks_from: golang - - - name: ovn-kubernetes | compile sources - make clean - make: - chdir: "{{ovn_kubernetes_info.build_path}}/ovn-kubernetes-checkout/go-controller" - target: clean - - - name: 
ovn-kubernetes | compile sources - make linux - make: - chdir: "{{ovn_kubernetes_info.build_path}}/ovn-kubernetes-checkout/go-controller" - - - name: ovn-kubernetes | compile sources - make windows - make: - chdir: "{{ovn_kubernetes_info.build_path}}/ovn-kubernetes-checkout/go-controller" - target: windows - - - name: ovn-kubernetes | compile sources - make install - make: - chdir: "{{ovn_kubernetes_info.build_path}}/ovn-kubernetes-checkout/go-controller" - target: install - when: windows_bins_missing or linux_bins_missing diff --git a/contrib/roles/linux/ovn-kubernetes/tasks/distribute_bins.yml b/contrib/roles/linux/ovn-kubernetes/tasks/distribute_bins.yml deleted file mode 100644 index 067f9647a8..0000000000 --- a/contrib/roles/linux/ovn-kubernetes/tasks/distribute_bins.yml +++ /dev/null @@ -1,30 +0,0 @@ ---- -#TODO: change this if /opt/cni/bin becomes a variable for kubelet -- name: ovn-kubernetes | create cni binary folder /opt/cni/bin - file: - path: /opt/cni/bin - state: directory - -- name: ovn-kubernetes | get ovn-k8s-cni-overlay - copy: - src: "{{ansible_tmp_dir}}/ovn-k8s-cni-overlay" - dest: "/opt/cni/bin/ovn-k8s-cni-overlay" - owner: root - group: root - mode: 0755 - -- name: ovn-kubernetes | get ovn-kube-util - copy: - src: "{{ansible_tmp_dir}}/ovn-kube-util" - dest: "{{ ovn_kubernetes_info.install_path }}/ovn-kube-util" - owner: root - group: root - mode: 0755 - -- name: ovn-kubernetes | get ovnkube - copy: - src: "{{ansible_tmp_dir}}/ovnkube" - dest: "{{ ovn_kubernetes_info.install_path }}/ovnkube" - owner: root - group: root - mode: 0755 diff --git a/contrib/roles/linux/ovn-kubernetes/tasks/fetch_bins.yml b/contrib/roles/linux/ovn-kubernetes/tasks/fetch_bins.yml deleted file mode 100644 index 4df2043092..0000000000 --- a/contrib/roles/linux/ovn-kubernetes/tasks/fetch_bins.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- name: ovn-kubernetes | fetch linux bins - synchronize: - mode: pull - src: 
"{{ovn_kubernetes_info.build_path}}/ovn-kubernetes-checkout/go-controller/_output/go/bin/{{item}}" - dest: "{{ansible_tmp_dir}}/{{item}}" - use_ssh_args: yes - with_items: - - "{{ovn_kubernetes_binaries.linux}}" - -- name: ovn-kubernetes | fetch windows bins - synchronize: - mode: pull - src: "{{ovn_kubernetes_info.build_path}}/ovn-kubernetes-checkout/go-controller/_output/go/windows/{{item}}" - dest: "{{ansible_tmp_dir}}/{{item}}" - use_ssh_args: yes - with_items: - - "{{ovn_kubernetes_binaries.windows}}" diff --git a/contrib/roles/linux/ovn-kubernetes/tasks/main.yml b/contrib/roles/linux/ovn-kubernetes/tasks/main.yml deleted file mode 100644 index 483868430d..0000000000 --- a/contrib/roles/linux/ovn-kubernetes/tasks/main.yml +++ /dev/null @@ -1,42 +0,0 @@ ---- -- name: ovn-kubernetes | include global vars for minions - include_vars: "{{ansible_tmp_dir}}/generated_global_vars.yml" - when: not master - -- name: ovn-kubernetes | include vars - include_vars: "{{ ansible_distribution|lower }}.yml" - -- name: ovn-kubernetes | Checking if binaries are already present on ansible machine - local_action: stat path="{{ansible_tmp_dir}}/{{item}}" - register: stat_bins_exists - with_items: - - "{{ovn_kubernetes_binaries.linux}}" - - "{{ovn_kubernetes_binaries.windows}}" - -- name: ovn-kubernetes | Expecting all binaries to be already present - set_fact: - binaries_missing: false - -- name: ovn-kubernetes | Checking all binaries - set_fact: - binaries_missing: true - with_items: - - "{{stat_bins_exists.results}}" - loop_control: - label: "{{item.item}}" - when: not item.stat.exists - -- debug: - msg: "Binaries are missing: {{binaries_missing}}" - -- name: ovn-kubernetes | Build and install binaries - include_tasks: ./build_install_bins.yml - when: binaries_missing or master or not distribute_binaries - -- name: ovn-kubernetes | Fetching Linux and Windows binaries - include_tasks: ./fetch_bins.yml - when: binaries_missing - -- name: ovn-kubernetes | Distributing binaries to 
minion - include_tasks: ./distribute_bins.yml - when: minion and distribute_binaries diff --git a/contrib/roles/linux/ovn-kubernetes/vars/ubuntu.yml b/contrib/roles/linux/ovn-kubernetes/vars/ubuntu.yml deleted file mode 100644 index 41ce2b02ad..0000000000 --- a/contrib/roles/linux/ovn-kubernetes/vars/ubuntu.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -ovn_kubernetes_info: - install_path: /usr/bin - # ovn-kubernetes build info - git_url: https://github.com/ovn-org/ovn-kubernetes - build_path: /tmp - branch: master - -ovn_kubernetes_binaries: - linux: - - ovnkube - - ovn-k8s-cni-overlay - - ovn-kube-util - windows: - - ovnkube.exe - - ovn-k8s-cni-overlay.exe diff --git a/contrib/roles/linux/validation/tasks/main.yml b/contrib/roles/linux/validation/tasks/main.yml deleted file mode 100644 index fe8036937f..0000000000 --- a/contrib/roles/linux/validation/tasks/main.yml +++ /dev/null @@ -1,56 +0,0 @@ ---- -- name: Linux validation | include vars - include_vars: "{{ ansible_distribution | lower }}.yml" - -- name: Linux validation | Set the service_names fact - block: - - name: Linux validation | Set the Linux master service names - set_fact: - service_names: "{{ master_service_names }}" - when: master - - - name: Linux validation | Set the Linux minion service names - set_fact: - service_names: "{{ minion_service_names }}" - when: minion - -- name: Linux validation | Validate the services success state - include: "./validate_service.yml service_name={{ item }}" - with_items: "{{ service_names }}" - -- name: Linux validation | Validate if the pods from namespace kube-system (CoreDNS) are running - run_once: true - retries: 10 - delay: 5 - register: result - shell: | - set -o errexit - for UNHEALTHY_POD in $(kubectl get pods --namespace kube-system --output jsonpath='{@.items[?(@.status.phase!="Running")].metadata.name}'); do - echo "ERROR: Pod $UNHEALTHY_POD from namespace kube-system is not running" - exit 1 - done - echo "All the pods from namespace kube-system are running" 
- args: - executable: /bin/bash - until: result.rc == 0 - when: master - -- name: Linux validation | Confirm K8s minions health - retries: 10 - delay: 5 - register: result - shell: | - set -o errexit - READY_STATUS=$(kubectl get node {{ hostvars[item]['ansible_facts']['hostname'] | lower }} --output jsonpath='{@.status.conditions[?(@.type=="Ready")].status}') - if [[ "$READY_STATUS" != "True" ]]; then - echo "ERROR: Node {{ hostvars[item]['ansible_facts']['hostname'] | lower }} is not ready" - exit 1 - fi - echo "Node {{ hostvars[item]['ansible_facts']['hostname'] | lower }} is ready" - args: - executable: /bin/bash - until: result.rc == 0 - with_items: - - "{{ groups['kube-minions-linux'] }}" - - "{{ groups['kube-minions-windows'] }}" - when: master diff --git a/contrib/roles/linux/validation/tasks/validate_service.yml b/contrib/roles/linux/validation/tasks/validate_service.yml deleted file mode 100644 index 2c34ec28f0..0000000000 --- a/contrib/roles/linux/validation/tasks/validate_service.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- name: Linux validate service | Get service info - systemd: - name: "{{ service_name }}" - register: service_stat - -- name: Linux validate service | Fail if the service doesn't exist - fail: - msg: "The service {{ service_name }} doesn't exist" - when: service_stat.status.LoadState == "not-found" - -- name: Linux validate service | Fail if the service is not running - fail: - msg: "The service {{ service_name }} is not running. Current status is {{ service_stat.status.SubState }}" - when: service_stat.status.SubState != "running" - -- name: Linux validate service | Fail if the service result value is not success - fail: - msg: "The service result value is not success. Current result value: {{ service_stat.status.Result }}" - when: service_stat.status.Result != "success" - -- name: Linux validate service | Fail if the service is not enabled - fail: - msg: "The service {{ service_name }} is not enabled. 
Current status is {{ service_stat.status.UnitFileState }}" - when: service_stat.status.UnitFileState != "enabled" diff --git a/contrib/roles/linux/validation/vars/ubuntu.yml b/contrib/roles/linux/validation/vars/ubuntu.yml deleted file mode 100644 index f0f453e317..0000000000 --- a/contrib/roles/linux/validation/vars/ubuntu.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -master_service_names: - - "etcd3" - - "kube-apiserver" - - "kube-controller-manager" - - "kube-scheduler" - - "ovn-kubernetes-master" - -minion_service_names: - - "kubelet" - - "ovn-kubernetes-node" diff --git a/contrib/roles/linux/version_check/tasks/main.yml b/contrib/roles/linux/version_check/tasks/main.yml deleted file mode 100644 index c71f242409..0000000000 --- a/contrib/roles/linux/version_check/tasks/main.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- name: version check | Checking if distro is supported by this playbook - fail: - msg: "Distribution {{ ansible_distribution | lower }} not supported by this playbook" - when: ansible_distribution | lower != 'ubuntu' - -- name: version check | Checking if distro version is supported by this playbook - fail: - msg: "Distribution version {{ ansible_distribution_version }} not supported by this playbook" - when: ansible_distribution_version != '18.04' and ansible_distribution_version != '16.04' diff --git a/contrib/roles/windows/docker/tasks/install_docker.yml b/contrib/roles/windows/docker/tasks/install_docker.yml deleted file mode 100644 index bde499e11f..0000000000 --- a/contrib/roles/windows/docker/tasks/install_docker.yml +++ /dev/null @@ -1,101 +0,0 @@ ---- -- name: Docker | Download latest docker - win_shell: | - $ErrorActionPreference = "Stop" - Find-PackageProvider -Name "Nuget" | Install-PackageProvider -Force - Install-Module -Name "DockerMsftProvider" -Repository "PSGallery" -Force - Install-Package -Name "Docker" -ProviderName "DockerMsftProvider" -Force -RequiredVersion "{{ docker_version }}" - -# Remove the existing default Docker Windows 
service because we'll recreate it -# later on using the Windows service wrapper.This will allow us to capture -# the Docker logs to a file instead of using the event logs (the default -# logging behavior for Docker). -- name: Docker | Remove existing Docker Windows service - win_service: - name: Docker - state: absent - -- name: Docker | Create the service wrapper config file - win_lineinfile: - path: '{{ docker_info.install_dir }}\dockerd-servicewrapper-config.ini' - create: yes - line: |- - service-name=Docker - service-command={{ docker_info.install_dir }}/dockerd.exe - log-file={{ docker_info.install_dir }}/dockerd.log - -- name: Docker | Create the Docker Windows service using the service wrapper - win_service: - name: Docker - display_name: "Docker Windows Agent" - path: >- - "{{ install_path }}\servicewrapper.exe" --config "{{ docker_info.install_dir }}\dockerd-servicewrapper-config.ini" - state: stopped - start_mode: disabled - -- name: Docker | Create Docker config directory - win_file: - path: "{{ docker_info.config_dir }}" - state: directory - -- name: Docker | Disable docker default network - win_lineinfile: - path: "{{ docker_info.config_dir }}/daemon.json" - create: yes - line: '{ "bridge" : "none" }' - newline: unix - -- name: Docker | Remove current default nat network - win_shell: | - Get-HNSNetwork | Where-Object { $_.Name -eq "nat" } | Remove-HNSNetwork - -- name: Docker | Set Docker service failure command - win_shell: >- - sc.exe failure Docker reset=40 actions=restart/0/restart/0/run/30000 - command="powershell.exe Move-Item - \\\`"{{ docker_info.install_dir }}/dockerd.log\\\`" - \\\`"{{ docker_info.install_dir }}/dockerd.log_`$(Get-Date -f yyyy-MM-dd-hh-mm-ss)\\\`"; - Restart-Service Docker" - -- name: Docker | Enable Docker service failure flags - win_shell: sc.exe failureflag Docker 1 - -- name: Docker | Get the Dockerd version - win_command: > - "{{ docker_info.install_dir }}\dockerd.exe" --version - register: dockerd_version - -# On 
Windows, we need to explicitly set the DOCKER_API_VERSION environment variable -# to avoid failures when spawning Docker tasks. -# More info here: https://github.com/Azure/acs-engine/issues/4118 -- name: Docker | Set the DOCKER_API_VERSION system environment variable - block: - - name: Docker | Check if Docker 17.06 is found - set_fact: - docker_api_version: > - "1.30" - when: dockerd_version.stdout.startswith("Docker version 17.06") - - - name: Docker | Check if Docker 18.03 is found - set_fact: - docker_api_version: > - "1.37" - when: dockerd_version.stdout.startswith("Docker version 18.03") - - - name: Docker | Check if other Docker version is found - set_fact: - docker_api_version: > - $null - when: (not dockerd_version.stdout.startswith("Docker version 17.06") and - not dockerd_version.stdout.startswith("Docker version 18.03")) - - - name: Set or clear the DOCKER_API_VERSION system environment variable - win_shell: > - [System.Environment]::SetEnvironmentVariable('DOCKER_API_VERSION', {{ docker_api_version }}, [System.EnvironmentVariableTarget]::Machine) - when: dockerd_version.stdout != "" - -- name: Docker | Start the Docker service - win_service: - name: Docker - start_mode: auto - state: started diff --git a/contrib/roles/windows/docker/tasks/main.yml b/contrib/roles/windows/docker/tasks/main.yml deleted file mode 100644 index 9a0d0b938a..0000000000 --- a/contrib/roles/windows/docker/tasks/main.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -- name: Docker | include vars - include_vars: "{{ ansible_os_family|lower }}.yml" - -- name: Docker | Check if Docker is installed - win_service: - name: Docker - register: docker_service - -- name: Docker | Install docker - block: - - name: Docker | Install docker - include_tasks: ./install_docker.yml - when: not docker_service.exists diff --git a/contrib/roles/windows/docker/vars/windows.yml b/contrib/roles/windows/docker/vars/windows.yml deleted file mode 100644 index d63b27b4a6..0000000000 --- 
a/contrib/roles/windows/docker/vars/windows.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -docker_info: - install_dir: C:/Program Files/Docker - config_dir: C:/ProgramData/docker/config diff --git a/contrib/roles/windows/kubernetes/tasks/create_infracontainer.yml b/contrib/roles/windows/kubernetes/tasks/create_infracontainer.yml deleted file mode 100644 index 83c4e43970..0000000000 --- a/contrib/roles/windows/kubernetes/tasks/create_infracontainer.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -# Need to generate infra container image -- name: Kubernetes infracontainer | Delete Dockerfile - win_file: - path: "{{ install_path }}/Dockerfile" - state: absent -- name: Kubernetes infracontainer | Create Dockerfile - win_lineinfile: - path: "{{ install_path }}/Dockerfile" - create: yes - line: | - FROM mcr.microsoft.com/windows/nanoserver:{{ windows_container_tag }} - - CMD ping 127.0.0.1 -t - newline: unix - -- name: Kubernetes infracontainer | Create container {{ kubernetes_info.infracontainername }} - retries: 10 - delay: 5 - register: result - win_shell: | - cd {{ install_path }} - docker image build -t {{ kubernetes_info.infracontainername }} . 
- until: result.rc == 0 diff --git a/contrib/roles/windows/kubernetes/tasks/get_ovn_subnet.yml b/contrib/roles/windows/kubernetes/tasks/get_ovn_subnet.yml deleted file mode 100644 index 2f8b9245da..0000000000 --- a/contrib/roles/windows/kubernetes/tasks/get_ovn_subnet.yml +++ /dev/null @@ -1,31 +0,0 @@ ---- -- name: OVN subnet | Get the ovn_host_annotation - block: - - name: OVN subnet | Fetch the ovn_host_subnet annotation - win_shell: | - $ErrorActionPreference = "Stop" - $output = & "{{ install_path }}\kubectl.exe" get node {{ ansible_hostname | lower }} --output json - if($LASTEXITCODE) { - Throw "Failed to execute kubectl.exe get node" - } - $json = $output | ConvertFrom-Json - $json.metadata.annotations.ovn_host_subnet - register: cmd_output - until: cmd_output.stdout != "" - retries: 10 - delay: 3 - rescue: - - fail: - msg: Could not fetch OVN host subnet, check that kubelet can connect to kubernetes - -- name: OVN subnet | Set ovn_subnet and ovn_gateway_ip facts - set_fact: - ovn_subnet: "{{ cmd_output.stdout | trim }}" - -- name: OVN subnet | Set the ovn_gateway_ip - set_fact: - # Set the ovn_gateway_ip by parsing the ovn_subnet - ovn_gateway_ip: "{{ ovn_subnet.split('/')[0][:-1] + '1' }}" - -- debug: var=ovn_subnet -- debug: var=ovn_gateway_ip diff --git a/contrib/roles/windows/kubernetes/tasks/main.yml b/contrib/roles/windows/kubernetes/tasks/main.yml deleted file mode 100644 index 00e11d90ff..0000000000 --- a/contrib/roles/windows/kubernetes/tasks/main.yml +++ /dev/null @@ -1,115 +0,0 @@ ---- -- name: Kubernetes | Include global vars for minions - include_vars: "{{ansible_tmp_dir}}/generated_global_vars.yml" - when: not master - -- name: Kubernetes | Include vars - include_vars: "{{ ansible_os_family|lower }}.yml" - -- name: Kubernetes | Fetch network details - include_tasks: set_ip_facts.yml - -# We need to start kubelet in order to be able to retrieve the ovn_host_subnet -# from kubernetes node annotations -- name: Kubernetes | Register and start 
kubelet service - include_tasks: ./start_kubelet.yml - -- name: Kubernetes | Fetch OVN subnet for the node - include_tasks: ./get_ovn_subnet.yml - -# This will create the HNS network if it doesn't exist, or get its HNS id if -# it was already previously created. The variable "hns_net_id" is set at the -# end of the Setup SDN task. -- name: Kubernetes | Setup SDN - include_tasks: ./setup_sdn.yml - -- name: Kubernetes | OVS HNS network setup - include_tasks: ./setup_ovs_hns.yml - -- name: Kubernetes | Configure the Windows firewall - block: - - name: Disable firewall & realtime monitoring (only for testing environments) - block: - - name: Disable firewall - win_command: NetSh Advfirewall set allprofiles state off - - - name: Disable realtime monitoring - win_shell: Set-MpPreference -DisableRealtimeMonitoring $true - when: firewall.disable_completely == True - - - name: Enable firewall (for production environments) - win_command: NetSh Advfirewall set allprofiles state on - when: firewall.disable_completely == False - - - name: "Enable firewall rules" - win_shell: | - $ErrorActionPreference = "Stop" - $rule = Get-NetFirewallRule -Name "{{ item['name'] }}" -ErrorAction SilentlyContinue - if($rule) { - # Delete existing rule to re-create it with latest config - Remove-NetFirewallRule -InputObject $rule - } - New-NetFirewallRule -Enabled True -Action Allow ` - -Name "{{ item['name'] }}" -DisplayName "{{ item['name'] }}" ` - -Protocol "{{ item['protocol'] }}" -LocalPort "{{ item['port'] }}" ` - -Direction "{{ item['direction'] }}" - with_items: "{{ firewall['rules'] }}" - when: firewall.disable_completely == False - -- name: Kubernetes | Check ping to OVN gateway - win_shell: ping {{ ovn_gateway_ip }} -n 1 - register: ping_ovn_gateway - ignore_errors: true - -- name: Kubernetes | Check if minion-init has executed already - block: - - debug: - msg: Could not reach OVN gateway, running minion init - # This will create another powershell script for minion init. 
OVS on Windows - # does not allow any commands to be ran through network. Ansible uses WinRM - # for the connection. - - name: Kubernetes | Run minion-init - include_tasks: ./run_minion_init.yml - when: ping_ovn_gateway.rc != 0 - -# There is no infra container for Windows Server, this creates a custom -# infra container -- name: Kubernetes | Check if infra container exists - win_shell: "docker image inspect {{ kubernetes_info.infracontainername }}" - register: infra_inspect - failed_when: infra_inspect.rc != 0 and infra_inspect.rc != 1 - when: ansible_kernel in supported_versions_by_the_playbook - -- include_tasks: ./create_infracontainer.yml - when: infra_inspect.rc != 0 and ansible_kernel in supported_versions_by_the_playbook - -- name: Create test yaml for this host - become: true - become_method: sudo - blockinfile: - path: /root/nano-pod-{{windows_container_tag}}-{{ansible_hostname|lower}}.yaml - create: yes - marker: "# {mark} Ansible automatic example generation" - block: | - apiVersion: v1 - kind: Pod - metadata: - name: nano-{{windows_container_tag}}-{{ansible_hostname|lower}} - labels: - name: webserver - spec: - containers: - - name: nano - image: ovnkubernetes/pause - imagePullPolicy: IfNotPresent - # This test yaml uses a custom nanoserver container that starts a simple - # http server that can be used for tests. It's much faster compared to the - # IIS container. 
- - name: nano2 - image: alinbalutoiu/nanoserver-web:{{windows_container_tag}} - imagePullPolicy: IfNotPresent - nodeSelector: - kubernetes.io/os: windows - kubernetes.io/hostname: {{ansible_hostname|lower}} - delegate_to: "{{ item }}" - with_items: "{{ groups['kube-master'] }}" diff --git a/contrib/roles/windows/kubernetes/tasks/run_minion_init.yml b/contrib/roles/windows/kubernetes/tasks/run_minion_init.yml deleted file mode 100644 index d6cb62eec5..0000000000 --- a/contrib/roles/windows/kubernetes/tasks/run_minion_init.yml +++ /dev/null @@ -1,114 +0,0 @@ ---- -- name: Kubernetes minion | Update etc\hosts file - win_lineinfile: - path: '{{ ansible_env.windir }}\System32\drivers\etc\hosts' - line: '{{ host_public_ip }} {{ ansible_hostname | lower }}' - state: present - -- name: Kubernetes minion | Add "{{ install_path }}" to path - win_path: - elements: "{{ install_path }}" - -- name: Kubernetes minion | Check if ovn-kubernetes-node is installed - win_service: - name: ovn-kubernetes-node - register: ovnkube_service - -- name: Kubernetes minion | Remove ovn-kubernetes-node service if it exists - win_service: - name: ovn-kubernetes-node - state: absent - when: ovnkube_service.exists - -- name: Kubernetes minion | Set ovnkube service command - set_fact: - ovnkube_service_command: >- - "{{ install_path }}\\ovnkube.exe" - --k8s-kubeconfig={{ install_path }}\\kubeconfig.yaml - --k8s-apiserver http://{{ kubernetes_info.MASTER_IP }}:8080 - --init-node {{ ansible_hostname|lower }} - --k8s-token {{ TOKEN }} - --nb-address "tcp://{{ kubernetes_info.MASTER_IP }}:6641" - --sb-address "tcp://{{ kubernetes_info.MASTER_IP }}:6642" - --cluster-subnets {{ kubernetes_info.CLUSTER_SUBNET }} - --k8s-service-cidr {{ kubernetes_info.SERVICE_CLUSTER_IP_RANGE }} - --cni-conf-dir="{{ install_path }}/cni" - --cni-plugin "ovn-k8s-cni-overlay.exe" - --encap-ip="{{ host_internal_ip }}" - -- name: Kubernetes minion | Remove existing service wrapper config file - win_file: - state: absent - 
path: '{{ install_path }}\ovnkube-servicewrapper-config.ini' - -- name: Kubernetes minion | Create the new service wrapper config file - win_lineinfile: - path: '{{ install_path }}\ovnkube-servicewrapper-config.ini' - create: yes - line: | - log-file={{ install_path }}/ovn-kubernetes-node.log - service-name=ovn-kubernetes-node - -- name: Kubernetes minion | Set default "init_gateway" fact if not already defined - set_fact: - init_gateway: true - when: init_gateway is not defined - -- name: Kubernetes minion | Set the ovnkube service-command with gateway args - win_lineinfile: - path: '{{ install_path }}\ovnkube-servicewrapper-config.ini' - insertafter: EOF - state: present - line: > - service-command={{ ovnkube_service_command }} - --nodeport - --gateway-mode=shared - --gateway-interface="vEthernet ({{ interface_name }})" - --gateway-nexthop="{{ interface_default_gateway }}" - when: init_gateway - -- name: Kubernetes minion | Set the ovnkube service-command without gateway args - win_lineinfile: - path: '{{ install_path }}\ovnkube-servicewrapper-config.ini' - insertafter: EOF - state: present - line: > - service-command={{ ovnkube_service_command }} - when: not init_gateway - -- name: Kubernetes minion | Create ovn-kubernetes-node service - win_service: - name: ovn-kubernetes-node - display_name: OVN Kubernetes Node - description: OVN Kubernetes Node CNI Server - path: >- - "{{ install_path }}\servicewrapper.exe" --config "{{ install_path }}\ovnkube-servicewrapper-config.ini" - -- name: Kubernetes minion | Set Kubernetes minion service failure command - win_shell: >- - sc.exe failure ovn-kubernetes-node reset=40 actions=restart/0/restart/0/run/30000 - command="powershell.exe Move-Item - \\\`"{{ install_path }}/ovn-kubernetes-node.log\\\`" - \\\`"{{ install_path }}/ovn-kubernetes-node.log_`$(Get-Date -f yyyy-MM-dd-hh-mm-ss)\\\`"; - Restart-Service ovn-kubernetes-node" - -- name: Kubernetes minion | Enable Kubernetes minion service failure flags - win_shell: sc.exe 
failureflag ovn-kubernetes-node 1 - -- name: Kubernetes minion | Restart Kubelet - win_service: - name: kubelet - state: restarted - -- name: Kubernetes minion | Start ovn-kubernetes-node service - win_service: - name: ovn-kubernetes-node - start_mode: auto - state: started - -- name: Kubernetes minion | Wait for ovn-kubernetes-node service to finish init - win_shell: ping {{ ovn_gateway_ip }} -n 1 - register: ovngateway_ping - until: ovngateway_ping.rc == 0 - retries: 30 - delay: 3 diff --git a/contrib/roles/windows/kubernetes/tasks/set_ip_facts.yml b/contrib/roles/windows/kubernetes/tasks/set_ip_facts.yml deleted file mode 100644 index c0d1b9bb0f..0000000000 --- a/contrib/roles/windows/kubernetes/tasks/set_ip_facts.yml +++ /dev/null @@ -1,93 +0,0 @@ ---- -- name: IP facts | Get the preferred network interface index - register: cmd_output - win_shell: | - $ErrorActionPreference = "Stop" - $hnsNet = Get-HnsNetwork | Where-Object { - ($_.Name -eq "{{ sdn_info.sdn_network_name }}") -and ($_.Type -eq "Transparent") - } - if($hnsNet) { - if($hnsNet.Count -gt 1) { - Throw "There is more than one HNS transparent networks with the name {{ sdn_info.sdn_network_name }}" - } - (Get-NetAdapter -Name $hnsNet.NetworkAdapterName).InterfaceIndex - exit 0 - } - $preferredIfName = "{{ sdn_preferred_nic_name | default('') }}" - if($preferredIfName) { - (Get-NetAdapter -Name $preferredIfName).InterfaceIndex - exit 0 - } - $defaultRoute = Get-NetRoute -DestinationPrefix "0.0.0.0/0" - if(!$defaultRoute) { - Throw "No default route. Cannot get the default SDN preferred NIC" - } - if($defaultRoute.Count -gt 1) { - Throw "There is more than one default gateway. 
Cannot get the default SDN preferred NIC" - } - $defaultRoute.ifIndex - -- name: IP facts | Set the interface_index fact - set_fact: - interface_index: "{{ cmd_output.stdout | trim }}" - -- name: IP facts | Get the default gateway - register: cmd_output - win_shell: (Get-NetRoute -DestinationPrefix "0.0.0.0/0").NextHop - -- name: IP facts | Set the interface_default_gateway fact - set_fact: - interface_default_gateway: "{{ cmd_output.stdout | trim }}" - -- name: IP facts | Get the preferred network interface name - register: cmd_output - win_shell: (Get-NetAdapter -InterfaceIndex "{{ interface_index }}").Name - -- name: IP facts | Set the interface_name fact - set_fact: - interface_name: "{{ cmd_output.stdout | trim }}" - -- name: IP facts | Get the configured network interface index - register: cmd_output - win_shell: | - $ErrorActionPreference = "Stop" - $vEthernet = Get-NetAdapter -Name "vEthernet ({{ interface_name }})" -ErrorAction SilentlyContinue - if($vEthernet) { - $vEthernet.InterfaceIndex - } else { - "{{ interface_index }}" - } - -- name: IP facts | Set the configured_interface_index fact - set_fact: - configured_interface_index: "{{ cmd_output.stdout | trim }}" - -- name: IP facts | Validate IP address on the preferred network interface - win_shell: | - $ErrorActionPreference = "Stop" - $netAddress = Get-NetIPAddress -InterfaceIndex "{{ configured_interface_index }}" - if(!$netAddress) { - Throw "The network interface {{ configured_interface_index }} doesn't have any addresses configured" - } - if($netAddress.Count -gt 1) { - Throw "There is more than one IP address configured on the interface {{ configured_interface_index }}" - } - -- name: IP facts | Get the internal IP address - register: cmd_output - win_shell: (Get-NetIPAddress -InterfaceIndex "{{ configured_interface_index }}").IPAddress - -- name: IP facts | Set the host_internal_ip and host_public_ip facts - set_fact: - host_internal_ip: "{{ cmd_output.stdout | trim }}" - # TODO: Set this to 
the proper public IP address in order to have support - # for GCE or cross-cloud deployments of k8s. For now, we simply set - # it to the host_internal_ip. - host_public_ip: "{{ cmd_output.stdout | trim }}" - -- debug: var=interface_index -- debug: var=configured_interface_index -- debug: var=interface_name -- debug: var=interface_default_gateway -- debug: var=host_internal_ip -- debug: var=host_public_ip diff --git a/contrib/roles/windows/kubernetes/tasks/setup_ovs_hns.yml b/contrib/roles/windows/kubernetes/tasks/setup_ovs_hns.yml deleted file mode 100644 index b81a3ec10a..0000000000 --- a/contrib/roles/windows/kubernetes/tasks/setup_ovs_hns.yml +++ /dev/null @@ -1,74 +0,0 @@ ---- -- name: OVS HNS network setup | Remove setup_ovs_hns script and its log if they exists - win_file: - path: "{{ item }}" - state: absent - with_items: - - "{{ install_path }}/setup_ovs_hns.ps1" - - "{{ install_path }}/setup_ovs_hns.log" - -- name: OVS HNS network setup | Create setup_ovs_hns script - win_lineinfile: - path: "{{ install_path }}/setup_ovs_hns.ps1" - create: yes - line: | - $ErrorActionPreference = "Stop" - # - # Enable OVS on HNS network - # - $net = Get-HnsNetwork | Where-Object { $_.ID -eq "{{ hns_net_id }}" } - Set-Service "ovs-vswitchd" -StartupType Disabled - Stop-Service "ovs-vswitchd" -Force - Disable-OVSOnHNSNetwork $net.ID - $bridgeName = "vEthernet ($($net.NetworkAdapterName))" - ovs-vsctl.exe --no-wait --if-exists del-br "$bridgeName" - if($LASTEXITCODE) { - Throw "Failed to cleanup existing OVS bridge" - } - ovs-vsctl.exe --no-wait add-br "$bridgeName" - if($LASTEXITCODE) { - Throw "Failed to add the OVS bridge" - } - ovs-vsctl.exe --no-wait add-port "$bridgeName" "$($net.NetworkAdapterName)" - if($LASTEXITCODE) { - Throw "Failed to add the HNS interface to OVS bridge" - } - Enable-OVSOnHNSNetwork $net.ID - Set-Service "ovs-vswitchd" -StartupType Automatic - Start-Service "ovs-vswitchd" - # - # Set the OVS guid and the k8s-api-server - # - ovs-vsctl.exe get 
Open_vSwitch . external_ids:system-id - if($LASTEXITCODE -ne 0) { - # The system-id guid is not set is $LASTEXITCODE is different than zero - ovs-vsctl.exe --timeout {{ ovs_cmd_timeout }} set Open_vSwitch . external_ids:system-id="$((New-Guid).Guid)" - if($LASTEXITCODE) { - Throw "Failed to set the OVS system-id guid" - } - } - ovs-vsctl.exe --timeout {{ ovs_cmd_timeout }} set Open_vSwitch . external_ids:k8s-api-server="http://{{ kubernetes_info.MASTER_IP }}:8080" - if($LASTEXITCODE) { - Throw "Failed to set the k8s-api-server" - } - # - # Create the OVS tunnel - # - ovs-vsctl.exe --timeout {{ ovs_cmd_timeout }} set Open_vSwitch . ` - external_ids:ovn-remote="tcp:{{ kubernetes_info.MASTER_IP }}:6642" ` - external_ids:ovn-nb="tcp:{{ kubernetes_info.MASTER_IP }}:6641" ` - external_ids:ovn-encap-ip={{ host_public_ip }} ` - external_ids:ovn-encap-type="geneve" - if($LASTEXITCODE) { - Throw "Failed to create the OVS tunnel" - } - newline: unix - -- name: OVS HNS network setup | Run setup_ovs_hns script - win_psexec: - executable: "{{ install_path }}/PSTools/PsExec64.exe" - command: cmd /c powershell.exe {{ install_path }}/setup_ovs_hns.ps1 2>&1 >> {{ install_path }}/setup_ovs_hns.log - interactive: no - system: yes - wait: yes - timeout: 120 diff --git a/contrib/roles/windows/kubernetes/tasks/setup_sdn.yml b/contrib/roles/windows/kubernetes/tasks/setup_sdn.yml deleted file mode 100644 index e848846b43..0000000000 --- a/contrib/roles/windows/kubernetes/tasks/setup_sdn.yml +++ /dev/null @@ -1,48 +0,0 @@ ---- -- name: SDN setup | Check if the HNS network exists - register: hns_net_exists_out - win_shell: > - (Get-HNSNetwork | Where-Object { $_.Type -eq "Transparent" -and $_.Name -eq "{{ sdn_info.sdn_network_name }}" }) -ne $null - -- name: SDN setup | Create HNS network if it doesn't exist - block: - - name: SDN setup | Create HNS network - win_shell: > - New-HnsNetwork -Name {{ sdn_info.sdn_network_name }} ` - -AddressPrefix {{ ovn_subnet }} ` - -Gateway {{ ovn_gateway_ip 
}} ` - -Type Transparent ` - -AdapterName "{{ interface_name }}" - async: 10 - poll: 0 - - # Wait for the HNS network creation. Connection will drop shortly when doing - # this, wait for the connection to come back. - - name: SDN setup | Wait for system to become reachable over WinRM (if network was created) - wait_for_connection: - # The connection should come back usually after a few seconds - delay: 5 - timeout: 60 - when: (hns_net_exists_out.stdout | trim) == "False" - -- name: SDN setup | Get the HNS network ID - register: hns_net_id_out - win_shell: | - $ErrorActionPreference = "Stop" - $net = Get-HNSNetwork | Where-Object { $_.Type -eq "Transparent" -and $_.Name -eq "{{ sdn_info.sdn_network_name }}" } - if(!$net) { - Throw "HNS network was not found" - } - if($net.Count -gt 1) { - Throw "Multiple HNS networks were found" - } - $net.ID - -- name: SDN setup | Set the HNS network ID fact - set_fact: - hns_net_id: "{{ hns_net_id_out.stdout | trim }}" - -- name: SDN setup | Restart Docker to get the latest HNS networks - win_service: - name: Docker - state: restarted diff --git a/contrib/roles/windows/kubernetes/tasks/start_kubelet.yml b/contrib/roles/windows/kubernetes/tasks/start_kubelet.yml deleted file mode 100644 index 5df6309719..0000000000 --- a/contrib/roles/windows/kubernetes/tasks/start_kubelet.yml +++ /dev/null @@ -1,108 +0,0 @@ ---- -- name: Kubelet | Delete kubeconfig - win_file: - path: "{{ install_path }}/kubeconfig.yaml" - state: absent - -- name: Kubelet | Create kubeconfig - win_lineinfile: - path: "{{ install_path }}/kubeconfig.yaml" - create: yes - line: | - apiVersion: v1 - kind: Config - clusters: - - name: local - cluster: - server: http://{{ kubernetes_info.MASTER_IP }}:8080 - users: - - name: kubelet - contexts: - - context: - cluster: local - user: kubelet - name: kubelet-context - current-context: kubelet-context - newline: unix - -- name: Kubernetes | check if Kubelet is installed - win_service: - name: kubelet - register: 
kubelet_service - -- name: Kubelet | Remove kubelet service if it exists - win_service: - name: kubelet - state: absent - when: kubelet_service.exists - -- name: Kubelet | Create the service wrapper config file - block: - - name: Kubelet | Remove existing service wrapper config file - win_file: - state: absent - path: '{{ install_path }}\kubelet-servicewrapper-config.ini' - - - name: Kubelet | Create the new service wrapper config file - win_lineinfile: - path: '{{ install_path }}\kubelet-servicewrapper-config.ini' - create: yes - line: > - log-file={{ install_path }}/kubelet.log - - service-name=kubelet - - service-command="{{ install_path }}\\kubelet.exe" - --hostname-override="{{ ansible_hostname }}" - --cluster-dns="{{ kubernetes_info.K8S_DNS_SERVICE_IP }}" - --cluster-domain="{{ kubernetes_info.K8S_DNS_DOMAIN }}" - --pod-infra-container-image="{{kubernetes_info.infracontainername}}" - --kubeconfig="{{ install_path }}\\kubeconfig.yaml" - --network-plugin=cni --cni-bin-dir="{{ install_path }}\\cni" - --cni-conf-dir="{{ install_path }}\\cni" - --node-ip="{{ host_public_ip }}" - --enforce-node-allocatable "" - --cgroups-per-qos=false - --resolv-conf "" - -- name: Kubelet | Create Kublet Windows service - win_service: - name: kubelet - display_name: Kubernetes Kubelet - description: Kubernetes Kubelet service - path: >- - "{{ install_path }}\servicewrapper.exe" --config "{{ install_path }}\kubelet-servicewrapper-config.ini" - -- name: Kublet | Set Kublet service failure command - win_shell: >- - sc.exe failure kubelet reset=40 actions=restart/0/restart/0/run/30000 - command="powershell.exe Move-Item - \\\`"{{ install_path }}/kubelet.log\\\`" - \\\`"{{ install_path }}/kubelet.log_`$(Get-Date -f yyyy-MM-dd-hh-mm-ss)\\\`"; - Restart-Service kubelet" - -- name: Kublet | Enable Kublet service failure flags - win_shell: sc.exe failureflag kubelet 1 - -- name: Kubelet | Set kubectl context - win_shell: | - $ErrorActionPreference = "Stop" - {{ install_path }}\\kubectl.exe 
config set-cluster default-cluster --server={{ kubernetes_info.MASTER_IP }}:8080 - if($LASTEXITCODE) { - Throw "Failed to run kubectl.exe config set-cluster" - } - {{ install_path }}\\kubectl.exe config set-context local --cluster=default-cluster --user=default-admin - if($LASTEXITCODE) { - Throw "Failed to run kubectl.exe config set-context" - } - {{ install_path }}\\kubectl.exe config use-context local - if($LASTEXITCODE) { - Throw "Failed to run kubectl.exe config use-context" - } - -# Start the kubelet to ensure OVN gives subnet to this minion -- name: Kubelet | Start service kubelet - win_service: - name: kubelet - start_mode: auto - state: started diff --git a/contrib/roles/windows/kubernetes/vars/windows.yml b/contrib/roles/windows/kubernetes/vars/windows.yml deleted file mode 100644 index 23a64a7eb8..0000000000 --- a/contrib/roles/windows/kubernetes/vars/windows.yml +++ /dev/null @@ -1,54 +0,0 @@ ---- -sdn_info: - sdn_network_name: external - -ovs_cmd_timeout: 30 - -kubernetes_info: - MASTER_IP: "{{MASTER_IP}}" - CLUSTER_SUBNET: "{{CLUSTER_SUBNET | default('10.0.0.0/16')}}" - MASTER_INTERNAL_IP: "{{MASTER_INTERNAL_IP | default('10.0.0.2')}}" # Will always be the second IP of CLUSTER_SUBNET - SERVICE_CLUSTER_IP_RANGE: "{{SERVICE_CLUSTER_IP_RANGE | default('10.0.9.0/24')}}" - K8S_DNS_DOMAIN: "{{K8S_DNS_DOMAIN | default('cluster.local')}}" - K8S_DNS_SERVICE_IP: "{{K8S_DNS_SERVICE_IP | default('10.0.9.10')}}" - K8S_API_SERVICE_IP: "{{K8S_API_SERVICE_IP | default('10.0.9.1')}}" - infracontainername: "ovnkubernetes/pause" - -firewall: - disable_completely: false # WARNING: Do NOT enable this in production. Used only for testing purposes. 
- - rules: - - name: "Open vSwitch OVN GENEVE Port" - protocol: "UDP" - port: 6081 - direction: "Inbound" - - - name: "Open vSwitch OVN Database Ports" - protocol: "TCP" - port: 6641-6642 - direction: "Inbound" - - - name: "Kubernetes TCP Service Ports" - protocol: "TCP" - port: 30000-32767 - direction: "Inbound" - - - name: "Kubernetes UDP Service Ports" - protocol: "UDP" - port: 30000-32767 - direction: "Inbound" - - - name: "Kubernetes Kubelet Ports" - protocol: "TCP" - port: 10250-10252 - direction: "Inbound" - - - name: "Kubernetes HTTP API Port" - protocol: "TCP" - port: 8080 - direction: "Inbound" - - - name: "Kubernetes HTTPS API Port" - protocol: "TCP" - port: 443 - direction: "Inbound" diff --git a/contrib/roles/windows/openvswitch/tasks/install_custom_ovs.yml b/contrib/roles/windows/openvswitch/tasks/install_custom_ovs.yml deleted file mode 100644 index cd918051aa..0000000000 --- a/contrib/roles/windows/openvswitch/tasks/install_custom_ovs.yml +++ /dev/null @@ -1,54 +0,0 @@ ---- -# BCEDIT -- name: OVS | Running bcdedit - win_shell: bcdedit /set testsigning yes - when: bcdedit_needed - -- name: OVS | Restarting computer - win_reboot: - when: bcdedit_needed - -- name: OVS | Downloading OVS - win_get_url: - url: "{{custom_ovs_link}}" - dest: "{{ovs_info.tmp_dir}}\\ovs.msi" - timeout: 60 - retries: 3 - -- name: OVS | Download certificate beta - win_get_url: - url: "{{ovs_certs_link}}" - dest: "{{ovs_info.tmp_dir}}\\certificate.cer" - timeout: 60 - retries: 3 - when: ovs_certs_link is defined - -- name: OVS | Extract certificate from msi - win_shell: | - $ErrorActionPreference = "Stop" - $driverFile = "{{ovs_info.tmp_dir}}\\ovs.msi" - $outputFile = "{{ovs_info.tmp_dir}}\\certificate.cer" - $exportType = [System.Security.Cryptography.X509Certificates.X509ContentType]::Cert - $cert = (Get-AuthenticodeSignature $driverFile).SignerCertificate - [System.IO.File]::WriteAllBytes($outputFile, $cert.Export($exportType)) - when: ovs_certs_link is not defined - -- name: 
OVS | Install certificate - win_shell: | - $ErrorActionPreference = "Stop" - $cert = New-Object System.Security.Cryptography.X509Certificates.X509Certificate2("{{ovs_info.tmp_dir}}\certificate.cer") - $rootStore = Get-Item cert:\LocalMachine\TrustedPublisher - $rootStore.Open("ReadWrite") - $rootStore.Add($cert) - $rootStore.Close() - $rootStore = Get-Item cert:\LocalMachine\Root - $rootStore.Open("ReadWrite") - $rootStore.Add($cert) - $rootStore.Close() - -- name: OVS | Installing OVS - win_package: - path: "{{ovs_info.tmp_dir}}\\ovs.msi" - wait: yes - state: present - arguments: ADDLOCAL="OpenvSwitchCLI,OpenvSwitchDriver,OVNHost" diff --git a/contrib/roles/windows/openvswitch/tasks/install_ovs.yml b/contrib/roles/windows/openvswitch/tasks/install_ovs.yml deleted file mode 100644 index bfb2539f4e..0000000000 --- a/contrib/roles/windows/openvswitch/tasks/install_ovs.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- name: OVS | Downloading OVS - win_get_url: - url: "{{ ovs_info.download_link }}" - dest: "{{ ovs_info.tmp_dir }}\\ovs.msi" - retries: 3 - -- name: OVS | Installing OVS - win_package: - path: "{{ ovs_info.tmp_dir }}\\ovs.msi" - wait: yes - state: present - arguments: ADDLOCAL="OpenvSwitchCLI,OpenvSwitchDriver,OVNHost" diff --git a/contrib/roles/windows/openvswitch/tasks/main.yml b/contrib/roles/windows/openvswitch/tasks/main.yml deleted file mode 100644 index d3028d0e55..0000000000 --- a/contrib/roles/windows/openvswitch/tasks/main.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- name: OVS | include vars - include_vars: "{{ ansible_os_family|lower }}.yml" - -- name: OVS | check if OVS is installed - win_service: - name: ovsdb-server - register: ovs_service - -- name: OVS | Install OVS - block: - - name: OVS | Install beta OVS - include_tasks: "./install_custom_ovs.yml" - when: install_custom_ovs - - - name: OVS | Install release OVS - include_tasks: ./install_ovs.yml - when: not install_custom_ovs - when: not ovs_service.exists diff --git 
a/contrib/roles/windows/openvswitch/vars/windows.yml b/contrib/roles/windows/openvswitch/vars/windows.yml deleted file mode 100644 index 14c7409446..0000000000 --- a/contrib/roles/windows/openvswitch/vars/windows.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -ovs_info: - download_link: https://cloudbase.it/downloads/openvswitch-hyperv-2.7.0-certified.msi - tmp_dir: C:\\Windows\\Temp diff --git a/contrib/roles/windows/ovn-kubernetes/tasks/distribute_binaries.yml b/contrib/roles/windows/ovn-kubernetes/tasks/distribute_binaries.yml deleted file mode 100644 index e6e4db376e..0000000000 --- a/contrib/roles/windows/ovn-kubernetes/tasks/distribute_binaries.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -- name: ovn-kubernetes | Create kubernetes dir - win_file: - path: "{{ download_info.install_path }}" - state: directory - -- name: ovn-kubernetes | Create CNI dir - win_file: - path: "{{ download_info.install_path }}/cni" - state: directory - -- name: ovn-kubernetes | get kubernetes binaries - win_copy: - src: "{{ansible_tmp_dir}}/{{item}}" - dest: "{{ download_info.install_path }}/{{item}}" - with_items: - - ovnkube.exe - - kubectl.exe - - kubelet.exe - -- name: ovn-kubernetes | get cni binary - win_copy: - src: "{{ansible_tmp_dir}}/ovn-k8s-cni-overlay.exe" - dest: "{{ download_info.install_path }}\\cni\\ovn-k8s-cni-overlay.exe" diff --git a/contrib/roles/windows/ovn-kubernetes/tasks/main.yml b/contrib/roles/windows/ovn-kubernetes/tasks/main.yml deleted file mode 100644 index e6d6ee4d4f..0000000000 --- a/contrib/roles/windows/ovn-kubernetes/tasks/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- name: ovn-kubernetes | include vars - include_vars: "{{ ansible_os_family|lower }}.yml" - -- name: ovn-kubernetes | fetch binaries from the master node - include_tasks: ./distribute_binaries.yml diff --git a/contrib/roles/windows/ovn-kubernetes/vars/windows.yml b/contrib/roles/windows/ovn-kubernetes/vars/windows.yml deleted file mode 100644 index 3215f4db9a..0000000000 --- 
a/contrib/roles/windows/ovn-kubernetes/vars/windows.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -download_info: - install_path: "{{install_path | default('C:/kubernetes')}}" diff --git a/contrib/roles/windows/requirements/tasks/fetch_utils.yml b/contrib/roles/windows/requirements/tasks/fetch_utils.yml deleted file mode 100644 index 5ef73610bb..0000000000 --- a/contrib/roles/windows/requirements/tasks/fetch_utils.yml +++ /dev/null @@ -1,66 +0,0 @@ ---- -- name: Windows | Create {{ install_path }} dir - win_file: - path: "{{ install_path }}" - state: directory - -- name: Windows | Check if the service wrapper exists - win_stat: - path: "{{ install_path }}/servicewrapper.exe" - register: servicewrapper_info - -- name: Windows | Get the service wrapper - block: - - name: Windows | Download the service wrapper zip file - win_get_url: - url: "{{ service_wrapper_link }}" - dest: "{{ install_path }}/service-wrapper.zip" - timeout: 60 - retries: 3 - - - name: Windows | Unzip the service-wrapper zip archive - win_unzip: - src: "{{ install_path }}/service-wrapper.zip" - dest: "{{ install_path }}/service-wrapper" - - - name: Windows | Move the service wrapper to the expected location - win_copy: - src: "{{ install_path }}/service-wrapper/service-wrapper.exe" - dest: "{{ install_path }}/servicewrapper.exe" - remote_src: yes - - - name: Windows | Remove service-wrapper temporary files - win_file: - state: absent - path: "{{ item }}" - with_items: - - "{{ install_path }}/service-wrapper.zip" - - "{{ install_path }}/service-wrapper" - when: not servicewrapper_info.stat.exists - -- name: Windows | Get the PSTools from sysinternals (used for SDN setup later on) - block: - - name: Windows | Remove existing PSTools - win_file: - state: absent - path: "{{ item }}" - with_items: - - "{{ install_path }}/PSTools.zip" - - "{{ install_path }}/PSTools" - - - name: Windows | Fetch PSTools.zip from {{ ps_tools_link }} - win_get_url: - url: "{{ ps_tools_link }}" - dest: "{{ install_path 
}}/PSTools.zip" - retries: 10 - delay: 5 - - - name: Windows | Unzip PSTools.zip archive - win_unzip: - src: "{{ install_path }}/PSTools.zip" - dest: "{{ install_path }}/PSTools" - - - name: Windows | Remove PSTools.zip archive - win_file: - state: absent - path: "{{ install_path }}/PSTools.zip" diff --git a/contrib/roles/windows/requirements/tasks/main.yml b/contrib/roles/windows/requirements/tasks/main.yml deleted file mode 100644 index 14822c5d0b..0000000000 --- a/contrib/roles/windows/requirements/tasks/main.yml +++ /dev/null @@ -1,52 +0,0 @@ ---- -- name: Windows | include vars - include_vars: "{{ ansible_os_family|lower }}.yml" - -- name: Windows | Expect reboot_required to false - set_fact: - reboot_required: false - -- name: Windows | Get the IPv6 disable flags - win_reg_stat: - path: '{{ ipv6.reg_path }}' - name: '{{ ipv6.disable_flags_reg_name }}' - register: ipv6_flags - -- name: Windows | Disable IPv6 (if not already disabled) - block: - - name: Windows | Set the IPv6 disable flags - win_regedit: - path: '{{ ipv6.reg_path }}' - name: '{{ ipv6.disable_flags_reg_name }}' - data: '{{ ipv6.disable_flags_reg_value }}' - type: dword - - name: Windows | Set reboot_required to true for the IPv6 disable - set_fact: - reboot_required: true - when: (not ipv6_flags.exists or - ipv6_flags.type != 'REG_DWORD' or - ipv6_flags.raw_value != ipv6.disable_flags_reg_value) - -- name: Windows | Installing Required features - win_feature: - name: "{{item}}" - state: present - register: features_installed - with_items: - - Containers - -- name: Windows | Checking if reboot_required - set_fact: - reboot_required: true - with_items: - - "{{features_installed.results}}" - loop_control: - label: "{{item.item}}" - when: item.reboot_required - -- name: Windows | Reboot the node - win_reboot: - when: reboot_required - -- name: Windows | Fetch Windows utils - include_tasks: ./fetch_utils.yml diff --git a/contrib/roles/windows/requirements/vars/windows.yml 
b/contrib/roles/windows/requirements/vars/windows.yml deleted file mode 100644 index 7390cf0cde..0000000000 --- a/contrib/roles/windows/requirements/vars/windows.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -ipv6: - reg_path: HKLM:\SYSTEM\CurrentControlSet\Services\Tcpip6\Parameters - disable_flags_reg_name: DisabledComponents - disable_flags_reg_value: 4294967295 # Integer value for: 0xffffffff - -ps_tools_link: https://download.sysinternals.com/files/PSTools.zip diff --git a/contrib/roles/windows/validation/tasks/main.yml b/contrib/roles/windows/validation/tasks/main.yml deleted file mode 100644 index b091bc36cb..0000000000 --- a/contrib/roles/windows/validation/tasks/main.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -- name: Windows validation | include vars - include_vars: "{{ ansible_os_family | lower }}.yml" - -- name: Windows validation | Validate the services success state - include: "./validate_service.yml service_name={{ item }}" - with_items: "{{ service_names }}" diff --git a/contrib/roles/windows/validation/tasks/validate_service.yml b/contrib/roles/windows/validation/tasks/validate_service.yml deleted file mode 100644 index 216c0962b4..0000000000 --- a/contrib/roles/windows/validation/tasks/validate_service.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- name: Windows validate service | Get service info - win_service: - name: "{{ service_name }}" - register: service_stat - -- name: Windows validate service | Fail if the service doesn't exist - fail: - msg: "The service {{ service_name }} doesn't exist" - when: not service_stat.exists - -- name: Windows validate service | Fail if the service is not running - fail: - msg: "The service {{ service_name }} is not running. Current state: {{ service_stat.state }}" - when: service_stat.state != "running" - -- name: Windows validate service | Fail if the service start mode is not auto (starts at boot time) - fail: - msg: "The service {{ service_name }} start mode is not auto (starts at boot time). 
Current start mode: {{ service_stat.start_mode }}" - when: service_stat.start_mode != "auto" diff --git a/contrib/roles/windows/validation/vars/windows.yml b/contrib/roles/windows/validation/vars/windows.yml deleted file mode 100644 index 0174c61319..0000000000 --- a/contrib/roles/windows/validation/vars/windows.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -service_names: - - "Docker" - - "kubelet" - - "ovn-controller" - - "ovn-kubernetes-node" - - "ovs-vswitchd" - - "ovsdb-server" diff --git a/contrib/roles/windows/version_check/tasks/main.yml b/contrib/roles/windows/version_check/tasks/main.yml deleted file mode 100644 index f628ceecad..0000000000 --- a/contrib/roles/windows/version_check/tasks/main.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: ovn-kubernetes | include vars - include_vars: "{{ ansible_os_family|lower }}.yml" - -- name: version check | Check if the Windows version is supported by the playbooks - fail: - msg: The system distribution is not supported yet by the playbooks - when: ansible_kernel not in supported_versions_by_the_playbook diff --git a/contrib/roles/windows/version_check/vars/windows.yml b/contrib/roles/windows/version_check/vars/windows.yml deleted file mode 100644 index 122e12e160..0000000000 --- a/contrib/roles/windows/version_check/vars/windows.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -supported_versions_by_the_playbook: - - "{{windows1709}}" - - "{{windows1803}}" - - "{{windows2019}}" diff --git a/dist/Makefile b/dist/Makefile deleted file mode 100644 index 82db96565a..0000000000 --- a/dist/Makefile +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (C) 2018 Red Hat Inc. -# -# Copying and distribution of this file, with or without modification, -# are permitted in any medium without royalty provided the copyright -# notice and this notice are preserved. This file is offered as-is, -# without warranty of any kind. - -# -# The rpms can be made from a release version, make rpm, or from a -# commit, make snapshot. 
In either case the spec file needs to be -# edited for the version or commit number. -# - -# Build distribution SRPM and RPMs based on version number specified -# in the openvswitch-ovn-kubernetes.spec file -rpms: srpm rpm - -# Build snapshot RPMs based on the commit number in the -# the openvswitch-ovn-kubernetes.spec.snapshot file -# The commit must be in https://github.com/ovn-org/ovn-kubernetes -snapshot: rpmsnap - -dist: openvswitch-ovn-kubernetes.spec - spectool -g openvswitch-ovn-kubernetes.spec - -srpm: dist - rpmbuild --define "_sourcedir `pwd`" --define "_specdir `pwd`" \ - --define "_rpmdir `pwd`" --define "_srcrpmdir `pwd`" \ - -bs openvswitch-ovn-kubernetes.spec -rpm: dist - rpmbuild --define "_sourcedir `pwd`" --define "_specdir `pwd`" \ - --define "_rpmdir `pwd`" --define "_srcrpmdir `pwd`" \ - -ba openvswitch-ovn-kubernetes.spec - -distsnap: openvswitch-ovn-kubernetes.spec.snapshot - spectool -g openvswitch-ovn-kubernetes.spec.snapshot - -rpmsnap: distsnap - rpmbuild --define "_sourcedir `pwd`" --define "_specdir `pwd`" \ - --define "_rpmdir `pwd`" --define "_srcrpmdir `pwd`" \ - -ba openvswitch-ovn-kubernetes.spec.snapshot - -.PHONY: ../go-controller/_output/go/bin/ovnkube - -../go-controller/_output/go/bin/ovnkube: - cd ../go-controller ; make - -container: ../go-controller/_output/go/bin/ovnkube - cp ../go-controller/_output/go/bin/* images - -.PHONY: clean -clean: - -rm -rf *~ \#* .#* - diff --git a/dist/READMEcontainer.md b/dist/READMEcontainer.md index ac90440734..1bde7b1dfe 100644 --- a/dist/READMEcontainer.md +++ b/dist/READMEcontainer.md @@ -95,23 +95,3 @@ for different clusters. It is convient to set up a docker registry for the cluster and add it to the /etc/containers/registries.conf file on each node in both the "registries:" and "insecure_registries:" sections. - -============================ -Cluster install: - -Follow the directions in the openshift documents to provision the hosts in the -cluster and install openshift. 
Make sure the cluster hosts file contains: -os_sdn_network_plugin_name='cni' - -When the install is complete, delete the ovs and openshift-sdn daemonsets. - -Run the cluster master: -$ ansible/scripts/ovn-setup.sh -script to to set up kubernetes configuration. Next: -$ kubectl create -f yaml/sdn-ovs.yaml -$ kubectl create -f yaml/ovskube-master.yaml -$ kubectl create -f yaml/ovskube.yaml - -Verify the install with -$ oc get nodes -All should show Ready. diff --git a/dist/READMEopenshiftdevpreview.md b/dist/READMEopenshiftdevpreview.md deleted file mode 100644 index f2319c3b8a..0000000000 --- a/dist/READMEopenshiftdevpreview.md +++ /dev/null @@ -1,132 +0,0 @@ -# OVN dev preview on OpenShift/origin/kubernetes - -You can preview ovn overlay networks on an OpenShift 3.11 cluster by replacing -the installed SDN: Multitenant software with openvswitch/ovn components. - -NOTES: -- This is Development Preview, not Production, and it will change over time. -- There is no upgrade path. New install only. -- This is limited to a single cluster master -- This is limited to a single cluster network cidr range. -- This is limited to OKD-3.11 (OKD-4.0 will build, install and work differently) - - -## Installation: - -Install OKD-3.11 as instructed in the OKD documentation. Make the following change -before running the cluster install playbook: -1. In the ansible host file for the cluster change: -``` -os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' -``` - to -``` -os_sdn_network_plugin_name='cni' -openshift_use_openshift_sdn='False' -``` - -When the cluster install is complete and the cluster is up there will -be no cluster networking. - -Clone ovn-kubernetes on a convenient host where you can run ansible-playbook: -``` -# git clone https://github.com/ovn-org/ovn-kubernetes -# cd ovn-kubernetes/dist/ansible -``` - -Edit the hosts file adding the name of the cluster master. - -Optionally, edit the name of the desired image into the daemonset -yaml files. 
All of the daemonsets use the same image. -The default is the community image. - -Provision the cluster for OVN: -``` -# ./run-playbook -``` - - -``` -# oc project -Using project "ovn-kubernetes" on server "https://wsfd-netdev22.ntdv.lab.eng.bos.redhat.com:8443". -# oc get nodes -NAME STATUS ROLES AGE VERSION -wsfd-netdev22.ntdv.lab.eng.bos.redhat.com Ready infra,master 43d v1.10.0+b81c8f8 -wsfd-netdev28.ntdv.lab.eng.bos.redhat.com Ready compute 43d v1.10.0+b81c8f8 -wsfd-netdev35.ntdv.lab.eng.bos.redhat.com Ready compute 43d v1.10.0+b81c8f8 -# oc get ds -NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE -ovnkube 2 2 2 2 2 22h -ovnkube-master 1 1 1 1 1 node-role.kubernetes.io/master=true 22h -ovs-ovn 3 3 3 3 3 16d -# oc get po -NAME READY STATUS RESTARTS AGE -ovnkube-5dq4z 1/1 Running 0 22h -ovnkube-j5r6d 1/1 Running 0 22h -ovnkube-master-jvdwc 1/1 Running 0 22h -ovs-ovn-g55jw 1/1 Running 0 16d -ovs-ovn-kk8l8 1/1 Running 0 16d -ovs-ovn-qfk5n 1/1 Running 0 16d -``` - -At this point ovn is providing networking for the cluster. - -## Images: - -There is a single docker image that is used in all of the ovn daemonsets. -All daemonset yaml files must be edited to reference the same desired image. -The images can be found in docker.io, one of the official OKD repositories or -they can be built in the openvswitch/ovn-kubernetes git repo. - -The OKD image is built in the openshift/ose-ovn-kubernetes repo from rhel:7 -with openvswitch from the fastdatapath repo. -The default community image is built from centos:7 with openvswitch from -http://cbs.centos.org/kojifiles/packages/. It can also be built from fedora:28 -with openvswitch from fedora. 
- -The OKD image is available in the following: -``` -registry.access.redhat.com/ -brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/ -aws.openshift.com:443/ -``` -The OKD 3.11 image name includes the build tag: -``` -openshift3/ose-ovn-kubernetes:v3.11 -``` - -The community image based on current development is: -``` -docker.io/ovnkube/ovn-daemonset:latest -``` -The daemonset yaml files reference the community image. - - -Alternatively, the image can be built from the ovn-kubernetes repo. -When doing this edit the Makefile to itag and push the image to your existing -docker registry. Edit the daemonset yaml files to reference the image. - -1. build the ovn binaries, copy them to the Dockerfile directory and build the image, -tag and push it to your registry: -``` -$ cd ovn-kubernetes/dist/images -$ make -``` - -or - -``` -$ make fedora -``` - -In a development cycle, a new image can be built and pushed and the ovnkube-master and ovnkube daemonsets -can be deleted and recreated. - -``` -# cd ovn-kubernetes/dist/yaml -# oc project ovn-kubernetes -# oc delete -f ovnkube.yaml -# oc delete -f ovnkube-master.yaml -# oc create -f ovnkube-master.yaml -# oc create -f ovnkube.yaml -``` diff --git a/dist/READMEopenshifttechpreview.md b/dist/READMEopenshifttechpreview.md deleted file mode 100644 index 9b6222a598..0000000000 --- a/dist/READMEopenshifttechpreview.md +++ /dev/null @@ -1,341 +0,0 @@ -# OVN tech preview on OpenShift/origin/kubernetes - -NOTE: -- This is a temporary approach to working with the tech preview. Ultimately -4.0 will install using the network operator. - -You can preview ovn overlay networks on an OpenShift 4.0 cluster by replacing -the installed CNI plugin with OVN. -A v3.11 cluster installs with no plugin and 4.0 (currently) installs OpenShiftSDN. - -NOTES: -- This is Tech Preview, not Production, and it will change over time. -- There is no upgrade path. New install only. -- This is limited to a single master in the cluster (no high availability). 
- - -## Installation: - -Install OKD-v4.0 as instructed in the OKD documentation. More TBD. - - -Install OKD-v3.11 as instructed in the OKD documentation. Make the following change -before running the cluster install playbook: -1. In the ansible host file for the cluster change: -``` -os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' -``` - to -``` -os_sdn_network_plugin_name='cni' -openshift_use_openshift_sdn='False' -``` - -When the cluster install is complete and the cluster is up there will -be no cluster networking. - - -Clone ovn-kubernetes on a convenient host where you can run ansible-playbook: -``` -# git clone https://github.com/ovn-org/ovn-kubernetes -# cd ovn-kubernetes/dist/ansible -``` - -Edit the hosts file adding the name of the cluster master and select the ovn_image -and ovn_image_pull_policy. The default is the community image. -Also, you can specify a network cidr and service cidr, or just take the default. - -Provision the cluster for OVN: -``` -# ./run-playbook -``` - -OVN may be removed from the cluster by running: -``` -# ./run-playbook uninstall -``` - -The yaml/{ovnkube.yaml,ovnkube-master.yaml} files are now created as follows: -``` -# cd ../images -# make daemonsetyaml -``` -The daemonsets are now in template files that are expanded on the master. The -previous daemonsets in dist/yaml have been deleted and can be reconstructed -using the above make. - -``` -# oc project -Using project "ovn-kubernetes" on server "https://wsfd-netdev22.ntdv.lab.eng.bos.redhat.com:8443". 
-# oc get nodes -NAME STATUS ROLES AGE VERSION -wsfd-netdev22.ntdv.lab.eng.bos.redhat.com Ready infra,master 43d v1.10.0+b81c8f8 -wsfd-netdev28.ntdv.lab.eng.bos.redhat.com Ready compute 43d v1.10.0+b81c8f8 -wsfd-netdev35.ntdv.lab.eng.bos.redhat.com Ready compute 43d v1.10.0+b81c8f8 -# oc get all -NAME READY STATUS RESTARTS AGE -pod/ovnkube-master-85bfb958f9-62dmq 4/4 Running 0 1h -pod/ovnkube-node-c5vp6 3/3 Running 0 1h -pod/ovnkube-node-fv589 3/3 Running 0 1h -pod/ovnkube-node-l8pps 3/3 Running 0 1h - -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/ovnkube-master ClusterIP None 6641/TCP,6642/TCP 1h - -NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE -daemonset.apps/ovnkube-node 3 3 3 3 3 beta.kubernetes.io/os=linux 1h - -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -deployment.apps/ovnkube-master 1 1 1 1 1h - -NAME DESIRED CURRENT READY AGE -replicaset.apps/ovnkube-master-85bfb958f9 1 1 1 1h -``` - -At this point ovn is providing networking for the cluster. - -## Architecture: - -OVN has a single master that is run on one node and node support that is -run on every node. - -The master, ovnkube-master, selects the cluster master nodes. There can only be one OVN master, -however, the cluster can support multiple cluster masters. The OVN master runs in -a deployment with one replica. It selects from the cluster masters. The deployment -exports a headless service in which the end point "IP-OF-OVN-MASTER" is the running master. -Whenever the master is running the endpoint exists and points to the node running the OVN master. - -The OVN node is run from a daemonset, ovnkube-node, with one pod on each node. - -### ovnkube-master -The ovnkube-master has a single pod with a container for each process: - -- run-ovn-northd -- nb-ovsdb -- sb-ovsdb -- ovnkube-master - -This is done so that logs can be streamed to stdout and present with the "oc logs" -command. They also run independently and when on fails it is automatically restarted. 
- -The top three contianers above are the ovn northd daemons. They run on the master and -all the nodes access them through: -``` -# North db -tcp://:6441 -# South db -tcp://:6442 -``` - -The ovnkube-master container runs ovnkube in master mode. - -### ovnkube-node -The ovnkube-node pod on each node has containers for: - -- ovs-daemons -- ovn-controller -- ovn-node - -The ovs-daemons container includes ovs-vswitchd and ovsdb-server for the ovs database. - -The ovn-controller container runs ovnkube in --init-controller - -The ovn-node container runs ovnkube in --init-node mode. - -### Daemonset/Deployment - image dependency - -There is a single image that supports all of the OVN pods. The image has a startup -script, ovnkube.sh, that is the entry point for each container. So the image and -daemonset are dependent on each other. The daemonset version environment variable, -OVN_DAEMONSET_VERSION, passes the version to ovnkube.sh. The script has for support -for some of the daemonset versions (currently 1, 2, and 3). - -This is important because the daemonset and image come from different build -streams and they can get out of sync. - -## Images: - -There is a single container image that is used in all of the ovn daemonsets. -The desired image is entered in the dist/ansible/hosts file. - -Built images can be found in docker.io, one of the official OKD repositories or -a user provided image repository. - -A development image can be built in the openvswitch/ovn-kubernetes git repo, -taged, and pushed to a private image repo. - -The OCP image is built in the openshift/ose-ovn-kubernetes repo from rhel:7 -with whatever openvswitch version is in the fastdatapath repo. -The default community image is built from centos:7 with openvswitch 2.9.2 from -http://cbs.centos.org/kojifiles/packages/. The image can also -be built from fedora:28 with openvswitch from fedora. 
- -The OCP image is available in the following: -``` -registry.access.redhat.com/ -brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/ -aws.openshift.com:443/ -``` -The OKD 4.0 image name includes the build tag: -``` -openshift/ose-ovn-kubernetes:v4.0 -``` - -The community image based on current development is: -``` -docker.io/ovnkube/ovn-daemonset:latest -``` -The the community image is the default. This image is updated from time to time -and may get out of fsync. - - -Alternatively, the image can be built from the ovn-kubernetes repo. -When doing this, edit the Makefile to tag and push the image to your existing -docker registry. Edit the ansible/hosts file to reference the image. - -1. build the ovn binaries, copy them to the Dockerfile directory and build the image, -tag and push it to your registry: -``` -$ cd ovn-kubernetes/dist/images -$ make -``` - -or - -``` -$ make fedora -``` - -## Development Cycle: -In a development cycle, a new image can be built and pushed and the ansible -scripts can uninstall and reinstall ovn on the cluster. - -``` -# cd ansible -# ./run-playbook uninstall -# ./run-playbook -``` - -Alternatively once the daemonsets are running, the pods can be deleted. They -will be automatically created using the new image. -``` -# oc project ovn-kubernetes -# oc get po -NAME READY STATUS RESTARTS AGE -ovnkube-master-85bfb958f9-62dmq 4/4 Running 0 3h -ovnkube-node-c5vp6 3/3 Running 0 3h -ovnkube-node-fv589 3/3 Running 0 3h -ovnkube-node-l8pps 3/3 Running 0 3h -# oc delete po ovnkube-master-85bfb958f9-62dmq ovnkube-node-c5vp6 ovnkube-node-fv589 ovnkube-node-l8pps -``` - -## Debugging Aids - -The ovnkube-node pod has the following containers: ovs-daemons ovn-controller -ovn-node The ovnkube-master pod has the following containers: run-ovn-northd -nb-ovsdb sb-ovsdb ovnkube-master - -Logs from the containers can be viewed using the "kubectl logs" command. Each -container writes output to stdout and the results are displayed -using "kubectl logs". 
The log may be truncated but the full log is available -by rsh into the container and runnning ./ovnkube.sh display in the container -(see below). - -The log is on a container basis so the logs can be shown using: -``` -On each node pod: -# kubectl logs -c ovs-daemons ovnkube-node-c5vp6 -# kubectl logs -c ovn-controller ovnkube-node-c5vp6 -# kubectl logs -c ovn-node ovnkube-node-c5vp6 - -On each master pod: -# kubectl logs -c run-ovn-northd ovnkube-master-85bfb958f9-62dmq -# kubectl logs -c nb-ovsdb ovnkube-master-85bfb958f9-62dmq -# kubectl logs -c sb-ovsdb ovnkube-master-85bfb958f9-62dmq -# kubectl logs -c ovnkube-master ovnkube-master-85bfb958f9-62dmq -``` -There is a convenience scripton the master, $HOME/ovn/ovn-logs that extracts logs -for all ovn pods in the cluster. The optional parameter will just display the -desired pod. The script is installed on the master by ./run-playbook. -``` -# $HOME/ovn/ovn-logs -# $HOME/ovn/ovn-logs ovnkube-node-c5vp6 -``` - - -The full logs are available using: -``` -# oc rsh -c ovs-daemons ovnkube-node-c5vp6 ./ovnkube.sh display -``` -Where the container names and pods are as described above. In the following -the commit is the commit number that was built into the image. In the contianer -the /root/.git/* directories are copied from the github repo. This can be used -to match the image to a specific commit. - -There is a convenience script, $HOME/ovn/ovn-display that extracts the complete logs -for all ovn pods in the cluster. The optional parameter will just display the -desired pod. The script is installed on the master by ./run-playbook. -``` -# $HOME/ovn/ovn-display -# $HOME/ovn/ovn-display ovnkube-node-c5vp6 -``` - -The display includes information on the image, daemonset and cluster in addition to the log. 
For example, -``` -================== ovnkube.sh version 3 ================ - ==================== command: display - =================== hostname: wsfd-netdev22.ntdv.lab.eng.bos.redhat.com - =================== daemonset version 3 - =================== Image built from ovn-kubernetes ref: refs/heads/ovn-v3 commit: eacdf15c917e1bb49d06047711dab920d72af178 -OVS_USER_ID root:root -OVS_OPTIONS -OVN_NORTH tcp://10.19.188.9:6641 -OVN_NORTHD_OPTS --db-nb-sock=/var/run/openvswitch/ovnnb_db.sock --db-sb-sock=/var/run/openvswitch/ovnsb_db.sock -OVN_SOUTH tcp://10.19.188.9:6642 -OVN_CONTROLLER_OPTS --ovn-controller-log=-vconsole:emer -OVN_NET_CIDR 10.128.0.0/14/24 -OVN_SVC_CIDR 172.30.0.0/16 -K8S_APISERVER https://wsfd-netdev22.ntdv.lab.eng.bos.redhat.com:8443 -OVNKUBE_LOGLEVEL 4 -OVN_DAEMONSET_VERSION 3 -ovnkube.sh version 3 -==================== display for wsfd-netdev22.ntdv.lab.eng.bos.redhat.com =================== -Wed Nov 14 20:14:18 UTC 2018 -====================== run-ovn-northd pid -10072 -====================== run-ovn-northd log -2018-11-14T17:59:07.917Z|00001|vlog|INFO|opened log file /var/log/openvswitch/ovn-northd.log -2018-11-14T17:59:07.918Z|00002|reconnect|INFO|unix:/var/run/openvswitch/ovnnb_db.sock: connecting... -2018-11-14T17:59:07.918Z|00003|reconnect|INFO|unix:/var/run/openvswitch/ovnnb_db.sock: connection attempt failed (Connection refused) -... -``` - -The ovn configuration for each of the ovn pods can be extracted using: -``` -# oc rsh -c ovs-daemons ovnkube-node-c5vp6 ./ovnkube.sh ovn_debug -``` -Where the container names and pods are as described above. - -There is a convenience script, $HOME/ovn/ovn-debug that extracts the complete logs -for all ovn pods in the cluster. The optional parameter will just display the -desired pod. The script is installed on the master by ./run-playbook. 
-``` -# $HOME/ovn/ovn-debug -# $HOME/ovn/ovn-debug ovnkube-node-c5vp6 -``` - -The reported ovn configuration includes: -- ovn-nbctl show -- ovn-nbctl list ACL -- ovn-nbctl list address_set -- ovs-vsctl show -- ovs-ofctl -O OpenFlow13 dump-ports br-int -- ovs-ofctl -O OpenFlow13 dump-ports-desc br-int -- ovs-ofctl dump-flows br-int - -On the Master: -- ovn-sbctl show -- ovn-sbctl lflow-list -- ovn-sbctl list datapath -- ovn-sbctl list port - diff --git a/dist/ansible/hosts b/dist/ansible/hosts deleted file mode 100644 index 12415effab..0000000000 --- a/dist/ansible/hosts +++ /dev/null @@ -1,43 +0,0 @@ -# ansible hosts file - - -[OSEv3:children] -masters -nodes - -[OSEv3:vars] - -# These variables may be added to the host file used to install the cluster - -# This is the community build (default) -ovn_image="docker.io/ovnkube/ovn-daemonset:latest" -# This is an official redhat build -# ovn_image="registry.access.redhat.com/openshift3/ose-ovn-kubernetes:v4.0" -# This is an official (internal) redhat build -# ovn_image="brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/openshift3/ose-ovn-kubernetes:v4.0" -# This is an official image on aws -# ovn_image="registry.reg-aws.openshift.com:443/openshift3/ose-ovn-kubernetes:v4.0" - -ovn_image_pull_policy=IfNotPresent -# ovn_image_pull_policy=Always - -ovn_kube_master_log_level=4 - -# network configuration -# net_cidr=10.128.0.0/14/23 -# svc_cidr=172.30.0.0/16 - - -[masters] - -# This should be the master node form the cluster install. 
- -# result of `hostname` on master node -#wsfd-netdev22.ntdv.lab.eng.bos.redhat.com - -[nodes] - -# list of all of the nodes -#wsfd-netdev22.ntdv.lab.eng.bos.redhat.com -#wsfd-netdev28.ntdv.lab.eng.bos.redhat.com -#wsfd-netdev35.ntdv.lab.eng.bos.redhat.com diff --git a/dist/ansible/ovn-playbook.yaml b/dist/ansible/ovn-playbook.yaml deleted file mode 100644 index fefa08ed91..0000000000 --- a/dist/ansible/ovn-playbook.yaml +++ /dev/null @@ -1,84 +0,0 @@ -# Install ovn onto cluster -# This is done when the cluster is up with no networking - -# On a convienent host, clone ovn-kubernetes cd to dist/ansible -# git clone https://github.com/ovn-org/ovn-kubernetes ~/ovn-kubernetes -# cd ~/ovn-kubernetes/dist/ansible - -# edit hosts file for the hostname of the master - -# ./run-playbook - -- hosts: nodes - become: yes - become_user: root - tasks: - - name: copy tmpfile.d file - template: src=../templates/cleanup-ovn-cni.conf.j2 dest=/usr/lib/tmpfiles.d/cleanup-ovn-cni.conf - -- hosts: masters - become: yes - become_user: root - tasks: - # OVN data is persistent, so delete the database - - name: Remove OVS DB Content - file: - state: absent - name: /var/lib/openvswitch/ovnnb_db.db - - name: Remove OVS DB Content - file: - state: absent - name: /var/lib/openvswitch/ovnsb_db.db - - # copy yaml files to master - - name: Make dirctory /root/ovn/yaml - file: - state: directory - path: /root/ovn/yaml - - name: Make directory /root/ovn/scripts - file: - state: directory - path: /root/ovn/scripts - - name: Copy ovn-logs - copy: - src: scripts/ovn-logs - dest: /root/ovn/ovn-logs - mode: 0755 - - name: Copy ovn-display - copy: - src: scripts/ovn-display - dest: /root/ovn/ovn-display - mode: 0755 - - name: Copy ovn-debug - copy: - src: scripts/ovn-debug - dest: /root/ovn/ovn-debug - mode: 0755 - - name: Copy ovnkube-master.yaml - template: src=../templates/ovnkube-master.yaml.j2 dest=/root/ovn/yaml/ovnkube-master.yaml - - name: Copy ovnkube-node.yaml - template: 
src=../templates/ovnkube-node.yaml.j2 dest=/root/ovn/yaml/ovnkube-node.yaml - - - name: Get the k8s_apiserver - shell: grep server /etc/origin/node/node.kubeconfig | awk '{ print $2 }' - register: k8s_apisvr - - set_fact: k8s_apiserver={{ k8s_apisvr }} - - name: Set up ovn - template: src=../templates/ovn-setup.yaml.j2 dest=/root/ovn/yaml/ovn-setup.yaml - - - name: Provision the OVN - shell: oc create -f /root/ovn/yaml/ovn-setup.yaml - ignore_errors: yes - - name: Provision the OVN Project - shell: oc project ovn-kubernetes - ignore_errors: yes - - name: Add adm policy auyuid - shell: oc adm policy add-scc-to-user anyuid -z ovn - ignore_errors: yes - - - name: Start the node ovn daemonset on all nodes - shell: oc create -f /root/ovn/yaml/ovnkube-node.yaml - ignore_errors: yes - - name: Start the master ovn daemonset - shell: oc create -f /root/ovn/yaml/ovnkube-master.yaml - ignore_errors: yes diff --git a/dist/ansible/ovn-uninstall.yaml b/dist/ansible/ovn-uninstall.yaml deleted file mode 100644 index 35436fc79e..0000000000 --- a/dist/ansible/ovn-uninstall.yaml +++ /dev/null @@ -1,75 +0,0 @@ -# Unnstall ovn -# This is done when installed ovn is no longer needed - -# On a convienent host, clone ovn-kubernetes cd to dist/ansible -# git clone https://github.com/ovn-org/ovn-kubernetes ~/ovn-kubernetes -# cd ~/ovn-kubernetes/dist/ansible - -# edit hosts file for the hostname of the master - -# ./run-playbook - -- hosts: masters - become: yes - become_user: root - tasks: - - name: Delete the OVN Namespace - shell: oc delete -f /root/ovn/yaml/ovn-setup.yaml - ignore_errors: yes - - # remove /root/ovn - - name: Remove /root/ovn - shell: rm -rf /root/ovn/* - - # OVN data is persistent, so delete the database - - name: Remove OVN nb DB Content - file: - state: absent - name: /var/lib/openvswitch/ovnnb_db.db - - name: Remove OVN sb DB Content - file: - state: absent - name: /var/lib/openvswitch/ovnsb_db.db - - # iptables -D INPUT -p tcp -m state --state NEW -m tcp --dport 6641 
-j ACCEPT - - name: iptables allow 6641 - shell: iptables -D INPUT -p tcp -m tcp --dport 6641 -m conntrack --ctstate NEW -j ACCEPT - ignore_errors: yes - - # iptables -D INPUT -p tcp -m state --state NEW -m tcp --dport 6642 -j ACCEPT - - name: iptables allow 6642 - shell: iptables -D INPUT -p tcp -m tcp --dport 6642 -m conntrack --ctstate NEW -j ACCEPT - ignore_errors: yes - -# - name: Add adm policy auyuid -# shell: oc adm policy add-scc-to-user anyuid -z ovn -# ignore_errors: yes - - -- hosts: nodes - become: yes - become_user: root - tasks: - - name: Remove remove the cni plugin config - file: - state: absent - name: /etc/cni/net.d/10-ovn-kubernetes.conf - - name: Remove ovn-k8s-cni-overlay - file: - state: absent - name: /opt/cni/bin/ovn-k8s-cni-overlay - - - name: Remove old log files - shell: rm -rf /var/log/openvswitch/ovn*log /var/log/openvswitch/ovs*log - - - name: remove old unix sockets - shell: rm -rf /var/run/openvswitch/ovn*ctl /var/run/openvswitch/ovn*pid /var/run/openvswitch/ovs*ctl /var/run/openvswitch/ovs*pid /var/run/openvswitch/ovn?b_db.sock - - - name: Remove OVS DB Content - file: - state: absent - name: /etc/origin/openvswitch/conf.db - - name: Remove OVS system id Content - file: - state: absent - name: /etc/origin/openvswitch/system-id.conf diff --git a/dist/ansible/run-playbook b/dist/ansible/run-playbook deleted file mode 100755 index 86fab797ed..0000000000 --- a/dist/ansible/run-playbook +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -# clone ovn-kubernetes on master and cd to dist/ansible -# git clone https://github.com/ovn-org/ovn-kubernetes ~/ovn-kubernetes -# cd ~/ovn-kubernetes/dist/ansible -# edit hosts for hostname of master and desired image - -# ./run-playbook -# ./run-playbook install # The default -# ./run-playbook uninstall # Remove ovn components - -cmd=${1:-install} - -case ${cmd} in - install ) - ansible-playbook -i hosts ovn-playbook.yaml - ;; - uninstall ) - ansible-playbook -i hosts ovn-uninstall.yaml - ;; - * ) - echo 
"./run-playbook [|install|uninstall]" - echo "./run-playbook default is install" - ;; -esac - - diff --git a/dist/ansible/scripts/ovn-debug b/dist/ansible/scripts/ovn-debug deleted file mode 100755 index 696b53748d..0000000000 --- a/dist/ansible/scripts/ovn-debug +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash -#set -x - -# ./ovn-debug [pod] -# By default all ovn pods are processed - -con_node="ovs-daemons ovn-controller ovn-node" -con_master="run-ovn-northd nb-ovsdb sb-ovsdb ovnkube-master" - -if [[ $1 != "" ]] ; then - echo $1 | grep master > /dev/null 2>&1 - if [[ $? == 0 ]] ; then - masters=$1 - else - nodes=$1 - fi -else - nodes=$(oc get po -n ovn-kubernetes | grep ovnkube | grep -v ovnkube-master | awk '{ print $1 }') - masters=$(oc get po -n ovn-kubernetes | awk '/ovnkube-master/{print $1}') -fi - -oc get po -n ovn-kubernetes -o wide - -con=run-ovn-northd -for m in ${masters}; do - echo "==============================================================================" - echo "==============================================================================" - echo "==============================================================================" - echo "========================================= oc logs -n ovn-kubernetes -c ${con} ${m} ./ovnkube.sh ovn_debug" - echo "==============================================================================" - oc rsh -n ovn-kubernetes -c ${con} ${m} ./ovnkube.sh ovn_debug - echo " " - echo " " -done - -echo " " -echo " " -echo " " -echo " " - -con=ovn-node -for n in ${nodes}; do - echo "==============================================================================" - echo "==============================================================================" - echo "==============================================================================" - echo "========================================= oc logs -n ovn-kubernetes -c ${con} ${n} ./ovnkube.sh ovn_debug" - echo 
"==============================================================================" - oc rsh -n ovn-kubernetes -c ${con} ${n} ./ovnkube.sh ovn_debug - echo " " - echo " " -done - -exit 0 diff --git a/dist/ansible/scripts/ovn-display b/dist/ansible/scripts/ovn-display deleted file mode 100755 index d97d691839..0000000000 --- a/dist/ansible/scripts/ovn-display +++ /dev/null @@ -1,55 +0,0 @@ -#!/bin/bash -#set -x - -# ./ovn-display [pod] -# By defaut all ovn pods are processed, both nodes and masters - -con_node="ovs-daemons ovn-controller ovn-node" -con_master="run-ovn-northd nb-ovsdb sb-ovsdb ovnkube-master" - -if [[ $1 != "" ]] ; then - echo $1 | grep master > /dev/null 2>&1 - if [[ $? == 0 ]] ; then - masters=$1 - else - nodes=$1 - fi -else - nodes=$(oc get po -n ovn-kubernetes | grep ovnkube | grep -v ovnkube-master | awk '{ print $1 }') - masters=$(oc get po -n ovn-kubernetes | awk '/ovnkube-master/{print $1}') -fi - -oc get po -n ovn-kubernetes -o wide - -for m in ${masters}; do - for c in ${con_master}; do - echo "==============================================================================" - echo "==============================================================================" - echo "==============================================================================" - echo "========================================= oc rsh -n ovn-kubernetes -c ${c} ${m} ./ovnkube.sh display" - echo "==============================================================================" - oc rsh -n ovn-kubernetes -c ${c} ${m} ./ovnkube.sh display - echo " " - echo " " - done -done - -echo " " -echo " " -echo " " -echo " " - -for n in ${nodes}; do - for c in ${con_node}; do - echo "==============================================================================" - echo "==============================================================================" - echo "==============================================================================" - echo "========================================= oc rsh -n 
ovn-kubernetes -c ${c} ${n} ./ovnkube.sh display" - echo "==============================================================================" - oc rsh -n ovn-kubernetes -c ${c} ${n} ./ovnkube.sh display - echo " " - echo " " - done -done - -exit 0 diff --git a/dist/ansible/scripts/ovn-logs b/dist/ansible/scripts/ovn-logs deleted file mode 100755 index c981764e74..0000000000 --- a/dist/ansible/scripts/ovn-logs +++ /dev/null @@ -1,55 +0,0 @@ -#!/bin/bash -#set -x - -# ./ovn-logs [pod] -# By defaut all ovn pods are processed, both nodes and masters - -con_node="ovs-daemons ovn-controller ovn-node" -con_master="run-ovn-northd nb-ovsdb sb-ovsdb ovnkube-master" - -if [[ $1 != "" ]] ; then - echo $1 | grep master > /dev/null 2>&1 - if [[ $? == 0 ]] ; then - masters=$1 - else - nodes=$1 - fi -else - nodes=$(oc get po -n ovn-kubernetes | grep ovnkube | grep -v ovnkube-master | awk '{ print $1 }') - masters=$(oc get po -n ovn-kubernetes | awk '/ovnkube-master/{print $1}') -fi - -oc get po -n ovn-kubernetes -o wide - -for m in ${masters}; do - for c in ${con_master}; do - echo "==============================================================================" - echo "==============================================================================" - echo "==============================================================================" - echo "========================================= oc logs -n ovn-kubernetes -c ${c} ${m}" - echo "==============================================================================" - oc logs -n ovn-kubernetes -c ${c} ${m} - echo " " - echo " " - done -done - -echo " " -echo " " -echo " " -echo " " - -for n in ${nodes}; do - for c in ${con_node}; do - echo "==============================================================================" - echo "==============================================================================" - echo "==============================================================================" - echo "========================================= 
oc logs -n ovn-kubernetes -c ${c} ${n}" - echo "==============================================================================" - oc logs -n ovn-kubernetes -c ${c} ${n} - echo " " - echo " " - done -done - -exit 0 diff --git a/dist/files/ovn-kubernetes-master.service b/dist/files/ovn-kubernetes-master.service deleted file mode 100644 index 5e14219319..0000000000 --- a/dist/files/ovn-kubernetes-master.service +++ /dev/null @@ -1,15 +0,0 @@ -[Unit] -Description=ovn-kubernetes master service -Requires=openvswitch.service -After=openvswitch.service -Requires=ovn-northd.service -After=ovn-northd.service - -[Service] -Type=simple -EnvironmentFile=-/etc/sysconfig/ovn-kubernetes -ExecStart=/usr/bin/ovn-kubernetes-master.sh - -[Install] -WantedBy=multi-user.target - diff --git a/dist/files/ovn-kubernetes-master.sh b/dist/files/ovn-kubernetes-master.sh deleted file mode 100644 index 9cbea11b70..0000000000 --- a/dist/files/ovn-kubernetes-master.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -set -o errexit -set -o nounset -set -o pipefail - -source /etc/sysconfig/ovn-kubernetes - -function ovn-kubernetes-master() { - echo "Enable and start ovn-kubernetes master services" - /usr/bin/ovnkube \ - --cluster-subnets "${cluster_cidr}" \ - --init-master `hostname` -} - -ovn-kubernetes-master diff --git a/dist/files/ovn-kubernetes-node.service b/dist/files/ovn-kubernetes-node.service deleted file mode 100644 index 3ea918b063..0000000000 --- a/dist/files/ovn-kubernetes-node.service +++ /dev/null @@ -1,15 +0,0 @@ -[Unit] -Description=Provision ovn-kubernetes node service -Requires=openvswitch.service -After=openvswitch.service -Requires=ovn-controller.service -After=ovn-controller.service - -[Service] -Type=simple -EnvironmentFile=-/etc/sysconfig/ovn-kubernetes -ExecStart=/usr/bin/ovn-kubernetes-node.sh - -[Install] -WantedBy=multi-user.target - diff --git a/dist/files/ovn-kubernetes-node.sh b/dist/files/ovn-kubernetes-node.sh deleted file mode 100644 index ae59b4d099..0000000000 --- 
a/dist/files/ovn-kubernetes-node.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -set -o errexit -set -o nounset -set -o pipefail - -source /etc/sysconfig/ovn-kubernetes - -function ovn-kubernetes-node() { - - echo "Enable and start ovn-kubernetes node services" - /usr/bin/ovnkube \ - --cluster-subnets "${cluster_cidr}" \ - --init-node `hostname` -} - -ovn-kubernetes-node diff --git a/dist/files/ovn-kubernetes.sysconfig b/dist/files/ovn-kubernetes.sysconfig deleted file mode 100644 index 5b5382a3f5..0000000000 --- a/dist/files/ovn-kubernetes.sysconfig +++ /dev/null @@ -1,15 +0,0 @@ -# ovn-kubernetes config - all nodes and masters - -# This file configures the ovn cni plugin. When completed or changed -# the file must be copied to each ovn-master and each ovn-node. The -# ovn-kubernetes-master and ovn-kubernetes-node daemons must be restarted. - -# This file and /etc/openvswitch/ovn_k8s.conf, in combination, provide -# all of the configuration options. See man 5 ovn_k8s.conf - -# When changes are made, copy the file to each ovn-node and ovn-master -# and restart ovn-kubernetes-master.service and ovn-kubernetes-node.service - -# cluster_cidr - this contains the clusterNetworkCIDR from -# /etc/origin/master/master-config.yaml -# cluster_cidr=10.128.0.0/14 diff --git a/dist/images/Makefile b/dist/images/Makefile index 99d275102e..61f9b8d9eb 100644 --- a/dist/images/Makefile +++ b/dist/images/Makefile @@ -58,7 +58,7 @@ fedora-dev: bld --ovn-loglevel-nbctld="-vconsole:info" # This target expands the daemonset yaml templates into final form -# It gets the image name from ../ansible/hosts +# Use CLI flags or environment variables to customize its behavior. daemonsetyaml: ./daemonset.sh diff --git a/dist/images/daemonset.sh b/dist/images/daemonset.sh index c2e0a1d996..5f2bad48f3 100755 --- a/dist/images/daemonset.sh +++ b/dist/images/daemonset.sh @@ -4,7 +4,6 @@ #Always exit on errors set -e -# This is for people that are not using the ansible install. 
# The script renders j2 templates into yaml files in ../yaml/ # ensure j2 renderer installed @@ -22,7 +21,7 @@ OVN_GATEWAY_OPTS="" OVN_DB_VIP_IMAGE="" OVN_DB_VIP="" OVN_DB_REPLICAS="" -OVN_MTU="1400" +OVN_MTU="" KIND="" MASTER_LOGLEVEL="" NODE_LOGLEVEL="" @@ -102,32 +101,14 @@ while [ "$1" != "" ]; do shift done -# The options provided on the CLI overrides the values in ../ansible/hosts - # Create the daemonsets with the desired image -# The image name is from ../ansible/hosts -# The daemonset.yaml files are templates in ../ansible/templates # They are expanded into daemonsets in ../yaml -if [[ ${OVN_IMAGE} == "" ]]; then - image=$(awk -F = '/^ovn_image=/{ print $2 }' ../ansible/hosts | sed 's/\"//g') - if [[ ${image} == "" ]]; then - image="docker.io/ovnkube/ovn-daemonset:latest" - fi -else - image=$OVN_IMAGE -fi +image=${OVN_IMAGE:-"docker.io/ovnkube/ovn-daemonset:latest"} echo "image: ${image}" -if [[ ${OVN_IMAGE_PULL_POLICY} == "" ]]; then - policy=$(awk -F = '/^ovn_image_pull_policy/{ print $2 }' ../ansible/hosts) - if [[ ${policy} == "" ]]; then - policy="IfNotPresent" - fi -else - policy=$OVN_IMAGE_PULL_POLICY -fi -echo "imagePullPolicy: ${policy}" +image_pull_policy=${OVN_IMAGE_PULL_POLICY:-"IfNotPresent"} +echo "imagePullPolicy: ${image_pull_policy}" ovn_gateway_mode=${OVN_GATEWAY_MODE} echo "ovn_gateway_mode: ${ovn_gateway_mode}" @@ -163,7 +144,7 @@ ovn_hybrid_overlay_net_cidr=${OVN_HYBRID_OVERLAY_NET_CIDR} echo "ovn_hybrid_overlay_net_cidr: ${ovn_hybrid_overlay_net_cidr}" ovn_image=${image} \ - ovn_image_pull_policy=${policy} \ + ovn_image_pull_policy=${image_pull_policy} \ kind=${KIND} \ ovn_gateway_mode=${ovn_gateway_mode} \ ovn_gateway_opts=${ovn_gateway_opts} \ @@ -174,7 +155,7 @@ ovn_image=${image} \ j2 ../templates/ovnkube-node.yaml.j2 -o ../yaml/ovnkube-node.yaml ovn_image=${image} \ - ovn_image_pull_policy=${policy} \ + ovn_image_pull_policy=${image_pull_policy} \ ovnkube_master_loglevel=${master_loglevel} \ 
ovn_loglevel_northd=${ovn_loglevel_northd} \ ovn_loglevel_nbctld=${ovn_loglevel_nbctld} \ @@ -183,62 +164,39 @@ ovn_image=${image} \ j2 ../templates/ovnkube-master.yaml.j2 -o ../yaml/ovnkube-master.yaml ovn_image=${image} \ - ovn_image_pull_policy=${policy} \ + ovn_image_pull_policy=${image_pull_policy} \ ovn_loglevel_nb=${ovn_loglevel_nb} \ ovn_loglevel_sb=${ovn_loglevel_sb} \ j2 ../templates/ovnkube-db.yaml.j2 -o ../yaml/ovnkube-db.yaml ovn_db_vip_image=${ovn_db_vip_image} \ - ovn_image_pull_policy=${policy} \ + ovn_image_pull_policy=${image_pull_policy} \ ovn_db_replicas=${ovn_db_replicas} \ ovn_db_vip=${ovn_db_vip} ovn_loglevel_nb=${ovn_loglevel_nb} \ j2 ../templates/ovnkube-db-vip.yaml.j2 -o ../yaml/ovnkube-db-vip.yaml ovn_image=${image} \ - ovn_image_pull_policy=${policy} \ + ovn_image_pull_policy=${image_pull_policy} \ ovn_db_replicas=${ovn_db_replicas} \ ovn_db_minAvailable=${ovn_db_minAvailable} \ ovn_loglevel_nb=${ovn_loglevel_nb} ovn_loglevel_sb=${ovn_loglevel_sb} \ j2 ../templates/ovnkube-db-raft.yaml.j2 -o ../yaml/ovnkube-db-raft.yaml # ovn-setup.yaml -# net_cidr=10.128.0.0/14/23 -# svc_cidr=172.30.0.0/16 - -if [[ ${OVN_NET_CIDR} == "" ]]; then - net_cidr=$(awk -F = '/^net_cidr=/{ print $2 }' ../ansible/hosts) - if [[ ${net_cidr} == "" ]]; then - net_cidr="10.128.0.0/14/23" - fi -else - net_cidr=$OVN_NET_CIDR -fi - -if [[ ${OVN_SVC_CIDR} == "" ]]; then - svc_cidr=$(awk -F = '/^svc_cidr=/{ print $2 }' ../ansible/hosts) - if [[ ${svc_cidr} == "" ]]; then - svc_cidr="172.30.0.0/16" - fi -else - svc_cidr=$OVN_SVC_CIDR -fi - -k8s_apiserver=${OVN_K8S_APISERVER:-10.0.2.16:6443} - -net_cidr_repl="{{ net_cidr | default('10.128.0.0/14/23') }}" -svc_cidr_repl="{{ svc_cidr | default('172.30.0.0/16') }}" -k8s_apiserver_repl="{{ k8s_apiserver.stdout }}" -mtu_repl="{{ mtu_value }}" +net_cidr=${OVN_NET_CIDR:-"10.128.0.0/14/23"} +svc_cidr=${OVN_SVC_CIDR:-"172.30.0.0/16"} +k8s_apiserver=${OVN_K8S_APISERVER:-"10.0.2.16:6443"} +mtu=${OVN_MTU:-1400} echo "net_cidr: 
${net_cidr}" echo "svc_cidr: ${svc_cidr}" echo "k8s_apiserver: ${k8s_apiserver}" -echo "mtu: ${OVN_MTU}" +echo "mtu: ${mtu}" -sed "s,${net_cidr_repl},${net_cidr}, -s,${svc_cidr_repl},${svc_cidr}, -s,${mtu_repl},${OVN_MTU}, -s,${k8s_apiserver_repl},${k8s_apiserver}," ../templates/ovn-setup.yaml.j2 >../yaml/ovn-setup.yaml +sed "s,{{ net_cidr }},${net_cidr}, +s,{{ svc_cidr }},${svc_cidr}, +s,{{ mtu_value }},${mtu}, +s,{{ k8s_apiserver }},${k8s_apiserver}," ../templates/ovn-setup.yaml.j2 >../yaml/ovn-setup.yaml cp ../templates/ovnkube-monitor.yaml.j2 ../yaml/ovnkube-monitor.yaml diff --git a/dist/openvswitch-ovn-kubernetes.spec b/dist/openvswitch-ovn-kubernetes.spec deleted file mode 100644 index dc33440188..0000000000 --- a/dist/openvswitch-ovn-kubernetes.spec +++ /dev/null @@ -1,145 +0,0 @@ -%global project ovn-kubernetes -%global repo %{project} -%global debug_package %{nil} - -# some distros (e.g: RHEL-7) don't define _rundir macro yet -# Fedora 15 onwards uses /run as _rundir -%if 0%{!?_rundir:1} -%define _rundir /run -%endif - -# define the python package prefix based on distribution version so that we can -# simultaneously support RHEL-based and later Fedora versions in this spec file. 
-%if 0%{?fedora} >= 25 -%define _py2 python2 -%endif - -%if 0%{?rhel} || 0%{?fedora} < 25 -%define _py2 python -%endif - -Name: openvswitch-ovn-kubernetes -Version: 0.3.0 -Release: 1%{?dist} -URL: https://www.github.com/ovn-org/ovn-kubernetes -Summary: Open Virtual Networking Kubernetes Wedge - -License: ASL 2.0 -Source0: https://github.com/ovn-org/ovn-kubernetes/archive/v%{version}.tar.gz - -# golang not supported -ExcludeArch: ppc64 - -BuildRequires: %{_py2}-devel -%if 0%{?fedora} > 22 || %{with build_python3} -BuildRequires: python3-devel -%endif -BuildRequires: golang - -%description -This allows kubernetes to use Open Virtual Networking (OVN) - -%package master -Summary: ovn-kubernetes systemd for master -License: ASL 2.0 -#Requires: openvswitch-ovn-kubernetes systemd openvswitch - -%description master -This allows systemd to control ovn on the master - -%package node -Summary: ovn-kubernetes systemd for node -License: ASL 2.0 -#Requires: openvswitch-ovn-kubernetes systemd openvswitch - -%description node -This allows systemd to control ovn on the node - -%prep -%setup -q -n %{repo}-%{version} - -%build -cd go-controller && make -strip _output/go/bin/ovnkube -strip _output/go/bin/ovn-kube-util -strip _output/go/bin/ovn-k8s-overlay -strip _output/go/bin/ovn-k8s-cni-overlay - -%install -install -d -m 0750 %{buildroot}%{_bindir} -install -d -m 0750 %{buildroot}%{_libexecdir}/cni -install -p -m 755 go-controller/_output/go/bin/ovnkube %{buildroot}%{_bindir} -install -p -m 755 go-controller/_output/go/bin/ovn-kube-util %{buildroot}%{_bindir} -install -p -m 755 go-controller/_output/go/bin/ovn-k8s-overlay %{buildroot}%{_bindir} -install -p -m 755 go-controller/_output/go/bin/ovn-k8s-cni-overlay %{buildroot}%{_libexecdir}/cni -install -d -m 0750 %{buildroot}/etc/openvswitch -install -p -m 644 go-controller/etc/ovn_k8s.conf %{buildroot}/etc/openvswitch -install -d -m 0750 %{buildroot}%{_mandir}/man1 -install -p -m 644 docs/ovnkube.1 %{buildroot}%{_mandir}/man1 
-install -p -m 644 docs/ovn-kube-util.1 %{buildroot}%{_mandir}/man1 -install -p -m 644 docs/ovn-k8s-overlay.1 %{buildroot}%{_mandir}/man1 -install -d -m 0750 %{buildroot}%{_mandir}/man5 -install -p -m 644 docs/ovn_k8s.conf.5 %{buildroot}%{_mandir}/man5 - -install -p -D -m 0644 dist/files/ovn-kubernetes-master.service \ - %{buildroot}%{_unitdir}/ovn-kubernetes-master.service -install -p -D -m 0644 dist/files/ovn-kubernetes-node.service \ - %{buildroot}%{_unitdir}/ovn-kubernetes-node.service -install -p -m 755 dist/files/ovn-kubernetes-master.sh %{buildroot}%{_bindir} -install -p -m 755 dist/files/ovn-kubernetes-node.sh %{buildroot}%{_bindir} -install -p -D -m 0644 dist/files/ovn-kubernetes.sysconfig \ - %{buildroot}%{_sysconfdir}/sysconfig/ovn-kubernetes - -%preun node - %systemd_preun openvswitch-ovn-kubernetes-node - -%preun master - %systemd_preun openvswitch-ovn-kubernetes-master - -%post node - %systemd_post openvswitch-ovn-kubernetes-node - -%post master - %systemd_post openvswitch-ovn-kubernetes-master - - -%files -%defattr(-,root,root) -%license COPYING -%doc CONTRIBUTING.md README.md -%doc docs/config.md docs/debugging.md docs/INSTALL.SSL.md docs/INSTALL.UBUNTU.md -%{_mandir}/man1/ovnkube.1.* -%{_mandir}/man1/ovn-kube-util.1.* -%{_mandir}/man1/ovn-k8s-overlay.1.* -%{_mandir}/man5/ovn_k8s.conf.5.* -%{_bindir}/ovnkube -%{_bindir}/ovn-kube-util -%{_bindir}/ovn-k8s-overlay -%{_libexecdir}/cni/ovn-k8s-cni-overlay -%config(noreplace) %{_sysconfdir}/openvswitch/ovn_k8s.conf - -%files node -%{_unitdir}/ovn-kubernetes-node.service -%{_bindir}/ovn-kubernetes-node.sh -%config(noreplace) %{_sysconfdir}/sysconfig/ovn-kubernetes - -%files master -%{_unitdir}/ovn-kubernetes-master.service -%{_bindir}/ovn-kubernetes-master.sh - - -%changelog -* Wed May 9 2018 Phil Cameron - 0.3.0-1 -- Added support for containers - -* Fri Mar 23 2018 Phil Cameron - 0.2.0-1 -- Added packages for systemd packages openvswitch-ovn-kubernetes-node - and openvswitch-ovn-kubernetes-master. 
- -* Thu Jan 25 2018 Phil Cameron - 0.1.0-2 -- Changed from referencing a commit to referencing a release - in the source repo. - -* Fri Jan 12 2018 Phil Cameron - 0.1.0-1 -- Initial package for Fedora - diff --git a/dist/templates/ovn-setup.yaml.j2 b/dist/templates/ovn-setup.yaml.j2 index ba65efa8ee..fd02efd12d 100644 --- a/dist/templates/ovn-setup.yaml.j2 +++ b/dist/templates/ovn-setup.yaml.j2 @@ -130,7 +130,7 @@ metadata: name: ovn-config namespace: ovn-kubernetes data: - net_cidr: "{{ net_cidr | default('10.128.0.0/14/23') }}" - svc_cidr: "{{ svc_cidr | default('172.30.0.0/16') }}" - k8s_apiserver: "{{ k8s_apiserver.stdout }}" + net_cidr: "{{ net_cidr }}" + svc_cidr: "{{ svc_cidr }}" + k8s_apiserver: "{{ k8s_apiserver }}" mtu: "{{ mtu_value }}" diff --git a/test/integration/README.md b/test/integration/README.md deleted file mode 100644 index b7d6bb77d3..0000000000 --- a/test/integration/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# Integration Tests - -This directory contains playbooks to set up for and run set of integration -tests for ovn-kubernetes using openshift-dind cluster on RHEL and Fedora hosts. 
One entrypoint exists: - - - `main.yml`: sets up the machine and runs tests - -When running `main.yml`, two tags are present: - - - `setup`: run all tasks to set up the system for testing - - `integration`: build ovn-kubernetes from source and run the dind based cni_vendor_tests - -The playbooks assume the following things about your system: - - - on RHEL, the server and extras repos are configured and certs are present - - `ansible` is installed and the host is boot-strapped to allow `ansible` to run against it - - the `$GOPATH` is set and present for all shells (*e.g.* written in `/etc/environment`) - - ovn-kubernetes repository is checked out to the correct state at `${GOPATH}/src/github.com/ovn-org/ovn-kubernetes` - - the user running the playbook has access to passwordless `sudo` diff --git a/test/integration/ansible.cfg b/test/integration/ansible.cfg deleted file mode 100644 index f1622a985f..0000000000 --- a/test/integration/ansible.cfg +++ /dev/null @@ -1,359 +0,0 @@ -# config file for ansible -- http://ansible.com/ -# ============================================== - -# nearly all parameters can be overridden in ansible-playbook -# or with command line flags. ansible will read ANSIBLE_CONFIG, -# ansible.cfg in the current working directory, .ansible.cfg in -# the home directory or /etc/ansible/ansible.cfg, whichever it -# finds first - -[defaults] - -# some basic default values... - -#inventory = inventory -#library = /usr/share/my_modules/ -#remote_tmp = $HOME/.ansible/tmp -#local_tmp = .ansible/tmp -#forks = 5 -forks = 10 -#poll_interval = 15 -#sudo_user = root -#ask_sudo_pass = True -ask_sudo_pass = False -#ask_pass = True -ask_pass = False -#transport = smart -#remote_port = 22 -#module_lang = C -#module_set_locale = True - -# plays will gather facts by default, which contain information about -# the remote system. 
-# -# smart - gather by default, but don't regather if already gathered -# implicit - gather by default, turn off with gather_facts: False -# explicit - do not gather by default, must say gather_facts: True -#gathering = implicit -gathering = smart - -# by default retrieve all facts subsets -# all - gather all subsets -# network - gather min and network facts -# hardware - gather hardware facts (longest facts to retrieve) -# virtual - gather min and virtual facts -# facter - import facts from facter -# ohai - import facts from ohai -# You can combine them using comma (ex: network,virtual) -# You can negate them using ! (ex: !hardware,!facter,!ohai) -# A minimal set of facts is always gathered. -gather_subset = network - -# additional paths to search for roles in, colon separated -# N/B: This depends on how ansible is called -#roles_path = $WORKSPACE/kommandir_workspace/roles - -# uncomment this to disable SSH key host checking -#host_key_checking = False -host_key_checking = False - -# change the default callback -#stdout_callback = skippy -# enable additional callbacks -#callback_whitelist = timer, mail - -# Determine whether includes in tasks and handlers are "static" by -# default. As of 2.0, includes are dynamic by default. Setting these -# values to True will make includes behave more like they did in the -# 1.x versions. 
-task_includes_static = True -handler_includes_static = True - -# change this for alternative sudo implementations -#sudo_exe = sudo - -# What flags to pass to sudo -# WARNING: leaving out the defaults might create unexpected behaviours -#sudo_flags = -H -S -n - -# SSH timeout -#timeout = 10 - -# default user to use for playbooks if user is not specified -# (/usr/bin/ansible will use current user as default) -#remote_user = root -remote_user = openshift - -# logging is off by default unless this path is defined -# if so defined, consider logrotate -log_path = $ARTIFACTS/main.log - -# default module name for /usr/bin/ansible -#module_name = command - -# use this shell for commands executed under sudo -# you may need to change this to bin/bash in rare instances -# if sudo is constrained -# executable = /bin/sh - -# if inventory variables overlap, does the higher precedence one win -# or are hash values merged together? The default is 'replace' but -# this can also be set to 'merge'. -hash_behaviour = replace - -# by default, variables from roles will be visible in the global variable -# scope. To prevent this, the following option can be enabled, and only -# tasks and handlers within the role will see the variables there -private_role_vars = False - -# list any Jinja2 extensions to enable here: -#jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n - -# if set, always use this private key file for authentication, same as -# if passing --private-key to ansible or ansible-playbook -#private_key_file = /path/to/file - -# If set, configures the path to the Vault password file as an alternative to -# specifying --vault-password-file on the command line. -#vault_password_file = /path/to/vault_password_file - -# format of string {{ ansible_managed }} available within Jinja2 -# templates indicates to users editing templates files will be replaced. -# replacing {file}, {host} and {uid} and strftime codes with proper values. 
-#ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host} -# This short version is better used in templates as it won't flag the file as changed every run. -#ansible_managed = Ansible managed: {file} on {host} - -# by default, ansible-playbook will display "Skipping [host]" if it determines a task -# should not be run on a host. Set this to "False" if you don't want to see these "Skipping" -# messages. NOTE: the task header will still be shown regardless of whether or not the -# task is skipped. -#display_skipped_hosts = True -display_skipped_hosts = False - -# by default, if a task in a playbook does not include a name: field then -# ansible-playbook will construct a header that includes the task's action but -# not the task's args. This is a security feature because ansible cannot know -# if the *module* considers an argument to be no_log at the time that the -# header is printed. If your environment doesn't have a problem securing -# stdout from ansible-playbook (or you have manually specified no_log in your -# playbook on all of the tasks where you have secret information) then you can -# safely set this to True to get more informative messages. -display_args_to_stdout = False - -# by default (as of 1.3), Ansible will raise errors when attempting to dereference -# Jinja2 variables that are not set in templates or action lines. Uncomment this line -# to revert the behavior to pre-1.3. -#error_on_undefined_vars = False - -# by default (as of 1.6), Ansible may display warnings based on the configuration of the -# system running ansible itself. This may include warnings about 3rd party packages or -# other conditions that should be resolved if possible. -# to disable these warnings, set the following value to False: -system_warnings = False - -# by default (as of 1.4), Ansible may display deprecation warnings for language -# features that should no longer be used and will be removed in future versions. 
-# to disable these warnings, set the following value to False: -deprecation_warnings = False - -# (as of 1.8), Ansible can optionally warn when usage of the shell and -# command module appear to be simplified by using a default Ansible module -# instead. These warnings can be silenced by adjusting the following -# setting or adding warn=yes or warn=no to the end of the command line -# parameter string. This will for example suggest using the git module -# instead of shelling out to the git command. -command_warnings = False - - -# set plugin path directories here, separate with colons -#action_plugins = /usr/share/ansible/plugins/action -#callback_plugins = /usr/share/ansible/plugins/callback -#connection_plugins = /usr/share/ansible/plugins/connection -#lookup_plugins = /usr/share/ansible/plugins/lookup -#vars_plugins = /usr/share/ansible/plugins/vars -#filter_plugins = /usr/share/ansible/plugins/filter -#test_plugins = /usr/share/ansible/plugins/test -#strategy_plugins = /usr/share/ansible/plugins/strategy - -# Most callbacks shipped with Ansible are disabled by default -# and need to be whitelisted in your ansible.cfg file in order to function. -callback_whitelist = default - -# by default callbacks are not loaded for /bin/ansible, enable this if you -# want, for example, a notification or logging callback to also apply to -# /bin/ansible runs -#bin_ansible_callbacks = False - - -# don't like cows? that's unfortunate. -# set to 1 if you don't want cowsay support or export ANSIBLE_NOCOWS=1 -#nocows = 1 - -# set which cowsay stencil you'd like to use by default. When set to 'random', -# a random stencil will be selected for each task. The selection will be filtered -# against the `cow_whitelist` option below. -#cow_selection = default -#cow_selection = random - -# when using the 'random' option for cowsay, stencils will be restricted to this list. -# it should be formatted as a comma-separated list with no spaces between names. 
-# NOTE: line continuations here are for formatting purposes only, as the INI parser -# in python does not support them. -#cow_whitelist=bud-frogs,bunny,cheese,daemon,default,dragon,elephant-in-snake,elephant,eyes,\ -# hellokitty,kitty,luke-koala,meow,milk,moofasa,moose,ren,sheep,small,stegosaurus,\ -# stimpy,supermilker,three-eyes,turkey,turtle,tux,udder,vader-koala,vader,www - -# don't like colors either? -# set to 1 if you don't want colors, or export ANSIBLE_NOCOLOR=1 -nocolor = 0 - -# if set to a persistent type (not 'memory', for example 'redis') fact values -# from previous runs in Ansible will be stored. This may be useful when -# wanting to use, for example, IP information from one group of servers -# without having to talk to them in the same playbook run to get their -# current IP information. -#fact_caching = memory - -# retry files -# When a playbook fails by default a .retry file will be created in ~/ -# You can disable this feature by setting retry_files_enabled to False -# and you can change the location of the files by setting retry_files_save_path - -#retry_files_enabled = False -retry_files_enabled = False - -# squash actions -# Ansible can optimise actions that call modules with list parameters -# when looping. Instead of calling the module once per with_ item, the -# module is called once with all items at once. Currently this only works -# under limited circumstances, and only with parameters named 'name'. -squash_actions = apk,apt,dnf,package,pacman,pkgng,yum,zypper - -# prevents logging of task data, off by default -#no_log = False - -# prevents logging of tasks, but only on the targets, data is still logged on the master/controller -no_target_syslog = True - -# controls whether Ansible will raise an error or warning if a task has no -# choice but to create world readable temporary files to execute a module on -# the remote machine. This option is False by default for security. 
Users may -# turn this on to have behaviour more like Ansible prior to 2.1.x. See -# https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user -# for more secure ways to fix this than enabling this option. -#allow_world_readable_tmpfiles = False - -# controls the compression level of variables sent to -# worker processes. At the default of 0, no compression -# is used. This value must be an integer from 0 to 9. -#var_compression_level = 9 - -# controls what compression method is used for new-style ansible modules when -# they are sent to the remote system. The compression types depend on having -# support compiled into both the controller's python and the client's python. -# The names should match with the python Zipfile compression types: -# * ZIP_STORED (no compression. available everywhere) -# * ZIP_DEFLATED (uses zlib, the default) -# These values may be set per host via the ansible_module_compression inventory -# variable -#module_compression = 'ZIP_DEFLATED' - -# This controls the cutoff point (in bytes) on --diff for files -# set to 0 for unlimited (RAM may suffer!). -#max_diff_size = 1048576 - -[privilege_escalation] -become=True -#become_method=sudo -#become_user=root -become_user=root -#become_ask_pass=False - -[paramiko_connection] - -# uncomment this line to cause the paramiko connection plugin to not record new host -# keys encountered. Increases performance on new host additions. Setting works independently of the -# host key checking setting above. -#record_host_keys=False - -# by default, Ansible requests a pseudo-terminal for commands executed under sudo. Uncomment this -# line to disable this behaviour. 
-#pty=False - -[ssh_connection] - -# ssh arguments to use -# Leaving off ControlPersist will result in poor performance, so use -# paramiko on older platforms rather than removing it -ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o PreferredAuthentications=publickey -o ConnectTimeout=13 - -# The path to use for the ControlPath sockets. This defaults to -# "%(directory)s/ansible-ssh-%%h-%%p-%%r", however on some systems with -# very long hostnames or very long path names (caused by long user names or -# deeply nested home directories) this can exceed the character limit on -# file socket names (108 characters for most platforms). In that case, you -# may wish to shorten the string below. -# -# Example: -# control_path = %(directory)s/%%h-%%r -#control_path = %(directory)s/ansible-ssh-%%h-%%p-%%r - -# Enabling pipelining reduces the number of SSH operations required to -# execute a module on the remote server. This can result in a significant -# performance improvement when enabled, however when using "sudo:" you must -# first disable 'requiretty' in /etc/sudoers -# -# By default, this option is disabled to preserve compatibility with -# sudoers configurations that have requiretty (the default on many distros). -# -#pipelining = False -pipelining=True - -# if True, make ansible use scp if the connection type is ssh -# (default is sftp) -#scp_if_ssh = True - -# if False, sftp will not use batch mode to transfer files. This may cause some -# types of file transfer failures impossible to catch however, and should -# only be disabled if your sftp version has problems with batch mode -#sftp_batch_mode = False - -[accelerate] -#accelerate_port = 5099 -#accelerate_timeout = 30 -#accelerate_connect_timeout = 5.0 - -# The daemon timeout is measured in minutes. This time is measured -# from the last activity to the accelerate daemon. 
-#accelerate_daemon_timeout = 30 - -# If set to yes, accelerate_multi_key will allow multiple -# private keys to be uploaded to it, though each user must -# have access to the system via SSH to add a new key. The default -# is "no". -#accelerate_multi_key = yes - -[selinux] -# file systems that require special treatment when dealing with security context -# the default behaviour that copies the existing context or uses the user default -# needs to be changed to use the file system dependent context. -#special_context_filesystems=nfs,vboxsf,fuse,ramfs - -# Set this to yes to allow libvirt_lxc connections to work without SELinux. -#libvirt_lxc_noseclabel = yes - -[colors] -#highlight = white -#verbose = blue -#warn = bright purple -#error = red -#debug = dark gray -#deprecate = purple -#skip = cyan -#unreachable = red -#ok = green -#changed = yellow -#diff_add = green -#diff_remove = red -#diff_lines = cyan diff --git a/test/integration/build/openshift.yml b/test/integration/build/openshift.yml deleted file mode 100644 index cb7bac45fb..0000000000 --- a/test/integration/build/openshift.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- - -- name: clone openshift origin source repo - git: - repo: "https://github.com/openshift/origin.git" - dest: "{{ ansible_env.GOPATH }}/src/github.com/openshift/origin" - force: "{{ force_clone | default(False) | bool}}" - -- name: build openshift - make: - chdir: "{{ ansible_env.GOPATH }}/src/github.com/openshift/origin" - -- name: build dind images - shell: "hack/build-dind-images.sh" - args: - chdir: "{{ ansible_env.GOPATH }}/src/github.com/openshift/origin" diff --git a/test/integration/build/ovnkube.yml b/test/integration/build/ovnkube.yml deleted file mode 100644 index 5d80bb9c51..0000000000 --- a/test/integration/build/ovnkube.yml +++ /dev/null @@ -1,39 +0,0 @@ ---- - -- name: stat the expected ovn-kubernetes directory - stat: - path: "{{ ansible_env.GOPATH }}/src/github.com/ovn-org/ovn-kubernetes" - register: dir_stat - -- name: ensure 
cni bin directory is present - file: path=/opt/cni/bin state=directory - -- name: expect ovn-kubernetes to be cloned already - fail: - msg: "Expected ovn-kubernetes to be cloned at {{ ansible_env.GOPATH }}/src/github.com/ovn-org/ovn-kubernetes but it wasn't!" - when: not dir_stat.stat.exists - -- name: install ovn-kubernetes tools - make: - target: install.tools - chdir: "{{ ansible_env.GOPATH }}/src/github.com/ovn-org/ovn-kubernetes/go-controller" - -- name: build ovn-kubernetes - make: - chdir: "{{ ansible_env.GOPATH }}/src/github.com/ovn-org/ovn-kubernetes/go-controller" - -- name: install ovn-kubernetes - make: - target: install - chdir: "{{ ansible_env.GOPATH }}/src/github.com/ovn-org/ovn-kubernetes/go-controller" - -#- name: install ovn-kubernetes systemd files -# make: -# target: install.systemd -# chdir: "{{ ansible_env.GOPATH }}/src/github.com/ovn-org/ovn-kubernetes/go-controller" -# when: ansible_distribution == 'RedHat' or ansible_distribution == 'CentOS' -# -#- name: install ovn-kubernetes config -# make: -# target: install.config -# chdir: "{{ ansible_env.GOPATH }}/src/github.com/ovn-org/ovn-kubernetes" diff --git a/test/integration/golang.yml b/test/integration/golang.yml deleted file mode 100644 index e23305a4c7..0000000000 --- a/test/integration/golang.yml +++ /dev/null @@ -1,49 +0,0 @@ ---- - -- name: ensure Golang dir is empty first - file: - path: /usr/local/go - state: absent - -- name: fetch Golang - unarchive: - remote_src: yes - src: "https://storage.googleapis.com/golang/go{{ version }}.linux-amd64.tar.gz" - dest: /usr/local - -- name: link go toolchain - file: - src: "/usr/local/go/bin/{{ item }}" - dest: "/usr/bin/{{ item }}" - state: link - with_items: - - go - - gofmt - - godoc - -- name: ensure user profile exists - file: - path: "{{ ansible_user_dir }}/.profile" - state: touch - -- name: set up PATH for Go toolchain and built binaries - lineinfile: - dest: "{{ ansible_user_dir }}/.profile" - line: 'PATH={{ ansible_env.PATH }}:{{ 
ansible_env.GOPATH }}/bin:/usr/local/go/bin' - regexp: '^PATH=' - state: present - -- name: set up directories - file: - path: "{{ item }}" - state: directory - with_items: - - "{{ ansible_env.GOPATH }}/src/github.com/ovn-org" - - "{{ ansible_env.GOPATH }}/src/github.com/k8s.io" - - "{{ ansible_env.GOPATH }}/src/github.com/openshift" - -- name: install Go tools and dependencies - shell: /usr/bin/go get -u "github.com/{{ item }}" - with_items: - - tools/godep - - onsi/ginkgo/ginkgo diff --git a/test/integration/hosts b/test/integration/hosts deleted file mode 100644 index 9fad1a8892..0000000000 --- a/test/integration/hosts +++ /dev/null @@ -1,2 +0,0 @@ -[hosts] -localhost diff --git a/test/integration/main.yml b/test/integration/main.yml deleted file mode 100644 index f72beb5f0e..0000000000 --- a/test/integration/main.yml +++ /dev/null @@ -1,28 +0,0 @@ -- hosts: all - become_user: root - vars_files: - - "{{ playbook_dir }}/vars.yml" - tags: - - setup - tasks: - - name: set up the system - include: system.yml - - - name: install Golang tools - include: golang.yml - vars: - version: "1.11.4" - - - name: clone build and install openshift - include: "build/openshift.yml" - -- hosts: all - vars_files: - - "{{ playbook_dir }}/vars.yml" - tags: - - integration - tasks: - - name: clone build and install ovn-kubernetes - include: "build/ovnkube.yml" - - name: run openshift-dind tests - include: "openshift-dind-test.yml" diff --git a/test/integration/openshift-dind-test.yml b/test/integration/openshift-dind-test.yml deleted file mode 100644 index 179466f3ff..0000000000 --- a/test/integration/openshift-dind-test.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- - -- name: disable selinux - selinux: - state: disabled - -- name: setup dind cluster with ovn - shell: "OVN_ROOT={{ ansible_env.GOPATH }}/src/github.com/ovn-org/ovn-kubernetes hack/dind-cluster.sh start -n ovn" - args: - chdir: "{{ ansible_env.GOPATH }}/src/github.com/openshift/origin" - -- name: ensure artifacts directory is 
present - file: path={{ artifacts }} state=directory - -- name: run integration tests - shell: "OPENSHIFT_TEST_KUBECONFIG={{ openshift_dind_kubeconfig }} ./cni_vendor_test.sh &> {{ artifacts }}/cni_vendor_test.log" - args: - chdir: "{{ ansible_env.GOPATH }}/src/github.com/openshift/origin/test/extended" - -- name: cleanup after integration tests - shell: "./hack/dind-cluster.sh stop" - args: - chdir: "{{ ansible_env.GOPATH }}/src/github.com/openshift/origin" diff --git a/test/integration/system.yml b/test/integration/system.yml deleted file mode 100644 index dc9d6a40e2..0000000000 --- a/test/integration/system.yml +++ /dev/null @@ -1,79 +0,0 @@ ---- - -- name: Make sure we have all required packages - become_user: root - package: - name: "{{ item }}" - state: present - with_items: - - curl - - docker - - expect - - findutils - - gcc - - git - - glib2-devel - - glibc-devel - - glibc-static - - hostname - - iproute - - iptables - - libxml2-devel - - make - - nfs-utils - - nmap-ncat - - openssl - - openssl-devel - - pkgconfig - - python - - python2-crypto - - python-devel - - python-virtualenv - - PyYAML - - rpcbind - - rsync - - sed - - socat - - tar - - wget - -- name: Update all packages - package: - name: '*' - state: latest - -- name: Setup swap to prevent kernel firing off the OOM killer - shell: | - truncate -s 8G /root/swap && \ - export SWAPDEV=$(losetup --show -f /root/swap | head -1) && \ - mkswap $SWAPDEV && \ - swapon $SWAPDEV && \ - swapon --show - -- name: ensure directories exist as needed - file: - path: "{{ item }}" - state: directory - with_items: - - /opt/cni/bin - - /etc/cni/net.d - -- name: set sysctl vm.overcommit_memory=1 for CentOS - sysctl: - name: vm.overcommit_memory - state: present - value: 1 - when: ansible_distribution == 'CentOS' - -- name: inject hostname into /etc/hosts - lineinfile: - dest: /etc/hosts - line: '{{ ansible_default_ipv4.address }} {{ ansible_nodename }}' - insertafter: 'EOF' - regexp: '{{ ansible_default_ipv4.address 
}}\s+{{ ansible_nodename }}' - state: present - -- name: start docker daemon - systemd: - state: started - name: docker diff --git a/test/integration/vars.yml b/test/integration/vars.yml deleted file mode 100644 index 6c7fc72e4c..0000000000 --- a/test/integration/vars.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- - -# For results.yml Paths use rsync 'source' conventions -artifacts: "/tmp/artifacts" # Base-directory for collection -openshift_dind_kubeconfig: "/tmp/openshift-dind-cluster/openshift/openshift.local.config/master/admin.kubeconfig" -result_dest_basedir: '{{ lookup("env","WORKSPACE") | - default(playbook_dir, True) }}/artifacts' diff --git a/vagrant/README.md b/vagrant/README.md deleted file mode 100644 index 97d50777f9..0000000000 --- a/vagrant/README.md +++ /dev/null @@ -1,80 +0,0 @@ -Kubernetes and OVN -================== - -This contains a Vagrant setup for Kubernetes and OVN integration. This needs -a minimum vagrant version of 1.8.5 and is known to atleast work on Mac, -Ubuntu 16.04 and Windows 10. - -This has been tested with Virtualbox only. Some work has been done to get it -working with vagrant-libvirt, but this may not fully work yet. - -Howto ------ - -From the cloned ovn-kubernetes repo, -* cd vagrant - -* vagrant up k8s-master -* vagrant up k8s-minion1 -* vagrant up k8s-minion2 - -Please note that, by default, the pods created cannot reach the internet. -This is because, the network to which it is attached is a vagrant private -network. You can reach the pods running inside the VMs from your host via -the nodeport though. - -If you need external network connectivity for your pods, you should use -the vagrant's "public network" option. This will give dhcp provided IP -address for your gateways. You can invoke this by providing your host's -network interface while running the vagrant. 
For e.g., if your host network -interface on your MAC is "en4: Thunderbolt Ethernet", you can run - -* OVN_EXTERNAL="en4: Thunderbolt Ethernet" vagrant up k8s-minion1 - -Run some containers -------------------- - -The Vagrant will create some sample yaml files for configuring a pod -running Apache, as well as yaml for creating an east-west service and -a north-south service. To try these out, follow these instructions. - -* kubectl create -f ~/apache-pod.yaml -* kubectl create -f ~/nginx-pod.yaml -* kubectl create -f ~/apache-e-w.yaml -* kubectl create -f ~/apache-n-s.yaml - -You can verify the services are up and running now: - -* kubectl get pods -* kubectl get svc - -You can now get to the service from the host running Virtualbox by using -the Nodeport and the IP 10.10.0.12 (the public-ip for the k8s-minion1 found in -the vagrant/provisioning/vm_config.conf.yml file). - -* curl 10.10.0.12:[nodeport] - -Since the vagrant initializes gateway node on the other minion too, you should -be able to access the same service via 10.10.0.13 too. - -* curl 10.10.0.13:[nodeport] - -Note: The above IP addresss are NOT used when you use the vagrant's public -network option. In that case, the above IP addresses are provided by dhcp -by your underlying network. So it is dynamic. You can fetch these IP -addresses by running 'ifconfig brenp0s8' on each of your host. You can then -run the curl commands on those IP addresses. - -You should see OVN doing load-balancing between the pods, which means you will -both the apache example page and the nginx example page. - -Launch a busybox pod: - -* kubectl run -i --tty busybox --image=busybox -- sh - -Verify this pod: - -* kubectl get pods -* kubectl describe pod - -You can now login to the busybox pod on the minion host and ping across pods. 
diff --git a/vagrant/Vagrantfile b/vagrant/Vagrantfile deleted file mode 100644 index 23c9498a63..0000000000 --- a/vagrant/Vagrantfile +++ /dev/null @@ -1,123 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : - -require 'yaml' -require 'ipaddr' - -vagrant_config = YAML.load_file("provisioning/vm_config.conf.yml") - -ovn_external = ENV[ 'OVN_EXTERNAL' ] - -Vagrant.configure(2) do |config| - config.vm.box = "ubuntu/xenial64" - config.vm.provider "libvirt" do |lv, override| - override.vm.box = "ceph/ubuntu-xenial" - lv.driver = "kvm" - end - - # Use the ipaddr library to calculate the netmask of a given network - net = IPAddr.new vagrant_config['public_network'] - netmask = net.inspect().split("/")[1].split(">")[0] - - # Bring up the Devstack ovsdb/ovn-northd node on Virtualbox - config.vm.define "k8s-master" do |k8smaster| - k8smaster.vm.host_name = vagrant_config['k8smaster']['host_name'] - if !ovn_external.nil? - k8smaster.vm.network "public_network", bridge: ovn_external - else - k8smaster.vm.network "private_network", ip: vagrant_config['k8smaster']['public-ip'], netmask: netmask - end - k8smaster.vm.provision "shell", path: "provisioning/setup-hostnames.sh", privileged: true, - :args => "#{vagrant_config['k8smaster']['public-ip']} #{vagrant_config['k8smaster']['short_name']} #{vagrant_config['k8sminion1']['public-ip']} #{vagrant_config['k8sminion1']['short_name']} #{vagrant_config['k8sminion2']['public-ip']} #{vagrant_config['k8sminion2']['short_name']}" - k8smaster.vm.provision "shell", path: "provisioning/setup-master.sh", privileged: false, - :args => "#{vagrant_config['k8smaster']['public-ip']} #{vagrant_config['k8sminion1']['public-ip']} #{vagrant_config['k8sminion2']['public-ip']} #{vagrant_config['k8smaster']['short_name']} #{netmask} #{vagrant_config['public_gateway']} #{ovn_external}" - k8smaster.vm.provider "virtualbox" do |vb| - vb.name = vagrant_config['k8smaster']['short_name'] - vb.memory = vagrant_config['k8smaster']['memory'] - vb.cpus = 
vagrant_config['k8smaster']['cpus'] - vb.customize [ - 'modifyvm', :id, - '--nicpromisc3', "allow-all" - ] - vb.customize [ - "guestproperty", "set", :id, - "/VirtualBox/GuestAdd/VBoxService/--timesync-set-threshold", 10000 - ] - end - k8smaster.vm.provider "libvirt" do |domain| - domain.memory = vagrant_config['k8smaster']['memory'] - domain.cpus = vagrant_config['k8smaster']['cpus'] - domain.nic_model_type = "virtio" - end - end - - config.vm.define "k8s-minion1" do |k8sminion1| - k8sminion1.vm.host_name = vagrant_config['k8sminion1']['host_name'] - k8sminion1.vm.host_name = "k8sminion1" - if !ovn_external.nil? - k8sminion1.vm.network "public_network", bridge: ovn_external - else - k8sminion1.vm.network "private_network", ip: vagrant_config['k8sminion1']['public-ip'], netmask: netmask - end - k8sminion1.vm.provision "shell", path: "provisioning/setup-hostnames.sh", privileged: true, - :args => "#{vagrant_config['k8smaster']['public-ip']} #{vagrant_config['k8smaster']['short_name']} #{vagrant_config['k8sminion1']['public-ip']} #{vagrant_config['k8sminion1']['short_name']} #{vagrant_config['k8sminion2']['public_ip']} #{vagrant_config['k8sminion2']['short_name']}" - k8sminion1.vm.provision "shell", path: "provisioning/setup-minion.sh", privileged: false, - :args => "#{vagrant_config['k8sminion1']['public-ip']} #{vagrant_config['k8smaster']['public-ip']} #{vagrant_config['k8sminion2']['public-ip']} #{netmask} #{vagrant_config['k8sminion1']['short_name']} #{vagrant_config['public_gateway']} #{ovn_external}" - k8sminion1.vm.provider "virtualbox" do |vb| - vb.name = vagrant_config['k8sminion1']['short_name'] - vb.memory = vagrant_config['k8sminion1']['memory'] - vb.cpus = vagrant_config['k8sminion1']['cpus'] - vb.customize [ - 'modifyvm', :id, - '--nicpromisc3', "allow-all" - ] - vb.customize [ - "guestproperty", "set", :id, - "/VirtualBox/GuestAdd/VBoxService/--timesync-set-threshold", 10000 - ] - end - k8sminion1.vm.provider "libvirt" do |domain| - domain.memory = 
vagrant_config['k8sminion1']['memory'] - domain.cpus = vagrant_config['k8sminion1']['cpus'] - domain.nic_model_type = "virtio" - end - end - - config.vm.define "k8s-minion2" do |k8sminion2| - k8sminion2.vm.host_name = vagrant_config['k8sminion2']['host_name'] - k8sminion2.vm.host_name = "k8sminion2" - if !ovn_external.nil? - k8sminion2.vm.network "public_network", bridge: ovn_external - else - k8sminion2.vm.network "private_network", ip: vagrant_config['k8sminion2']['public-ip'], netmask: netmask - end - k8sminion2.vm.provision "shell", path: "provisioning/setup-hostnames.sh", privileged: true, - :args => "#{vagrant_config['k8smaster']['public-ip']} #{vagrant_config['k8smaster']['short_name']} #{vagrant_config['k8sminion1']['public-ip']} #{vagrant_config['k8sminion1']['short_name']} #{vagrant_config['k8sminion2']['public-ip']} #{vagrant_config['k8sminion2']['short_name']}" - k8sminion2.vm.provision "shell", path: "provisioning/setup-minion.sh", privileged: false, - :args => "#{vagrant_config['k8sminion2']['public-ip']} #{vagrant_config['k8smaster']['public-ip']} #{vagrant_config['k8sminion1']['public-ip']} #{netmask} #{vagrant_config['k8sminion2']['short_name']} #{vagrant_config['public_gateway']} #{ovn_external}" - k8sminion2.vm.provider "virtualbox" do |vb| - vb.name = vagrant_config['k8sminion2']['short_name'] - vb.memory = vagrant_config['k8sminion2']['memory'] - vb.cpus = vagrant_config['k8sminion2']['cpus'] - vb.customize [ - 'modifyvm', :id, - '--nicpromisc3', "allow-all" - ] - vb.customize [ - "guestproperty", "set", :id, - "/VirtualBox/GuestAdd/VBoxService/--timesync-set-threshold", 10000 - ] - end - k8sminion2.vm.provider "libvirt" do |domain| - domain.memory = vagrant_config['k8sminion2']['memory'] - domain.cpus = vagrant_config['k8sminion2']['cpus'] - domain.nic_model_type = "virtio" - end - end - - config.vm.provider "virtualbox" do |v| - v.customize ["modifyvm", :id, "--natdnshostresolver1", "on"] - v.customize ["modifyvm", :id, "--natdnsproxy1", 
"on"] - v.customize ["modifyvm", :id, "--nictype1", "virtio"] - end -end diff --git a/vagrant/ovnkube-rbac.yaml b/vagrant/ovnkube-rbac.yaml deleted file mode 100644 index 3d58bddfb3..0000000000 --- a/vagrant/ovnkube-rbac.yaml +++ /dev/null @@ -1,46 +0,0 @@ ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: ovnkube - namespace: default ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: ovnkube -rules: - - apiGroups: - - "" - - networking.k8s.io - resources: - - pods - - services - - endpoints - - namespaces - - networkpolicies - - nodes - verbs: - - get - - list - - watch - - apiGroups: - - "" - resources: - - nodes - - pods - verbs: - - patch ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: ovnkube -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: ovnkube -subjects: -- kind: ServiceAccount - name: ovnkube - namespace: default diff --git a/vagrant/provisioning/setup-hostnames.sh b/vagrant/provisioning/setup-hostnames.sh deleted file mode 100755 index 0f85a7890f..0000000000 --- a/vagrant/provisioning/setup-hostnames.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set -o xtrace - -# ARGS: -# $1: Master IP -# $2: Master hostname -# $3: Minion1 IP -# $4: Minion1 hostname -# $5: Minion2 IP -# $6: Minion2 hostname - -MASTER_IP=$1 -MASTER_HOSTNAME=$2 -MINION1_IP=$3 -MINION1_HOSTNAME=$4 -MINION2_IP=$5 -MINION2_HOSTNAME=$6 - -cat << HOSTEOF >> /etc/hosts -$MASTER_IP $MASTER_HOSTNAME -$MINION1_IP $MINION1_HOSTNAME -$MINION2_IP $MINION2_HOSTNAME -HOSTEOF - -# Restore xtrace -$XTRACE diff --git a/vagrant/provisioning/setup-master.sh b/vagrant/provisioning/setup-master.sh deleted file mode 100755 index 8278657d89..0000000000 --- a/vagrant/provisioning/setup-master.sh +++ /dev/null @@ -1,316 +0,0 @@ -#!/bin/bash - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set -o xtrace - -MASTER1=$1 
-MASTER2=$2 -MASTER3=$3 -NODE_NAME=$4 -PUBLIC_SUBNET_MASK=$5 -GW_IP=$6 -OVN_EXTERNAL=$7 - -if [ -n "$OVN_EXTERNAL" ]; then - MASTER1=`ifconfig enp0s8 | grep 'inet addr' | cut -d: -f2 | awk '{print $1}'` - PUBLIC_SUBNET_MASK=`ifconfig enp0s8 | grep 'inet addr' | cut -d: -f4` - GW_IP=`grep 'option routers' /var/lib/dhcp/dhclient.enp0s8.leases | head -1 | sed -e 's/;//' | awk '{print $3}'` -fi - -OVERLAY_IP=$MASTER1 - -cat > setup_master_args.sh <> /etc/apt/sources.list.d/docker.list" -sudo apt-get update - -## First, install docker -sudo apt-get purge lxc-docker -sudo apt-get install -y linux-image-extra-$(uname -r) linux-image-extra-virtual -sudo apt-get install -y docker-engine -sudo service docker start - -## Install kubernetes -sudo apt-get install -y kubelet kubeadm kubectl -sudo apt-mark hold kubelet kubeadm kubectl -sudo service kubelet restart - -sudo swapoff -a -sudo kubeadm config images pull -sudo kubeadm init --pod-network-cidr=192.168.0.0/16 --apiserver-advertise-address=$OVERLAY_IP \ - --service-cidr=172.16.1.0/24 2>&1 | tee kubeadm.log -grep -A1 "kubeadm join" kubeadm.log | sudo tee /vagrant/kubeadm.log - -mkdir -p $HOME/.kube -sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config -sudo chown $(id -u):$(id -g) $HOME/.kube/config - -# Wait till kube-apiserver is up -while true; do - kubectl get node $NODE_NAME - if [ $? -eq 0 ]; then - break - fi - echo "waiting for kube-apiserver to be up" - sleep 1 -done - -# Let master run pods too. 
-kubectl taint nodes --all node-role.kubernetes.io/master- - -## install packages that deliver ovs-pki and its dependencies -sudo apt-get build-dep dkms -sudo apt-get install python-six openssl python-pip -y -sudo apt-get install openvswitch-common libopenvswitch -y -sudo apt-get install openvswitch-datapath-dkms -y - -if [ "$DAEMONSET" != "true" ]; then - ## Install OVS and OVN components - sudo apt-get install openvswitch-switch - sudo apt-get install ovn-central ovn-common ovn-host -y -fi -if [ -n "$SSL" ]; then - PROTOCOL=ssl - echo "PROTOCOL=ssl" >> setup_master_args.sh - # Install SSL certificates - pushd /etc/openvswitch - sudo ovs-pki -d /vagrant/pki init --force - sudo ovs-pki req ovnsb - sudo ovs-pki -b -d /vagrant/pki sign ovnsb - - sudo ovs-pki req ovnnb - sudo ovs-pki -b -d /vagrant/pki sign ovnnb - - sudo ovs-pki req ovncontroller - sudo ovs-pki -b -d /vagrant/pki sign ovncontroller switch - popd -else - PROTOCOL=tcp - echo "PROTOCOL=tcp" >> setup_master_args.sh -fi - -if [ "$HA" = "true" ]; then - sudo /usr/share/openvswitch/scripts/ovn-ctl stop_nb_ovsdb - sudo /usr/share/openvswitch/scripts/ovn-ctl stop_sb_ovsdb - sudo rm /etc/openvswitch/ovn*.db - sudo /usr/share/openvswitch/scripts/ovn-ctl stop_northd - - LOCAL_IP=$OVERLAY_IP - - sudo /usr/share/openvswitch/scripts/ovn-ctl \ - --db-nb-cluster-local-addr=$LOCAL_IP start_nb_ovsdb - - sudo /usr/share/openvswitch/scripts/ovn-ctl \ - --db-sb-cluster-local-addr=$LOCAL_IP start_sb_ovsdb - - ovn_nb="$PROTOCOL:$MASTER1:6641,$PROTOCOL:$MASTER2:6641,$PROTOCOL:$MASTER3:6641" - ovn_sb="$PROTOCOL:$MASTER1:6642,$PROTOCOL:$MASTER2:6642,$PROTOCOL:$MASTER3:6642" - - sudo ovn-northd -vconsole:emer -vsyslog:err -vfile:info \ - --ovnnb-db="$ovn_nb" --ovnsb-db="$ovn_sb" --no-chdir \ - --log-file=/var/log/openvswitch/ovn-northd.log \ - --pidfile=/var/run/openvswitch/ovn-northd.pid --detach --monitor -fi - - -# Clone ovn-kubernetes repo -mkdir -p $HOME/work/src/github.com/ovn-org -pushd $HOME/work/src/github.com/ovn-org 
-git clone https://github.com/ovn-org/ovn-kubernetes -popd - -if [ "$DAEMONSET" != "true" ]; then - # Install golang - wget -nv https://dl.google.com/go/go1.11.4.linux-amd64.tar.gz - sudo tar -C /usr/local -xzf go1.11.4.linux-amd64.tar.gz - export PATH="/usr/local/go/bin:echo $PATH" - export GOPATH=$HOME/work - - pushd $HOME/work/src/github.com/ovn-org/ovn-kubernetes/go-controller - make 1>&2 2>/dev/null - sudo make install - popd - - if [ $PROTOCOL = "ssl" ]; then - sudo ovn-nbctl set-connection pssl:6641 -- set connection . inactivity_probe=0 - sudo ovn-sbctl set-connection pssl:6642 -- set connection . inactivity_probe=0 - sudo ovn-nbctl set-ssl /etc/openvswitch/ovnnb-privkey.pem \ - /etc/openvswitch/ovnnb-cert.pem /vagrant/pki/switchca/cacert.pem - sudo ovn-sbctl set-ssl /etc/openvswitch/ovnsb-privkey.pem \ - /etc/openvswitch/ovnsb-cert.pem /vagrant/pki/switchca/cacert.pem - SSL_ARGS="-nb-client-privkey /etc/openvswitch/ovncontroller-privkey.pem \ - -nb-client-cert /etc/openvswitch/ovncontroller-cert.pem \ - -nb-client-cacert /vagrant/pki/switchca/cacert.pem \ - -sb-client-privkey /etc/openvswitch/ovncontroller-privkey.pem \ - -sb-client-cert /etc/openvswitch/ovncontroller-cert.pem \ - -sb-client-cacert /vagrant/pki/switchca/cacert.pem" - elif [ $PROTOCOL = "tcp" ]; then - sudo ovn-nbctl set-connection ptcp:6641 -- set connection . inactivity_probe=0 - sudo ovn-sbctl set-connection ptcp:6642 -- set connection . 
inactivity_probe=0 - fi - - if [ "$HA" = "true" ]; then - ovn_nb="$PROTOCOL://$MASTER1:6641,$PROTOCOL://$MASTER2:6641,$PROTOCOL://$MASTER3:6641" - ovn_sb="$PROTOCOL://$MASTER1:6642,$PROTOCOL://$MASTER2:6642,$PROTOCOL://$MASTER3:6642" - else - ovn_nb="$PROTOCOL://$OVERLAY_IP:6641" - ovn_sb="$PROTOCOL://$OVERLAY_IP:6642" - fi - - sudo kubectl create -f /vagrant/ovnkube-rbac.yaml - - SECRET=`kubectl get secret | grep ovnkube | awk '{print $1}'` - TOKEN=`kubectl get secret/$SECRET -o yaml |grep "token:" | cut -f2 -d ":" | sed 's/^ *//' | base64 -d` - echo $TOKEN > /vagrant/token - - nohup sudo ovnkube -loglevel=4 \ - -k8s-apiserver="https://$OVERLAY_IP:6443" \ - -k8s-cacert=/etc/kubernetes/pki/ca.crt \ - -k8s-token="$TOKEN" \ - -logfile="/var/log/ovn-kubernetes/ovnkube.log" \ - -init-master="k8smaster" -cluster-subnets="192.168.0.0/16" \ - -init-node="k8smaster" \ - -nb-address="$ovn_nb" \ - -sb-address="$ovn_sb" \ - -init-gateways -gateway-local \ - ${SSL_ARGS} 2>&1 & -else - # Daemonset is enabled. - - # Dameonsets only work with TCP now. - PROTOCOL="tcp" - - # cleanup /etc/hosts as it incorrectly maps the hostname to `127.0.1.1` - # or `127.0.0.1` - sudo sed -i '/^127.0.1.1/d' /etc/hosts - sudo sed -i '/^127.0.0.1\tk8s/d' /etc/hosts - - # Generate various OVN K8s yamls from the template files - pushd $HOME/work/src/github.com/ovn-org/ovn-kubernetes/dist/images - ./daemonset.sh --image=docker.io/ovnkube/ovn-daemonset-u:latest \ - --net-cidr=192.168.0.0/16 --svc-cidr=172.16.1.0/24 \ - --gateway-mode="local" \ - --k8s-apiserver=https://$OVERLAY_IP:6443 - popd - - # Create OVN namespace, service accounts, ovnkube-db headless service, configmap, and policies - kubectl create -f $HOME/work/src/github.com/ovn-org/ovn-kubernetes/dist/yaml/ovn-setup.yaml - - # Run ovnkube-db daemonset. - kubectl create -f $HOME/work/src/github.com/ovn-org/ovn-kubernetes/dist/yaml/ovnkube-db.yaml - - # Run ovnkube-master daemonset. 
- kubectl create -f $HOME/work/src/github.com/ovn-org/ovn-kubernetes/dist/yaml/ovnkube-master.yaml - - # Run ovnkube daemonsets for nodes - kubectl create -f $HOME/work/src/github.com/ovn-org/ovn-kubernetes/dist/yaml/ovnkube-node.yaml -fi - -# Setup some example yaml files -cat << APACHEPOD >> ~/apache-pod.yaml -apiVersion: v1 -kind: Pod -metadata: - name: apachetwin - labels: - name: webserver -spec: - containers: - - name: apachetwin - image: fedora/apache -APACHEPOD - -cat << NGINXPOD >> ~/nginx-pod.yaml -apiVersion: v1 -kind: Pod -metadata: - name: nginxtwin - labels: - name: webserver -spec: - containers: - - name: nginxtwin - image: nginx -NGINXPOD - -cat << APACHEEW >> ~/apache-e-w.yaml -apiVersion: v1 -kind: Service -metadata: - labels: - name: apacheservice - role: service - name: apacheservice -spec: - ports: - - port: 8800 - targetPort: 80 - protocol: TCP - name: tcp - selector: - name: webserver -APACHEEW - -cat << APACHENS >> ~/apache-n-s.yaml -apiVersion: v1 -kind: Service -metadata: - labels: - name: apacheexternal - role: service - name: apacheexternal -spec: - ports: - - port: 8800 - targetPort: 80 - protocol: TCP - name: tcp - selector: - name: webserver - type: NodePort -APACHENS - -sleep 10 - -# Restore xtrace -$XTRACE diff --git a/vagrant/provisioning/setup-minion.sh b/vagrant/provisioning/setup-minion.sh deleted file mode 100755 index 2c904bdf3b..0000000000 --- a/vagrant/provisioning/setup-minion.sh +++ /dev/null @@ -1,185 +0,0 @@ -#!/bin/bash - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set -o xtrace - -OVERLAY_IP=$1 -MASTER1=$2 -MASTER2=$3 -PUBLIC_SUBNET_MASK=$4 -MINION_NAME=$5 -GW_IP=$6 -OVN_EXTERNAL=$7 - -if [ -n "$OVN_EXTERNAL" ]; then - OVERLAY_IP=`ifconfig enp0s8 | grep 'inet addr' | cut -d: -f2 | awk '{print $1}'` - PUBLIC_SUBNET_MASK=`ifconfig enp0s8 | grep 'inet addr' | cut -d: -f4` - GW_IP=`grep 'option routers' /var/lib/dhcp/dhclient.enp0s8.leases | head -1 | sed -e 's/;//' | awk '{print $3}'` -fi - -cat > 
setup_minion_args.sh <> /etc/apt/sources.list.d/docker.list" -sudo apt-get update - -## First, install docker -sudo apt-get purge lxc-docker -sudo apt-get install -y linux-image-extra-$(uname -r) linux-image-extra-virtual -sudo apt-get install -y docker-engine -sudo service docker start - -## install packages that deliver ovs-pki and its dependencies -sudo apt-get build-dep dkms -sudo apt-get install python-six openssl -y -sudo apt-get install openvswitch-common libopenvswitch -y -sudo apt-get install openvswitch-datapath-dkms -y - -if [ "$DAEMONSET" != "true" ]; then - ## Install OVS and OVN components - sudo apt-get install openvswitch-switch - sudo apt-get install ovn-common ovn-host -y -fi - -if [ -n "$SSL" ]; then - PROTOCOL=ssl - echo "PROTOCOL=ssl" >> setup_minion_args.sh - # Install certificates - pushd /etc/openvswitch - sudo ovs-pki req ovncontroller - sudo ovs-pki -b -d /vagrant/pki sign ovncontroller switch - popd -else - PROTOCOL=tcp - echo "PROTOCOL=tcp" >> setup_minion_args.sh -fi - -if [ $HA = "true" ]; then - sudo apt-get install ovn-central -y - - sudo /usr/share/openvswitch/scripts/ovn-ctl stop_nb_ovsdb - sudo /usr/share/openvswitch/scripts/ovn-ctl stop_sb_ovsdb - sudo rm /etc/openvswitch/ovn*.db - sudo /usr/share/openvswitch/scripts/ovn-ctl stop_northd - - LOCAL_IP=$OVERLAY_IP - MASTER_IP=$MASTER1 - - sudo /usr/share/openvswitch/scripts/ovn-ctl \ - --db-nb-cluster-local-addr=$LOCAL_IP \ - --db-nb-cluster-remote-addr=$MASTER_IP start_nb_ovsdb - - sudo /usr/share/openvswitch/scripts/ovn-ctl \ - --db-sb-cluster-local-addr=$LOCAL_IP \ - --db-sb-cluster-remote-addr=$MASTER_IP start_sb_ovsdb -fi - -## Install kubernetes -sudo swapoff -a -sudo apt-get install -y kubelet kubeadm -sudo service kubelet restart - -# Start kubelet join the cluster -cat /vagrant/kubeadm.log > kubeadm_join.sh -sudo sh kubeadm_join.sh - - -## Clone ovn-kubernetes repo -mkdir -p $HOME/work/src/github.com/ovn-org -pushd $HOME/work/src/github.com/ovn-org -git clone 
https://github.com/ovn-org/ovn-kubernetes -popd - -if [ "$DAEMONSET" != "true" ]; then - # Install golang - wget -nv https://dl.google.com/go/go1.11.4.linux-amd64.tar.gz - sudo tar -C /usr/local -xzf go1.11.4.linux-amd64.tar.gz - export PATH="/usr/local/go/bin:echo $PATH" - export GOPATH=$HOME/work - - # Install OVN+K8S Integration - pushd $HOME/work/src/github.com/ovn-org/ovn-kubernetes/go-controller - make 1>&2 2>/dev/null - sudo make install - popd - - # Initialize the minion and gateway. - if [ $PROTOCOL = "ssl" ]; then - SSL_ARGS="-nb-client-privkey /etc/openvswitch/ovncontroller-privkey.pem \ - -nb-client-cert /etc/openvswitch/ovncontroller-cert.pem \ - -nb-client-cacert /vagrant/pki/switchca/cacert.pem \ - -sb-client-privkey /etc/openvswitch/ovncontroller-privkey.pem \ - -sb-client-cert /etc/openvswitch/ovncontroller-cert.pem \ - -sb-client-cacert /vagrant/pki/switchca/cacert.pem" - fi - - if [ "$HA" = "true" ]; then - ovn_nb="$PROTOCOL://$MASTER1:6641,$PROTOCOL://$OVERLAY_IP:6641,$PROTOCOL://$MASTER2:6641" - ovn_sb="$PROTOCOL://$MASTER1:6642,$PROTOCOL://$OVERLAY_IP:6642,$PROTOCOL://$MASTER2:6642" - else - ovn_nb="$PROTOCOL://$MASTER1:6641" - ovn_sb="$PROTOCOL://$MASTER1:6642" - fi - - TOKEN=`sudo cat /vagrant/token` - - nohup sudo ovnkube -loglevel=4 -logfile="/var/log/ovn-kubernetes/ovnkube.log" \ - -k8s-apiserver="https://$MASTER1:6443" \ - -k8s-cacert=/etc/kubernetes/pki/ca.crt \ - -k8s-token="$TOKEN" \ - -init-node="$MINION_NAME" \ - -nodeport \ - -nb-address="$ovn_nb" \ - -sb-address="$ovn_sb" \ - ${SSL_ARGS} \ - -init-gateways -gateway-interface=enp0s8 -gateway-nexthop="$GW_IP" \ - -cluster-subnets="192.168.0.0/16" 2>&1 & -fi - -sleep 10 - -# Restore xtrace -$XTRACE diff --git a/vagrant/provisioning/vm_config.conf.yml b/vagrant/provisioning/vm_config.conf.yml deleted file mode 100644 index 26b503d039..0000000000 --- a/vagrant/provisioning/vm_config.conf.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -public_network: "10.10.0.0/24" -public_gateway: 
"10.10.0.1" -k8smaster: - short_name: "k8smaster" - host_name: "k8smaster.ovn" - public-ip: "10.10.0.11" - memory: 3192 - cpus: 2 -k8sminion1: - short_name: "k8sminion1" - host_name: "k8sminion1.ovn" - public-ip: "10.10.0.12" - memory: 3192 - cpus: 2 -k8sminion2: - short_name: "k8sminion2" - host_name: "k8sminion2.ovn" - public-ip: "10.10.0.13" - memory: 3192 - cpus: 2 From d4c3e06cfdaf0a0826752353110edf828138586b Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 8 Apr 2020 15:52:39 -0500 Subject: [PATCH 21/27] config: shift tests to normal DB address format (eg tcp:1.2.3.4:6641) And add explicit tests for the old format (tcp://1.2.3.4:6641) Signed-off-by: Dan Williams --- go-controller/pkg/config/config_test.go | 98 ++++++++++++++++++------- 1 file changed, 71 insertions(+), 27 deletions(-) diff --git a/go-controller/pkg/config/config_test.go b/go-controller/pkg/config/config_test.go index ec392429e9..b95e3d7dc8 100644 --- a/go-controller/pkg/config/config_test.go +++ b/go-controller/pkg/config/config_test.go @@ -153,13 +153,13 @@ conf-dir=/etc/cni/net.d22 plugin=ovn-k8s-cni-overlay22 [ovnnorth] -address=ssl://1.2.3.4:6641 +address=ssl:1.2.3.4:6641 client-privkey=/path/to/nb-client-private.key client-cert=/path/to/nb-client.crt client-cacert=/path/to/nb-client-ca.crt [ovnsouth] -address=ssl://1.2.3.4:6642 +address=ssl:1.2.3.4:6642 client-privkey=/path/to/sb-client-private.key client-cert=/path/to/sb-client.crt client-cacert=/path/to/sb-client-ca.crt @@ -575,12 +575,12 @@ var _ = Describe("Config Operations", func() { "-k8s-cacert=" + kubeCAFile, "-k8s-token=asdfasdfasdfasfd", "-k8s-service-cidrs=172.15.0.0/24", - "-nb-address=ssl://6.5.4.3:6651", + "-nb-address=ssl:6.5.4.3:6651", "-no-hostsubnet-nodes=test=pass", "-nb-client-privkey=/client/privkey", "-nb-client-cert=/client/cert", "-nb-client-cacert=/client/cacert", - "-sb-address=ssl://6.5.4.1:6652", + "-sb-address=ssl:6.5.4.1:6652", "-sb-client-privkey=/client/privkey2", "-sb-client-cert=/client/cert2", 
"-sb-client-cacert=/client/cacert2", @@ -830,11 +830,11 @@ mode=shared "-k8s-cacert=" + kubeCAFile, "-k8s-token=asdfasdfasdfasfd", "-k8s-service-cidr=172.15.0.0/24", - "-nb-address=ssl://6.5.4.3:6651,ssl://6.5.4.4:6651,ssl://6.5.4.5:6651", + "-nb-address=ssl:6.5.4.3:6651,ssl:6.5.4.4:6651,ssl:6.5.4.5:6651", "-nb-client-privkey=/client/privkey", "-nb-client-cert=/client/cert", "-nb-client-cacert=/client/cacert", - "-sb-address=ssl://6.5.4.1:6652,ssl://6.5.4.2:6652,ssl://6.5.4.3:6652", + "-sb-address=ssl:6.5.4.1:6652,ssl:6.5.4.2:6652,ssl:6.5.4.3:6652", "-sb-client-privkey=/client/privkey2", "-sb-client-cert=/client/cert2", "-sb-client-cacert=/client/cacert2", @@ -1053,17 +1053,15 @@ mode=shared }) const ( - nbURL string = "ssl://1.2.3.4:6641" - sbURL string = "ssl://1.2.3.4:6642" + nbURL string = "ssl:1.2.3.4:6641" + sbURL string = "ssl:1.2.3.4:6642" ) It("configures client northbound SSL correctly", func() { - const nbURLOVN string = "ssl:1.2.3.4:6641" - fexec := ovntest.NewFakeExec() fexec.AddFakeCmdsNoOutputNoError([]string{ - "ovn-nbctl --db=" + nbURLOVN + " --timeout=5 --private-key=" + keyFile + " --certificate=" + certFile + " --bootstrap-ca-cert=" + caFile + " list nb_global", - "ovs-vsctl --timeout=15 set Open_vSwitch . external_ids:ovn-nb=\"" + nbURLOVN + "\"", + "ovn-nbctl --db=" + nbURL + " --timeout=5 --private-key=" + keyFile + " --certificate=" + certFile + " --bootstrap-ca-cert=" + caFile + " list nb_global", + "ovs-vsctl --timeout=15 set Open_vSwitch . 
external_ids:ovn-nb=\"" + nbURL + "\"", }) cliConfig := &OvnAuthConfig{ @@ -1078,25 +1076,23 @@ mode=shared Expect(a.PrivKey).To(Equal(keyFile)) Expect(a.Cert).To(Equal(certFile)) Expect(a.CACert).To(Equal(caFile)) - Expect(a.Address).To(Equal("ssl:1.2.3.4:6641")) + Expect(a.Address).To(Equal(nbURL)) Expect(a.northbound).To(BeTrue()) Expect(a.externalID).To(Equal("ovn-nb")) - Expect(a.GetURL()).To(Equal(nbURLOVN)) + Expect(a.GetURL()).To(Equal(nbURL)) err = a.SetDBAuth() Expect(err).NotTo(HaveOccurred()) Expect(fexec.CalledMatchesExpected()).To(BeTrue(), fexec.ErrorDesc) }) It("configures client southbound SSL correctly", func() { - const sbURLOVN string = "ssl:1.2.3.4:6642" - fexec := ovntest.NewFakeExec() fexec.AddFakeCmdsNoOutputNoError([]string{ - "ovn-nbctl --db=" + sbURLOVN + " --timeout=5 --private-key=" + keyFile + " --certificate=" + certFile + " --bootstrap-ca-cert=" + caFile + " list nb_global", + "ovn-nbctl --db=" + sbURL + " --timeout=5 --private-key=" + keyFile + " --certificate=" + certFile + " --bootstrap-ca-cert=" + caFile + " list nb_global", "ovs-vsctl --timeout=15 del-ssl", "ovs-vsctl --timeout=15 set-ssl " + keyFile + " " + certFile + " " + caFile, - "ovs-vsctl --timeout=15 set Open_vSwitch . external_ids:ovn-remote=\"" + sbURLOVN + "\"", + "ovs-vsctl --timeout=15 set Open_vSwitch . 
external_ids:ovn-remote=\"" + sbURL + "\"", }) cliConfig := &OvnAuthConfig{ @@ -1111,11 +1107,60 @@ mode=shared Expect(a.PrivKey).To(Equal(keyFile)) Expect(a.Cert).To(Equal(certFile)) Expect(a.CACert).To(Equal(caFile)) - Expect(a.Address).To(Equal("ssl:1.2.3.4:6642")) + Expect(a.Address).To(Equal(sbURL)) Expect(a.northbound).To(BeFalse()) Expect(a.externalID).To(Equal("ovn-remote")) - Expect(a.GetURL()).To(Equal(sbURLOVN)) + Expect(a.GetURL()).To(Equal(sbURL)) + err = a.SetDBAuth() + Expect(err).NotTo(HaveOccurred()) + Expect(fexec.CalledMatchesExpected()).To(BeTrue(), fexec.ErrorDesc) + }) + + const ( + nbURLLegacy string = "tcp://1.2.3.4:6641" + nbURLConverted string = "tcp:1.2.3.4:6641" + sbURLLegacy string = "tcp://1.2.3.4:6642" + sbURLConverted string = "tcp:1.2.3.4:6642" + ) + + It("configures client northbound TCP legacy address correctly", func() { + fexec := ovntest.NewFakeExec() + fexec.AddFakeCmdsNoOutputNoError([]string{ + "ovs-vsctl --timeout=15 set Open_vSwitch . external_ids:ovn-nb=\"" + nbURLConverted + "\"", + }) + + cliConfig := &OvnAuthConfig{Address: nbURLLegacy} + a, err := buildOvnAuth(fexec, true, cliConfig, &OvnAuthConfig{}, true) + Expect(err).NotTo(HaveOccurred()) + Expect(a.Scheme).To(Equal(OvnDBSchemeTCP)) + // Config should convert :// to : in addresses + Expect(a.Address).To(Equal(nbURLConverted)) + Expect(a.northbound).To(BeTrue()) + Expect(a.externalID).To(Equal("ovn-nb")) + + Expect(a.GetURL()).To(Equal(nbURLConverted)) + err = a.SetDBAuth() + Expect(err).NotTo(HaveOccurred()) + Expect(fexec.CalledMatchesExpected()).To(BeTrue(), fexec.ErrorDesc) + }) + + It("configures client southbound TCP legacy address correctly", func() { + fexec := ovntest.NewFakeExec() + fexec.AddFakeCmdsNoOutputNoError([]string{ + "ovs-vsctl --timeout=15 set Open_vSwitch . 
external_ids:ovn-remote=\"" + sbURLConverted + "\"", }) + + cliConfig := &OvnAuthConfig{Address: sbURLLegacy} + a, err := buildOvnAuth(fexec, false, cliConfig, &OvnAuthConfig{}, true) + Expect(err).NotTo(HaveOccurred()) + Expect(a.Scheme).To(Equal(OvnDBSchemeTCP)) + // Config should convert :// to : in addresses + Expect(a.Address).To(Equal(sbURLConverted)) + Expect(a.northbound).To(BeFalse()) + Expect(a.externalID).To(Equal("ovn-remote")) + + Expect(a.GetURL()).To(Equal(sbURLConverted)) err = a.SetDBAuth() Expect(err).NotTo(HaveOccurred()) Expect(fexec.CalledMatchesExpected()).To(BeTrue(), fexec.ErrorDesc) @@ -1211,7 +1256,7 @@ mode=shared generateTests("the scheme is not empty/tcp/ssl", "unknown OVN DB scheme \"blah\"", func() []string { - return []string{"address=blah://1.2.3.4:5555"} + return []string{"address=blah:1.2.3.4:5555"} }) generateTests("the address is unix socket and certs are given", @@ -1228,7 +1273,7 @@ mode=shared "failed to parse OVN DB host/port \"4.3.2.1\": address 4.3.2.1: missing port in address", func() []string { return []string{ - "address=tcp://4.3.2.1", + "address=tcp:4.3.2.1", } }) @@ -1236,7 +1281,7 @@ mode=shared "certificate or key given; perhaps you mean to use the 'ssl' scheme?", func() []string { return []string{ - "address=tcp://1.2.3.4:444", + "address=tcp:1.2.3.4:444", "client-privkey=/bar/baz/foo", } }) @@ -1246,7 +1291,7 @@ mode=shared generateTests("the SSL scheme is missing a client CA cert", "", func() []string { return []string{ - "address=ssl://1.2.3.4:444", + "address=ssl:1.2.3.4:444", "client-privkey=" + keyFile, "client-cert=" + certFile, "client-cacert=/foo/bar/baz", @@ -1256,7 +1301,7 @@ mode=shared generateTests("the SSL scheme is missing a private key file", "", func() []string { return []string{ - "address=ssl://1.2.3.4:444", + "address=ssl:1.2.3.4:444", "client-privkey=/foo/bar/baz", "client-cert=" + certFile, "client-cacert=" + caFile, @@ -1266,13 +1311,12 @@ mode=shared generateTests("the SSL scheme is missing a 
client cert file", "", func() []string { return []string{ - "address=ssl://1.2.3.4:444", + "address=ssl:1.2.3.4:444", "client-privkey=" + keyFile, "client-cert=/foo/bar/baz", "client-cacert=" + caFile, } }) - }) }) }) From 8bcd1d051faf60c017d46158538393d83ae61add Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 8 Apr 2020 16:32:03 -0500 Subject: [PATCH 22/27] docs/misc: clean up usage of legacy :// DB address format Signed-off-by: Dan Williams --- README_MANUAL.md | 8 ++++---- dist/images/ovnkube.sh | 4 ++-- docs/INSTALL.OPENSHIFT.md | 4 ++-- docs/INSTALL.SSL.md | 8 ++++---- docs/config.md | 4 ++-- docs/ha.md | 8 ++++---- docs/ovn_k8s.conf.5 | 4 ++-- docs/ovnkube.1 | 4 ++-- etc/ovn_k8s.conf | 4 ++-- go-controller/README.md | 6 +++--- go-controller/etc/ovn_k8s.conf | 4 ++-- go-controller/pkg/config/config.go | 7 ++++--- 12 files changed, 33 insertions(+), 32 deletions(-) diff --git a/README_MANUAL.md b/README_MANUAL.md index 2b6642cb25..14c8ed2a19 100644 --- a/README_MANUAL.md +++ b/README_MANUAL.md @@ -134,8 +134,8 @@ uses the hostname. 
kubelet allows this name to be overridden with -nodeport \ -init-gateways -gateway-local \ -k8s-token="$TOKEN" \ - -nb-address="tcp://$CENTRAL_IP:6641" \ - -sb-address="tcp://$CENTRAL_IP:6642" 2>&1 & + -nb-address="tcp:$CENTRAL_IP:6641" \ + -sb-address="tcp:$CENTRAL_IP:6642" 2>&1 & ``` Note: Make sure to read /var/log/ovn-kubernetes/ovnkube.log to see that there were @@ -172,8 +172,8 @@ nohup sudo ovnkube -k8s-kubeconfig kubeconfig.yaml -loglevel=4 \ -k8s-apiserver="http://$CENTRAL_IP:8080" \ -init-node="$NODE_NAME" \ -nodeport \ - -nb-address="tcp://$CENTRAL_IP:6641" \ - -sb-address="tcp://$CENTRAL_IP:6642" -k8s-token="$TOKEN" \ + -nb-address="tcp:$CENTRAL_IP:6641" \ + -sb-address="tcp:$CENTRAL_IP:6642" -k8s-token="$TOKEN" \ -init-gateways \ -k8s-service-cidr=$SERVICE_IP_SUBNET \ -cluster-subnets=$CLUSTER_IP_SUBNET 2>&1 & diff --git a/dist/images/ovnkube.sh b/dist/images/ovnkube.sh index 82b183d2c1..4fa6c381f7 100755 --- a/dist/images/ovnkube.sh +++ b/dist/images/ovnkube.sh @@ -269,8 +269,8 @@ get_ovn_db_vars() { ovn_nbdb_str=${ovn_nbdb_str}"," ovn_sbdb_str=${ovn_sbdb_str}"," fi - ovn_nbdb_str=${ovn_nbdb_str}tcp://${ovn_db_hosts[${i}]}:${ovn_nb_port} - ovn_sbdb_str=${ovn_sbdb_str}tcp://${ovn_db_hosts[${i}]}:${ovn_sb_port} + ovn_nbdb_str=${ovn_nbdb_str}tcp:${ovn_db_hosts[${i}]}:${ovn_nb_port} + ovn_sbdb_str=${ovn_sbdb_str}tcp:${ovn_db_hosts[${i}]}:${ovn_sb_port} done ovn_nbdb=${OVN_NORTH:-$ovn_nbdb_str} ovn_sbdb=${OVN_SOUTH:-$ovn_sbdb_str} diff --git a/docs/INSTALL.OPENSHIFT.md b/docs/INSTALL.OPENSHIFT.md index fe9fba1534..bd01dcbf59 100644 --- a/docs/INSTALL.OPENSHIFT.md +++ b/docs/INSTALL.OPENSHIFT.md @@ -186,8 +186,8 @@ that is needed for ovn to access the cluster apiserver. 
# oc get configmap ovn-config -o yaml apiVersion: v1 data: - OvnNorth: tcp://10.19.188.22:6641 - OvnSouth: tcp://10.19.188.22:6642 + OvnNorth: tcp:10.19.188.22:6641 + OvnSouth: tcp:10.19.188.22:6642 k8s_apiserver: https://wsfd-netdev22.ntdv.lab.eng.bos.redhat.com:8443 net_cidr: 10.128.0.0/14 svc_cidr: 172.30.0.0/16 diff --git a/docs/INSTALL.SSL.md b/docs/INSTALL.SSL.md index 6583d22845..53c558ce7f 100644 --- a/docs/INSTALL.SSL.md +++ b/docs/INSTALL.SSL.md @@ -169,8 +169,8 @@ sudo ovnkube -k8s-kubeconfig kubeconfig.yaml -loglevel=4 \ -init-master="$NODE_NAME" -cluster-subnets=$CLUSTER_IP_SUBNET \ -k8s-service-cidr=$SERVICE_IP_SUBNET \ -nodeport \ - -nb-address="ssl://$CENTRAL_IP:6641" \ - -sb-address="ssl://$CENTRAL_IP:6642" \ + -nb-address="ssl:$CENTRAL_IP:6641" \ + -sb-address="ssl:$CENTRAL_IP:6642" \ -nb-client-privkey /etc/openvswitch/ovncontroller-privkey.pem \ -nb-client-cert /etc/openvswitch/ovncontroller-cert.pem \ -nb-client-cacert /etc/openvswitch/cacert.pem \ @@ -187,8 +187,8 @@ sudo ovnkube -k8s-kubeconfig $HOME/kubeconfig.yaml -loglevel=4 \ -k8s-apiserver="http://$CENTRAL_IP:8080" \ -init-node="$MINION_NAME" \ -nodeport \ - -nb-address="ssl://$CENTRAL_IP:6641" \ - -sb-address="ssl://$CENTRAL_IP:6642" -k8s-token=$TOKEN \ + -nb-address="ssl:$CENTRAL_IP:6641" \ + -sb-address="ssl:$CENTRAL_IP:6642" -k8s-token=$TOKEN \ -nb-client-privkey /etc/openvswitch/ovncontroller-privkey.pem \ -nb-client-cert /etc/openvswitch/ovncontroller-cert.pem \ -nb-client-cacert /etc/openvswitch/cacert.pem \ diff --git a/docs/config.md b/docs/config.md index de6ff01e99..7d2fb2b8d3 100644 --- a/docs/config.md +++ b/docs/config.md @@ -94,7 +94,7 @@ This section contains the address and (if the 'ssl' method is used) certificates needed to use the OVN northbound database API. Only the the ovn-kubernetes master needs to specify the 'server' options. 
``` -address=ssl://1.2.3.4:6641 +address=ssl:1.2.3.4:6641 client-privkey=/path/to/private.key client-cert=/path/to/client.crt client-cacert=/path/to/client-ca.crt @@ -109,7 +109,7 @@ This section contains the address and (if the 'ssl' method is used) certificates needed to use the OVN southbound database API. Only the the ovn-kubernetes master needs to specify the 'server' options. ``` -address=ssl://1.2.3.4:6642 +address=ssl:1.2.3.4:6642 client-privkey=/path/to/private.key client-cert=/path/to/client.crt client-cacert=/path/to/client-ca.crt diff --git a/docs/ha.md b/docs/ha.md index c3433269fd..a61813dcc0 100644 --- a/docs/ha.md +++ b/docs/ha.md @@ -83,8 +83,8 @@ IP1="$MASTER1" IP2="$MASTER2" IP3="$MASTER3" -ovn_nb="tcp://$IP1:6641,tcp://$IP2:6641,tcp://$IP3:6641" -ovn_sb="tcp://$IP1:6642,tcp://$IP2:6642,tcp://$IP3:6642" +ovn_nb="tcp:$IP1:6641,tcp:$IP2:6641,tcp:$IP3:6641" +ovn_sb="tcp:$IP1:6642,tcp:$IP2:6642,tcp:$IP3:6642" nohup sudo ovnkube -k8s-kubeconfig kubeconfig.yaml \ -loglevel=4 \ @@ -127,8 +127,8 @@ IP1="$MASTER1" IP2="$MASTER2" IP3="$MASTER3" -ovn_nb="tcp://$IP1:6641,tcp://$IP2:6641,tcp://$IP3:6641" -ovn_sb="tcp://$IP1:6642,tcp://$IP2:6642,tcp://$IP3:6642" +ovn_nb="tcp:$IP1:6641,tcp:$IP2:6641,tcp:$IP3:6641" +ovn_sb="tcp:$IP1:6642,tcp:$IP2:6642,tcp:$IP3:6642" nohup sudo ovnkube -k8s-kubeconfig $HOME/kubeconfig.yaml -loglevel=4 \ -logfile="/var/log/openvswitch/ovnkube.log" \ diff --git a/docs/ovn_k8s.conf.5 b/docs/ovn_k8s.conf.5 index ca97a37245..fc790db071 100644 --- a/docs/ovn_k8s.conf.5 +++ b/docs/ovn_k8s.conf.5 @@ -80,7 +80,7 @@ On openshift, the token can be generated on the cluster master as follows: \fBaddress\fR= This is the url used to access the northbound database server. The scheme may be ssl or tcp. When ssl is used, the certs must be provided. The host must be a host IP address, not name. -The port, by default, is 6441. E.g., ssl://1.2.3.4:6641 +The port, by default, is 6441. 
E.g., ssl:1.2.3.4:6641 .TP \fBclient-privkey\fR=/etc/openvswitch/ovnnb-privkey.pem .TP @@ -100,7 +100,7 @@ For example: /etc/openvswitch/ovnnb-ca.cert \fBaddress\fR= This is the url used to access the southbound database server. The scheme may be ssl or tcp. When ssl is used, the certs must be provided. The host must be a host IP address, not name. -The port, by default, is 6442. E.g., ssl://1.2.3.4:6642 +The port, by default, is 6442. E.g., ssl:1.2.3.4:6642 .TP \fBclient-privkey\fR=/etc/openvswitch/ovnsb-privkey.pem .TP diff --git a/docs/ovnkube.1 b/docs/ovnkube.1 index 9f6da3bb91..d0d1454d76 100644 --- a/docs/ovnkube.1 +++ b/docs/ovnkube.1 @@ -100,7 +100,7 @@ The Kubernetes API authentication token (not required if --k8s-kubeconfig is giv The IP address and port for the metrics server to serve on (set to 0.0.0.0 for all IPv4 interfaces). .TP \fB\--nb-address\fR string -IP address and port of the OVN northbound API (eg, ssl://1.2.3.4:6641). Leave empty to use a local unix socket. +IP address and port of the OVN northbound API (eg, ssl:1.2.3.4:6641). Leave empty to use a local unix socket. .TP \fB\--nb-client-privkey\fR string Private key that the client should use for talking to the OVN database. Leave empty to use local unix socket. @@ -112,7 +112,7 @@ Client certificate that the client should use for talking to the OVN database. CA certificate that the client should use for talking to the OVN database. Leave empty to use local unix socket. .TP \fB\--sb-address\fR string -IP address and port of the OVN southbound database (eg, ssl://1.2.3.4:6642). Leave empty to use a local unix socket. +IP address and port of the OVN southbound database (eg, ssl:1.2.3.4:6642). Leave empty to use a local unix socket. .TP \fB\--sb-client-privkey\fR string Private key that the client should use for talking to the OVN database. Leave empty to use local unix socket. 
diff --git a/etc/ovn_k8s.conf b/etc/ovn_k8s.conf index 0b1427ae84..8361e018b7 100644 --- a/etc/ovn_k8s.conf +++ b/etc/ovn_k8s.conf @@ -16,7 +16,7 @@ conf-dir=/etc/cni/net.d plugin=ovn-k8s-cni-overlay [ovnnorth] -address=ssl://1.2.3.4:6641 +address=ssl:1.2.3.4:6641 client-privkey=/path/to/private.key client-cert=/path/to/client.crt client-cacert=/path/to/client-ca.crt @@ -25,7 +25,7 @@ server-cert=/path/to/server.crt server-cacert=/path/to/server-ca.crt [ovnsouth] -address=ssl://1.2.3.4:6642 +address=ssl:1.2.3.4:6642 client-privkey=/path/to/private.key client-cert=/path/to/client.crt client-cacert=/path/to/client-ca.crt diff --git a/go-controller/README.md b/go-controller/README.md index e029e0d170..ddb8146767 100644 --- a/go-controller/README.md +++ b/go-controller/README.md @@ -66,7 +66,7 @@ Usage: -k8s-token string the Kubernetes API authentication token (not required if --k8s-kubeconfig is given) -nb-address string - IP address and port of the OVN northbound API (eg, ssl://1.2.3.4:6641). Leave empty to use a local unix socket. + IP address and port of the OVN northbound API (eg, ssl:1.2.3.4:6641). Leave empty to use a local unix socket. -nb-client-privkey string Private key that the client should use for talking to the OVN database. Leave empty to use local unix socket. (default: /etc/openvswitch/ovnnb-privkey.pem) -nb-client-cert string @@ -74,7 +74,7 @@ Usage: -nb-client-cacert string CA certificate that the client should use for talking to the OVN database. Leave empty to use local unix socket. (default: /etc/openvswitch/ovnnb-ca.cert) -sb-address string - IP address and port of the OVN southbound API (eg, ssl://1.2.3.4:6642). Leave empty to use a local unix socket. + IP address and port of the OVN southbound API (eg, ssl:1.2.3.4:6642). Leave empty to use a local unix socket. -sb-client-privkey string Private key that the client should use for talking to the OVN database. Leave empty to use local unix socket. 
(default: /etc/openvswitch/ovnsb-privkey.pem) -sb-client-cert string @@ -134,7 +134,7 @@ conf-dir=/etc/cni/net.d plugin=ovn-k8s-cni-overlay [ovnnorth] -address=ssl://1.2.3.4:6641 +address=ssl:1.2.3.4:6641 client-privkey=/path/to/private.key client-cert=/path/to/client.crt client-cacert=/path/to/client-ca.crt diff --git a/go-controller/etc/ovn_k8s.conf b/go-controller/etc/ovn_k8s.conf index f5c4eee0b8..4e44bb81b2 100644 --- a/go-controller/etc/ovn_k8s.conf +++ b/go-controller/etc/ovn_k8s.conf @@ -16,10 +16,10 @@ apiserver=https://ovn_master_fqn:8443 token= [OvnNorth] -address=tcp://ovn_master_ip:6641 +address=tcp:ovn_master_ip:6641 [OvnSouth] -address=tcp://ovn_master_ip:6642 +address=tcp:ovn_master_ip:6642 [gateway] mode=shared diff --git a/go-controller/pkg/config/config.go b/go-controller/pkg/config/config.go index 2415e90c02..341d423b97 100644 --- a/go-controller/pkg/config/config.go +++ b/go-controller/pkg/config/config.go @@ -611,7 +611,7 @@ var OvnNBFlags = []cli.Flag{ cli.StringFlag{ Name: "nb-address", Usage: "IP address and port of the OVN northbound API " + - "(eg, ssl://1.2.3.4:6641,ssl://1.2.3.5:6642). Leave empty to " + + "(eg, ssl:1.2.3.4:6641,ssl:1.2.3.5:6642). Leave empty to " + "use a local unix socket.", Destination: &cliConfig.OvnNorth.Address, }, @@ -640,7 +640,7 @@ var OvnSBFlags = []cli.Flag{ cli.StringFlag{ Name: "sb-address", Usage: "IP address and port of the OVN southbound API " + - "(eg, ssl://1.2.3.4:6642,ssl://1.2.3.5:6642). " + + "(eg, ssl:1.2.3.4:6642,ssl:1.2.3.5:6642). 
" + "Leave empty to use a local unix socket.", Destination: &cliConfig.OvnSouth.Address, }, @@ -1257,7 +1257,8 @@ func pathExists(path string) bool { } // parseAddress parses an OVN database address, which can be of form -// "ssl:1.2.3.4:6641,ssl:1.2.3.5:6641" or "ssl://1.2.3.4:6641,ssl://1.2.3.5:6641" +// "ssl:1.2.3.4:6641,ssl:1.2.3.5:6641" (OVS/OVN format) or +// "ssl://1.2.3.4:6641,ssl://1.2.3.5:6641" (legacy ovnkube format) // or "ssl:[fd01::1]:6641,ssl:[fd01::2]:6641 // and returns the validated address(es) and the scheme func parseAddress(urlString string) (string, OvnDBScheme, error) { From ee14c772787fc108a0f7769208989a5d74a25bad Mon Sep 17 00:00:00 2001 From: Dan Winship Date: Sun, 5 Apr 2020 13:25:37 -0400 Subject: [PATCH 23/27] ovn: add dual-stack support to loadbalancer code Rename createLoadBalancerVIP to createLoadBalancerVIPs and make it able to handle multiple source IPs, of multiple families, and filter the endpoints to match the source IP family/families. Signed-off-by: Dan Winship --- go-controller/pkg/ovn/endpoints.go | 36 ++++++++++++------------ go-controller/pkg/ovn/gateway.go | 22 ++++++++------- go-controller/pkg/ovn/loadbalancer.go | 40 +++++++++++++++++---------- go-controller/pkg/ovn/service.go | 26 +++++++++-------- 4 files changed, 71 insertions(+), 53 deletions(-) diff --git a/go-controller/pkg/ovn/endpoints.go b/go-controller/pkg/ovn/endpoints.go index 016c2cae2b..f13a873d2e 100644 --- a/go-controller/pkg/ovn/endpoints.go +++ b/go-controller/pkg/ovn/endpoints.go @@ -90,8 +90,9 @@ func (ovn *Controller) AddEndpoints(ep *kapi.Endpoints) error { svcPort.Protocol, err) continue } - err = ovn.createLoadBalancerVIP(loadBalancer, - svc.Spec.ClusterIP, svcPort.Port, ips, targetPort) + err = ovn.createLoadBalancerVIPs(loadBalancer, + []string{svc.Spec.ClusterIP}, + svcPort.Port, ips, targetPort) if err != nil { klog.Errorf("Error in creating Cluster IP for svc %s, target port: %d - %v\n", svc.Name, targetPort, err) continue @@ -109,8 +110,8 
@@ func (ovn *Controller) AddEndpoints(ep *kapi.Endpoints) error { func (ovn *Controller) handleNodePortLB(node *kapi.Node) error { physicalGateway := util.GWRouterPrefix + node.Name - var physicalIP string - if physicalIP, _ = ovn.getGatewayPhysicalIP(physicalGateway); physicalIP == "" { + var physicalIPs []string + if physicalIPs, _ = ovn.getGatewayPhysicalIPs(physicalGateway); physicalIPs == nil { return fmt.Errorf("gateway physical IP for node %q does not yet exist", node.Name) } namespaces, err := ovn.watchFactory.GetNamespaces() @@ -143,7 +144,7 @@ func (ovn *Controller) handleNodePortLB(node *kapi.Node) error { return fmt.Errorf("%s load balancer for node %q does not yet exist", proto, node.Name) } - err = ovn.createLoadBalancerVIP(k8sNSLb, physicalIP, svcPort.NodePort, ips, targetPort) + err = ovn.createLoadBalancerVIPs(k8sNSLb, physicalIPs, svcPort.NodePort, ips, targetPort) if err != nil { klog.Errorf("failed to create VIP in load balancer %s - %v", k8sNSLb, err) continue @@ -210,21 +211,22 @@ func (ovn *Controller) handleExternalIPs(svc *kapi.Service, svcPort kapi.Service if len(svc.Spec.ExternalIPs) == 0 { return } - for _, extIP := range svc.Spec.ExternalIPs { - lb := ovn.getDefaultGatewayLoadBalancer(svcPort.Protocol) - if lb == "" { - klog.Warningf("No default gateway found for protocol %s\n\tNote: 'nodeport' flag needs to be enabled for default gateway", svcPort.Protocol) - continue - } - if removeLoadBalancerVIP { + lb := ovn.getDefaultGatewayLoadBalancer(svcPort.Protocol) + if lb == "" { + klog.Warningf("No default gateway found for protocol %s\n\tNote: 'nodeport' flag needs to be enabled for default gateway", svcPort.Protocol) + return + } + + if removeLoadBalancerVIP { + for _, extIP := range svc.Spec.ExternalIPs { vip := util.JoinHostPortInt32(extIP, svcPort.Port) klog.V(5).Infof("Removing external VIP: %s from load balancer: %s", vip, lb) ovn.deleteLoadBalancerVIP(lb, vip) - } else { - err := ovn.createLoadBalancerVIP(lb, extIP, svcPort.Port, 
ips, targetPort) - if err != nil { - klog.Errorf("Error in creating external IP for service: %s, externalIP: %s", svc.Name, extIP) - } + } + } else { + err := ovn.createLoadBalancerVIPs(lb, svc.Spec.ExternalIPs, svcPort.Port, ips, targetPort) + if err != nil { + klog.Errorf("Error in creating external IPs for service: %s", svc.Name) } } } diff --git a/go-controller/pkg/ovn/gateway.go b/go-controller/pkg/ovn/gateway.go index 8f5b388444..a58ed9bc1e 100644 --- a/go-controller/pkg/ovn/gateway.go +++ b/go-controller/pkg/ovn/gateway.go @@ -18,14 +18,14 @@ func (ovn *Controller) getOvnGateways() ([]string, string, error) { return strings.Fields(out), stderr, err } -func (ovn *Controller) getGatewayPhysicalIP(physicalGateway string) (string, error) { +func (ovn *Controller) getGatewayPhysicalIPs(physicalGateway string) ([]string, error) { physicalIP, _, err := util.RunOVNNbctl("get", "logical_router", physicalGateway, "external_ids:physical_ip") if err != nil { - return "", err + return nil, err } - return physicalIP, nil + return []string{physicalIP}, nil } func (ovn *Controller) getGatewayLoadBalancer(physicalGateway string, protocol kapi.Protocol) (string, error) { @@ -60,7 +60,7 @@ func (ovn *Controller) createGatewayVIPs(protocol kapi.Protocol, sourcePort int3 if loadBalancer == "" { continue } - physicalIP, err := ovn.getGatewayPhysicalIP(physicalGateway) + physicalIPs, err := ovn.getGatewayPhysicalIPs(physicalGateway) if err != nil { klog.Errorf("physical gateway %s does not have physical ip (%v)", physicalGateway, err) @@ -68,7 +68,7 @@ func (ovn *Controller) createGatewayVIPs(protocol kapi.Protocol, sourcePort int3 } // With the physical_ip:sourcePort as the VIP, add an entry in // 'load_balancer'. 
- err = ovn.createLoadBalancerVIP(loadBalancer, physicalIP, sourcePort, targetIPs, targetPort) + err = ovn.createLoadBalancerVIPs(loadBalancer, physicalIPs, sourcePort, targetIPs, targetPort) if err != nil { klog.Errorf("Failed to create VIP in load balancer %s - %v", loadBalancer, err) continue @@ -95,15 +95,17 @@ func (ovn *Controller) deleteGatewayVIPs(protocol kapi.Protocol, sourcePort int3 if loadBalancer == "" { continue } - physicalIP, err := ovn.getGatewayPhysicalIP(physicalGateway) + physicalIPs, err := ovn.getGatewayPhysicalIPs(physicalGateway) if err != nil { klog.Errorf("physical gateway %s does not have physical ip (%v)", physicalGateway, err) continue } - // With the physical_ip:sourcePort as the VIP, delete an entry in 'load_balancer'. - vip := util.JoinHostPortInt32(physicalIP, sourcePort) - klog.V(5).Infof("Removing gateway VIP: %s from loadbalancer: %s", vip, loadBalancer) - ovn.deleteLoadBalancerVIP(loadBalancer, vip) + for _, physicalIP := range physicalIPs { + // With the physical_ip:sourcePort as the VIP, delete an entry in 'load_balancer'. + vip := util.JoinHostPortInt32(physicalIP, sourcePort) + klog.V(5).Infof("Removing gateway VIP: %s from loadbalancer: %s", vip, loadBalancer) + ovn.deleteLoadBalancerVIP(loadBalancer, vip) + } } } diff --git a/go-controller/pkg/ovn/loadbalancer.go b/go-controller/pkg/ovn/loadbalancer.go index 361e0328e0..dc88024e05 100644 --- a/go-controller/pkg/ovn/loadbalancer.go +++ b/go-controller/pkg/ovn/loadbalancer.go @@ -126,22 +126,34 @@ func (ovn *Controller) configureLoadBalancer(lb, sourceIP string, sourcePort int return nil } -// createLoadBalancerVIP either creates or updates a load balancer VIP mapping from -// sourceIP:sourcePort to targetIP:targetPort for each IP in targetIPs. If targetIPs -// is non-empty then the reject ACL for the service is removed. 
-func (ovn *Controller) createLoadBalancerVIP(lb, sourceIP string, sourcePort int32, targetIPs []string, targetPort int32) error { - klog.V(5).Infof("Creating lb with %s, %s, %d, [%v], %d", lb, sourceIP, sourcePort, targetIPs, targetPort) +// createLoadBalancerVIPs either creates or updates a set of load balancer VIPs mapping +// from sourcePort on each IP of a given address family in sourceIPs, to targetPort on +// each IP of the same address family in targetIPs, removing the reject ACL for any +// source IP that is now in use. +func (ovn *Controller) createLoadBalancerVIPs(lb string, + sourceIPs []string, sourcePort int32, + targetIPs []string, targetPort int32) error { + klog.V(5).Infof("Creating lb with %s, [%v], %d, [%v], %d", lb, sourceIPs, sourcePort, targetIPs, targetPort) - var targets []string - for _, targetIP := range targetIPs { - targets = append(targets, util.JoinHostPortInt32(targetIP, targetPort)) - } - err := ovn.configureLoadBalancer(lb, sourceIP, sourcePort, targets) - if len(targets) > 0 { - // ensure the ACL is removed if it exists - ovn.deleteLoadBalancerRejectACL(lb, util.JoinHostPortInt32(sourceIP, sourcePort)) + for _, sourceIP := range sourceIPs { + isIPv6 := utilnet.IsIPv6String(sourceIP) + + var targets []string + for _, targetIP := range targetIPs { + if utilnet.IsIPv6String(targetIP) == isIPv6 { + targets = append(targets, util.JoinHostPortInt32(targetIP, targetPort)) + } + } + err := ovn.configureLoadBalancer(lb, sourceIP, sourcePort, targets) + if len(targets) > 0 { + // ensure the ACL is removed if it exists + ovn.deleteLoadBalancerRejectACL(lb, util.JoinHostPortInt32(sourceIP, sourcePort)) + } + if err != nil { + return err + } } - return err + return nil } func (ovn *Controller) getLogicalSwitchesForLoadBalancer(lb string) ([]string, error) { diff --git a/go-controller/pkg/ovn/service.go b/go-controller/pkg/ovn/service.go index f485a7aaaf..727db54cc5 100644 --- a/go-controller/pkg/ovn/service.go +++ 
b/go-controller/pkg/ovn/service.go @@ -214,24 +214,26 @@ func (ovn *Controller) createService(service *kapi.Service) error { if loadBalancer == "" { continue } - physicalIP, err := ovn.getGatewayPhysicalIP(physicalGateway) + physicalIPs, err := ovn.getGatewayPhysicalIPs(physicalGateway) if err != nil { klog.Errorf("physical gateway %s does not have physical ip (%v)", physicalGateway, err) continue } - // With the physical_ip:port as the VIP, add an entry in - // 'load_balancer'. - vip := util.JoinHostPortInt32(physicalIP, port) - // Skip creating LB if endpoints watcher already did it - if _, hasEps := ovn.getServiceLBInfo(loadBalancer, vip); hasEps { - klog.V(5).Infof("Load Balancer already configured for %s, %s", loadBalancer, vip) - } else if ovn.svcQualifiesForReject(service) { - aclUUID, err := ovn.createLoadBalancerRejectACL(loadBalancer, physicalIP, port, protocol) - if err != nil { - return fmt.Errorf("failed to create service ACL") + for _, physicalIP := range physicalIPs { + // With the physical_ip:port as the VIP, add an entry in + // 'load_balancer'. + vip := util.JoinHostPortInt32(physicalIP, port) + // Skip creating LB if endpoints watcher already did it + if _, hasEps := ovn.getServiceLBInfo(loadBalancer, vip); hasEps { + klog.V(5).Infof("Load Balancer already configured for %s, %s", loadBalancer, vip) + } else if ovn.svcQualifiesForReject(service) { + aclUUID, err := ovn.createLoadBalancerRejectACL(loadBalancer, physicalIP, port, protocol) + if err != nil { + return fmt.Errorf("failed to create service ACL") + } + klog.V(5).Infof("Service Reject ACL created for physical gateway: %s", aclUUID) } - klog.V(5).Infof("Service Reject ACL created for physical gateway: %s", aclUUID) } } } From 5f9c69b0348cce1c72d2677ec052e704bdf9e15c Mon Sep 17 00:00:00 2001 From: Dan Winship Date: Thu, 9 Apr 2020 20:05:11 -0400 Subject: [PATCH 24/27] Fix references to "minions" and "master nodes" Kubernetes has "masters" and "nodes". 
"Minion" was deprecated years ago, and "master node" doesn't mean anything, unless you mean a master which is also a node, but most of the uses of "master node" here didn't necessarily mean that. Signed-off-by: Dan Winship --- README_MANUAL.md | 23 ++++++++------- dist/READMEcontainer.md | 6 ++-- dist/images/ovndb-vip.sh | 2 +- dist/images/ovnkube.sh | 4 +-- dist/templates/ovnkube-db.yaml.j2 | 2 +- dist/templates/ovnkube-master.yaml.j2 | 4 +-- docs/INSTALL.SSL.md | 34 +++++++++++------------ docs/INSTALL.UBUNTU.md | 2 +- docs/debugging.md | 8 +++--- docs/ha.md | 28 +++++++++---------- go-controller/cmd/ovnkube/ovnkube.go | 2 +- go-controller/pkg/config/config.go | 2 +- go-controller/pkg/node/management-port.go | 3 +- go-controller/pkg/ovn/master_test.go | 2 +- 14 files changed, 60 insertions(+), 62 deletions(-) diff --git a/README_MANUAL.md b/README_MANUAL.md index 14c8ed2a19..baa44c20bd 100644 --- a/README_MANUAL.md +++ b/README_MANUAL.md @@ -73,18 +73,18 @@ that node over IP address. internal interface). 3. All containers should be able to communicate with the k8s daemons running -in the master node. +on the master. - This can be achieved by running OVN gateways in the minion nodes. With + This can be achieved by running OVN gateways on the nodes. With at least one OVN gateway, the pods can reach the k8s central daemons with NAT. -### Master node initialization +### Master initialization -* Start the central components on a k8s master node. +* Start the central components on a k8s master. OVN architecture has a central component which stores your networking intent -in a database. Start this central component on one of the nodes where you +in a database. Start this central component on one of the hosts where you have started your k8s central daemons and which has an IP address of $CENTRAL_IP. (For HA of the central component, please read [HA.md]) @@ -110,7 +110,7 @@ Also start ovn-controller on this node. 
/usr/share/openvswitch/scripts/ovn-ctl start_controller ``` -Now start the ovnkube utility on the master node. +Now start the ovnkube utility on the master The below command expects the user to provide * A cluster wide private address range of $CLUSTER_IP_SUBNET @@ -140,20 +140,19 @@ uses the hostname. kubelet allows this name to be overridden with Note: Make sure to read /var/log/ovn-kubernetes/ovnkube.log to see that there were no obvious errors with argument passing. Also, you should only pass -"-init-node" argument if there is a kubelet running on the master node too. +"-init-node" argument if there is a kubelet running on the master too. If you want to use SSL instead of TCP for OVN databases, please read [INSTALL.SSL.md]. -### Minion node initialization. +### Node initialization. On each host, you will need to run the following command once. The below command expects the user to provide * A cluster wide private address range of $CLUSTER_IP_SUBNET (e.g: 192.168.0.0/16). The pods are provided IP address from this range. -This value should be the same as the one provided to ovnkube in the master -node. +This value should be the same as the one provided to ovnkube in the master. * $NODE_NAME should be the same as the one used by kubelet. kubelet by default uses the hostname. kubelet allows this name to be overridden with @@ -187,10 +186,10 @@ Notes on gateway nodes: * Gateway nodes are needed for North-South connectivity in OVN. OVN has support for multiple gateway nodes. In the above command, since '-init-gateways' has been provided as an option, a OVN -gateway will be created on each minion. +gateway will be created on each node. * Just providing '-init-gateways', will make OVN choose the -interface in your minion via which the minion's default gateway +interface in your node via which the node default gateway is reached. 
* If you want to chose the interface for your gateway, you should diff --git a/dist/READMEcontainer.md b/dist/READMEcontainer.md index 1bde7b1dfe..6cde493075 100644 --- a/dist/READMEcontainer.md +++ b/dist/READMEcontainer.md @@ -24,7 +24,7 @@ to start openvswitch. it must be running for the ovn-daemonsets to run. There are two daemonsets that support ovn. ovnkube-master runs -on the cluster master node, ovnkube runs on the remaining nodes. +on the cluster masters, ovnkube runs on all nodes. The daemonsets run with hostNetwork: true. The both daemonsets run the node daemons, ovn-controller and ovn-node. @@ -32,8 +32,8 @@ In addition the daemonset runs ovn-northd and ovn-master. The startup sequence requires this startup order: - ovs -- ovnkube-master on the master node -- ovnkube on the rest of the nodes. +- ovnkube-master on the masters +- ovnkube on all nodes. =============================== diff --git a/dist/images/ovndb-vip.sh b/dist/images/ovndb-vip.sh index cfbb47ba7d..56c3e54b88 100755 --- a/dist/images/ovndb-vip.sh +++ b/dist/images/ovndb-vip.sh @@ -31,7 +31,7 @@ ovnkube_version="3" ovn_daemonset_version=${OVN_DAEMONSET_VERSION:-"3"} # hostname is the host's hostname when using host networking, -# This is useful on the master node +# This is useful on the master # otherwise it is the container ID (useful for debugging). ovn_pod_host=$(hostname) diff --git a/dist/images/ovnkube.sh b/dist/images/ovnkube.sh index 4fa6c381f7..e6284ae8af 100755 --- a/dist/images/ovnkube.sh +++ b/dist/images/ovnkube.sh @@ -84,7 +84,7 @@ ovnkube_version="3" ovn_daemonset_version=${OVN_DAEMONSET_VERSION:-"3"} # hostname is the host's hostname when using host networking, -# This is useful on the master node +# This is useful on the master # otherwise it is the container ID (useful for debugging). 
ovn_pod_host=${K8S_NODE:-$(hostname)} @@ -905,7 +905,7 @@ display_version # run-ovn-northd Runs ovn-northd as a process does not run nb_ovsdb or sb_ovsdb (v3) # nb-ovsdb Runs nb_ovsdb as a process (no detach or monitor) (v3) # sb-ovsdb Runs sb_ovsdb as a process (no detach or monitor) (v3) -# ovn-master - master node only (v3) +# ovn-master - master only (v3) # ovn-controller - all nodes (v3) # ovn-node - all nodes (v3) # cleanup-ovn-node - all nodes (v3) diff --git a/dist/templates/ovnkube-db.yaml.j2 b/dist/templates/ovnkube-db.yaml.j2 index b12b0dca61..77cbae00f9 100644 --- a/dist/templates/ovnkube-db.yaml.j2 +++ b/dist/templates/ovnkube-db.yaml.j2 @@ -23,7 +23,7 @@ spec: # ovnkube-db # daemonset version 3 # starts ovn NB/SB ovsdb daemons, each in a separate container -# it is running on master node for now, but does not need to be the case +# it is running on master for now, but does not need to be the case kind: Deployment apiVersion: apps/v1 metadata: diff --git a/dist/templates/ovnkube-master.yaml.j2 b/dist/templates/ovnkube-master.yaml.j2 index 5873e5196d..dc0eda46d3 100644 --- a/dist/templates/ovnkube-master.yaml.j2 +++ b/dist/templates/ovnkube-master.yaml.j2 @@ -1,7 +1,7 @@ # ovnkube-master # daemonset version 3 # starts master daemons, each in a separate container -# it is run on the master node(s) +# it is run on the master(s) kind: Deployment apiVersion: apps/v1 metadata: @@ -10,7 +10,7 @@ metadata: namespace: ovn-kubernetes annotations: kubernetes.io/description: | - This Deployment launches the ovn-kubernetes master node networking components. + This Deployment launches the ovn-kubernetes master networking components. spec: progressDeadlineSeconds: 600 replicas: 1 diff --git a/docs/INSTALL.SSL.md b/docs/INSTALL.SSL.md index 53c558ce7f..d2369d07bc 100644 --- a/docs/INSTALL.SSL.md +++ b/docs/INSTALL.SSL.md @@ -20,22 +20,22 @@ ovs-pki init --force The above command creates 2 certificate authorities. 
But we are concerned only with one of them, i.e the "switch" certificate authority. We will use this -certificate authority to sign individual certificates of all the minions. We -will then use the same certificate authority's certificate to verify minion's +certificate authority to sign individual certificates of all the nodes. We +will then use the same certificate authority's certificate to verify a node's connections to the master. -Copy this certificate to the master node and each of the minion nodes. $CENTRAL_IP -is the IP address of the master node. +Copy this certificate to the master and each of the nodes. $CENTRAL_IP +is the IP address of the master. ``` scp /var/lib/openvswitch/pki/switchca/cacert.pem \ root@$CENTRAL_IP:/etc/openvswitch/. ``` -### Generate signed certificates for OVN components running on the master node. +### Generate signed certificates for OVN components running on the master. #### Generate signed certificates for OVN NB Database -On the master node, run the following commands. +On the master, run the following commands. ``` cd /etc/openvswitch @@ -52,7 +52,7 @@ ovs-pki -b sign ovnnb ``` The above command will generate ovnnb-cert.pem. Copy over this file back -to the master node's /etc/openvswitch. The ovnnb-privkey.pem and ovnnb-cert.pem +to the master's /etc/openvswitch. The ovnnb-privkey.pem and ovnnb-cert.pem will be used by the ovsdb-server that fronts the OVN NB database. Now run the following commands to ask ovsdb-server to use these @@ -82,7 +82,7 @@ ovs-pki -b sign ovnsb ``` The above command will generate ovnsb-cert.pem. Copy over this file back -to the master node's /etc/openvswitch. The ovnsb-privkey.pem and ovnsb-cert.pem +to the master's /etc/openvswitch. The ovnsb-privkey.pem and ovnsb-cert.pem will be used by the ovsdb-server that fronts the OVN SB database. 
Now run the following commands to ask ovsdb-server to use these @@ -98,24 +98,24 @@ ovn-sbctl set-connection pssl:6642 #### Generate signed certificates for OVN Northd -If you are running ovn-northd on the same node as OVN NB and SB database servers, then +If you are running ovn-northd on the same host as the OVN NB and SB database servers, then there is no need to secure the communication between ovn-northd and OVN NB/SB daemons. ovn-northd will communicate using UNIX path. -In case, you still want to secure the commnication or the daemons are running on -separate nodes, then follow the instructions on this page [OVN-NORTHD.SSL.md] +In case you still want to secure the communication, or the daemons are running on +separate hosts, then follow the instructions on this page [OVN-NORTHD.SSL.md] -### Generate certificates for the minion. +### Generate certificates for the nodes -On each minion, create a certificate request. +On each node, create a certificate request. ``` cd /etc/openvswitch ovs-pki req ovncontroller ``` -The above command will create a new private key for the minion called +The above command will create a new private key for the node called ovncontroller-privkey.pem and a certificate request file called ovncontroller-req.pem. Copy this certificate request file to the secure machine where you created the certificate authority and from the directory @@ -125,9 +125,9 @@ where the copied file exists, run: ovs-pki -b sign ovncontroller switch ``` -The above will create the certificate for the minion called +The above will create the certificate for the node called "ovncontroller-cert.pem". You should copy this certificate back to the -minion's /etc/openvswitch directory. +node's /etc/openvswitch directory. ## One time setup. @@ -185,7 +185,7 @@ certificates to it. 
For e.g: ``` sudo ovnkube -k8s-kubeconfig $HOME/kubeconfig.yaml -loglevel=4 \ -k8s-apiserver="http://$CENTRAL_IP:8080" \ - -init-node="$MINION_NAME" \ + -init-node="$NODE_NAME" \ -nodeport \ -nb-address="ssl:$CENTRAL_IP:6641" \ -sb-address="ssl:$CENTRAL_IP:6642" -k8s-token=$TOKEN \ diff --git a/docs/INSTALL.UBUNTU.md b/docs/INSTALL.UBUNTU.md index 5511e53b07..e72bbcc6c3 100644 --- a/docs/INSTALL.UBUNTU.md +++ b/docs/INSTALL.UBUNTU.md @@ -23,7 +23,7 @@ sudo apt-get install openvswitch-datapath-dkms -y sudo apt-get install openvswitch-switch openvswitch-common -y ``` -On the master node, where you intend to start OVN's central components, +On the master, where you intend to start OVN's central components, run: ``` diff --git a/docs/debugging.md b/docs/debugging.md index c71287178d..c077bee54b 100644 --- a/docs/debugging.md +++ b/docs/debugging.md @@ -17,7 +17,7 @@ same system-id for all your VMs - which is a problem. ### All nodes should register with OVN SB database. -On the master node, run: +On the master, run: ``` ovn-sbctl list chassis @@ -83,14 +83,14 @@ You can use the following command to achieve it via iptables. ### Check ovn-northd's log file. -On the master node, look at /var/log/openvswitch/ovn-northd.log to see +On the master, look at /var/log/openvswitch/ovn-northd.log to see for any errors with the setup of the OVN central node. ## Runtime issues ### Check the watcher's log file. -On the master node, check whether ovnkube is running by: +On the master, check whether ovnkube is running by: ``` ps -ef | grep ovnkube @@ -106,7 +106,7 @@ When you create a pod and it gets scheduled on a particular host, the OVN CNI plugin on that host, tries to access the pod's information from the K8s api server. Specifically, it tries to get the IP address and mac address for that pod. 
This information is logged in the OVN CNI log -file on each minion if you have specified a log file via +file on each node if you have specified a log file via "/etc/openvswitch/ovn_k8s.conf". You can read how to provide a logfile by reading 'man ovn_k8s.conf.5'. diff --git a/docs/ha.md b/docs/ha.md index a61813dcc0..0370dd4644 100644 --- a/docs/ha.md +++ b/docs/ha.md @@ -3,14 +3,14 @@ OVN architecture has two central databases that can be clustered. The databases are OVN_Northbound and OVN_Southbound. This document explains how to cluster them and start various daemons for the -ovn-kubernetes integration. You will ideally need atleast 3 master -nodes for a HA cluster. (You will need a miniumum of OVS/OVN 2.9.2 +ovn-kubernetes integration. You will ideally need at least 3 masters +for a HA cluster. (You will need a miniumum of OVS/OVN 2.9.2 for clustering.) -## Master1 node initialization +## Master1 initialization -To bootstrap your cluster, you need to start on one master node. -For a lack of better name, lets call it MASTER1 with an IP +To bootstrap your cluster, you need to start on one master. +For a lack of better name, let's call it MASTER1 with an IP address of $MASTER1 On MASTER1, delete any stale OVN databases and stop any @@ -23,7 +23,7 @@ sudo rm /etc/openvswitch/ovn*.db sudo /usr/share/openvswitch/scripts/ovn-ctl stop_northd ``` -Start the two databases on that node with: +Start the two databases on that host with: ``` LOCAL_IP=$MASTER1 @@ -35,7 +35,7 @@ sudo /usr/share/openvswitch/scripts/ovn-ctl \ ``` -## Master2, Master3... node initialization +## Master2, Master3... initialization Delete any stale databases and stop any running ovn-northd daemons. 
e.g: @@ -47,8 +47,8 @@ sudo rm /etc/openvswitch/ovn*.db sudo /usr/share/openvswitch/scripts/ovn-ctl stop_northd ``` -On master node, with a IP of $LOCAL_IP, start -the databases and ask it to join $MASTER1 +On master with a IP of $LOCAL_IP, start the databases and ask it to +join $MASTER1 ``` LOCAL_IP=$LOCAL_IP @@ -76,7 +76,7 @@ sudo ovs-appctl -t /var/run/openvswitch/ovnsb_db.ctl \ ## Start 'ovn-kube -init-master' -On any one of the master nodes, we need to start 'ovnkube -init-master'. +On any one of the masters, we need to start 'ovnkube -init-master'. (This should ideally be a daemonset with replica count of 1.) IP1="$MASTER1" @@ -100,8 +100,8 @@ nohup sudo ovnkube -k8s-kubeconfig kubeconfig.yaml \ ## start ovn-northd -On any one of the nodes (ideally via a daemonset with replica count as 1), -start ovn-northd. Let the 3 master node IPs be $IP1, $IP2 and $IP3. +On any one of the masters (ideally via a daemonset with replica count as 1), +start ovn-northd. Let the 3 master IPs be $IP1, $IP2 and $IP3. ``` IP1="$MASTER1" @@ -119,7 +119,7 @@ sudo ovn-northd -vconsole:emer -vsyslog:err -vfile:info \ ## Start 'ovn-kube -init-node' -In all other minions (and if needed on other masters), start ovnkube with +On all nodes (and if needed on other masters), start ovnkube with '-init-node'. 
For e.g: ``` @@ -133,7 +133,7 @@ ovn_sb="tcp:$IP1:6642,tcp:$IP2:6642,tcp:$IP3:6642" nohup sudo ovnkube -k8s-kubeconfig $HOME/kubeconfig.yaml -loglevel=4 \ -logfile="/var/log/openvswitch/ovnkube.log" \ -k8s-apiserver="http://$K8S_APISERVER_IP:8080" \ - -init-node="$MINION_NAME" \ + -init-node="$NODE_NAME" \ -nb-address="${ovn_nb}" \ -sb-address="${ovn_sb}" \ -k8s-token="$TOKEN" \ diff --git a/go-controller/cmd/ovnkube/ovnkube.go b/go-controller/cmd/ovnkube/ovnkube.go index 9af2709ff0..b3858098e1 100644 --- a/go-controller/cmd/ovnkube/ovnkube.go +++ b/go-controller/cmd/ovnkube/ovnkube.go @@ -211,7 +211,7 @@ func runOvnKube(ctx *cli.Context) error { if master != "" { if runtime.GOOS == "windows" { - return fmt.Errorf("Windows is not supported as master node") + return fmt.Errorf("Windows is not supported as a master") } // register prometheus metrics exported by the master metrics.RegisterMasterMetrics() diff --git a/go-controller/pkg/config/config.go b/go-controller/pkg/config/config.go index 341d423b97..b44f8ca651 100644 --- a/go-controller/pkg/config/config.go +++ b/go-controller/pkg/config/config.go @@ -673,7 +673,7 @@ var OVNGatewayFlags = []cli.Flag{ }, cli.StringFlag{ Name: "gateway-interface", - Usage: "The interface in minions that will be the gateway interface. " + + Usage: "The interface on nodes that will be the gateway interface. " + "If none specified, then the node's interface on which the " + "default gateway is configured will be used as the gateway " + "interface. 
Only useful with \"init-gateways\"", diff --git a/go-controller/pkg/node/management-port.go b/go-controller/pkg/node/management-port.go index 498fc4120a..6c59948a45 100644 --- a/go-controller/pkg/node/management-port.go +++ b/go-controller/pkg/node/management-port.go @@ -24,8 +24,7 @@ func (n *OvnNode) createManagementPort(localSubnet *net.IPNet, nodeAnnotator kub // uppercase letters, this causes a mismatch between what the watcher // will try to fetch and what kubernetes provides, thus failing to // create the port on the logical switch. - // Until the above is changed, switch to a lowercase hostname for - // initMinion. + // Until the above is changed, switch to a lowercase hostname nodeName := strings.ToLower(n.name) // Make sure br-int is created. diff --git a/go-controller/pkg/ovn/master_test.go b/go-controller/pkg/ovn/master_test.go index 02e2f25972..39d4291707 100644 --- a/go-controller/pkg/ovn/master_test.go +++ b/go-controller/pkg/ovn/master_test.go @@ -542,7 +542,7 @@ subnet=%s "ovn-sbctl --timeout=15 --data=bare --no-heading --columns=name find Chassis hostname=" + node1Name, }) - // Expect the code to re-add the master node (which still exists) + // Expect the code to re-add the master (which still exists) // when the factory watch begins and enumerates all existing // Kubernetes API nodes fexec.AddFakeCmdsNoOutputNoError([]string{ From d976aba267828ec20b4f3297446f40d70b97470e Mon Sep 17 00:00:00 2001 From: Girish Moodalbail Date: Tue, 7 Apr 2020 19:06:24 -0700 Subject: [PATCH 25/27] SSL for OVN daemonsets -- assumes that all the required private keys and corresponding signed certificates are mounted into the container at /ovn-cert path -- the private keys and certificates for various OVN components are named as below ovncontroller-cert.pem ovncontroller-privkey.pem ovnnb-cert.pem ovnnb-privkey.pem ovnnorthd-cert.pem ovnnorthd-privkey.pem ovnsb-cert.pem ovnsb-privkey.pem -- the name of the CA certificate that signed the CSRs is ca-cert.pem -- disabled 
by default since there are lot of pre-requisites to get this thing to work. to enable it, one needs to set the OVN_SSL_ENABLE environment variable in each of the container Signed-off-by: Girish Moodalbail --- dist/images/daemonset.sh | 11 +++- dist/images/ovndb-raft-functions.sh | 65 +++++++++++++------ dist/images/ovnkube.sh | 89 +++++++++++++++++++++++--- dist/templates/ovnkube-db-raft.yaml.j2 | 19 ++++++ dist/templates/ovnkube-db.yaml.j2 | 14 ++++ dist/templates/ovnkube-master.yaml.j2 | 20 +++++- dist/templates/ovnkube-node.yaml.j2 | 14 ++++ 7 files changed, 199 insertions(+), 33 deletions(-) diff --git a/dist/images/daemonset.sh b/dist/images/daemonset.sh index 5f2bad48f3..714d2ec6ea 100755 --- a/dist/images/daemonset.sh +++ b/dist/images/daemonset.sh @@ -17,11 +17,11 @@ OVN_SVC_DIDR="" OVN_K8S_APISERVER="" OVN_GATEWAY_MODE="" OVN_GATEWAY_OPTS="" -# In the future we will have RAFT based HA support. OVN_DB_VIP_IMAGE="" OVN_DB_VIP="" OVN_DB_REPLICAS="" OVN_MTU="" +OVN_SSL_ENABLE="" KIND="" MASTER_LOGLEVEL="" NODE_LOGLEVEL="" @@ -93,6 +93,9 @@ while [ "$1" != "" ]; do --ovn-loglevel-nbctld) OVN_LOGLEVEL_NBCTLD=$VALUE ;; + --ssl) + OVN_SSL_ENABLE="yes" + ;; *) echo "WARNING: unknown parameter \"$PARAM\"" exit 1 @@ -142,6 +145,8 @@ ovn_hybrid_overlay_enable=${OVN_HYBRID_OVERLAY_ENABLE} echo "ovn_hybrid_overlay_enable: ${ovn_hybrid_overlay_enable}" ovn_hybrid_overlay_net_cidr=${OVN_HYBRID_OVERLAY_NET_CIDR} echo "ovn_hybrid_overlay_net_cidr: ${ovn_hybrid_overlay_net_cidr}" +ovn_ssl_en=${OVN_SSL_ENABLE:-"no"} +echo "ovn_ssl_enable: ${ovn_ssl_en}" ovn_image=${image} \ ovn_image_pull_policy=${image_pull_policy} \ @@ -152,6 +157,7 @@ ovn_image=${image} \ ovn_loglevel_controller=${ovn_loglevel_controller} \ ovn_hybrid_overlay_net_cidr=${ovn_hybrid_overlay_net_cidr} \ ovn_hybrid_overlay_enable=${ovn_hybrid_overlay_enable} \ + ovn_ssl_en=${ovn_ssl_en} \ j2 ../templates/ovnkube-node.yaml.j2 -o ../yaml/ovnkube-node.yaml ovn_image=${image} \ @@ -161,12 +167,14 @@ 
ovn_image=${image} \ ovn_loglevel_nbctld=${ovn_loglevel_nbctld} \ ovn_hybrid_overlay_net_cidr=${ovn_hybrid_overlay_net_cidr} \ ovn_hybrid_overlay_enable=${ovn_hybrid_overlay_enable} \ + ovn_ssl_en=${ovn_ssl_en} \ j2 ../templates/ovnkube-master.yaml.j2 -o ../yaml/ovnkube-master.yaml ovn_image=${image} \ ovn_image_pull_policy=${image_pull_policy} \ ovn_loglevel_nb=${ovn_loglevel_nb} \ ovn_loglevel_sb=${ovn_loglevel_sb} \ + ovn_ssl_en=${ovn_ssl_en} \ j2 ../templates/ovnkube-db.yaml.j2 -o ../yaml/ovnkube-db.yaml ovn_db_vip_image=${ovn_db_vip_image} \ @@ -180,6 +188,7 @@ ovn_image=${image} \ ovn_db_replicas=${ovn_db_replicas} \ ovn_db_minAvailable=${ovn_db_minAvailable} \ ovn_loglevel_nb=${ovn_loglevel_nb} ovn_loglevel_sb=${ovn_loglevel_sb} \ + ovn_ssl_en=${ovn_ssl_en} \ j2 ../templates/ovnkube-db-raft.yaml.j2 -o ../yaml/ovnkube-db-raft.yaml # ovn-setup.yaml diff --git a/dist/images/ovndb-raft-functions.sh b/dist/images/ovndb-raft-functions.sh index 6653248fbc..b02937ea33 100644 --- a/dist/images/ovndb-raft-functions.sh +++ b/dist/images/ovndb-raft-functions.sh @@ -4,6 +4,11 @@ verify-ovsdb-raft() { check_ovn_daemonset_version "3" + if [[ ${ovn_db_host} == "" ]]; then + echo "failed to retrieve the IP address of the host $(hostname). Exiting..." + exit 1 + fi + replicas=$(kubectl --server=${K8S_APISERVER} --token=${k8s_token} --certificate-authority=${K8S_CACERT} \ get statefulset -n ${ovn_kubernetes_namespace} ovnkube-db -o=jsonpath='{.spec.replicas}') if [[ ${replicas} -lt 3 || $((${replicas} % 2)) -eq 0 ]]; then @@ -16,16 +21,17 @@ verify-ovsdb-raft() { # This waits for ovnkube-db-0 POD to come up ready_to_join_cluster() { # See if ep is available ... - db=${1} - port=${2} + local db=${1} + local port=${2} init_ip="$(kubectl --server=${K8S_APISERVER} --token=${k8s_token} --certificate-authority=${K8S_CACERT} \ get pod -n ${ovn_kubernetes_namespace} ovnkube-db-0 -o=jsonpath='{.status.podIP}')" if [[ $? 
!= 0 ]]; then return 1 fi - target=$(ovn-${db}ctl --db=tcp:${init_ip}:${port} --data=bare --no-headings --columns=target list connection 2>/dev/null) - if [[ "${target}" != "ptcp:${port}" ]]; then + target=$(ovn-${db}ctl --db=${transport}:${init_ip}:${port} ${ovndb_ctl_ssl_opts} --data=bare --no-headings \ + --columns=target list connection 2>/dev/null) + if [[ "${target}" != "p${transport}:${port}" ]]; then return 1 fi return 0 @@ -37,7 +43,7 @@ check_ovnkube_db_ep() { # TODO: Right now only checks for NB ovsdb instances echo "======= checking ${dbaddr}:${dbport} OVSDB instance ===============" - ovsdb-client list-dbs tcp:${dbaddr}:${dbport} >/dev/null 2>&1 + ovsdb-client ${ovndb_ctl_ssl_opts} list-dbs ${transport}:${dbaddr}:${dbport} >/dev/null 2>&1 if [[ $? != 0 ]]; then return 1 fi @@ -85,7 +91,8 @@ check_and_apply_ovnkube_db_ep() { # election timer can only be at most doubled each time, and it can only be set on the leader set_election_timer() { - local election_timer=${1} + local db=${1} + local election_timer=${2} local current_election_timer echo "setting election timer for ${database} to ${election_timer} ms" @@ -127,7 +134,8 @@ set_election_timer() { # the pod is a part of mutli-pods cluster and may not be a leader and the connection information should # have already been set, so we don't care. set_connection() { - local port=${1} + local db=${1} + local port=${2} local target local output @@ -137,8 +145,8 @@ set_connection() { # this instance is a leader, check if we need to make any changes echo "found the current value of target and inactivity probe to be ${output}" target=$(echo "${output}" | awk 'ORS=","') - if [[ "${target}" != "ptcp:${port},0," ]]; then - ovn-${db}ctl --inactivity-probe=0 set-connection ptcp:${port} + if [[ "${target}" != "p${transport}:${port},0," ]]; then + ovn-${db}ctl --inactivity-probe=0 set-connection p${transport}:${port} if [[ $? != 0 ]]; then echo "Failed to set connection and disable inactivity probe. Exiting...." 
exit 12 @@ -186,30 +194,43 @@ ovsdb-raft() { ovn_db_pidfile=${OVN_RUNDIR}/ovn${db}_db.pid eval ovn_log_db=\$ovn_log_${db} ovn_db_file=${OVN_ETCDIR}/ovn${db}_db.db - database="OVN_Northbound" - if [[ ${db} == "sb" ]]; then - database="OVN_Southbound" - fi rm -f ${ovn_db_pidfile} verify-ovsdb-raft - if [[ ${ovn_db_host} == "" ]] ; then - echo "failed to retrieve the IP address of the host $(hostname). Exiting..." - exit 1 - fi - iptables-rules ${raft_port} - echo "=============== run ${db}-ovsdb-raft pod ${POD_NAME} ==========" if [[ ! -e ${ovn_db_file} ]] || ovsdb-tool db-is-standalone ${ovn_db_file}; then initialize="true" fi + + local db_ssl_opts="" + if [[ ${db} == "nb" ]]; then + database="OVN_Northbound" + [[ "yes" == ${OVN_SSL_ENABLE} ]] && { + db_ssl_opts=" + --ovn-nb-db-ssl-key=${ovn_nb_pk} + --ovn-nb-db-ssl-cert=${ovn_nb_cert} + --ovn-nb-db-ssl-ca-cert=${ovn_ca_cert} + " + } + else + database="OVN_Southbound" + [[ "yes" == ${OVN_SSL_ENABLE} ]] && { + db_ssl_opts=" + --ovn-sb-db-ssl-key=${ovn_sb_pk} + --ovn-sb-db-ssl-cert=${ovn_sb_cert} + --ovn-sb-db-ssl-ca-cert=${ovn_ca_cert} + " + } + fi if [[ "${POD_NAME}" == "ovnkube-db-0" ]]; then run_as_ovs_user_if_needed \ ${OVNCTL_PATH} run_${db}_ovsdb --no-monitor \ --db-${db}-cluster-local-addr=${ovn_db_host} \ --db-${db}-cluster-local-port=${raft_port} \ + --db-${db}-cluster-local-proto=${transport} \ + ${db_ssl_opts} \ --ovn-${db}-log="${ovn_log_db}" & else # join the remote cluster node if the DB is not created @@ -220,6 +241,8 @@ ovsdb-raft() { ${OVNCTL_PATH} run_${db}_ovsdb --no-monitor \ --db-${db}-cluster-local-addr=${ovn_db_host} --db-${db}-cluster-remote-addr=${init_ip} \ --db-${db}-cluster-local-port=${raft_port} --db-${db}-cluster-remote-port=${raft_port} \ + --db-${db}-cluster-local-proto=${transport} --db-${db}-cluster-remote-proto=${transport} \ + ${db_ssl_opts} \ --ovn-${db}-log="${ovn_log_db}" & fi @@ -238,12 +261,12 @@ ovsdb-raft() { # set the election timer value before other servers join the 
cluster and it can # only be set on the leader so we must do this in ovnkube-db-0 when it is still # a single-node cluster - set_election_timer ${election_timer} + set_election_timer ${db} ${election_timer} if [[ ${db} == "nb" ]]; then set_northd_probe_interval fi # set the connection and disable inactivity probe, this deletes the old connection if any - set_connection ${port} + set_connection ${db} ${port} fi last_node_index=$(expr ${replicas} - 1) diff --git a/dist/images/ovnkube.sh b/dist/images/ovnkube.sh index 4fa6c381f7..f719c0c01c 100755 --- a/dist/images/ovnkube.sh +++ b/dist/images/ovnkube.sh @@ -61,6 +61,7 @@ fi # OVN_SB_RAFT_PORT - ovn south db raft port (default 6644) # OVN_NB_RAFT_ELECTION_TIMER - ovn north db election timer in ms (default 1000) # OVN_SB_RAFT_ELECTION_TIMER - ovn south db election timer in ms (default 1000) +# OVN_SSL_ENABLE - use SSL transport to NB/SB db and northd (default: no) # The argument to the command is the operation to be performed # ovn-master ovn-controller ovn-node display display_env ovn_debug @@ -100,8 +101,26 @@ else k8s_token=${K8S_TOKEN} fi +# certs and private keys for k8s and OVN K8S_CACERT=${K8S_CACERT:-/var/run/secrets/kubernetes.io/serviceaccount/ca.crt} +ovn_ca_cert=/ovn-cert/ca-cert.pem +ovn_nb_pk=/ovn-cert/ovnnb-privkey.pem +ovn_nb_cert=/ovn-cert/ovnnb-cert.pem +ovn_sb_pk=/ovn-cert/ovnsb-privkey.pem +ovn_sb_cert=/ovn-cert/ovnsb-cert.pem +ovn_northd_pk=/ovn-cert/ovnnorthd-privkey.pem +ovn_northd_cert=/ovn-cert/ovnnorthd-cert.pem +ovn_controller_pk=/ovn-cert/ovncontroller-privkey.pem +ovn_controller_cert=/ovn-cert/ovncontroller-cert.pem + +transport="tcp" +ovndb_ctl_ssl_opts="" +if [[ "yes" == ${OVN_SSL_ENABLE} ]]; then + transport="ssl" + ovndb_ctl_ssl_opts="-p ${ovn_controller_pk} -c ${ovn_controller_cert} -C ${ovn_ca_cert}" +fi + # ovn-northd - /etc/sysconfig/ovn-northd ovn_northd_opts=${OVN_NORTHD_OPTS:-""} @@ -222,7 +241,6 @@ wait_for_event() { break fi done - } # OVN DBs must be up and initialized before 
ovn-master and ovn-node PODs can come up @@ -239,7 +257,7 @@ ready_to_start_node() { # cannot use ovsdb-client in the case of raft, since it will succeed even if one of the # instance of DB is up and running. HOwever, ovn-nbctl always connects to the leader in the clustered # database, so use it. - ovn-nbctl --db=${ovn_nbdb_conn} list NB_Global >/dev/null 2>&1 + ovn-nbctl --db=${ovn_nbdb_conn} ${ovndb_ctl_ssl_opts} list NB_Global >/dev/null 2>&1 if [[ $? != 0 ]]; then return 1 fi @@ -260,8 +278,6 @@ check_ovn_daemonset_version() { } get_ovn_db_vars() { - # OVN_NORTH and OVN_SOUTH override derived host - # Currently limited to tcp (ssl is not supported yet) ovn_nbdb_str="" ovn_sbdb_str="" for i in ${!ovn_db_hosts[@]}; do @@ -269,9 +285,10 @@ get_ovn_db_vars() { ovn_nbdb_str=${ovn_nbdb_str}"," ovn_sbdb_str=${ovn_sbdb_str}"," fi - ovn_nbdb_str=${ovn_nbdb_str}tcp:${ovn_db_hosts[${i}]}:${ovn_nb_port} - ovn_sbdb_str=${ovn_sbdb_str}tcp:${ovn_db_hosts[${i}]}:${ovn_sb_port} + ovn_nbdb_str=${ovn_nbdb_str}${transport}://${ovn_db_hosts[${i}]}:${ovn_nb_port} + ovn_sbdb_str=${ovn_sbdb_str}${transport}://${ovn_db_hosts[${i}]}:${ovn_sb_port} done + # OVN_NORTH and OVN_SOUTH override derived host ovn_nbdb=${OVN_NORTH:-$ovn_nbdb_str} ovn_sbdb=${OVN_SOUTH:-$ovn_sbdb_str} @@ -618,7 +635,11 @@ nb-ovsdb() { # setting northd probe interval set_northd_probe_interval - ovn-nbctl set-connection ptcp:${ovn_nb_port}:${ovn_db_host} -- set connection . inactivity_probe=0 + [[ "yes" == ${OVN_SSL_ENABLE} ]] && { + ovn-nbctl set-ssl ${ovn_nb_pk} ${ovn_nb_cert} ${ovn_ca_cert} + echo "=============== nb-ovsdb ========== reconfigured for SSL" + } + ovn-nbctl --inactivity-probe=0 set-connection p${transport}:${ovn_nb_port}:${ovn_db_host} tail --follow=name ${OVN_LOGDIR}/ovsdb-server-nb.log & ovn_tail_pid=$! 
@@ -647,7 +668,11 @@ sb-ovsdb() { wait_for_event attempts=3 process_ready ovnsb_db echo "=============== sb-ovsdb ========== RUNNING" - ovn-sbctl set-connection ptcp:${ovn_sb_port}:${ovn_db_host} -- set connection . inactivity_probe=0 + [[ "yes" == ${OVN_SSL_ENABLE} ]] && { + ovn-sbctl set-ssl ${ovn_sb_pk} ${ovn_sb_cert} ${ovn_ca_cert} + echo "=============== sb-ovsdb ========== reconfigured for SSL" + } + ovn-sbctl --inactivity-probe=0 set-connection p${transport}:${ovn_sb_port}:${ovn_db_host} # create the ovnkube_db endpoint for other pods to query the OVN DB IP set_ovnkube_db_ep ${ovn_db_host} @@ -675,10 +700,20 @@ run-ovn-northd() { # no monitor (and no detach), start northd which connects to the # ovnkube-db service + local ovn_northd_ssl_opts="" + [[ "yes" == ${OVN_SSL_ENABLE} ]] && { + ovn_northd_ssl_opts=" + --ovn-northd-ssl-key=${ovn_northd_pk} + --ovn-northd-ssl-cert=${ovn_northd_cert} + --ovn-northd-ssl-ca-cert=${ovn_ca_cert} + " + } + run_as_ovs_user_if_needed \ ${OVNCTL_PATH} start_northd \ --no-monitor --ovn-manage-ovsdb=no \ --ovn-northd-nb-db=${ovn_nbdb_conn} --ovn-northd-sb-db=${ovn_sbdb_conn} \ + ${ovn_northd_ssl_opts} \ --ovn-northd-log="${ovn_loglevel_northd}" \ ${ovn_northd_opts} @@ -728,6 +763,17 @@ ovn-master() { hybrid_overlay_flags="${hybrid_overlay_flags} --hybrid-overlay-cluster-subnets=${ovn_hybrid_overlay_net_cidr}" fi fi + local ovn_master_ssl_opts="" + [[ "yes" == ${OVN_SSL_ENABLE} ]] && { + ovn_master_ssl_opts=" + --nb-client-privkey ${ovn_controller_pk} + --nb-client-cert ${ovn_controller_cert} + --nb-client-cacert ${ovn_ca_cert} + --sb-client-privkey ${ovn_controller_pk} + --sb-client-cert ${ovn_controller_cert} + --sb-client-cacert ${ovn_ca_cert} + " + } echo "=============== ovn-master ========== MASTER ONLY" /usr/bin/ovnkube \ @@ -739,6 +785,7 @@ ovn-master() { ${hybrid_overlay_flags} \ --pidfile ${OVN_RUNDIR}/ovnkube-master.pid \ --logfile /var/log/ovn-kubernetes/ovnkube-master.log \ + ${ovn_master_ssl_opts} \ 
--metrics-bind-address "0.0.0.0:9409" & echo "=============== ovn-master ========== running" wait_for_event attempts=3 process_ready ovnkube-master @@ -769,8 +816,17 @@ ovn-controller() { rm -f /var/run/ovn-kubernetes/cni/* rm -f ${OVN_RUNDIR}/ovn-controller.*.ctl + local ovn_controller_ssl_opts="" + [[ "yes" == ${OVN_SSL_ENABLE} ]] && { + ovn_controller_ssl_opts=" + --ovn-controller-ssl-key=${ovn_controller_pk} + --ovn-controller-ssl-cert=${ovn_controller_cert} + --ovn-controller-ssl-ca-cert=${ovn_ca_cert} + " + } run_as_ovs_user_if_needed \ ${OVNCTL_PATH} --no-monitor start_controller \ + ${ovn_controller_ssl_opts} \ --ovn-controller-log="${ovn_loglevel_controller}" \ ${ovn_controller_opts} @@ -818,6 +874,18 @@ ovn-node() { OVN_ENCAP_IP=$(echo --encap-ip=${ovn_encap_ip}) fi + local ovn_node_ssl_opts="" + [[ "yes" == ${OVN_SSL_ENABLE} ]] && { + ovn_node_ssl_opts=" + --nb-client-privkey ${ovn_controller_pk} + --nb-client-cert ${ovn_controller_cert} + --nb-client-cacert ${ovn_ca_cert} + --sb-client-privkey ${ovn_controller_pk} + --sb-client-cert ${ovn_controller_cert} + --sb-client-cacert ${ovn_ca_cert} + " + } + echo "=============== ovn-node --init-node" /usr/bin/ovnkube --init-node ${K8S_NODE} \ --cluster-subnets ${net_cidr} --k8s-service-cidr=${svc_cidr} \ @@ -830,6 +898,7 @@ ovn-node() { --gateway-mode=${ovn_gateway_mode} ${ovn_gateway_opts} \ --pidfile ${OVN_RUNDIR}/ovnkube.pid \ --logfile /var/log/ovn-kubernetes/ovnkube.log \ + ${ovn_node_ssl_opts} \ --metrics-bind-address "0.0.0.0:9410" & wait_for_event attempts=3 process_ready ovnkube @@ -878,8 +947,8 @@ run-nbctld() { echo "ovn_nbdb ${ovn_nbdb} ovn_sbdb ${ovn_sbdb} ovn_nbdb_conn ${ovn_nbdb_conn}" echo "ovn_loglevel_nbctld=${ovn_loglevel_nbctld}" - # use unix socket - /usr/bin/ovn-nbctl ${ovn_loglevel_nbctld} --pidfile --db=${ovn_nbdb_conn} --log-file=${OVN_LOGDIR}/ovn-nbctl.log --detach + /usr/bin/ovn-nbctl ${ovn_loglevel_nbctld} --pidfile --db=${ovn_nbdb_conn} \ + --log-file=${OVN_LOGDIR}/ovn-nbctl.log 
--detach ${ovndb_ctl_ssl_opts} wait_for_event attempts=3 process_ready ovn-nbctl echo "=============== run_ovn_nbctl ========== RUNNING" diff --git a/dist/templates/ovnkube-db-raft.yaml.j2 b/dist/templates/ovnkube-db-raft.yaml.j2 index 001997f25c..76424836e7 100644 --- a/dist/templates/ovnkube-db-raft.yaml.j2 +++ b/dist/templates/ovnkube-db-raft.yaml.j2 @@ -128,6 +128,9 @@ spec: name: host-var-run-ovs - mountPath: /var/run/ovn/ name: host-var-run-ovs + - mountPath: /ovn-cert + name: host-ovn-cert + readOnly: true resources: requests: @@ -155,6 +158,8 @@ spec: valueFrom: fieldRef: fieldPath: status.hostIP + - name: OVN_SSL_ENABLE + value: "{{ ovn_ssl_en }}" # end of container # sb-ovsdb - v3 @@ -192,6 +197,9 @@ spec: name: host-var-run-ovs - mountPath: /var/run/ovn/ name: host-var-run-ovs + - mountPath: /ovn-cert + name: host-ovn-cert + readOnly: true resources: requests: @@ -219,6 +227,8 @@ spec: valueFrom: fieldRef: fieldPath: status.hostIP + - name: OVN_SSL_ENABLE + value: "{{ ovn_ssl_en }}" # end of container # db-metrics-exporter - v3 @@ -245,6 +255,9 @@ spec: name: host-var-run-ovs - mountPath: /var/run/ovn/ name: host-var-run-ovs + - mountPath: /ovn-cert + name: host-ovn-cert + readOnly: true resources: requests: @@ -262,6 +275,8 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: OVN_SSL_ENABLE + value: "{{ ovn_ssl_en }}" # end of container volumes: @@ -274,5 +289,9 @@ spec: - name: host-var-run-ovs hostPath: path: /var/run/openvswitch + - name: host-ovn-cert + hostPath: + path: /etc/ovn + type: DirectoryOrCreate tolerations: - operator: "Exists" diff --git a/dist/templates/ovnkube-db.yaml.j2 b/dist/templates/ovnkube-db.yaml.j2 index b12b0dca61..f5cfe715bd 100644 --- a/dist/templates/ovnkube-db.yaml.j2 +++ b/dist/templates/ovnkube-db.yaml.j2 @@ -93,6 +93,9 @@ spec: - mountPath: /host name: host-slash readOnly: true + - mountPath: /ovn-cert + name: host-ovn-cert + readOnly: true resources: requests: @@ -116,6 +119,8 @@ spec: valueFrom: 
fieldRef: fieldPath: status.hostIP + - name: OVN_SSL_ENABLE + value: "{{ ovn_ssl_en }}" readinessProbe: exec: command: ["/usr/bin/ovn-kube-util", "readiness-probe", "-t", "ovnnb-db"] @@ -153,6 +158,9 @@ spec: - mountPath: /host name: host-slash readOnly: true + - mountPath: /ovn-cert + name: host-ovn-cert + readOnly: true resources: requests: @@ -176,6 +184,8 @@ spec: valueFrom: fieldRef: fieldPath: status.hostIP + - name: OVN_SSL_ENABLE + value: "{{ ovn_ssl_en }}" readinessProbe: exec: command: ["/usr/bin/ovn-kube-util", "readiness-probe", "-t", "ovnsb-db"] @@ -198,5 +208,9 @@ spec: - name: host-slash hostPath: path: / + - name: host-ovn-cert + hostPath: + path: /etc/ovn + type: DirectoryOrCreate tolerations: - operator: "Exists" diff --git a/dist/templates/ovnkube-master.yaml.j2 b/dist/templates/ovnkube-master.yaml.j2 index 5873e5196d..cd793f4561 100644 --- a/dist/templates/ovnkube-master.yaml.j2 +++ b/dist/templates/ovnkube-master.yaml.j2 @@ -90,6 +90,9 @@ spec: name: host-var-run-ovs - mountPath: /var/run/ovn/ name: host-var-run-ovs + - mountPath: /ovn-cert + name: host-ovn-cert + readOnly: true resources: requests: @@ -109,6 +112,8 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: OVN_SSL_ENABLE + value: "{{ ovn_ssl_en }}" readinessProbe: exec: command: ["/usr/bin/ovn-kube-util", "readiness-probe", "-t", "ovn-northd"] @@ -136,7 +141,9 @@ spec: name: host-var-run-ovs - mountPath: /var/run/ovn/ name: host-var-run-ovs - + - mountPath: /ovn-cert + name: host-ovn-cert + readOnly: true resources: requests: cpu: 100m @@ -151,6 +158,8 @@ spec: configMapKeyRef: name: ovn-config key: k8s_apiserver + - name: OVN_SSL_ENABLE + value: "{{ ovn_ssl_en }}" readinessProbe: exec: @@ -181,6 +190,9 @@ spec: name: host-var-run-ovs - mountPath: /var/run/ovn/ name: host-var-run-ovs + - mountPath: /ovn-cert + name: host-ovn-cert + readOnly: true resources: requests: @@ -218,6 +230,8 @@ spec: value: "{{ ovn_hybrid_overlay_enable }}" - name: 
OVN_HYBRID_OVERLAY_NET_CIDR value: "{{ ovn_hybrid_overlay_net_cidr }}" + - name: OVN_SSL_ENABLE + value: "{{ ovn_ssl_en }}" # end of container volumes: @@ -234,5 +248,9 @@ spec: - name: host-var-run-ovs hostPath: path: /var/run/openvswitch + - name: host-ovn-cert + hostPath: + path: /etc/ovn + type: DirectoryOrCreate tolerations: - operator: "Exists" diff --git a/dist/templates/ovnkube-node.yaml.j2 b/dist/templates/ovnkube-node.yaml.j2 index 891259af96..713e426df6 100644 --- a/dist/templates/ovnkube-node.yaml.j2 +++ b/dist/templates/ovnkube-node.yaml.j2 @@ -121,6 +121,9 @@ spec: name: host-var-run-ovs - mountPath: /var/run/ovn/ name: host-var-run-ovs + - mountPath: /ovn-cert + name: host-ovn-cert + readOnly: true resources: requests: @@ -140,6 +143,8 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: OVN_SSL_ENABLE + value: "{{ ovn_ssl_en }}" readinessProbe: exec: @@ -185,6 +190,9 @@ spec: name: host-opt-cni-bin - mountPath: /etc/cni/net.d name: host-etc-cni-netd + - mountPath: /ovn-cert + name: host-ovn-cert + readOnly: true {% if kind is defined and kind -%} - mountPath: /var/run/netns name: host-netns @@ -232,6 +240,8 @@ spec: value: "{{ ovn_hybrid_overlay_enable }}" - name: OVN_HYBRID_OVERLAY_NET_CIDR value: "{{ ovn_hybrid_overlay_net_cidr }}" + - name: OVN_SSL_ENABLE + value: "{{ ovn_ssl_en }}" lifecycle: preStop: @@ -278,6 +288,10 @@ spec: - name: host-etc-cni-netd hostPath: path: /etc/cni/net.d + - name: host-ovn-cert + hostPath: + path: /etc/ovn + type: DirectoryOrCreate - name: host-slash hostPath: path: / From 903f2abcb8376e9e02a0d39b395483c6a5d16515 Mon Sep 17 00:00:00 2001 From: Pardhakeswar Pacha Date: Thu, 9 Apr 2020 00:53:43 -0400 Subject: [PATCH 26/27] Adding gatewayRouter name in error messages. error messages in GatewayInit() and GatewayCleanup() functions don't have the gatewayRouter name, which makes it difficult to know for which k8s node the functionality has failed.
Signed-off-by: Pardhakeswar Pacha --- go-controller/pkg/util/gateway-cleanup.go | 24 +++++----- go-controller/pkg/util/gateway-init.go | 56 ++++++++++++----------- 2 files changed, 41 insertions(+), 39 deletions(-) diff --git a/go-controller/pkg/util/gateway-cleanup.go b/go-controller/pkg/util/gateway-cleanup.go index 4736bb6651..55085b5c4a 100644 --- a/go-controller/pkg/util/gateway-cleanup.go +++ b/go-controller/pkg/util/gateway-cleanup.go @@ -20,8 +20,8 @@ func GatewayCleanup(nodeName string, nodeSubnet *net.IPNet) error { routerIPNetwork, stderr, err := RunOVNNbctl("--if-exist", "get", "logical_router_port", "rtoj-"+gatewayRouter, "networks") if err != nil { - return fmt.Errorf("Failed to get logical router port, stderr: %q, "+ - "error: %v", stderr, err) + return fmt.Errorf("failed to get logical router port for gateway router %s, "+ + "stderr: %q, error: %v", gatewayRouter, stderr, err) } routerIPNetwork = strings.Trim(routerIPNetwork, "[]\"") @@ -45,7 +45,7 @@ func GatewayCleanup(nodeName string, nodeSubnet *net.IPNet) error { // Remove the join switch that connects ovn_cluster_router to gateway router _, stderr, err = RunOVNNbctl("--if-exist", "ls-del", JoinSwitchPrefix+nodeName) if err != nil { - return fmt.Errorf("Failed to delete the join logical switch %s, "+ + return fmt.Errorf("failed to delete the join logical switch %s, "+ "stderr: %q, error: %v", JoinSwitchPrefix+nodeName, stderr, err) } @@ -53,7 +53,7 @@ func GatewayCleanup(nodeName string, nodeSubnet *net.IPNet) error { _, stderr, err = RunOVNNbctl("--if-exist", "lr-del", gatewayRouter) if err != nil { - return fmt.Errorf("Failed to delete gateway router %s, stderr: %q, "+ + return fmt.Errorf("failed to delete gateway router %s, stderr: %q, "+ "error: %v", gatewayRouter, stderr, err) } @@ -62,14 +62,14 @@ func GatewayCleanup(nodeName string, nodeSubnet *net.IPNet) error { _, stderr, err = RunOVNNbctl("--if-exist", "ls-del", externalSwitch) if err != nil { - return fmt.Errorf("Failed to delete 
external switch %s, stderr: %q, "+ + return fmt.Errorf("failed to delete external switch %s, stderr: %q, "+ "error: %v", externalSwitch, stderr, err) } // Remove the patch port on the distributed router that connects to join switch _, stderr, err = RunOVNNbctl("--if-exist", "lrp-del", "dtoj-"+nodeName) if err != nil { - return fmt.Errorf("Failed to delete the patch port dtoj-%s on distributed router "+ + return fmt.Errorf("failed to delete the patch port dtoj-%s on distributed router "+ "stderr: %q, error: %v", nodeName, stderr, err) } @@ -81,22 +81,22 @@ func GatewayCleanup(nodeName string, nodeSubnet *net.IPNet) error { if k8sNSLbTCP != "" { _, stderr, err = RunOVNNbctl("lb-del", k8sNSLbTCP) if err != nil { - return fmt.Errorf("failed to delete Gateway router TCP load balancer %s, stderr: %q, "+ - "error: %v", k8sNSLbTCP, stderr, err) + return fmt.Errorf("failed to delete Gateway router %s's TCP load balancer %s, stderr: %q, "+ + "error: %v", gatewayRouter, k8sNSLbTCP, stderr, err) } } if k8sNSLbUDP != "" { _, stderr, err = RunOVNNbctl("lb-del", k8sNSLbUDP) if err != nil { - return fmt.Errorf("failed to delete Gateway router UDP load balancer %s, stderr: %q, "+ - "error: %v", k8sNSLbUDP, stderr, err) + return fmt.Errorf("failed to delete Gateway router %s's UDP load balancer %s, stderr: %q, "+ + "error: %v", gatewayRouter, k8sNSLbUDP, stderr, err) } } if k8sNSLbSCTP != "" { _, stderr, err = RunOVNNbctl("lb-del", k8sNSLbSCTP) if err != nil { - return fmt.Errorf("failed to delete Gateway router SCTP load balancer %s, stderr: %q, "+ - "error: %v", k8sNSLbSCTP, stderr, err) + return fmt.Errorf("failed to delete Gateway router %s's SCTP load balancer %s, stderr: %q, "+ + "error: %v", gatewayRouter, k8sNSLbSCTP, stderr, err) } } return nil diff --git a/go-controller/pkg/util/gateway-init.go b/go-controller/pkg/util/gateway-init.go index 04a08423b3..2a0754a7b5 100644 --- a/go-controller/pkg/util/gateway-init.go +++ b/go-controller/pkg/util/gateway-init.go @@ -118,7 
+118,7 @@ func GatewayInit(clusterIPSubnet []*net.IPNet, hostSubnet *net.IPNet, joinSubnet "options:chassis="+l3GatewayConfig.ChassisID, "external_ids:physical_ip="+l3GatewayConfig.IPAddress.IP.String()) if err != nil { - return fmt.Errorf("Failed to create logical router %v, stdout: %q, "+ + return fmt.Errorf("failed to create logical router %v, stdout: %q, "+ "stderr: %q, error: %v", gatewayRouter, stdout, stderr, err) } @@ -132,7 +132,7 @@ func GatewayInit(clusterIPSubnet []*net.IPNet, hostSubnet *net.IPNet, joinSubnet // create the per-node join switch stdout, stderr, err = RunOVNNbctl("--", "--may-exist", "ls-add", joinSwitch) if err != nil { - return fmt.Errorf("Failed to create logical switch %q, stdout: %q, stderr: %q, error: %v", + return fmt.Errorf("failed to create logical switch %q, stdout: %q, stderr: %q, error: %v", joinSwitch, stdout, stderr, err) } @@ -151,7 +151,8 @@ func GatewayInit(clusterIPSubnet []*net.IPNet, hostSubnet *net.IPNet, joinSubnet "--", "--may-exist", "lrp-add", gatewayRouter, gwRouterPort, gwLRPMAC.String(), fmt.Sprintf("%s/%d", gwLRPIp.String(), prefixLen)) if err != nil { - return fmt.Errorf("Failed to add logical router port %q, stderr: %q, error: %v", gwRouterPort, stderr, err) + return fmt.Errorf("failed to add logical router port %q for gateway router %s, "+ + "stderr: %q, error: %v", gwRouterPort, gatewayRouter, stderr, err) } // jtod/dtoj - patch ports that connect the per-node join switch to distributed router @@ -172,7 +173,8 @@ func GatewayInit(clusterIPSubnet []*net.IPNet, hostSubnet *net.IPNet, joinSubnet "--", "--may-exist", "lrp-add", k8sClusterRouter, drRouterPort, drLRPMAC.String(), fmt.Sprintf("%s/%d", drLRPIp.String(), prefixLen)) if err != nil { - return fmt.Errorf("Failed to add logical router port %q, stderr: %q, error: %v", drRouterPort, stderr, err) + return fmt.Errorf("failed to add logical router port %q to %s, "+ + "stderr: %q, error: %v", drRouterPort, k8sClusterRouter, stderr, err) } // When there are 
multiple gateway routers (which would be the likely @@ -182,8 +184,8 @@ func GatewayInit(clusterIPSubnet []*net.IPNet, hostSubnet *net.IPNet, joinSubnet stdout, stderr, err = RunOVNNbctl("set", "logical_router", gatewayRouter, "options:lb_force_snat_ip="+gwLRPIp.String()) if err != nil { - return fmt.Errorf("Failed to set logical router, stdout: %q, "+ - "stderr: %q, error: %v", stdout, stderr, err) + return fmt.Errorf("failed to set logical router %s's lb_force_snat_ip option, "+ + "stdout: %q, stderr: %q, error: %v", gatewayRouter, stdout, stderr, err) } for _, entry := range clusterIPSubnet { @@ -191,9 +193,9 @@ func GatewayInit(clusterIPSubnet []*net.IPNet, hostSubnet *net.IPNet, joinSubnet stdout, stderr, err = RunOVNNbctl("--may-exist", "lr-route-add", gatewayRouter, entry.String(), drLRPIp.String()) if err != nil { - return fmt.Errorf("Failed to add a static route in GR with distributed "+ + return fmt.Errorf("failed to add a static route in GR %s with distributed "+ "router as the nexthop, stdout: %q, stderr: %q, error: %v", - stdout, stderr, err) + gatewayRouter, stdout, stderr, err) } } @@ -221,8 +223,8 @@ func GatewayInit(clusterIPSubnet []*net.IPNet, hostSubnet *net.IPNet, joinSubnet fmt.Sprintf("external_ids:%s_lb_gateway_router=%s", proto, gatewayRouter), fmt.Sprintf("protocol=%s", strings.ToLower(string(proto)))) if err != nil { - return fmt.Errorf("failed to create load balancer for protocol %s: "+ - "stderr: %q, error: %v", proto, stderr, err) + return fmt.Errorf("failed to create load balancer for gateway router %s for protocol %s: "+ + "stderr: %q, error: %v", gatewayRouter, proto, stderr, err) } } } @@ -234,8 +236,8 @@ func GatewayInit(clusterIPSubnet []*net.IPNet, hostSubnet *net.IPNet, joinSubnet stdout, stderr, err = RunOVNNbctl("set", "logical_router", gatewayRouter, "load_balancer="+lbString) if err != nil { return fmt.Errorf("failed to set north-south load-balancers to the "+ - "gateway router, stdout: %q, stderr: %q, error: %v", - stdout, 
stderr, err) + "gateway router %s, stdout: %q, stderr: %q, error: %v", + gatewayRouter, stdout, stderr, err) } } @@ -244,8 +246,8 @@ func GatewayInit(clusterIPSubnet []*net.IPNet, hostSubnet *net.IPNet, joinSubnet stdout, stderr, err = RunOVNNbctl("--may-exist", "ls-add", externalSwitch) if err != nil { - return fmt.Errorf("failed to create logical switch, stdout: %q, "+ - "stderr: %q, error: %v", stdout, stderr, err) + return fmt.Errorf("failed to create logical switch %s, stdout: %q, "+ + "stderr: %q, error: %v", externalSwitch, stdout, stderr, err) } // Add external interface as a logical port to external_switch. @@ -267,8 +269,8 @@ func GatewayInit(clusterIPSubnet []*net.IPNet, hostSubnet *net.IPNet, joinSubnet stdout, stderr, err = RunOVNNbctl(cmdArgs...) if err != nil { - return fmt.Errorf("Failed to add logical port to switch, stdout: %q, "+ - "stderr: %q, error: %v", stdout, stderr, err) + return fmt.Errorf("failed to add logical port to switch %s, stdout: %q, "+ + "stderr: %q, error: %v", externalSwitch, stdout, stderr, err) } // Connect GR to external_switch with mac address of external interface @@ -283,8 +285,8 @@ func GatewayInit(clusterIPSubnet []*net.IPNet, hostSubnet *net.IPNet, joinSubnet "--", "set", "logical_router_port", "rtoe-"+gatewayRouter, "external-ids:gateway-physical-ip=yes") if err != nil { - return fmt.Errorf("Failed to add logical port to router, stdout: %q, "+ - "stderr: %q, error: %v", stdout, stderr, err) + return fmt.Errorf("failed to add logical port to router %s, stdout: %q, "+ + "stderr: %q, error: %v", gatewayRouter, stdout, stderr, err) } // Connect the external_switch to the router. 
@@ -294,8 +296,8 @@ func GatewayInit(clusterIPSubnet []*net.IPNet, hostSubnet *net.IPNet, joinSubnet "options:router-port=rtoe-"+gatewayRouter, "addresses="+"\""+l3GatewayConfig.MACAddress.String()+"\"") if err != nil { - return fmt.Errorf("Failed to add logical port to router, stdout: %q, "+ - "stderr: %q, error: %v", stdout, stderr, err) + return fmt.Errorf("failed to add logical port to router %s, stdout: %q, "+ + "stderr: %q, error: %v", gatewayRouter, stdout, stderr, err) } // Add a static route in GR with physical gateway as the default next hop. @@ -309,9 +311,9 @@ func GatewayInit(clusterIPSubnet []*net.IPNet, hostSubnet *net.IPNet, joinSubnet gatewayRouter, allIPs, l3GatewayConfig.NextHop.String(), fmt.Sprintf("rtoe-%s", gatewayRouter)) if err != nil { - return fmt.Errorf("Failed to add a static route in GR with physical "+ + return fmt.Errorf("failed to add a static route in GR %s with physical "+ "gateway as the default next hop, stdout: %q, "+ - "stderr: %q, error: %v", stdout, stderr, err) + "stderr: %q, error: %v", gatewayRouter, stdout, stderr, err) } // Add source IP address based routes in distributed router @@ -320,9 +322,9 @@ func GatewayInit(clusterIPSubnet []*net.IPNet, hostSubnet *net.IPNet, joinSubnet "--policy=src-ip", "lr-route-add", k8sClusterRouter, hostSubnet.String(), gwLRPIp.String()) if err != nil { - return fmt.Errorf("Failed to add source IP address based "+ - "routes in distributed router, stdout: %q, "+ - "stderr: %q, error: %v", stdout, stderr, err) + return fmt.Errorf("failed to add source IP address based "+ + "routes in distributed router %s, stdout: %q, "+ + "stderr: %q, error: %v", k8sClusterRouter, stdout, stderr, err) } // Default SNAT rules. 
@@ -330,8 +332,8 @@ func GatewayInit(clusterIPSubnet []*net.IPNet, hostSubnet *net.IPNet, joinSubnet stdout, stderr, err = RunOVNNbctl("--may-exist", "lr-nat-add", gatewayRouter, "snat", l3GatewayConfig.IPAddress.IP.String(), entry.String()) if err != nil { - return fmt.Errorf("Failed to create default SNAT rules, stdout: %q, "+ - "stderr: %q, error: %v", stdout, stderr, err) + return fmt.Errorf("failed to create default SNAT rules for gateway router %s, "+ + "stdout: %q, stderr: %q, error: %v", gatewayRouter, stdout, stderr, err) } } From bde64f938bc300bac003c27a435f83adbf3b7f7e Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 13 Apr 2020 10:15:08 -0500 Subject: [PATCH 27/27] Fixup for dual-stack changes --- go-controller/pkg/cni/helper_linux.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-controller/pkg/cni/helper_linux.go b/go-controller/pkg/cni/helper_linux.go index fabd06aae0..1df3b8b80d 100644 --- a/go-controller/pkg/cni/helper_linux.go +++ b/go-controller/pkg/cni/helper_linux.go @@ -335,7 +335,7 @@ func (pr *PodRequest) ConfigureInterface(namespace string, podName string, ifInf if err != nil { return false, nil } - return strings.Contains(stdout, ifInfo.IP.IP.String()), nil + return strings.Contains(stdout, ifInfo.IPs[0].IP.String()), nil }) if err != nil { return nil, fmt.Errorf("timed out dumping br-int flow entries for sandbox: %v", err)