[Release 1.26] Add e2e test about dnscache #5228

Merged: 1 commit, Jan 11, 2024

99 changes: 99 additions & 0 deletions tests/e2e/dnscache/Vagrantfile
@@ -0,0 +1,99 @@
ENV['VAGRANT_NO_PARALLEL'] = 'no'
NODE_ROLES = (ENV['E2E_NODE_ROLES'] || ["server-0", "agent-0"])
NODE_BOXES = (ENV['E2E_NODE_BOXES'] || ['generic/ubuntu2004', 'generic/ubuntu2004'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 3072).to_i
CNI = (ENV['E2E_CNI'] || "canal") # canal, cilium and calico supported
NETWORK4_PREFIX = "10.10.10"
NETWORK6_PREFIX = "fd11:decf:c0ff:ee"
install_type = ""

def provision(vm, roles, role_num, node_num)
  vm.box = NODE_BOXES[node_num]
  vm.hostname = "#{roles[0]}-#{role_num}"
  # Node N gets IPv4 10.10.10.(100+N) and IPv6 fd11:decf:c0ff:ee::(10+N),
  # so server-0 ends up at 10.10.10.100 (the bind-address used below)
  node_ip4 = "#{NETWORK4_PREFIX}.#{100+node_num}"
  node_ip6 = "#{NETWORK6_PREFIX}::#{10+node_num}"
  node_ip6_gw = "#{NETWORK6_PREFIX}::1"
  # Only works with libvirt, which allows IPv4 + IPv6 on a single network/interface
  vm.network "private_network",
    :ip => node_ip4,
    :netmask => "255.255.255.0",
    :libvirt__dhcp_enabled => false,
    :libvirt__forward_mode => "none",
    :libvirt__guest_ipv6 => "yes",
    :libvirt__ipv6_address => "#{NETWORK6_PREFIX}::1",
    :libvirt__ipv6_prefix => "64"

  vagrant_defaults = File.exists?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb"
  load vagrant_defaults

  defaultOSConfigure(vm)

  scripts_location = Dir.exists?("./scripts") ? "./scripts" : "../scripts"
  vm.provision "IPv6 Setup", type: "shell", path: scripts_location + "/ipv6.sh", args: [node_ip4, node_ip6, node_ip6_gw, CNI, vm.box]

  install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
  vm.provision "Ping Check", type: "shell", inline: "ping -4 -c 2 rke2.io"

  if roles.include?("server")
    vm.provision :rke2, run: 'once' do |rke2|
      rke2.env = %W[INSTALL_RKE2_TYPE=server #{install_type}]
      rke2.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
      rke2.config = <<~YAML
        write-kubeconfig-mode: '0644'
        node-external-ip: #{node_ip4},#{node_ip6}
        node-ip: #{node_ip4},#{node_ip6}
        token: vagrant-rke2
        cluster-cidr: 10.42.0.0/16,2001:cafe:42:0::/56
        service-cidr: 10.43.0.0/16,2001:cafe:43:0::/112
        bind-address: #{NETWORK4_PREFIX}.100
        cni: #{CNI}
        kubelet-arg: "--node-ip=0.0.0.0" # Workaround for https://github.com/kubernetes/kubernetes/issues/111695
      YAML
    end
  end
  if roles.include?("agent")
    vm.provision :rke2, run: 'once' do |rke2|
      rke2.env = %W[INSTALL_RKE2_TYPE=agent #{install_type}]
      rke2.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
      rke2.install_path = false
      rke2.config = <<~YAML
        write-kubeconfig-mode: '0644'
        node-external-ip: #{node_ip4},#{node_ip6}
        node-ip: #{node_ip4},#{node_ip6}
        server: https://#{NETWORK4_PREFIX}.100:9345
        token: vagrant-rke2
        cni: #{CNI}
        kubelet-arg: "--node-ip=0.0.0.0" # Workaround for https://github.com/kubernetes/kubernetes/issues/111695
      YAML
    end
  end
end

Vagrant.configure("2") do |config|
  config.vagrant.plugins = ["vagrant-rke2", "vagrant-reload", "vagrant-libvirt"]
  config.vm.provider "libvirt" do |v|
    v.cpus = NODE_CPUS
    v.memory = NODE_MEMORY
  end

  if NODE_ROLES.kind_of?(String)
    NODE_ROLES = NODE_ROLES.split(" ", -1)
  end
  if NODE_BOXES.kind_of?(String)
    NODE_BOXES = NODE_BOXES.split(" ", -1)
  end

  # Must iterate on the index, vagrant does not understand iterating
  # over the node roles themselves
  NODE_ROLES.length.times do |i|
    name = NODE_ROLES[i]
    config.vm.define name do |node|
      roles = name.split("-", -1)
      role_num = roles.pop.to_i
      provision(node.vm, roles, role_num, i)
    end
  end
end
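
The E2E_* environment variables above let one Vagrantfile cover several test matrices. A minimal sketch of bringing the two nodes up by hand, with illustrative values (the e2e harness normally drives this for you):

    E2E_CNI=cilium E2E_NODE_MEMORY=4096 vagrant up --provider=libvirt
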
121 changes: 121 additions & 0 deletions tests/e2e/dnscache/dnscache_test.go
@@ -0,0 +1,121 @@
package dnscache

import (
	"flag"
	"fmt"
	"os"
	"strings"
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	"github.com/rancher/rke2/tests/e2e"
)

// Valid nodeOS: generic/ubuntu2004, opensuse/Leap-15.3.x86_64
var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system")
var serverCount = flag.Int("serverCount", 1, "number of server nodes")
var agentCount = flag.Int("agentCount", 1, "number of agent nodes")
var ci = flag.Bool("ci", false, "running on CI")

// Environment Variables Info:
// E2E_RELEASE_VERSION=v1.23.1+rke2r1 or nil for latest commit from master

func Test_E2Ednscache(t *testing.T) {
	flag.Parse()
	RegisterFailHandler(Fail)
	suiteConfig, reporterConfig := GinkgoConfiguration()
	RunSpecs(t, "Validate dnscache Test Suite", suiteConfig, reporterConfig)
}

var (
	kubeConfigFile  string
	serverNodeNames []string
	agentNodeNames  []string
)
var _ = ReportAfterEach(e2e.GenReport)

var _ = Describe("Verify dnscache Configuration", Ordered, func() {

	It("Starts up with no issues", func() {
		var err error
		serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
		Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
		fmt.Println("CLUSTER CONFIG")
		fmt.Println("OS:", *nodeOS)
		fmt.Println("Server Nodes:", serverNodeNames)
		fmt.Println("Agent Nodes:", agentNodeNames)
		kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
		Expect(err).NotTo(HaveOccurred())
	})

	It("Checks Node Status", func() {
		Eventually(func(g Gomega) {
			nodes, err := e2e.ParseNodes(kubeConfigFile, false)
			g.Expect(err).NotTo(HaveOccurred())
			for _, node := range nodes {
				g.Expect(node.Status).Should(Equal("Ready"))
			}
		}, "620s", "5s").Should(Succeed())
		_, err := e2e.ParseNodes(kubeConfigFile, true)
		Expect(err).NotTo(HaveOccurred())
	})

	It("Checks Pod Status", func() {
		Eventually(func(g Gomega) {
			pods, err := e2e.ParsePods(kubeConfigFile, false)
			g.Expect(err).NotTo(HaveOccurred())
			for _, pod := range pods {
				if strings.Contains(pod.Name, "helm-install") {
					g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
				} else {
					g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
				}
			}
		}, "420s", "5s").Should(Succeed())
		_, err := e2e.ParsePods(kubeConfigFile, true)
		Expect(err).NotTo(HaveOccurred())
	})

	It("Verifies that each node has IPv4 and IPv6", func() {
		for _, node := range serverNodeNames {
			cmd := fmt.Sprintf("kubectl get node %s -o jsonpath='{.status.addresses}' --kubeconfig=%s | jq '.[] | select(.type == \"ExternalIP\") | .address'",
				node, kubeConfigFile)
			res, err := e2e.RunCommand(cmd)
			Expect(err).NotTo(HaveOccurred(), res)
			// Prefixes match NETWORK4_PREFIX / NETWORK6_PREFIX in the Vagrantfile
			Expect(res).Should(ContainSubstring("10.10.10"))
			Expect(res).Should(ContainSubstring("fd11:decf:c0ff"))
		}
	})

	It("Verifies nodecache daemonset comes up", func() {
		_, err := e2e.DeployWorkload("nodecache.yaml", kubeConfigFile)
		Expect(err).NotTo(HaveOccurred())
		// One node-local-dns pod per node: 1 server + 1 agent = 2 ready
		Eventually(func() (string, error) {
			cmd := "kubectl get ds node-local-dns -n kube-system -o jsonpath='{.status.numberReady}' --kubeconfig=" + kubeConfigFile
			return e2e.RunCommand(cmd)
		}, "120s", "5s").Should(ContainSubstring("2"))
	})

	It("Verifies nodecache is working", func() {
		// node-local-dns binds the well-known link-local address 169.254.20.10 on
		// every node, so querying it directly exercises the cache without going
		// through the cluster DNS Service
		cmd := "dig +retries=0 @169.254.20.10 www.kubernetes.io"
		for _, nodeName := range serverNodeNames {
			Expect(e2e.RunCmdOnNode(cmd, nodeName)).Should(ContainSubstring("status: NOERROR"), "failed cmd: "+cmd)
		}
	})
})

// Track spec failures so AfterSuite can leave the cluster running for
// debugging when not in CI
var failed bool
var _ = AfterEach(func() {
	failed = failed || CurrentSpecReport().Failed()
})

var _ = AfterSuite(func() {
	if failed && !*ci {
		fmt.Println("FAILED!")
	} else {
		Expect(e2e.DestroyCluster()).To(Succeed())
		Expect(os.Remove(kubeConfigFile)).To(Succeed())
	}
})
10 changes: 10 additions & 0 deletions tests/e2e/resource_files/nodecache.yaml
@@ -0,0 +1,10 @@
---
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
  name: rke2-coredns
  namespace: kube-system
spec:
  valuesContent: |-
    nodelocal:
      enabled: true
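
Taken together: this HelmChartConfig enables the nodelocal option of the bundled rke2-coredns chart, which deploys the node-local-dns DaemonSet that the Go test above waits for and then queries at the link-local address 169.254.20.10. A sketch of running the suite end to end, assuming the go test workflow used by the other rke2 e2e suites (the flag names come from dnscache_test.go; the timeout is illustrative):

    go test -v -timeout=45m ./tests/e2e/dnscache/... -nodeOS=generic/ubuntu2004 -serverCount=1 -agentCount=1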