E2E test for token coverage (k3s-io#8184)
Signed-off-by: Derek Nola <derek.nola@suse.com>
dereknola committed Sep 5, 2023
1 parent a4c7a50 commit 23a5e37
Showing 2 changed files with 251 additions and 0 deletions.
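For context, e2e suites in this repository are driven with go test against the package directory; a minimal sketch of how this new suite might be invoked (the timeout and flag values are illustrative assumptions, the flags themselves are defined in token_test.go below):

    E2E_GOCOVER=true go test -v -timeout=45m ./tests/e2e/token/... -ci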
82 changes: 82 additions & 0 deletions tests/e2e/token/Vagrantfile
@@ -0,0 +1,82 @@
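# VAGRANT_NO_PARALLEL set to 'no' leaves "vagrant up" free to bring nodes up in parallel ('yes' would force serial provisioning).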
ENV['VAGRANT_NO_PARALLEL'] = 'no'
NODE_ROLES = (ENV['E2E_NODE_ROLES'] ||
  ["server-0", "agent-0", "agent-1"])
NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
['generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
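# When E2E_GOCOVER is set, nodes are provisioned with a Go coverage output directory (see addCoverageDir below) that the test suite collects at the end of the run.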
GOCOVER = (ENV['E2E_GOCOVER'] || "")
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
# Nodes get static IPs on this private network prefix (.100, .101, ...)
NETWORK_PREFIX = "10.10.10"
install_type = ""

def provision(vm, roles, role_num, node_num)
vm.box = NODE_BOXES[node_num]
vm.hostname = "#{roles[0]}-#{role_num}"
node_ip = "#{NETWORK_PREFIX}.#{100+node_num}"
vm.network "private_network", ip: node_ip, netmask: "255.255.255.0"

scripts_location = Dir.exist?("./scripts") ? "./scripts" : "../scripts"
vagrant_defaults = File.exist?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb"
load vagrant_defaults

defaultOSConfigure(vm)
addCoverageDir(vm, roles, GOCOVER)
install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)

vm.provision "Ping Check", type: "shell", inline: "ping -c 2 k3s.io"

if roles.include?("server") && role_num == 0
vm.provision :k3s, run: 'once' do |k3s|
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
k3s.args = "server "
k3s.config = <<~YAML
cluster-init: true
token: vagrant
node-external-ip: #{node_ip}
flannel-iface: eth1
YAML
k3s.env = ["K3S_KUBECONFIG_MODE=0644", install_type]
end
end
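# Agents are installed with INSTALL_K3S_SKIP_START=true and no token, so the test can create tokens and start each agent itself.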
if roles.include?("agent")
vm.provision :k3s, run: 'once' do |k3s|
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
k3s.args = "agent "
k3s.config = <<~YAML
server: "https://#{NETWORK_PREFIX}.100:6443"
node-external-ip: #{node_ip}
flannel-iface: eth1
YAML
k3s.env = ["K3S_KUBECONFIG_MODE=0644", "INSTALL_K3S_SKIP_START=true", install_type]
end
end
end

Vagrant.configure("2") do |config|
config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload", "vagrant-libvirt", "vagrant-scp"]
config.vm.provider "libvirt" do |v|
v.cpus = NODE_CPUS
v.memory = NODE_MEMORY
end

if NODE_ROLES.kind_of?(String)
NODE_ROLES = NODE_ROLES.split(" ", -1)
end
if NODE_BOXES.kind_of?(String)
NODE_BOXES = NODE_BOXES.split(" ", -1)
end

# Iterate over the index; Vagrant does not support iterating
# over the node roles directly
NODE_ROLES.length.times do |i|
name = NODE_ROLES[i]
config.vm.define name do |node|
roles = name.split("-", -1)
role_num = roles.pop.to_i
provision(node.vm, roles, role_num, i)
end
end
end
169 changes: 169 additions & 0 deletions tests/e2e/token/token_test.go
@@ -0,0 +1,169 @@
package token

import (
"flag"
"fmt"
"os"
"strings"
"testing"
"time"

"github.com/k3s-io/k3s/tests/e2e"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)

// Valid nodeOS:
// generic/ubuntu2004, generic/centos7, generic/rocky8, opensuse/Leap-15.4.x86_64

var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system")
var serverCount = flag.Int("serverCount", 1, "number of server nodes")
var agentCount = flag.Int("agentCount", 2, "number of agent nodes")
var ci = flag.Bool("ci", false, "running on CI")
var local = flag.Bool("local", false, "deploy a locally built K3s binary")

// Environment Variables Info:
// E2E_RELEASE_VERSION=v1.27.1+k3s2 (default: latest commit from master)
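// E2E_GOCOVER=true (provision nodes with a coverage directory; the report is gathered in AfterSuite via e2e.GetCoverageReport)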

func Test_E2EToken(t *testing.T) {
RegisterFailHandler(Fail)
flag.Parse()
suiteConfig, reporterConfig := GinkgoConfiguration()
RunSpecs(t, "SnapshotRestore Test Suite", suiteConfig, reporterConfig)
}

var (
kubeConfigFile string
serverNodeNames []string
agentNodeNames []string
)

var _ = ReportAfterEach(e2e.GenReport)

var _ = Describe("Use the token CLI to create and join agents", Ordered, func() {
Context("Agent joins with permanent token:", func() {
It("Starts up with no issues", func() {
var err error
if *local {
serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
} else {
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
}
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
fmt.Println("CLUSTER CONFIG")
fmt.Println("OS:", *nodeOS)
fmt.Println("Server Nodes:", serverNodeNames)
fmt.Println("Agent Nodes:", agentNodeNames)
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
})

It("Checks Node and Pod Status", func() {
fmt.Printf("\nFetching node status\n")
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "420s", "5s").Should(Succeed())
_, _ = e2e.ParseNodes(kubeConfigFile, true)

fmt.Printf("\nFetching Pods status\n")
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
}, "420s", "5s").Should(Succeed())
_, _ = e2e.ParsePods(kubeConfigFile, true)
})

var permToken string
It("Creates a permanent agent token", func() {
permToken = "perage.s0xt4u0hl5guoyi6"
_, err := e2e.RunCmdOnNode("k3s token create --ttl=0 "+permToken, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())

res, err := e2e.RunCmdOnNode("k3s token list", serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
Expect(res).To(MatchRegexp(`perage\s+<forever>\s+<never>`))
})
It("Joins an agent with the permanent token", func() {
cmd := fmt.Sprintf("echo 'token: %s' | sudo tee -a /etc/rancher/k3s/config.yaml > /dev/null", permToken)
_, err := e2e.RunCmdOnNode(cmd, agentNodeNames[0])
Expect(err).NotTo(HaveOccurred())
_, err = e2e.RunCmdOnNode("systemctl start k3s-agent", agentNodeNames[0])
Expect(err).NotTo(HaveOccurred())

Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(len(nodes)).Should(Equal(2))
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "60s", "5s").Should(Succeed())
})
})
Context("Agent joins with temporary token:", func() {
It("Creates a 20s agent token", func() {
_, err := e2e.RunCmdOnNode("k3s token create --ttl=20s 20sect.jxnpve6vg8dqm895", serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
res, err := e2e.RunCmdOnNode("k3s token list", serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
Expect(res).To(MatchRegexp(`20sect\s+[0-9]{2}s`))
})
It("Cleans up 20s token automatically", func() {
Eventually(func() (string, error) {
return e2e.RunCmdOnNode("k3s token list", serverNodeNames[0])
}, "20s", "5s").ShouldNot(ContainSubstring("20sect"))
})
var tempToken string
It("Creates a 10m agent token", func() {
tempToken = "10mint.ida18trbbk43szwk"
_, err := e2e.RunCmdOnNode("k3s token create --ttl=10m "+tempToken, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
time.Sleep(2 * time.Second)
res, err := e2e.RunCmdOnNode("k3s token list", serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
Expect(res).To(MatchRegexp(`10mint\s+[0-9]m`))
})
It("Joins an agent with the 10m token", func() {
cmd := fmt.Sprintf("echo 'token: %s' | sudo tee -a /etc/rancher/k3s/config.yaml > /dev/null", tempToken)
_, err := e2e.RunCmdOnNode(cmd, agentNodeNames[1])
Expect(err).NotTo(HaveOccurred())
_, err = e2e.RunCmdOnNode("systemctl start k3s-agent", agentNodeNames[1])
Expect(err).NotTo(HaveOccurred())

Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(len(nodes)).Should(Equal(3))
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "60s", "5s").Should(Succeed())
})
})
})

var failed bool
var _ = AfterEach(func() {
failed = failed || CurrentSpecReport().Failed()
})

var _ = AfterSuite(func() {
if failed && !*ci {
fmt.Println("FAILED!")
} else {
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(kubeConfigFile)).To(Succeed())
}
})
