Skip to content

Commit

Permalink
use server/agent instead of master/worker
Browse files Browse the repository at this point in the history
  • Loading branch information
iwilltry42 committed Jul 14, 2020
1 parent 26cd8bb commit ec3f10e
Show file tree
Hide file tree
Showing 28 changed files with 286 additions and 286 deletions.
72 changes: 36 additions & 36 deletions cmd/cluster/clusterCreate.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,9 +43,9 @@ import (
const clusterCreateDescription = `
Create a new k3s cluster with containerized nodes (k3s in docker).
Every cluster will consist of one or more containers:
- 1 (or more) master node container (k3s)
- 1 (or more) server node container (k3s)
- (optionally) 1 loadbalancer container as the entrypoint to the cluster (nginx)
- (optionally) 1 (or more) worker node containers (k3s)
- (optionally) 1 (or more) agent node containers (k3s)
`

// NewCmdClusterCreate returns a new cobra command
Expand All @@ -71,8 +71,8 @@ func NewCmdClusterCreate() *cobra.Command {

// create cluster
if updateDefaultKubeconfig || updateCurrentContext {
log.Debugln("'--update-default-kubeconfig set: enabling wait-for-master")
cluster.CreateClusterOpts.WaitForMaster = true
log.Debugln("'--update-default-kubeconfig set: enabling wait-for-server")
cluster.CreateClusterOpts.WaitForServer = true
}
if err := k3dCluster.ClusterCreate(cmd.Context(), runtimes.SelectedRuntime, cluster); err != nil {
// rollback if creation failed
Expand Down Expand Up @@ -111,28 +111,28 @@ func NewCmdClusterCreate() *cobra.Command {
/*********
* Flags *
*********/
cmd.Flags().StringP("api-port", "a", "random", "Specify the Kubernetes API server port exposed on the LoadBalancer (Format: `--api-port [HOST:]HOSTPORT`)\n - Example: `k3d create -m 3 -a 0.0.0.0:6550`")
cmd.Flags().IntP("masters", "m", 1, "Specify how many masters you want to create")
cmd.Flags().IntP("workers", "w", 0, "Specify how many workers you want to create")
cmd.Flags().String("api-port", "random", "Specify the Kubernetes API server port exposed on the LoadBalancer (Format: `--api-port [HOST:]HOSTPORT`)\n - Example: `k3d cluster create --servers 3 --api-port 0.0.0.0:6550`")
cmd.Flags().IntP("servers", "s", 1, "Specify how many servers you want to create")
cmd.Flags().IntP("agents", "a", 0, "Specify how many agents you want to create")
cmd.Flags().StringP("image", "i", fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.GetK3sVersion(false)), "Specify k3s image that you want to use for the nodes")
cmd.Flags().String("network", "", "Join an existing network")
cmd.Flags().String("token", "", "Specify a cluster token. By default, we generate one.")
cmd.Flags().StringArrayP("volume", "v", nil, "Mount volumes into the nodes (Format: `--volume [SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d create -w 2 -v /my/path@worker[0,1] -v /tmp/test:/tmp/other@master[0]`")
cmd.Flags().StringArrayP("port", "p", nil, "Map ports from the node containers to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d create -w 2 -p 8080:80@worker[0] -p 8081@worker[1]`")
cmd.Flags().BoolVar(&createClusterOpts.WaitForMaster, "wait", true, "Wait for the master(s) to be ready before returning. Use '--timeout DURATION' to not wait forever.")
cmd.Flags().StringArrayP("volume", "v", nil, "Mount volumes into the nodes (Format: `--volume [SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -v /my/path@agent[0,1] -v /tmp/test:/tmp/other@server[0]`")
cmd.Flags().StringArrayP("port", "p", nil, "Map ports from the node containers to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d cluster create --agents 2 -p 8080:80@agent[0] -p 8081@agent[1]`")
cmd.Flags().BoolVar(&createClusterOpts.WaitForServer, "wait", true, "Wait for the server(s) to be ready before returning. Use '--timeout DURATION' to not wait forever.")
cmd.Flags().DurationVar(&createClusterOpts.Timeout, "timeout", 0*time.Second, "Rollback changes if cluster couldn't be created in specified duration.")
cmd.Flags().BoolVar(&updateDefaultKubeconfig, "update-default-kubeconfig", true, "Directly update the default kubeconfig with the new cluster's context")
cmd.Flags().BoolVar(&updateCurrentContext, "switch-context", true, "Directly switch the default kubeconfig's current-context to the new cluster's context (implies --update-default-kubeconfig)")
cmd.Flags().BoolVar(&createClusterOpts.DisableLoadBalancer, "no-lb", false, "Disable the creation of a LoadBalancer in front of the master nodes")
cmd.Flags().BoolVar(&createClusterOpts.DisableLoadBalancer, "no-lb", false, "Disable the creation of a LoadBalancer in front of the server nodes")

/* Image Importing */
cmd.Flags().BoolVar(&createClusterOpts.DisableImageVolume, "no-image-volume", false, "Disable the creation of a volume for importing images")

/* Multi Master Configuration */
/* Multi Server Configuration */

// multi-master - datastore
// TODO: implement multi-master setups with external data store
// cmd.Flags().String("datastore-endpoint", "", "[WIP] Specify external datastore endpoint (e.g. for multi master clusters)")
// multi-server - datastore
// TODO: implement multi-server setups with external data store
// cmd.Flags().String("datastore-endpoint", "", "[WIP] Specify external datastore endpoint (e.g. for multi server clusters)")
/*
cmd.Flags().String("datastore-network", "", "Specify container network where we can find the datastore-endpoint (add a connection)")
Expand All @@ -143,8 +143,8 @@ func NewCmdClusterCreate() *cobra.Command {
*/

/* k3s */
cmd.Flags().StringArrayVar(&createClusterOpts.K3sServerArgs, "k3s-server-arg", nil, "Additional args passed to the `k3s server` command on master nodes (new flag per arg)")
cmd.Flags().StringArrayVar(&createClusterOpts.K3sAgentArgs, "k3s-agent-arg", nil, "Additional args passed to the `k3s agent` command on worker nodes (new flag per arg)")
cmd.Flags().StringArrayVar(&createClusterOpts.K3sServerArgs, "k3s-server-arg", nil, "Additional args passed to the `k3s server` command on server nodes (new flag per arg)")
cmd.Flags().StringArrayVar(&createClusterOpts.K3sAgentArgs, "k3s-agent-arg", nil, "Additional args passed to the `k3s agent` command on agent nodes (new flag per arg)")

/* Subcommands */

Expand Down Expand Up @@ -181,14 +181,14 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
image = version.GetK3sVersion(true)
}

// --masters
masterCount, err := cmd.Flags().GetInt("masters")
// --servers
serverCount, err := cmd.Flags().GetInt("servers")
if err != nil {
log.Fatalln(err)
}

// --workers
workerCount, err := cmd.Flags().GetInt("workers")
// --agents
agentCount, err := cmd.Flags().GetInt("agents")
if err != nil {
log.Fatalln(err)
}
Expand All @@ -203,7 +203,7 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
network.Name = networkName
network.External = true
}
if networkName == "host" && (masterCount+workerCount) > 1 {
if networkName == "host" && (serverCount+agentCount) > 1 {
log.Fatalln("Can only run a single node in hostnetwork mode")
}

Expand Down Expand Up @@ -323,31 +323,31 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
// generate list of nodes
cluster.Nodes = []*k3d.Node{}

// MasterLoadBalancer
// ServerLoadBalancer
if !createClusterOpts.DisableLoadBalancer {
cluster.MasterLoadBalancer = &k3d.Node{
cluster.ServerLoadBalancer = &k3d.Node{
Role: k3d.LoadBalancerRole,
}
}

/****************
* Master Nodes *
* Server Nodes *
****************/

for i := 0; i < masterCount; i++ {
for i := 0; i < serverCount; i++ {
node := k3d.Node{
Role: k3d.MasterRole,
Role: k3d.ServerRole,
Image: image,
Args: createClusterOpts.K3sServerArgs,
MasterOpts: k3d.MasterOpts{},
ServerOpts: k3d.ServerOpts{},
}

// TODO: by default, we don't expose an API port: should we change that?
// -> if we want to change that, simply add the exposeAPI struct here

// first master node will be init node if we have more than one master specified but no external datastore
if i == 0 && masterCount > 1 {
node.MasterOpts.IsInit = true
// first server node will be init node if we have more than one server specified but no external datastore
if i == 0 && serverCount > 1 {
node.ServerOpts.IsInit = true
cluster.InitNode = &node
}

Expand All @@ -356,12 +356,12 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
}

/****************
* Worker Nodes *
* Agent Nodes *
****************/

for i := 0; i < workerCount; i++ {
for i := 0; i < agentCount; i++ {
node := k3d.Node{
Role: k3d.WorkerRole,
Role: k3d.AgentRole,
Image: image,
Args: createClusterOpts.K3sAgentArgs,
}
Expand All @@ -381,11 +381,11 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
}

// append ports
nodeCount := masterCount + workerCount
nodeCount := serverCount + agentCount
nodeList := cluster.Nodes
if !createClusterOpts.DisableLoadBalancer {
nodeCount++
nodeList = append(nodeList, cluster.MasterLoadBalancer)
nodeList = append(nodeList, cluster.ServerLoadBalancer)
}
for portmap, filters := range portFilterMap {
if len(filters) == 0 && (nodeCount) > 1 {
Expand Down
10 changes: 5 additions & 5 deletions cmd/cluster/clusterList.go
Original file line number Diff line number Diff line change
Expand Up @@ -103,7 +103,7 @@ func PrintClusters(clusters []*k3d.Cluster, flags clusterFlags) {
defer tabwriter.Flush()

if !flags.noHeader {
headers := []string{"NAME", "MASTERS", "WORKERS"} // TODO: getCluster: add status column
headers := []string{"NAME", "SERVERS", "AGENTS"} // TODO: getCluster: add status column
if flags.token {
headers = append(headers, "TOKEN")
}
Expand All @@ -116,13 +116,13 @@ func PrintClusters(clusters []*k3d.Cluster, flags clusterFlags) {
k3cluster.SortClusters(clusters)

for _, cluster := range clusters {
masterCount := cluster.MasterCount()
workerCount := cluster.WorkerCount()
serverCount := cluster.ServerCount()
agentCount := cluster.AgentCount()

if flags.token {
fmt.Fprintf(tabwriter, "%s\t%d\t%d\t%s\n", cluster.Name, masterCount, workerCount, cluster.Token)
fmt.Fprintf(tabwriter, "%s\t%d\t%d\t%s\n", cluster.Name, serverCount, agentCount, cluster.Token)
} else {
fmt.Fprintf(tabwriter, "%s\t%d\t%d\n", cluster.Name, masterCount, workerCount)
fmt.Fprintf(tabwriter, "%s\t%d\t%d\n", cluster.Name, serverCount, agentCount)
}
}
}
2 changes: 1 addition & 1 deletion cmd/cluster/clusterStart.go
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ func NewCmdClusterStart() *cobra.Command {

// add flags
cmd.Flags().BoolP("all", "a", false, "Start all existing clusters")
cmd.Flags().BoolVar(&startClusterOpts.WaitForMaster, "wait", false, "Wait for the master(s) (and loadbalancer) to be ready before returning.")
cmd.Flags().BoolVar(&startClusterOpts.WaitForServer, "wait", false, "Wait for the server(s) (and loadbalancer) to be ready before returning.")
cmd.Flags().DurationVar(&startClusterOpts.Timeout, "timeout", 0*time.Second, "Maximum waiting time for '--wait' before canceling/returning.")

// add subcommands
Expand Down
2 changes: 1 addition & 1 deletion cmd/node/nodeCreate.go
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ func NewCmdNodeCreate() *cobra.Command {

// add flags
cmd.Flags().Int("replicas", 1, "Number of replicas of this node specification.")
cmd.Flags().String("role", string(k3d.WorkerRole), "Specify node role [master, worker]")
cmd.Flags().String("role", string(k3d.AgentRole), "Specify node role [server, agent]")
if err := cmd.RegisterFlagCompletionFunc("role", util.ValidArgsNodeRoles); err != nil {
log.Fatalln("Failed to register flag completion for '--role'", err)
}
Expand Down
2 changes: 1 addition & 1 deletion cmd/node/nodeStart.go
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ func NewCmdNodeStart() *cobra.Command {

// parseStartNodeCmd parses the command input into variables required to start a node
func parseStartNodeCmd(cmd *cobra.Command, args []string) *k3d.Node {
// node name // TODO: startNode: allow node filters, e.g. `k3d start nodes mycluster@worker` to start all worker nodes of cluster 'mycluster'
// node name // TODO: startNode: allow node filters, e.g. `k3d node start mycluster@agent` to start all agent nodes of cluster 'mycluster'
if len(args) == 0 || len(args[0]) == 0 {
log.Fatalln("No node name given")
}
Expand Down
2 changes: 1 addition & 1 deletion cmd/node/nodeStop.go
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ func NewCmdNodeStop() *cobra.Command {

// parseStopNodeCmd parses the command input into variables required to stop a node
func parseStopNodeCmd(cmd *cobra.Command, args []string) *k3d.Node {
// node name // TODO: allow node filters, e.g. `k3d stop nodes mycluster@worker` to stop all worker nodes of cluster 'mycluster'
// node name // TODO: allow node filters, e.g. `k3d node stop mycluster@agent` to stop all agent nodes of cluster 'mycluster'
if len(args) == 0 || len(args[0]) == 0 {
log.Fatalln("No node name given")
}
Expand Down
2 changes: 1 addition & 1 deletion cmd/util/completion.go
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,7 @@ nodeLoop:
func ValidArgsNodeRoles(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {

var completions []string
roles := []string{string(k3d.MasterRole), string(k3d.WorkerRole)}
roles := []string{string(k3d.ServerRole), string(k3d.AgentRole)}

for _, role := range roles {
if strings.HasPrefix(role, toComplete) {
Expand Down
28 changes: 14 additions & 14 deletions cmd/util/filter.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ import (
)

// Regexp pattern to match node filters
var filterRegexp = regexp.MustCompile(`^(?P<group>master|worker|loadbalancer|all)(?P<subsetSpec>\[(?P<subset>(?P<subsetList>(\d+,?)+)|(?P<subsetRange>\d*:\d*)|(?P<subsetWildcard>\*))\])?$`)
var filterRegexp = regexp.MustCompile(`^(?P<group>server|agent|loadbalancer|all)(?P<subsetSpec>\[(?P<subset>(?P<subsetList>(\d+,?)+)|(?P<subsetRange>\d*:\d*)|(?P<subsetWildcard>\*))\])?$`)

// SplitFiltersFromFlag separates a flag's value from the node filter, if there is one
func SplitFiltersFromFlag(flag string) (string, []string, error) {
Expand Down Expand Up @@ -72,16 +72,16 @@ func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) {
}

// map roles to subsets
masterNodes := []*k3d.Node{}
workerNodes := []*k3d.Node{}
var masterlb *k3d.Node
serverNodes := []*k3d.Node{}
agentNodes := []*k3d.Node{}
var serverlb *k3d.Node
for _, node := range nodes {
if node.Role == k3d.MasterRole {
masterNodes = append(masterNodes, node)
} else if node.Role == k3d.WorkerRole {
workerNodes = append(workerNodes, node)
if node.Role == k3d.ServerRole {
serverNodes = append(serverNodes, node)
} else if node.Role == k3d.AgentRole {
agentNodes = append(agentNodes, node)
} else if node.Role == k3d.LoadBalancerRole {
masterlb = node
serverlb = node
}
}

Expand Down Expand Up @@ -110,12 +110,12 @@ func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) {

// Choose the group of nodes to operate on
groupNodes := []*k3d.Node{}
if submatches["group"] == string(k3d.MasterRole) {
groupNodes = masterNodes
} else if submatches["group"] == string(k3d.WorkerRole) {
groupNodes = workerNodes
if submatches["group"] == string(k3d.ServerRole) {
groupNodes = serverNodes
} else if submatches["group"] == string(k3d.AgentRole) {
groupNodes = agentNodes
} else if submatches["group"] == string(k3d.LoadBalancerRole) {
filteredNodes = append(filteredNodes, masterlb)
filteredNodes = append(filteredNodes, serverlb)
return filteredNodes, nil // early exit if filtered group is the loadbalancer
}

Expand Down
8 changes: 4 additions & 4 deletions docs/faq/faq.md
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@

## Issues with ZFS

- k3s currently has [no support for ZFS](ttps://github.com/rancher/k3s/issues/66) and thus, creating multi-master setups (e.g. `k3d cluster create multimaster --masters 3`) fails, because the initializing master node (server flag `--cluster-init`) errors out with the following log:
- k3s currently has [no support for ZFS](https://github.com/rancher/k3s/issues/66) and thus, creating multi-server setups (e.g. `k3d cluster create multiserver --servers 3`) fails, because the initializing server node (server flag `--cluster-init`) errors out with the following log:
```bash
starting kubernetes: preparing server: start cluster and https: raft_init(): io: create I/O capabilities probe file: posix_allocate: operation not supported on socket
```
Expand All @@ -25,10 +25,10 @@
- clean up or expand docker root filesystem
- change the kubelet's eviction thresholds upon cluster creation: `k3d cluster create --k3s-agent-arg '--kubelet-arg=eviction-hard=imagefs.available<1%,nodefs.available<1%' --k3s-agent-arg '--kubelet-arg=eviction-minimum-reclaim=imagefs.available=1%,nodefs.available=1%'`

## Restarting a multi-master cluster or the initializing master node fails
## Restarting a multi-server cluster or the initializing server node fails

- What you do: You create a cluster with more than one master node and later, you either stop `master-0` or stop/start the whole cluster
- What you do: You create a cluster with more than one server node and later, you either stop `server-0` or stop/start the whole cluster
- What fails: After the restart, you cannot connect to the cluster anymore and `kubectl` will give you a lot of errors
- What causes this issue: it's a [known issue with dqlite in `k3s`](https://github.com/rancher/k3s/issues/1391) which doesn't allow the initializing master node to go down
- What causes this issue: it's a [known issue with dqlite in `k3s`](https://github.com/rancher/k3s/issues/1391) which doesn't allow the initializing server node to go down
- What's the solution: Hopefully, this will be solved by the planned [replacement of dqlite with embedded etcd in k3s](https://github.com/rancher/k3s/pull/1770)
- Related issues: [#262](https://github.com/rancher/k3d/issues/262)
2 changes: 1 addition & 1 deletion docs/index.md
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ You have several options there:

## Quick Start

Create a cluster named `mycluster` with just a single master node:
Create a cluster named `mycluster` with just a single server node:

```bash
k3d cluster create mycluster
Expand Down
8 changes: 4 additions & 4 deletions docs/internals/defaults.md
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
# Defaults

- multiple master nodes
- by default, when `--master` > 1 and no `--datastore-x` option is set, the first master node (master-0) will be the initializing master node
- the initializing master node will have the `--cluster-init` flag appended
- all other master nodes will refer to the initializing master node via `--server https://<init-node>:6443`
- multiple server nodes
    - by default, when `--servers` > 1 and no `--datastore-x` option is set, the first server node (server-0) will be the initializing server node
- the initializing server node will have the `--cluster-init` flag appended
- all other server nodes will refer to the initializing server node via `--server https://<init-node>:6443`
- API-Ports
- by default, we don't expose any API-Port (no host port mapping)
- kubeconfig
Expand Down
4 changes: 2 additions & 2 deletions docs/internals/networking.md
Original file line number Diff line number Diff line change
Expand Up @@ -14,8 +14,8 @@ Existing networks won't be managed by k3d together with the cluster lifecycle.
### `host` network

When using the `--network` flag to connect to the host network (i.e. `k3d cluster create --network host`),
you won't be able to create more than **one master node**.
An edge case would be one master node (with agent disabled) and one worker node.
you won't be able to create more than **one server node**.
An edge case would be one server node (with agent disabled) and one agent node.

### `bridge` network

Expand Down
2 changes: 1 addition & 1 deletion docs/usage/.pages
Original file line number Diff line number Diff line change
Expand Up @@ -2,5 +2,5 @@ title: Usage
arrange:
- commands.md
- kubeconfig.md
- multimaster.md
- multiserver.md
- guides
Loading

0 comments on commit ec3f10e

Please sign in to comment.