diff --git a/website/config.toml b/website/config.toml index 8c28a749c..e5fc9f867 100644 --- a/website/config.toml +++ b/website/config.toml @@ -125,7 +125,7 @@ archived_version = false # point people to the main doc site. # url_latest_version = "https://clusterlink.net" -latest_stable_version = "v0.3" +latest_stable_version = "v0.4" # Repository configuration (URLs for in-page links to opening issues and suggesting changes) github_repo = "https://github.com/clusterlink-net/clusterlink" @@ -234,6 +234,9 @@ path = "github.com/martignoni/hugo-notice" version = "main-DRAFT" url = "/docs/main" +[[params.versions]] +version = "v0.4" +url = "/docs/v0.4/" [[params.versions]] version = "v0.3" diff --git a/website/content/en/docs/v0.4/_index.md b/website/content/en/docs/v0.4/_index.md new file mode 100644 index 000000000..dd752fb42 --- /dev/null +++ b/website/content/en/docs/v0.4/_index.md @@ -0,0 +1,12 @@ +--- +title: v0.4 +cascade: + version: v0.4 + versName: &name v0.4 + git_version_tag: v0.4.0 + exclude_search: false +linkTitle: *name +simple_list: true +weight: -9999 # Weight for doc version vX.Y should be -(100*X + Y)0 +# For example: v0.2.x => -20 v3.6.7 => -3060. `main` is arbitrarily set to -9999 +--- diff --git a/website/content/en/docs/v0.4/concepts/_index.md b/website/content/en/docs/v0.4/concepts/_index.md new file mode 100644 index 000000000..5ea1086e1 --- /dev/null +++ b/website/content/en/docs/v0.4/concepts/_index.md @@ -0,0 +1,5 @@ +--- +title: Core Concepts +description: Core Concepts of the ClusterLink system +weight: 30 +--- diff --git a/website/content/en/docs/v0.4/concepts/fabric.md b/website/content/en/docs/v0.4/concepts/fabric.md new file mode 100644 index 000000000..fdf39206f --- /dev/null +++ b/website/content/en/docs/v0.4/concepts/fabric.md @@ -0,0 +1,54 @@ +--- +title: Fabric +description: Defining a ClusterLink fabric +weight: 10 +--- + +The concept of a *Fabric* encapsulates a set of cooperating [peers][]. + All peers in a fabric can communicate and may share [services][] + between them, with access governed by [policies][]. + The Fabric acts as a root of trust for peer-to-peer communications (i.e., + it functions as the certificate authority enabling mutual authentication between + peers). + +Currently, the concept of a Fabric is just that - a concept. It is not represented + or backed by any managed resource in a ClusterLink deployment. Once a Fabric is created, + its only relevance is in providing a certificate for use by each peer's gateways. + One could potentially consider a more elaborate implementation where a central + management entity explicitly deals with Fabric life cycle, association of peers to + a fabric, etc. The role of this central management component in ClusterLink is currently + delegated to users who are responsible for coordinating the transfer of certificates + between peers, out of band. + +## Initializing a new fabric + +### Prerequisites + +The following sections assume that you have access to the `clusterlink` CLI and one or more + peers (i.e., clusters) where you'll deploy ClusterLink. The CLI can be downloaded + from the ClusterLink [releases page on GitHub][]. + +### Create a new fabric CA + +To create a new fabric certificate authority (CA), execute the following CLI command: + +```sh +clusterlink create fabric --name +``` + +This command will create the CA files `cert.pem` and `key.pem` in a directory named . + The `--name` option is optional, and by default, "default_fabric" will be used. 
+ While you will need access to these files to create the peers' gateway certificates later, + the private key file should be protected and not shared with others. + +## Related tasks + +Once a Fabric has been created and initialized, you can proceed with configuring + [peers][]. For a complete, end-to-end use case, please refer to the + [iperf tutorial][]. + +[peers]: {{< relref "peers" >}} +[services]: {{< relref "services" >}} +[policies]: {{< relref "policies" >}} +[releases page on GitHub]: https://github.com/clusterlink-net/clusterlink/releases/tag/{{% param git_version_tag %}} +[iperf tutorial]: {{< relref "../tutorials/iperf" >}} diff --git a/website/content/en/docs/v0.4/concepts/peers.md b/website/content/en/docs/v0.4/concepts/peers.md new file mode 100644 index 000000000..16b431186 --- /dev/null +++ b/website/content/en/docs/v0.4/concepts/peers.md @@ -0,0 +1,214 @@ +--- +title: Peers +description: Defining ClusterLink peers as part of a fabric +weight: 20 +--- + +A *Peer* represents a location, such as a Kubernetes cluster, participating in a + [fabric][]. Each peer may host one or more [services][] + that it may wish to share with other peers. A peer is managed by a peer administrator, + who is responsible for running the ClusterLink control and data planes. The + administrator will typically deploy the ClusterLink components by configuring + the [Deployment Custom Resource (CR)][operator-cr]. They may also wish to define + coarse-grained access policies, in accordance with high-level corporate + policies (e.g., "production peers should only communicate with other production peers"). + +Once a peer has been added to a fabric, it can communicate with any other peer + belonging to it. All configuration relating to service sharing (e.g., the exporting + and importing of services, and the setting of fine-grained application policies) can be + done with lowered privileges (e.g., by users, such as application owners). Remote peers are + represented by Peer Custom Resources (CRs). Each peer CR instance + defines a remote cluster and the network endpoints of its ClusterLink gateways. + +## Prerequisites + +The following sections assume that you have access to the `clusterlink` CLI and one or more + peers (i.e., clusters) where you'll deploy ClusterLink. The CLI can be downloaded + from the ClusterLink [releases page on GitHub][]. + They also assume that you have access to the [previously created fabric][] + CA files. + +## Initializing a new peer + +{{< notice warning >}} +Creating a new peer is a **fabric administrator** level operation and should be appropriately + protected. +{{< /notice >}} + +### Create a new peer certificate + +To create a new peer certificate belonging to a fabric, confirm that the fabric + Certificate Authority (CA) files are available in the current working directory, + and then execute the following CLI command: + +```sh +clusterlink create peer-cert --name --fabric +``` + +{{< notice tip >}} +The fabric CA files (certificate and private key) are expected to be in a subdirectory + (i.e., `.//cert.pem` and `.//key.pem`). +{{< /notice >}} + +This will create the certificate and private key files (`cert.pem` and + `key.pem`, respectively) of the new peer. By default, the files are + created in a subdirectory named `` under the subdirectory of the fabric ``. + You can override the default by setting the `--output ` option.
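For illustration only, here is a sketch of the working-directory layout you might expect after creating a fabric CA and certificates for two peers; the fabric and peer names (`demo-fabric`, `peer1`, `peer2`) are hypothetical and not CLI defaults:

```sh
clusterlink create fabric --name demo-fabric
clusterlink create peer-cert --name peer1 --fabric demo-fabric
clusterlink create peer-cert --name peer2 --fabric demo-fabric

# Expected layout (illustrative):
# demo-fabric/
#   cert.pem    # fabric CA certificate (shared with peer administrators)
#   key.pem     # fabric CA private key - keep this secret
#   peer1/
#     cert.pem  # peer1 gateway certificate
#     key.pem   # peer1 gateway private key
#   peer2/
#     cert.pem
#     key.pem
```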
+ +{{< notice info >}} +You will need the CA certificate (but **not** the CA private key) and the peer's certificate + and private key pair in the next step. They can be provided out of band (e.g., over email) to the + peer administrator or by any other means for secure transfer of sensitive data. +{{< /notice >}} + +## Deploy ClusterLink to a new peer + +{{< notice info >}} +This operation is typically done by a local **peer administrator**, usually different + than the **fabric administrator**. +{{< /notice >}} + +Before proceeding, ensure that the following files (created in the previous step) are + available in the current working directory: + + 1. CA certificate; + 1. peer certificate; and + 1. peer private key. + +### Install the ClusterLink deployment operator + +Install the ClusterLink operator by running the following command: + +```sh +clusterlink deploy peer --name --fabric +``` + +The command assumes that kubectl is set to the correct context and credentials + and that the certificates were created in respective sub-directories + under the current working directory. + If they were not, add the `--path ` CLI option to set the correct path. + +This command will deploy the ClusterLink deployment CRDs using the current + `kubectl` context. The operation requires cluster administrator privileges + in order to install CRDs into the cluster. + The ClusterLink operator is installed to the `clusterlink-operator` namespace. + The CA, peer certificate, and private key are set as K8s secrets + in the namespace where ClusterLink components are installed, which by default is + `clusterlink-system`. You can confirm the successful completion of this step + using the following commands: + +```sh +kubectl get crds +kubectl get secret --namespace clusterlink-system +``` + +{{% expand summary="Example output" %}} + +```sh +$ kubectl get crds +NAME CREATED AT +accesspolicies.clusterlink.net 2024-04-07T12:08:24Z +exports.clusterlink.net 2024-04-07T12:08:24Z +imports.clusterlink.net 2024-04-07T12:08:24Z +instances.clusterlink.net 2024-04-07T12:08:24Z +peers.clusterlink.net 2024-04-07T12:08:24Z +privilegedaccesspolicies.clusterlink.net 2024-04-07T12:08:24Z + +$ kubectl get secret --namespace clusterlink-system +NAME TYPE DATA AGE +cl-controlplane Opaque 2 19h +cl-dataplane Opaque 2 19h +cl-ca Opaque 1 19h +cl-peer Opaque 1 19h +``` + +{{% /expand %}} + +### Deploy ClusterLink via the operator and ClusterLink CR + +After the operator is installed, you can deploy ClusterLink by applying + the ClusterLink CR. This will cause the ClusterLink operator to + attempt reconciliation of the actual and intended ClusterLink deployment. + By default, the operator will install the ClusterLink control and data plane + components into a dedicated and privileged namespace (defaults to `clusterlink-system`). + Configurations affecting the entire peer, such as the list of known peers, are also maintained + in the same namespace. + +Refer to the [operator documentation][] for a description of the ClusterLink CR fields. + +## Add or remove peers + +{{< notice info >}} +This operation is typically done by a local **peer administrator**, usually different + than the **fabric administrator**. +{{< /notice >}} + +Managing peers is done by creating, deleting and updating peer CRs + in the dedicated ClusterLink namespace (typically, `clusterlink-system`). Peers are + added to the ClusterLink namespace by the peer administrator. 
Information + regarding peer gateways and attributes is communicated out of band (e.g., provided + by the fabric or remote peer administrator over email). In the future, these may + be configured via a management plane. + +{{% expand summary="Peer Custom Resource" %}} + +```go +type Peer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec PeerSpec `json:"spec"` + Status PeerStatus `json:"status,omitempty"` +} + + +type PeerSpec struct { + Gateways []Endpoint `json:"gateways"` +} + +type PeerStatus struct { + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +type Endpoint struct { + Host string `json:"host"` + Port uint16 `json:"port"` +} +``` + +{{% /expand %}} + +There are two fundamental attributes in the peer CRD: the peer name and the list of + ClusterLink gateway endpoints through which the remote peer's services are available. + Peer names are unique and must align with the Subject name present in their certificate + during connection establishment. The name is used by importers in referencing an export + (see [services][] for details). + +Gateway endpoint would typically be implemented via a `NodePort` or `LoadBalancer` + K8s service. A `NodePort` service would typically be used in local deployments + (e.g., when running in kind clusters during development) and a `LoadBalancer` service + would be used in cloud-based deployments. These can be automatically configured and + created via the [ClusterLink CR][]. + The peer's status section includes a `Reachable` condition indicating whether the peer is currently reachable, + and in case it is not reachable, the last time it was. + +{{% expand summary="Example YAML for `kubectl apply -f `" %}} +{{< readfile file="/static/files/peer_crd_sample.yaml" code="true" lang="yaml" >}} +{{% /expand %}} + +## Related tasks + +Once a peer has been created and initialized with the ClusterLink control and data + planes as well as one or more remote peers, you can proceed with configuring + [services][] and [policies][]. + For a complete end-to-end use case, refer to the [iperf tutorial][]. + +[fabric]: {{< relref "fabric" >}} +[previously created fabric]: {{< relref "fabric#create-a-new-fabric-ca" >}} +[services]: {{< relref "services" >}} +[policies]: {{< relref "policies" >}} +[releases page on GitHub]: https://github.com/clusterlink-net/clusterlink/releases/tag/{{% param git_version_tag %}} +[operator-cr]: {{< relref "../tasks/operator#deploy-cr-instance" >}} +[operator documentation]: {{< relref "../tasks/operator#commandline-flags" >}} +[ClusterLink CR]: {{< relref "peers#deploy-clusterlink-via-the-operator-and-clusterlink-cr" >}} +[iperf tutorial]: {{< relref "../tutorials/iperf" >}} diff --git a/website/content/en/docs/v0.4/concepts/policies.md b/website/content/en/docs/v0.4/concepts/policies.md new file mode 100644 index 000000000..d8ac5cafe --- /dev/null +++ b/website/content/en/docs/v0.4/concepts/policies.md @@ -0,0 +1,183 @@ +--- +title: Access Policies +description: Controlling service access across peers +weight: 40 +--- + +Access policies allow users and administrators fine-grained control over + which client workloads may access which service. This is an important security + mechanism for applying [micro-segmentation][], which is a basic requirement of [zero-trust][] + systems. Another zero-trust principle, "Deny by default / Allow by exception," is also + addressed by ClusterLink's access policies: a connection without an explicit policy allowing it + will be dropped. 
Access policies can also be used for enforcing corporate security rules, + as well as segmenting the fabric into trust zones. + +ClusterLink's access policies are based on attributes that are attached to + [peers][], [services][] and client workloads. + Each attribute is a key/value pair, similar to how [labels][] + are used in Kubernetes. This approach, called ABAC (Attribute Based Access Control), + allows referring to a set of entities in a single policy, rather than listing individual + entity names. Using attributes is safer, more resilient to changes, and easier to + control and audit. At the moment, a limited set of attributes is available to use. + We plan to enrich this set in the future. + +Every instance of an access policy either allows or denies a given set of connections. +This set is defined by specifying the sources and destinations of these connections. +Sources are defined in terms of the attributes attached to the client workloads. +Destinations are defined in terms of the attributes attached to the target services. +Both client workloads and target services may inherit some attributes from their hosting peer. + +There are two tiers of access policies in ClusterLink. The high-priority tier + is intended for cluster/peer administrators to set access rules which cannot be + overridden by cluster users. High-priority policies are controlled by the + `PrivilegedAccessPolicy` CRD, and are cluster scoped (i.e., have no namespace). + Regular policies are intended for cluster users, such as application developers + and owners, and are controlled by the `AccessPolicy` CRD. Regular policies are + namespaced, and have an effect in their namespace only. That is, they do not + affect connections to/from other namespaces. + +For a connection to be established, both the ClusterLink gateway on the client + side and the ClusterLink gateway on the service side must allow the connection. + Each gateway (independently) follows these steps to decide if the connection is allowed: + +1. All instances of `PrivilegedAccessPolicy` in the cluster with `deny` action are considered. + If the connection matches any of them, the connection is dropped. +1. All instances of `PrivilegedAccessPolicy` in the cluster with `allow` action are considered. + If the connection matches any of them, the connection is allowed. +1. All instances of `AccessPolicy` in the relevant namespace with `deny` action are considered. + If the connection matches any of them, the connection is dropped. +1. All instances of `AccessPolicy` in the relevant namespace with `allow` action are considered. + If the connection matches any of them, the connection is allowed. +1. If the connection matched none of the above policies, the connection is dropped. + +**Note**: The relevant namespace for a given connection is the namespace of + the corresponding Import CR on the client side and the namespace of the corresponding + Export on the service side. + +## Prerequisites + +The following assumes that you have `kubectl` access to two or more clusters where ClusterLink + has already been [deployed and configured][]. + +### Creating access policies + +Recall that a connection is dropped if it does not match any access policy. + Hence, for a connection to be allowed, an access policy with an `allow` action + must be created on both sides of the connection. + Creating an access policy is accomplished by creating an `AccessPolicy` CR in + the relevant namespace (see note above). 
+ Creating a high-priority access policy is accomplished by creating a `PrivilegedAccessPolicy` CR. + Instances of `PrivilegedAccessPolicy` have no namespace and affect the entire cluster. + +{{% expand summary="PrivilegedAccessPolicy and AccessPolicy Custom Resources" %}} + +```go +type PrivilegedAccessPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AccessPolicySpec `json:"spec,omitempty"` +} + +type AccessPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AccessPolicySpec `json:"spec,omitempty"` +} + +type AccessPolicySpec struct { + Action AccessPolicyAction `json:"action"` + From WorkloadSetOrSelectorList `json:"from"` + To WorkloadSetOrSelectorList `json:"to"` +} + +type AccessPolicyAction string + +const ( + AccessPolicyActionAllow AccessPolicyAction = "allow" + AccessPolicyActionDeny AccessPolicyAction = "deny" +) + +type WorkloadSetOrSelectorList []WorkloadSetOrSelector + +type WorkloadSetOrSelector struct { + WorkloadSets []string `json:"workloadSets,omitempty"` + WorkloadSelector *metav1.LabelSelector `json:"workloadSelector,omitempty"` +} +``` + +{{% /expand %}} + +The `AccessPolicySpec` defines the following fields: + +- **Action** (string, required): whether the policy allows or denies the + specified connection. Value must be either `allow` or `deny`. +- **From** (WorkloadSetOrSelector array, required): specifies connection sources. + A connection's source must match one of the specified sources to be matched by the policy. +- **To** (WorkloadSetOrSelectorList array, required): specifies connection destinations. + A connection's destination must match one of the specified destinations to be matched by the policy. + +A `WorkloadSetOrSelector` object has two fields; exactly one of them must be specified. + +- **WorkloadSets** (string array, optional) - an array of predefined sets of workload. + Currently not supported. +- **WorkloadSelector** (LabelSelector, optional) - a [Kubernetes label selector][] + defining a set of client workloads or a set of services, based on their + attributes. An empty selector matches all workloads/services. + +### Example policies +The following policy allows all incoming/outgoing connections in the `default` namespace. + +```yaml +apiVersion: clusterlink.net/v1alpha1 +kind: AccessPolicy +metadata: + name: allow-all + namespace: default +spec: + action: allow + from: + - workloadSelector: {} + to: + - workloadSelector: {} +``` + +The following privileged policy denies incoming/outgoing connections originating from a cluster with a Peer named `testing`. +```yaml +apiVersion: clusterlink.net/v1alpha1 +kind: PrivilegedAccessPolicy +metadata: + name: deny-from-testing +spec: + action: deny + from: + - workloadSelector: + matchLabels: + peer.clusterlink.net/name: testing + to: + - workloadSelector: {} +``` + +More examples are available on our repo under [examples/policies][]. + +### Available attributes +The following attributes (labels) are set by ClusterLink on each connection request, and can be used in access policies within a `workloadSelector`. +#### Peer attributes - set when running `clusterlink deploy peer` +* `peer.clusterlink.net/name` - Peer name +#### Client attributes - derived from Pod info, as retrieved from Kubernetes API. 
Only relevant in the `from` section of access policies +* `client.clusterlink.net/namespace` - Pod's Namespace +* `client.clusterlink.net/service-account` - Pod's Service Account +* `client.clusterlink.net/labels.` - Pod's labels - an attribute for each Pod label with key `` +#### Service attributes - derived from the Export CR. Only relevant in the `to` section of access policies +* `export.clusterlink.net/name` - Export name +* `export.clusterlink.net/namespace` - Export namespace + +[peers]: {{< relref "peers" >}} +[services]: {{< relref "services" >}} +[micro-segmentation]: https://en.wikipedia.org/wiki/Microsegmentation_(network_security) +[zero-trust]: https://en.wikipedia.org/wiki/Zero_trust_security_model +[labels]: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +[deployed and configured]: {{< relref "../getting-started/users#setup" >}} +[Kubernetes label selector]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#labelselector-v1-meta +[examples/policies]: https://github.com/clusterlink-net/clusterlink/tree/main/examples/policies diff --git a/website/content/en/docs/v0.4/concepts/services.md b/website/content/en/docs/v0.4/concepts/services.md new file mode 100644 index 000000000..1b078779c --- /dev/null +++ b/website/content/en/docs/v0.4/concepts/services.md @@ -0,0 +1,258 @@ +--- +title: Services +description: Sharing services +weight: 30 +--- + +ClusterLink uses services as the unit of sharing between peers. + One or more peers can expose an (internal) K8s Service to + be consumed by other [peers][] in the [fabric][]. + A service is exposed by creating an *Export* CR referencing it in the + source cluster. Similarly, the exported service can be made accessible to workloads + in a peer by defining an *Import* CR in the destination cluster[^KEP-1645]. + Thus, service sharing is an explicit operation. Services are not automatically + shared by peers in the fabric. Note that the exporting cluster must be + [configured as a peer][] of the importing cluster. + +{{< notice info >}} +Service sharing is done on a per-namespace basis and does not require cluster-wide privileges. + It is intended to be used by application owners having access to their own namespaces only. +{{< /notice >}} + +A service is shared using a logical name. The logical name does not have to match + the actual Kubernetes Service name in the exporting cluster. Exporting a service + does not expose cluster Pods or their IP addresses to the importing clusters. + Any load balancing and scaling decisions are kept local in the exporting cluster. + This reduces the amount, frequency and sensitivity of information shared between + clusters. Similarly, the imported service can have any arbitrary name in the + destination cluster, allowing independent choice of naming. + +Orchestration of service sharing is the responsibility of users wishing to + export or import services, and any relevant information (e.g., the exported service + name and namespace) must be communicated out of band. In the future, this could + be done by a centralized management plane. + + + + + +## Prerequisites + +The following assumes that you have `kubectl` access to two or more clusters where ClusterLink + has already been [deployed and configured][]. + +### Exporting a service + +In order to make a service potentially accessible by other clusters, it must be + explicitly configured for remote access via ClusterLink.
Exporting is + accomplished by creating an Export CR in the **same** namespace + as the service being exposed. The CR acts as a marker for enabling + remote access to the service via ClusterLink. + +{{% expand summary="Export Custom Resource" %}} + +```go +type Export struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ExportSpec `json:"spec,omitempty"` + Status ExportStatus `json:"status,omitempty"` +} + +type ExportSpec struct { + Host string `json:"host,omitempty"` + Port uint16 `json:"port,omitempty"` +} + +type ExportStatus struct { + Conditions []metav1.Condition `json:"conditions,omitempty"` +} +``` + +{{% /expand %}} + +The ExportSpec defines the following fields: + +- **Host** (string, optional): the name of the service being exported. The service + must be defined in the same namespace as the Export CR. If empty, + the export shall refer to a Kubernetes Service with the same name as the instance's + `metadata.name`. It is an error to refer to a non-existent service or one that is + not present in the local namespace. The error will be reflected in the CRD's status. +- **Port** (integer, required): the port number being exposed. If you wish to export + a multi-port service[^multiport], you will need to define multiple Exports using + the same `Host` value and a different `Port` each. This is aligned with ClusterLink's + principle of being explicit in sharing and limiting exposure whenever possible. + +Note that exporting a Service does not automatically make is accessible to other + peers, but only enables *potential* access. To complete service sharing, you must + define at least one [access control policy][concept-policy] that allows + access in the exporting cluster. + In addition, users in consuming clusters must still explicitly configure + [service imports][] and [policies][] in their respective namespaces. + +{{% expand summary="Example YAML for `kubectl apply -f `" %}} + +```yaml +apiVersion: clusterlink.net/v1alpha1 +kind: Export +metadata: + name: iperf3-server + namespace: default +spec: + port: 5000 +``` + +{{% /expand %}} + +### Importing a service + +Exposing remote services to a peer is accomplished by creating an Import CR + to a namespace. The CR represents the imported service and its + available backends across all peers. In response to an Import CR, ClusterLink + control plane will create a local Kubernetes Service selecting the ClusterLink + data plane Pods. The use of native Kubernetes constructs, allows ClusterLink + to work with any compliant cluster and CNI, transparently. + +The Import instance creates the service endpoint in the same namespace as it is + defined in. The created service will have the Import's `metadata.Name`. This + allows maintaining independent names for services between peers. Alternately, + you may use the same name for the import and related source exports. + You can define multiple Import CRs for the same set of Exports in different + namespaces. These are independent of each other. 
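As a quick sanity check, the sketch below shows one way to verify that an Import was reconciled into a local Service; the name and namespace are taken from the iperf3 example appearing later in this section and are illustrative:

```sh
# The Import and the Service it creates share the same name and namespace.
kubectl get imports.clusterlink.net iperf3-server -n default   # inspect the Import and its status conditions
kubectl get service iperf3-server -n default                   # Service created by the ClusterLink control plane
```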
+ +{{% expand summary="Import Custom Resource" %}} + +```go +type Import struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ImportSpec `json:"spec"` + Status ImportStatus `json:"status,omitempty"` +} + +type ImportSpec struct { + Port uint16 `json:"port"` + TargetPort uint16 `json:"targetPort,omitempty"` + Sources []ImportSource `json:"sources"` + LBScheme string `json:"lbScheme"` +} + +type ImportSource struct { + Peer string `json:"peer"` + ExportName string `json:"exportName"` + ExportNamespace string `json:"exportNamespace"` +} + +type ImportStatus struct { + Conditions []metav1.Condition `json:"conditions,omitempty"` +} +``` + +{{% /expand %}} + +The ImportSpec defines the following fields: + +- **Port** (integer, required): the imported, user facing, port number defined + on the created service object. +- **TargetPort** (integer, optional): this is the internal listening port + used by the ClusterLink data plane pods to represent the remote services. Typically the + choice of TargetPort should be left to the ClusterLink control plane, allowing + it to select a random and non-conflicting port, but there may be cases where + you wish to assume responsibility for port selection (e.g., a-priori define + local cluster Kubernetes NetworkPolicy object instances). This may result in + [port conflicts][] as is done for NodePort services. +- **Sources** (source array, required): references to remote exports providing backends + for the Import. Each reference names a different export through the combination of: + - *Peer* (string, required): name of ClusterLink peer where the export is defined. + - *ExportNamespace* (string, required): name of the namespace on the remote peer where + the export is defined. + - *ExportName* (string, required): name of the remote export. +- **LBScheme** (string, optional): load balancing method to select between different + Sources defined. The default policy is `random`, but you could override it to use + `round-robin` or `static` (i.e., fixed) assignment. + + + +As with exports, importing a service does not automatically make it accessible by + workloads, but only enables *potential* access. To complete service sharing, + you must define at least one [access control policy][] that + allows access in the importing cluster. To grant access, a connection must be + evaluated to "allow" by both egress (importing cluster) and ingress (exporting + cluster) policies. + +{{% expand summary="Example YAML for `kubectl apply -f `" %}} + +```yaml +apiVersion: clusterlink.net/v1alpha1 +kind: Import +metadata: + name: iperf3-server + namespace: default +spec: + port: 5000 + sources: + - exportName: iperf3-server + exportNamespace: default + peer: server +``` + +{{% /expand %}} + + +In certain cases, a service can be imported without creating another corresponding service at the imported side, but merging it along with a pre-existing service with the same `name`. This can be specified by adding the label `import.clusterlink.net/merge`, which is set to `true`. This would trigger the creation of an endpointslice which services requests to the imported service (by setting `kubernetes.io/service-name` to the imported service name). 
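Assuming the merge example shown below, one hedged way to confirm this behavior is to look for the EndpointSlice that ClusterLink associates with the pre-existing service:

```sh
# The EndpointSlice is tied to the imported service via the kubernetes.io/service-name label.
kubectl get endpointslices -n default -l kubernetes.io/service-name=iperf3-server
```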
+ +{{% expand summary="Example YAML for `kubectl apply -f `" %}} + +```yaml +apiVersion: clusterlink.net/v1alpha1 +kind: Import +metadata: + name: iperf3-server + namespace: default + labels: + import.clusterlink.net/merge: "true" +spec: + port: 5000 + sources: + - exportName: iperf3-server + exportNamespace: default + peer: server +``` +{{% /expand %}} + +## Related tasks + +Once a service is exported and imported by one or more clusters, you should + configure [policies][] governing its access. + For a complete end-to-end use case, refer to the [iperf tutorial][]. + +[^KEP-1645]: While using similar terminology as the Kubernetes Multicluster Service + enhancement proposal ([MCS KEP][]), the ClusterLink implementation intentionally + differs from and is not compliant with the KEP (e.g., there is no `ClusterSet` + and "name sameness" assumption). + +[^multiport]: ClusterLink intentionally does not expose all service ports, as + typically only a small subset in a multi-port service is meant to be user + accessible, and other ports are service internal (e.g., ports used for internal + service coordination and replication). + +[fabric]: {{< relref "fabric" >}} +[peers]: {{< relref "peers" >}} +[configured as a peer]: {{< relref "peers#add-or-remove-peers" >}} +[policies]: {{< relref "policies" >}} +[service imports]: #importing-a-service +[port conflicts]: https://kubernetes.io/docs/concepts/services-networking/service/#avoid-nodeport-collisions +[access control policy]: {{< relref "policies" >}} +[iperf tutorial]: {{< relref "../tutorials/iperf" >}} +[deployed and configured]: {{< relref "../getting-started/users#setup" >}} +[MCS KEP]: https://github.com/kubernetes/enhancements/tree/master/keps/sig-multicluster/1645-multi-cluster-services-api diff --git a/website/content/en/docs/v0.4/doc-contribution/_index.md b/website/content/en/docs/v0.4/doc-contribution/_index.md new file mode 100644 index 000000000..b59dba19f --- /dev/null +++ b/website/content/en/docs/v0.4/doc-contribution/_index.md @@ -0,0 +1,96 @@ +--- +title: Contribution Guidelines +weight: 60 +description: How to contribute to the website +--- + +We use [Hugo][] to format and generate our [website][], the [Docsy][] theme + for styling and site structure, and [Netlify][] to manage the deployment of the site. + Hugo is an open-source static site generator that provides us with templates, + content organization in a standard directory structure, and a website generation + engine. We write the pages in Markdown (or HTML if you want), and Hugo wraps + them up into a website. + +All submissions, including submissions by project members, require review. We + use GitHub pull requests for this purpose. Consult + [GitHub Help][] for more information on using pull requests. + +## Quick start with Netlify + +Here's a quick guide to updating the docs. It assumes you're familiar with the + GitHub workflow and you're happy to use the automated preview of your website + updates: + +1. Fork the [ClusterLink repo][] on GitHub. +1. The documentation site is under the `website` directory. +1. Make your changes and send a pull request (PR). +1. If you're not yet ready for a review, add "WIP" to the PR name to indicate + it's a work in progress. (**Don't** add the Hugo property + "draft = true" to the page front matter, because that prevents the + auto-deployment of the content preview described in the next point). +1. Wait for the automated PR workflow to do some checks.
When it's ready, + you should see a check named like this: **Pages changed - clusterlink-net** +1. Click **Details** to the right of "Pages changed" to see a preview + of your updates. Previews will be deployed to `https://deploy-preview---clusterlink-net.netlify.app/` +1. Continue updating your doc and pushing your changes until you're happy with + the content. +1. When you are ready for a review, add a comment to the PR, and remove any + "WIP" markers. + +## Updating a single page + +If you've just spotted something you'd like to change while using the docs, Docsy has a + shortcut for you: + +1. Click **Edit this page** in the top right hand corner of the page. +1. If you don't already have an up-to-date fork of the project repo, you are prompted to + get one - click **Fork this repository and propose changes** or **Update your Fork** to + get an up-to-date version of the project to edit. The appropriate page in your fork is + displayed in edit mode. +1. Follow the rest of the [Quick start with Netlify][] process above to make, preview, + and propose your changes. + +## Previewing your changes locally + +If you want to run your own local Hugo server to preview your changes as you work: + + + +1. Follow the instructions in [Getting started][] to install Hugo + and any other tools you need. You'll need at least **Hugo version 0.110** (we recommend + using the most recent available version), and it must be the **extended** version, + which supports SCSS. +1. Run `hugo server --gc` in the `website` directory. By default your site will be available + at http://localhost:1313/. Now that you're serving your site locally, Hugo will watch + for changes to the content and automatically refresh your site. +1. Continue with the usual GitHub workflow to edit files, commit them, push the + changes up to your fork, and create a pull request. + +## Creating an issue + +If you've found a problem in the docs, but you're not sure how to fix it yourself, + please create an [issue][] in the ClusterLink repo. + You can also create an issue about a specific page by clicking the **Create Issue** + button in the top right hand corner of the page. + +## Useful resources + +* [Docsy user guide][]: All about Docsy, including how it manages navigation, + look and feel, and multi-language support. +* [Hugo documentation][]: Comprehensive reference for Hugo. +* [Github Hello World!][]: A basic introduction to GitHub concepts and workflow. 
+ + + +[Hugo]: https://gohugo.io/ +[website]: https://clusterlink.net +[Docsy]: https://github.com/google/docsy +[Netlify]: https://www.netlify.com/ +[GitHub Help]: https://help.github.com/articles/about-pull-requests/ +[Quick start with Netlify]: #quick-start-with-netlify +[Getting started]: {{< relref "../getting-started/" >}} +[ClusterLink repo]: https://github.com/clusterlink-net/clusterlink +[issue]: https://github.com/clusterlink-net/clusterlink/issues +[Docsy user guide]: https://www.docsy.dev/docs/ +[Hugo documentation]: https://gohugo.io/documentation/ +[Github Hello World!]: https://guides.github.com/activities/hello-world/ diff --git a/website/content/en/docs/v0.4/getting-started/_index.md b/website/content/en/docs/v0.4/getting-started/_index.md new file mode 100644 index 000000000..cb828f264 --- /dev/null +++ b/website/content/en/docs/v0.4/getting-started/_index.md @@ -0,0 +1,14 @@ +--- +title: Getting Started +description: Getting started guides for users and developers +weight: 20 +--- + +The following sections provide quick start guides for [users][] and [developers][]. + +If you're a content author who wishes to contribute additional documentation or guides, + please refer to the [contribution guidelines][]. + +[users]: {{< relref "users" >}} +[developers]: {{< relref "developers" >}} +[contribution guidelines]: {{< relref "../doc-contribution/" >}} diff --git a/website/content/en/docs/v0.4/getting-started/developers.md b/website/content/en/docs/v0.4/getting-started/developers.md new file mode 100644 index 000000000..3722293d5 --- /dev/null +++ b/website/content/en/docs/v0.4/getting-started/developers.md @@ -0,0 +1,81 @@ +--- +title: Developers +description: Setting up a development environment and contributing code +weight: 24 +--- + +This guide provides a quick start for developers wishing to contribute to ClusterLink. + +## Setting up a development environment + +Here are the key steps for setting up your developer environment, making a change and testing it: + +1. Install required tools (you can either do this manually or use the project's + [devcontainer specification][]) + - [Go][] version 1.20 or higher. + - [Git][] command line. + - We recommend using a [local development environment][] such as kind/kubectl for + local development and integration testing. + - Additional development packages, such as `goimports` and `golangci-lint`. See the full list in + [post-create.sh][]. +1. Clone our repository with `git clone git@github.com:clusterlink-net/clusterlink.git`. +1. Run `make test-prereqs` and install any missing required development tools. +1. Run `make build` to ensure the code builds as expected. This will pull in all needed + dependencies. + +## Making code changes + +- If you are planning on contributing back to the project, please carefully read the + [contribution guide][]. +- We follow [GitHub's Standard Fork & Pull Request Workflow][]. + +All contributed code should pass precommit checks such as linting and other tests. These + are run automatically as part of the CI process on every pull request. You may wish to + run these locally, before initiating a PR: + +```sh +$ make precommit +$ make unit-tests tests-e2e-k8s +$ go test ./... +``` + +Output of the end-to-end tests is saved to `/tmp/clusterlink-k8s-tests`. In case + of failures, you can also (re-)run individual tests by name: + +```sh +$ go test -v ./tests/e2e/k8s -testify.m TestConnectivity +``` + +### Tests in CICD + +All pull requests undergo automated testing before being merged. 
This includes, for example, + linting, end-to-end tests and DCO validation. Logs in CICD default to `info` level, and + can be increased to `debug` by setting environment variable `DEBUG=1`. You can also enable + debug logging from the UI when re-running a CICD job, by selecting "enable debug logging". + +## Release management + +ClusterLink releases, including container images and binaries, are built based + on version tags in github. Applying a tag that's prefixed by `-v` will automatically + trigger a new release through the github [release][] action. + +To aid in auto-generation of changelog from commits, please kindly mark all PR's + with one or more of the following labels: + +- `ignore-for-release`: PR should not be included in the changelog report. + This label should not be used together with any other label in this list. +- `documentation`: PR is a documentation update. +- `bugfix`: PR is fixing a bug in existing code. +- `enhancement`: PR provides new or extended functionality. +- `breaking-change`: PR introduces a breaking change in user facing aspects + (e.g., API or CLI). This label may be used in addition to other labels (e.g., + `bugfix` or `enhancement`). + +[devcontainer specification]: https://github.com/clusterlink-net/clusterlink/tree/main/.devcontainer/dev +[Go]: https://go.dev/doc/install +[Git]: https://git-scm.com/downloads +[local development environment]: https://kubernetes.io/docs/tasks/tools/ +[post-create.sh]: https://github.com/clusterlink-net/clusterlink/blob/main/.devcontainer/dev/post-create.sh +[contribution guide]: https://github.com/clusterlink-net/clusterlink/blob/main/CONTRIBUTING.md +[GitHub's Standard Fork & Pull Request Workflow]: https://gist.github.com/Chaser324/ce0505fbed06b947d962 +[release]: https://github.com/clusterlink-net/clusterlink/blob/main/.github/workflows/release.yml diff --git a/website/content/en/docs/v0.4/getting-started/users.md b/website/content/en/docs/v0.4/getting-started/users.md new file mode 100644 index 000000000..b9531319b --- /dev/null +++ b/website/content/en/docs/v0.4/getting-started/users.md @@ -0,0 +1,128 @@ +--- +title: Users +description: Installing and configuring a basic ClusterLink deployment +weight: 22 +--- + +This guide will give you a quick start on installing and setting up ClusterLink on a Kubernetes cluster. + +## Prerequisites + +Before you start, you must have access to a Kubernetes cluster. +For example, you can set up a local environment using [kind][]. + +## Installation + +1. {{< anchor install-cli>}}To install ClusterLink CLI on Linux or Mac, use the installation script: + + ```sh + curl -L https://github.com/clusterlink-net/clusterlink/releases/download/{{% param git_version_tag %}}/clusterlink.sh | sh - + ``` + +1. Check the installation by running the command: + + ```sh + clusterlink --version + ``` + +{{% expand summary="Download specific CLI version" %}} + To install a specific version of the ClusterLink CLI, use the URL path of the version release: + For example, to download version v0.2.1: + + ```sh + curl -L https://github.com/clusterlink-net/clusterlink/releases/download/v0.2.1/clusterlink.sh | sh - + ``` + +{{% /expand %}} + +## Setup + +To set up ClusterLink on a Kubernetes cluster, follow these steps: + +1. 
{{< anchor create-fabric-ca >}}Create the fabric's certificate authority (CA) certificate and private key: + + ```sh + clusterlink create fabric --name + ``` + + The ClusterLink fabric is defined as all K8s clusters (peers) that install ClusterLink gateways + and can share services between the clusters, enabling communication among those services. + This command will create the CA files `cert.pem` and `key.pem` in a directory named . + The `--name` option is optional, and by default, "default_fabric" will be used. + +1. {{< anchor create-peer-certs >}}Create a peer (cluster) certificate: + + ```sh + clusterlink create peer-cert --name --fabric + ``` + + This command will create the certificate files `cert.pem` and `key.pem` + in a directory named ``/``. + The `--path ` flag can be used to change the directory location. + Here too, the `--name` option is optional, and by default, "default_fabric" will be used. + +**All the peer certificates in the fabric should be created from the same fabric CA files in step 1.** + +1. {{< anchor install-cl-operator >}}Install ClusterLink deployment: + + ```sh + clusterlink deploy peer --name --fabric + ``` + + This command will deploy the ClusterLink operator on the `clusterlink-operator` namespace + and convert the peer certificates to secrets in the namespace where ClusterLink components will be installed. + By default, the `clusterlink-system` namespace is used. + In addition, it will create a ClusterLink instance custom resource object and deploy it to the operator. + The operator will then create the ClusterLink components in the `clusterlink-system` namespace and enable ClusterLink in the cluster. + The command assumes that `kubectl` is set to the correct peer (K8s cluster) + and that the certificates were created by running the previous command on the same working directory. + If they were not, use the flag `--path ` for pointing to the working directory + that was used in the previous command. + The `--fabric` option is optional, and by default, "default_fabric" will be used. + To install a specific image of ClusterLink use the `--tag ` flag. + For more details and deployment configuration see [ClusterLink deployment operator][]. +{{< notice note >}} +To set up ClusterLink on another cluster, create another set of peer certificates (step 2). +Deploy ClusterLink in a console with access to the cluster (step 3). +{{< /notice >}} + +## Try it out + +Check out the [ClusterLink tutorials][] for setting up multi-cluster connectivity + for applications using two or more clusters. + +## Uninstall ClusterLink + +1. To remove a ClusterLink instance from the cluster, please delete the ClusterLink instance custom resource. + The ClusterLink operator will subsequently remove all instance components (control-plane, data-plane, and ingress service). + + ```sh + kubectl delete instances.clusterlink.net -A --all + ``` + +2. To completely remove ClusterLink from the cluster, including the operator, CRDs, namespaces, and instances, + use the following command: + + ```sh + clusterlink delete peer --name peer1 + ``` + +{{< notice note >}} +This command using the current `kubectl` context. +{{< /notice >}} + +3. 
To uninstall the ClusterLink CLI, use the following command: + + ```sh + rm `which clusterlink` + ``` + +## Links for further information + +* [Kind](https://kind.sigs.k8s.io/) +* [ClusterLink deployment operator][] +* [ClusterLink tutorials][] + +[Kind]: https://kind.sigs.k8s.io/docs/user/quick-start/ +[ClusterLink deployment operator]: {{< relref "../tasks/operator/" >}} +[ClusterLink tutorials]: {{< relref "../tutorials/" >}} diff --git a/website/content/en/docs/v0.4/overview/_index.md b/website/content/en/docs/v0.4/overview/_index.md new file mode 100644 index 000000000..b2fc22859 --- /dev/null +++ b/website/content/en/docs/v0.4/overview/_index.md @@ -0,0 +1,51 @@ +--- +title: Overview +description: A high level overview of ClusterLink +weight: 10 +--- + +## What is ClusterLink? + + +ClusterLink simplifies the connection between application services that are located in different domains, + networks, and cloud infrastructures. + +## When should I use it? + + + +ClusterLink is useful when multiple parties are collaborating across administrative boundaries. + With ClusterLink, information sharing policies can be defined, customized, and programmatically + accessed around the world by the right people for maximum productivity while optimizing network + performance and security. + +## How does it work? + +ClusterLink uses a set of unprivileged gateways serving connections to and from K8s services according to policies + defined through the management APIs. ClusterLink gateways establish mTLS connections between them and + continuously exchange control-plane information, forming a secure distributed control plane. + In addition, ClusterLink gateways represent the remotely deployed services to applications running in a local cluster, + acting as L4 proxies. On connection establishment, the control plane components in the source and the target ClusterLink + gateways validate and establish the connection based on specified policies. + +## Why is it unique? + +The distributed control plane and the fine-grained connection establishment control are the main + advantages of ClusterLink over some of its competitors. Performance evaluation on clusters deployed in the same + Google Cloud zone shows that ClusterLink can outperform some existing solutions by almost 2× while providing + fine-grained authorization on a per connection basis. + +## Where should I go next? + +* [Getting Started][]: Get started with ClusterLink. +* [Tutorials][]: Check out some examples and step-by-step instructions for different use cases. + +[Getting Started]: {{< relref "../getting-started/" >}} +[Tutorials]: {{< relref "../tutorials/" >}} diff --git a/website/content/en/docs/v0.4/tasks/_index.md b/website/content/en/docs/v0.4/tasks/_index.md new file mode 100644 index 000000000..0271126bf --- /dev/null +++ b/website/content/en/docs/v0.4/tasks/_index.md @@ -0,0 +1,5 @@ +--- +title: Tasks +description: How to do single specific targeted activities with ClusterLink +weight: 35 +--- diff --git a/website/content/en/docs/v0.4/tasks/operator.md b/website/content/en/docs/v0.4/tasks/operator.md new file mode 100644 index 000000000..d958a640d --- /dev/null +++ b/website/content/en/docs/v0.4/tasks/operator.md @@ -0,0 +1,195 @@ +--- +title: Deployment Operator +description: Usage and configuration of the ClusterLink deployment operator +weight: 50 +--- + +The ClusterLink deployment operator allows easy deployment of ClusterLink to a K8s cluster. 
+The preferred deployment approach involves utilizing the ClusterLink CLI, +which automatically deploys both the ClusterLink operator and ClusterLink components. +However, it's important to note that ClusterLink deployment necessitates peer certificates for proper functioning. +Detailed instructions for creating these peer certificates can be found in the [user guide][]. + +## The common use case + +The common use case for deploying ClusterLink on a cloud-based K8s cluster (i.e., EKS, GKE, IKS, etc.) is using the CLI command: + +```sh +clusterlink deploy peer --name --fabric +``` + +The command assumes that `kubectl` is configured to access the correct peer (K8s cluster) +and that certificate files are placed in the current working directory. +If they are not, use the flag `--path ` to reference the directory where certificate files are stored. +The command deploys the ClusterLink operator in the `clusterlink-operator` namespace and converts +the peer certificates to secrets in the `clusterlink-system` namespace, where ClusterLink components will be installed. +By default, these components are deployed in the `clusterlink-system` namespace. +In addition, the command will create a ClusterLink instance custom resource object and deploy it to the operator. +The operator will then create the ClusterLink components in the `clusterlink-system` namespace and enable ClusterLink in the cluster. +Additionally, a `LoadBalancer` service is created to allow cross-cluster connectivity using ClusterLink. + +## Deployment for Kind environment + +To deploy ClusterLink in a local environment like Kind, you can use the following command: + +```sh +clusterlink deploy peer --name --fabric --ingress=NodePort --ingress-port=30443 +``` + +The Kind environment doesn't allocate an external IP to the `LoadBalancer` service by default. +In this case, we will use a `NodePort` service to establish multi-cluster connectivity using ClusterLink. +Alternatively, you can install MetalLB to add a Load Balancer implementation to the Kind cluster. See instructions +[here][]. +The port flag is optional, and by default, ClusterLink will use any allocated NodePort that the Kind cluster provides. +However, it is more convenient to use a fixed setting NodePort for peer configuration, as demonstrated in the +[ClusterLink Tutorials][]. + +## Deployment of specific version + +To deploy a specific ClusterLink image version use the `tag` flag: + +```sh +clusterlink deploy peer --name --fabric --tag +``` + +The `tag` flag will change the tag version in the ClusterLink instance custom resource object that will be deployed to the operator. + +## Deployment using manually defined ClusterLink custom resource + +The deployment process can be split into two steps: + +1. Deploy only ClusterLink operator: + + ```sh + clusterlink deploy peer --name --fabric --start operator + ``` + + The `start` flag will deploy only the ClusterLink operator and the certificate's secrets as described in the [common use case][] above. + +2. {{< anchor deploy-cr-instance >}} Deploy a ClusterLink instance custom resource object: + + ```yaml + kubectl apply -f - < + dataplane: + type: envoy + replicas: 1 + logLevel: info + namespace: clusterlink-system + EOF + ``` + +## Full list of the deployment configuration flags + +The `deploy peer` {{< anchor commandline-flags >}} command has the following flags: + +1. 
Flags that are mapped to the corresponding fields in the ClusterLink custom resource: + + - **namespace:** This field determines the namespace where the ClusterLink components are deployed. + By default, it uses `clusterlink-system`, which is created by the `clusterlink deploy peer` command. + If a different namespace is desired, that namespace must already exist. + - **dataplane:** This field determines the type of ClusterLink dataplane, with supported values `go` or `envoy`. By default, it uses `envoy`. + - **dataplane-replicas:** This field determines the number of ClusterLink dataplane replicas. By default, it uses 1. + - **ingress:** This field determines the type of ingress service to expose ClusterLink deployment, + with supported values: `LoadBalancer`, `NodePort`, or `None`. By default, it uses `LoadBalancer`. + - **ingress-port:** This field determines the port number of the external service. + By default, it uses port `443` for the `LoadBalancer` ingress type. + For the `NodePort` ingress type, the port number will be allocated by Kubernetes. + In case the user changes the default value, it is the user's responsibility to ensure the port number is valid and available for use. + - **ingress-annotations:** This field adds annotations to the ingress service. + The flag can be repeated to add several annotations. For example: `--ingress-annotations load-balancer-type=nlb --ingress-annotations load-balancer-name=cl-nlb`. + - **log-level:** This field determines the severity log level for all the components (controlplane and dataplane). + By default, it uses the `info` log level. + - **container-registry:** This field determines the container registry from which to pull the project images. + By default, it uses `ghcr.io/clusterlink-net`. + - **tag:** This field determines the version of project images to pull. By default, it uses the `latest` version. + +2. General deployment flags: + - **start:** Determines which components to deploy and start in the cluster. + `all` (default) starts the ClusterLink operator, converts the peer certificates to secrets, + and deploys the operator ClusterLink custom resource to create the ClusterLink components. + `operator` deploys only the ClusterLink operator, converts the peer certificates to secrets, + and creates a custom resource example file that can be deployed to the operator. + `none` doesn't deploy the operator and creates a `k8s.yaml` file that allows deploying ClusterLink without the operator. + - **path:** Represents the path where the peer and fabric certificates are stored. + By default, it uses the current working directory. + +## Manual Deployment without the operator + +To deploy ClusterLink without using the operator, follow the instructions below: + +1. Create a `k8s.yaml` file to deploy ClusterLink without the operator: + + ```sh + clusterlink deploy peer --name --fabric --start none + ``` + + The `k8s.yaml` file contains the deployment of all ClusterLink components and can be configured for various purposes, such as adding sidecar pods or managing the ClusterLink certificates. + +1. Deploy ClusterLink CRDs: + + ```sh + curl -L https://github.com/clusterlink-net/clusterlink/archive/refs/heads/main.tar.gz | tar -xzO clusterlink-main/config/crds | kubectl apply -f - + ``` + +1. Apply the `k8s.yaml` file to the cluster: + + ```sh + kubectl apply -f .///k8s.yaml + ``` + +## Manual Deployment without CLI + +To deploy ClusterLink without using the CLI, follow the instructions below: + +1.
Download the configuration files (CRDs, operator RBACs, and deployment) from GitHub: + + ```sh + git clone git@github.com:clusterlink-net/clusterlink.git + ``` + +2. Install ClusterLink CRDs: + + ```sh + kubectl apply --recursive -f ./clusterlink/config/crds + ``` + +3. Install the ClusterLink operator: + + ```sh + kubectl apply --recursive -f ./clusterlink/config/operator + ``` + +4. Convert the peer and fabric certificates to secrets: + + ```sh + export CERTS = + kubectl create secret generic cl-ca -n clusterlink-system --from-file=ca=$CERTS /cert.pem + kubectl create secret generic cl-peer -n clusterlink-system --from-file=ca.pem=$CERTS /cert.pem --from-file=cert.pem=$CERTS /peer1/cert.pem --from-file=key.pem=$CERTS /peer1/key.pem + kubectl create secret generic cl-controlplane -n clusterlink-system --from-file=cert=$CERTS /peer1/controlplane/cert.pem --from-file=key=$CERTS /peer1/controlplane/key.pem + kubectl create secret generic cl-dataplane -n clusterlink-system --from-file=cert=$CERTS /peer1/dataplane/cert.pem --from-file=key=$CERTS /peer1/dataplane/key.pem + ``` + +5. Deploy a ClusterLink K8s custom resource object: + + ```yaml + kubectl apply -f - <}} +[ClusterLink tutorials]: {{< relref "../tutorials/" >}} +[here]: https://kind.sigs.k8s.io/docs/user/loadbalancer/ +[common use case]: #the-common-use-case diff --git a/website/content/en/docs/v0.4/tasks/private-networks/frp-system.png b/website/content/en/docs/v0.4/tasks/private-networks/frp-system.png new file mode 100644 index 000000000..4d2d89231 Binary files /dev/null and b/website/content/en/docs/v0.4/tasks/private-networks/frp-system.png differ diff --git a/website/content/en/docs/v0.4/tasks/private-networks/index.md b/website/content/en/docs/v0.4/tasks/private-networks/index.md new file mode 100644 index 000000000..ed3ddfe76 --- /dev/null +++ b/website/content/en/docs/v0.4/tasks/private-networks/index.md @@ -0,0 +1,378 @@ +--- +title: Private Networks +description: Running ClusterLink in a private network, behind a NAT or firewall. +--- + + +This task involves connecting ClusterLink behind a NAT or firewall. +To connect the ClusterLink gateway, each peer should have a public IP that will be reachable from other peers to enable cross-cluster communications. However, this is not always possible if clusters are behind corporate NAT or firewalls that allow outgoing connections only. In such scenarios, we will use the [Fast Reverse Proxy][] (FRP) open-source project to create reverse tunnels and connect all clusters behind a private network. With FRP, only one IP needs to be public to connect all the clusters in the fabric. + +To enable connectivity between the ClusterLink gateways, we need to set up one FRP server with a public IP and create an FRP client for each ClusterLink gateway that connects to the server. + +In this task, we will use the FRP Kubernetes image to create the FRP server and clients. We will create one FRP server and two FRP clients: one to create a reverse tunnel and provide access to the server cluster behind a NAT, and another to connect to the FRP server and provide access to the cluster behind the NAT. + +drawing +
+
+ The FRP server can support multiple clusters behind a private network. However, it is also possible to establish multiple FRP servers, with one for each cluster. If a cluster gateway has a public IP, communication can occur without using FRP. +This task includes instructions on how to connect the peers using FRP. Instructions for creating full connectivity between applications and remote services can be found in the [Nginx tutorial][] and [iPerf3 tutorial][]. + +In this task, we will extend the peer connectivity instructions to use FRP. + +## Create FRP Server + +In this step, we will create the FRP server on the same cluster that we use for ClusterLink (the `client cluster`), but it can be on any peer or Kubernetes cluster. + +1. Create a namespace for all FRP components: + + *Client cluster*: + + ```sh + echo " + apiVersion: v1 + kind: Namespace + metadata: + name: frp + " | kubectl apply -f - + ``` + + *Server cluster*: + + ```sh + echo " + apiVersion: v1 + kind: Namespace + metadata: + name: frp + " | kubectl apply -f - + ``` + +2. Create a configmap that contains the FRP server configuration: + + *Client cluster*: + + ```sh + echo " + apiVersion: v1 + kind: ConfigMap + metadata: + name: frps-config + namespace: frp + data: + frps.toml: | + bindPort = 4443 + " | kubectl apply -f - + ``` + + In this setup, we expose the FRP server pod on port `4443`. +3. Create the FRP server deployment: + + *Client cluster*: + + ```sh + echo " + apiVersion: apps/v1 + kind: Deployment + metadata: + name: frps + namespace: frp + spec: + replicas: 1 + selector: + matchLabels: + app: frps + template: + metadata: + labels: + app: frps + spec: + hostNetwork: true + containers: + - name: frps + image: snowdreamtech/frps + volumeMounts: + - name: frps-config-volume + mountPath: /etc/frp/frps.toml + subPath: frps.toml + volumes: + - name: frps-config-volume + configMap: + name: frps-config + restartPolicy: Always + " | kubectl apply -f - + ``` + +4. Create an ingress service to expose the FRP server: + + *Client cluster*: + + ```sh + echo " + apiVersion: v1 + kind: Service + metadata: + name: clusterlink-frps + namespace: frp + spec: + type: NodePort + selector: + app: frps + ports: + - port: 4443 + targetPort: 4443 + nodePort: 30444 + " | kubectl apply -f - + ``` + + In this case, we use a `NodePort` service, but other types, such as `LoadBalancer`, can also be used. + +## Create FRP Clients + +1. Set the `FRP_SERVER_IP` and `FRP_SECRET_KEY` variables for each cluster: + + *Client cluster*: + + ```sh + export FRP_SERVER_IP=`docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' client-control-plane` + export FRP_SECRET_KEY=`echo $USER | sha256sum | head -c 10` + ``` + + *Server cluster*: + + ```sh + export FRP_SERVER_IP=`docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' client-control-plane` + export FRP_SECRET_KEY=`echo $USER | sha256sum | head -c 10` + ``` + + The `FRP_SECRET_KEY` should be identical across all clusters. + +2. 
Deploy FRP client configuration on each cluster: + + *Client cluster*: + + ```sh + echo " + apiVersion: v1 + kind: ConfigMap + metadata: + name: frpc-config + namespace: frp + data: + frpc.toml: | + # Set server address + serverAddr = \""${FRP_SERVER_IP}"\" + serverPort = 30444 + + [[visitors]] + name = \"clusterlink-client-to-server-visitor\" + type = \"stcp\" + serverName = \"clusterlink-server\" + secretKey = \""${FRP_SECRET_KEY}"\" + bindAddr = \"::\" + bindPort = 6002 + " | kubectl apply -f - + ``` + + *Server cluster*: + + ```sh + echo " + apiVersion: v1 + kind: ConfigMap + metadata: + name: frpc-config + namespace: frp + data: + frpc.toml: | + # Set server address + serverAddr = \""${FRP_SERVER_IP}"\" + serverPort = 30444 + + [[proxies]] + name = \"clusterlink-server\" + type = \"stcp\" + localIP = \"clusterlink.clusterlink-system.svc.cluster.local\" + localPort = 443 + secretKey = \""${FRP_SECRET_KEY}"\" + + " | kubectl apply -f - + ``` + + For each configuration, we first set the FRP server's IP address and port number. + + In the server cluster, we create a `proxy` that connects to the local ClusterLink gateway and establishes a reverse tunnel to the FRP server, allowing other FRP clients to connect to it. + In the client cluster, we create an FRP `visitor` that specifies which other peers this client wants to connect to. (You need to create a visitor for each peer you want to connect to.) For more details about FRP configuration, you can refer to the [FRP configuration documentation][]. For an example of connecting multiple clusters behind a private network, see the [ClusterLink FRP example][]. + +3. Create a K8s service that connects to the FRP client `visitor`, allowing ClusterLink to connect to it: + + *Client cluster*: + + ```sh + echo ' + apiVersion: v1 + kind: Service + metadata: + name: server-peer-clusterlink + namespace: frp + spec: + type: ClusterIP + selector: + app: frpc + ports: + - port: 6002 + targetPort: 6002 + ' | kubectl apply -f - + ``` + +4. Create FRP client deployment for each cluster: + + *Client cluster*: + + ```sh + echo " + apiVersion: apps/v1 + kind: Deployment + metadata: + name: frpc + namespace: frp + spec: + replicas: 1 + selector: + matchLabels: + app: frpc + template: + metadata: + labels: + app: frpc + spec: + containers: + - name: frpc + image: snowdreamtech/frpc + volumeMounts: + - name: frpc-config-volume + mountPath: /etc/frp + volumes: + - name: frpc-config-volume + configMap: + name: frpc-config + restartPolicy: Always + " | kubectl apply -f - + ``` + + *Server cluster*: + + ```sh + echo " + apiVersion: apps/v1 + kind: Deployment + metadata: + name: frpc + namespace: frp + spec: + replicas: 1 + selector: + matchLabels: + app: frpc + template: + metadata: + labels: + app: frpc + spec: + containers: + - name: frpc + image: snowdreamtech/frpc + volumeMounts: + - name: frpc-config-volume + mountPath: /etc/frp + volumes: + - name: frpc-config-volume + configMap: + name: frpc-config + restartPolicy: Always + " | kubectl apply -f - + ``` + +## Create Peer CRD + +1. Create a Peer CRD that points to the server cluster. The Peer CRD should connect to a Kubernetes service that points to the `visitor` port in the frpc client. 
 + + *Client cluster*: + + ```sh + echo " + apiVersion: clusterlink.net/v1alpha1 + kind: Peer + metadata: + name: server + namespace: clusterlink-system + spec: + gateways: + - host: server-peer-clusterlink.frp.svc.cluster.local + port: 6002 + " | kubectl apply -f - + ``` + + To verify that connectivity between the peers is established correctly, + check whether the condition `PeerReachable` has been added to the peer CR status in each cluster. + + ```sh + kubectl describe peers.clusterlink.net -A + ``` + + {{% expand summary="Sample output" %}} + + ``` + Name: client + Namespace: clusterlink-system + Labels: + Annotations: + API Version: clusterlink.net/v1alpha1 + Kind: Peer + Metadata: + Creation Timestamp: 2024-05-28T12:47:33Z + Generation: 1 + Resource Version: 807 + UID: 1fdeafff-707a-43e2-bb3a-826f003a42ed + Spec: + Gateways: + Host: 172.18.0.4 + Port: 30443 + Status: + Conditions: + Last Transition Time: 2024-05-28T12:47:33Z + Message: + Reason: Heartbeat + Status: True + Type: PeerReachable + ``` + + {{% /expand %}} + +## Connect Application Services + +After creating the peer connectivity using FRP, continue to the next step of exporting services, importing services, and creating policies, as described in the [Nginx tutorial][] and [iPerf3 tutorial][]. + +## Cleanup + +To remove all FRP components, delete the `frp` namespace: + +*Client cluster*: + +```sh +kubectl delete namespace frp +``` + +*Server cluster*: + +```sh +kubectl delete namespace frp +``` + +This step removes only the FRP components. To remove all ClusterLink components, please refer to the full instructions in the tutorials. + +[Nginx tutorial]: {{< relref "../../tutorials/nginx/_index.md" >}} +[iPerf3 tutorial]: {{< relref "../../tutorials/iperf/_index.md" >}} +[Fast Reverse Proxy]: https://github.com/fatedier/frp +[FRP configuration documentation]: https://github.com/fatedier/frp?tab=readme-ov-file#example-usage +[ClusterLink FRP example]: https://github.com/clusterlink-net/clusterlink/tree/main/demos/frp \ No newline at end of file diff --git a/website/content/en/docs/v0.4/tasks/relay/index.md b/website/content/en/docs/v0.4/tasks/relay/index.md new file mode 100644 index 000000000..e7905bab4 --- /dev/null +++ b/website/content/en/docs/v0.4/tasks/relay/index.md @@ -0,0 +1,165 @@ +--- +title: Relay Cluster +description: Running basic connectivity between nginx server and client through a relay cluster using ClusterLink. +--- + +This task involves creating multi-hop connectivity between a client and a server using relay clusters. +Multi-hop connectivity using a relay may be necessary for several reasons, such as: + +1. When the client needs to use an indirect connection due to network path limitations. +2. Using multiple relays allows for explicit selection between multiple network paths without impacting or changing the underlying routing information. +3. Using multiple relays provides failover for the network paths. + +In this task, we'll establish multi-hop connectivity across clusters using ClusterLink to access a remote nginx server. +In this case, the client will not access the service directly in the server cluster but will pass through a relay cluster. +The example uses three clusters: + +1) Client cluster - runs ClusterLink along with a client. +2) Relay cluster - runs ClusterLink and connects the services between the client and the server (see the sketch after this list). +3) Server cluster - runs ClusterLink along with an nginx server. 
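Conceptually, the relay chains an import and an export of the same service: it imports `nginx` from the server cluster and then re-exports the imported service so that the client cluster can import it in turn. The following is only a sketch of the two custom resources applied on the relay later in this task (assuming the nginx service is exported on port 80, as in the nginx tutorial); the actual manifests are applied from the demo repository below.

```yaml
# On the relay: import nginx from the server peer...
apiVersion: clusterlink.net/v1alpha1
kind: Import
metadata:
  name: nginx
  namespace: default
spec:
  port: 80
  sources:
    - exportName: nginx
      exportNamespace: default
      peer: server
---
# ...and re-export the imported service toward other peers (the client cluster).
apiVersion: clusterlink.net/v1alpha1
kind: Export
metadata:
  name: nginx
  namespace: default
spec:
  port: 80
```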
 + +System illustration: + +drawing + +## Run the basic nginx tutorial + +This is an extension of the basic [nginx tutorial][]. Please run it first to set up the nginx server and the client cluster. + +## Create a relay cluster with ClusterLink + +1. Open a third terminal for the relay cluster: + + *Relay cluster*: + + ```sh + cd nginx-tutorial + kind create cluster --name=relay + ``` + +1. Set up `KUBECONFIG` for the relay cluster: + + *Relay cluster*: + + ```sh + kubectl config use-context kind-relay + cp ~/.kube/config $PWD/config-relay + export KUBECONFIG=$PWD/config-relay + ``` + +1. Create peer certificates for the relay: + + *Relay cluster*: + + ```sh + clusterlink create peer-cert --name relay + ``` + + {{< notice note >}} + The relay cluster certificates should use the same Fabric CA files as the server and the client. + {{< /notice >}} + +1. Deploy ClusterLink on the relay cluster: + + *Relay cluster*: + + ```sh + clusterlink deploy peer --name relay --ingress=NodePort --ingress-port=30443 + ``` + +## Enable cross-cluster access using the relay + +1. Establish connectivity between the relay and the server by adding the server peer, importing the nginx service from the server, + and adding the allow policy. + + *Relay cluster*: + + ```sh + export TEST_FILES=https://raw.githubusercontent.com/clusterlink-net/clusterlink/main/demos/nginx/testdata + export SERVER_IP=`docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' server-control-plane` + curl -s $TEST_FILES/clusterlink/peer-server.yaml | envsubst | kubectl apply -f - + kubectl apply -f $TEST_FILES/clusterlink/import-nginx.yaml + kubectl apply -f $TEST_FILES/clusterlink/allow-policy.yaml + ``` + +1. Establish connectivity between the relay and the client by adding the relay peer to the client cluster, + exporting the nginx service in the relay, and importing it into the client. + + *Relay cluster*: + + ```sh + kubectl apply -f $TEST_FILES/clusterlink/export-nginx.yaml + ``` + + *Client cluster*: + + ```sh + export TEST_FILES=https://raw.githubusercontent.com/clusterlink-net/clusterlink/main/demos/nginx/testdata + export RELAY_IP=`docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' relay-control-plane` + curl -s $TEST_FILES/clusterlink/peer-relay.yaml | envsubst | kubectl apply -f - + kubectl apply -f $TEST_FILES/clusterlink/import-nginx-relay.yaml + ``` + +## Test service connectivity + +Test the connectivity between the clusters (through the relay) with a batch job running the ```curl``` command: + +*Client cluster*: + +```sh +kubectl apply -f $TEST_FILES/nginx-relay-job.yaml +``` + +Verify the job succeeded: + +```sh +kubectl logs jobs/curl-nginx-relay-homepage +``` + +{{% readfile file="/static/files/tutorials/nginx/nginx-output.md" %}} + +## Cleanup + +1. Delete the kind clusters: + *Client cluster*: + + ```sh + kind delete cluster --name=client + ``` + + *Server cluster*: + + ```sh + kind delete cluster --name=server + ``` + + *Relay cluster*: + + ```sh + kind delete cluster --name=relay + ``` + +1. Remove the tutorial directory: + + ```sh + cd .. && rm -rf nginx-tutorial + ``` + +1. 
Unset the environment variables: + *Client cluster*: + + ```sh + unset KUBECONFIG TEST_FILES + ``` + + *Server cluster*: + + ```sh + unset KUBECONFIG TEST_FILES + ``` + + *Relay cluster*: + + ```sh + unset KUBECONFIG TEST_FILES + ``` + +[nginx tutorial]: {{< relref "../../tutorials/nginx/_index.md" >}} diff --git a/website/content/en/docs/v0.4/tasks/relay/nginx-relay.png b/website/content/en/docs/v0.4/tasks/relay/nginx-relay.png new file mode 100644 index 000000000..110b308c6 Binary files /dev/null and b/website/content/en/docs/v0.4/tasks/relay/nginx-relay.png differ diff --git a/website/content/en/docs/v0.4/tutorials/_index.md b/website/content/en/docs/v0.4/tutorials/_index.md new file mode 100644 index 000000000..1b9f89903 --- /dev/null +++ b/website/content/en/docs/v0.4/tutorials/_index.md @@ -0,0 +1,5 @@ +--- +title: Tutorials +description: Guided ClusterLink tutorials +weight: 40 +--- \ No newline at end of file diff --git a/website/content/en/docs/v0.4/tutorials/bookinfo/bookinfo.png b/website/content/en/docs/v0.4/tutorials/bookinfo/bookinfo.png new file mode 100644 index 000000000..d360870b5 Binary files /dev/null and b/website/content/en/docs/v0.4/tutorials/bookinfo/bookinfo.png differ diff --git a/website/content/en/docs/v0.4/tutorials/bookinfo/index.md b/website/content/en/docs/v0.4/tutorials/bookinfo/index.md new file mode 100644 index 000000000..df6f4bed2 --- /dev/null +++ b/website/content/en/docs/v0.4/tutorials/bookinfo/index.md @@ -0,0 +1,380 @@ +--- +title: BookInfo +description: Running BookInfo application with different policies +--- + + +The tutorial sets up the [Istio BookInfo application][] in different clusters. +The tutorial demonstrates the use of AccessPolicy and PrivilegedAccessPolicy custom resources. +The tutorial shows different load-balancing policies, such as random, round robin, and static destination. +For more details, see the [policies documentation][]. +This tutorial creates three kind clusters: + +* Two productpage microservices (application frontend) and a details microservice run on the first cluster. +* The reviews-v2 (displays ratings with black stars) and rating microservices run on the second cluster. +* The reviews-v3 (displays ratings with red stars) and rating microservices run on the third cluster. + +System illustration: + +![BookInfo application across three clusters](bookinfo.png) + +## Install ClusterLink CLI + +{{% readfile file="/static/files/tutorials/cli-installation.md" %}} + +## Initialize clusters + +In this tutorial, we set up a local environment using [kind][]. + +To set up three kind clusters: + +1. Install kind using the [kind installation guide][]. +2. Create a directory for all the tutorial files: + + ```sh + mkdir bookinfo-tutorial && cd bookinfo-tutorial + ``` + +3. Create three kind clusters: + + ```sh + kind create cluster --name=client + kind create cluster --name=server1 + kind create cluster --name=server2 + ``` + + {{< notice note >}} + kind uses the prefix `kind`, so the names of the created clusters will be **kind-client**, **kind-server1**, and **kind-server2**. 
+ {{< /notice >}} + +## Deploy BookInfo application + +Install the BookInfo application on the clusters: + +```sh +export BOOKINFO_FILES=https://raw.githubusercontent.com/clusterlink-net/clusterlink/main/demos/bookinfo/manifests +kubectl config use-context kind-client +kubectl apply -f $BOOKINFO_FILES/product/product.yaml +kubectl apply -f $BOOKINFO_FILES/product/product2.yaml +kubectl apply -f $BOOKINFO_FILES/product/details.yaml + +kubectl config use-context kind-server1 +kubectl apply -f $BOOKINFO_FILES/review/review-v2.yaml +kubectl apply -f $BOOKINFO_FILES/review/rating.yaml + +kubectl config use-context kind-server2 +kubectl apply -f $BOOKINFO_FILES/review/review-v3.yaml +kubectl apply -f $BOOKINFO_FILES/review/rating.yaml +``` + +## Deploy ClusterLink + +1. Create the fabric and peer certificates and deploy ClusterLink to the clusters: + + *Client cluster*: + + ```sh + clusterlink create fabric + + kubectl config use-context kind-client + clusterlink create peer-cert --name client + clusterlink deploy peer --name client --ingress=NodePort --ingress-port=30443 + + kubectl config use-context kind-server1 + clusterlink create peer-cert --name server1 + clusterlink deploy peer --name server1 --ingress=NodePort --ingress-port=30443 + + kubectl config use-context kind-server2 + clusterlink create peer-cert --name server2 + clusterlink deploy peer --name server2 --ingress=NodePort --ingress-port=30443 + ``` + + {{< notice note >}} + This tutorial uses NodePort to create an external access point for the kind clusters. + By default `deploy peer` creates an ingress of type LoadBalancer, + which is more suitable for Kubernetes clusters running in the cloud. + {{< /notice >}} + +2. Verify that the ClusterLink control and data plane components are running. + + It may take a few seconds for the deployments to be successfully created. + + ```sh + kubectl rollout status deployment cl-controlplane -n clusterlink-system + kubectl rollout status deployment cl-dataplane -n clusterlink-system + ``` + + {{% expand summary="Sample output" %}} + + ```sh + deployment "cl-controlplane" successfully rolled out + deployment "cl-dataplane" successfully rolled out + ``` + + {{% /expand %}} + +## Enable cross-cluster access + +In this step, we enable connectivity access for the BookInfo application + by connecting the productpage service (client) to the reviews-v2 service (server1) + and reviews-v3 (server2). We establish connections between the peers, export the reviews service on the server side, + import the reviews service on the client side, and create a policy to allow the connection. 
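The peer manifests applied in the next step are fetched from the demo repository and piped through `envsubst`, so their contents are not shown inline. They are expected to look roughly like the following Peer CR (a sketch only — the actual demo file may differ); `${SERVER1_IP}` is substituted with the address exported below, and port 30443 matches the NodePort configured by `clusterlink deploy peer` above.

```yaml
apiVersion: clusterlink.net/v1alpha1
kind: Peer
metadata:
  name: server1
  namespace: clusterlink-system
spec:
  gateways:
    - host: ${SERVER1_IP}   # filled in by envsubst
      port: 30443           # NodePort used for the kind clusters in this tutorial
```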
+ +{{% readfile file="/static/files/tutorials/envsubst.md" %}} + + ```sh + kubectl config use-context kind-client + export SERVER1_IP=`docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' server1-control-plane` + curl -s $BOOKINFO_FILES/clusterlink/peer-server1.yaml | envsubst | kubectl apply -f - + export SERVER2_IP=`docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' server2-control-plane` + curl -s $BOOKINFO_FILES/clusterlink/peer-server2.yaml | envsubst | kubectl apply -f - + kubectl apply -f $BOOKINFO_FILES/clusterlink/import-reviews.yaml + kubectl apply -f $BOOKINFO_FILES/clusterlink/allow-policy.yaml + + kubectl config use-context kind-server1 + export CLIENT_IP=`docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' client-control-plane` + curl -s $BOOKINFO_FILES/clusterlink/peer-client.yaml | envsubst | kubectl apply -f - + kubectl apply -f $BOOKINFO_FILES/clusterlink/export-reviews.yaml + kubectl apply -f $BOOKINFO_FILES/clusterlink/allow-policy.yaml + + kubectl config use-context kind-server2 + export CLIENT_IP=`docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' client-control-plane` + curl -s $BOOKINFO_FILES/clusterlink/peer-client.yaml | envsubst | kubectl apply -f - + kubectl apply -f $BOOKINFO_FILES/clusterlink/export-reviews.yaml + kubectl apply -f $BOOKINFO_FILES/clusterlink/allow-policy.yaml + ``` + +## BookInfo test + +To run the BookInfo application use a Firefox web browser to connect the productpage microservice: + + ```sh + kubectl config use-context kind-client + firefox http://$CLIENT_IP:30001/productpage + firefox http://$CLIENT_IP:30002/productpage + ``` + +{{< notice note >}} +By default, a round robin policy is set. +{{< /notice >}} + +## Apply privileged access policy + +In the previous steps, an unprivileged access policy was set to allow connectivity. +To enforce high-priority policy use the `PrivilegedAccessPolicy` CR. +In this example, we enforce that the productpage service can access only reviews-v3 from server2, +and deny all services from server1: + +{{< tabpane text=true >}} +{{% tab header="File" %}} + +```sh +kubectl config use-context kind-client +kubectl apply -f $BOOKINFO_FILES/clusterlink/deny-server1-policy.yaml +``` + +{{% /tab %}} +{{% tab header="Full CR" %}} + +```sh +echo " +apiVersion: clusterlink.net/v1alpha1 +kind: PrivilegedAccessPolicy +metadata: + name: deny-from-server1 +spec: + action: deny + from: + - workloadSelector: {} + to: + - workloadSelector: { + matchLabels: { + peer.clusterlink.net/name: server1 + } + } +" | kubectl apply -f - +``` + +{{% /tab %}} +{{< /tabpane >}} + +To remove the privileged access policy use the following command: + +{{< tabpane text=true >}} +{{% tab header="File" %}} + +```sh +kubectl delete -f $BOOKINFO_FILES/clusterlink/deny-server1-policy.yaml +``` + +{{% /tab %}} +{{% tab header="Full CR" %}} + +```sh +echo " +apiVersion: clusterlink.net/v1alpha1 +kind: PrivilegedAccessPolicy +metadata: + name: deny-from-server1 +spec: + action: deny + from: + - workloadSelector: {} + to: + - workloadSelector: { + matchLabels: { + peer.clusterlink.net/name: server1 + } + } +" | kubectl delete -f - +``` + +{{% /tab %}} +{{< /tabpane >}} + +For more details regarding policy configuration, see [policies documentation][]. 
 + +## Apply random load-balancing policy + +To apply a random load-balancing policy to the connection to the reviews import: + +{{< tabpane text=true >}} +{{% tab header="File" %}} + +```sh +kubectl config use-context kind-client +kubectl apply -f $BOOKINFO_FILES/clusterlink/import-reviews-lb-random.yaml +``` + +{{% /tab %}} +{{% tab header="Full CR" %}} + +```sh +echo " +apiVersion: clusterlink.net/v1alpha1 +kind: Import +metadata: + name: reviews + namespace: default +spec: + port: 9080 + sources: + - exportName: reviews + exportNamespace: default + peer: server1 + - exportName: reviews + exportNamespace: default + peer: server2 + lbScheme: random + +" | kubectl apply -f - +``` + +{{% /tab %}} +{{< /tabpane >}} + +## Apply static load-balancing policy + +To apply a static policy that selects the first peer in the sources array and uses the other peer for failover cases, + use the following: + +{{< tabpane text=true >}} +{{% tab header="File" %}} + +```sh +kubectl config use-context kind-client +kubectl apply -f $BOOKINFO_FILES/clusterlink/import-reviews-lb-static.yaml +``` + +{{% /tab %}} +{{% tab header="Full CR" %}} + +```sh +echo " +apiVersion: clusterlink.net/v1alpha1 +kind: Import +metadata: + name: reviews + namespace: default +spec: + port: 9080 + sources: + - exportName: reviews + exportNamespace: default + peer: server1 + - exportName: reviews + exportNamespace: default + peer: server2 + lbScheme: static + +" | kubectl apply -f - +``` + +{{% /tab %}} +{{< /tabpane >}} + +## Apply round robin load-balancing policy + +To apply a round robin load-balancing policy (which is used by default) to the connection to the reviews import: + +{{< tabpane text=true >}} +{{% tab header="File" %}} + +```sh +kubectl config use-context kind-client +kubectl apply -f $BOOKINFO_FILES/clusterlink/import-reviews.yaml +``` + +{{% /tab %}} +{{% tab header="Full CR" %}} + +```sh +echo " +apiVersion: clusterlink.net/v1alpha1 +kind: Import +metadata: + name: reviews + namespace: default +spec: + port: 9080 + sources: + - exportName: reviews + exportNamespace: default + peer: server1 + - exportName: reviews + exportNamespace: default + peer: server2 + lbScheme: round-robin + +" | kubectl apply -f - +``` + +{{% /tab %}} +{{< /tabpane >}} + +## Cleanup + +1. Delete the `kind` clusters: + + ```sh + kind delete cluster --name=client + kind delete cluster --name=server1 + kind delete cluster --name=server2 + ``` + +2. Remove the tutorial directory: + + ```sh + cd .. && rm -rf bookinfo-tutorial + ``` + +3. Unset the environment variables: + + ```sh + unset BOOKINFO_FILES CLIENT_IP SERVER1_IP SERVER2_IP + ``` + + +[Istio BookInfo application]: https://istio.io/latest/docs/examples/bookinfo/ +[policies documentation]: {{< relref "../../concepts/policies/_index.md" >}} +[kind installation guide]: https://kind.sigs.k8s.io/docs/user/quick-start +[kind]: https://kind.sigs.k8s.io/ diff --git a/website/content/en/docs/v0.4/tutorials/iperf/index.md b/website/content/en/docs/v0.4/tutorials/iperf/index.md new file mode 100644 index 000000000..af835993d --- /dev/null +++ b/website/content/en/docs/v0.4/tutorials/iperf/index.md @@ -0,0 +1,288 @@ +--- +title: iPerf3 +description: Running basic connectivity between iPerf3 applications across two sites using ClusterLink +--- + +In this tutorial, we'll establish iPerf3 connectivity between two kind clusters using ClusterLink. +The tutorial uses two kind clusters: + +1) Client cluster - runs ClusterLink along with an iPerf3 client. +2) Server cluster - runs ClusterLink along with an iPerf3 server. 
+ +## Install ClusterLink CLI + +{{% readfile file="/static/files/tutorials/cli-installation.md" %}} + +## Initialize clusters + +In this tutorial we set up a local environment using [kind][]. + You can skip this step if you already have access to existing clusters, just be sure to + set KUBECONFIG accordingly. + +To setup two kind clusters: + +1. Install kind using [kind installation guide][]. +1. Create a directory for all the tutorial files: + + ```sh + mkdir iperf3-tutorial + ``` + +1. Open two terminals in the tutorial directory and create a kind cluster in each terminal: + + *Client cluster*: + + ```sh + cd iperf3-tutorial + kind create cluster --name=client + ``` + + *Server cluster*: + + ```sh + cd iperf3-tutorial + kind create cluster --name=server + ``` + + {{< notice note >}} + kind uses the prefix `kind`, so the name of created clusters will be **kind-client** and **kind-server**. + {{< /notice >}} + +1. Setup `KUBECONFIG` on each terminal to access the cluster: + + *Client cluster*: + + ```sh + kubectl config use-context kind-client + cp ~/.kube/config $PWD/config-client + export KUBECONFIG=$PWD/config-client + ``` + + *Server cluster*: + + ```sh + kubectl config use-context kind-server + cp ~/.kube/config $PWD/config-server + export KUBECONFIG=$PWD/config-server + ``` + +{{< notice tip >}} +You can run the tutorial in a single terminal and switch access between the clusters +using `kubectl config use-context kind-client` and `kubectl config use-context kind-server`. +{{< /notice >}} + +## Deploy iPerf3 client and server + +Install iPerf3 (client and server) on the clusters: + +*Client cluster*: + +```sh +export TEST_FILES=https://raw.githubusercontent.com/clusterlink-net/clusterlink/main/demos/iperf3/testdata/manifests +kubectl apply -f $TEST_FILES/iperf3-client/iperf3-client.yaml +``` + +*Server cluster*: + +```sh +export TEST_FILES=https://raw.githubusercontent.com/clusterlink-net/clusterlink/main/demos/iperf3/testdata/manifests +kubectl apply -f $TEST_FILES/iperf3-server/iperf3.yaml +``` + +## Deploy ClusterLink + +{{% readfile file="/static/files/tutorials/deploy-clusterlink.md" %}} + +## Enable cross-cluster access + +In this step, we enable connectivity access between the iPerf3 client and server. + For each step, you have an example demonstrating how to apply the command from a + file or providing the complete custom resource (CR) associated with the command. 
+ +{{% readfile file="/static/files/tutorials/envsubst.md" %}} + +### Set-up peers + +{{% readfile file="/static/files/tutorials/peer.md" %}} + +### Export the iPerf server endpoint + +In the server cluster, export the iperf3-server service: + +*Server cluster*: + +{{< tabpane text=true >}} +{{% tab header="File" %}} + +```sh +kubectl apply -f $TEST_FILES/clusterlink/export-iperf3.yaml +``` + +{{% /tab %}} +{{% tab header="Full CR" %}} + +```sh +echo " +apiVersion: clusterlink.net/v1alpha1 +kind: Export +metadata: + name: iperf3-server + namespace: default +spec: + port: 5000 +" | kubectl apply -f - +``` + +{{% /tab %}} +{{< /tabpane >}} + +### Set-up import + +In the client cluster, import the iperf3-server service from the server cluster: + +*Client cluster*: + +{{< tabpane text=true >}} +{{% tab header="File" %}} + +```sh +kubectl apply -f $TEST_FILES/clusterlink/import-iperf3.yaml +``` + +{{% /tab %}} +{{% tab header="Full CR" %}} + +```sh +echo " +apiVersion: clusterlink.net/v1alpha1 +kind: Import +metadata: + name: iperf3-server + namespace: default +spec: + port: 5000 + sources: + - exportName: iperf3-server + exportNamespace: default + peer: server +" | kubectl apply -f - +``` + +{{% /tab %}} +{{< /tabpane >}} + +### Set-up access policies + +Create access policies on both clusters to allow connectivity: + +*Client cluster*: + +{{< tabpane text=true >}} +{{% tab header="File" %}} + +```sh +kubectl apply -f $TEST_FILES/clusterlink/allow-policy.yaml +``` + +{{% /tab %}} +{{% tab header="Full CR" %}} + +{{% readfile file="/static/files/tutorials/allow-all-policy.md" %}} + +{{% /tab %}} +{{< /tabpane >}} + +*Server cluster*: + +{{< tabpane text=true >}} +{{% tab header="File" %}} + +```sh +kubectl apply -f $TEST_FILES/clusterlink/allow-policy.yaml +``` + +{{% /tab %}} +{{% tab header="Full CR" %}} + +{{% readfile file="/static/files/tutorials/allow-all-policy.md" %}} + +{{% /tab %}} +{{< /tabpane >}} + +For more details regarding policy configuration, see [policies][] documentation. + +## Test service connectivity + +Test the iperf3 connectivity between the clusters: + +*Client cluster*: + +```sh +export IPERF3CLIENT=`kubectl get pods -l app=iperf3-client -o custom-columns=:metadata.name --no-headers` +kubectl exec -i $IPERF3CLIENT -- iperf3 -c iperf3-server --port 5000 +``` + +{{% expand summary="Sample output" %}} + +```sh +Connecting to host iperf3-server, port 5000 +[ 5] local 10.244.0.5 port 51666 connected to 10.96.46.198 port 5000 +[ ID] Interval Transfer Bitrate Retr Cwnd +[ 5] 0.00-1.00 sec 639 MBytes 5.36 Gbits/sec 0 938 KBytes +[ 5] 1.00-2.00 sec 627 MBytes 5.26 Gbits/sec 0 938 KBytes +[ 5] 2.00-3.00 sec 628 MBytes 5.26 Gbits/sec 0 938 KBytes +[ 5] 3.00-4.00 sec 635 MBytes 5.33 Gbits/sec 0 938 KBytes +[ 5] 4.00-5.00 sec 630 MBytes 5.29 Gbits/sec 0 938 KBytes +[ 5] 5.00-6.00 sec 636 MBytes 5.33 Gbits/sec 0 938 KBytes +[ 5] 6.00-7.00 sec 639 MBytes 5.36 Gbits/sec 0 938 KBytes +[ 5] 7.00-8.00 sec 634 MBytes 5.32 Gbits/sec 0 938 KBytes +[ 5] 8.00-9.00 sec 641 MBytes 5.39 Gbits/sec 0 938 KBytes +[ 5] 9.00-10.00 sec 633 MBytes 5.30 Gbits/sec 0 938 KBytes +- - - - - - - - - - - - - - - - - - - - - - - - - +[ ID] Interval Transfer Bitrate Retr +[ 5] 0.00-10.00 sec 6.19 GBytes 5.32 Gbits/sec 0 sender +[ 5] 0.00-10.00 sec 6.18 GBytes 5.31 Gbits/sec receiver + +iperf Done. +``` + +{{% /expand %}} + +## Cleanup + +1. 
Delete the kind clusters: + *Client cluster*: + + ```sh + kind delete cluster --name=client + ``` + + *Server cluster*: + + ```sh + kind delete cluster --name=server + ``` + +1. Remove the tutorial directory: + + ```sh + cd .. && rm -rf iperf3-tutorial + ``` + +1. Unset the environment variables: + + *Client cluster*: + + ```sh + unset KUBECONFIG TEST_FILES IPERF3CLIENT + ``` + + *Server cluster*: + + ```sh + unset KUBECONFIG TEST_FILES + ``` + +[kind]: https://kind.sigs.k8s.io/ +[kind installation guide]: https://kind.sigs.k8s.io/docs/user/quick-start +[policies]: {{< relref "../../concepts/policies/_index.md" >}} diff --git a/website/content/en/docs/v0.4/tutorials/nginx/index.md b/website/content/en/docs/v0.4/tutorials/nginx/index.md new file mode 100644 index 000000000..06da58408 --- /dev/null +++ b/website/content/en/docs/v0.4/tutorials/nginx/index.md @@ -0,0 +1,268 @@ +--- +title: nginx +description: Running basic connectivity between nginx server and client across two clusters using ClusterLink. +--- + +In this tutorial, we'll establish connectivity across clusters using ClusterLink to access a remote nginx server. +The tutorial uses two kind clusters: + +1) Client cluster - runs ClusterLink along with a client. +2) Server cluster - runs ClusterLink along with a nginx server. + +## Install ClusterLink CLI + +{{% readfile file="/static/files/tutorials/cli-installation.md" %}} + +## Initialize clusters + +This tutorial uses [kind][] as a local Kubernetes environment. + You can skip this step if you already have access to existing clusters, just be sure to + set KUBECONFIG accordingly. + +To setup two kind clusters: + +1. Install kind using [kind installation guide][]. +1. Create a directory for all the tutorial files: + + ```sh + mkdir nginx-tutorial + ``` + +1. Open two terminals in the tutorial directory and create a kind cluster in each terminal: + + *Client cluster*: + + ```sh + cd nginx-tutorial + kind create cluster --name=client + ``` + + *Server cluster*: + + ```sh + cd nginx-tutorial + kind create cluster --name=server + ``` + + {{< notice note >}} + kind uses the prefix `kind`, so the name of created clusters will be **kind-client** and **kind-server**. + {{< /notice >}} + +1. Setup `KUBECONFIG` on each terminal to access the cluster: + + *Client cluster*: + + ```sh + kubectl config use-context kind-client + cp ~/.kube/config $PWD/config-client + export KUBECONFIG=$PWD/config-client + ``` + + *Server cluster*: + + ```sh + kubectl config use-context kind-server + cp ~/.kube/config $PWD/config-server + export KUBECONFIG=$PWD/config-server + ``` + +{{< notice tip >}} +You can run the tutorial in a single terminal and switch access between the clusters +using `kubectl config use-context kind-client` and `kubectl config use-context kind-server`. +{{< /notice >}} + +## Deploy nginx client and server + +Setup the ```TEST_FILES``` variable, and install nginx on the server cluster. + +*Client cluster*: + +```sh +export TEST_FILES=https://raw.githubusercontent.com/clusterlink-net/clusterlink/main/demos/nginx/testdata +``` + +*Server cluster*: + +```sh +export TEST_FILES=https://raw.githubusercontent.com/clusterlink-net/clusterlink/main/demos/nginx/testdata +kubectl apply -f $TEST_FILES/nginx-server.yaml +``` + +## Deploy ClusterLink + +{{% readfile file="/static/files/tutorials/deploy-clusterlink.md" %}} + +## Enable cross-cluster access + +In this step, we enable access between the client and server. 
+ For each step, you have an example demonstrating how to apply the command from a + file or providing the complete custom resource (CR) associated with the command. + +{{% readfile file="/static/files/tutorials/envsubst.md" %}} + +### Set-up peers + +{{% readfile file="/static/files/tutorials/peer.md" %}} + + +### Export the nginx server endpoint + +In the server cluster, export the nginx server service: + +*Server cluster*: + +{{< tabpane text=true >}} +{{% tab header="File" %}} + +```sh +kubectl apply -f $TEST_FILES/clusterlink/export-nginx.yaml +``` + +{{% /tab %}} +{{% tab header="Full CR" %}} + +```sh +echo " +apiVersion: clusterlink.net/v1alpha1 +kind: Export +metadata: + name: nginx + namespace: default +spec: + port: 80 +" | kubectl apply -f - +``` + +{{% /tab %}} +{{< /tabpane >}} + +### Set-up import + +In the client cluster, import the nginx service from the server cluster: + +*Client cluster*: + +{{< tabpane text=true >}} +{{% tab header="File" %}} + +```sh +kubectl apply -f $TEST_FILES/clusterlink/import-nginx.yaml +``` + +{{% /tab %}} +{{% tab header="Full CR" %}} + +```sh +echo " +apiVersion: clusterlink.net/v1alpha1 +kind: Import +metadata: + name: nginx + namespace: default +spec: + port: 80 + sources: + - exportName: nginx + exportNamespace: default + peer: server +" | kubectl apply -f - +``` + +{{% /tab %}} +{{< /tabpane >}} + +### Set-up access policies + +Create access policies on both clusters to allow connectivity: + +*Client cluster*: + +{{< tabpane text=true >}} +{{% tab header="File" %}} + +```sh +kubectl apply -f $TEST_FILES/clusterlink/allow-policy.yaml +``` + +{{% /tab %}} +{{% tab header="Full CR" %}} + +{{% readfile file="/static/files/tutorials/allow-all-policy.md" %}} + +{{% /tab %}} +{{< /tabpane >}} + +*Server cluster*: + +{{< tabpane text=true >}} +{{% tab header="File" %}} + +```sh +kubectl apply -f $TEST_FILES/clusterlink/allow-policy.yaml +``` + +{{% /tab %}} +{{% tab header="Full CR" %}} + +{{% readfile file="/static/files/tutorials/allow-all-policy.md" %}} + +{{% /tab %}} +{{< /tabpane >}} + +For more details regarding policy configuration, see [policies][] documentation. + +## Test service connectivity + +Test the connectivity between the clusters with a batch job of the ```curl``` command: + +*Client cluster*: + +```sh +kubectl apply -f $TEST_FILES/nginx-job.yaml +``` + +Verify the job succeeded: + +```sh +kubectl logs jobs/curl-nginx-homepage +``` + +{{% readfile file="/static/files/tutorials/nginx/nginx-output.md" %}} + +## Cleanup + +1. Delete the kind clusters: + *Client cluster*: + + ```sh + kind delete cluster --name=client + ``` + + *Server cluster*: + + ```sh + kind delete cluster --name=server + ``` + +1. Remove the tutorial directory: + + ```sh + cd .. && rm -rf nginx-tutorial + ``` + +1. Unset the environment variables: + *Client cluster*: + + ```sh + unset KUBECONFIG TEST_FILES + ``` + + *Server cluster*: + + ```sh + unset KUBECONFIG TEST_FILES + ``` + +[kind]: https://kind.sigs.k8s.io/ +[kind installation guide]: https://kind.sigs.k8s.io/docs/user/quick-start +[policies]: {{< relref "../../concepts/policies/_index.md" >}}