diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index b96240b7..b51c4296 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -28,7 +28,8 @@ jobs: - name: Install dependencies and build run: | - cd ${{ matrix.repo }} + cd modules && make all + cd ../${{ matrix.repo }} echo "Installing packages in $(pwd)" npm ci echo "Building $(pwd)" diff --git a/Makefile b/Makefile index 208904cc..5b35ebae 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,8 @@ .PHONY: lint lint: vale sync - vale canary-checker/docs - vale mission-control/docs + vale canary-checker/docs --glob='!**/{README,CHANGELOG,readme,security,SECURITY,CONTRIBUTING,benchmark,development,LICENSE}.md' + vale mission-control/docs --glob='!**/{README,CHANGELOG,readme,security,SECURITY,CONTRIBUTING,benchmark,development,LICENSE}.md' markdownlint mission-control/docs markdownlint canary-checker/docs diff --git a/canary-checker/docs/comparisons/index.md b/canary-checker/docs/comparisons/index.md index b23e0446..0db49073 100644 --- a/canary-checker/docs/comparisons/index.md +++ b/canary-checker/docs/comparisons/index.md @@ -2,5 +2,5 @@ title: Comparisons sidebar_position: 4 sidebar_custom_props: - icon: material-symbols-light:text-compare-outline + icon: compare --- diff --git a/canary-checker/docs/concepts/expressions/index.md b/canary-checker/docs/concepts/expressions/index.md index 585e2ca1..b2a9281d 100644 --- a/canary-checker/docs/concepts/expressions/index.md +++ b/canary-checker/docs/concepts/expressions/index.md @@ -1,7 +1,7 @@ --- title: Expressions sidebar_custom_props: - icon: hugeicons:code + icon: code --- canary-checker can be extended using expressions in 3 ways: diff --git a/canary-checker/docs/concepts/metrics.mdx b/canary-checker/docs/concepts/metrics.mdx index b0ada568..5d5f2be1 100644 --- a/canary-checker/docs/concepts/metrics.mdx +++ b/canary-checker/docs/concepts/metrics.mdx @@ -1,7 +1,7 @@ --- title: Metrics sidebar_custom_props: - icon: clarity:dashboard-line + icon: dashboard-line --- diff --git a/canary-checker/docs/concepts/secret-management.md b/canary-checker/docs/concepts/secret-management.md index 971cf477..8b2c8812 100644 --- a/canary-checker/docs/concepts/secret-management.md +++ b/canary-checker/docs/concepts/secret-management.md @@ -1,7 +1,7 @@ --- title: Env Vars sidebar_custom_props: - icon: stash:search-box-light + icon: shield-lock sidebar_position: 1 --- diff --git a/canary-checker/docs/examples/index.mdx b/canary-checker/docs/examples/index.mdx index 5a67659d..1d5f201b 100644 --- a/canary-checker/docs/examples/index.mdx +++ b/canary-checker/docs/examples/index.mdx @@ -3,7 +3,7 @@ title: Examples sidebar_position: 3 hide_sidebar: true sidebar_custom_props: - icon: stash:graduation-cap-light + icon: learning --- import DocCardList from '@theme/DocCardList'; diff --git a/canary-checker/docs/partials/_domain.mdx b/canary-checker/docs/partials/_domain.mdx index ea95b278..4c8aeaa4 100644 --- a/canary-checker/docs/partials/_domain.mdx +++ b/canary-checker/docs/partials/_domain.mdx @@ -1,4 +1,4 @@ Choose a routable `DOMAIN` for Mission Control - > See [Ingress](/reference/helm/mission-control#ingress) for more options on configuring the ingress including generating certs with cert-manager - >

See [Local Testing](../local-testing) for testing using a kind or minikube without a routable domain

+ > See [Ingress](/installation/self-hosted/ingress) for more options on configuring the ingress, including generating certs with cert-manager + >

See [Local Testing](/installation/local-testing) for testing with a kind or minikube cluster without a routable domain

diff --git a/canary-checker/docs/reference/1-kubernetes-resource.mdx b/canary-checker/docs/reference/1-kubernetes-resource.mdx index 41d0ea90..8984b077 100644 --- a/canary-checker/docs/reference/1-kubernetes-resource.mdx +++ b/canary-checker/docs/reference/1-kubernetes-resource.mdx @@ -184,15 +184,3 @@ When this canary is deleted, the **test** namespace is deleted and consequently - -
-
- -This example demonstrates how to test a Helm deployment in a virtual cluster. - -It creates a vcluster, installs the canary-checker helm chart, and then verifies the deployment is working using an http check. - -```yaml title="vcluster-canary-checker.yaml" file=/modules/canary-checker/fixtures/k8s/vcluster-canary-checker.yaml -``` -
-
\ No newline at end of file diff --git a/canary-checker/docs/reference/1-kubernetes.mdx b/canary-checker/docs/reference/1-kubernetes.mdx index fec51199..6720d15d 100644 --- a/canary-checker/docs/reference/1-kubernetes.mdx +++ b/canary-checker/docs/reference/1-kubernetes.mdx @@ -4,6 +4,7 @@ sidebar_position: 0 sidebar_custom_props: icon: k8s --- +import ReactMarkdown from 'react-markdown'; # Kubernetes @@ -15,9 +16,14 @@ The Kubernetes check performs requests on Kubernetes resources such as Pods to g + Failing checks are placed in this namespace, useful if you have shared namespaces. + + **NOTE:** this does not change the namespace of the resources being queried + }, {field: "ignore", description: "Ignore the specified resources from the fetched resources. Can be a glob pattern.", scheme: '[]glob'}, {field: "healthy", description: "Fail the check if any resources are unhealthy", scheme: 'bool'}, {field: "ready", description: "Fail the check if any resources are not ready", scheme: 'bool'}, diff --git a/canary-checker/docs/scripting/gotemplate.mdx b/canary-checker/docs/scripting/gotemplate.mdx index a7f5ca65..6a56e3a4 100644 --- a/canary-checker/docs/scripting/gotemplate.mdx +++ b/canary-checker/docs/scripting/gotemplate.mdx @@ -179,10 +179,12 @@ Reports whether a given object has a property with the given key, or whether a g ### jq -Filters an input object or list using the [jq](https://stedolan.github.io/jq/) language, as implemented by [gojq](https://github.com/itchyny/gojq). +The `jq` function filters input using the [jq query language](https://stedolan.github.io/jq/), implemented via [gojq](https://github.com/itchyny/gojq). -Any JSON datatype may be used as input (NOTE: strings are not JSON-parsed but passed in as is). If the expression results in multiple items (no matter if streamed or as an array) they are wrapped in an array. Otherwise a single item is returned (even if resulting in an array with a single contained element). +Input can be any valid JSON data type, strings are passed directly without JSON parsing. The function returns: +- A single value for single results +- An array for multiple results (whether streamed or array output) JQ filter expressions can be tested at [jqplay](https://jqplay.org/) See also: @@ -922,7 +924,7 @@ Adds all given operators. When one of the inputs is a floating-point number, the Returns the least integer value greater than or equal to a given floating-point number. This wraps Go's [`math.Ceil`](https://golang.org/pkg/math/#Ceil). -**Note:** the return value of this function is a `float64` so that the special-cases `NaN` and `Inf` can be returned appropriately. +**Note:** the return value of this function is a `float64` so that the cases of `NaN` and `Inf` can be returned appropriately. ```go {{ range (slice 5.1 42 "3.14" "0xFF" "NaN" "Inf" "-0") }}ceil {{ printf "%#v" . }} = {{ math.Ceil . }}{{"\n"}}{{ end }} @@ -948,7 +950,7 @@ Divide the first number by the second. Division by zero is disallowed. The resul Returns the greatest integer value less than or equal to a given floating-point number. This wraps Go's [`math.Floor`](https://golang.org/pkg/math/#Floor). -**Note:** the return value of this function is a `float64` so that the special-cases `NaN` and `Inf` can be returned appropriately. +**Note:** the return value of this function is a `float64` so that the cases of `NaN` and `Inf` can be returned appropriately. ```go {{ range (slice 5.1 42 "3.14" "0xFF" "NaN" "Inf" "-0") }}floor {{ printf "%#v" . }} = {{ math.Floor . 
}}{{"\n"}}{{ end }} @@ -1047,7 +1049,7 @@ Return the remainder from an integer division operation. Returns the nearest integer, rounding half away from zero. -**Note:** the return value of this function is a `float64` so that the special-cases `NaN` and `Inf` can be returned appropriately. +**Note:** the return value of this function is a `float64` so that the cases of `NaN` and `Inf` can be returned appropriately. ```go {{ range (slice -6.5 5.1 42.9 "3.5" 6.5) }}round {{ printf "%#v" . }} = {{ math.Round . }}{{"\n"}}{{ end }} @@ -1635,7 +1637,7 @@ broken Return the number of _runes_ (Unicode code-points) contained within the input. This is similar to the built-in `len` function, but `len` counts the length in _bytes_. The length of an input containing multi-byte code-points should therefore be measured with `strings.RuneCount`. -Inputs will first be converted to strings, and multiple inputs are concatenated. +Inputs are first converted to strings, and multiple inputs are concatenated. This wraps Go's [`utf8.RuneCountInString`](https://golang.org/pkg/unicode/utf8/#RuneCountInString) function. @@ -1840,10 +1842,7 @@ It is{{ if not $t.IsDST }} not{{ end }} daylight savings time. Parses a timestamp defined by the given layout. This wraps [`time.Parse`](https://golang.org/pkg/time/#Parse). -A number of pre-defined layouts are provided as constants, defined -[here](https://golang.org/pkg/time/#pkg-constants). - -Just like [`time.Now`](#now), this is usually used in conjunction with other functions. +A number of pre-defined layouts are provided as [constants](https://golang.org/pkg/time/#pkg-constants) _Note: In the absence of a time zone indicator, `time.Parse` returns a time in UTC._ @@ -1952,15 +1951,14 @@ Return the local system's time zone offset, in seconds east of UTC. Create a version 1 UUID (based on the current MAC address and the current date/time). -Use [`uuid.V4`](#v4) instead in most cases. - +_Note:_ [`uuid.V4`](#v4) is recommended instead ```go {{ uuid.V1 }} // 4d757e54-446d-11e9-a8fa-72000877c7b0 ``` ### V4 -Create a version 4 UUID (randomly generated). +Create a version 4 UUID This function consumes entropy. 
diff --git a/canary-checker/docs/troubleshooting.mdx b/canary-checker/docs/troubleshooting.mdx index 6e09ea46..295ce8ba 100644 --- a/canary-checker/docs/troubleshooting.mdx +++ b/canary-checker/docs/troubleshooting.mdx @@ -1,7 +1,7 @@ --- title: Troubleshooting sidebar_custom_props: - icon: material-symbols-light:troubleshoot + icon: troubleshoot --- import Install from '@site/docs/snippets/_install.mdx' diff --git a/canary-checker/docs/types.md b/canary-checker/docs/types.md index fef6efd7..7cfaa76d 100644 --- a/canary-checker/docs/types.md +++ b/canary-checker/docs/types.md @@ -3,7 +3,7 @@ hide_title: true title: Common Types sidebar_position: 3 sidebar_custom_props: - icon: fluent:library-16-regular + icon: library --- import Types from '@site/docs/snippets/\_types.md' diff --git a/canary-checker/package.json b/canary-checker/package.json index e83ed1c2..a015a19e 100644 --- a/canary-checker/package.json +++ b/canary-checker/package.json @@ -15,7 +15,7 @@ "@docusaurus/core": "^3.7.0", "@docusaurus/plugin-client-redirects": "^3.7.0", "@docusaurus/preset-classic": "^3.7.0", - "@flanksource/icons": "^1.0.24", + "@flanksource/icons": "^1.0.34", "@floating-ui/react": "^0.26.28", "@mdx-js/react": "^3.0.0", "ansi-to-html": "^0.7.2", diff --git a/common/snippets/_resource-selector.md b/common/snippets/_resource-selector.md new file mode 100644 index 00000000..e3536c00 --- /dev/null +++ b/common/snippets/_resource-selector.md @@ -0,0 +1,144 @@ +--- +title: Resource Selectors +sidebar_position: 2 +sidebar_custom_props: + icon: stash:search-box-light +--- + +# Resource Selectors + +Resource Selectors are used in multiple places including: + +- Attaching components to a topology +- Creating relationships between configs and configs/components +- Finding resources to run health checks or playbooks on + +| Field | Description | Scheme | Required | +| --------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------- | -------- | +| `id` | ID of the component | `string` | No | +| `name` | Name of the component/config | `string` | No | +| `namespace` | Select resources in this namespace only, if empty find resources in all namespaces | `string` | No | +| `types` | Match any of the types specified | `[]string` | No | +| `statuses` | Match any of the statuses specified | `[]string` | No | +| `labelSelector` | Kubernetes Style Label Selector | [LabelSelector](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) | No | +| `fieldSelector` | Kubernetes Style Field Selector Property fields of the component in kubernetes format (or database columns: owner, topology_id, parent_id) | [FieldSelector](https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/) | No | +| `agent` | Select resources created on this agent, Defaults to `local` | `uuid`, `{name}`, `local` or `all` | No | +| `cache` | Cache settings to use for the results, expensive selectors or selectors that are used often should be cached for longer periods. Defaults to `max-age=10m` | `no-cache`, `no-store` or `max-age={duration}` | No | +| `search` | Search for resources via key value pairs using parsing expression grammar | `#search` | No | + +## Search + +The query syntax is `field1=value1 field2>value2 field3=value3* field4=*value4`. `*` is for prefix and suffix matching. 
+ +Supported operators: + +| Operator | Syntax | Types | +| -------- | -------------------------------- | --------------------- | +| `=` | `field=value` | `string` `int` `json` | +| `!=` | `field!=value` | `string` `int` `json` | +| `*` | `field=*value` or `field=value*` | `string` `int` | +| `>` `<` | `field>value` or `fieldnow-24h + + - name: All components updated between a specific interval + selectors: + - search: updated_at>2024-10-10 updated_at<2024-10-17 + + - name: Component with name httpbin-service + # Not giving any key will do a name lookup (ie name=httpbin-service) + selectors: + - search: httpbin-service + + - name: Components with label cluster + # JSON lookups are also supported + selectors: + - search: labels.cluster=prod + + - name: Link configs which have logistics-api image + configs: + - search: config.spec.template.spec.containers[0].name=docker.io/example/logistics-api:latest +``` diff --git a/common/src/components/Helm.jsx b/common/src/components/Helm.jsx index d19a0871..1e46a895 100644 --- a/common/src/components/Helm.jsx +++ b/common/src/components/Helm.jsx @@ -55,6 +55,7 @@ export default function Helm({ repoName = "flanksource", chart = "mission-control", namespace = "mission-control", + mode = "tabs", createNamespace = true, createRepo = true, wait = true, @@ -70,6 +71,48 @@ export default function Helm({ const [cli, setCli] = useState(generateCli( repo, repoName, chart, namespace, createNamespace, createRepo, wait, state, valueFile, args)) + + var flux = + {createNamespace && `apiVersion: v1 +kind: Namespace +metadata: +name: ${namespace} +--- +` || ""} + {createRepo && `apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: +name: ${repoName} +namespace: ${namespace} +spec: +interval: 5m0s +url: ${repo} +--- +` || ""} + {`apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: +name: ${chart} +namespace: ${namespace} +spec: +chart: +spec: +chart: ${chart} +sourceRef: + kind: HelmRepository + name: ${repoName} + namespace: ${namespace} +interval: 1m +`} + {valueFile || values && "values:\n"} + {valueFile && valueFile.replace(/^/gm, ' ')} + {values && Object.keys(values).map((k) => { + return ` ${k}: ${values[k]}\n` + }).join("")} + ; + + + return <> {/*
@@ -123,63 +166,40 @@ export default function Helm({ })}
*/} - - - - {cli} - - + {mode == 'tabs' && + + + + {cli} + - - - {createNamespace && `apiVersion: v1 -kind: Namespace -metadata: - name: ${namespace} ---- -` || ""} - {createRepo && `apiVersion: source.toolkit.fluxcd.io/v1 -kind: HelmRepository -metadata: - name: ${repoName} - namespace: ${namespace} -spec: - interval: 5m0s - url: ${repo} ---- -` || ""} - {`apiVersion: helm.toolkit.fluxcd.io/v2 -kind: HelmRelease -metadata: - name: ${chart} - namespace: ${namespace} -spec: - chart: - spec: - chart: ${chart} - sourceRef: - kind: HelmRepository - name: ${repoName} - namespace: ${namespace} - interval: 1m - `} - {valueFile || values && "values:\n"} - {valueFile && valueFile.replace(/^/gm, ' ')} - {values && Object.keys(values).map((k) => { - return ` ${k}: ${values[k]}\n` - }).join("")} - - - + + + + + + {flux} + + + } + + {mode == "helm" && + + {cli} + + } + + {mode == "flux" && flux} {schema && } {!schema && (chart == "mission-control" || chart == "mission-control-agent") && + <>See < Link to={`/reference/helm/${chart}`}>values.yaml } diff --git a/common/src/components/Icon.jsx b/common/src/components/Icon.jsx index 25750d50..03bd17df 100644 --- a/common/src/components/Icon.jsx +++ b/common/src/components/Icon.jsx @@ -32,8 +32,8 @@ export default function Icon({ name, height = 22, className, url, children, ...p name ="carbon:lightning" } - if (name =="tutorial") { - name = "stash:graduation-cap-light" + if (name == "tutorial") { + name = "learning" } diff --git a/common/src/theme/DocCardList/index.js b/common/src/theme/DocCardList/index.js index b5aef3f6..25b68d26 100644 --- a/common/src/theme/DocCardList/index.js +++ b/common/src/theme/DocCardList/index.js @@ -22,11 +22,14 @@ export default function DocCardList(props) { return } - const filteredItems = filterDocCardListItems(items); + const filteredItems = filterDocCardListItems(items).filter(i => !(i.customProps && i.customProps.category)); return ( <> {props.items.map((item, index) => { + if (item.customProps && item.customProps.category) { + return null; + } return }) } diff --git a/mission-control-chart b/mission-control-chart index b3b2ae10..33c83ea9 160000 --- a/mission-control-chart +++ b/mission-control-chart @@ -1 +1 @@ -Subproject commit b3b2ae1067d1406296e9352aa4231621394a93d0 +Subproject commit 33c83ea9f051d785e745e5d3d9bc8b7551cd4fea diff --git a/mission-control/blog/control-plane-testing b/mission-control/blog/control-plane-testing deleted file mode 120000 index 221ee191..00000000 --- a/mission-control/blog/control-plane-testing +++ /dev/null @@ -1 +0,0 @@ -../../canary-checker/docs/tutorials/control-plane-testing \ No newline at end of file diff --git a/mission-control/blog/control-plane-testing/basic-canary.yaml b/mission-control/blog/control-plane-testing/basic-canary.yaml new file mode 100644 index 00000000..6ced146b --- /dev/null +++ b/mission-control/blog/control-plane-testing/basic-canary.yaml @@ -0,0 +1,47 @@ +apiVersion: canaries.flanksource.com/v1 +kind: Canary +metadata: + name: control-plane-tests + namespace: control-plane-tests +spec: + schedule: "@every 1h" + kubernetesResource: + - name: helm-release-postgres-check + description: "Deploy postgresql via HelmRelease" + waitFor: + timeout: 1m + display: + template: |+ + Helm release created: {{ .health | toYAML }} + staticResources: + - apiVersion: source.toolkit.fluxcd.io/v1 + kind: HelmRepository + metadata: + name: bitnami + spec: + type: oci + interval: 1h + url: oci://registry-1.docker.io/bitnamicharts + resources: + - apiVersion: helm.toolkit.fluxcd.io/v2 + 
kind: HelmRelease + metadata: + name: postgresql + spec: + chart: + spec: + chart: postgresql + sourceRef: + kind: HelmRepository + name: bitnami + interval: 5m + values: + auth: + username: admin + password: qwerty123 + database: exampledb + primary: + persistence: + enabled: true + size: 8Gi + diff --git a/mission-control/blog/control-plane-testing/basic-run.svg b/mission-control/blog/control-plane-testing/basic-run.svg new file mode 100644 index 00000000..b76370f4 --- /dev/null +++ b/mission-control/blog/control-plane-testing/basic-run.svg @@ -0,0 +1,31 @@ + + + +16:25:05.279 WRN /Users/moshe/go/src/github.com/flanksource/canary-checker/fixtures/minimal/canary-checker.properties does not exist +16:25:05.279 WRN --db not configured +16:25:05.281 INF (k8s) Using kubeconfig /Users/moshe/.kube/config +16:25:05.289 INF Checking http_pass.yaml, 1 checks found +16:25:06.711 INF (http-pass.http-deprecated-endpoint) PASS duration=279 +16:25:06.711 INF (http-pass.http-minimal-check) PASS duration=286 +16:25:06.711 INF (http-pass.http-param-tests) PASS duration=275 +16:25:06.711 INF (http-pass.http-expr-tests) PASS duration=284 code=200, age=1883h41m33.579006s +16:25:06.711 INF (http-pass.http-headers) PASS duration=269 +16:25:06.711 INF 5 passed, 0 failed in 1s + diff --git a/mission-control/docs/installation/saas/fully-hosted.mdx b/mission-control/blog/control-plane-testing/canary-checker.properties similarity index 100% rename from mission-control/docs/installation/saas/fully-hosted.mdx rename to mission-control/blog/control-plane-testing/canary-checker.properties diff --git a/mission-control/blog/control-plane-testing/custom-canary.yaml b/mission-control/blog/control-plane-testing/custom-canary.yaml new file mode 100644 index 00000000..30f8fdb8 --- /dev/null +++ b/mission-control/blog/control-plane-testing/custom-canary.yaml @@ -0,0 +1,61 @@ + +apiVersion: canaries.flanksource.com/v1 +kind: Canary +metadata: + name: control-plane-tests + namespace: control-plane-tests +spec: + schedule: "@every 1m" + kubernetesResource: + - name: helm-release-postgres-check + namespace: default + description: "Deploy postgresql via HelmRelease" + staticResources: + - apiVersion: source.toolkit.fluxcd.io/v1 + kind: HelmRepository + metadata: + name: bitnami + spec: + type: oci + interval: 1h + url: oci://registry-1.docker.io/bitnamicharts + resources: + - apiVersion: helm.toolkit.fluxcd.io/v2 + kind: HelmRelease + metadata: + name: postgresql + namespace: default + spec: + chart: + spec: + chart: postgresql + sourceRef: + kind: HelmRepository + name: bitnami + namespace: control-plane-tests + interval: 5m + values: + auth: + username: admin + password: qwerty123 + database: exampledb + primary: + persistence: + enabled: true + size: 8Gi + checks: + - postgres: + - name: postgres schemas check + url: "postgres://$(username):$(password)@postgresql.default.svc:5432/exampledb?sslmode=disable" + username: + value: admin + password: + value: qwerty123 + # Since we just want to check if database is responding, + # a SELECT 1 query should suffice + query: SELECT 1 + + checkRetries: + delay: 15s + interval: 10s + timeout: 5m diff --git a/mission-control/blog/control-plane-testing/flux.yaml b/mission-control/blog/control-plane-testing/flux.yaml new file mode 100644 index 00000000..0841c275 --- /dev/null +++ b/mission-control/blog/control-plane-testing/flux.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: control-plane-tests +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository 
+metadata: + name: bitnami + namespace: control-plane-tests +spec: + type: oci + interval: 1h + url: oci://registry-1.docker.io/bitnamicharts +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: postgresql +spec: + chart: + spec: + chart: postgresql + sourceRef: + kind: HelmRepository + name: bitnami + namespace: control-plane-tests + version: "*" + interval: 1h + values: + auth: + database: my_database + password: qwerty123 + username: admin + primary: + persistence: + enabled: true + size: 8Gi diff --git a/mission-control/blog/control-plane-testing/index.mdx b/mission-control/blog/control-plane-testing/index.mdx new file mode 100644 index 00000000..e3d17bd0 --- /dev/null +++ b/mission-control/blog/control-plane-testing/index.mdx @@ -0,0 +1,207 @@ +--- +title: Synthetic Infrastructure Testing With Canary Checker and Flux +description: Walkthrough of sythentic Infrastructure Testing for Kubernetes +slug: infrastructure-testing-with-canary-checker-and-flux +authors: [moshe,yash] +tags: [synthetic testing, flux, helm, canary-checker] +hide_table_of_contents: false +--- + + +import Install from '@site/docs/snippets/_install.mdx' + +# Control Plane Testing + + +Deploying applications with Kubernetes is easier than ever, yet developers face increasing complexity. + +Kubernetes simplifies deployment, but with it comes a labyrinth of potential issues. From resource conflicts to version incompatibilities, a failure in one component can cascade. Understanding application health through metric models like **RED** (Requests, Errors, Duration) and **USE** (Utilization, Saturation, Errors) isn't always enough. Latent errors might only surface during deployment or scaling. + + + +For example, consider deploying a stateful PostgreSQL database via Flux on AWS. Problems could arise, including: + + + + + +* Tools like `helm template` and `helm lint` can validate chart rendering and syntax, but they don't guarantee compatibility with a specific Kubernetes version or the operators running on the cluster. +* `ct install` on a `kind` or simulated cluster can verify API compatibility and ensure all resources and operators work correctly in ideal conditions. +* Deploying to a staging environment can help catch issues before they reach production, but this approach doesn't detect capacity, performance or latent errors that only surface under load. + + +Control plane testing can help improve resilience by continuously redeploying workloads, ensuring there is enough capacity within the system and that all operators and external dependencies are working correctly. + +Canary checker is a kubernetes-native test platform that continuously runs tests using 30+ check styles against your workloads. In this tutorial, we use it to continuously verify the ability to provision and run stateful workloads in a cluster. + + + +The [`kubernetesResource`](/guide/canary-checker/reference/kubernetes-resource) check creates kubernetes resources based on the provided manifests & perform checks on them, it has 5 lifecycle stages: + +## Lifecycle + +

+ +* **Apply Static Resources** + Applies all `staticResources` required for the tests to pass, e.g. namespaces, secrets, etc. +* **Apply Resources** + Applies all the workloads defined in `resources` +* **Wait** - Uses the parameters defined in `waitFor` to wait for the resources to become ready, as determined by [is-healthy](https://github.com/flanksource/is-healthy) +* **Run Checks** - Runs all the `checks` against the workloads +* **Cleanup** - Deletes all the `resources` that were created during the test. +
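In short, the stages map onto the check's fields as sketched below. This is a condensed, illustrative skeleton only; the empty lists and the canary name are placeholders. See the tutorial's `template.yaml` further down for a fuller layout.

```yaml
# Illustrative sketch: how the five lifecycle stages map to kubernetesResource fields
apiVersion: canaries.flanksource.com/v1
kind: Canary
metadata:
  name: lifecycle-sketch        # placeholder name
spec:
  schedule: "@every 1h"
  kubernetesResource:
    - name: example-check
      staticResources: []       # 1. Apply Static Resources: created once and reused across runs
      resources: []             # 2. Apply Resources: workloads created on every run
      waitFor:
        timeout: 10m            # 3. Wait: poll until is-healthy reports the resources ready
      checks: []                # 4. Run Checks: checks executed against the workloads
      # 5. Cleanup: the resources created in stage 2 are deleted after the run
```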

+ +## Tutorial + +### Prerequisites + +:::info Prerequisites +To follow this tutorial, you need: + +- A Kubernetes cluster +- [FluxCD](https://fluxcd.io/) installed + +::: + +1. Define the workload under test + + Before you can create a canary, start with a working example of the resource under test. In this example we use a `HelmRelease` to deploy a PostgreSQL database. + + ```yaml file=flux.yaml + ``` + + Once you have verified the Helm release works on its own, you can begin building the control plane test with `canary-checker`. + +1. Install the `canary-checker` binary + + :::info Helm Installation + This tutorial uses the CLI for faster feedback. In production, we recommend installing `canary-checker` as an operator using the [helm chart](https://canarychecker.io/getting-started) or as part of the full Mission Control [platform](/installation/self-hosted/getting-started). + ::: + +1. Next, create a `Canary` custom resource using the `kubernetesResource` check type. The layout of the canary is as follows: + + ```yaml title=basic-canary.yaml file=template.yaml + ``` +

+ + Using the workload defined in step 1, the check definition is as follows: + + ```yaml title=basic-canary.yaml file=basic-canary.yaml + ``` +

+ +1. Run the test locally using `canary-checker run basic-canary.yaml` +

+ + 18:01:52.745 INF (k8s) Using kubeconfig /Users/moshe/.kube/config + 18:01:52.749 INF Checking basic-canary.yaml, 1 checks found + 18:01:55.209 INF (control-plane-tests) HelmRelease/control-plane-tests/postgresql (created) +kustomized + 18:02:21.072 INF (control-plane-tests.helm-release-postgres-check) PASS duration=28321 Helm release created: + control-plane-tests/HelmRelease/postgresql: + health: healthy + message: Helm install succeeded for release control-plane-tests/postgresql.v1 with chart postgresql@16.2.2 + ready: true + status: InstallSucceeded + control-plane-tests/HelmRepository/bitnami: + health: unknown + ready: true + 18:02:21.073 INF 1 passed, 0 failed in 28s + + + +

+

And if you run `kubectl get events` you should see:

+ + + LAST SEEN TYPE REASON OBJECT MESSAGE + 26m Normal ChartPullSucceeded helmchart/control-plane-tests-postgresql pulled 'postgresql' chart with version '16.2.2' + 26m Normal Scheduled pod/postgresql-0 Successfully assigned control-plane-tests/postgresql-0 to ip-10-0-4-167.eu-west-1.compute.internal + 26m Normal Pulled pod/postgresql-0 Container image "docker.io/bitnami/postgresql:17.2.0-debian-12-r0" already present on machine + 26m Normal Created pod/postgresql-0 Created container postgresql + 26m Normal Started pod/postgresql-0 Started container postgresql + 26m Warning Unhealthy pod/postgresql-0 Readiness probe failed: 127.0.0.1:5432 - rejecting connections + 26m Warning Unhealthy pod/postgresql-0 Readiness probe failed: 127.0.0.1:5432 - no response + 26m Normal Killing pod/postgresql-0 Stopping container postgresql + 113s Normal Scheduled pod/postgresql-0 Successfully assigned control-plane-tests/postgresql-0 to ip-10-0-4-167.eu-west-1.compute.internal + 112s Normal Pulled pod/postgresql-0 Container image "docker.io/bitnami/postgresql:17.2.0-debian-12-r0" already present on machine + 112s Normal Created pod/postgresql-0 Created container postgresql + 112s Normal Started pod/postgresql-0 Started container postgresql + 96s Normal Killing pod/postgresql-0 Stopping container postgresql + 26m Normal HelmChartCreated helmrelease/postgresql Created HelmChart/control-plane-tests/control-plane-tests-postgresql with SourceRef 'HelmRepository/control-plane-tests/bitnami' + 26m Normal SuccessfulCreate statefulset/postgresql create Pod postgresql-0 in StatefulSet postgresql successful + 26m Normal InstallSucceeded helmrelease/postgresql Helm install succeeded for release control-plane-tests/postgresql.v1 with chart postgresql@16.2.2 + 26m Normal UninstallSucceeded helmrelease/postgresql Helm uninstall succeeded for release control-plane-tests/postgresql.v1 with chart postgresql@16.2.2 + 26m Normal HelmChartDeleted helmrelease/postgresql deleted HelmChart 'control-plane-tests/control-plane-tests-postgresql' + 116s Normal HelmChartCreated helmrelease/postgresql Created HelmChart/control-plane-tests/control-plane-tests-postgresql with SourceRef 'HelmRepository/control-plane-tests/bitnami' + 113s Normal SuccessfulCreate statefulset/postgresql create Pod postgresql-0 in StatefulSet postgresql successful + 101s Normal InstallSucceeded helmrelease/postgresql Helm install succeeded for release control-plane-tests/postgresql.v1 with chart postgresql@16.2.2 + 96s Warning CalculateExpectedPodCountFailed poddisruptionbudget/postgresql Failed to calculate the number of expected pods: found no controllers for pod "postgresql-0" + 96s Normal UninstallSucceeded helmrelease/postgresql Helm uninstall succeeded for release control-plane-tests/postgresql.v1 with chart postgresql@16.2.2 + 95s Normal HelmChartDeleted helmrelease/postgresql deleted HelmChart 'control-plane-tests/control-plane-tests-postgresql' + +2. Add custom check + + By default `kubernetesResource` only checks if the resource is ready. However, you can add custom checks to validate the resource further. + + For example, you can validate the PostgreSQL database is running and accepting connections, with a custom `postgres` check: + + + ```yaml + apiVersion: canaries.flanksource.com/v1 + kind: Canary + #... + spec: + kubernetesResource: + - #... 
+ checks: + - postgres: + - name: postgres schemas check + url: "postgres://$(username):$(password)@postgresql.default.svc:5432/exampledb?sslmode=disable" + username: + value: admin + password: + value: qwerty123 + # Since we just want to check if database is responding, + # a SELECT 1 query should suffice + query: SELECT 1 + ``` + + :::warning Accessing variables + This example uses the `$(username)` and `$(password)` syntax to access the `username` and `password` variables hardcoded in the `checks` section, but in a production setting, reference secrets using [`valueFrom`](/reference/env-var) + ::: + + :::tip Alternatives to custom checks + Instead of using a custom check you can also add a standard helm test pod to your chart or define a canary inside the chart to automatically include health checks for all workloads. + ::: + + +3. The final test looks like: + + + ```yaml file=custom-canary.yaml + ``` + +## Conclusion + +Continuous testing of your control plane is essential for maintaining resilient infrastructure at scale. By implementing continuous testing with tools like Canary Checker, Flux, and Helm, you can: + +- Catch breaking changes early +- Validate infrastructure changes +- Ensure security compliance +- Maintain platform stability +- Reduce incident recovery time + +This proactive approach helps catch issues before they impact production environments and affect your users. + +## References + +- [kubernetesResource](/guide/canary-checker/reference/kubernetes-resource) Canary +- [ helm lint](https://helm.sh/docs/helm/helm_lint/) +- [ helm test](https://helm.sh/docs/helm/helm_test/) +- [ ct install](https://github.com/helm/chart-testing/blob/main/doc/ct_install.md) +- [ Flux HelmRelease](https://fluxcd.io/flux/components/helm/helmreleases/) +- [ Helm Chart Tests](https://helm.sh/docs/topics/chart_tests/) diff --git a/mission-control/blog/control-plane-testing/template.yaml b/mission-control/blog/control-plane-testing/template.yaml new file mode 100644 index 00000000..cb2bb4a2 --- /dev/null +++ b/mission-control/blog/control-plane-testing/template.yaml @@ -0,0 +1,24 @@ +apiVersion: canaries.flanksource.com/v1 +kind: Canary +metadata: + name: control-plane-tests + namespace: control-plane-tests +spec: + # how often to run the test + schedule: "@every 1h" + //highlight-next-line + kubernetesResource: # this is type of canary we are executing, canary-checker has many more + - name: helm-release-postgres-check + waitFor: + # The time to wait for the resources to be ready before considering the test a failure + timeout: 10m + staticResources: + //highlight-next-line + - # A list of resources that should be created once only and re-used across multiple tests + resources: + //highlight-next-line + - # A list of resources to be created every time the check runs + display: + # optional Go text template to display the results of the check + template: |+ + Helm release created: {{ .health | toYAML }} diff --git a/mission-control/docs/guide/config-db/scrapers/aws.md b/mission-control/docs/guide/config-db/scrapers/aws.md index 6ef98ad4..f2a13a4f 100644 --- a/mission-control/docs/guide/config-db/scrapers/aws.md +++ b/mission-control/docs/guide/config-db/scrapers/aws.md @@ -11,7 +11,7 @@ This config type is used to scrape information about your AWS infrastructure. 
:::tip Registry -The registry has an [AWS](/registry/aws) Helm chart that provides a pre-configured Scraper with some common defaults +The Mission Control Registry includes an [AWS](/integration/aws) Helm chart that provides a pre-configured Scraper with common defaults ::: diff --git a/mission-control/docs/guide/config-db/scrapers/kubernetes.mdx b/mission-control/docs/guide/config-db/scrapers/kubernetes.mdx index 5e6cee50..95ac26ab 100644 --- a/mission-control/docs/guide/config-db/scrapers/kubernetes.mdx +++ b/mission-control/docs/guide/config-db/scrapers/kubernetes.mdx @@ -8,8 +8,7 @@ sidebar_custom_props: # Kubernetes :::tip Helm Chart - -See the [Kubernetes](/integrations/kubernetes/catalog) Helm chart that provides a pre-configured Scraper and Topology with some common defaults. +See the [Kubernetes](/integrations/kubernetes/getting-started) Helm chart that provides a pre-configured Scraper and Topology with some common defaults. ::: The `kubernetes` scraper collects all of the resources and events in a Kubernetes cluster, and then watches for changes. @@ -21,7 +20,7 @@ The `kubernetes` scraper collects all of the resources and events in a Kubernete | Field | Description | Scheme | | ------------ | ---------------------------------------------------------------------------- | -------------------------------------------- | | `logLevel` | Specify the level of logging. | `string` | -| `schedule` | Specify the interval to scrape in cron format. Defaults to every 60 minutes. | `string` | +| `schedule` | Specify the interval to scrape in cron format. Defaults to every 15 minutes. | `string` | | `retention` | Settings for retaining changes, analysis and scraped items | [`Retention`](/guide/config-db/concepts/retention) | | `kubernetes` | Specifies the list of Kubernetes configurations to scrape. | [`[]Kubernetes`](#kubernetes) | @@ -40,18 +39,7 @@ The `kubernetes` scraper collects all of the resources and events in a Kubernete description: 'Include resources only from this namespace', scheme: 'string' }, - { - field: 'useCache', - description: - 'Whether to use cache or not when fetching resources. Default: false', - scheme: 'bool' - }, - { - field: 'allowIncomplete', - description: - "whether to fail scrape if all api resources weren't fetched successfully", - scheme: 'bool' - }, + { field: 'scope', description: @@ -75,12 +63,6 @@ The `kubernetes` scraper collects all of the resources and events in a Kubernete description: 'Resources to be included e.g `status.Phase=Running`', scheme: 'string' }, - { - field: 'maxInflight', - description: - 'Concurrency level when resources are fetched one at a time.`', - scheme: 'int' - }, { field: 'kubeconfig', description: 'Kubeconfig to connect to the cluster', @@ -88,7 +70,7 @@ The `kubernetes` scraper collects all of the resources and events in a Kubernete }, { field: 'watch', - description: 'Kubeconfig to connect to the cluster', + description: 'List of resources to watch for real-time changes', scheme: '[`[]WatchSelector`]()' }, { @@ -111,7 +93,26 @@ The `kubernetes` scraper collects all of the resources and events in a Kubernete ### Event -`Kubernetes::Event` resources are mapped to config changes. Events can be very verbose so they can be excluded or their severity level changed: +`Kubernetes::Event` resources are mapped to config changes. 
Events can be verbose so they can be excluded or their severity level changed: + + +```yaml +spec: + kubernetes: + - event: + exclusions: + reason: + - SuccessfulCreate + - Created + - DNSConfigForming + severityKeywords: + error: + - failed + - error + warn: + - backoff + - nodeoutofmemory +``` | Field | Description | Scheme | Required | | ------------------ | ------------------------------------------------------------------------------------------ | --------------------------------------- | -------- | @@ -132,6 +133,8 @@ This allows near-real-time updates to your kubernetes catalogs with the flexibil This feature is enabled by default but can be disabled by setting the property `watch.disable=true`. +Kubernetes events automatically trigger a re-scrape of involved objects, so even though not all resources are watched by default, the vast majority of changes still reflect in real-time due to associated events that fire at the same time as the update. + #### Watch Selector ```yaml title="custom-watch-resources.yaml" @@ -397,3 +400,13 @@ spec: ## Performance +The scraper is highly reliant on the performance of the Kubernetes API server, and as such, it is recommended to run the scraper from within the cluster or as close as possible to the control pane. + + +:::warning Overloading the API Server +It is possible to overload the API server with too many requests, to reduce the load on the API Server: + +* Decentralize the scraper by running it on an agent, from-inside each cluster rather than remotely. +* Increase the `schedule` to `1h` or more, real-time updates still be recorded by Kubernetes events and informers. +* Filter out and exclude resources and events that have a high churn or verbosity + diff --git a/mission-control/docs/guide/config-db/tutorials/index.mdx b/mission-control/docs/guide/config-db/tutorials/index.mdx index 093f1df0..7cfc0b8d 100644 --- a/mission-control/docs/guide/config-db/tutorials/index.mdx +++ b/mission-control/docs/guide/config-db/tutorials/index.mdx @@ -1,5 +1,5 @@ --- title: Tutorials sidebar_custom_props: - icon: stash:graduation-cap-light + icon: learning --- diff --git a/mission-control/docs/guide/notifications/concepts/inhibitions.mdx b/mission-control/docs/guide/notifications/concepts/inhibitions.mdx deleted file mode 100644 index 672b70cb..00000000 --- a/mission-control/docs/guide/notifications/concepts/inhibitions.mdx +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: Inhibitions -sidebar_custom_props: - icon: shield ---- - -Notification inhibitions allow you to intelligently suppress certain notifications when other related alerts are already firing. This helps reduce notification fatigue and keeps communication channels focused on root causes rather than symptoms. - -In modern distributed systems, components are highly interconnected. When a critical component fails, it often causes failures in dependent components. Without inhibition, this can lead to: - -1. **Alert Storms** - Dozens or hundreds of alerts flooding your notification channels -2. **Alert Fatigue** - Team members becoming desensitized to alerts due to the volume -3. **Root Cause Obscurity** - The original cause of the issue being buried among symptom alerts -4. **Distracted Troubleshooting** - Time wasted investigating symptoms rather than root causes - -Notification inhibition solves these problems by intelligently suppressing secondary alerts while ensuring you're notified about the primary issue. 
- -```yaml file=/modules/mission-control/fixtures/notifications/deployment-with-inhibition.yaml -``` - -In this example: -- Notifications are configured for `config.unhealthy` events -- When a Kubernetes Pod becomes unhealthy and triggers a notification -- Any subsequent unhealthy notifications from related Deployment or ReplicaSet will be inhibited -- The inhibition traverses up 2 levels of relationships (Pod -> Deployment -> ReplicaSet) - -## How Inhibition Works - -When a resource triggers a notification, Mission Control checks if any related resources (based on configured inhibition rules) have already sent a notification within the **repeat interval** window. If a related notification is found, the current notification is suppressed and marked as "inhibited" in the system. - -:::important Relationship-Based Inhibition -Mission Control's inhibition system works **exclusively through resource relationships**. For a notification to inhibit another notification, the resources must have an established relationship in the system. - -For example: -- A Deployment is related to its ReplicaSets and Pods, so notifications about these resources can inhibit each other -- A Node is related to the Pods running on it -- One Deployment is generally not related to another Deployment, so they cannot inhibit each other - -This differs from systems like Alertmanager where inhibitions can be set up based on labels without requiring explicit relationships between the underlying resources. -::: - - -## Viewing Inhibited Notifications - -Inhibited notifications are not lost - they are recorded in the system with the status `inhibited` and include a reference to the parent notification that caused the inhibition. -This provides a complete audit trail while preventing notification spam. - -You can view inhibited notifications in the Mission Control UI, allowing you to understand the full scope of an incident. diff --git a/mission-control/docs/guide/permissions/index.mdx b/mission-control/docs/guide/permissions/index.mdx index 02224aca..7c5cc5b0 100644 --- a/mission-control/docs/guide/permissions/index.mdx +++ b/mission-control/docs/guide/permissions/index.mdx @@ -6,15 +6,15 @@ hide_title: true # hide_table_of_contents: true pagination_prev: guide/topology/index sidebar_custom_props: - icon: user + icon: shield-user --- ## Permission In Mission Control, Permission provides a flexible and robust security model that combines two powerful approaches: -Role-Based Access Control (RBAC) and Attribute-Based Access Control (ABAC). +Role-Based Access Control (RBAC) and Attribute-Based Access Control (ABAC). This unified permission system allows you to implement precise and granular access policies. -For instance, while RBAC can control whether a user can execute any playbooks in the system, +For instance, while RBAC can control whether a user can execute any playbooks in the system, more refined ABAC policies allow you to specify exactly which playbooks specific users or teams can access. Permissions are manageable via the UI as well as via CRDs. @@ -138,4 +138,4 @@ Permissions applied to the group are then inherited by the encompassed subjects. 
scheme: '`[]string`', }, ]} -/> \ No newline at end of file +/> diff --git a/mission-control/docs/guide/playbooks/actions/gitops.mdx b/mission-control/docs/guide/playbooks/actions/gitops.mdx index 44d41038..d74e3a55 100644 --- a/mission-control/docs/guide/playbooks/actions/gitops.mdx +++ b/mission-control/docs/guide/playbooks/actions/gitops.mdx @@ -9,51 +9,18 @@ import Templating from '@site/docs/reference/playbooks/context.mdx' # GitOps Action -GitOps action allows you to make commits and push to a remote repository. +The GitOps action creates commits and pushes changes to a git repository. -```yaml title="edit-kubernetes-manifests-gitops.yaml" -apiVersion: mission-control.flanksource.com/v1 -kind: Playbook -metadata: - name: edit-kubernetes-manifests-gitops -spec: - title: 'Edit Kustomize Resource' - icon: 'flux' - parameters: - - default: 'chore: update $(.config.type)/$(.config.name)' - label: 'Commit Message' - name: 'commit_message' - - default: '$(.config.config | toJSON | neat | json | toYAML)' - label: 'Changes' - name: 'yamlInput' - properties: - size: 'large' - type: 'code' - configs: - - labelSelector: 'kustomize.toolkit.fluxcd.io/name' - actions: - - name: 'Create Pull Request With Changes' - gitops: - repo: - url: 'https://github.com/flanksource/flux' - connection: 'connection://default/github' - base: 'main' - branch: 'edit-manifest-$(random.Alpha 8)' - commit: - author: '$(.user.name)' - email: '$(.user.email)' - message: '$(.params.commit_message)' - pr: - title: '$(.params.commit_message)' - patches: - - path: 'prod/kustomization.yaml' - yq: | - select( - .kind=="$(.config.config | jq `.kind`)" and - .metadata.name=="$(.config.config | jq `.metadata.name`)" - ) |= $(.params.yamlInput | yaml | toJSON) +Common use cases: + +* Enable developers to provision and manage Infrastructure as Code through a GUI interface, while DevOps and Platform engineers maintain their preferred tooling workflow +* Implement guardrail-driven access for developers to make infrastructure changes +* Use native support for Flux and Kustomization to automatically identify git repositories and files using `originAnnotations` + +```yaml title="edit-kubernetes-manifests-gitops.yaml" file=/modules/generated/playbooks/kustomize-edit.yaml ``` + +These env vars are extracted by traversing up the Flux Kustomization and Git Repository that created the config resource the playbook runs against. -These env vars are extracted by traversing up the Flux Kustomization and Git Repository that created the config resource the playbook is running against. We'll see this in detail shortly. - -To tag all the resources with that annotation, you'll need to add `originAnnotations` to the buildMetadata field as shown below: +To tag all the resources with that annotation, add `originAnnotations` to the `buildMetadata` field as shown below:
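For reference, the snippet referred to above is not included in this diff; a kustomize `kustomization.yaml` with origin annotations enabled looks roughly like the sketch below (the `resources` entries are placeholders):

```yaml
# kustomization.yaml (sketch): enable origin annotations so rendered resources
# carry a config.kubernetes.io/origin annotation pointing back to their source file
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - deployment.yaml
buildMetadata:
  - originAnnotations
```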
diff --git a/mission-control/docs/guide/playbooks/examples/index.mdx b/mission-control/docs/guide/playbooks/examples/index.mdx index c7d888eb..65f025d3 100644 --- a/mission-control/docs/guide/playbooks/examples/index.mdx +++ b/mission-control/docs/guide/playbooks/examples/index.mdx @@ -2,7 +2,7 @@ sidebar_position: 7 title: Examples sidebar_custom_props: - icon: stash:graduation-cap-light + icon: learning --- diff --git a/mission-control/docs/guide/topology/concepts/properties.mdx b/mission-control/docs/guide/topology/concepts/properties.mdx index b9b45cda..9e94b4d8 100644 --- a/mission-control/docs/guide/topology/concepts/properties.mdx +++ b/mission-control/docs/guide/topology/concepts/properties.mdx @@ -59,152 +59,4 @@ spec: This `config` object is used to find the config item to lookup a value from, if there are multiple matches, the first match is used. -### External Source Lookup - -Property values can be looked up from external sources (HTTP, SQL, Prometheus etc.) using the `lookup` field: - -```yaml title="order-api.yaml" -apiVersion: canaries.flanksource.com/v1 -kind: Topology -metadata: - name: order-api -spec: - components: - - name: Order API - properties: - - name: 3PL Status - lookup: - http: - - url: https://third-party-logistics.example.com/status - display: - expr: json.status - - name: Orders in past 24h - lookup: - postgres: - - connection: connection://orders/order-api-postgres - query: SELECT COUNT(*) FROM orders WHERE created_at > (NOW() - '24 hours'::INTERVAL) - display: - expr: results.rows[0].count -``` - -### Dynamically generating properties - -Properties' can also use `lookup` with `expr` field which returns a json object of `Property` or json list of `[]Property` - -```yaml title="order-api.yaml" -apiVersion: canaries.flanksource.com/v1 -kind: Topology -metadata: - name: order-api -spec: - components: - - name: Order API - properties: - - name: error_percentage - lookup: - postgresql: - - connection: connection://api/resources-postgres - query: SELECT key, value FROM resources, jsonb_each_text(labels) where id = '874a10a0-7728-4ee1-91fb-cc7dab0a6fb5' - display: - expr: | - dyn(results.rows).map(r, [ - {'name': r.key, 'text': r.value} - ]).toJSON() -``` - - -Properties' `expr` field can return separate properties for multiple components and they can then be joined using the component name - -```yaml title="order-api.yaml" -apiVersion: canaries.flanksource.com/v1 -kind: Topology -metadata: - name: test-topology-property-merge -spec: - schedule: "@every 10m" - components: - - name: RootComponents - type: virtual - icon: server - lookup: - http: - - url: https://httpbin.demo.aws.flanksource.com/status/200 - name: http-lookup - display: - expr: | - [ - { - 'name': 'component-a', - 'type': 'API', - 'properties': [{'name': 'error_percentage', 'min': 0, 'max': 100}, {'name': 'owner'}] - }, - { - 'name': 'component-b', - 'type': 'Frontend', - 'properties': [{'name': 'error_percentage', 'min': 0, 'max': 100}, {'name': 'owner'}] - }, - { - 'name': 'component-c', - 'type': 'Database', - 'properties': [{'name': 'error_percentage', 'min': 0, 'max': 100}, {'name': 'owner'}] - }, - ].toJSON() - properties: - # These get merged with the components - - name: error_percentage - lookup: - http: - - url: https://httpbin.demo.aws.flanksource.com/status/200 - name: error_percentage_lookup - display: - expr: | - [ - { - 'name': 'component-a', - 'properties': [{'name': 'error_percentage', 'value': 1}] - }, - { - 'name': 'component-b', - 'properties': [{'name': 'error_percentage', 
'value': 10}] - }, - { - 'name': 'component-c', - 'properties': [{'name': 'error_percentage', 'value': 50}] - }, - ].toJSON() - # These also get merged with the components - - name: owner - lookup: - http: - - url: https://httpbin.demo.aws.flanksource.com/status/200 - name: owner_lookup - display: - expr: | - [ - { - 'name': 'component-a', - 'properties': [{'name': 'owner', 'text': 'team-a'}] - }, - { - 'name': 'component-b', - 'properties': [{'name': 'owner', 'text': 'team-b'}] - }, - { - 'name': 'component-c', - 'properties': [{'name': 'owner', 'text': 'team-b'}] - }, - ].toJSON() - # These are applied to all the components - - name: generic - lookup: - http: - - url: https://httpbin.demo.aws.flanksource.com/status/200 - name: generic_lookup - display: - expr: | - [ - {'name': 'company', 'text': 'Acme'}, - {'name': 'location', 'text': 'Mars'}, - ].toJSON() -``` - +### diff --git a/mission-control/docs/guide/topology/examples/index.mdx b/mission-control/docs/guide/topology/examples/index.mdx index 1209a27e..b169d831 100644 --- a/mission-control/docs/guide/topology/examples/index.mdx +++ b/mission-control/docs/guide/topology/examples/index.mdx @@ -1,6 +1,8 @@ --- sidebar_position: 100 title: Examples +sidebar_custom_props: + icon: learning --- diff --git a/mission-control/docs/guide/topology/lookups/index.mdx b/mission-control/docs/guide/topology/lookups/index.mdx index 636dccb3..33716516 100644 --- a/mission-control/docs/guide/topology/lookups/index.mdx +++ b/mission-control/docs/guide/topology/lookups/index.mdx @@ -1,6 +1,8 @@ --- title: Lookups sidebar_position: 3 +sidebar_custom_props: + icon: database-search-o --- diff --git a/mission-control/docs/installation/_aws_iam.mdx b/mission-control/docs/installation/_aws_iam.mdx index 2d97baf2..17bbd5ab 100644 --- a/mission-control/docs/installation/_aws_iam.mdx +++ b/mission-control/docs/installation/_aws_iam.mdx @@ -12,7 +12,7 @@ Depending on how you want to use Mission Control you need to create an IAM role
-You can also create a new policy with just the permissions required by Mission Control +You can also create a new policy with only the permissions required by Mission Control ```json title="iam-policy.json" { diff --git a/mission-control/docs/installation/local-testing.md b/mission-control/docs/installation/local-testing.md index 08f2a626..1999c816 100644 --- a/mission-control/docs/installation/local-testing.md +++ b/mission-control/docs/installation/local-testing.md @@ -1,6 +1,8 @@ --- title: Local Testing description: Run Mission Control Locally using minikube or kind +sidebar_custom_props: + icon: lab --- import Tabs from '@theme/Tabs'; @@ -42,7 +44,7 @@ nodes: protocol: TCP ``` -A single node cluster will be provisioned, hosting both the control plane and workloads. Configure the hostPort bindings onto free ports, in this case `8080` and `8443` are used. +A single node cluster is provisioned, hosting both the control plane and workloads. Configure the `hostPort` bindings onto free ports, in this case `8080` and `8443` Provision the kind cluster with @@ -60,9 +62,7 @@ kubectl get nodes -Install [nginx](https://github.com/kubernetes/ingress-nginx) ingress controller with: - -The Kubernetes Nginx Ingress Controller maintains a kind-compatible manifest - deploy this to the cluster using: +Install [ingress-nginx](https://github.com/kubernetes/ingress-nginx) controller with: ```bash kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/kind/deploy.yaml @@ -101,14 +101,14 @@ minikube addons enable ingress -[nip.io](http://nip.io) is simple wildcard DNS server that returns the ip provided in the host name, e.g. +[nip.io](http://nip.io) is a wildcard DNS server that returns the ip provided in the host name, e.g. ```bash ❯ nslookup 127.0.0.1.nip.io Address: 127.0.0.1 ``` -By using nip you will be able to access mission-control without any further networking / configuration setup. +By using nip you can access mission-control without any further networking / configuration setup. ```yaml title="values.yaml" global: @@ -155,7 +155,7 @@ See [values.yaml](/installation/helm#self-hosted) for more options. -The default username is `admin@local` and the password can be retrived with: +The default username is `admin@local` and the password can be retrieved with: ``` kubectl get secret mission-control-admin-password \ @@ -163,10 +163,10 @@ kubectl get secret mission-control-admin-password \ --template='{{.data.password | base64decode}}' ``` -You can then goto [https://127.0.0.1.nip.io:8443](https://127.0.0.1.nip.io:8443) to login. +You can then go to [https://127.0.0.1.nip.io:8443](https://127.0.0.1.nip.io:8443) to login. :::info Self-Signed Certificate -This example uses a self-signed certificate created by Nginx, We recommend using [cert-manager.io](https://cert-manager.io/). +This example uses a self-signed certificate created by nginx, We recommend using [cert-manager.io](https://cert-manager.io/). 
::: @@ -193,6 +193,6 @@ and apply to the cluster with: kubectl apply -f canaries.yaml ``` -When you goto the [Health](https://127.0.0.1.nip.io:8443/health) tab you can then see the check running: +Navigate to the [Health](https://127.0.0.1.nip.io:8443/health) tab you can then see the check running: diff --git a/mission-control/docs/installation/saas/agent.mdx b/mission-control/docs/installation/saas/agent.mdx index cde39f37..8085c071 100644 --- a/mission-control/docs/installation/saas/agent.mdx +++ b/mission-control/docs/installation/saas/agent.mdx @@ -1,7 +1,12 @@ --- title: Agent Installation +sidebar_custom_props: + icon: server --- +import ReactMarkdown from 'react-markdown' +import Link from '@docusaurus/Link'; + :::info Prerequisites To install and run the Mission Control agent you need to have the following prerequisites: @@ -19,13 +24,16 @@ import OpenAPI from '@site/src/components/OpenAPI' The recommended way of installing an agent is generating the Helm/Flux install script on the UI: + + -1. Navigate to **Settings** --> **Agents** -2. Click on the button + +1. Navigate to {props.saas && app.flanksource.com/settings/agents}{!props.saas && <>SettingsAgents} - Enter the following: + +2. Click on the button, and enter in the dialog: * **clusterName** - * Toggle **Kubernetes** to automatically scrape the cluster the agent is installed in, you can skip this step and perform it later by installing the [chart](/integrations/kubernets/catalog) + * Toggle **Kubernetes** to automatically scrape the cluster the agent is installed in, you can skip this step and perform it later by installing the [chart](/integrations/kubernetes/catalog)

3. Click **Next** @@ -33,12 +41,11 @@ The recommended way of installing an agent is generating the Helm/Flux install s -4. Alternatively if you are installing the agent in multiple locations you can reuse the same token generated - - - + + ", "upstream.agent": "YOUR_LOCAL_NAME", @@ -48,37 +55,53 @@ The recommended way of installing an agent is generating the Helm/Flux install s }} /> + + + ", + "upstream.agent": "YOUR_LOCAL_NAME", + "upstream.username": "token", + "upstream.password": "", + "upstream.host": "" + }} + + /> + + -:::info Externalize the token -We recommend that the upstream token be stored separately and encrypted using sops or similar -1. Create a new secret called `mission-control-upstream` - title=secret.yaml - apiVersion: v1 - kind: Secret - metadata: - name: upstream - stringData: - UPSTREAM_HOST: "" - UPSTREAM_USER: token - UPSTREAM_PASSWORD: "" - AGENT_NAME: "YOUR_LOCAL_NAME" +

+:::info Encrypting the Token  We recommend that the upstream token be stored separately and encrypted using sops or a similar tool. +1. Create a new secret called `mission-control-upstream`: + ```yaml title=secret.yaml + apiVersion: v1 + kind: Secret + metadata: + name: mission-control-upstream + stringData: + UPSTREAM_HOST: "" + UPSTREAM_USER: token + UPSTREAM_PASSWORD: "" + AGENT_NAME: "YOUR_LOCAL_NAME" + ```
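   For example, the secret can be encrypted in place with sops before it is committed. A minimal sketch, assuming age is used for key management (the recipient and file names below are placeholders):

   ```bash
   # Illustrative .sops.yaml rule: encrypt only data/stringData so the rest of the manifest stays reviewable
   cat > .sops.yaml <<'EOF'
   creation_rules:
     - path_regex: secret\.yaml$
       encrypted_regex: ^(data|stringData)$
       age: age1examplepublickeyplaceholder
   EOF

   # Encrypt the secret in place before committing it
   sops --encrypt --in-place secret.yaml
   ```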

2. Update the chart values: - title=values.yaml + + ```yaml title=values.yaml upstream: createSecret: false secretName: mission-control-upstream + ``` ::: - -### values.yaml - - diff --git a/mission-control/docs/installation/saas/eks.mdx b/mission-control/docs/installation/saas/eks.mdx index 215bbedb..d92041f5 100644 --- a/mission-control/docs/installation/saas/eks.mdx +++ b/mission-control/docs/installation/saas/eks.mdx @@ -1,6 +1,8 @@ --- title: AWS EKS slug: installation/eks +sidebar_custom_props: + icon: aws-eks-cluster --- import Tabs from '@theme/Tabs' @@ -28,6 +30,19 @@ To install and run Mission Control you need to have the following prerequisites: }} /> + ## Next Steps -Install the [AWS](/registry/aws) registry chart to configure the AWS Scraper + + + + + + + + + + + + + diff --git a/mission-control/docs/installation/saas/getting-started.mdx b/mission-control/docs/installation/saas/getting-started.mdx index 1734a2ce..0985f610 100644 --- a/mission-control/docs/installation/saas/getting-started.mdx +++ b/mission-control/docs/installation/saas/getting-started.mdx @@ -7,30 +7,13 @@ sidebar_custom_props: --- +import Agent from './agent.mdx' import AgentToken from '@site/docs/partials/_agent_token.mdx' -export const toc = [{ - value: "Signup", - id: "signup", - level: 2, - }, - { - value: "Agent Installation", - id: "agent-installation", - level: 2, - }, - - { - value: "Technology Bundles", - id: "registry", - level: 2, - } - -] When using the Mission Control SaaS the agent based approach is recommended for ingesting data, The agent is headless installation of mission-control that caches data locally in a postgres database and replicates it to the SaaS. - + The agent based approach has the following benefits: @@ -53,10 +36,12 @@ See [Installation](/installation) for other deployment models including [Self Ho Organization details and members can be changed by going to [accounts.flanksource.com/organization](https://accounts.flanksource.com/organization)
or Clicking on **Manage Organization** when logged in ::: -4. Install an Agent -## Agent Installation +### Agent Installation +Next an agent needs to deployed to scrape resources and execute playbooks: + + diff --git a/mission-control/docs/installation/self-hosted/database.md b/mission-control/docs/installation/self-hosted/database.md index b871bf77..6b2ec960 100644 --- a/mission-control/docs/installation/self-hosted/database.md +++ b/mission-control/docs/installation/self-hosted/database.md @@ -1,6 +1,8 @@ --- title: Database description: Alternative methods for connecting to the db used for persistence +sidebar_custom_props: + icon: postgres --- Mission Control stores all state in a Postgres Database, by default a Postgres StatefulSet is created. diff --git a/mission-control/docs/installation/self-hosted/eks.mdx b/mission-control/docs/installation/self-hosted/eks.mdx index 91453258..5401f266 100644 --- a/mission-control/docs/installation/self-hosted/eks.mdx +++ b/mission-control/docs/installation/self-hosted/eks.mdx @@ -1,5 +1,7 @@ --- title: AWS EKS +sidebar_custom_props: + icon: aws-eks-cluster --- import AwsIam from "../_aws_iam.mdx" @@ -23,7 +25,20 @@ To install and run a self-hosted Mission Control on AWS EKS you need to have the }}/> - ## Next Steps -Install the [AWS](/registry/aws) registry chart to configure the AWS Scraper + + + + + + + + + + + + + + + diff --git a/mission-control/docs/installation/self-hosted/getting-started.mdx b/mission-control/docs/installation/self-hosted/getting-started.mdx index eb47a40d..73f7886e 100644 --- a/mission-control/docs/installation/self-hosted/getting-started.mdx +++ b/mission-control/docs/installation/self-hosted/getting-started.mdx @@ -3,74 +3,55 @@ title: Getting Started show_title: true sidebar_custom_props: icon: getting-started - +pagination_next: installation/self-hosted/database sidebar_position: 0 --- - import AdminPassword from '@site/docs/partials/_admin-password.mdx' import Helm from "@site/src/components/Helm" -export const toc = [ - { - value: 'Prerequisites', - id: 'prerequisites', - level: 2 - }, - { - value: 'Step 1: Install Helm Repository', - id: 'step-1-install-helm-repository', - level: 2 - }, - { - value: 'Step 2: Install Helm Chart', - id: 'helm-chart', - level: 2 - }, - { - value: 'Cert Manager', - id: 'cert-manager', - level: 3 - }, - { - value: 'Optional Steps', - id: 'optional-steps', - level: 2 - }, - { - value: 'Step 3: Configure Email (SMTP)', - id: 'smtp', - level: 3 - }, - { - value: 'Step 4: Single Sign On', - id: 'sso', - level: 3 - }, - { - value: 'Step 5: External Database', - id: '-database', - level: 3 - } -] - - - -This tutorial guides you through setting up and configuring a self-hosted Mission Control environment. + +Mission Control is an internal developer platform built for operations. Built with a "self-hosted first" approach, Mission Control gives you complete control over your deployment environment while offering enterprise-grade features. + +## Self-Hosted Deployment + +When you self-host Mission Control, you maintain full ownership of your infrastructure and data while enjoying the same robust feature set available in the SaaS offering. In fact, some advanced features are exclusively available in the self-hosted version. + + + +### Why Self-Host Mission Control? 
+ +**Key Benefits** +- **Infrastructure Flexibility**: Deploy on your preferred infrastructure - whether on-premise, AWS, GCP, Azure, or any other Kubernetes environment +- **Data Sovereignty**: Keep all your monitoring and operational data within your own environment +- **Security Control**: Leverage your existing security practices and compliance frameworks +- **Cost Efficiency**: Optimize resource allocation and scaling based on your specific needs +- **Customization**: Tailor the platform to integrate with your specific infrastructure components + +**Implementation Considerations** +- Configuration and management of authentication (SSO) is required +- Database management responsibilities remain with your team +- Updates and maintenance follow standard Kubernetes patterns +
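As a rough illustration of what "standard Kubernetes patterns" means in practice, upgrades are typically just a Helm upgrade. The chart name, namespace, and version pin below are assumptions; adapt them to your install:

```bash
# Illustrative upgrade flow — assumes the chart repo was added during the initial install
helm repo update
helm upgrade mission-control flanksource/mission-control \
  --namespace mission-control \
  --version <pinned-version> \
  --reuse-values
```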


+ + + +This guide walks you through the complete setup process for your self-hosted Mission Control environment. + :::info Prerequisites To install and run Mission Control you need the following: - Kubernetes 1.26+ with an Ingress Controller - [cert-manager.io](https://cert-manager.io/docs/) or an existing TLS secret for ingress -- 1 - 2 CPUs and 6-8GB of Memory (2-4GB if using an external DB) +- 1 - 2 CPUs and 6-8GB of Memory (2-4GB if using an external DB) - Persistent Volumes with 20GB+ of storage or an external postgres database - (Optional) [prometheus operator](https://prometheus-operator.dev/) - (Optional) SMTP Server (For sending notifications and invites) ::: --- 1. Choose a routable `DOMAIN` for Mission Control +

> See [Ingress](/reference/helm/mission-control#ingress) for more options on configuring the ingress including generating certs with cert-manager >

See [Local Testing](../local-testing) for testing using a kind or minikube without a routable domain
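For orientation, a minimal sketch of wiring the chosen domain into the install follows; the Helm repo URL and the value key used for the domain are assumptions, so defer to the Ingress reference linked above and the chart's own values:

```bash
# Illustrative only — substitute your routable domain; the repo URL and value key are assumptions
export DOMAIN=mission-control.example.com

helm repo add flanksource https://flanksource.github.io/charts
helm repo update
helm install mission-control flanksource/mission-control \
  --namespace mission-control --create-namespace \
  --set global.ui.host=$DOMAIN
```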

@@ -88,50 +69,17 @@ To install and run Mission Control you need the following: 2. Login @ https://DOMAIN/ -## Optional Steps - - - -An SMTP server is required for sending notifications, approvals, user invites and password resets. - -The format of `connection_uri` is `smtp|smtps://USER:PASS@host:PORT[?param=value]` - -Use `smtps` for implicit TLS sessions or `smtp` for explicit StartTLS/cleartext sessions. - -| Parameter | Description | Default | -| --- | --- | --- | -| `disable_starttls` | When using `smtp` scheme, set to `true` to allow cleartext sessions or `false` to enforce StartTLS | `false` | -| `skip_ssl_verify` | Set to `true` to allow self-signed TLS certificates or `false` to enforce certificate verification. Applies to both implicit and explicit TLS sessions | `false` | - +## Next Steps -```yaml title="values.yaml" -kratos: - kratos: - config: - courier: - smtp: - connection_uri: # smtp://user:pass@localhost:25 - from_address: noreply@ - from_name: Mission Control - # These headers will be passed in the SMTP conversation - # e.g. when using the AWS SES SMTP interface for cross-account sending. - headers: {} - # Identifier used in the SMTP HELO/EHLO command. - # Some SMTP relays require a unique identifier. - local_name: "" -``` + -See also [HTTP Webhooks](https://www.ory.sh/docs/kratos/self-hosted/email-http) for sending emails. - - + -See [SSO](./sso) + + - - + -See [Database](./database) to configure an external database such as AWS RDS or Google Cloud SQL, or to optimize the bundled PostgreSQL settings. - diff --git a/mission-control/docs/installation/self-hosted/oidc.mdx b/mission-control/docs/installation/self-hosted/oidc.mdx index bb1dd4b9..2316c84a 100644 --- a/mission-control/docs/installation/self-hosted/oidc.mdx +++ b/mission-control/docs/installation/self-hosted/oidc.mdx @@ -1,6 +1,8 @@ --- title: Single Sign On (SSO) slug: sso +sidebar_custom_props: + icon: jwt --- import Properties from '../_properties.mdx' @@ -71,17 +73,17 @@ See [Providers](https://www.ory.sh/docs/kratos/social-signin/overview) more deta ```

-5. Optionally, create a cel expression to map identities from the OIDC provider to a mission control role & team. +5. Optionally, create a cel expression to map identities from the OIDC provider to a mission control role & team. The following script maps all Azure users in the `SRE` group to the `admin` role and everyone else to the `viewer` role. ```yaml apiVersion: v1 - kind: ConfigMap - metadata: + kind: ConfigMap + metadata: name: azure-identity-mapper data: script: > - { + { "role": "sre" in identity.traits.groups ? "admin": "viewer" }.toJSON() ``` diff --git a/mission-control/docs/installation/self-hosted/smtp.mdx b/mission-control/docs/installation/self-hosted/smtp.mdx new file mode 100644 index 00000000..4cb19724 --- /dev/null +++ b/mission-control/docs/installation/self-hosted/smtp.mdx @@ -0,0 +1,36 @@ +--- +title: SMTP / Email +sidebar_custom_props: + icon: email +--- +An SMTP server is required for sending notifications, approvals, user invites and password resets. + +The format of `connection_uri` is `smtp|smtps://USER:PASS@host:PORT[?param=value]` + +Use `smtps` for implicit TLS sessions or `smtp` for explicit StartTLS/cleartext sessions. + +| Parameter | Description | Default | +| --- | --- | --- | +| `disable_starttls` | When using `smtp` scheme, set to `true` to allow cleartext sessions or `false` to enforce StartTLS | `false` | +| `skip_ssl_verify` | Set to `true` to allow self-signed TLS certificates or `false` to enforce certificate verification. Applies to both implicit and explicit TLS sessions | `false` | + + +```yaml title="values.yaml" +kratos: + kratos: + config: + courier: + smtp: + connection_uri: # smtp://user:pass@localhost:25 + from_address: noreply@ + from_name: Mission Control + # These headers will be passed in the SMTP conversation + # e.g. when using the AWS SES SMTP interface for cross-account sending. + headers: {} + # Identifier used in the SMTP HELO/EHLO command. + # Some SMTP relays require a unique identifier. + local_name: "" +``` + +See also [HTTP Webhooks](https://www.ory.sh/docs/kratos/self-hosted/email-http) for sending emails. + diff --git a/mission-control/docs/integrations/aws/getting-started.md b/mission-control/docs/integrations/aws/getting-started.md index 34c299dd..00be37b9 100644 --- a/mission-control/docs/integrations/aws/getting-started.md +++ b/mission-control/docs/integrations/aws/getting-started.md @@ -7,7 +7,7 @@ sidebar_custom_props: import Schema from '@site/modules/mission-control-registry/charts/aws/values.schema.json' -The AWS chart catalog [scraper](/guide/config-db/scrapers/aws) that: +Installs a [catalog scraper](/guide/config-db/scrapers/aws) that: - Scrapes AWS Resources and detects changes in the resource definition - Ingests changes from CloudTrail @@ -41,7 +41,7 @@ For Cost & Usage Reporting 1. Create a new connection for an [AWS Access Key](/integrations/aws/iam?type=accessKey) -1. Install the [mission-control-aws](https://artifacthub.io/packages/helm/flanksource/mission-control-aws) chart +1. 
Install the [mission-control-aws](https://artifacthub.io/packages/helm/flanksource/mission-control-aws) chart - Aggregrate Alarms + Aggregate Alarms diff --git a/mission-control/docs/reference/image-variants.md b/mission-control/docs/reference/image-variants.md index c2709021..f78e1dad 100644 --- a/mission-control/docs/reference/image-variants.md +++ b/mission-control/docs/reference/image-variants.md @@ -1,5 +1,7 @@ --- title: Image Variants +sidebar_custom_props: + icon: settings --- Canary checker comes with 2 image variants: diff --git a/mission-control/docs/reference/notifications/_notification.mdx b/mission-control/docs/reference/notifications/_notification.mdx index 0d5f6066..7ca3d7c2 100644 --- a/mission-control/docs/reference/notifications/_notification.mdx +++ b/mission-control/docs/reference/notifications/_notification.mdx @@ -60,11 +60,6 @@ description: "Group notifications that are in waiting stage based on labels, tags and attributes. Only applicable when `waitFor` is provided. See [Grouping attributes](../../guide/notifications/concepts/wait-for#grouping-notifications)", scheme: "[]string" }, - { - field: "inhibitions", - description: "Controls notification suppression for related resources. Uses the repeat interval as the window for suppression as well as the wait for period.", - scheme: "[[]NotificationInhibition](#notification-inhibition)" - }, { field: "title", description: "Channel dependent e.g. subject for email", @@ -105,36 +100,3 @@ :::info Single Recipient Only one recipient can be specified ::: - -## Notification Inhibition - - diff --git a/mission-control/docs/reference/permissions/index.mdx b/mission-control/docs/reference/permissions/index.mdx index 2b3bf1f1..638fa326 100644 --- a/mission-control/docs/reference/permissions/index.mdx +++ b/mission-control/docs/reference/permissions/index.mdx @@ -1,6 +1,8 @@ --- title: Permissions sidebar_position: 13 +sidebar_custom_props: + icon: shield-user --- diff --git a/mission-control/docs/reference/resource-selector.md b/mission-control/docs/reference/resource-selector.md deleted file mode 100644 index 0fc824d8..00000000 --- a/mission-control/docs/reference/resource-selector.md +++ /dev/null @@ -1,144 +0,0 @@ ---- -title: Resource Selectors -sidebar_position: 2 -sidebar_custom_props: - icon: stash:search-box-light ---- - -# Resource Selectors - -Resource Selectors are used in multiple places including: - -- Attaching components to a topology -- Creating relationships between configs and configs/components -- Finding resources to run health checks or playbooks on - -| Field | Description | Scheme | Required | -| --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------- | -------- | -| `id` | ID of the component | `string` | No | -| `name` | Name of the component/config | `string` | No | -| `namespace` | Select resources in this namespace only, if empty find resources in all namespaces | `string` | No | -| `types` | Match any of the types specified | `[]string` | No | -| `statuses` | Match any of the statuses specified | `[]string` | No | -| `labelSelector` | Kubernetes Style Label Selector | [LabelSelector](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) | No | -| `fieldSelector` | Kubernetes Style Field Selector Property fields of the component in kubernetes format (or 
database columns: owner, topology_id, parent_id) | [FieldSelector](https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/) | No | -| `agent` | Select resources created on this agent, Defaults to `local` | `uuid`, `{name}`, `local` or `all` | No | -| `cache` | Cache settings to use for the results, expensive selectors or selectors that are are use very often should be cached for longer periods. Defaults to `max-age=10m` | `no-cache`, `no-store` or `max-age={duration}` | No | -| `search` | Search for resources via key value pairs using parsing expression grammar | `string` | No | - -## Search - -The query syntax is `field1=value1 field2>value2 field3=value3* field4=*value4`. `*` is for prefix and suffix matching. - -Supported operators: - -| Operator | Syntax | Types | -| -------- | -------------------------------- | --------------------- | -| `=` | `field=value` | `string` `int` `json` | -| `!=` | `field!=value` | `string` `int` `json` | -| `*` | `field=*value` or `field=value*` | `string` `int` | -| `>` `<` | `field>value` or `fieldnow-24h - - - name: All components updated between a specific interval - selectors: - - search: updated_at>2024-10-10 updated_at<2024-10-17 - - - name: Component with name httpbin-service - # Not giving any key will do a name lookup (ie name=httpbin-service) - selectors: - - search: httpbin-service - - - name: Components with label cluster - # JSON lookups are also supported - selectors: - - search: labels.cluster=prod - - - name: Link configs which have logistics-api image - configs: - - search: config.spec.template.spec.containers[0].name=docker.io/example/logistics-api:latest -``` diff --git a/mission-control/docs/reference/resource-selector.md b/mission-control/docs/reference/resource-selector.md new file mode 120000 index 00000000..d632687d --- /dev/null +++ b/mission-control/docs/reference/resource-selector.md @@ -0,0 +1 @@ +../snippets/_resource-selector.md \ No newline at end of file diff --git a/mission-control/docs/reference/types.md b/mission-control/docs/reference/types.md deleted file mode 100644 index a27944d2..00000000 --- a/mission-control/docs/reference/types.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -hide_title: true -title: Common Types -sidebar_position: 2 -sidebar_custom_props: - icon: fluent:library-16-regular ---- - -# Common Types - -This document provides a reference for common types used in the configuration and operation of the system. - -## Agent - -An agent can be specified using: - -- `local`: The primary mission control instance. -- `uuid`: The UUID of an agent. -- `name`: The name of an agent. -- `all`: Match all/any agents. 
- -## Cron - -``` -# ┌───────────── minute (0–59) -# │ ┌───────────── hour (0–23) -# │ │ ┌───────────── day of the month (1–31) -# │ │ │ ┌───────────── month (1–12) -# │ │ │ │ ┌───────────── day of the week (0–6) (Sunday to Saturday) -# │ │ │ │ │ -# │ │ │ │ │ -# │ │ │ │ │ - 0 * * * * -``` - -| Shortcut | Description | Equivalent | -| ------------------------------ | ---------------------------------------------------------- | ----------- | -| `@every` [Duration](#duration) | e.g., `@every 5m` | | -| `@yearly` (or `@annually`) | Run once a year at midnight of 1 January | `0 0 1 1 *` | -| `@monthly` | Run once a month at midnight of the first day of the month | `0 0 1 * *` | -| `@weekly` | Run once a week at midnight on Sunday | `0 0 * * 0` | -| `@daily` (or `@midnight`) | Run once a day at midnight | `0 0 * * *` | -| `@hourly` | Run once an hour at the beginning of the hour | `0 * * * *` | - -## Duration - -Valid time units are `s`, `m`, `h`, `d`, `w`, `y`. For example: - -- `1m15s` -- `1h5m` -- `23h` -- `1d8h` -- `1w6d8h` -- `19w0d8h` - -## Size - -Sizes are strings with a unit suffix, e.g., `100`, `100b`, `10mb`. Valid size units are `kb`, `mb`, `gb`, `tb`. - -## Icon - -One of the icons in the [flanksource-icons](https://github.com/flanksource/flanksource-icons/tree/main/svg) project or a URL to an image. - -e.g. - -- `kubernetes` -- `Kubernetes::Pod` -- `argo` -- `aws-ebs-volume` - -Use the picker below to search for icons: - - - -## Match Pattern - -Pattern matching supports the following operations: - -- `*` - Match anything -- `Added,Deleted` - Match either `Added` or `Deleted` -- `Added*`: Match anything starting with `Added`. -- `*Terminated`: Match anything ending with `Terminated`. -- `!PodCrashLooping`: Match everything except `PodCrashLooping`. 
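These common types are referenced throughout the specs (the file above is being replaced by a shared snippet rather than removed). As a quick illustration of the Cron shortcut in context, a minimal canary-checker style spec might look like the sketch below; only `schedule`, `name`, and `url` are intended literally, the rest is boilerplate:

```yaml
# Sketch: runs an HTTP check every 5 minutes using the @every Cron shortcut
apiVersion: canaries.flanksource.com/v1
kind: Canary
metadata:
  name: http-check
spec:
  schedule: "@every 5m"
  http:
    - name: example-health
      url: https://example.com/health
```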
diff --git a/mission-control/docs/reference/types.md b/mission-control/docs/reference/types.md new file mode 120000 index 00000000..00c9c871 --- /dev/null +++ b/mission-control/docs/reference/types.md @@ -0,0 +1 @@ +../snippets/_types.md \ No newline at end of file diff --git a/mission-control/package-lock.json b/mission-control/package-lock.json index e00e061e..20accccf 100644 --- a/mission-control/package-lock.json +++ b/mission-control/package-lock.json @@ -11,7 +11,7 @@ "@docusaurus/core": "3.7.0", "@docusaurus/plugin-client-redirects": "3.7.0", "@docusaurus/preset-classic": "3.7.0", - "@flanksource/icons": "^1.0.32", + "@flanksource/icons": "^1.0.34", "@floating-ui/react": "^0.26.28", "@iconify/react": "^5.1.0", "@mdx-js/react": "^3.0.0", @@ -3715,9 +3715,9 @@ } }, "node_modules/@flanksource/icons": { - "version": "1.0.32", - "resolved": "https://registry.npmjs.org/@flanksource/icons/-/icons-1.0.32.tgz", - "integrity": "sha512-0f6P+CBzcc90O16KUuPDeie1PBg91teeusFaXBLK+Dl+G9Hb4iF7mvkjnHSXy/daABY51Vh5H++GhDHl4Ni7NQ==", + "version": "1.0.34", + "resolved": "https://registry.npmjs.org/@flanksource/icons/-/icons-1.0.34.tgz", + "integrity": "sha512-U9WVAOM06FfE+HtnmSkBzUi0QPn9M6In9Z2JzPi8IDQ4bpbV6747UbV9NOJ5LRbqdaOVKKxHfP9BLxsRU8gXBg==", "peerDependencies": { "react": "*" } @@ -24826,9 +24826,9 @@ "dev": true }, "@flanksource/icons": { - "version": "1.0.32", - "resolved": "https://registry.npmjs.org/@flanksource/icons/-/icons-1.0.32.tgz", - "integrity": "sha512-0f6P+CBzcc90O16KUuPDeie1PBg91teeusFaXBLK+Dl+G9Hb4iF7mvkjnHSXy/daABY51Vh5H++GhDHl4Ni7NQ==", + "version": "1.0.34", + "resolved": "https://registry.npmjs.org/@flanksource/icons/-/icons-1.0.34.tgz", + "integrity": "sha512-U9WVAOM06FfE+HtnmSkBzUi0QPn9M6In9Z2JzPi8IDQ4bpbV6747UbV9NOJ5LRbqdaOVKKxHfP9BLxsRU8gXBg==", "requires": {} }, "@floating-ui/core": { diff --git a/mission-control/package.json b/mission-control/package.json index c9a7e60c..8e1db754 100644 --- a/mission-control/package.json +++ b/mission-control/package.json @@ -17,7 +17,7 @@ "@docusaurus/core": "3.7.0", "@docusaurus/plugin-client-redirects": "3.7.0", "@docusaurus/preset-classic": "3.7.0", - "@flanksource/icons": "^1.0.32", + "@flanksource/icons": "^1.0.34", "@floating-ui/react": "^0.26.28", "@iconify/react": "^5.1.0", "@mdx-js/react": "^3.0.0", diff --git a/mission-control/sidebars.js b/mission-control/sidebars.js index 6ffb2c23..94057df8 100644 --- a/mission-control/sidebars.js +++ b/mission-control/sidebars.js @@ -76,6 +76,9 @@ const sidebars = { { type: 'category', label: 'SaaS', + customProps: { + icon: 'cloud-done' + }, link: { type: 'doc', id: 'installation/saas/getting-started', @@ -88,12 +91,6 @@ const sidebars = { id: 'installation/saas/getting-started', }, - { - type: 'doc', - id: 'installation/saas/eks', - label: 'AWS EKS' - }, - { type: 'doc', id: 'installation/saas/agent', @@ -103,19 +100,43 @@ const sidebars = { { type: 'doc', id: 'installation/saas/kubectl', + customProps: { + icon: 'console' + }, label: 'Kubectl Access' }, + { + type: 'html', + value: '

' + }, + { + type: 'doc', + id: 'installation/saas/eks', + customProps: { + icon: 'aws-eks-cluster' + }, + label: 'AWS EKS' + }, + + ] }, { type: 'category', label: 'Self-Hosted', + customProps: { + icon: 'data-center' + }, + link: { + type: 'doc', + id: 'installation/self-hosted/getting-started', + }, items: [ { type: 'doc', id: 'installation/self-hosted/getting-started', - label: 'Installation' + label: 'Getting Started' }, { type: 'doc', @@ -128,6 +149,15 @@ const sidebars = { label: 'SSO (OIDC)' }, + { + type: 'doc', + id: 'installation/self-hosted/smtp', + label: 'Email' + }, + { + type: 'html', + value: '
' + }, { type: 'doc', id: 'installation/self-hosted/eks', diff --git a/mission-control/static/img/add-agent.png b/mission-control/static/img/add-agent.png new file mode 100644 index 00000000..16502afd Binary files /dev/null and b/mission-control/static/img/add-agent.png differ diff --git a/modules/Makefile b/modules/Makefile index 309768fd..89dda592 100644 --- a/modules/Makefile +++ b/modules/Makefile @@ -2,8 +2,6 @@ OS ?= $(shell uname -s | tr '[:upper:]' '[:lower:]') ARCH ?= $(shell uname -m | sed 's/x86_64/amd64/') - - .PHONY: all all: sync manifests @@ -13,7 +11,7 @@ sync: .PHONY: manifests -manifests: .bin/helm +manifests: .bin/helm .bin/yq rm -rf rendered-manifests ./make.sh mission-control-registry/charts/playbooks-ai "--set slack.connection=connection://mission-control/slack --set global.llm_connection=connection://mission-control/anthropic" ./make.sh mission-control-registry/charts/playbooks-kubernetes @@ -26,3 +24,7 @@ manifests: .bin/helm .bin/helm: .bin wget -nv https://get.helm.sh/helm-v3.17.2-$(OS)-$(ARCH).tar.gz -O .bin/helm.tar.gz tar -xzf .bin/helm.tar.gz -C .bin --strip-components 1 + +.bin/yq: .bin + wget -nv https://github.com/mikefarah/yq/releases/download/v4.45.1/yq_$(OS)_$(ARCH) -O .bin/yq + chmod +x .bin/yq diff --git a/modules/canary-checker b/modules/canary-checker index 98f44997..9ecc3c78 160000 --- a/modules/canary-checker +++ b/modules/canary-checker @@ -1 +1 @@ -Subproject commit 98f4499797db768618f23c768b65b133bb70eb51 +Subproject commit 9ecc3c78d47ccf2d4a6f2a7c87d5b0c57529c659 diff --git a/modules/config-db b/modules/config-db index fa19e22d..2dac1134 160000 --- a/modules/config-db +++ b/modules/config-db @@ -1 +1 @@ -Subproject commit fa19e22ddf2934bc1e08ca1a15df6a9e4ae2263e +Subproject commit 2dac1134c5812bd3d6ab358698af5185281d88d7 diff --git a/modules/duty b/modules/duty index 739ee879..df4fb8cc 160000 --- a/modules/duty +++ b/modules/duty @@ -1 +1 @@ -Subproject commit 739ee8791110a4d39add553f9ecd0219cc1c2afe +Subproject commit df4fb8cc3fffa4a3cda7aaf4771a7917a30607ed diff --git a/modules/make.sh b/modules/make.sh index 31be2ed7..c5d97147 100755 --- a/modules/make.sh +++ b/modules/make.sh @@ -13,7 +13,7 @@ CHART_NAME=$1 shift HELM_ARGS=$@ OUTPUT_DIR="generated/playbooks" -HELM=.bin/helm +export PATH=.bin:$PATH # Create output directory mkdir -p "$OUTPUT_DIR" @@ -22,8 +22,7 @@ echo "Rendering Helm chart: $CHART_NAME" echo "Output directory: $OUTPUT_DIR" # Use helm template to render the chart and pipe to yq -$HELM template "$CHART_NAME" $HELM_ARGS > rendered.yaml - +helm template "$CHART_NAME" $HELM_ARGS > rendered.yaml for playbook in $(yq e 'select(.kind =="Playbook") | .metadata.name ' -o json -r rendered.yaml); do FILENAME="$OUTPUT_DIR/${playbook}.yaml" diff --git a/modules/mission-control b/modules/mission-control index e9af47fa..956e0324 160000 --- a/modules/mission-control +++ b/modules/mission-control @@ -1 +1 @@ -Subproject commit e9af47fa8e5accf5d001ece8ebd10f6c6b2f913b +Subproject commit 956e0324e07431228b67da91048fc462b0544491 diff --git a/modules/mission-control-chart b/modules/mission-control-chart index b3b2ae10..33c83ea9 160000 --- a/modules/mission-control-chart +++ b/modules/mission-control-chart @@ -1 +1 @@ -Subproject commit b3b2ae1067d1406296e9352aa4231621394a93d0 +Subproject commit 33c83ea9f051d785e745e5d3d9bc8b7551cd4fea diff --git a/modules/mission-control-registry b/modules/mission-control-registry index a4d7caea..27a56807 160000 --- a/modules/mission-control-registry +++ b/modules/mission-control-registry @@ -1 +1 @@ -Subproject 
commit a4d7caea1cfe6cd1e6c07e36d562a735327c4036 +Subproject commit 27a568070b7071ab875ce0f6fe0aa92701d2f96e diff --git a/styles/Flanksource/Acronyms.yml b/styles/Flanksource/Acronyms.yml index 5debee67..150db768 100644 --- a/styles/Flanksource/Acronyms.yml +++ b/styles/Flanksource/Acronyms.yml @@ -19,6 +19,21 @@ exceptions: - CRUD - CSS - CSV + - RDS + - SQS + - Subnet + - IAMRole + - ECSTask + - ECSCluster + - EBSVolume + - DNSZone + - DHCP + - ECS + - DNS + - EKS + - IAM + - IRSA + - VPC - DEBUG - DOM - DPI diff --git a/styles/Flanksource/Foreign.yml b/styles/Flanksource/Foreign.yml index f645ca0b..2b70e3be 100644 --- a/styles/Flanksource/Foreign.yml +++ b/styles/Flanksource/Foreign.yml @@ -7,6 +7,6 @@ nonword: true action: name: replace swap: - '\b(?:ie|i\.e\.)[\s,]': that is + # '\b(?:ie|i\.e\.)[\s,]': that is '\b(?:viz\.)[\s,]': namely '\b(?:ergo)[\s,]': therefore diff --git a/styles/ignore/words-with-suggestions.txt b/styles/ignore/words-with-suggestions.txt index d36aa161..e7ab9d95 100644 --- a/styles/ignore/words-with-suggestions.txt +++ b/styles/ignore/words-with-suggestions.txt @@ -28,10 +28,16 @@ dockerSocket postgrest pprof canaryNamespace +jmespath +shellQuote canarySelector containerdSocket clusterName change_type +Entra +Signup +JSONNET +Goto JMESPath routable matchQuery @@ -65,6 +71,7 @@ blackbox Blackbox bool boolean +auditability booleans Booleans bools