go.mod: bump openshift/api
Changes in openshift/api now require us to
`import _ "github.com/openshift/api/monitoring/v1/zz_generated.crd-manifests"`
in order to get the generated CRD manifests in vendor/.

Signed-off-by: Jan Fajerski <jfajersk@redhat.com>
jan--f authored and rexagod committed Apr 9, 2024
1 parent fd70f8e commit 6287584
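
As context for the commit message above: a blank import like this typically lives in a tools-style file guarded by a build tag, so the dependency is tracked and vendored without ending up in the operator binary. Below is a minimal sketch; the `tools.go` file name, build tag, and package name are illustrative assumptions, not taken from this commit.

```go
//go:build tools

// Package tools pins build-only dependencies so that `go mod vendor`
// copies their package directories, including the generated CRD
// manifests, into vendor/.
package tools

import (
	// Blank import: we only need the package's files present in
	// vendor/, not any Go identifiers from it.
	_ "github.com/openshift/api/monitoring/v1/zz_generated.crd-manifests"
)
```

After `go mod vendor`, the YAML manifests should then appear under `vendor/github.com/openshift/api/monitoring/v1/zz_generated.crd-manifests/`, which is the path the updated Makefile rules below read from.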
Showing 10 changed files with 448 additions and 16 deletions.
4 changes: 2 additions & 2 deletions Makefile
@@ -156,10 +156,10 @@ json-manifests: $(JSON_MANIFESTS_DIR) $(JSON_MANIFESTS)
 .PHONY: json-crds
 json-crds: jsonnet/crds/alertingrules-custom-resource-definition.json jsonnet/crds/alertrelabelconfigs-custom-resource-definition.json
 
-jsonnet/crds/alertingrules-custom-resource-definition.json: vendor/github.com/openshift/api/monitoring/v1/0000_50_monitoring_01_alertingrules.crd.yaml
+jsonnet/crds/alertingrules-custom-resource-definition.json: vendor/github.com/openshift/api/monitoring/v1/zz_generated.crd-manifests/0000_50_monitoring_01_alertingrules.crd.yaml
 	$(GOJSONTOYAML_BIN) -yamltojson < $< > $@
 
-jsonnet/crds/alertrelabelconfigs-custom-resource-definition.json: vendor/github.com/openshift/api/monitoring/v1/0000_50_monitoring_02_alertrelabelconfigs.crd.yaml
+jsonnet/crds/alertrelabelconfigs-custom-resource-definition.json: vendor/github.com/openshift/api/monitoring/v1/zz_generated.crd-manifests/0000_50_monitoring_02_alertrelabelconfigs.crd.yaml
 	$(GOJSONTOYAML_BIN) -yamltojson < $< > $@
 
 .PHONY: versions
2 changes: 1 addition & 1 deletion jsonnet/crds/alertingrules-custom-resource-definition.json
@@ -1 +1 @@
{"apiVersion":"apiextensions.k8s.io/v1","kind":"CustomResourceDefinition","metadata":{"annotations":{"api-approved.openshift.io":"https://github.com/openshift/api/pull/1406","description":"OpenShift Monitoring alerting rules"},"name":"alertingrules.monitoring.openshift.io"},"spec":{"group":"monitoring.openshift.io","names":{"kind":"AlertingRule","listKind":"AlertingRuleList","plural":"alertingrules","singular":"alertingrule"},"scope":"Namespaced","versions":[{"name":"v1","schema":{"openAPIV3Schema":{"description":"AlertingRule represents a set of user-defined Prometheus rule groups containing alerting rules. This resource is the supported method for cluster admins to create alerts based on metrics recorded by the platform monitoring stack in OpenShift, i.e. the Prometheus instance deployed to the openshift-monitoring namespace. You might use this to create custom alerting rules not shipped with OpenShift based on metrics from components such as the node_exporter, which provides machine-level metrics such as CPU usage, or kube-state-metrics, which provides metrics on Kubernetes usage. \n The API is mostly compatible with the upstream PrometheusRule type from the prometheus-operator. The primary difference being that recording rules are not allowed here -- only alerting rules. For each AlertingRule resource created, a corresponding PrometheusRule will be created in the openshift-monitoring namespace. OpenShift requires admins to use the AlertingRule resource rather than the upstream type in order to allow better OpenShift specific defaulting and validation, while not modifying the upstream APIs directly. \n You can find upstream API documentation for PrometheusRule resources here: \n https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).","properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources","type":"string"},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"metadata":{"type":"object"},"spec":{"description":"spec describes the desired state of this AlertingRule object.","properties":{"groups":{"description":"groups is a list of grouped alerting rules. Rule groups are the unit at which Prometheus parallelizes rule processing. All rules in a single group share a configured evaluation interval. All rules in the group will be processed together on this interval, sequentially, and all rules will be processed. \n It's common to group related alerting rules into a single AlertingRule resources, and within that resource, closely related alerts, or simply alerts with the same interval, into individual groups. You are also free to create AlertingRule resources with only a single rule group, but be aware that this can have a performance impact on Prometheus if the group is extremely large or has very complex query expressions to evaluate. 
Spreading very complex rules across multiple groups to allow them to be processed in parallel is also a common use-case.","items":{"description":"RuleGroup is a list of sequentially evaluated alerting rules.","properties":{"interval":{"description":"interval is how often rules in the group are evaluated. If not specified, it defaults to the global.evaluation_interval configured in Prometheus, which itself defaults to 30 seconds. You can check if this value has been modified from the default on your cluster by inspecting the platform Prometheus configuration: The relevant field in that resource is: spec.evaluationInterval","maxLength":2048,"pattern":"^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$","type":"string"},"name":{"description":"name is the name of the group.","maxLength":2048,"minLength":1,"type":"string"},"rules":{"description":"rules is a list of sequentially evaluated alerting rules. Prometheus may process rule groups in parallel, but rules within a single group are always processed sequentially, and all rules are processed.","items":{"description":"Rule describes an alerting rule. See Prometheus documentation: - https://www.prometheus.io/docs/prometheus/latest/configuration/alerting_rules","properties":{"alert":{"description":"alert is the name of the alert. Must be a valid label value, i.e. may contain any Unicode character.","maxLength":2048,"minLength":1,"type":"string"},"annotations":{"additionalProperties":{"type":"string"},"description":"annotations to add to each alert. These are values that can be used to store longer additional information that you won't query on, such as alert descriptions or runbook links.","type":"object"},"expr":{"anyOf":[{"type":"integer"},{"type":"string"}],"description":"expr is the PromQL expression to evaluate. Every evaluation cycle this is evaluated at the current time, and all resultant time series become pending or firing alerts. This is most often a string representing a PromQL expression, e.g.: mapi_current_pending_csr \u003e mapi_max_pending_csr In rare cases this could be a simple integer, e.g. a simple \"1\" if the intent is to create an alert that is always firing. This is sometimes used to create an always-firing \"Watchdog\" alert in order to ensure the alerting pipeline is functional.","x-kubernetes-int-or-string":true},"for":{"description":"for is the time period after which alerts are considered firing after first returning results. Alerts which have not yet fired for long enough are considered pending.","maxLength":2048,"pattern":"^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$","type":"string"},"labels":{"additionalProperties":{"type":"string"},"description":"labels to add or overwrite for each alert. The results of the PromQL expression for the alert will result in an existing set of labels for the alert, after evaluating the expression, for any label specified here with the same name as a label in that set, the label here wins and overwrites the previous value. These should typically be short identifying values that may be useful to query against. 
A common example is the alert severity, where one sets `severity: warning` under the `labels` key:","type":"object"}},"required":["alert","expr"],"type":"object"},"minItems":1,"type":"array"}},"required":["name","rules"],"type":"object"},"minItems":1,"type":"array","x-kubernetes-list-map-keys":["name"],"x-kubernetes-list-type":"map"}},"required":["groups"],"type":"object"},"status":{"description":"status describes the current state of this AlertOverrides object.","properties":{"observedGeneration":{"description":"observedGeneration is the last generation change you've dealt with.","format":"int64","type":"integer"},"prometheusRule":{"description":"prometheusRule is the generated PrometheusRule for this AlertingRule. Each AlertingRule instance results in a generated PrometheusRule object in the same namespace, which is always the openshift-monitoring namespace.","properties":{"name":{"description":"name of the referenced PrometheusRule.","maxLength":2048,"minLength":1,"type":"string"}},"required":["name"],"type":"object"}},"type":"object"}},"required":["spec"],"type":"object"}},"served":true,"storage":true,"subresources":{"status":{}}}]},"status":{"acceptedNames":{"kind":"","plural":""},"conditions":[],"storedVersions":[]}}
{"apiVersion":"apiextensions.k8s.io/v1","kind":"CustomResourceDefinition","metadata":{"annotations":{"api-approved.openshift.io":"https://github.com/openshift/api/pull/1406","api.openshift.io/merged-by-featuregates":"true","description":"OpenShift Monitoring alerting rules","include.release.openshift.io/ibm-cloud-managed":"true","include.release.openshift.io/self-managed-high-availability":"true"},"name":"alertingrules.monitoring.openshift.io"},"spec":{"group":"monitoring.openshift.io","names":{"kind":"AlertingRule","listKind":"AlertingRuleList","plural":"alertingrules","singular":"alertingrule"},"scope":"Namespaced","versions":[{"name":"v1","schema":{"openAPIV3Schema":{"description":"AlertingRule represents a set of user-defined Prometheus rule groups containing alerting rules. This resource is the supported method for cluster admins to create alerts based on metrics recorded by the platform monitoring stack in OpenShift, i.e. the Prometheus instance deployed to the openshift-monitoring namespace. You might use this to create custom alerting rules not shipped with OpenShift based on metrics from components such as the node_exporter, which provides machine-level metrics such as CPU usage, or kube-state-metrics, which provides metrics on Kubernetes usage. \n The API is mostly compatible with the upstream PrometheusRule type from the prometheus-operator. The primary difference being that recording rules are not allowed here -- only alerting rules. For each AlertingRule resource created, a corresponding PrometheusRule will be created in the openshift-monitoring namespace. OpenShift requires admins to use the AlertingRule resource rather than the upstream type in order to allow better OpenShift specific defaulting and validation, while not modifying the upstream APIs directly. \n You can find upstream API documentation for PrometheusRule resources here: \n https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).","properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources","type":"string"},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"metadata":{"type":"object"},"spec":{"description":"spec describes the desired state of this AlertingRule object.","properties":{"groups":{"description":"groups is a list of grouped alerting rules. Rule groups are the unit at which Prometheus parallelizes rule processing. All rules in a single group share a configured evaluation interval. All rules in the group will be processed together on this interval, sequentially, and all rules will be processed. \n It's common to group related alerting rules into a single AlertingRule resources, and within that resource, closely related alerts, or simply alerts with the same interval, into individual groups. 
You are also free to create AlertingRule resources with only a single rule group, but be aware that this can have a performance impact on Prometheus if the group is extremely large or has very complex query expressions to evaluate. Spreading very complex rules across multiple groups to allow them to be processed in parallel is also a common use-case.","items":{"description":"RuleGroup is a list of sequentially evaluated alerting rules.","properties":{"interval":{"description":"interval is how often rules in the group are evaluated. If not specified, it defaults to the global.evaluation_interval configured in Prometheus, which itself defaults to 30 seconds. You can check if this value has been modified from the default on your cluster by inspecting the platform Prometheus configuration: The relevant field in that resource is: spec.evaluationInterval","maxLength":2048,"pattern":"^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$","type":"string"},"name":{"description":"name is the name of the group.","maxLength":2048,"minLength":1,"type":"string"},"rules":{"description":"rules is a list of sequentially evaluated alerting rules. Prometheus may process rule groups in parallel, but rules within a single group are always processed sequentially, and all rules are processed.","items":{"description":"Rule describes an alerting rule. See Prometheus documentation: - https://www.prometheus.io/docs/prometheus/latest/configuration/alerting_rules","properties":{"alert":{"description":"alert is the name of the alert. Must be a valid label value, i.e. may contain any Unicode character.","maxLength":2048,"minLength":1,"type":"string"},"annotations":{"additionalProperties":{"type":"string"},"description":"annotations to add to each alert. These are values that can be used to store longer additional information that you won't query on, such as alert descriptions or runbook links.","type":"object"},"expr":{"anyOf":[{"type":"integer"},{"type":"string"}],"description":"expr is the PromQL expression to evaluate. Every evaluation cycle this is evaluated at the current time, and all resultant time series become pending or firing alerts. This is most often a string representing a PromQL expression, e.g.: mapi_current_pending_csr \u003e mapi_max_pending_csr In rare cases this could be a simple integer, e.g. a simple \"1\" if the intent is to create an alert that is always firing. This is sometimes used to create an always-firing \"Watchdog\" alert in order to ensure the alerting pipeline is functional.","x-kubernetes-int-or-string":true},"for":{"description":"for is the time period after which alerts are considered firing after first returning results. Alerts which have not yet fired for long enough are considered pending.","maxLength":2048,"pattern":"^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$","type":"string"},"labels":{"additionalProperties":{"type":"string"},"description":"labels to add or overwrite for each alert. The results of the PromQL expression for the alert will result in an existing set of labels for the alert, after evaluating the expression, for any label specified here with the same name as a label in that set, the label here wins and overwrites the previous value. These should typically be short identifying values that may be useful to query against. 
A common example is the alert severity, where one sets `severity: warning` under the `labels` key:","type":"object"}},"required":["alert","expr"],"type":"object"},"minItems":1,"type":"array"}},"required":["name","rules"],"type":"object"},"minItems":1,"type":"array","x-kubernetes-list-map-keys":["name"],"x-kubernetes-list-type":"map"}},"required":["groups"],"type":"object"},"status":{"description":"status describes the current state of this AlertOverrides object.","properties":{"observedGeneration":{"description":"observedGeneration is the last generation change you've dealt with.","format":"int64","type":"integer"},"prometheusRule":{"description":"prometheusRule is the generated PrometheusRule for this AlertingRule. Each AlertingRule instance results in a generated PrometheusRule object in the same namespace, which is always the openshift-monitoring namespace.","properties":{"name":{"description":"name of the referenced PrometheusRule.","maxLength":2048,"minLength":1,"type":"string"}},"required":["name"],"type":"object"}},"type":"object"}},"required":["spec"],"type":"object"}},"served":true,"storage":true,"subresources":{"status":{}}}]}}
