Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

MON-3664: chore: avoid issues with std.set* functions #2231

Merged
merged 1 commit on Jan 12, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
6 changes: 3 additions & 3 deletions jsonnet/main.jsonnet
Expand Up @@ -140,7 +140,7 @@ local inCluster =
$.controlPlane.mixin.grafanaDashboards +
$.controlPlane.etcdMixin.grafanaDashboards,
// Allow-listing dashboards that are going into the product. Wrapped in std.set() below so std.setMember works regardless of the literal ordering (std.set sorts and de-duplicates).
local includeDashboards = [
local includeDashboards = std.set([
'cluster-total.json',
'etcd.json',
'k8s-resources-cluster.json',
Expand All @@ -154,7 +154,7 @@ local inCluster =
'node-rsrc-use.json',
'pod-total.json',
'prometheus.json',
],
]),
// This step is to delete row with titles 'Storage IO - Distribution(Containers)'
// and 'Storage IO - Distribution' from 'k8s-resources-pod.json' dashboard since
// Prometheus doesn't collect the per-container fs metrics
Expand All @@ -163,7 +163,7 @@ local inCluster =
},
local filterDashboard(dashboard, excludedRowTitles) = dashboard { rows: std.filter(function(row) !std.member(excludedRowTitles, row.title), dashboard.rows) },
dashboards: {
[k]: filterDashboard(allDashboards[k], if std.setMember(k, std.objectFields(filteredDashboards)) then filteredDashboards[k] else [])
[k]: filterDashboard(allDashboards[k], if std.setMember(k, std.set(std.objectFields(filteredDashboards))) then filteredDashboards[k] else [])
for k in std.objectFields(allDashboards)
if std.setMember(k, includeDashboards)
},
Expand Down
4 changes: 2 additions & 2 deletions jsonnet/utils/add-annotations.libsonnet
Expand Up @@ -29,14 +29,14 @@
'target.workload.openshift.io/management': '{"effect": "PreferredDuringScheduling"}',
},
local addAnnotation(o) = o {
[if std.setMember(o.kind, ['DaemonSet', 'Deployment', 'ReplicaSet']) then 'spec']+: {
[if std.setMember(o.kind, std.set(['DaemonSet', 'Deployment'])) then 'spec']+: {
template+: {
metadata+: {
annotations+: annotation,
},
},
},
[if std.setMember(o.kind, ['Alertmanager', 'Prometheus', 'ThanosRuler']) then 'spec']+:
[if std.setMember(o.kind, std.set(['Alertmanager', 'Prometheus', 'ThanosRuler'])) then 'spec']+:
{
podMetadata+: {
annotations+: annotation,
Expand Down
2 changes: 1 addition & 1 deletion jsonnet/utils/add-labels.libsonnet
Expand Up @@ -2,7 +2,7 @@
local managedBy(o) =
local name = o.metadata.name;
// Some of the resources which are generated by jsonnet are moved under manifests/ and managed by CVO.
if o.kind == 'CustomResourceDefinition' || (o.kind == 'Role' && name == 'cluster-monitoring-operator-alert-customization') || (o.kind == 'Deployment' && name == 'cluster-monitoring-operator') || (o.kind == 'ClusterRole' && std.setMember(name, ['cluster-monitoring-operator-namespaced', 'cluster-monitoring-operator'])) then
if o.kind == 'CustomResourceDefinition' || (o.kind == 'Role' && name == 'cluster-monitoring-operator-alert-customization') || (o.kind == 'Deployment' && name == 'cluster-monitoring-operator') || (o.kind == 'ClusterRole' && std.setMember(name, std.set(['cluster-monitoring-operator-namespaced', 'cluster-monitoring-operator']))) then
'cluster-version-operator'
else
'cluster-monitoring-operator',
Expand Down
2 changes: 1 addition & 1 deletion jsonnet/utils/remove-limits.libsonnet
@@ -1,7 +1,7 @@
{
removeLimits(o): {
local removeLimit(o) = o {
[if std.setMember(o.kind, ['DaemonSet', 'Deployment', 'ReplicaSet']) then 'spec']+: {
[if std.setMember(o.kind, std.set(['DaemonSet', 'Deployment'])) then 'spec']+: {
template+: {
spec+: {
containers: [
Expand Down
4 changes: 2 additions & 2 deletions jsonnet/utils/set-terminationMessagePolicy.libsonnet
@@ -1,7 +1,7 @@
{
setTerminationMessagePolicy(o): o {
local addTerminationMessagePolicy(o) = o {
[if std.setMember(o.kind, ['DaemonSet', 'Deployment', 'ReplicaSet']) then 'spec']+: {
[if std.setMember(o.kind, std.set(['DaemonSet', 'Deployment'])) then 'spec']+: {
template+: {
spec+: {
containers: [
Expand All @@ -19,7 +19,7 @@
},
},
},
[if std.setMember(o.kind, ['Alertmanager', 'Prometheus', 'ThanosRuler']) then 'spec']+: {
[if std.setMember(o.kind, std.set(['Alertmanager', 'Prometheus', 'ThanosRuler'])) then 'spec']+: {
containers: [
c {
terminationMessagePolicy: 'FallbackToLogsOnError',
Expand Down
2 changes: 1 addition & 1 deletion manifests/0000_50_cluster-monitoring-operator_02-role.yaml
Expand Up @@ -6,7 +6,7 @@ metadata:
include.release.openshift.io/self-managed-high-availability: "true"
include.release.openshift.io/single-node-developer: "true"
labels:
app.kubernetes.io/managed-by: cluster-monitoring-operator
app.kubernetes.io/managed-by: cluster-version-operator
app.kubernetes.io/part-of: openshift-monitoring
name: cluster-monitoring-operator
rules:
Expand Down