Skip to content

Commit

Permalink
OCPBUGS-15440: fix CMO to apply console-plugin pod.spec config
Browse files Browse the repository at this point in the history
The console-plugin's deployment configs that need to be applied to the
deployment.spec.template.spec weren't getting applied because the
changes were made to the copy of the object instead of the original
object itself. The tests didn't catch the issue because the pod assertions
didn't validate if any pods were found for the combination of namespace
and label-selector

This commit fixes both by referring (pointer) to the original spec
instead of the copy, and by fixing the assertions to fail if no
matching pods are found.

Signed-off-by: Sunil Thaha <sthaha@redhat.com>
  • Loading branch information
sthaha committed Jun 27, 2023
1 parent 8d67591 commit edf0ecd
Show file tree
Hide file tree
Showing 3 changed files with 20 additions and 4 deletions.
4 changes: 2 additions & 2 deletions pkg/manifests/manifests.go
Expand Up @@ -2621,8 +2621,8 @@ func (f *Factory) MonitoringPluginDeployment() (*appsv1.Deployment, error) {

// ensure console-plugin container is present even if config isn't defined so that,
// we validate that deployment has the expected container name. Thereby avoiding
// any suprises should user add config later.
podSpec := d.Spec.Template.Spec
// any surprises should user add config later.
podSpec := &d.Spec.Template.Spec
containers := podSpec.Containers
idx := slices.IndexFunc(containers, containerNameEquals(MonitoringPluginDeploymentContainer))
if idx < 0 {
Expand Down
16 changes: 14 additions & 2 deletions test/e2e/config_test.go
Expand Up @@ -754,15 +754,15 @@ func TestClusterMonitorConsolePlugin(t *testing.T) {
deploymentName = "monitoring-plugin"
cpu = "10m"
mem = "13Mi"
labelSelector = "app.kubernetes.io/instance=monitoring-plugin"
labelSelector = "app.kubernetes.io/name=monitoring-plugin"
containerName = "monitoring-plugin"
)

// ensure console-plugin is running before the change
f.AssertDeploymentExistsAndRollout(deploymentName, f.Ns)

data := fmt.Sprintf(`
consolePlugin:
monitoringPlugin:
resources:
requests:
cpu: %s
Expand All @@ -784,6 +784,7 @@ consolePlugin:
f.Ns,
labelSelector,
[]framework.PodAssertion{
expectTolerationsEqual(1),
expectCatchAllToleration(),
expectMatchingRequests("*", containerName, mem, cpu),
},
Expand All @@ -794,6 +795,17 @@ consolePlugin:
}
}

// expectTolerationsEqual returns a PodAssertion that fails unless the pod
// declares exactly exp tolerations in its spec. It checks the count only;
// the content of individual tolerations is validated by other assertions
// (e.g. expectCatchAllToleration).
func expectTolerationsEqual(exp int) framework.PodAssertion {
return func(pod v1.Pod) error {
if got := len(pod.Spec.Tolerations); got != exp {
return fmt.Errorf("expected to find %d tolerations in %s but found %d", exp, pod.Name, got)
}
return nil
}
}

// checks that the toleration is set accordingly
// this toleration will match all so will not affect rolling out workloads
func expectCatchAllToleration() framework.PodAssertion {
Expand Down
4 changes: 4 additions & 0 deletions test/e2e/framework/assertions.go
Expand Up @@ -329,6 +329,10 @@ func (f *Framework) AssertPodConfiguration(namespace, labelSelector string, asse
return fmt.Errorf("%w - failed to get Pods", err)
}

if len(pods.Items) == 0 {
return fmt.Errorf("%w - failed to find Pods in %s matching %s", err, namespace, labelSelector)
}

// for each pod in the list of matching labels run each assertion
for _, p := range pods.Items {
for _, assertion := range assertions {
Expand Down

0 comments on commit edf0ecd

Please sign in to comment.