-
Notifications
You must be signed in to change notification settings - Fork 23
Fix couple multicluster flaky tests #562
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -152,11 +152,12 @@ def test_delete_om_and_appdb_statefulset_in_failed_cluster( | |
| # delete OM to simulate losing Ops Manager application | ||
| # this is only for testing unavailability of the OM application, it's not testing losing OM cluster | ||
| # we don't delete here any additional resources (secrets, configmaps) that are required for a proper OM recovery testing | ||
| # it will be immediately recreated by the operator, so we cannot check if it was deleted | ||
| delete_statefulset( | ||
| ops_manager.namespace, | ||
| ops_manager.name, | ||
| propagation_policy="Background", | ||
| api_client=central_cluster_client, | ||
| api_client=get_member_cluster_api_client(OM_MEMBER_CLUSTER_NAME), | ||
| ) | ||
| except kubernetes.client.ApiException as e: | ||
| if e.status != 404: | ||
|
|
@@ -184,14 +185,6 @@ def statefulset_is_deleted(namespace: str, name: str, api_client=Optional[kubern | |
| else: | ||
| raise e | ||
|
|
||
| run_periodically( | ||
|
Collaborator
Author
| There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This is racy, because the StatefulSet is immediately recreated by the operator. |
||
| lambda: statefulset_is_deleted( | ||
| ops_manager.namespace, | ||
| ops_manager.name, | ||
| api_client=get_member_cluster_api_client(OM_MEMBER_CLUSTER_NAME), | ||
| ), | ||
| timeout=120, | ||
| ) | ||
| run_periodically( | ||
| lambda: statefulset_is_deleted( | ||
| ops_manager.namespace, | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -128,26 +128,6 @@ EOF | |
| sleep 1 | ||
|
|
||
| local service_account_name="operator-tests-multi-cluster-service-account" | ||
|
|
||
|
Collaborator
Author
| There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Duplicated code that is already present just below. |
||
| local secret_name | ||
| secret_name="$(kubectl --context "${CENTRAL_CLUSTER}" get secret -n "${NAMESPACE}" | { grep "${service_account_name}" || test $? = 1; } | awk '{ print $1 }')" | ||
| if [[ "${secret_name}" == "" ]]; then | ||
| secret_name="${service_account_name}-token-secret" | ||
| create_service_account_token_secret "${CENTRAL_CLUSTER}" "${service_account_name}" "${secret_name}" | ||
| fi | ||
|
|
||
| local central_cluster_token | ||
| central_cluster_token="$(kubectl --context "${CENTRAL_CLUSTER}" get secret "${secret_name}" -o jsonpath='{ .data.token}' -n "${NAMESPACE}" | base64 -d)" | ||
| echo "Creating Multi Cluster configuration secret" | ||
|
|
||
| configuration_params=( | ||
| "--from-literal=central_cluster=${CENTRAL_CLUSTER}" | ||
| ) | ||
|
|
||
| configuration_params+=( | ||
| "--from-literal=${CENTRAL_CLUSTER}=${central_cluster_token}" | ||
| ) | ||
|
|
||
| local secret_name | ||
| secret_name="$(kubectl --context "${CENTRAL_CLUSTER}" get secret -n "${NAMESPACE}" | { grep "${service_account_name}" || test $? = 1; } | awk '{ print $1 }')" | ||
| if [[ "${secret_name}" == "" ]]; then | ||
|
|
@@ -175,7 +155,18 @@ EOF | |
| create_service_account_token_secret "${member_cluster}" "${service_account_name}" "${secret_name}" | ||
| fi | ||
|
|
||
| member_cluster_token="$(kubectl --context "${member_cluster}" get secret "${secret_name}" -o jsonpath='{ .data.token}' -n "${NAMESPACE}" | base64 -d)" | ||
| # Retry up to 10 times if .data.token is not yet populated | ||
|
Collaborator
Author
| There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. secrets |
||
| for _ in {1..10}; do | ||
| member_cluster_token="$(kubectl --context "${member_cluster}" get secret "${secret_name}" -o jsonpath='{ .data.token }' -n "${NAMESPACE}" | base64 -d)" | ||
| if [[ -n "${member_cluster_token}" ]]; then | ||
| break | ||
| fi | ||
| sleep 1 | ||
| done | ||
| if [[ -z "${member_cluster_token}" ]]; then | ||
| echo "Error: .data.token not populated for secret ${secret_name} in cluster ${member_cluster}" | ||
| exit 1 | ||
| fi | ||
| # for 2 cluster tests central cluster is the first member, so we cannot add this as it will result in duplicate key and error in create secret | ||
| if [[ "${member_cluster}" != "${CENTRAL_CLUSTER}" ]]; then | ||
| configuration_params+=( | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
this is not needed and prevents us from collecting test data