diff --git a/.evergreen-tasks.yml b/.evergreen-tasks.yml index 1c3e65f27..bc4fd8683 100644 --- a/.evergreen-tasks.yml +++ b/.evergreen-tasks.yml @@ -189,7 +189,7 @@ tasks: commands: - func: "e2e_test" - - name: e2e_meko_mck_upgrade + - name: e2e_mongodbmulticluster_meko_mck_upgrade tags: [ "patch-run" ] commands: - func: "e2e_test" @@ -250,6 +250,11 @@ tasks: commands: - func: "e2e_test" + - name: e2e_mongodbmulticluster_custom_roles + tags: [ "patch-run" ] + commands: + - func: "e2e_test" + - name: e2e_mongodb_custom_roles tags: [ "patch-run" ] commands: @@ -922,82 +927,157 @@ tasks: commands: - func: e2e_test - - name: e2e_multi_cluster_replica_set + - name: e2e_mongodbmulticluster_multi_cluster_replica_set + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodbmulticluster_multi_cluster_replica_set_migration + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodbmulticluster_multi_cluster_replica_set_member_options + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodbmulticluster_multi_cluster_scale_up_cluster tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_replica_set_migration + - name: e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_replica_set_member_options + - name: e2e_mongodbmulticluster_multi_cluster_scale_down_cluster tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_replica_set_scale_up + - name: e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down tags: [ "patch-run" ] commands: - func: e2e_test + - name: e2e_mongodbmulticluster_multi_cluster_replica_set_deletion + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodbmulticluster_multi_cluster_mtls_test + tags: [ "patch-run" ] 
+ commands: + - func: e2e_test + + - name: e2e_mongodbmulticluster_multi_cluster_scram + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodbmulticluster_multi_sts_override + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodbmulticluster_multi_cluster_tls_with_scram + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodbmulticluster_multi_cluster_enable_tls + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodb_multi_cluster_replica_set + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodb_multi_cluster_replica_set_migration + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodb_multi_cluster_replica_set_member_options + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodb_multi_cluster_replica_set_scale_up + tags: [ "patch-run" ] + commands: + - func: e2e_test + - name: e2e_multi_cluster_new_replica_set_scale_up tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_scale_up_cluster + - name: e2e_mongodb_multi_cluster_scale_up_cluster tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_scale_up_cluster_new_cluster + - name: e2e_mongodb_multi_cluster_scale_up_cluster_new_cluster tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_scale_down_cluster + - name: e2e_mongodb_multi_cluster_scale_down_cluster tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_replica_set_scale_down + - name: e2e_mongodb_multi_cluster_replica_set_scale_down tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_replica_set_deletion + - name: e2e_mongodb_multi_cluster_replica_set_deletion tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_mtls_test + - name: 
e2e_mongodb_multi_cluster_mtls_test tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_scram + - name: e2e_mongodb_multi_cluster_scram tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_sts_override + - name: e2e_mongodb_multi_sts_override tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_tls_with_scram + - name: e2e_mongodb_multi_cluster_tls_with_scram tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_enable_tls + - name: e2e_mongodb_multi_cluster_enable_tls tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_upgrade_downgrade + - name: e2e_mongodb_multi_cluster_upgrade_downgrade tags: [ "patch-run" ] commands: - func: e2e_test @@ -1008,22 +1088,42 @@ tasks: commands: - func: e2e_test - - name: e2e_multi_cluster_tls_no_mesh + - name: e2e_mongodbmulticluster_multi_cluster_tls_no_mesh + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodbmulticluster_multi_cluster_backup_restore + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodbmulticluster_multi_cluster_appdb_s3_based_backup_restore + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodb_multi_cluster_tls_no_mesh tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_backup_restore + - name: e2e_mongodb_multi_cluster_backup_restore tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_appdb_s3_based_backup_restore + - name: e2e_mongodb_multi_cluster_appdb_s3_based_backup_restore tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_backup_restore_no_mesh + - name: e2e_mongodb_multi_cluster_backup_restore_no_mesh tags: [ "patch-run" ] commands: - func: e2e_test @@ -1048,78 +1148,153 @@ tasks: commands: - func: e2e_test - - name: 
e2e_multi_cluster_tls_with_x509 + - name: e2e_mongodbmulticluster_multi_cluster_tls_with_x509 + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodbmulticluster_multi_cluster_with_ldap + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodbmulticluster_multi_cluster_with_ldap_custom_roles tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_with_ldap + - name: e2e_mongodbmulticluster_multi_cluster_specific_namespaces + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodb_multi_cluster_tls_with_x509 + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodb_multi_cluster_with_ldap tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_with_ldap_custom_roles + - name: e2e_mongodb_multi_cluster_with_ldap_custom_roles tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_specific_namespaces + - name: e2e_mongodb_multi_cluster_specific_namespaces tags: [ "patch-run" ] commands: - func: e2e_test # TODO: not used in any variant - - name: e2e_multi_cluster_clusterwide + - name: e2e_mongodbmulticluster_multi_cluster_clusterwide tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_disaster_recovery + - name: e2e_mongodbmulticluster_multi_cluster_disaster_recovery tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_multi_disaster_recovery + - name: e2e_mongodbmulticluster_multi_cluster_multi_disaster_recovery tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_2_clusters_replica_set + - name: e2e_mongodbmulticluster_multi_cluster_2_clusters_replica_set tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_2_clusters_clusterwide + - name: e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_recover + - name: 
e2e_mongodbmulticluster_multi_cluster_recover tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_recover_network_partition + - name: e2e_mongodbmulticluster_multi_cluster_recover_network_partition tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_recover_clusterwide + - name: e2e_mongodbmulticluster_multi_cluster_recover_clusterwide tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_agent_flags + - name: e2e_mongodbmulticluster_multi_cluster_agent_flags tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_replica_set_ignore_unknown_users + - name: e2e_mongodbmulticluster_multi_cluster_replica_set_ignore_unknown_users tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_validation + - name: e2e_mongodbmulticluster_multi_cluster_validation + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodb_multi_cluster_clusterwide + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodb_multi_cluster_disaster_recovery + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodb_multi_cluster_multi_disaster_recovery + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodb_multi_cluster_2_clusters_replica_set + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodb_multi_cluster_2_clusters_clusterwide + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodb_multi_cluster_recover + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodb_multi_cluster_recover_network_partition + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodb_multi_cluster_recover_clusterwide + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodb_multi_cluster_agent_flags + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: 
e2e_mongodb_multi_cluster_replica_set_ignore_unknown_users + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodb_multi_cluster_validation tags: [ "patch-run" ] commands: - func: e2e_test @@ -1179,12 +1354,22 @@ tasks: commands: - func: e2e_test - - name: e2e_multi_cluster_om_appdb_no_mesh + - name: e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodbmulticluster_multi_cluster_pvc_resize tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_pvc_resize + - name: e2e_mongodb_multi_cluster_om_appdb_no_mesh + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodb_multi_cluster_pvc_resize tags: [ "patch-run" ] commands: - func: e2e_test @@ -1273,12 +1458,22 @@ tasks: commands: - func: e2e_test - - name: e2e_multi_cluster_oidc_m2m_group + - name: e2e_mongodbmulticluster_multi_cluster_oidc_m2m_group + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodbmulticluster_multi_cluster_oidc_m2m_user + tags: [ "patch-run" ] + commands: + - func: e2e_test + + - name: e2e_mongodb_multi_cluster_oidc_m2m_group tags: [ "patch-run" ] commands: - func: e2e_test - - name: e2e_multi_cluster_oidc_m2m_user + - name: e2e_mongodb_multi_cluster_oidc_m2m_user tags: [ "patch-run" ] commands: - func: e2e_test diff --git a/.evergreen.yml b/.evergreen.yml index 819c795c7..5321da430 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -787,7 +787,7 @@ task_groups: - e2e_operator_clusterwide - e2e_operator_multi_namespaces - e2e_appdb_tls_operator_upgrade_v1_32_to_mck - - e2e_meko_mck_upgrade + - e2e_mongodbmulticluster_meko_mck_upgrade <<: *teardown_group # e2e_operator_race_with_telemetry_task_group includes the tests for testing the operator with race detector enabled @@ -815,7 +815,7 @@ task_groups: - e2e_operator_clusterwide - e2e_operator_multi_namespaces - e2e_appdb_tls_operator_upgrade_v1_32_to_mck - - e2e_meko_mck_upgrade 
+ - e2e_mongodbmulticluster_meko_mck_upgrade <<: *teardown_group - name: e2e_multi_cluster_kind_task_group @@ -823,38 +823,69 @@ task_groups: <<: *setup_group <<: *setup_and_teardown_task_cloudqa tasks: - - e2e_multi_cluster_replica_set - - e2e_multi_cluster_replica_set_migration - - e2e_multi_cluster_replica_set_member_options - - e2e_multi_cluster_recover - - e2e_multi_cluster_recover_clusterwide - - e2e_multi_cluster_specific_namespaces - - e2e_multi_cluster_scram - - e2e_multi_cluster_tls_with_x509 - - e2e_multi_cluster_tls_no_mesh - - e2e_multi_cluster_enable_tls - # e2e_multi_cluster_with_ldap - # e2e_multi_cluster_with_ldap_custom_roles - - e2e_multi_cluster_mtls_test - - e2e_multi_cluster_replica_set_deletion - - e2e_multi_cluster_replica_set_scale_up + - e2e_mongodbmulticluster_multi_cluster_replica_set + - e2e_mongodbmulticluster_multi_cluster_replica_set_migration + - e2e_mongodbmulticluster_multi_cluster_replica_set_member_options + - e2e_mongodbmulticluster_multi_cluster_recover + - e2e_mongodbmulticluster_multi_cluster_recover_clusterwide + - e2e_mongodbmulticluster_multi_cluster_specific_namespaces + - e2e_mongodbmulticluster_multi_cluster_scram + - e2e_mongodbmulticluster_multi_cluster_tls_with_x509 + - e2e_mongodbmulticluster_multi_cluster_tls_no_mesh + - e2e_mongodbmulticluster_multi_cluster_enable_tls + # e2e_mongodbmulticluster_multi_cluster_with_ldap + # e2e_mongodbmulticluster_multi_cluster_with_ldap_custom_roles + - e2e_mongodbmulticluster_multi_cluster_mtls_test + - e2e_mongodbmulticluster_multi_cluster_replica_set_deletion + - e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up - e2e_multi_cluster_new_replica_set_scale_up - - e2e_multi_cluster_scale_up_cluster - - e2e_multi_cluster_scale_up_cluster_new_cluster - - e2e_multi_cluster_replica_set_scale_down - - e2e_multi_cluster_scale_down_cluster - - e2e_multi_sts_override - - e2e_multi_cluster_tls_with_scram - - e2e_multi_cluster_upgrade_downgrade - - e2e_multi_cluster_backup_restore 
- - e2e_multi_cluster_backup_restore_no_mesh - - e2e_multi_cluster_disaster_recovery - - e2e_multi_cluster_multi_disaster_recovery - - e2e_multi_cluster_recover_network_partition - - e2e_multi_cluster_validation - - e2e_multi_cluster_agent_flags - - e2e_multi_cluster_replica_set_ignore_unknown_users - - e2e_multi_cluster_pvc_resize + - e2e_mongodbmulticluster_multi_cluster_scale_up_cluster + - e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster + - e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down + - e2e_mongodbmulticluster_multi_cluster_scale_down_cluster + - e2e_mongodbmulticluster_multi_sts_override + - e2e_mongodbmulticluster_multi_cluster_tls_with_scram + - e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade + - e2e_mongodbmulticluster_multi_cluster_backup_restore + - e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh + - e2e_mongodbmulticluster_multi_cluster_disaster_recovery + - e2e_mongodbmulticluster_multi_cluster_multi_disaster_recovery + - e2e_mongodbmulticluster_multi_cluster_recover_network_partition + - e2e_mongodbmulticluster_multi_cluster_validation + - e2e_mongodbmulticluster_multi_cluster_agent_flags + - e2e_mongodbmulticluster_multi_cluster_replica_set_ignore_unknown_users + - e2e_mongodbmulticluster_multi_cluster_pvc_resize + - e2e_mongodb_multi_cluster_replica_set + - e2e_mongodb_multi_cluster_replica_set_migration + - e2e_mongodb_multi_cluster_replica_set_member_options + - e2e_mongodb_multi_cluster_recover + - e2e_mongodb_multi_cluster_recover_clusterwide + - e2e_mongodb_multi_cluster_specific_namespaces + - e2e_mongodb_multi_cluster_scram + - e2e_mongodb_multi_cluster_tls_with_x509 + - e2e_mongodb_multi_cluster_tls_no_mesh + - e2e_mongodb_multi_cluster_enable_tls + # e2e_mongodb_multi_cluster_with_ldap + # e2e_mongodb_multi_cluster_with_ldap_custom_roles + - e2e_mongodb_multi_cluster_mtls_test + - e2e_mongodb_multi_cluster_replica_set_deletion + - e2e_mongodb_multi_cluster_replica_set_scale_up + - 
e2e_mongodb_multi_cluster_scale_up_cluster + - e2e_mongodb_multi_cluster_scale_up_cluster_new_cluster + - e2e_mongodb_multi_cluster_replica_set_scale_down + - e2e_mongodb_multi_cluster_scale_down_cluster + - e2e_mongodb_multi_sts_override + - e2e_mongodb_multi_cluster_tls_with_scram + - e2e_mongodb_multi_cluster_upgrade_downgrade + - e2e_mongodb_multi_cluster_backup_restore + - e2e_mongodb_multi_cluster_backup_restore_no_mesh + - e2e_mongodb_multi_cluster_disaster_recovery + - e2e_mongodb_multi_cluster_multi_disaster_recovery + - e2e_mongodb_multi_cluster_recover_network_partition + - e2e_mongodb_multi_cluster_validation + - e2e_mongodb_multi_cluster_agent_flags + - e2e_mongodb_multi_cluster_replica_set_ignore_unknown_users + - e2e_mongodb_multi_cluster_pvc_resize - e2e_multi_cluster_sharded_geo_sharding - e2e_multi_cluster_sharded_scaling - e2e_multi_cluster_sharded_scaling_all_shard_overrides @@ -884,12 +915,15 @@ task_groups: - e2e_tls_sc_additional_certs - e2e_tls_x509_configure_all_options_sc - e2e_tls_x509_sc - - e2e_meko_mck_upgrade + - e2e_mongodbmulticluster_meko_mck_upgrade + - e2e_mongodbmulticluster_custom_roles - e2e_mongodb_custom_roles - e2e_sharded_cluster_oidc_m2m_group - e2e_sharded_cluster_oidc_m2m_user - - e2e_multi_cluster_oidc_m2m_group - - e2e_multi_cluster_oidc_m2m_user + - e2e_mongodbmulticluster_multi_cluster_oidc_m2m_group + - e2e_mongodbmulticluster_multi_cluster_oidc_m2m_user + - e2e_mongodb_multi_cluster_oidc_m2m_group + - e2e_mongodb_multi_cluster_oidc_m2m_user <<: *teardown_group @@ -898,8 +932,10 @@ task_groups: <<: *setup_group <<: *setup_and_teardown_task_cloudqa tasks: - - e2e_multi_cluster_2_clusters_replica_set - - e2e_multi_cluster_2_clusters_clusterwide + - e2e_mongodbmulticluster_multi_cluster_2_clusters_replica_set + - e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide + - e2e_mongodb_multi_cluster_2_clusters_replica_set + - e2e_mongodb_multi_cluster_2_clusters_clusterwide <<: *teardown_group - name: 
e2e_multi_cluster_om_appdb_task_group @@ -912,11 +948,13 @@ task_groups: - e2e_multi_cluster_om_validation - e2e_multi_cluster_appdb - e2e_multi_cluster_appdb_cleanup - - e2e_multi_cluster_appdb_s3_based_backup_restore + - e2e_mongodbmulticluster_multi_cluster_appdb_s3_based_backup_restore + - e2e_mongodb_multi_cluster_appdb_s3_based_backup_restore - e2e_multi_cluster_appdb_disaster_recovery - e2e_multi_cluster_appdb_disaster_recovery_force_reconfigure - e2e_multi_cluster_om_networking_clusterwide - - e2e_multi_cluster_om_appdb_no_mesh + - e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh + - e2e_mongodb_multi_cluster_om_appdb_no_mesh # Reused OM tests with AppDB Multi-Cluster topology - e2e_appdb_tls_operator_upgrade_v1_32_to_mck - e2e_om_appdb_flags_and_config @@ -967,11 +1005,13 @@ task_groups: - e2e_multi_cluster_om_validation - e2e_multi_cluster_appdb - e2e_multi_cluster_appdb_cleanup - - e2e_multi_cluster_appdb_s3_based_backup_restore + - e2e_mongodbmulticluster_multi_cluster_appdb_s3_based_backup_restore + - e2e_mongodb_multi_cluster_appdb_s3_based_backup_restore - e2e_multi_cluster_appdb_disaster_recovery - e2e_multi_cluster_appdb_disaster_recovery_force_reconfigure - e2e_multi_cluster_om_networking_clusterwide - - e2e_multi_cluster_om_appdb_no_mesh + - e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh + - e2e_mongodb_multi_cluster_om_appdb_no_mesh # Reused OM tests with AppDB Multi-Cluster topology - e2e_om_appdb_flags_and_config - e2e_om_appdb_upgrade diff --git a/controllers/operator/mongodbreplicaset_controller_multi_test.go b/controllers/operator/mongodbreplicaset_controller_multi_test.go index 01d3e2da3..b77d1898a 100644 --- a/controllers/operator/mongodbreplicaset_controller_multi_test.go +++ b/controllers/operator/mongodbreplicaset_controller_multi_test.go @@ -425,8 +425,8 @@ func TestReadState_ClusterMapping_ReadsFromAnnotation(t *testing.T) { func TestReadState_ClusterMapping_FallbackToStatusMembers(t *testing.T) { rs := &mdbv1.MongoDB{ 
ObjectMeta: metav1.ObjectMeta{ - Name: "test-rs", - Namespace: mock.TestNamespace, + Name: "test-rs", + Namespace: mock.TestNamespace, Annotations: map[string]string{ // No ClusterMapping annotation }, @@ -453,8 +453,8 @@ func TestReadState_ClusterMapping_FallbackToStatusMembers(t *testing.T) { func TestReadState_ClusterMapping_SkipsMigrationForMultiCluster(t *testing.T) { rs := &mdbv1.MongoDB{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-rs", - Namespace: mock.TestNamespace, + Name: "test-rs", + Namespace: mock.TestNamespace, Annotations: map[string]string{ // No state annotations }, @@ -488,8 +488,8 @@ func TestReadState_ClusterMapping_SkipsMigrationForMultiCluster(t *testing.T) { func TestReadState_LastAppliedMemberSpec_FallbackToStatusMembers(t *testing.T) { rs := &mdbv1.MongoDB{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-rs", - Namespace: mock.TestNamespace, + Name: "test-rs", + Namespace: mock.TestNamespace, Annotations: map[string]string{ // No LastAppliedMemberSpec annotation }, diff --git a/docker/mongodb-kubernetes-tests/kubetester/certs_mongodb_multi.py b/docker/mongodb-kubernetes-tests/kubetester/certs_mongodb_multi.py index ac1f6dac1..7f1079ea4 100644 --- a/docker/mongodb-kubernetes-tests/kubetester/certs_mongodb_multi.py +++ b/docker/mongodb-kubernetes-tests/kubetester/certs_mongodb_multi.py @@ -10,6 +10,7 @@ get_mongodb_x509_subject, multi_cluster_service_fqdns, ) +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient @@ -18,7 +19,7 @@ def create_multi_cluster_agent_certs( multi_cluster_issuer: str, secret_name: str, central_cluster_client: kubernetes.client.ApiClient, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, secret_backend: Optional[str] = None, ) -> str: agents = ["mms-automation-agent"] @@ -49,7 +50,7 @@ def create_multi_cluster_x509_agent_certs( multi_cluster_issuer: str, secret_name: str, central_cluster_client: 
kubernetes.client.ApiClient, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, secret_backend: Optional[str] = None, ) -> str: spec = get_agent_x509_subject(mongodb_multi.namespace) @@ -73,7 +74,7 @@ def create_multi_cluster_mongodb_tls_certs( bundle_secret_name: str, member_cluster_clients: List[MultiClusterClient], central_cluster_client: kubernetes.client.ApiClient, - mongodb_multi: Optional[MongoDBMulti] = None, + mongodb_multi: Optional[MongoDBMulti | MongoDB] = None, namespace: Optional[str] = None, additional_domains: Optional[List[str]] = None, service_fqdns: Optional[List[str]] = None, @@ -100,7 +101,7 @@ def create_multi_cluster_mongodb_x509_tls_certs( bundle_secret_name: str, member_cluster_clients: List[MultiClusterClient], central_cluster_client: kubernetes.client.ApiClient, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, additional_domains: Optional[List[str]] = None, service_fqdns: Optional[List[str]] = None, clusterwide: bool = False, diff --git a/docker/mongodb-kubernetes-tests/kubetester/mongodb.py b/docker/mongodb-kubernetes-tests/kubetester/mongodb.py index e808023d1..fbc515e8f 100644 --- a/docker/mongodb-kubernetes-tests/kubetester/mongodb.py +++ b/docker/mongodb-kubernetes-tests/kubetester/mongodb.py @@ -211,6 +211,16 @@ def read_statefulsets(self, clients: List[MultiClusterClient]) -> Dict[str, clie def assert_connectivity(self, ca_path: Optional[str] = None, cluster_domain: str = "cluster.local"): return self.tester(ca_path=ca_path).assert_connectivity() + def get_item_spec(self, cluster_name: str) -> Dict: + for spec in sorted( + self["spec"]["clusterSpecList"], + key=lambda x: x["clusterName"], + ): + if spec["clusterName"] == cluster_name: + return spec + + raise ValueError(f"Cluster with name {cluster_name} not found!") + def set_architecture_annotation(self): if "annotations" not in self["metadata"]: self["metadata"]["annotations"] = {} diff --git 
a/docker/mongodb-kubernetes-tests/tests/authentication/mongodb_custom_roles.py b/docker/mongodb-kubernetes-tests/tests/authentication/mongodb_custom_roles.py index 00f7cf963..60c316754 100644 --- a/docker/mongodb-kubernetes-tests/tests/authentication/mongodb_custom_roles.py +++ b/docker/mongodb-kubernetes-tests/tests/authentication/mongodb_custom_roles.py @@ -1,27 +1,21 @@ from kubetester import ( create_or_update_configmap, find_fixture, - random_k8s_name, read_configmap, try_load, wait_until, ) -from kubetester.mongodb import MongoDB, Phase -from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongodb import MongoDB from kubetester.mongodb_role import ClusterMongoDBRole, ClusterMongoDBRoleKind from pytest import fixture, mark +from tests.authentication.shared import custom_roles as testhelper from tests.multicluster.conftest import cluster_spec_list -@fixture(scope="module") -def project_name_prefix(namespace: str) -> str: - return random_k8s_name(f"{namespace}-project-") - - -@fixture(scope="module") -def first_project(namespace: str, project_name_prefix: str) -> str: +@fixture(scope="function") +def first_project(namespace: str) -> str: cm = read_configmap(namespace=namespace, name="my-project") - project_name = f"{project_name_prefix}-first" + project_name = f"{namespace}-first" return create_or_update_configmap( namespace=namespace, name=project_name, @@ -33,10 +27,10 @@ def first_project(namespace: str, project_name_prefix: str) -> str: ) -@fixture(scope="module") -def second_project(namespace: str, project_name_prefix: str) -> str: +@fixture(scope="function") +def second_project(namespace: str) -> str: cm = read_configmap(namespace=namespace, name="my-project") - project_name = f"{project_name_prefix}-second" + project_name = f"{namespace}-second" return create_or_update_configmap( namespace=namespace, name=project_name, @@ -48,10 +42,10 @@ def second_project(namespace: str, project_name_prefix: str) -> str: ) -@fixture(scope="module") -def 
third_project(namespace: str, project_name_prefix: str) -> str: +@fixture(scope="function") +def third_project(namespace: str) -> str: cm = read_configmap(namespace=namespace, name="my-project") - project_name = f"{project_name_prefix}-third" + project_name = f"{namespace}-third" return create_or_update_configmap( namespace=namespace, name=project_name, @@ -63,18 +57,37 @@ def third_project(namespace: str, project_name_prefix: str) -> str: ) -@fixture(scope="module") -def mongodb_role(): - resource = ClusterMongoDBRole.from_yaml(find_fixture("cluster-mongodb-role.yaml"), cluster_scoped=True) +@fixture(scope="function") +def mongodb_role_with_empty_strings() -> ClusterMongoDBRole: + resource = ClusterMongoDBRole.from_yaml( + find_fixture("cluster-mongodb-role-with-empty-strings.yaml"), cluster_scoped=True + ) + + if try_load(resource): + return resource + + return resource + + +@fixture(scope="function") +def mongodb_role_without_empty_strings() -> ClusterMongoDBRole: + resource = ClusterMongoDBRole.from_yaml( + find_fixture("cluster-mongodb-role-without-empty-strings.yaml"), cluster_scoped=True + ) if try_load(resource): return resource - return resource.update() + return resource -@fixture(scope="module") -def replica_set(namespace: str, mongodb_role: ClusterMongoDBRole, first_project: str) -> MongoDB: +@fixture(scope="function") +def replica_set( + namespace: str, + mongodb_role_with_empty_strings: ClusterMongoDBRole, + mongodb_role_without_empty_strings: ClusterMongoDBRole, + first_project: str, +) -> MongoDB: resource = MongoDB.from_yaml(find_fixture("replica-set-scram.yaml"), namespace=namespace) if try_load(resource): @@ -83,17 +96,26 @@ def replica_set(namespace: str, mongodb_role: ClusterMongoDBRole, first_project: resource["spec"]["members"] = 1 resource["spec"]["security"]["roleRefs"] = [ { - "name": mongodb_role.get_name(), + "name": mongodb_role_with_empty_strings.get_name(), + "kind": ClusterMongoDBRoleKind, + }, + { + "name": 
mongodb_role_without_empty_strings.get_name(), "kind": ClusterMongoDBRoleKind, - } + }, ] resource["spec"]["opsManager"]["configMapRef"]["name"] = first_project return resource -@fixture(scope="module") -def sharded_cluster(namespace: str, mongodb_role: ClusterMongoDBRole, second_project: str) -> MongoDB: +@fixture(scope="function") +def sharded_cluster( + namespace: str, + mongodb_role_with_empty_strings: ClusterMongoDBRole, + mongodb_role_without_empty_strings: ClusterMongoDBRole, + second_project: str, +) -> MongoDB: resource = MongoDB.from_yaml(find_fixture("sharded-cluster-scram-sha-1.yaml"), namespace=namespace) if try_load(resource): @@ -105,18 +127,27 @@ def sharded_cluster(namespace: str, mongodb_role: ClusterMongoDBRole, second_pro resource["spec"]["security"]["roleRefs"] = [ { - "name": mongodb_role.get_name(), + "name": mongodb_role_with_empty_strings.get_name(), "kind": ClusterMongoDBRoleKind, - } + }, + { + "name": mongodb_role_without_empty_strings.get_name(), + "kind": ClusterMongoDBRoleKind, + }, ] resource["spec"]["opsManager"]["configMapRef"]["name"] = second_project return resource -@fixture(scope="module") -def mc_replica_set(namespace: str, mongodb_role: ClusterMongoDBRole, third_project: str) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(find_fixture("mongodb-multi.yaml"), namespace=namespace) +@fixture(scope="function") +def mc_replica_set( + namespace: str, + mongodb_role_with_empty_strings: ClusterMongoDBRole, + mongodb_role_without_empty_strings: ClusterMongoDBRole, + third_project: str, +) -> MongoDB: + resource = MongoDB.from_yaml(find_fixture("mongodb-multi.yaml"), namespace=namespace) if try_load(resource): return resource @@ -124,9 +155,13 @@ def mc_replica_set(namespace: str, mongodb_role: ClusterMongoDBRole, third_proje resource["spec"]["security"] = { "roleRefs": [ { - "name": mongodb_role.get_name(), + "name": mongodb_role_with_empty_strings.get_name(), + "kind": ClusterMongoDBRoleKind, + }, + { + "name": 
mongodb_role_without_empty_strings.get_name(), "kind": ClusterMongoDBRoleKind, - } + }, ] } resource["spec"]["opsManager"]["configMapRef"]["name"] = third_project @@ -137,122 +172,98 @@ def mc_replica_set(namespace: str, mongodb_role: ClusterMongoDBRole, third_proje @mark.e2e_mongodb_custom_roles def test_create_resources( - mongodb_role: ClusterMongoDBRole, replica_set: MongoDB, sharded_cluster: MongoDB, mc_replica_set: MongoDBMulti + mongodb_role_with_empty_strings: ClusterMongoDBRole, + mongodb_role_without_empty_strings: ClusterMongoDBRole, + replica_set: MongoDB, + sharded_cluster: MongoDB, + mc_replica_set: MongoDB, ): - replica_set.update() - sharded_cluster.update() - mc_replica_set.update() - - replica_set.assert_reaches_phase(Phase.Running, timeout=400) - sharded_cluster.assert_reaches_phase(Phase.Running, timeout=400) - mc_replica_set.assert_reaches_phase(Phase.Running, timeout=400) + testhelper.test_create_resources( + mongodb_role_with_empty_strings, + mongodb_role_without_empty_strings, + replica_set, + sharded_cluster, + mc_replica_set, + ) @mark.e2e_mongodb_custom_roles def test_automation_config_has_roles( - replica_set: MongoDB, sharded_cluster: MongoDB, mc_replica_set: MongoDBMulti, mongodb_role: ClusterMongoDBRole + replica_set: MongoDB, + sharded_cluster: MongoDB, + mc_replica_set: MongoDB, + mongodb_role_with_empty_strings: ClusterMongoDBRole, + mongodb_role_without_empty_strings: ClusterMongoDBRole, ): - rs_tester = replica_set.get_automation_config_tester() - rs_tester.assert_has_expected_number_of_roles(expected_roles=1) - rs_tester.assert_expected_role(role_index=0, expected_value=mongodb_role.get_role()) + testhelper.test_automation_config_has_roles( + replica_set, + sharded_cluster, + mc_replica_set, + mongodb_role_with_empty_strings, + mongodb_role_without_empty_strings, + ) - sc_tester = sharded_cluster.get_automation_config_tester() - sc_tester.assert_has_expected_number_of_roles(expected_roles=1) - 
sc_tester.assert_expected_role(role_index=0, expected_value=mongodb_role.get_role()) - mcrs_tester = mc_replica_set.get_automation_config_tester() - mcrs_tester.assert_has_expected_number_of_roles(expected_roles=1) - mcrs_tester.assert_expected_role(role_index=0, expected_value=mongodb_role.get_role()) +def assert_expected_roles( + mc_replica_set: MongoDB, + replica_set: MongoDB, + sharded_cluster: MongoDB, + mongodb_role_with_empty_strings: ClusterMongoDBRole, + mongodb_role_without_empty_strings: ClusterMongoDBRole, +): + testhelper.assert_expected_roles( + mc_replica_set, + replica_set, + sharded_cluster, + mongodb_role_with_empty_strings, + mongodb_role_without_empty_strings, + ) @mark.e2e_mongodb_custom_roles -def test_changing_role( - replica_set: MongoDB, sharded_cluster: MongoDB, mc_replica_set: MongoDBMulti, mongodb_role: ClusterMongoDBRole +def test_change_inherited_role( + replica_set: MongoDB, + sharded_cluster: MongoDB, + mc_replica_set: MongoDB, + mongodb_role_with_empty_strings: ClusterMongoDBRole, + mongodb_role_without_empty_strings: ClusterMongoDBRole, ): - rs_version = replica_set.get_automation_config_tester().automation_config["version"] - sc_version = sharded_cluster.get_automation_config_tester().automation_config["version"] - mcrs_version = mc_replica_set.get_automation_config_tester().automation_config["version"] - - mongodb_role["spec"]["roles"][0]["role"] = "readWrite" - mongodb_role.update() - - wait_until(lambda: replica_set.get_automation_config_tester().reached_version(rs_version + 1), timeout=120) - wait_until(lambda: sharded_cluster.get_automation_config_tester().reached_version(sc_version + 1), timeout=120) - wait_until(lambda: mc_replica_set.get_automation_config_tester().reached_version(mcrs_version + 1), timeout=120) - - replica_set.get_automation_config_tester().assert_expected_role( - role_index=0, expected_value=mongodb_role.get_role() - ) - sharded_cluster.get_automation_config_tester().assert_expected_role( - role_index=0, 
expected_value=mongodb_role.get_role() - ) - mc_replica_set.get_automation_config_tester().assert_expected_role( - role_index=0, expected_value=mongodb_role.get_role() + testhelper.test_change_inherited_role( + replica_set, + sharded_cluster, + mc_replica_set, + mongodb_role_with_empty_strings, + mongodb_role_without_empty_strings, ) @mark.e2e_mongodb_custom_roles def test_deleting_role_does_not_remove_access( - mongodb_role: ClusterMongoDBRole, replica_set: MongoDB, sharded_cluster: MongoDB, mc_replica_set: MongoDBMulti + replica_set: MongoDB, + sharded_cluster: MongoDB, + mc_replica_set: MongoDB, + mongodb_role_with_empty_strings: ClusterMongoDBRole, ): - mongodb_role.delete() - - assert try_load(mongodb_role) == False - - replica_set.assert_reaches_phase( - phase=Phase.Failed, msg_regexp=f"ClusterMongoDBRole '{mongodb_role.get_name()}' not found" - ) - sharded_cluster.assert_reaches_phase( - phase=Phase.Failed, msg_regexp=f"ClusterMongoDBRole '{mongodb_role.get_name()}' not found" - ) - mc_replica_set.assert_reaches_phase( - phase=Phase.Failed, msg_regexp=f"ClusterMongoDBRole '{mongodb_role.get_name()}' not found" + testhelper.test_deleting_role_does_not_remove_access( + replica_set, sharded_cluster, mc_replica_set, mongodb_role_with_empty_strings ) - # The role should still exist in the automation config - replica_set.get_automation_config_tester().assert_has_expected_number_of_roles(expected_roles=1) - sharded_cluster.get_automation_config_tester().assert_has_expected_number_of_roles(expected_roles=1) - mc_replica_set.get_automation_config_tester().assert_has_expected_number_of_roles(expected_roles=1) - @mark.e2e_mongodb_custom_roles -def test_removing_role_from_resources(replica_set: MongoDB, sharded_cluster: MongoDB, mc_replica_set: MongoDBMulti): - sc_version = sharded_cluster.get_automation_config_tester().automation_config["version"] - mcrs_version = mc_replica_set.get_automation_config_tester().automation_config["version"] - - 
sharded_cluster["spec"]["security"]["roleRefs"] = None - sharded_cluster.update() - - mc_replica_set["spec"]["security"]["roleRefs"] = None - mc_replica_set.update() - - wait_until(lambda: sharded_cluster.get_automation_config_tester().reached_version(sc_version + 1), timeout=120) - wait_until(lambda: mc_replica_set.get_automation_config_tester().reached_version(mcrs_version + 1), timeout=120) - - sharded_cluster.get_automation_config_tester().assert_has_expected_number_of_roles(expected_roles=0) - mc_replica_set.get_automation_config_tester().assert_has_expected_number_of_roles(expected_roles=0) +def test_removing_role_from_resources(replica_set: MongoDB, sharded_cluster: MongoDB, mc_replica_set: MongoDB): + testhelper.test_removing_role_from_resources(replica_set, sharded_cluster, mc_replica_set) @mark.e2e_mongodb_custom_roles def test_install_operator_with_clustermongodbroles_disabled(multi_cluster_operator_no_cluster_mongodb_roles): - multi_cluster_operator_no_cluster_mongodb_roles.assert_is_running() + testhelper.test_install_operator_with_clustermongodbroles_disabled(multi_cluster_operator_no_cluster_mongodb_roles) @mark.e2e_mongodb_custom_roles def test_replicaset_is_failed(replica_set: MongoDB): - replica_set.assert_reaches_phase( - Phase.Failed, - msg_regexp="RoleRefs are not supported when ClusterMongoDBRoles are disabled. 
Please enable ClusterMongoDBRoles in the operator configuration.", - ) + testhelper.test_replicaset_is_failed(replica_set) @mark.e2e_mongodb_custom_roles def test_replicaset_is_reconciled_without_rolerefs(replica_set: MongoDB): - rs_version = replica_set.get_automation_config_tester().automation_config["version"] - replica_set["spec"]["security"]["roleRefs"] = None - replica_set.update() - - replica_set.assert_reaches_phase(Phase.Running) - wait_until(lambda: replica_set.get_automation_config_tester().reached_version(rs_version + 1), timeout=120) - - replica_set.get_automation_config_tester().assert_has_expected_number_of_roles(expected_roles=0) + testhelper.test_replicaset_is_reconciled_without_rolerefs(replica_set) diff --git a/docker/mongodb-kubernetes-tests/tests/authentication/mongodbmulticluster_custom_roles.py b/docker/mongodb-kubernetes-tests/tests/authentication/mongodbmulticluster_custom_roles.py new file mode 100644 index 000000000..371015395 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/authentication/mongodbmulticluster_custom_roles.py @@ -0,0 +1,271 @@ +from kubetester import ( + create_or_update_configmap, + find_fixture, + read_configmap, + try_load, + wait_until, +) +from kubetester.automation_config_tester import AutomationConfigTester +from kubetester.mongodb import MongoDB, Phase +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongodb_role import ClusterMongoDBRole, ClusterMongoDBRoleKind +from pytest import fixture, mark +from tests.authentication.shared import custom_roles as testhelper +from tests.multicluster.conftest import cluster_spec_list + + +@fixture(scope="function") +def first_project(namespace: str) -> str: + cm = read_configmap(namespace=namespace, name="my-project") + project_name = f"{namespace}-first" + return create_or_update_configmap( + namespace=namespace, + name=project_name, + data={ + "baseUrl": cm["baseUrl"], + "projectName": project_name, + "orgId": cm["orgId"], + }, + ) + + 
+@fixture(scope="function") +def second_project(namespace: str) -> str: + cm = read_configmap(namespace=namespace, name="my-project") + project_name = f"{namespace}-second" + return create_or_update_configmap( + namespace=namespace, + name=project_name, + data={ + "baseUrl": cm["baseUrl"], + "projectName": project_name, + "orgId": cm["orgId"], + }, + ) + + +@fixture(scope="function") +def third_project(namespace: str) -> str: + cm = read_configmap(namespace=namespace, name="my-project") + project_name = f"{namespace}-third" + return create_or_update_configmap( + namespace=namespace, + name=project_name, + data={ + "baseUrl": cm["baseUrl"], + "projectName": project_name, + "orgId": cm["orgId"], + }, + ) + + +@fixture(scope="function") +def mongodb_role_with_empty_strings() -> ClusterMongoDBRole: + resource = ClusterMongoDBRole.from_yaml( + find_fixture("cluster-mongodb-role-with-empty-strings.yaml"), cluster_scoped=True + ) + + if try_load(resource): + return resource + + return resource + + +@fixture(scope="function") +def mongodb_role_without_empty_strings() -> ClusterMongoDBRole: + resource = ClusterMongoDBRole.from_yaml( + find_fixture("cluster-mongodb-role-without-empty-strings.yaml"), cluster_scoped=True + ) + + if try_load(resource): + return resource + + return resource + + +@fixture(scope="function") +def replica_set( + namespace: str, + mongodb_role_with_empty_strings: ClusterMongoDBRole, + mongodb_role_without_empty_strings: ClusterMongoDBRole, + first_project: str, +) -> MongoDB: + resource = MongoDB.from_yaml(find_fixture("replica-set-scram.yaml"), namespace=namespace) + + if try_load(resource): + return resource + + resource["spec"]["members"] = 1 + resource["spec"]["security"]["roleRefs"] = [ + { + "name": mongodb_role_with_empty_strings.get_name(), + "kind": ClusterMongoDBRoleKind, + }, + { + "name": mongodb_role_without_empty_strings.get_name(), + "kind": ClusterMongoDBRoleKind, + }, + ] + resource["spec"]["opsManager"]["configMapRef"]["name"] = 
first_project + + return resource + + +@fixture(scope="function") +def sharded_cluster( + namespace: str, + mongodb_role_with_empty_strings: ClusterMongoDBRole, + mongodb_role_without_empty_strings: ClusterMongoDBRole, + second_project: str, +) -> MongoDB: + resource = MongoDB.from_yaml(find_fixture("sharded-cluster-scram-sha-1.yaml"), namespace=namespace) + + if try_load(resource): + return resource + + resource["spec"]["mongodsPerShardCount"] = 1 + resource["spec"]["mongosCount"] = 1 + resource["spec"]["configServerCount"] = 1 + + resource["spec"]["security"]["roleRefs"] = [ + { + "name": mongodb_role_with_empty_strings.get_name(), + "kind": ClusterMongoDBRoleKind, + }, + { + "name": mongodb_role_without_empty_strings.get_name(), + "kind": ClusterMongoDBRoleKind, + }, + ] + resource["spec"]["opsManager"]["configMapRef"]["name"] = second_project + + return resource + + +@fixture(scope="function") +def mc_replica_set( + namespace: str, + mongodb_role_with_empty_strings: ClusterMongoDBRole, + mongodb_role_without_empty_strings: ClusterMongoDBRole, + third_project: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(find_fixture("mongodbmulticluster-multi.yaml"), namespace=namespace) + + if try_load(resource): + return resource + + resource["spec"]["security"] = { + "roleRefs": [ + { + "name": mongodb_role_with_empty_strings.get_name(), + "kind": ClusterMongoDBRoleKind, + }, + { + "name": mongodb_role_without_empty_strings.get_name(), + "kind": ClusterMongoDBRoleKind, + }, + ] + } + resource["spec"]["opsManager"]["configMapRef"]["name"] = third_project + resource["spec"]["clusterSpecList"] = cluster_spec_list(["kind-e2e-cluster-1"], [1]) + + return resource + + +@mark.e2e_mongodbmulticluster_custom_roles +def test_create_resources( + mongodb_role_with_empty_strings: ClusterMongoDBRole, + mongodb_role_without_empty_strings: ClusterMongoDBRole, + replica_set: MongoDB, + sharded_cluster: MongoDB, + mc_replica_set: MongoDBMulti, +): + 
testhelper.test_create_resources( + mongodb_role_with_empty_strings, + mongodb_role_without_empty_strings, + replica_set, + sharded_cluster, + mc_replica_set, + ) + + +@mark.e2e_mongodbmulticluster_custom_roles +def test_automation_config_has_roles( + replica_set: MongoDB, + sharded_cluster: MongoDB, + mc_replica_set: MongoDBMulti, + mongodb_role_with_empty_strings: ClusterMongoDBRole, + mongodb_role_without_empty_strings: ClusterMongoDBRole, +): + testhelper.test_automation_config_has_roles( + replica_set, + sharded_cluster, + mc_replica_set, + mongodb_role_with_empty_strings, + mongodb_role_without_empty_strings, + ) + + +def assert_expected_roles( + mc_replica_set: MongoDBMulti, + replica_set: MongoDB, + sharded_cluster: MongoDB, + mongodb_role_with_empty_strings: ClusterMongoDBRole, + mongodb_role_without_empty_strings: ClusterMongoDBRole, +): + testhelper.assert_expected_roles( + mc_replica_set, + replica_set, + sharded_cluster, + mongodb_role_with_empty_strings, + mongodb_role_without_empty_strings, + ) + + +@mark.e2e_mongodbmulticluster_custom_roles +def test_change_inherited_role( + replica_set: MongoDB, + sharded_cluster: MongoDB, + mc_replica_set: MongoDBMulti, + mongodb_role_with_empty_strings: ClusterMongoDBRole, + mongodb_role_without_empty_strings: ClusterMongoDBRole, +): + testhelper.test_change_inherited_role( + replica_set, + sharded_cluster, + mc_replica_set, + mongodb_role_with_empty_strings, + mongodb_role_without_empty_strings, + ) + + +@mark.e2e_mongodbmulticluster_custom_roles +def test_deleting_role_does_not_remove_access( + replica_set: MongoDB, + sharded_cluster: MongoDB, + mc_replica_set: MongoDBMulti, + mongodb_role_with_empty_strings: ClusterMongoDBRole, +): + testhelper.test_deleting_role_does_not_remove_access( + replica_set, sharded_cluster, mc_replica_set, mongodb_role_with_empty_strings + ) + + +@mark.e2e_mongodbmulticluster_custom_roles +def test_removing_role_from_resources(replica_set: MongoDB, sharded_cluster: MongoDB, 
mc_replica_set: MongoDBMulti): + testhelper.test_removing_role_from_resources(replica_set, sharded_cluster, mc_replica_set) + + +@mark.e2e_mongodbmulticluster_custom_roles +def test_install_operator_with_clustermongodbroles_disabled(multi_cluster_operator_no_cluster_mongodb_roles): + testhelper.test_install_operator_with_clustermongodbroles_disabled(multi_cluster_operator_no_cluster_mongodb_roles) + + +@mark.e2e_mongodbmulticluster_custom_roles +def test_replicaset_is_failed(replica_set: MongoDB): + testhelper.test_replicaset_is_failed(replica_set) + + +@mark.e2e_mongodbmulticluster_custom_roles +def test_replicaset_is_reconciled_without_rolerefs(replica_set: MongoDB): + testhelper.test_replicaset_is_reconciled_without_rolerefs(replica_set) diff --git a/docker/mongodb-kubernetes-tests/tests/authentication/shared/custom_roles.py b/docker/mongodb-kubernetes-tests/tests/authentication/shared/custom_roles.py new file mode 100644 index 000000000..52b3980ac --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/authentication/shared/custom_roles.py @@ -0,0 +1,225 @@ +from kubetester import ( + create_or_update_configmap, + find_fixture, + read_configmap, + try_load, + wait_until, +) +from kubetester.automation_config_tester import AutomationConfigTester +from kubetester.mongodb import MongoDB, Phase +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongodb_role import ClusterMongoDBRole, ClusterMongoDBRoleKind +from pytest import fixture, mark +from tests.multicluster.conftest import cluster_spec_list + + +# fmt: off +def get_expected_role(role_name: str) -> dict: + return { + "role": role_name, + "db": "admin", + "roles": [ + { + "db": "admin", + "role": "read" + } + ], + "privileges": [ + { + "resource": { + "db": "config", + "collection": "" + }, + "actions": [ + "find", + "update", + "insert", + "remove" + ] + }, + { + "resource": { + "db": "users", + "collection": "usersCollection" + }, + "actions": [ + "update", + "insert", + "remove" + ] + }, 
+ { + "resource": { + "db": "", + "collection": "" + }, + "actions": [ + "find" + ] + }, + { + "resource": { + "cluster": True + }, + "actions": [ + "bypassWriteBlockingMode" + ] + } + ], + "authenticationRestrictions": [ + { + "clientSource": ["127.0.0.0/8"], + "serverAddress": ["10.0.0.0/8"] + } + ], + } +# fmt: on + + +def test_create_resources( + mongodb_role_with_empty_strings: ClusterMongoDBRole, + mongodb_role_without_empty_strings: ClusterMongoDBRole, + replica_set: MongoDB, + sharded_cluster: MongoDB, + mc_replica_set: MongoDBMulti | MongoDB, +): + mongodb_role_with_empty_strings.update() + mongodb_role_without_empty_strings.update() + + replica_set.update() + sharded_cluster.update() + mc_replica_set.update() + + replica_set.assert_reaches_phase(Phase.Running, timeout=400) + sharded_cluster.assert_reaches_phase(Phase.Running, timeout=400) + mc_replica_set.assert_reaches_phase(Phase.Running, timeout=400) + + +def test_automation_config_has_roles( + replica_set: MongoDB, + sharded_cluster: MongoDB, + mc_replica_set: MongoDBMulti | MongoDB, + mongodb_role_with_empty_strings: ClusterMongoDBRole, + mongodb_role_without_empty_strings: ClusterMongoDBRole, +): + assert_expected_roles( + mc_replica_set, + replica_set, + sharded_cluster, + mongodb_role_with_empty_strings, + mongodb_role_without_empty_strings, + ) + + +def assert_expected_roles( + mc_replica_set: MongoDBMulti | MongoDB, + replica_set: MongoDB, + sharded_cluster: MongoDB, + mongodb_role_with_empty_strings: ClusterMongoDBRole, + mongodb_role_without_empty_strings: ClusterMongoDBRole, +): + rs_tester = replica_set.get_automation_config_tester() + sc_tester = sharded_cluster.get_automation_config_tester() + mcrs_tester = mc_replica_set.get_automation_config_tester() + mcrs_tester.assert_has_expected_number_of_roles(expected_roles=2) + rs_tester.assert_has_expected_number_of_roles(expected_roles=2) + sc_tester.assert_has_expected_number_of_roles(expected_roles=2) + + rs_tester.assert_expected_role( + 
role_index=0, expected_value=get_expected_role(mongodb_role_with_empty_strings["spec"]["role"]) + ) + # the second role created without specifying fields with "" should result in identical role to the one with explicitly specified db: "", collection: "". + rs_tester.assert_expected_role( + role_index=1, expected_value=get_expected_role(mongodb_role_without_empty_strings["spec"]["role"]) + ) + sc_tester.assert_expected_role( + role_index=0, expected_value=get_expected_role(mongodb_role_with_empty_strings["spec"]["role"]) + ) + sc_tester.assert_expected_role( + role_index=1, expected_value=get_expected_role(mongodb_role_without_empty_strings["spec"]["role"]) + ) + mcrs_tester.assert_expected_role( + role_index=0, expected_value=get_expected_role(mongodb_role_with_empty_strings["spec"]["role"]) + ) + mcrs_tester.assert_expected_role( + role_index=1, expected_value=get_expected_role(mongodb_role_without_empty_strings["spec"]["role"]) + ) + + +def test_change_inherited_role( + replica_set: MongoDB, + sharded_cluster: MongoDB, + mc_replica_set: MongoDBMulti | MongoDB, + mongodb_role_with_empty_strings: ClusterMongoDBRole, + mongodb_role_without_empty_strings: ClusterMongoDBRole, +): + mongodb_role_with_empty_strings["spec"]["roles"][0]["role"] = "readWrite" + mongodb_role_with_empty_strings.update() + + def is_role_changed(ac_tester: AutomationConfigTester): + return ( + ac_tester.get_role_at_index(0)["roles"][0]["role"] == "readWrite" + and ac_tester.get_role_at_index(1)["roles"][0]["role"] == "read" + ) + + wait_until(lambda: is_role_changed(replica_set.get_automation_config_tester())) + wait_until(lambda: is_role_changed(sharded_cluster.get_automation_config_tester())) + wait_until(lambda: is_role_changed(mc_replica_set.get_automation_config_tester())) + + +def test_deleting_role_does_not_remove_access( + replica_set: MongoDB, + sharded_cluster: MongoDB, + mc_replica_set: MongoDBMulti | MongoDB, + mongodb_role_with_empty_strings: ClusterMongoDBRole, +): + 
mongodb_role_with_empty_strings.delete() + + assert try_load(mongodb_role_with_empty_strings) == False + + replica_set.assert_reaches_phase( + phase=Phase.Failed, msg_regexp=f"ClusterMongoDBRole '{mongodb_role_with_empty_strings.get_name()}' not found" + ) + sharded_cluster.assert_reaches_phase( + phase=Phase.Failed, msg_regexp=f"ClusterMongoDBRole '{mongodb_role_with_empty_strings.get_name()}' not found" + ) + mc_replica_set.assert_reaches_phase( + phase=Phase.Failed, msg_regexp=f"ClusterMongoDBRole '{mongodb_role_with_empty_strings.get_name()}' not found" + ) + + # The role should still exist in the automation config + replica_set.get_automation_config_tester().assert_has_expected_number_of_roles(expected_roles=2) + sharded_cluster.get_automation_config_tester().assert_has_expected_number_of_roles(expected_roles=2) + mc_replica_set.get_automation_config_tester().assert_has_expected_number_of_roles(expected_roles=2) + + +def test_removing_role_from_resources( + replica_set: MongoDB, sharded_cluster: MongoDB, mc_replica_set: MongoDBMulti | MongoDB +): + sharded_cluster["spec"]["security"]["roleRefs"] = None + sharded_cluster.update() + + mc_replica_set["spec"]["security"]["roleRefs"] = None + mc_replica_set.update() + + wait_until(lambda: len(sharded_cluster.get_automation_config_tester().automation_config["roles"]) == 0, timeout=120) + wait_until(lambda: len(mc_replica_set.get_automation_config_tester().automation_config["roles"]) == 0, timeout=120) + + +def test_install_operator_with_clustermongodbroles_disabled(multi_cluster_operator_no_cluster_mongodb_roles): + multi_cluster_operator_no_cluster_mongodb_roles.assert_is_running() + + +def test_replicaset_is_failed(replica_set: MongoDB): + replica_set.assert_reaches_phase( + Phase.Failed, + msg_regexp="RoleRefs are not supported when ClusterMongoDBRoles are disabled. 
Please enable ClusterMongoDBRoles in the operator configuration.", + ) + + +def test_replicaset_is_reconciled_without_rolerefs(replica_set: MongoDB): + replica_set["spec"]["security"]["roleRefs"] = None + replica_set.update() + + replica_set.assert_reaches_phase(Phase.Running) + replica_set.get_automation_config_tester().assert_has_expected_number_of_roles(expected_roles=0) diff --git a/docker/mongodb-kubernetes-tests/tests/constants.py b/docker/mongodb-kubernetes-tests/tests/constants.py new file mode 100644 index 000000000..d4854a5db --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/constants.py @@ -0,0 +1,42 @@ +AWS_REGION = "us-east-1" + +KUBECONFIG_FILEPATH = "/etc/config/kubeconfig" +MULTI_CLUSTER_CONFIG_DIR = "/etc/multicluster" +# AppDB monitoring is disabled by default for e2e tests. +# If monitoring is needed use monitored_appdb_operator_installation_config / operator_with_monitored_appdb +MONITOR_APPDB_E2E_DEFAULT = "true" +CLUSTER_HOST_MAPPING = { + "us-central1-c_central": "https://35.232.85.244", + "us-east1-b_member-1a": "https://35.243.222.230", + "us-east1-c_member-2a": "https://34.75.94.207", + "us-west1-a_member-3a": "https://35.230.121.15", +} + +LEGACY_CENTRAL_CLUSTER_NAME: str = "__default" +LEGACY_DEPLOYMENT_STATE_VERSION: str = "1.27.0" + +# Helm charts +DEFAULT_HELM_CHART_PATH_ENV_VAR_NAME = "DEFAULT_HELM_CHART_PATH" + +OCI_HELM_VERSION_ENV_VAR_NAME = "OCI_HELM_VERSION" +OCI_HELM_REGISTRY_ENV_VAR_NAME = "OCI_HELM_REGISTRY" +OCI_HELM_REPOSITORY_ENV_VAR_NAME = "OCI_HELM_REPOSITORY" +OCI_HELM_REGION_ENV_VAR_NAME = "OCI_HELM_REGION" + +LEGACY_OPERATOR_CHART = "mongodb/enterprise-operator" +MCK_HELM_CHART = "mongodb/mongodb-kubernetes" +LOCAL_HELM_CHART_DIR = "helm_chart" + +OFFICIAL_OPERATOR_IMAGE_NAME = "mongodb-kubernetes" +LEGACY_OPERATOR_IMAGE_NAME = "mongodb-enterprise-operator-ubi" + +# Names for operator and RBAC +OPERATOR_NAME = "mongodb-kubernetes-operator" +MULTI_CLUSTER_OPERATOR_NAME = OPERATOR_NAME + "-multi-cluster" 
+LEGACY_OPERATOR_NAME = "mongodb-enterprise-operator" +LEGACY_MULTI_CLUSTER_OPERATOR_NAME = LEGACY_OPERATOR_NAME + "-multi-cluster" +APPDB_SA_NAME = "mongodb-kubernetes-appdb" +DATABASE_SA_NAME = "mongodb-kubernetes-database-pods" +OM_SA_NAME = "mongodb-kubernetes-ops-manager" +TELEMETRY_CONFIGMAP_NAME = LEGACY_OPERATOR_NAME + "-telemetry" +MULTI_CLUSTER_MEMBER_LIST_CONFIGMAP = OPERATOR_NAME + "-member-list" diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/__init__.py b/docker/mongodb-kubernetes-tests/tests/multicluster/__init__.py index 13170a09d..cee43d2cd 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/__init__.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/__init__.py @@ -5,6 +5,7 @@ from kubetester.helm import helm_template from kubetester.multicluster_client import MultiClusterClient from tests import test_logger +from tests.constants import LOCAL_HELM_CHART_DIR logger = test_logger.get_test_logger(__name__) @@ -15,7 +16,7 @@ def prepare_multi_cluster_namespaces( member_cluster_clients: List[MultiClusterClient], central_cluster_name: str, skip_central_cluster: bool = True, - helm_chart_path="helm_chart", + helm_chart_path=LOCAL_HELM_CHART_DIR, ): """create a new namespace and configures all necessary service accounts there""" diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/__init__.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/mongodb-multi-central-sts-override.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/mongodb-multi-central-sts-override.yaml new file mode 100644 index 000000000..2aa86569d --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/mongodb-multi-central-sts-override.yaml @@ -0,0 +1,32 @@ +--- +apiVersion: mongodb.com/v1 +kind: MongoDB +metadata: + name: 
multi-replica-set +spec: + version: 4.4.0-ent + type: ReplicaSet + topology: MultiCluster + duplicateServiceObjects: false + credentials: my-credentials + statefulSet: + spec: + template: + spec: + # FIXME workaround for sleep 7200 hanging + shareProcessNamespace: true + containers: + - name: sidecar1 + image: busybox + command: ["sleep"] + args: [ "7200" ] + opsManager: + configMapRef: + name: my-project + clusterSpecList: + - clusterName: kind-e2e-cluster-1 + members: 2 + - clusterName: kind-e2e-cluster-2 + members: 1 + - clusterName: kind-e2e-cluster-3 + members: 2 diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/mongodb-multi-cluster.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/mongodb-multi-cluster.yaml new file mode 100644 index 000000000..2912fc30e --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/mongodb-multi-cluster.yaml @@ -0,0 +1,21 @@ +apiVersion: mongodb.com/v1 +kind: MongoDB +metadata: + name: multi-replica-set +spec: + version: 4.4.0-ent + type: ReplicaSet + topology: MultiCluster + persistent: false + duplicateServiceObjects: false + credentials: my-credentials + opsManager: + configMapRef: + name: my-project + clusterSpecList: + - clusterName: kind-e2e-cluster-1 + members: 2 + - clusterName: kind-e2e-cluster-2 + members: 1 + - clusterName: kind-e2e-cluster-3 + members: 2 diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/mongodb-multi-dr.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/mongodb-multi-dr.yaml new file mode 100644 index 000000000..e88247368 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/mongodb-multi-dr.yaml @@ -0,0 +1,40 @@ +--- +apiVersion: mongodb.com/v1 +kind: MongoDB +metadata: + name: multi-replica-set +spec: + version: 4.4.0-ent + type: ReplicaSet + topology: MultiCluster + persistent: false + credentials: my-credentials + 
duplicateServiceObjects: true + opsManager: + configMapRef: + name: my-project + clusterSpecList: + - clusterName: gke_k8s-rdas_us-east1-b_member-1a + members: 2 + statefulSet: + spec: + template: + spec: + securityContext: + fsGroup: 2000 + - clusterName: gke_k8s-rdas_us-east1-c_member-2a + members: 1 + statefulSet: + spec: + template: + spec: + securityContext: + fsGroup: 2000 + - clusterName: gke_k8s-rdas_us-west1-a_member-3a + members: 2 + statefulSet: + spec: + template: + spec: + securityContext: + fsGroup: 2000 diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/mongodb-multi-pvc-resize.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/mongodb-multi-pvc-resize.yaml new file mode 100644 index 000000000..ed764afe3 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/mongodb-multi-pvc-resize.yaml @@ -0,0 +1,32 @@ +--- +apiVersion: mongodb.com/v1 +kind: MongoDB +metadata: + name: multi-replica-set +spec: + version: 4.4.0-ent + type: ReplicaSet + topology: MultiCluster + duplicateServiceObjects: false + credentials: my-credentials + opsManager: + configMapRef: + name: my-project + clusterSpecList: + - clusterName: kind-e2e-cluster-1 + members: 2 + - clusterName: kind-e2e-cluster-2 + members: 1 + - clusterName: kind-e2e-cluster-3 + members: 2 + persistent: true + statefulSet: + spec: + volumeClaimTemplates: + - metadata: + name: data + spec: + resources: + requests: + storage: 1Gi + storageClassName: csi-hostpath-sc diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/mongodb-multi-split-horizon.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/mongodb-multi-split-horizon.yaml new file mode 100644 index 000000000..79715668a --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/mongodb-multi-split-horizon.yaml @@ -0,0 +1,29 @@ +--- +apiVersion: mongodb.com/v1 +kind: MongoDB +metadata: + name: 
multi-replica-set +spec: + connectivity: + replicaSetHorizons: + - "test-horizon": "ec2-52-56-69-123.eu-west-2.compute.amazonaws.com:30100" + - "test-horizon": "ec2-3-9-165-220.eu-west-2.compute.amazonaws.com:30100" + - "test-horizon": "ec2-3-10-22-163.eu-west-2.compute.amazonaws.com:30100" + + + version: 4.4.0-ent + type: ReplicaSet + topology: MultiCluster + persistent: true + duplicateServiceObjects: false + credentials: my-credentials + opsManager: + configMapRef: + name: my-project + clusterSpecList: + - clusterName: kind-e2e-cluster-1 + members: 1 + - clusterName: kind-e2e-cluster-2 + members: 1 + - clusterName: kind-e2e-cluster-3 + members: 1 diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/mongodb-multi-sts-override.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/mongodb-multi-sts-override.yaml new file mode 100644 index 000000000..cfaa3d9aa --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/mongodb-multi-sts-override.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: mongodb.com/v1 +kind: MongoDB +metadata: + name: multi-replica-set +spec: + version: 4.4.0-ent + type: ReplicaSet + topology: MultiCluster + duplicateServiceObjects: false + credentials: my-credentials + opsManager: + configMapRef: + name: my-project + clusterSpecList: + - clusterName: kind-e2e-cluster-1 + members: 2 + statefulSet: + spec: + selector: + matchLabels: + app: "multi-replica-set" + template: + metadata: + labels: + app: "multi-replica-set" + spec: + containers: + - name: sidecar1 + image: busybox + command: ["sleep"] + args: [ "7200" ] + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + - clusterName: kind-e2e-cluster-2 + members: 1 + statefulSet: + spec: + template: + spec: + containers: + - name: sidecar2 + image: busybox + command: ["sleep"] + args: [ "7200" ] + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" 
] + - clusterName: kind-e2e-cluster-3 + members: 1 + statefulSet: + spec: + template: + spec: + containers: + - name: sidecar3 + image: busybox + command: ["sleep"] + args: [ "7200" ] + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/mongodb-multi.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/mongodb-multi.yaml new file mode 100644 index 000000000..84b865bcb --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/mongodb-multi.yaml @@ -0,0 +1,21 @@ +--- +apiVersion: mongodb.com/v1 +kind: MongoDB +metadata: + name: multi-replica-set +spec: + version: 4.4.0-ent + type: ReplicaSet + topology: MultiCluster + duplicateServiceObjects: false + credentials: my-credentials + opsManager: + configMapRef: + name: my-project + clusterSpecList: + - clusterName: kind-e2e-cluster-1 + members: 2 + - clusterName: kind-e2e-cluster-2 + members: 1 + - clusterName: kind-e2e-cluster-3 + members: 2 diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/split-horizon-node-port.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/mongodb-split-horizon-node-port.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/split-horizon-node-port.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/mongodb-split-horizon-node-port.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-user.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/mongodb-user.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-user.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/mongodb-user.yaml diff --git 
a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-x509-user.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/mongodb-x509-user.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-x509-user.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/mongodb-x509-user.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/oidc/mongodb-multi-m2m-group.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/oidc/mongodb-multi-m2m-group.yaml new file mode 100644 index 000000000..8699e2fef --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/oidc/mongodb-multi-m2m-group.yaml @@ -0,0 +1,45 @@ +--- +apiVersion: mongodb.com/v1 +kind: MongoDB +metadata: + name: oidc-multi-replica-set +spec: + version: 7.0.5-ent + type: ReplicaSet + topology: MultiCluster + duplicateServiceObjects: false + credentials: my-credentials + opsManager: + configMapRef: + name: my-project + clusterSpecList: + - clusterName: kind-e2e-cluster-1 + members: 1 + - clusterName: kind-e2e-cluster-2 + members: 1 + - clusterName: kind-e2e-cluster-3 + members: 2 + security: + authentication: + agents: + mode: SCRAM + enabled: true + modes: + - SCRAM + - OIDC + oidcProviderConfigs: + - audience: "" + clientId: "" + issuerURI: "" + requestedScopes: [ ] + userClaim: "sub" + groupsClaim: "cognito:groups" + authorizationMethod: "WorkloadIdentityFederation" + authorizationType: "GroupMembership" + configurationName: "OIDC-test" + roles: + - role: "OIDC-test/test" + db: "admin" + roles: + - role: "readWriteAnyDatabase" + db: "admin" diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/oidc/mongodb-multi-m2m-user.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/oidc/mongodb-multi-m2m-user.yaml new file mode 100644 index 000000000..c80c922e5 --- /dev/null +++ 
b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/oidc/mongodb-multi-m2m-user.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: mongodb.com/v1 +kind: MongoDB +metadata: + name: oidc-multi-replica-set +spec: + version: 7.0.5-ent + type: ReplicaSet + topology: MultiCluster + duplicateServiceObjects: false + credentials: my-credentials + opsManager: + configMapRef: + name: my-project + clusterSpecList: + - clusterName: kind-e2e-cluster-1 + members: 1 + - clusterName: kind-e2e-cluster-2 + members: 1 + - clusterName: kind-e2e-cluster-3 + members: 2 + security: + authentication: + agents: + mode: SCRAM + enabled: true + modes: + - SCRAM + - OIDC + oidcProviderConfigs: + - audience: "" + clientId: "" + issuerURI: "" + requestedScopes: [ ] + userClaim: "sub" + authorizationMethod: "WorkloadIdentityFederation" + authorizationType: "UserID" + configurationName: "OIDC-test-user" diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/oidc/oidc-user-multi.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/oidc/oidc-user-multi.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/oidc/oidc-user-multi.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/oidc/oidc-user-multi.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/split-horizon-node-ports/mongodb-split-horizon-node-port.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/split-horizon-node-ports/mongodb-split-horizon-node-port.yaml new file mode 100644 index 000000000..b118ddafb --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/fixtures/split-horizon-node-ports/mongodb-split-horizon-node-port.yaml @@ -0,0 +1,18 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: my-service + labels: + controller: mongodb-enterprise-operator + mongodb: -multi-cluster-replica-set + statefulset.kubernetes.io/pod-name: 
multi-cluster-replica-set-0-0 +spec: + type: NodePort + selector: + controller: mongodb-enterprise-operator + statefulset.kubernetes.io/pod-name: multi-cluster-replica-set-0-0 + ports: + - port: 30100 + targetPort: 27017 + nodePort: 30100 diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py new file mode 100644 index 000000000..ebb6fb1a5 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py @@ -0,0 +1,151 @@ +# This is a manual test deploying MongoDB on 2 clusters in GKE and EKS. +# Steps to execute: +# 1. After the test is executed it will create external services of type LoadBalancer. All pods will be not ready +# due to lack of external connectivity. +# 2. Go to mc.mongokubernetes.com Route53 hosted zone: https://us-east-1.console.aws.amazon.com/route53/v2/hostedzones#ListRecordSets/Z04069951X9SBFR8OQUFM +# 3. Copy provisioned hostnames from external services in EKS and update CNAME records for: +# * multi-cluster-rs-0-0.aws-member-cluster.eks.mc.mongokubernetes.com +# * multi-cluster-rs-0-1.aws-member-cluster.eks.mc.mongokubernetes.com +# 4. Copy IP addresses of external services in GKE and update A record for: +# * multi-cluster-rs-1-0.gke-member-cluster.gke.mc.mongokubernetes.com +# * multi-cluster-rs-1-1.gke-member-cluster.gke.mc.mongokubernetes.com +# 5. After few minutes everything should be ready. 
+ +from typing import List + +import kubernetes +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDB +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from pytest import fixture +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke as testhelper + +CERT_SECRET_PREFIX = "clustercert" +MDB_RESOURCE = "multi-cluster-rs" +BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" + + +@fixture(scope="module") +def cert_additional_domains() -> list[str]: + return [ + "*.gke-member-cluster.gke.mc.mongokubernetes.com", + "*.aws-member-cluster.eks.mc.mongokubernetes.com", + ] + + +@fixture(scope="module") +def mongodb_multi_unmarshalled(namespace: str, member_cluster_names: List[str]) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + resource["spec"]["persistent"] = False + # These domains map 1:1 to the CoreDNS file. Please be mindful when updating them. 
+ resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1]) + + resource["spec"]["externalAccess"] = {} + resource["spec"]["clusterSpecList"][0]["externalAccess"] = { + "externalDomain": "aws-member-cluster.eks.mc.mongokubernetes.com", + "externalService": { + "annotations": {"cloud.google.com/l4-rbs": "enabled"}, + "spec": { + "type": "LoadBalancer", + "publishNotReadyAddresses": True, + "ports": [ + { + "name": "mongodb", + "port": 27017, + }, + { + "name": "backup", + "port": 27018, + }, + ], + }, + }, + } + resource["spec"]["clusterSpecList"][1]["externalAccess"] = { + "externalDomain": "gke-member-cluster.gke.mc.mongokubernetes.com", + "externalService": { + "annotations": { + "service.beta.kubernetes.io/aws-load-balancer-type": "external", + "service.beta.kubernetes.io/aws-load-balancer-nlb-target-type": "instance", + "service.beta.kubernetes.io/aws-load-balancer-scheme": "internet-facing", + }, + "spec": { + "type": "LoadBalancer", + "publishNotReadyAddresses": True, + "ports": [ + { + "name": "mongodb", + "port": 27017, + }, + { + "name": "backup", + "port": 27018, + }, + ], + }, + }, + } + + return resource + + +@fixture(scope="function") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + mongodb_multi_unmarshalled: MongoDB, + multi_cluster_issuer_ca_configmap: str, +) -> MongoDB: + mongodb_multi_unmarshalled["spec"]["security"] = { + "certsSecretPrefix": CERT_SECRET_PREFIX, + "tls": { + "ca": multi_cluster_issuer_ca_configmap, + }, + } + mongodb_multi_unmarshalled.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + return mongodb_multi_unmarshalled.update() + + +@fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDB, + member_cluster_clients: list[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, + cert_additional_domains: list[str], +): + return create_multi_cluster_mongodb_tls_certs( + 
multi_cluster_issuer, + BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + additional_domains=cert_additional_domains, + ) + + +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +def test_create_mongodb_multi( + mongodb_multi: MongoDB, + namespace: str, + server_certs: str, + multi_cluster_issuer_ca_configmap: str, + member_cluster_clients: List[MultiClusterClient], + member_cluster_names: List[str], +): + testhelper.test_create_mongodb_multi( + mongodb_multi, + namespace, + server_certs, + multi_cluster_issuer_ca_configmap, + member_cluster_clients, + member_cluster_names, + ) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_2_cluster_clusterwide_replicaset.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_2_cluster_clusterwide_replicaset.py new file mode 100644 index 000000000..06f727c24 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_2_cluster_clusterwide_replicaset.py @@ -0,0 +1,228 @@ +from typing import Dict, List + +import kubernetes +import pytest +from kubetester import ( + create_or_update_configmap, +) +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import ensure_ent_version +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDB +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_2_cluster_clusterwide_replicaset as testhelper + +CERT_SECRET_PREFIX = "clustercert" +MDB_RESOURCE = "multi-replica-set" + + +@pytest.fixture(scope="module") +def mdba_ns(namespace: str): + return "{}-mdb-ns-a".format(namespace) + + +@pytest.fixture(scope="module") +def mdbb_ns(namespace: str): + 
return "{}-mdb-ns-b".format(namespace) + + +@pytest.fixture(scope="module") +def mongodb_multi_a_unmarshalled( + central_cluster_client: kubernetes.client.ApiClient, + mdba_ns: str, + member_cluster_names: List[str], + custom_mdb_version: str, +) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, mdba_ns) + + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1]) + resource.set_version(ensure_ent_version(custom_mdb_version)) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + resource.update() + return resource + + +@pytest.fixture(scope="module") +def mongodb_multi_b_unmarshalled( + central_cluster_client: kubernetes.client.ApiClient, + mdbb_ns: str, + member_cluster_names: List[str], + custom_mdb_version: str, +) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", mdbb_ns) + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1]) + resource.set_version(ensure_ent_version(custom_mdb_version)) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + resource.update() + return resource + + +@pytest.fixture(scope="module") +def server_certs_a( + multi_cluster_clusterissuer: str, + mongodb_multi_a_unmarshalled: MongoDB, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + + return create_multi_cluster_mongodb_tls_certs( + multi_cluster_clusterissuer, + f"{CERT_SECRET_PREFIX}-{mongodb_multi_a_unmarshalled.name}-cert", + member_cluster_clients, + central_cluster_client, + mongodb_multi_a_unmarshalled, + clusterwide=True, + ) + + +@pytest.fixture(scope="module") +def server_certs_b( + multi_cluster_clusterissuer: str, + mongodb_multi_b_unmarshalled: MongoDB, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + + return 
create_multi_cluster_mongodb_tls_certs( + multi_cluster_clusterissuer, + f"{CERT_SECRET_PREFIX}-{mongodb_multi_b_unmarshalled.name}-cert", + member_cluster_clients, + central_cluster_client, + mongodb_multi_b_unmarshalled, + clusterwide=True, + ) + + +@pytest.fixture(scope="module") +def mongodb_multi_a( + central_cluster_client: kubernetes.client.ApiClient, + mdba_ns: str, + server_certs_a: str, + mongodb_multi_a_unmarshalled: MongoDB, + issuer_ca_filepath: str, +) -> MongoDB: + ca = open(issuer_ca_filepath).read() + + # The operator expects the CA that validates Ops Manager is contained in + # an entry with a name of "mms-ca.crt" + data = {"ca-pem": ca, "mms-ca.crt": ca} + name = "issuer-ca" + + create_or_update_configmap(mdba_ns, name, data, api_client=central_cluster_client) + + resource = mongodb_multi_a_unmarshalled + resource["spec"]["security"] = { + "certsSecretPrefix": CERT_SECRET_PREFIX, + "tls": { + "ca": name, + }, + } + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + resource.update() + return resource + + +@pytest.fixture(scope="module") +def mongodb_multi_b( + central_cluster_client: kubernetes.client.ApiClient, + mdbb_ns: str, + server_certs_b: str, + mongodb_multi_b_unmarshalled: MongoDB, + issuer_ca_filepath: str, +) -> MongoDB: + ca = open(issuer_ca_filepath).read() + + # The operator expects the CA that validates Ops Manager is contained in + # an entry with a name of "mms-ca.crt" + data = {"ca-pem": ca, "mms-ca.crt": ca} + name = "issuer-ca" + + create_or_update_configmap(mdbb_ns, name, data, api_client=central_cluster_client) + + resource = mongodb_multi_b_unmarshalled + resource["spec"]["security"] = { + "certsSecretPrefix": CERT_SECRET_PREFIX, + "tls": { + "ca": name, + }, + } + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + resource.update() + return resource + + +@pytest.mark.e2e_mongodb_multi_cluster_2_clusters_clusterwide +def test_create_kube_config_file(cluster_clients: Dict, 
member_cluster_names: List[str]): + testhelper.test_create_kube_config_file(cluster_clients, member_cluster_names) + + +@pytest.mark.e2e_mongodb_multi_cluster_2_clusters_clusterwide +def test_create_namespaces( + namespace: str, + mdba_ns: str, + mdbb_ns: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_clients: List[MultiClusterClient], + evergreen_task_id: str, + multi_cluster_operator_installation_config: Dict[str, str], +): + testhelper.test_create_namespaces( + namespace, + mdba_ns, + mdbb_ns, + central_cluster_client, + member_cluster_clients, + evergreen_task_id, + multi_cluster_operator_installation_config, + ) + + +@pytest.mark.e2e_mongodb_multi_cluster_2_clusters_clusterwide +def test_deploy_operator(multi_cluster_operator_clustermode: Operator): + testhelper.test_deploy_operator(multi_cluster_operator_clustermode) + + +@pytest.mark.e2e_mongodb_multi_cluster_2_clusters_clusterwide +def test_prepare_namespace( + multi_cluster_operator_installation_config: Dict[str, str], + member_cluster_clients: List[MultiClusterClient], + central_cluster_name: str, + mdba_ns: str, + mdbb_ns: str, +): + testhelper.test_prepare_namespace( + multi_cluster_operator_installation_config, member_cluster_clients, central_cluster_name, mdba_ns, mdbb_ns + ) + + +@pytest.mark.e2e_mongodb_multi_cluster_2_clusters_clusterwide +def test_copy_configmap_and_secret_across_ns( + namespace: str, + central_cluster_client: kubernetes.client.ApiClient, + multi_cluster_operator_installation_config: Dict[str, str], + mdba_ns: str, + mdbb_ns: str, +): + testhelper.test_copy_configmap_and_secret_across_ns( + namespace, central_cluster_client, multi_cluster_operator_installation_config, mdba_ns, mdbb_ns + ) + + +@pytest.mark.e2e_mongodb_multi_cluster_2_clusters_clusterwide +def test_create_mongodb_multi_nsa(mongodb_multi_a: MongoDB): + testhelper.test_create_mongodb_multi_nsa(mongodb_multi_a) + + +@pytest.mark.e2e_mongodb_multi_cluster_2_clusters_clusterwide +def 
test_enable_mongodb_multi_nsa_auth(mongodb_multi_a: MongoDB): + testhelper.test_enable_mongodb_multi_nsa_auth(mongodb_multi_a) + + +@pytest.mark.e2e_mongodb_multi_cluster_2_clusters_clusterwide +def test_create_mongodb_multi_nsb(mongodb_multi_b: MongoDB): + testhelper.test_create_mongodb_multi_nsb(mongodb_multi_b) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_2_cluster_replicaset.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_2_cluster_replicaset.py new file mode 100644 index 000000000..aa37b2c20 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_2_cluster_replicaset.py @@ -0,0 +1,92 @@ +from typing import Dict, List + +import kubernetes +import pytest +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import ensure_ent_version +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_local +from kubetester.mongodb_multi import MongoDB +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_2_cluster_replicaset as testhelper + +CERT_SECRET_PREFIX = "clustercert" +MDB_RESOURCE = "multi-cluster-replica-set" +BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" + + +@pytest.fixture(scope="module") +def mongodb_multi_unmarshalled(namespace: str, member_cluster_names: List[str], custom_mdb_version: str) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1]) + resource.set_version(ensure_ent_version(custom_mdb_version)) + return resource + + +@pytest.fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDB, + 
member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + + return create_multi_cluster_mongodb_tls_certs( + multi_cluster_issuer, + BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + ) + + +@pytest.fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + server_certs: str, + mongodb_multi_unmarshalled: MongoDB, + multi_cluster_issuer_ca_configmap: str, +) -> MongoDB: + resource = mongodb_multi_unmarshalled + + resource["spec"]["security"] = { + "certsSecretPrefix": CERT_SECRET_PREFIX, + "tls": { + "ca": multi_cluster_issuer_ca_configmap, + }, + } + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + return resource.create() + + +@pytest.mark.e2e_mongodb_multi_cluster_2_clusters_replica_set +def test_create_kube_config_file(cluster_clients: Dict, member_cluster_names: List[str]): + testhelper.test_create_kube_config_file(cluster_clients, member_cluster_names) + + +@pytest.mark.e2e_mongodb_multi_cluster_2_clusters_replica_set +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodb_multi_cluster_2_clusters_replica_set +def test_create_mongodb_multi(mongodb_multi: MongoDB): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_2_clusters_replica_set +def test_statefulset_is_created_across_multiple_clusters( + mongodb_multi: MongoDB, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulset_is_created_across_multiple_clusters(mongodb_multi, member_cluster_clients) + + +@skip_if_local +@pytest.mark.e2e_mongodb_multi_cluster_2_clusters_replica_set +def test_replica_set_is_reachable(mongodb_multi: MongoDB, ca_path: str): + testhelper.test_replica_set_is_reachable(mongodb_multi, ca_path) diff --git 
a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_agent_flags.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_agent_flags.py new file mode 100644 index 000000000..9b868e366 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_agent_flags.py @@ -0,0 +1,54 @@ +from typing import List + +import kubernetes +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDB +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from pytest import fixture, mark +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_agent_flags as testhelper + +MDB_RESOURCE = "multi-replica-set" + + +@fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi-cluster.yaml"), MDB_RESOURCE, namespace) + resource.set_version(custom_mdb_version) + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + # override agent startup flags + resource["spec"]["agent"] = {"startupOptions": {"logFile": "/var/log/mongodb-mms-automation/customLogFile"}} + resource["spec"]["agent"]["logLevel"] = "DEBUG" + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource.update() + + +@mark.e2e_mongodb_multi_cluster_agent_flags +def test_create_mongodb_multi(multi_cluster_operator: Operator, mongodb_multi: MongoDB): + testhelper.test_create_mongodb_multi(multi_cluster_operator, mongodb_multi) + + +@mark.e2e_mongodb_multi_cluster_agent_flags +def test_multi_replicaset_has_agent_flags( + namespace: str, + member_cluster_clients: List[MultiClusterClient], +): + 
testhelper.test_multi_replicaset_has_agent_flags(namespace, member_cluster_clients) + + +@mark.e2e_mongodb_multi_cluster_agent_flags +def test_placeholders_in_external_services( + namespace: str, + mongodb_multi: MongoDB, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_placeholders_in_external_services(namespace, mongodb_multi, member_cluster_clients) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_automated_disaster_recovery.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_automated_disaster_recovery.py new file mode 100644 index 000000000..32cb6a3fe --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_automated_disaster_recovery.py @@ -0,0 +1,104 @@ +from typing import List + +import kubernetes +from kubeobject import CustomObject +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDB +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from pytest import fixture, mark +from tests.multicluster.conftest import ( + cluster_spec_list, +) + +from ..shared import multi_cluster_automated_disaster_recovery as testhelper + +MDB_RESOURCE = "multi-replica-set" + + +@fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + resource.set_version(custom_mdb_version) + resource["spec"]["persistent"] = False + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + return resource + + +@mark.e2e_mongodb_multi_cluster_disaster_recovery +def test_label_namespace(namespace: str, 
central_cluster_client: kubernetes.client.ApiClient): + testhelper.test_label_namespace(namespace, central_cluster_client) + + +@mark.e2e_mongodb_multi_cluster_disaster_recovery +def test_create_service_entry(service_entries: List[CustomObject]): + testhelper.test_create_service_entry(service_entries) + + +@mark.e2e_mongodb_multi_cluster_disaster_recovery +@mark.e2e_mongodb_multi_cluster_multi_disaster_recovery +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@mark.e2e_mongodb_multi_cluster_disaster_recovery +@mark.e2e_mongodb_multi_cluster_multi_disaster_recovery +def test_create_mongodb_multi(mongodb_multi: MongoDB): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@mark.e2e_mongodb_multi_cluster_disaster_recovery +def test_update_service_entry_block_failed_cluster_traffic( + namespace: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names: List[str], +): + testhelper.test_update_service_entry_block_failed_cluster_traffic( + namespace, central_cluster_client, member_cluster_names + ) + + +@mark.e2e_mongodb_multi_cluster_disaster_recovery +def test_mongodb_multi_leaves_running_state( + mongodb_multi: MongoDB, +): + testhelper.test_mongodb_multi_leaves_running_state(mongodb_multi) + + +@mark.e2e_mongodb_multi_cluster_disaster_recovery +def test_delete_database_statefulset_in_failed_cluster(mongodb_multi: MongoDB, member_cluster_names: list[str]): + testhelper.test_delete_database_statefulset_in_failed_cluster(mongodb_multi, member_cluster_names) + + +@mark.e2e_mongodb_multi_cluster_disaster_recovery +@mark.e2e_mongodb_multi_cluster_multi_disaster_recovery +def test_replica_set_is_reachable(mongodb_multi: MongoDB): + testhelper.test_replica_set_is_reachable(mongodb_multi) + + +@mark.e2e_mongodb_multi_cluster_disaster_recovery +def test_replica_reaches_running(mongodb_multi: MongoDB): + testhelper.test_replica_reaches_running(mongodb_multi) + + 
+@mark.e2e_mongodb_multi_cluster_disaster_recovery +@mark.e2e_mongodb_multi_cluster_multi_disaster_recovery +def test_number_numbers_in_ac(mongodb_multi: MongoDB): + testhelper.test_number_numbers_in_ac(mongodb_multi) + + +@mark.e2e_mongodb_multi_cluster_disaster_recovery +def test_sts_count_in_member_cluster( + mongodb_multi: MongoDB, + member_cluster_names: list[str], + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_sts_count_in_member_cluster(mongodb_multi, member_cluster_names, member_cluster_clients) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_backup_restore.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_backup_restore.py new file mode 100644 index 000000000..a29e64aba --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_backup_restore.py @@ -0,0 +1,403 @@ +from typing import Dict, List, Optional + +import kubernetes +import kubernetes.client +import pymongo +import pytest +from kubetester import ( + create_or_update_configmap, + create_or_update_secret, + try_load, +) +from kubetester.certs import create_ops_manager_tls_certs +from kubetester.kubetester import KubernetesTester, ensure_ent_version +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_local +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDB +from kubetester.mongodb_user import MongoDBUser +from kubetester.multicluster_client import MultiClusterClient +from kubetester.omtester import OMTester +from kubetester.operator import Operator +from kubetester.opsmanager import MongoDBOpsManager +from pytest import fixture, mark +from tests.conftest import ( + wait_for_primary, +) + +from ..shared import multi_cluster_backup_restore as testhelper + +MONGODB_PORT = 30000 +OPLOG_RS_NAME = "my-mongodb-oplog" +BLOCKSTORE_RS_NAME = "my-mongodb-blockstore" +USER_PASSWORD 
= "/qwerty@!#:" + + +@fixture(scope="module") +def ops_manager_certs( + namespace: str, + multi_cluster_issuer: str, + central_cluster_client: kubernetes.client.ApiClient, +): + return create_ops_manager_tls_certs( + multi_cluster_issuer, + namespace, + "om-backup", + secret_name="mdb-om-backup-cert", + # We need the interconnected certificate since we update coreDNS later with that ip -> domain + # because our central cluster is not part of the mesh, but we can access the pods via external IPs. + # Since we are using TLS we need a certificate for a hostname, an IP does not work, hence + # f"om-backup.{namespace}.interconnected" -> IP setup below + additional_domains=[ + "fastdl.mongodb.org", + f"om-backup.{namespace}.interconnected", + ], + api_client=central_cluster_client, + ) + + +def new_om_data_store( + mdb: MongoDB, + id: str, + assignment_enabled: bool = True, + user_name: Optional[str] = None, + password: Optional[str] = None, +) -> Dict: + return { + "id": id, + "uri": mdb.mongo_uri(user_name=user_name, password=password), + "ssl": mdb.is_tls_enabled(), + "assignmentEnabled": assignment_enabled, + } + + +@fixture(scope="module") +def ops_manager( + namespace: str, + multi_cluster_issuer_ca_configmap: str, + custom_version: Optional[str], + custom_appdb_version: str, + ops_manager_certs: str, + central_cluster_client: kubernetes.client.ApiClient, +) -> MongoDBOpsManager: + resource: MongoDBOpsManager = MongoDBOpsManager.from_yaml( + yaml_fixture("om_ops_manager_backup.yaml"), namespace=namespace + ) + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + resource["spec"]["externalConnectivity"] = {"type": "LoadBalancer"} + resource["spec"]["security"] = { + "certsSecretPrefix": "mdb", + "tls": {"ca": multi_cluster_issuer_ca_configmap}, + } + # remove s3 config + del resource["spec"]["backup"]["s3Stores"] + + resource.set_version(custom_version) + resource.set_appdb_version(custom_appdb_version) + resource.allow_mdb_rc_versions() + 
resource.create_admin_secret(api_client=central_cluster_client) + + try_load(resource) + + return resource + + +@fixture(scope="module") +def oplog_replica_set( + ops_manager, + namespace, + custom_mdb_version: str, + central_cluster_client: kubernetes.client.ApiClient, + multi_cluster_issuer_ca_configmap: str, +) -> MongoDB: + resource = MongoDB.from_yaml( + yaml_fixture("replica-set-for-om.yaml"), + namespace=namespace, + name=OPLOG_RS_NAME, + ) + + testhelper.create_project_config_map( + om=ops_manager, + project_name="development", + mdb_name=OPLOG_RS_NAME, + client=central_cluster_client, + custom_ca=multi_cluster_issuer_ca_configmap, + ) + + resource.configure(ops_manager, "development") + + resource["spec"]["opsManager"]["configMapRef"]["name"] = OPLOG_RS_NAME + "-config" + resource.set_version(custom_mdb_version) + + resource["spec"]["security"] = {"authentication": {"enabled": True, "modes": ["SCRAM"]}} + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + yield resource.update() + + +@fixture(scope="module") +def blockstore_replica_set( + ops_manager, + namespace, + custom_mdb_version: str, + central_cluster_client: kubernetes.client.ApiClient, + multi_cluster_issuer_ca_configmap: str, +) -> MongoDB: + resource = MongoDB.from_yaml( + yaml_fixture("replica-set-for-om.yaml"), + namespace=namespace, + name=BLOCKSTORE_RS_NAME, + ) + + testhelper.create_project_config_map( + om=ops_manager, + project_name="blockstore", + mdb_name=BLOCKSTORE_RS_NAME, + client=central_cluster_client, + custom_ca=multi_cluster_issuer_ca_configmap, + ) + + resource.configure(ops_manager, "blockstore") + + resource.set_version(custom_mdb_version) + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + yield resource.update() + + +@fixture(scope="module") +def blockstore_user( + namespace, + blockstore_replica_set: MongoDB, + central_cluster_client: kubernetes.client.ApiClient, +) -> MongoDBUser: + """Creates a password secret and then 
the user referencing it""" + resource = MongoDBUser.from_yaml(yaml_fixture("scram-sha-user-backing-db.yaml"), namespace=namespace) + resource["spec"]["mongodbResourceRef"]["name"] = blockstore_replica_set.name + + print(f"\nCreating password for MongoDBUser {resource.name} in secret/{resource.get_secret_name()} ") + create_or_update_secret( + KubernetesTester.get_namespace(), + resource.get_secret_name(), + { + "password": USER_PASSWORD, + }, + api_client=central_cluster_client, + ) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + yield resource.update() + + +@fixture(scope="module") +def oplog_user( + namespace, + oplog_replica_set: MongoDB, + central_cluster_client: kubernetes.client.ApiClient, +) -> MongoDBUser: + """Creates a password secret and then the user referencing it""" + resource = MongoDBUser.from_yaml( + yaml_fixture("scram-sha-user-backing-db.yaml"), + namespace=namespace, + name="mms-user-2", + ) + resource["spec"]["mongodbResourceRef"]["name"] = oplog_replica_set.name + resource["spec"]["passwordSecretKeyRef"]["name"] = "mms-user-2-password" + resource["spec"]["username"] = "mms-user-2" + + print(f"\nCreating password for MongoDBUser {resource.name} in secret/{resource.get_secret_name()} ") + create_or_update_secret( + KubernetesTester.get_namespace(), + resource.get_secret_name(), + { + "password": USER_PASSWORD, + }, + api_client=central_cluster_client, + ) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + yield resource.update() + + +@mark.e2e_mongodb_multi_cluster_backup_restore +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@mark.e2e_mongodb_multi_cluster_backup_restore +class TestOpsManagerCreation: + """ + name: Ops Manager successful creation with backup and oplog stores enabled + description: | + Creates an Ops Manager instance with backup enabled. 
The OM is expected to get to 'Pending' state + eventually as it will wait for oplog db to be created + """ + + def test_create_om( + self, + ops_manager: MongoDBOpsManager, + ): + testhelper.TestOpsManagerCreation.test_create_om(self, ops_manager) + + def test_daemon_statefulset( + self, + ops_manager: MongoDBOpsManager, + ): + testhelper.TestOpsManagerCreation.test_daemon_statefulset(self, ops_manager) + + def test_backup_daemon_services_created( + self, + namespace, + central_cluster_client: kubernetes.client.ApiClient, + ): + testhelper.TestOpsManagerCreation.test_backup_daemon_services_created(self, namespace, central_cluster_client) + + +@mark.e2e_mongodb_multi_cluster_backup_restore +class TestBackupDatabasesAdded: + """name: Creates mongodb resources for oplog and blockstore and waits until OM resource gets to + running state""" + + def test_backup_mdbs_created( + self, + oplog_replica_set: MongoDB, + blockstore_replica_set: MongoDB, + ): + testhelper.TestBackupDatabasesAdded.test_backup_mdbs_created(self, oplog_replica_set, blockstore_replica_set) + + def test_oplog_user_created(self, oplog_user: MongoDBUser): + testhelper.TestBackupDatabasesAdded.test_oplog_user_created(self, oplog_user) + + def test_om_failed_oplog_no_user_ref(self, ops_manager: MongoDBOpsManager): + testhelper.TestBackupDatabasesAdded.test_om_failed_oplog_no_user_ref(self, ops_manager) + + def test_fix_om(self, ops_manager: MongoDBOpsManager, oplog_user: MongoDBUser): + testhelper.TestBackupDatabasesAdded.test_fix_om(self, ops_manager, oplog_user) + + +class TestBackupForMongodb: + @fixture(scope="module") + def base_url( + self, + ops_manager: MongoDBOpsManager, + ) -> str: + """ + The base_url makes OM accessible from member clusters via a special interconnected dns address. + This address only works for member clusters. 
+ """ + interconnected_field = f"https://om-backup.{ops_manager.namespace}.interconnected" + new_address = f"{interconnected_field}:8443" + + return new_address + + @fixture(scope="module") + def project_one( + self, + ops_manager: MongoDBOpsManager, + namespace: str, + central_cluster_client: kubernetes.client.ApiClient, + base_url: str, + ) -> OMTester: + return ops_manager.get_om_tester( + project_name=f"{namespace}-project-one", + api_client=central_cluster_client, + base_url=base_url, + ) + + @fixture(scope="function") + def mdb_client(self, mongodb_multi_one: MongoDB): + return pymongo.MongoClient( + mongodb_multi_one.tester(port=MONGODB_PORT).cnx_string, + **mongodb_multi_one.tester(port=MONGODB_PORT).default_opts, + readPreference="primary", # let's read from the primary and not stale data from the secondary + ) + + @fixture(scope="function") + def mongodb_multi_one_collection(self, mdb_client): + + # Ensure primary is available before proceeding + wait_for_primary(mdb_client) + + return mdb_client["testdb"]["testcollection"] + + @fixture(scope="module") + def mongodb_multi_one( + self, + ops_manager: MongoDBOpsManager, + multi_cluster_issuer_ca_configmap: str, + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names: List[str], + base_url, + custom_mdb_version: str, + ) -> MongoDB: + resource = MongoDB.from_yaml( + yaml_fixture("mongodb-multi.yaml"), + "multi-replica-set-one", + namespace, + # the project configmap should be created in the central cluster. 
+ ).configure(ops_manager, f"{namespace}-project-one", api_client=central_cluster_client) + + resource.set_version(ensure_ent_version(custom_mdb_version)) + resource["spec"]["clusterSpecList"] = [ + {"clusterName": member_cluster_names[0], "members": 2}, + {"clusterName": member_cluster_names[1], "members": 1}, + {"clusterName": member_cluster_names[2], "members": 2}, + ] + + # creating a cluster with backup should work with custom ports + resource["spec"].update({"additionalMongodConfig": {"net": {"port": MONGODB_PORT}}}) + + resource.configure_backup(mode="enabled") + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + data = KubernetesTester.read_configmap( + namespace, "multi-replica-set-one-config", api_client=central_cluster_client + ) + KubernetesTester.delete_configmap(namespace, "multi-replica-set-one-config", api_client=central_cluster_client) + data["baseUrl"] = base_url + data["sslMMSCAConfigMap"] = multi_cluster_issuer_ca_configmap + create_or_update_configmap( + namespace, + "multi-replica-set-one-config", + data, + api_client=central_cluster_client, + ) + + return resource.update() + + @mark.e2e_mongodb_multi_cluster_backup_restore + def test_setup_om_connection( + self, + ops_manager: MongoDBOpsManager, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_clients: List[MultiClusterClient], + ): + testhelper.TestBackupForMongodb.test_setup_om_connection( + self, ops_manager, central_cluster_client, member_cluster_clients + ) + + @mark.e2e_mongodb_multi_cluster_backup_restore + def test_mongodb_multi_one_running_state(self, mongodb_multi_one: MongoDB): + testhelper.TestBackupForMongodb.test_mongodb_multi_one_running_state(self, mongodb_multi_one) + + @skip_if_local + @mark.e2e_mongodb_multi_cluster_backup_restore + @pytest.mark.flaky(reruns=100, reruns_delay=6) + def test_add_test_data(self, mongodb_multi_one_collection): + testhelper.TestBackupForMongodb.test_add_test_data(self, 
mongodb_multi_one_collection) + + @mark.e2e_mongodb_multi_cluster_backup_restore + def test_mdb_backed_up(self, project_one: OMTester): + testhelper.TestBackupForMongodb.test_mdb_backed_up(self, project_one) + + @mark.e2e_mongodb_multi_cluster_backup_restore + def test_change_mdb_data(self, mongodb_multi_one_collection): + testhelper.TestBackupForMongodb.test_change_mdb_data(self, mongodb_multi_one_collection) + + @mark.e2e_mongodb_multi_cluster_backup_restore + def test_pit_restore(self, project_one: OMTester): + testhelper.TestBackupForMongodb.test_pit_restore(self, project_one) + + @mark.e2e_mongodb_multi_cluster_backup_restore + def test_data_got_restored(self, mongodb_multi_one_collection, mdb_client): + testhelper.TestBackupForMongodb.test_data_got_restored(self, mongodb_multi_one_collection, mdb_client) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_backup_restore_no_mesh.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_backup_restore_no_mesh.py new file mode 100644 index 000000000..a5f2595aa --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_backup_restore_no_mesh.py @@ -0,0 +1,526 @@ +# This test sets up ops manager in a multicluster "no-mesh" environment. +# It tests the back-up functionality with a multi-cluster replica-set when the replica-set is deployed outside of a service-mesh context. 
+ +from typing import List, Tuple + +import kubernetes +import kubernetes.client +import pymongo +from kubernetes import client +from kubetester import ( + create_or_update_configmap, + create_or_update_secret, + try_load, +) +from kubetester.certs import create_ops_manager_tls_certs +from kubetester.kubetester import KubernetesTester, ensure_ent_version +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_local +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDB +from kubetester.mongodb_user import MongoDBUser +from kubetester.multicluster_client import MultiClusterClient +from kubetester.omtester import OMTester +from kubetester.operator import Operator +from kubetester.opsmanager import MongoDBOpsManager +from pytest import fixture, mark + +from ..shared import multi_cluster_backup_restore_no_mesh as testhelper + +MONGODB_PORT = 30000 +OPLOG_RS_NAME = "my-mongodb-oplog" +BLOCKSTORE_RS_NAME = "my-mongodb-blockstore" +USER_PASSWORD = "/qwerty@!#:" + + +@fixture(scope="module") +def ops_manager_certs( + namespace: str, + multi_cluster_issuer: str, + central_cluster_client: kubernetes.client.ApiClient, +): + return create_ops_manager_tls_certs( + multi_cluster_issuer, + namespace, + "om-backup", + secret_name="mdb-om-backup-cert", + # We need the interconnected certificate since we update coreDNS later with that ip -> domain + # because our central cluster is not part of the mesh, but we can access the pods via external IPs. 
+ # Since we are using TLS we need a certificate for a hostname, an IP does not work, hence + # f"om-backup.{namespace}.interconnected" -> IP setup below + additional_domains=[ + "fastdl.mongodb.org", + f"om-backup.{namespace}.interconnected", + ], + api_client=central_cluster_client, + ) + + +@fixture(scope="module") +def ops_manager( + namespace: str, + multi_cluster_issuer_ca_configmap: str, + custom_version: str, + custom_appdb_version: str, + ops_manager_certs: str, + central_cluster_client: kubernetes.client.ApiClient, +) -> MongoDBOpsManager: + resource: MongoDBOpsManager = MongoDBOpsManager.from_yaml( + yaml_fixture("om_ops_manager_backup.yaml"), namespace=namespace + ) + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + resource["spec"]["externalConnectivity"] = {"type": "LoadBalancer"} + resource["spec"]["security"] = { + "certsSecretPrefix": "mdb", + "tls": {"ca": multi_cluster_issuer_ca_configmap}, + } + # remove s3 config + del resource["spec"]["backup"]["s3Stores"] + resource.set_version(custom_version) + resource.set_appdb_version(ensure_ent_version(custom_appdb_version)) + + resource.allow_mdb_rc_versions() + resource.create_admin_secret(api_client=central_cluster_client) + + try_load(resource) + + return resource + + +@fixture(scope="module") +def oplog_replica_set( + ops_manager, + namespace, + custom_mdb_version: str, + central_cluster_client: kubernetes.client.ApiClient, + multi_cluster_issuer_ca_configmap: str, +) -> MongoDB: + resource = MongoDB.from_yaml( + yaml_fixture("replica-set-for-om.yaml"), + namespace=namespace, + name=OPLOG_RS_NAME, + ) + + testhelper.create_project_config_map( + om=ops_manager, + project_name="development", + mdb_name=OPLOG_RS_NAME, + client=central_cluster_client, + custom_ca=multi_cluster_issuer_ca_configmap, + ) + + resource.configure(ops_manager, "development") + + resource["spec"]["opsManager"]["configMapRef"]["name"] = OPLOG_RS_NAME + "-config" + 
resource.set_version(custom_mdb_version) + + resource["spec"]["security"] = {"authentication": {"enabled": True, "modes": ["SCRAM"]}} + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + yield resource.update() + + +@fixture(scope="module") +def blockstore_replica_set( + ops_manager, + namespace, + custom_mdb_version: str, + central_cluster_client: kubernetes.client.ApiClient, + multi_cluster_issuer_ca_configmap: str, +) -> MongoDB: + resource = MongoDB.from_yaml( + yaml_fixture("replica-set-for-om.yaml"), + namespace=namespace, + name=BLOCKSTORE_RS_NAME, + ) + + testhelper.create_project_config_map( + om=ops_manager, + project_name="blockstore", + mdb_name=BLOCKSTORE_RS_NAME, + client=central_cluster_client, + custom_ca=multi_cluster_issuer_ca_configmap, + ) + + resource.configure(ops_manager, "blockstore") + + resource.set_version(custom_mdb_version) + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + yield resource.update() + + +@fixture(scope="module") +def blockstore_user( + namespace, + blockstore_replica_set: MongoDB, + central_cluster_client: kubernetes.client.ApiClient, +) -> MongoDBUser: + """Creates a password secret and then the user referencing it""" + resource = MongoDBUser.from_yaml(yaml_fixture("scram-sha-user-backing-db.yaml"), namespace=namespace) + resource["spec"]["mongodbResourceRef"]["name"] = blockstore_replica_set.name + + print(f"\nCreating password for MongoDBUser {resource.name} in secret/{resource.get_secret_name()} ") + create_or_update_secret( + KubernetesTester.get_namespace(), + resource.get_secret_name(), + { + "password": USER_PASSWORD, + }, + api_client=central_cluster_client, + ) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + yield resource.update() + + +@fixture(scope="module") +def oplog_user( + namespace, + oplog_replica_set: MongoDB, + central_cluster_client: kubernetes.client.ApiClient, +) -> MongoDBUser: + """Creates a password secret and 
then the user referencing it""" + resource = MongoDBUser.from_yaml( + yaml_fixture("scram-sha-user-backing-db.yaml"), + namespace=namespace, + name="mms-user-2", + ) + resource["spec"]["mongodbResourceRef"]["name"] = oplog_replica_set.name + resource["spec"]["passwordSecretKeyRef"]["name"] = "mms-user-2-password" + resource["spec"]["username"] = "mms-user-2" + + print(f"\nCreating password for MongoDBUser {resource.name} in secret/{resource.get_secret_name()} ") + create_or_update_secret( + KubernetesTester.get_namespace(), + resource.get_secret_name(), + { + "password": USER_PASSWORD, + }, + api_client=central_cluster_client, + ) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + yield resource.update() + + +@fixture(scope="module") +def replica_set_external_hosts() -> List[Tuple[str, str]]: + return [ + ("172.18.255.211", "test.kind-e2e-cluster-1.interconnected"), + ( + "172.18.255.211", + "multi-replica-set-one-0-0.kind-e2e-cluster-1.interconnected", + ), + ( + "172.18.255.212", + "multi-replica-set-one-0-1.kind-e2e-cluster-1.interconnected", + ), + ( + "172.18.255.213", + "multi-replica-set-one-0-2.kind-e2e-cluster-1.interconnected", + ), + ("172.18.255.221", "test.kind-e2e-cluster-2.interconnected"), + ( + "172.18.255.221", + "multi-replica-set-one-1-0.kind-e2e-cluster-2.interconnected", + ), + ( + "172.18.255.222", + "multi-replica-set-one-1-1.kind-e2e-cluster-2.interconnected", + ), + ( + "172.18.255.223", + "multi-replica-set-one-1-2.kind-e2e-cluster-2.interconnected", + ), + ("172.18.255.231", "test.kind-e2e-cluster-3.interconnected"), + ( + "172.18.255.231", + "multi-replica-set-one-2-0.kind-e2e-cluster-3.interconnected", + ), + ( + "172.18.255.232", + "multi-replica-set-one-2-1.kind-e2e-cluster-3.interconnected", + ), + ( + "172.18.255.233", + "multi-replica-set-one-2-2.kind-e2e-cluster-3.interconnected", + ), + ] + + +@fixture(scope="module") +def disable_istio( + multi_cluster_operator: Operator, + namespace: str, + 
member_cluster_clients: List[MultiClusterClient], +): + for mcc in member_cluster_clients: + api = client.CoreV1Api(api_client=mcc.api_client) + labels = {"istio-injection": "disabled"} + ns = api.read_namespace(name=namespace) + ns.metadata.labels.update(labels) + api.replace_namespace(name=namespace, body=ns) + return None + + +@mark.e2e_mongodb_multi_cluster_backup_restore_no_mesh +def test_update_coredns( + replica_set_external_hosts: List[Tuple[str, str]], + cluster_clients: dict[str, kubernetes.client.ApiClient], +): + testhelper.test_update_coredns(replica_set_external_hosts, cluster_clients) + + +@mark.e2e_mongodb_multi_cluster_backup_restore_no_mesh +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@mark.e2e_mongodb_multi_cluster_backup_restore_no_mesh +class TestOpsManagerCreation: + def test_create_om( + self, + ops_manager: MongoDBOpsManager, + ): + testhelper.TestOpsManagerCreation.test_create_om(self, ops_manager) + + def test_daemon_statefulset( + self, + ops_manager: MongoDBOpsManager, + ): + testhelper.TestOpsManagerCreation.test_daemon_statefulset(self, ops_manager) + + def test_backup_daemon_services_created( + self, + namespace, + central_cluster_client: kubernetes.client.ApiClient, + ): + testhelper.TestOpsManagerCreation.test_backup_daemon_services_created(self, namespace, central_cluster_client) + + +@mark.e2e_mongodb_multi_cluster_backup_restore_no_mesh +class TestBackupDatabasesAdded: + def test_backup_mdbs_created( + self, + oplog_replica_set: MongoDB, + blockstore_replica_set: MongoDB, + ): + testhelper.TestBackupDatabasesAdded.test_backup_mdbs_created(self, oplog_replica_set, blockstore_replica_set) + + def test_oplog_user_created(self, oplog_user: MongoDBUser): + testhelper.TestBackupDatabasesAdded.test_oplog_user_created(self, oplog_user) + + def test_om_failed_oplog_no_user_ref(self, ops_manager: MongoDBOpsManager): + 
testhelper.TestBackupDatabasesAdded.test_om_failed_oplog_no_user_ref(self, ops_manager) + + def test_fix_om(self, ops_manager: MongoDBOpsManager, oplog_user: MongoDBUser): + testhelper.TestBackupDatabasesAdded.test_fix_om(self, ops_manager, oplog_user) + + +class TestBackupForMongodb: + @fixture(scope="module") + def base_url( + self, + ops_manager: MongoDBOpsManager, + ) -> str: + """ + The base_url makes OM accessible from member clusters via a special interconnected dns address. + We also use this address for the operator to connect to ops manager, + because the operator and the replica-sets rely on the same project configmap. + """ + interconnected_field = f"https://om-backup.{ops_manager.namespace}.interconnected" + new_address = f"{interconnected_field}:8443" + + return new_address + + @fixture(scope="module") + def project_one( + self, + ops_manager: MongoDBOpsManager, + namespace: str, + central_cluster_client: kubernetes.client.ApiClient, + base_url: str, + ) -> OMTester: + return ops_manager.get_om_tester( + project_name=f"{namespace}-project-one", + api_client=central_cluster_client, + base_url=base_url, + ) + + @fixture(scope="function") + def mongodb_multi_one_collection(self, mongodb_multi_one: MongoDB): + + tester = mongodb_multi_one.tester( + port=MONGODB_PORT, + service_names=[ + "multi-replica-set-one-0-0.kind-e2e-cluster-1.interconnected", + "multi-replica-set-one-0-1.kind-e2e-cluster-1.interconnected", + "multi-replica-set-one-1-0.kind-e2e-cluster-2.interconnected", + "multi-replica-set-one-2-0.kind-e2e-cluster-3.interconnected", + "multi-replica-set-one-2-1.kind-e2e-cluster-3.interconnected", + ], + external=True, + ) + + collection = pymongo.MongoClient(tester.cnx_string, **tester.default_opts)["testdb"] + + return collection["testcollection"] + + @fixture(scope="module") + def mongodb_multi_one( + self, + ops_manager: MongoDBOpsManager, + multi_cluster_issuer_ca_configmap: str, + central_cluster_client: kubernetes.client.ApiClient, + 
disable_istio, + namespace: str, + member_cluster_names: List[str], + base_url, + custom_mdb_version: str, + ) -> MongoDB: + resource = MongoDB.from_yaml( + yaml_fixture("mongodb-multi.yaml"), + "multi-replica-set-one", + namespace, + # the project configmap should be created in the central cluster. + ).configure(ops_manager, f"{namespace}-project-one", api_client=central_cluster_client) + resource.set_version(ensure_ent_version(custom_mdb_version)) + + resource["spec"]["clusterSpecList"] = [ + {"clusterName": member_cluster_names[0], "members": 2}, + {"clusterName": member_cluster_names[1], "members": 1}, + {"clusterName": member_cluster_names[2], "members": 2}, + ] + + resource["spec"]["externalAccess"] = {} + resource["spec"]["clusterSpecList"][0]["externalAccess"] = { + "externalDomain": "kind-e2e-cluster-1.interconnected", + "externalService": { + "spec": { + "type": "LoadBalancer", + "publishNotReadyAddresses": False, + "ports": [ + { + "name": "mongodb", + "port": MONGODB_PORT, + }, + { + "name": "backup", + "port": MONGODB_PORT + 1, + }, + { + "name": "testing0", + "port": MONGODB_PORT + 2, + }, + ], + } + }, + } + resource["spec"]["clusterSpecList"][1]["externalAccess"] = { + "externalDomain": "kind-e2e-cluster-2.interconnected", + "externalService": { + "spec": { + "type": "LoadBalancer", + "publishNotReadyAddresses": False, + "ports": [ + { + "name": "mongodb", + "port": MONGODB_PORT, + }, + { + "name": "backup", + "port": MONGODB_PORT + 1, + }, + { + "name": "testing1", + "port": MONGODB_PORT + 2, + }, + ], + } + }, + } + resource["spec"]["clusterSpecList"][2]["externalAccess"] = { + "externalDomain": "kind-e2e-cluster-3.interconnected", + "externalService": { + "spec": { + "type": "LoadBalancer", + "publishNotReadyAddresses": False, + "ports": [ + { + "name": "mongodb", + "port": MONGODB_PORT, + }, + { + "name": "backup", + "port": MONGODB_PORT + 1, + }, + { + "name": "testing2", + "port": MONGODB_PORT + 2, + }, + ], + } + }, + } + + # creating a 
cluster with backup should work with custom ports + resource["spec"].update({"additionalMongodConfig": {"net": {"port": MONGODB_PORT}}}) + + resource.configure_backup(mode="enabled") + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + data = KubernetesTester.read_configmap( + namespace, "multi-replica-set-one-config", api_client=central_cluster_client + ) + KubernetesTester.delete_configmap(namespace, "multi-replica-set-one-config", api_client=central_cluster_client) + data["baseUrl"] = base_url + data["sslMMSCAConfigMap"] = multi_cluster_issuer_ca_configmap + create_or_update_configmap( + namespace, + "multi-replica-set-one-config", + data, + api_client=central_cluster_client, + ) + + return resource.update() + + @mark.e2e_mongodb_multi_cluster_backup_restore_no_mesh + def test_setup_om_connection( + self, + replica_set_external_hosts: List[Tuple[str, str]], + ops_manager: MongoDBOpsManager, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_clients: List[MultiClusterClient], + ): + testhelper.TestBackupForMongodb.test_setup_om_connection( + self, replica_set_external_hosts, ops_manager, central_cluster_client, member_cluster_clients + ) + + @mark.e2e_mongodb_multi_cluster_backup_restore_no_mesh + def test_mongodb_multi_one_running_state(self, mongodb_multi_one: MongoDB): + testhelper.TestBackupForMongodb.test_mongodb_multi_one_running_state(self, mongodb_multi_one) + + @skip_if_local + @mark.e2e_mongodb_multi_cluster_backup_restore_no_mesh + def test_add_test_data(self, mongodb_multi_one_collection): + testhelper.TestBackupForMongodb.test_add_test_data(self, mongodb_multi_one_collection) + + @mark.e2e_mongodb_multi_cluster_backup_restore_no_mesh + def test_mdb_backed_up(self, project_one: OMTester): + testhelper.TestBackupForMongodb.test_mdb_backed_up(self, project_one) + + @mark.e2e_mongodb_multi_cluster_backup_restore_no_mesh + def test_change_mdb_data(self, mongodb_multi_one_collection): + 
testhelper.TestBackupForMongodb.test_change_mdb_data(self, mongodb_multi_one_collection) + + @mark.e2e_mongodb_multi_cluster_backup_restore_no_mesh + def test_pit_restore(self, project_one: OMTester): + testhelper.TestBackupForMongodb.test_pit_restore(self, project_one) + + @mark.e2e_mongodb_multi_cluster_backup_restore_no_mesh + def test_mdb_ready(self, mongodb_multi_one: MongoDB): + testhelper.TestBackupForMongodb.test_mdb_ready(self, mongodb_multi_one) + + @mark.e2e_mongodb_multi_cluster_backup_restore_no_mesh + def test_data_got_restored(self, mongodb_multi_one_collection): + testhelper.TestBackupForMongodb.test_data_got_restored(self, mongodb_multi_one_collection) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_cli_recover.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_cli_recover.py new file mode 100644 index 000000000..23ebf975d --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_cli_recover.py @@ -0,0 +1,102 @@ +from typing import Callable, List + +import kubernetes +import pytest +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDB +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_cli_recover as testhelper + +RESOURCE_NAME = "multi-replica-set" +BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" + + +@pytest.fixture(scope="module") +def mongodb_multi_unmarshalled( + namespace: str, + multi_cluster_issuer_ca_configmap: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names: List[str], + custom_mdb_version: str, +) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi.yaml"), RESOURCE_NAME, 
namespace) + resource.set_version(custom_mdb_version) + # ensure certs are created for the members during scale up + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + resource["spec"]["security"] = { + "certsSecretPrefix": "prefix", + "tls": { + "ca": multi_cluster_issuer_ca_configmap, + }, + } + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource + + +@pytest.fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDB, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + return create_multi_cluster_mongodb_tls_certs( + multi_cluster_issuer, + BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + ) + + +@pytest.fixture(scope="module") +def mongodb_multi(mongodb_multi_unmarshalled: MongoDB, server_certs: str) -> MongoDB: + mongodb_multi_unmarshalled["spec"]["clusterSpecList"].pop() + mongodb_multi_unmarshalled.update() + return mongodb_multi_unmarshalled + + +@pytest.mark.e2e_mongodb_multi_cluster_recover +def test_deploy_operator( + install_multi_cluster_operator_set_members_fn: Callable[[List[str]], Operator], + member_cluster_names: List[str], + namespace: str, +): + testhelper.test_deploy_operator(install_multi_cluster_operator_set_members_fn, member_cluster_names, namespace) + + +@pytest.mark.e2e_mongodb_multi_cluster_recover +def test_create_mongodb_multi(mongodb_multi: MongoDB): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_recover +def test_recover_operator_add_cluster( + member_cluster_names: List[str], + namespace: str, + central_cluster_client: kubernetes.client.ApiClient, +): + testhelper.test_recover_operator_add_cluster(member_cluster_names, namespace, central_cluster_client) + + +@pytest.mark.e2e_mongodb_multi_cluster_recover +def 
test_mongodb_multi_recovers_adding_cluster(mongodb_multi: MongoDB, member_cluster_names: List[str]): + testhelper.test_mongodb_multi_recovers_adding_cluster(mongodb_multi, member_cluster_names) + + +@pytest.mark.e2e_mongodb_multi_cluster_recover +def test_recover_operator_remove_cluster( + member_cluster_names: List[str], + namespace: str, + central_cluster_client: kubernetes.client.ApiClient, +): + testhelper.test_recover_operator_remove_cluster(member_cluster_names, namespace, central_cluster_client) + + +@pytest.mark.e2e_mongodb_multi_cluster_recover +def test_mongodb_multi_recovers_removing_cluster(mongodb_multi: MongoDB, member_cluster_names: List[str]): + testhelper.test_mongodb_multi_recovers_removing_cluster(mongodb_multi, member_cluster_names) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_clusterwide.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_clusterwide.py new file mode 100644 index 000000000..a62192572 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_clusterwide.py @@ -0,0 +1,186 @@ +import os +from typing import Dict, List + +import kubernetes +from kubernetes import client +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDB +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from pytest import fixture, mark +from tests.conftest import ( + _install_multi_cluster_operator, + run_kube_config_creation_tool, +) +from tests.constants import MULTI_CLUSTER_OPERATOR_NAME +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_clusterwide as testhelper + +MDB_RESOURCE = "multi-replica-set" + + +@fixture(scope="module") +def mdba_ns(namespace: str): + return "{}-mdb-ns-a".format(namespace) + + +@fixture(scope="module") +def mdbb_ns(namespace: str): + return 
"{}-mdb-ns-b".format(namespace) + + +@fixture(scope="module") +def unmanaged_mdb_ns(namespace: str): + return "{}-mdb-ns-c".format(namespace) + + +@fixture(scope="module") +def mongodb_multi_a( + central_cluster_client: kubernetes.client.ApiClient, + mdba_ns: str, + member_cluster_names: List[str], + custom_mdb_version: str, +) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, mdba_ns) + resource.set_version(custom_mdb_version) + + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + resource.update() + return resource + + +@fixture(scope="module") +def mongodb_multi_b( + central_cluster_client: kubernetes.client.ApiClient, + mdbb_ns: str, + member_cluster_names: List[str], + custom_mdb_version: str, +) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, mdbb_ns) + resource.set_version(custom_mdb_version) + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + resource.update() + return resource + + +@fixture(scope="module") +def unmanaged_mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + unmanaged_mdb_ns: str, + member_cluster_names: List[str], +) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, unmanaged_mdb_ns) + + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + resource.update() + return resource + + +@fixture(scope="module") +def install_operator( + namespace: str, + central_cluster_name: str, + multi_cluster_operator_installation_config: Dict[str, str], + central_cluster_client: client.ApiClient, + member_cluster_clients: List[kubernetes.client.ApiClient], + 
cluster_clients: Dict[str, kubernetes.client.ApiClient], + member_cluster_names: List[str], + mdba_ns: str, + mdbb_ns: str, +) -> Operator: + print(f"Installing operator in context: {central_cluster_name}") + os.environ["HELM_KUBECONTEXT"] = central_cluster_name + member_cluster_namespaces = mdba_ns + "," + mdbb_ns + run_kube_config_creation_tool(member_cluster_names, namespace, namespace, member_cluster_names, True) + + return _install_multi_cluster_operator( + namespace, + multi_cluster_operator_installation_config, + central_cluster_client, + member_cluster_clients, + { + "operator.name": MULTI_CLUSTER_OPERATOR_NAME, + "operator.createOperatorServiceAccount": "false", + "operator.watchNamespace": member_cluster_namespaces, + }, + central_cluster_name, + ) + + +@mark.e2e_mongodb_multi_cluster_specific_namespaces +def test_create_namespaces( + namespace: str, + mdba_ns: str, + mdbb_ns: str, + unmanaged_mdb_ns: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_clients: List[MultiClusterClient], + evergreen_task_id: str, + multi_cluster_operator_installation_config: Dict[str, str], +): + testhelper.test_create_namespaces( + namespace, + mdba_ns, + mdbb_ns, + unmanaged_mdb_ns, + central_cluster_client, + member_cluster_clients, + evergreen_task_id, + multi_cluster_operator_installation_config, + ) + + +@mark.e2e_mongodb_multi_cluster_specific_namespaces +def test_prepare_namespace( + multi_cluster_operator_installation_config: Dict[str, str], + member_cluster_clients: List[MultiClusterClient], + central_cluster_name: str, + mdba_ns: str, + mdbb_ns: str, +): + testhelper.test_prepare_namespace( + multi_cluster_operator_installation_config, member_cluster_clients, central_cluster_name, mdba_ns, mdbb_ns + ) + + +@mark.e2e_mongodb_multi_cluster_clusterwide +def test_deploy_operator(multi_cluster_operator_clustermode: Operator): + testhelper.test_deploy_operator(multi_cluster_operator_clustermode) + + 
+@mark.e2e_mongodb_multi_cluster_specific_namespaces +def test_deploy_operator(install_operator: Operator): + testhelper.test_deploy_operator(install_operator) + + +@mark.e2e_mongodb_multi_cluster_specific_namespaces +def test_copy_configmap_and_secret_across_ns( + namespace: str, + central_cluster_client: client.ApiClient, + multi_cluster_operator_installation_config: Dict[str, str], + mdba_ns: str, + mdbb_ns: str, +): + testhelper.test_copy_configmap_and_secret_across_ns( + namespace, central_cluster_client, multi_cluster_operator_installation_config, mdba_ns, mdbb_ns + ) + + +@mark.e2e_mongodb_multi_cluster_specific_namespaces +def test_create_mongodb_multi_nsa(mongodb_multi_a: MongoDB): + testhelper.test_create_mongodb_multi_nsa(mongodb_multi_a) + + +@mark.e2e_mongodb_multi_cluster_specific_namespaces +def test_create_mongodb_multi_nsb(mongodb_multi_b: MongoDB): + testhelper.test_create_mongodb_multi_nsb(mongodb_multi_b) + + +@mark.e2e_mongodb_multi_cluster_specific_namespaces +def test_create_mongodb_multi_unmanaged(unmanaged_mongodb_multi: MongoDB): + testhelper.test_create_mongodb_multi_unmanaged(unmanaged_mongodb_multi) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_dr_connect.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_dr_connect.py new file mode 100644 index 000000000..ab51af376 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_dr_connect.py @@ -0,0 +1,69 @@ +from typing import Dict + +import kubernetes +import pytest +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDB +from kubetester.operator import Operator + +from ..shared import multi_cluster_dr_connect as testhelper + +MDB_RESOURCE = "multi-replica-set" + + +# this test is intended to run locally, using telepresence. 
Make sure to configure the cluster_context to api-server mapping
+# in the "cluster_host_mapping" fixture before running it. It is intended to be run locally with the command: make e2e-telepresence test=e2e_mongodb_multi_cluster_dr local=true
+@pytest.fixture(scope="module")
+def mongodb_multi(central_cluster_client: kubernetes.client.ApiClient, namespace: str) -> MongoDB:
+    resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi-dr.yaml"), MDB_RESOURCE, namespace)
+
+    resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client)
+    # return resource.load()
+    return resource.create()
+
+
+@pytest.fixture(scope="module")
+def mongodb_multi_collection(mongodb_multi: MongoDB):
+    collection = mongodb_multi.tester().client["testdb"]
+    return collection["testcollection"]
+
+
+@pytest.mark.e2e_mongodb_multi_cluster_dr
+def test_create_kube_config_file(cluster_clients: Dict):
+    testhelper.test_create_kube_config_file(cluster_clients)
+
+
+@pytest.mark.e2e_mongodb_multi_cluster_dr
+def test_deploy_operator(multi_cluster_operator: Operator):
+    testhelper.test_deploy_operator(multi_cluster_operator)
+
+
+@pytest.mark.e2e_mongodb_multi_cluster_dr
+def test_create_mongodb_multi(mongodb_multi: MongoDB):
+    testhelper.test_create_mongodb_multi(mongodb_multi)
+
+
+@pytest.mark.e2e_mongodb_multi_cluster_dr
+def test_replica_set_is_reachable(mongodb_multi: MongoDB):
+    testhelper.test_replica_set_is_reachable(mongodb_multi)
+
+
+@pytest.mark.e2e_mongodb_multi_cluster_dr
+@pytest.mark.flaky(reruns=100, reruns_delay=6)
+def test_add_test_data(mongodb_multi_collection):
+    testhelper.test_add_test_data(mongodb_multi_collection)
+
+
+@pytest.mark.e2e_mongodb_multi_cluster_dr
+def test_delete_member_3_cluster():
+    testhelper.test_delete_member_3_cluster()
+
+
+@pytest.mark.e2e_mongodb_multi_cluster_dr
+def test_replica_set_is_reachable_after_deletetion(mongodb_multi: MongoDB):
+    testhelper.test_replica_set_is_reachable_after_deletetion(mongodb_multi)
+
+
+@pytest.mark.e2e_mongodb_multi_cluster_dr +def test_add_test_data_after_deletion(mongodb_multi_collection, capsys): + testhelper.test_add_test_data_after_deletion(mongodb_multi_collection, capsys) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_enable_tls.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_enable_tls.py new file mode 100644 index 000000000..4a21ef31a --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_enable_tls.py @@ -0,0 +1,76 @@ +from typing import List + +import kubernetes +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import ensure_ent_version +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDB +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from pytest import fixture, mark +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_enable_tls as testhelper + +CERT_SECRET_PREFIX = "clustercert" +MDB_RESOURCE = "multi-cluster-replica-set" +BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" + + +@fixture(scope="module") +def mongodb_multi_unmarshalled(namespace: str, member_cluster_names, custom_mdb_version: str) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + resource.set_version(ensure_ent_version(custom_mdb_version)) + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + return resource + + +@fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDB, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + + return create_multi_cluster_mongodb_tls_certs( + multi_cluster_issuer, + 
BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + ) + + +@fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + mongodb_multi_unmarshalled: MongoDB, +) -> MongoDB: + + resource = mongodb_multi_unmarshalled + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource.create() + + +@mark.e2e_mongodb_multi_cluster_enable_tls +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@mark.e2e_mongodb_multi_cluster_enable_tls +def test_create_mongodb_multi(mongodb_multi: MongoDB, namespace: str): + testhelper.test_create_mongodb_multi(mongodb_multi, namespace) + + +@mark.e2e_mongodb_multi_cluster_enable_tls +def test_enabled_tls_mongodb_multi( + mongodb_multi: MongoDB, + namespace: str, + server_certs: str, + multi_cluster_issuer_ca_configmap: str, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_enabled_tls_mongodb_multi( + mongodb_multi, namespace, server_certs, multi_cluster_issuer_ca_configmap, member_cluster_clients + ) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_ldap.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_ldap.py new file mode 100644 index 000000000..25938131a --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_ldap.py @@ -0,0 +1,247 @@ +from typing import Dict, List + +import kubernetes +from kubetester import create_secret +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import ensure_ent_version +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_static_containers +from kubetester.ldap import LDAPUser, OpenLDAP +from kubetester.mongodb_multi import MongoDB +from kubetester.mongodb_user 
import MongoDBUser, Role, generic_user +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from pytest import fixture, mark +from tests.conftest import get_multi_cluster_operator_installation_config +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_ldap as testhelper + +CERT_SECRET_PREFIX = "clustercert" +MDB_RESOURCE = "multi-replica-set-ldap" +BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" + + +@fixture(scope="module") +def multi_cluster_operator_installation_config(namespace) -> Dict[str, str]: + config = get_multi_cluster_operator_installation_config(namespace=namespace) + config["customEnvVars"] = config["customEnvVars"] + "\&MDB_AUTOMATIC_RECOVERY_BACKOFF_TIME_S=360" + return config + + +@fixture(scope="module") +def mongodb_multi_unmarshalled( + namespace: str, + member_cluster_names, + custom_mdb_version: str, +) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + # This test has always been tested with 5.0.5-ent. After trying to unify its variant and upgrading it + # to MDB 6 we realized that our EVG hosts contain outdated docker and seccomp libraries in the host which + # cause MDB process to exit. It might be a good idea to try uncommenting it after migrating to newer EVG hosts. + # See https://github.com/docker-library/mongo/issues/606 for more information + # resource.set_version(ensure_ent_version(custom_mdb_version)) + + resource.set_version(ensure_ent_version("5.0.5-ent")) + + # Setting the initial clusterSpecList to more members than we need to generate + # the certificates for all the members once the RS is scaled up. 
+ resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + return resource + + +@fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDB, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + return create_multi_cluster_mongodb_tls_certs( + multi_cluster_issuer, + BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + ) + + +@fixture(scope="module") +def mongodb_multi( + mongodb_multi_unmarshalled: MongoDB, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names, + member_cluster_clients: List[MultiClusterClient], + namespace: str, + multi_cluster_issuer_ca_configmap: str, + server_certs: str, + multicluster_openldap_tls: OpenLDAP, + ldap_mongodb_agent_user: LDAPUser, + issuer_ca_configmap: str, +) -> MongoDB: + resource = mongodb_multi_unmarshalled + + secret_name = "bind-query-password" + create_secret( + namespace, + secret_name, + {"password": multicluster_openldap_tls.admin_password}, + api_client=central_cluster_client, + ) + ac_secret_name = "automation-config-password" + create_secret( + namespace, + ac_secret_name, + {"automationConfigPassword": ldap_mongodb_agent_user.password}, + api_client=central_cluster_client, + ) + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [1, 1, 1]) + + resource["spec"]["security"] = { + "certsSecretPrefix": CERT_SECRET_PREFIX, + "tls": { + "enabled": True, + "ca": multi_cluster_issuer_ca_configmap, + }, + "authentication": { + "enabled": True, + "modes": ["LDAP", "SCRAM"], # SCRAM for testing CLOUDP-229222 + "ldap": { + "servers": [multicluster_openldap_tls.servers], + "bindQueryUser": "cn=admin,dc=example,dc=org", + "bindQueryPasswordSecretRef": {"name": secret_name}, + "transportSecurity": "none", # For testing CLOUDP-229222 + "validateLDAPServerConfig": True, + 
"caConfigMapRef": {"name": issuer_ca_configmap, "key": "ca-pem"}, + "userToDNMapping": '[{match: "(.+)",substitution: "uid={0},ou=groups,dc=example,dc=org"}]', + "timeoutMS": 12345, + "userCacheInvalidationInterval": 60, + }, + "agents": { + "mode": "SCRAM", # SCRAM for testing CLOUDP-189433 + "automationPasswordSecretRef": { + "name": ac_secret_name, + "key": "automationConfigPassword", + }, + "automationUserName": ldap_mongodb_agent_user.uid, + "automationLdapGroupDN": "cn=agents,ou=groups,dc=example,dc=org", + }, + }, + } + resource["spec"]["additionalMongodConfig"] = {"net": {"ssl": {"mode": "preferSSL"}}} + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + resource.update() + return resource + + +@fixture(scope="module") +def user_ldap( + mongodb_multi: MongoDB, + namespace: str, + ldap_mongodb_user: LDAPUser, + central_cluster_client: kubernetes.client.ApiClient, +) -> MongoDBUser: + mongodb_user = ldap_mongodb_user + user = generic_user( + namespace, + username=mongodb_user.uid, + db="$external", + password=mongodb_user.password, + mongodb_resource=mongodb_multi, + ) + user.add_roles( + [ + Role(db="admin", role="clusterAdmin"), + Role(db="admin", role="readWriteAnyDatabase"), + Role(db="admin", role="dbAdminAnyDatabase"), + ] + ) + user.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + user.update() + return user + + +@skip_if_static_containers +@mark.e2e_mongodb_multi_cluster_with_ldap +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@skip_if_static_containers +@mark.e2e_mongodb_multi_cluster_with_ldap +def test_mongodb_multi_pending(mongodb_multi: MongoDB): + testhelper.test_mongodb_multi_pending(mongodb_multi) + + +@skip_if_static_containers +@mark.e2e_mongodb_multi_cluster_with_ldap +def test_turn_tls_on_CLOUDP_229222(mongodb_multi: MongoDB): + testhelper.test_turn_tls_on_CLOUDP_229222(mongodb_multi) + + +@skip_if_static_containers 
+@mark.e2e_mongodb_multi_cluster_with_ldap +def test_multi_replicaset_CLOUDP_229222(mongodb_multi: MongoDB): + testhelper.test_multi_replicaset_CLOUDP_229222(mongodb_multi) + + +@skip_if_static_containers +@mark.e2e_mongodb_multi_cluster_with_ldap +def test_restore_mongodb_multi_ldap_configuration(mongodb_multi: MongoDB): + testhelper.test_restore_mongodb_multi_ldap_configuration(mongodb_multi) + + +@skip_if_static_containers +@mark.e2e_mongodb_multi_cluster_with_ldap +def test_create_ldap_user(mongodb_multi: MongoDB, user_ldap: MongoDBUser): + testhelper.test_create_ldap_user(mongodb_multi, user_ldap) + + +@skip_if_static_containers +@mark.e2e_mongodb_multi_cluster_with_ldap +def test_ldap_user_created_and_can_authenticate(mongodb_multi: MongoDB, user_ldap: MongoDBUser, ca_path: str): + testhelper.test_ldap_user_created_and_can_authenticate(mongodb_multi, user_ldap, ca_path) + + +@skip_if_static_containers +@mark.e2e_mongodb_multi_cluster_with_ldap +def test_ops_manager_state_correctly_updated(mongodb_multi: MongoDB, user_ldap: MongoDBUser): + testhelper.test_ops_manager_state_correctly_updated(mongodb_multi, user_ldap) + + +@skip_if_static_containers +@mark.e2e_mongodb_multi_cluster_with_ldap +def test_deployment_is_reachable_with_ldap_agent(mongodb_multi: MongoDB): + testhelper.test_deployment_is_reachable_with_ldap_agent(mongodb_multi) + + +@skip_if_static_containers +@mark.e2e_mongodb_multi_cluster_with_ldap +def test_scale_mongodb_multi(mongodb_multi: MongoDB, member_cluster_names): + testhelper.test_scale_mongodb_multi(mongodb_multi, member_cluster_names) + + +@skip_if_static_containers +@mark.e2e_mongodb_multi_cluster_with_ldap +def test_new_ldap_user_can_authenticate_after_scaling(mongodb_multi: MongoDB, user_ldap: MongoDBUser, ca_path: str): + testhelper.test_new_ldap_user_can_authenticate_after_scaling(mongodb_multi, user_ldap, ca_path) + + +@skip_if_static_containers +@mark.e2e_mongodb_multi_cluster_with_ldap +def test_disable_agent_auth(mongodb_multi: 
MongoDB): + testhelper.test_disable_agent_auth(mongodb_multi) + + +@skip_if_static_containers +@mark.e2e_mongodb_multi_cluster_with_ldap +def test_mongodb_multi_connectivity_with_no_auth(mongodb_multi: MongoDB): + testhelper.test_mongodb_multi_connectivity_with_no_auth(mongodb_multi) + + +@skip_if_static_containers +@mark.e2e_mongodb_multi_cluster_with_ldap +def test_deployment_is_reachable_with_no_auth(mongodb_multi: MongoDB): + testhelper.test_deployment_is_reachable_with_no_auth(mongodb_multi) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_ldap_custom_roles.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_ldap_custom_roles.py new file mode 100644 index 000000000..58fd987c8 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_ldap_custom_roles.py @@ -0,0 +1,190 @@ +from typing import List + +import kubernetes +from kubetester import create_secret +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_static_containers +from kubetester.ldap import LDAPUser, OpenLDAP +from kubetester.mongodb_multi import MongoDB +from kubetester.mongodb_user import MongoDBUser, generic_user +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from pytest import fixture, mark +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_ldap_custom_roles as testhelper + +CERT_SECRET_PREFIX = "clustercert" +MDB_RESOURCE = "multi-replica-set-ldap" +BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" + + +@fixture(scope="module") +def mongodb_multi_unmarshalled(namespace: str, member_cluster_names, custom_mdb_version: str) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + + 
# This test has always been tested with 5.0.5-ent. After trying to unify its variant and upgrading it + # to MDB 6 we realized that our EVG hosts contain outdated docker and seccomp libraries in the host which + # cause MDB process to exit. It might be a good idea to try uncommenting it after migrating to newer EVG hosts. + # See https://github.com/docker-library/mongo/issues/606 for more information + # resource.set_version(ensure_ent_version(custom_mdb_version)) + resource.set_version("5.0.5-ent") + + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + return resource + + +@fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDB, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + return create_multi_cluster_mongodb_tls_certs( + multi_cluster_issuer, + BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + ) + + +@fixture(scope="module") +def mongodb_multi( + mongodb_multi_unmarshalled: MongoDB, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_clients: List[MultiClusterClient], + namespace: str, + multi_cluster_issuer_ca_configmap: str, + server_certs: str, + multicluster_openldap_tls: OpenLDAP, + ldap_mongodb_agent_user: LDAPUser, + issuer_ca_configmap: str, +) -> MongoDB: + resource = mongodb_multi_unmarshalled + secret_name = "bind-query-password" + create_secret( + namespace, + secret_name, + {"password": multicluster_openldap_tls.admin_password}, + api_client=central_cluster_client, + ) + ac_secret_name = "automation-config-password" + create_secret( + namespace, + ac_secret_name, + {"automationConfigPassword": ldap_mongodb_agent_user.password}, + api_client=central_cluster_client, + ) + + resource["spec"]["security"] = { + "certsSecretPrefix": CERT_SECRET_PREFIX, + "tls": { + "enabled": True, + "ca": 
multi_cluster_issuer_ca_configmap, + }, + "authentication": { + "enabled": True, + "modes": ["LDAP", "SCRAM"], + "ldap": { + "servers": [multicluster_openldap_tls.servers], + "bindQueryUser": "cn=admin,dc=example,dc=org", + "bindQueryPasswordSecretRef": {"name": secret_name}, + "transportSecurity": "tls", + "validateLDAPServerConfig": True, + "caConfigMapRef": {"name": issuer_ca_configmap, "key": "ca-pem"}, + "userToDNMapping": '[{match: "(.+)",substitution: "uid={0},ou=groups,dc=example,dc=org"}]', + "authzQueryTemplate": "{USER}?memberOf?base", + }, + "agents": { + "mode": "SCRAM", + }, + }, + "roles": [ + { + "role": "cn=users,ou=groups,dc=example,dc=org", + "db": "admin", + "privileges": [ + { + "actions": ["insert"], + "resource": {"collection": "foo", "db": "foo"}, + }, + { + "actions": ["insert", "find"], + "resource": {"collection": "", "db": "admin"}, + }, + ], + }, + ], + } + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + resource.update() + return resource + + +@fixture(scope="module") +def user_ldap( + mongodb_multi: MongoDB, + namespace: str, + ldap_mongodb_user: LDAPUser, + central_cluster_client: kubernetes.client.ApiClient, +) -> MongoDBUser: + mongodb_user = ldap_mongodb_user + user = generic_user( + namespace, + username=mongodb_user.uid, + db="$external", + password=mongodb_user.password, + mongodb_resource=mongodb_multi, + ) + user.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + user.update() + return user + + +@skip_if_static_containers +@mark.e2e_mongodb_multi_cluster_with_ldap_custom_roles +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@skip_if_static_containers +@mark.e2e_mongodb_multi_cluster_with_ldap_custom_roles +def test_create_mongodb_multi_with_ldap(mongodb_multi: MongoDB): + testhelper.test_create_mongodb_multi_with_ldap(mongodb_multi) + + +@skip_if_static_containers 
+@mark.e2e_mongodb_multi_cluster_with_ldap_custom_roles +def test_create_ldap_user(mongodb_multi: MongoDB, user_ldap: MongoDBUser): + testhelper.test_create_ldap_user(mongodb_multi, user_ldap) + + +@skip_if_static_containers +@mark.e2e_mongodb_multi_cluster_with_ldap_custom_roles +def test_ldap_user_can_write_to_database(mongodb_multi: MongoDB, user_ldap: MongoDBUser, ca_path: str): + testhelper.test_ldap_user_can_write_to_database(mongodb_multi, user_ldap, ca_path) + + +@skip_if_static_containers +@mark.e2e_mongodb_multi_cluster_with_ldap_custom_roles +@mark.xfail(reason="The user should not be able to write to a database/collection it is not authorized to write on") +def test_ldap_user_can_write_to_other_collection(mongodb_multi: MongoDB, user_ldap: MongoDBUser, ca_path: str): + testhelper.test_ldap_user_can_write_to_other_collection(mongodb_multi, user_ldap, ca_path) + + +@skip_if_static_containers +@mark.e2e_mongodb_multi_cluster_with_ldap_custom_roles +@mark.xfail(reason="The user should not be able to write to a database/collection it is not authorized to write on") +def test_ldap_user_can_write_to_other_database(mongodb_multi: MongoDB, user_ldap: MongoDBUser, ca_path: str): + testhelper.test_ldap_user_can_write_to_other_database(mongodb_multi, user_ldap, ca_path) + + +@skip_if_static_containers +@mark.e2e_mongodb_multi_cluster_with_ldap_custom_roles +def test_automation_config_has_roles(mongodb_multi: MongoDB): + testhelper.test_automation_config_has_roles(mongodb_multi) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_oidc_m2m_group.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_oidc_m2m_group.py new file mode 100644 index 000000000..2252783ea --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_oidc_m2m_group.py @@ -0,0 +1,52 @@ +import kubernetes +import kubetester.oidc as oidc +import pytest +from kubetester import try_load 
+from kubetester.kubetester import KubernetesTester +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDB +from kubetester.operator import Operator +from pytest import fixture + +from ..shared import multi_cluster_oidc_m2m_group as testhelper + +MDB_RESOURCE = "oidc-multi-replica-set" + + +@fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names, + custom_mdb_version: str, +) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("oidc/mongodb-multi-m2m-group.yaml"), MDB_RESOURCE, namespace) + if try_load(resource): + return resource + + oidc_provider_configs = resource.get_oidc_provider_configs() + + oidc_provider_configs[0]["clientId"] = oidc.get_cognito_workload_client_id() + oidc_provider_configs[0]["audience"] = oidc.get_cognito_workload_client_id() + oidc_provider_configs[0]["issuerURI"] = oidc.get_cognito_workload_url() + + resource.set_oidc_provider_configs(oidc_provider_configs) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + return resource.update() + + +@pytest.mark.e2e_mongodb_multi_cluster_oidc_m2m_group +class TestOIDCMultiCluster(KubernetesTester): + def test_deploy_operator(self, multi_cluster_operator: Operator): + testhelper.TestOIDCMultiCluster.test_deploy_operator(self, multi_cluster_operator) + + def test_create_oidc_replica_set(self, mongodb_multi: MongoDB): + testhelper.TestOIDCMultiCluster.test_create_oidc_replica_set(self, mongodb_multi) + + def test_assert_connectivity(self, mongodb_multi: MongoDB): + testhelper.TestOIDCMultiCluster.test_assert_connectivity(self, mongodb_multi) + + def test_ops_manager_state_updated_correctly(self, mongodb_multi: MongoDB): + testhelper.TestOIDCMultiCluster.test_ops_manager_state_updated_correctly(self, mongodb_multi) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_oidc_m2m_user.py 
b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_oidc_m2m_user.py new file mode 100644 index 000000000..a89be690b --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_oidc_m2m_user.py @@ -0,0 +1,66 @@ +import kubernetes +import kubetester.oidc as oidc +import pytest +from kubetester import try_load +from kubetester.kubetester import KubernetesTester +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDB +from kubetester.mongodb_user import MongoDBUser +from kubetester.operator import Operator +from pytest import fixture + +from ..shared import multi_cluster_oidc_m2m_user as testhelper + +MDB_RESOURCE = "oidc-multi-replica-set" + + +@fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names, + custom_mdb_version: str, +) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("oidc/mongodb-multi-m2m-user.yaml"), MDB_RESOURCE, namespace) + if try_load(resource): + return resource + + oidc_provider_configs = resource.get_oidc_provider_configs() + + oidc_provider_configs[0]["clientId"] = oidc.get_cognito_workload_client_id() + oidc_provider_configs[0]["audience"] = oidc.get_cognito_workload_client_id() + oidc_provider_configs[0]["issuerURI"] = oidc.get_cognito_workload_url() + + resource.set_oidc_provider_configs(oidc_provider_configs) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + return resource.update() + + +@fixture(scope="module") +def oidc_user(namespace) -> MongoDBUser: + resource = MongoDBUser.from_yaml(yaml_fixture("oidc/oidc-user-multi.yaml"), namespace=namespace) + + resource["spec"]["username"] = f"OIDC-test-user/{oidc.get_cognito_workload_user_id()}" + resource["spec"]["mongodbResourceRef"]["name"] = MDB_RESOURCE + + return resource.update() + + +@pytest.mark.e2e_mongodb_multi_cluster_oidc_m2m_user +class 
TestOIDCMultiCluster(KubernetesTester): + def test_deploy_operator(self, multi_cluster_operator: Operator): + testhelper.TestOIDCMultiCluster.test_deploy_operator(self, multi_cluster_operator) + + def test_create_oidc_replica_set(self, mongodb_multi: MongoDB): + testhelper.TestOIDCMultiCluster.test_create_oidc_replica_set(self, mongodb_multi) + + def test_create_user(self, oidc_user: MongoDBUser): + testhelper.TestOIDCMultiCluster.test_create_user(self, oidc_user) + + def test_assert_connectivity(self, mongodb_multi: MongoDB): + testhelper.TestOIDCMultiCluster.test_assert_connectivity(self, mongodb_multi) + + def test_ops_manager_state_updated_correctly(self, mongodb_multi: MongoDB): + testhelper.TestOIDCMultiCluster.test_ops_manager_state_updated_correctly(self, mongodb_multi) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_pvc_resize.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_pvc_resize.py new file mode 100644 index 000000000..d12339c08 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_pvc_resize.py @@ -0,0 +1,53 @@ +from typing import List + +import kubernetes +import pytest +from kubetester import try_load +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDB +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_pvc_resize as testhelper + +RESOURCE_NAME = "multi-replica-set-pvc-resize" + + +@pytest.fixture(scope="module") +def mongodb_multi( + namespace: str, + multi_cluster_issuer_ca_configmap: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names: list[str], +) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi-pvc-resize.yaml"), RESOURCE_NAME, namespace) + resource.api = 
kubernetes.client.CustomObjectsApi(central_cluster_client) + + try_load(resource) + + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + return resource + + +@pytest.mark.e2e_mongodb_multi_cluster_pvc_resize +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodb_multi_cluster_pvc_resize +def test_create_mongodb_multi(mongodb_multi: MongoDB): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_pvc_resize +def test_mongodb_multi_resize_pvc_state_changes(mongodb_multi: MongoDB): + testhelper.test_mongodb_multi_resize_pvc_state_changes(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_pvc_resize +def test_mongodb_multi_resize_finished( + mongodb_multi: MongoDB, namespace: str, member_cluster_clients: List[MultiClusterClient] +): + testhelper.test_mongodb_multi_resize_finished(mongodb_multi, namespace, member_cluster_clients) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_recover_clusterwide.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_recover_clusterwide.py new file mode 100644 index 000000000..3f78774fa --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_recover_clusterwide.py @@ -0,0 +1,236 @@ +import os +from typing import Dict, List + +import kubernetes +from kubeobject import CustomObject +from kubernetes import client +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDB +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from pytest import fixture, mark +from tests.conftest import ( + _install_multi_cluster_operator, + run_kube_config_creation_tool, +) +from tests.constants import MULTI_CLUSTER_OPERATOR_NAME, OPERATOR_NAME +from 
tests.multicluster.conftest import ( + cluster_spec_list, +) + +from ..shared import multi_cluster_recover_clusterwide as testhelper + +MDB_RESOURCE = "multi-replica-set" + + +@fixture(scope="module") +def mdba_ns(namespace: str): + return "{}-mdb-ns-a".format(namespace) + + +@fixture(scope="module") +def mdbb_ns(namespace: str): + return "{}-mdb-ns-b".format(namespace) + + +@fixture(scope="module") +def mongodb_multi_a( + central_cluster_client: kubernetes.client.ApiClient, + mdba_ns: str, + member_cluster_names: List[str], + custom_mdb_version: str, +) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, mdba_ns) + resource.set_version(custom_mdb_version) + + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + resource.update() + return resource + + +@fixture(scope="module") +def mongodb_multi_b( + central_cluster_client: kubernetes.client.ApiClient, + mdbb_ns: str, + member_cluster_names: List[str], + custom_mdb_version: str, +) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, mdbb_ns) + resource.set_version(custom_mdb_version) + + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + resource.update() + return resource + + +@fixture(scope="module") +def install_operator( + namespace: str, + central_cluster_name: str, + multi_cluster_operator_installation_config: Dict[str, str], + central_cluster_client: client.ApiClient, + member_cluster_clients: List[MultiClusterClient], + member_cluster_names: List[str], + mdba_ns: str, + mdbb_ns: str, +) -> Operator: + os.environ["HELM_KUBECONTEXT"] = central_cluster_name + member_cluster_namespaces = mdba_ns + "," + mdbb_ns + run_kube_config_creation_tool( + member_cluster_names, + namespace, + namespace, + 
member_cluster_names, + True, + service_account_name=MULTI_CLUSTER_OPERATOR_NAME, + operator_name=OPERATOR_NAME, + ) + + return _install_multi_cluster_operator( + namespace, + multi_cluster_operator_installation_config, + central_cluster_client, + member_cluster_clients, + { + "operator.deployment_name": MULTI_CLUSTER_OPERATOR_NAME, + "operator.name": MULTI_CLUSTER_OPERATOR_NAME, + "operator.createOperatorServiceAccount": "false", + "operator.watchNamespace": member_cluster_namespaces, + "multiCluster.performFailOver": "false", + }, + central_cluster_name, + operator_name=MULTI_CLUSTER_OPERATOR_NAME, + ) + + +@mark.e2e_mongodb_multi_cluster_recover_clusterwide +def test_label_operator_namespace(namespace: str, central_cluster_client: kubernetes.client.ApiClient): + testhelper.test_label_operator_namespace(namespace, central_cluster_client) + + +@mark.e2e_mongodb_multi_cluster_recover_clusterwide +def test_create_namespaces( + namespace: str, + mdba_ns: str, + mdbb_ns: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_clients: List[MultiClusterClient], + evergreen_task_id: str, + multi_cluster_operator_installation_config: Dict[str, str], +): + testhelper.test_create_namespaces( + namespace, + mdba_ns, + mdbb_ns, + central_cluster_client, + member_cluster_clients, + evergreen_task_id, + multi_cluster_operator_installation_config, + ) + + +@mark.e2e_mongodb_multi_cluster_recover_clusterwide +def test_create_service_entry(service_entries: List[CustomObject]): + testhelper.test_create_service_entry(service_entries) + + +@mark.e2e_mongodb_multi_cluster_recover_clusterwide +def test_delete_cluster_role_and_binding( + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_delete_cluster_role_and_binding(central_cluster_client, member_cluster_clients) + + +@mark.e2e_mongodb_multi_cluster_recover_clusterwide +def test_deploy_operator(install_operator: Operator): + 
testhelper.test_deploy_operator(install_operator) + + +@mark.e2e_mongodb_multi_cluster_recover_clusterwide +def test_prepare_namespace( + multi_cluster_operator_installation_config: Dict[str, str], + member_cluster_clients: List[MultiClusterClient], + central_cluster_name: str, + mdba_ns: str, + mdbb_ns: str, +): + testhelper.test_prepare_namespace( + multi_cluster_operator_installation_config, member_cluster_clients, central_cluster_name, mdba_ns, mdbb_ns + ) + + +@mark.e2e_mongodb_multi_cluster_recover_clusterwide +def test_copy_configmap_and_secret_across_ns( + namespace: str, + central_cluster_client: client.ApiClient, + mdba_ns: str, + mdbb_ns: str, +): + testhelper.test_copy_configmap_and_secret_across_ns(namespace, central_cluster_client, mdba_ns, mdbb_ns) + + +@mark.e2e_mongodb_multi_cluster_recover_clusterwide +def test_create_mongodb_multi_nsa_nsb(mongodb_multi_a: MongoDB, mongodb_multi_b: MongoDB): + testhelper.test_create_mongodb_multi_nsa_nsb(mongodb_multi_a, mongodb_multi_b) + + +@mark.e2e_mongodb_multi_cluster_recover_clusterwide +def test_update_service_entry_block_failed_cluster_traffic( + namespace: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names: List[str], +): + testhelper.test_update_service_entry_block_failed_cluster_traffic( + namespace, central_cluster_client, member_cluster_names + ) + + +@mark.e2e_mongodb_multi_cluster_recover_clusterwide +def test_delete_database_statefulsets_in_failed_cluster( + mongodb_multi_a: MongoDB, + mongodb_multi_b: MongoDB, + mdba_ns: str, + mdbb_ns: str, + member_cluster_names: list[str], + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_delete_database_statefulsets_in_failed_cluster( + mongodb_multi_a, mongodb_multi_b, mdba_ns, mdbb_ns, member_cluster_names, member_cluster_clients + ) + + +@mark.e2e_mongodb_multi_cluster_recover_clusterwide +def test_mongodb_multi_nsa_enters_failed_stated(mongodb_multi_a: MongoDB): + 
testhelper.test_mongodb_multi_nsa_enters_failed_stated(mongodb_multi_a) + + +@mark.e2e_mongodb_multi_cluster_recover_clusterwide +def test_mongodb_multi_nsb_enters_failed_stated(mongodb_multi_b: MongoDB): + testhelper.test_mongodb_multi_nsb_enters_failed_stated(mongodb_multi_b) + + +@mark.e2e_mongodb_multi_cluster_recover_clusterwide +def test_recover_operator_remove_cluster( + member_cluster_names: List[str], + namespace: str, + mdba_ns: str, + mdbb_ns: str, + central_cluster_client: kubernetes.client.ApiClient, +): + testhelper.test_recover_operator_remove_cluster( + member_cluster_names, namespace, mdba_ns, mdbb_ns, central_cluster_client + ) + + +@mark.e2e_mongodb_multi_cluster_recover_clusterwide +def test_mongodb_multi_nsa_recovers_removing_cluster(mongodb_multi_a: MongoDB): + testhelper.test_mongodb_multi_nsa_recovers_removing_cluster(mongodb_multi_a) + + +@mark.e2e_mongodb_multi_cluster_recover_clusterwide +def test_mongodb_multi_nsb_recovers_removing_cluster(mongodb_multi_b: MongoDB): + testhelper.test_mongodb_multi_nsb_recovers_removing_cluster(mongodb_multi_b) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_recover_network_partition.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_recover_network_partition.py new file mode 100644 index 000000000..962d72619 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_recover_network_partition.py @@ -0,0 +1,92 @@ +from typing import List + +import kubernetes +from kubeobject import CustomObject +from kubernetes import client +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDB +from kubetester.operator import Operator +from pytest import fixture, mark +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_recover_network_partition as testhelper + +RESOURCE_NAME = "multi-replica-set" + + 
+@fixture(scope="module") +def mongodb_multi( + central_cluster_client: client.ApiClient, + namespace: str, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi.yaml"), RESOURCE_NAME, namespace) + resource.set_version(custom_mdb_version) + resource["spec"]["persistent"] = False + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + resource.api = client.CustomObjectsApi(central_cluster_client) + + return resource + + +@mark.e2e_mongodb_multi_cluster_recover_network_partition +def test_label_namespace(namespace: str, central_cluster_client: client.ApiClient): + testhelper.test_label_namespace(namespace, central_cluster_client) + + +@mark.e2e_mongodb_multi_cluster_recover_network_partition +def test_create_service_entry(service_entries: List[CustomObject]): + testhelper.test_create_service_entry(service_entries) + + +@mark.e2e_mongodb_multi_cluster_recover_network_partition +def test_deploy_operator(multi_cluster_operator_manual_remediation: Operator): + testhelper.test_deploy_operator(multi_cluster_operator_manual_remediation) + + +@mark.e2e_mongodb_multi_cluster_recover_network_partition +def test_create_mongodb_multi(mongodb_multi: MongoDB): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@mark.e2e_mongodb_multi_cluster_recover_network_partition +def test_update_service_entry_block_failed_cluster_traffic( + namespace: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names: List[str], +): + testhelper.test_update_service_entry_block_failed_cluster_traffic( + namespace, central_cluster_client, member_cluster_names + ) + + +@mark.e2e_mongodb_multi_cluster_recover_network_partition +def test_delete_database_statefulset_in_failed_cluster( + mongodb_multi: MongoDB, + member_cluster_names: list[str], +): + testhelper.test_delete_database_statefulset_in_failed_cluster(mongodb_multi, member_cluster_names) + + 
+@mark.e2e_mongodb_multi_cluster_recover_network_partition +def test_mongodb_multi_enters_failed_state( + mongodb_multi: MongoDB, + namespace: str, + central_cluster_client: client.ApiClient, +): + testhelper.test_mongodb_multi_enters_failed_state(mongodb_multi, namespace, central_cluster_client) + + +@mark.e2e_mongodb_multi_cluster_recover_network_partition +def test_recover_operator_remove_cluster( + member_cluster_names: List[str], + namespace: str, + central_cluster_client: client.ApiClient, +): + testhelper.test_recover_operator_remove_cluster(member_cluster_names, namespace, central_cluster_client) + + +@mark.e2e_mongodb_multi_cluster_recover_network_partition +def test_mongodb_multi_recovers_removing_cluster(mongodb_multi: MongoDB, member_cluster_names: List[str]): + testhelper.test_mongodb_multi_recovers_removing_cluster(mongodb_multi, member_cluster_names) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_replica_set.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_replica_set.py new file mode 100644 index 000000000..48c841da5 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_replica_set.py @@ -0,0 +1,138 @@ +from typing import Dict, List + +import kubernetes +import pytest +from kubernetes import client +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_local +from kubetester.mongodb_multi import MongoDB +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.conftest import ( + setup_log_rotate_for_agents, +) +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_replica_set as testhelper + +MONGODB_PORT = 30000 +MDB_RESOURCE = "multi-replica-set" + + +@pytest.fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: 
str, + member_cluster_names, + custom_mdb_version: str, +) -> MongoDB: + resource = MongoDB.from_yaml( + yaml_fixture("mongodb-multi-central-sts-override.yaml"), + MDB_RESOURCE, + namespace, + ) + resource.set_version(custom_mdb_version) + resource["spec"]["persistent"] = False + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + additional_mongod_config = { + "systemLog": {"logAppend": True, "verbosity": 4}, + "operationProfiling": {"mode": "slowOp"}, + "net": {"port": MONGODB_PORT}, + } + + resource["spec"]["additionalMongodConfig"] = additional_mongod_config + setup_log_rotate_for_agents(resource) + + # TODO: incorporate this into the base class. + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + resource.set_architecture_annotation() + + resource.update() + return resource + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set +def test_create_kube_config_file(cluster_clients: Dict, central_cluster_name: str, member_cluster_names: str): + testhelper.test_create_kube_config_file(cluster_clients, central_cluster_name, member_cluster_names) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set +def test_create_mongodb_multi(mongodb_multi: MongoDB): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set +def test_statefulset_is_created_across_multiple_clusters( + mongodb_multi: MongoDB, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulset_is_created_across_multiple_clusters(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set +def test_pvc_not_created( + mongodb_multi: MongoDB, + member_cluster_clients: List[MultiClusterClient], + namespace: str, +): + 
testhelper.test_pvc_not_created(mongodb_multi, member_cluster_clients, namespace) + + +@skip_if_local +@pytest.mark.e2e_mongodb_multi_cluster_replica_set +def test_replica_set_is_reachable(mongodb_multi: MongoDB): + testhelper.test_replica_set_is_reachable(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set +def test_statefulset_overrides(mongodb_multi: MongoDB, member_cluster_clients: List[MultiClusterClient]): + testhelper.test_statefulset_overrides(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set +def test_headless_service_creation( + mongodb_multi: MongoDB, + namespace: str, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_headless_service_creation(mongodb_multi, namespace, member_cluster_clients) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set +def test_mongodb_options(mongodb_multi: MongoDB): + testhelper.test_mongodb_options(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set +def test_update_additional_options(mongodb_multi: MongoDB, central_cluster_client: kubernetes.client.ApiClient): + testhelper.test_update_additional_options(mongodb_multi, central_cluster_client) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set +def test_mongodb_options_were_updated(mongodb_multi: MongoDB): + testhelper.test_mongodb_options_were_updated(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set +def test_delete_member_cluster_sts( + namespace: str, + mongodb_multi: MongoDB, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_delete_member_cluster_sts(namespace, mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set +def test_cleanup_on_mdbm_delete(mongodb_multi: MongoDB, member_cluster_clients: List[MultiClusterClient]): + testhelper.test_cleanup_on_mdbm_delete(mongodb_multi, member_cluster_clients) + + +def assert_container_in_sts(container_name: str, sts: 
client.V1StatefulSet): + testhelper.assert_container_in_sts(container_name, sts) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_replica_set_deletion.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_replica_set_deletion.py new file mode 100644 index 000000000..9fe94000c --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_replica_set_deletion.py @@ -0,0 +1,63 @@ +from typing import List + +import kubernetes +import pytest +from kubetester import try_load +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDB +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_replica_set_deletion as testhelper + +MDB_RESOURCE = "multi-replica-set" + + +@pytest.fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + + if try_load(resource): + return resource + + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + return resource.update() + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_deletion +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_deletion +def test_create_mongodb_multi(mongodb_multi: MongoDB): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_deletion +def test_automation_config_has_been_updated(mongodb_multi: MongoDB): + testhelper.test_automation_config_has_been_updated(mongodb_multi) + 
+ +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_deletion +def test_delete_mongodb_multi(mongodb_multi: MongoDB): + testhelper.test_delete_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_deletion +def test_deployment_has_been_removed_from_automation_config(): + testhelper.test_deployment_has_been_removed_from_automation_config() + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_deletion +def test_kubernetes_resources_have_been_cleaned_up( + mongodb_multi: MongoDB, member_cluster_clients: List[MultiClusterClient] +): + testhelper.test_kubernetes_resources_have_been_cleaned_up(mongodb_multi, member_cluster_clients) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_replica_set_ignore_unknown_users.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_replica_set_ignore_unknown_users.py new file mode 100644 index 000000000..75a209b13 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_replica_set_ignore_unknown_users.py @@ -0,0 +1,55 @@ +import kubernetes +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDB +from kubetester.operator import Operator +from pytest import fixture, mark +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_replica_set_ignore_unknown_users as testhelper + +MDB_RESOURCE = "multi-replica-set" + + +@fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDB: + + resource = MongoDB.from_yaml( + yaml_fixture("mongodb-multi.yaml"), + MDB_RESOURCE, + namespace, + ) + resource.set_version(custom_mdb_version) + + resource["spec"]["security"] = {"authentication": {"enabled": True, "modes": ["SCRAM"]}} + + 
resource["spec"]["security"]["authentication"]["ignoreUnknownUsers"] = True + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + return resource.update() + + +@mark.e2e_mongodb_multi_cluster_replica_set_ignore_unknown_users +def test_replica_set(multi_cluster_operator: Operator, mongodb_multi: MongoDB): + testhelper.test_replica_set(multi_cluster_operator, mongodb_multi) + + +@mark.e2e_mongodb_multi_cluster_replica_set_ignore_unknown_users +def test_authoritative_set_false(mongodb_multi: MongoDB): + testhelper.test_authoritative_set_false(mongodb_multi) + + +@mark.e2e_mongodb_multi_cluster_replica_set_ignore_unknown_users +def test_set_ignore_unknown_users_false(mongodb_multi: MongoDB): + testhelper.test_set_ignore_unknown_users_false(mongodb_multi) + + +@mark.e2e_mongodb_multi_cluster_replica_set_ignore_unknown_users +def test_authoritative_set_true(mongodb_multi: MongoDB): + testhelper.test_authoritative_set_true(mongodb_multi) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_replica_set_member_options.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_replica_set_member_options.py new file mode 100644 index 000000000..0d78cfe24 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_replica_set_member_options.py @@ -0,0 +1,125 @@ +from typing import Dict + +import kubernetes +import pytest +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDB +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_replica_set_member_options as testhelper + +MDB_RESOURCE = "multi-replica-set" + + +@pytest.fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + 
namespace: str, + member_cluster_names, + custom_mdb_version: str, +) -> MongoDB: + resource = MongoDB.from_yaml( + yaml_fixture("mongodb-multi.yaml"), + MDB_RESOURCE, + namespace, + ) + resource.set_version(custom_mdb_version) + member_options = [ + [ + { + "votes": 1, + "priority": "0.3", + "tags": { + "cluster": "cluster-1", + "region": "weur", + }, + }, + { + "votes": 1, + "priority": "0.7", + "tags": { + "cluster": "cluster-1", + "region": "eeur", + }, + }, + ], + [ + { + "votes": 1, + "priority": "0.2", + "tags": { + "cluster": "cluster-2", + "region": "apac", + }, + }, + ], + [ + { + "votes": 1, + "priority": "1.3", + "tags": { + "cluster": "cluster-3", + "region": "nwus", + }, + }, + { + "votes": 1, + "priority": "2.7", + "tags": { + "cluster": "cluster-3", + "region": "seus", + }, + }, + ], + ] + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2], member_options) + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + resource.update() + return resource + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_member_options +def test_create_kube_config_file(cluster_clients: Dict, central_cluster_name: str, member_cluster_names: str): + testhelper.test_create_kube_config_file(cluster_clients, central_cluster_name, member_cluster_names) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_member_options +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_member_options +def test_create_mongodb_multi(mongodb_multi: MongoDB): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_member_options +def test_mongodb_multi_member_options_ac(mongodb_multi: MongoDB): + testhelper.test_mongodb_multi_member_options_ac(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_member_options +def 
test_mongodb_multi_update_member_options(mongodb_multi: MongoDB): + testhelper.test_mongodb_multi_update_member_options(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_member_options +def test_mongodb_multi_set_member_votes_to_0(mongodb_multi: MongoDB): + testhelper.test_mongodb_multi_set_member_votes_to_0(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_member_options +def test_mongodb_multi_set_invalid_votes_and_priority(mongodb_multi: MongoDB): + testhelper.test_mongodb_multi_set_invalid_votes_and_priority(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_member_options +def test_mongodb_multi_set_recover_valid_member_options(mongodb_multi: MongoDB): + testhelper.test_mongodb_multi_set_recover_valid_member_options(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_member_options +def test_mongodb_multi_set_only_one_vote_per_member(mongodb_multi: MongoDB): + testhelper.test_mongodb_multi_set_only_one_vote_per_member(mongodb_multi) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_replica_set_migration.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_replica_set_migration.py new file mode 100644 index 000000000..b92183858 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_replica_set_migration.py @@ -0,0 +1,73 @@ +from typing import List + +import kubernetes +import pymongo +import pytest +from kubetester import try_load +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDB +from kubetester.mongotester import MongoDBBackgroundTester +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from kubetester.phase import Phase +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_replica_set_migration as testhelper 
+ +MDBM_RESOURCE = "multi-replica-set-migration" + + +@pytest.fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names: list[str], + custom_mdb_version, +) -> MongoDB: + + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDBM_RESOURCE, namespace) + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + resource["spec"]["version"] = custom_mdb_version + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + try_load(resource) + return resource + + +@pytest.fixture(scope="module") +def mdb_health_checker(mongodb_multi: MongoDB) -> MongoDBBackgroundTester: + return MongoDBBackgroundTester( + mongodb_multi.tester(), + allowed_sequential_failures=1, + health_function_params={ + "attempts": 1, + "write_concern": pymongo.WriteConcern(w="majority"), + }, + ) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_migration +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_migration +def test_create_mongodb_multi_running(mongodb_multi: MongoDB): + testhelper.test_create_mongodb_multi_running(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_migration +def test_start_background_checker(mdb_health_checker: MongoDBBackgroundTester): + testhelper.test_start_background_checker(mdb_health_checker) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_migration +def test_migrate_architecture(mongodb_multi: MongoDB, member_cluster_clients: List[MultiClusterClient]): + testhelper.test_migrate_architecture(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_migration +def test_mdb_healthy_throughout_change_version( + mdb_health_checker: MongoDBBackgroundTester, +): + testhelper.test_mdb_healthy_throughout_change_version(mdb_health_checker) 
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_replica_set_scale_down.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_replica_set_scale_down.py new file mode 100644 index 000000000..84f196250 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_replica_set_scale_down.py @@ -0,0 +1,112 @@ +from typing import List + +import kubernetes +import pytest +from kubetester import try_load +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_local +from kubetester.mongodb_multi import MongoDB +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_replica_set_scale_down as testhelper + +RESOURCE_NAME = "multi-replica-set" +BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" + + +@pytest.fixture(scope="module") +def mongodb_multi_unmarshalled( + namespace: str, + multi_cluster_issuer_ca_configmap: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi.yaml"), RESOURCE_NAME, namespace) + resource.set_version(custom_mdb_version) + # start at one member in each cluster + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + resource["spec"]["security"] = { + "certsSecretPrefix": "prefix", + "tls": { + "ca": multi_cluster_issuer_ca_configmap, + }, + } + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource + + +@pytest.fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDB, + member_cluster_clients: 
List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + + return create_multi_cluster_mongodb_tls_certs( + multi_cluster_issuer, + BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + ) + + +@pytest.fixture(scope="module") +def mongodb_multi(mongodb_multi_unmarshalled: MongoDB, server_certs: str) -> MongoDB: + if try_load(mongodb_multi_unmarshalled): + return mongodb_multi_unmarshalled + + return mongodb_multi_unmarshalled.update() + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_scale_down +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_scale_down +def test_create_mongodb_multi(mongodb_multi: MongoDB): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_scale_down +def test_statefulsets_have_been_created_correctly( + mongodb_multi: MongoDB, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulsets_have_been_created_correctly(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_scale_down +def test_ops_manager_has_been_updated_correctly_before_scaling(): + testhelper.test_ops_manager_has_been_updated_correctly_before_scaling() + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_scale_down +def test_scale_mongodb_multi(mongodb_multi: MongoDB): + testhelper.test_scale_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_scale_down +def test_statefulsets_have_been_scaled_down_correctly( + mongodb_multi: MongoDB, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulsets_have_been_scaled_down_correctly(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_scale_down +def 
test_ops_manager_has_been_updated_correctly_after_scaling(): + testhelper.test_ops_manager_has_been_updated_correctly_after_scaling() + + +@skip_if_local +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_scale_down +def test_replica_set_is_reachable(mongodb_multi: MongoDB, ca_path: str): + testhelper.test_replica_set_is_reachable(mongodb_multi, ca_path) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_replica_set_scale_up.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_replica_set_scale_up.py new file mode 100644 index 000000000..24b8a09be --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_replica_set_scale_up.py @@ -0,0 +1,115 @@ +from typing import List + +import kubernetes +import kubetester +import pytest +from kubetester.automation_config_tester import AutomationConfigTester +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_local +from kubetester.mongodb_multi import MongoDB +from kubetester.mongotester import with_tls +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from kubetester.phase import Phase +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_replica_set_scale_up as testhelper + +RESOURCE_NAME = "multi-replica-set" +BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" + + +@pytest.fixture(scope="module") +def mongodb_multi_unmarshalled( + namespace: str, + multi_cluster_issuer_ca_configmap: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names: List[str], + custom_mdb_version: str, +) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi.yaml"), RESOURCE_NAME, namespace) + resource.set_version(custom_mdb_version) + 
resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + resource["spec"]["security"] = { + "certsSecretPrefix": "prefix", + "tls": { + "ca": multi_cluster_issuer_ca_configmap, + }, + } + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource + + +@pytest.fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDB, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + + return create_multi_cluster_mongodb_tls_certs( + multi_cluster_issuer, + BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + ) + + +@pytest.fixture(scope="module") +def mongodb_multi(mongodb_multi_unmarshalled: MongoDB, server_certs: str) -> MongoDB: + # we have created certs for all 5 members, but want to start at only 3. + mongodb_multi_unmarshalled["spec"]["clusterSpecList"][0]["members"] = 1 + mongodb_multi_unmarshalled["spec"]["clusterSpecList"][1]["members"] = 1 + mongodb_multi_unmarshalled["spec"]["clusterSpecList"][2]["members"] = 1 + return mongodb_multi_unmarshalled.create() + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_scale_up +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_scale_up +def test_create_mongodb_multi(mongodb_multi: MongoDB): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_scale_up +def test_statefulsets_have_been_created_correctly( + mongodb_multi: MongoDB, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulsets_have_been_created_correctly(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_scale_up +def test_ops_manager_has_been_updated_correctly_before_scaling(): + 
testhelper.test_ops_manager_has_been_updated_correctly_before_scaling() + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_scale_up +def test_scale_mongodb_multi(mongodb_multi: MongoDB): + testhelper.test_scale_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_scale_up +def test_statefulsets_have_been_scaled_up_correctly( + mongodb_multi: MongoDB, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulsets_have_been_scaled_up_correctly(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_scale_up +def test_ops_manager_has_been_updated_correctly_after_scaling(): + testhelper.test_ops_manager_has_been_updated_correctly_after_scaling() + + +@skip_if_local +@pytest.mark.e2e_mongodb_multi_cluster_replica_set_scale_up +def test_replica_set_is_reachable(mongodb_multi: MongoDB, ca_path: str): + testhelper.test_replica_set_is_reachable(mongodb_multi, ca_path) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_replica_set_test_mtls.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_replica_set_test_mtls.py new file mode 100644 index 000000000..dfcfad88c --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_replica_set_test_mtls.py @@ -0,0 +1,86 @@ +from typing import List + +import kubernetes +import pytest +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDB +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_replica_set_test_mtls as testhelper + +MDB_RESOURCE = "multi-replica-set" + + +@pytest.fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names: list[str], + 
custom_mdb_version: str, +) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + resource.set_version(custom_mdb_version) + + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + # TODO: incorporate this into the base class. + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + resource.update() + return resource + + +@pytest.mark.e2e_mongodb_multi_cluster_mtls_test +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodb_multi_cluster_mtls_test +def test_create_mongodb_multi(mongodb_multi: MongoDB): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_mtls_test +def test_create_mongo_pod_in_separate_namespace( + member_cluster_clients: List[MultiClusterClient], + evergreen_task_id: str, + namespace: str, +): + testhelper.test_create_mongo_pod_in_separate_namespace(member_cluster_clients, evergreen_task_id, namespace) + + +@pytest.mark.e2e_mongodb_multi_cluster_mtls_test +def test_connectivity_fails_from_second_namespace( + mongodb_multi: MongoDB, + member_cluster_clients: List[MultiClusterClient], + namespace: str, +): + testhelper.test_connectivity_fails_from_second_namespace(mongodb_multi, member_cluster_clients, namespace) + + +@pytest.mark.e2e_mongodb_multi_cluster_mtls_test +def test_enable_istio_injection( + member_cluster_clients: List[MultiClusterClient], + namespace: str, +): + testhelper.test_enable_istio_injection(member_cluster_clients, namespace) + + +@pytest.mark.e2e_mongodb_multi_cluster_mtls_test +def test_delete_existing_mongo_pod(member_cluster_clients: List[MultiClusterClient], namespace: str): + testhelper.test_delete_existing_mongo_pod(member_cluster_clients, namespace) + + +@pytest.mark.e2e_mongodb_multi_cluster_mtls_test +def test_create_pod_with_istio_sidecar(member_cluster_clients: 
List[MultiClusterClient], namespace: str): + testhelper.test_create_pod_with_istio_sidecar(member_cluster_clients, namespace) + + +@pytest.mark.e2e_mongodb_multi_cluster_mtls_test +def test_connectivity_succeeds_from_second_namespace( + mongodb_multi: MongoDB, + member_cluster_clients: List[MultiClusterClient], + namespace: str, +): + testhelper.test_connectivity_succeeds_from_second_namespace(mongodb_multi, member_cluster_clients, namespace) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_scale_down_cluster.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_scale_down_cluster.py new file mode 100644 index 000000000..14d06d91f --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_scale_down_cluster.py @@ -0,0 +1,106 @@ +from typing import List + +import kubernetes +import pytest +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_local +from kubetester.mongodb_multi import MongoDB +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_scale_down_cluster as testhelper + +RESOURCE_NAME = "multi-replica-set" +BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" + + +@pytest.fixture(scope="module") +def mongodb_multi_unmarshalled( + namespace: str, + multi_cluster_issuer_ca_configmap: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi.yaml"), RESOURCE_NAME, namespace) + resource.set_version(custom_mdb_version) + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + 
resource["spec"]["security"] = { + "certsSecretPrefix": "prefix", + "tls": { + "ca": multi_cluster_issuer_ca_configmap, + }, + } + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource + + +@pytest.fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDB, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + + return create_multi_cluster_mongodb_tls_certs( + multi_cluster_issuer, + BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + ) + + +@pytest.fixture(scope="module") +def mongodb_multi(mongodb_multi_unmarshalled: MongoDB, server_certs: str) -> MongoDB: + return mongodb_multi_unmarshalled.create() + + +@pytest.mark.e2e_mongodb_multi_cluster_scale_down_cluster +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodb_multi_cluster_scale_down_cluster +def test_create_mongodb_multi(mongodb_multi: MongoDB): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_scale_down_cluster +def test_statefulsets_have_been_created_correctly( + mongodb_multi: MongoDB, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulsets_have_been_created_correctly(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodb_multi_cluster_scale_down_cluster +def test_ops_manager_has_been_updated_correctly_before_scaling(): + testhelper.test_ops_manager_has_been_updated_correctly_before_scaling() + + +@pytest.mark.e2e_mongodb_multi_cluster_scale_down_cluster +def test_scale_mongodb_multi(mongodb_multi: MongoDB): + testhelper.test_scale_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_scale_down_cluster +def test_statefulsets_have_been_scaled_down_correctly( + mongodb_multi: MongoDB, + 
member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulsets_have_been_scaled_down_correctly(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodb_multi_cluster_scale_down_cluster +def test_ops_manager_has_been_updated_correctly_after_scaling(): + testhelper.test_ops_manager_has_been_updated_correctly_after_scaling() + + +@skip_if_local +@pytest.mark.e2e_mongodb_multi_cluster_scale_down_cluster +def test_replica_set_is_reachable(mongodb_multi: MongoDB, ca_path: str): + testhelper.test_replica_set_is_reachable(mongodb_multi, ca_path) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_scale_up_cluster.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_scale_up_cluster.py new file mode 100644 index 000000000..3720afac9 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_scale_up_cluster.py @@ -0,0 +1,160 @@ +from operator import truediv +from typing import List + +import kubernetes +import pytest +from kubetester import ( + create_or_update_configmap, + random_k8s_name, + read_configmap, + try_load, +) +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import KubernetesTester +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_local +from kubetester.mongodb_multi import MongoDB +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_scale_up_cluster as testhelper + +RESOURCE_NAME = "multi-replica-set" +BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" + + +@pytest.fixture(scope="module") +def project_name_prefix(namespace: str) -> str: + return random_k8s_name(f"{namespace}-project-") + + +@pytest.fixture(scope="module") +def 
new_project_configmap(namespace: str, project_name_prefix: str) -> str: + cm = read_configmap(namespace=namespace, name="my-project") + project_name = f"{project_name_prefix}-new-project" + return create_or_update_configmap( + namespace=namespace, + name=project_name, + data={ + "baseUrl": cm["baseUrl"], + "projectName": project_name, + "orgId": cm["orgId"], + }, + ) + + +@pytest.fixture(scope="module") +def mongodb_multi_unmarshalled( + namespace: str, + multi_cluster_issuer_ca_configmap: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi.yaml"), RESOURCE_NAME, namespace) + resource.set_version(custom_mdb_version) + # ensure certs are created for the members during scale up + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [3, 1, 2]) + resource["spec"]["security"] = { + "certsSecretPrefix": "prefix", + "tls": { + "ca": multi_cluster_issuer_ca_configmap, + }, + } + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource + + +@pytest.fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDB, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + + return create_multi_cluster_mongodb_tls_certs( + multi_cluster_issuer, + BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + ) + + +@pytest.fixture(scope="function") +def mongodb_multi(mongodb_multi_unmarshalled: MongoDB, server_certs: str) -> MongoDB: + if try_load(mongodb_multi_unmarshalled): + return mongodb_multi_unmarshalled + + # remove the last element, we are only starting with 2 clusters we will scale up the 3rd one later. 
+ mongodb_multi_unmarshalled["spec"]["clusterSpecList"].pop() + # remove one member from the first cluster to start with 2 members + mongodb_multi_unmarshalled["spec"]["clusterSpecList"][0]["members"] = 2 + return mongodb_multi_unmarshalled + + +@pytest.mark.e2e_mongodb_multi_cluster_scale_up_cluster +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodb_multi_cluster_scale_up_cluster +def test_create_mongodb_multi(mongodb_multi: MongoDB): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_scale_up_cluster +def test_statefulsets_have_been_created_correctly( + mongodb_multi: MongoDB, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulsets_have_been_created_correctly(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodb_multi_cluster_scale_up_cluster +def test_ops_manager_has_been_updated_correctly_before_scaling(): + testhelper.test_ops_manager_has_been_updated_correctly_before_scaling() + + +@pytest.mark.e2e_mongodb_multi_cluster_scale_up_cluster +def test_scale_mongodb_multi(mongodb_multi: MongoDB, member_cluster_clients: List[MultiClusterClient]): + testhelper.test_scale_mongodb_multi(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodb_multi_cluster_scale_up_cluster +def test_statefulsets_have_been_scaled_up_correctly( + mongodb_multi: MongoDB, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulsets_have_been_scaled_up_correctly(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodb_multi_cluster_scale_up_cluster +def test_ops_manager_has_been_updated_correctly_after_scaling(): + testhelper.test_ops_manager_has_been_updated_correctly_after_scaling() + + +@skip_if_local +@pytest.mark.e2e_mongodb_multi_cluster_scale_up_cluster +def test_replica_set_is_reachable(mongodb_multi: MongoDB, ca_path: str): + 
testhelper.test_replica_set_is_reachable(mongodb_multi, ca_path) + + +# From here on, the tests are for verifying that we can change the project of the MongoDB resource even with +# non-sequential member ids in the replicaset. + + +@pytest.mark.e2e_mongodb_multi_cluster_scale_up_cluster +class TestNonSequentialMemberIdsInReplicaSet(KubernetesTester): + + def test_scale_up_first_cluster(self, mongodb_multi: MongoDB, member_cluster_clients: List[MultiClusterClient]): + testhelper.TestNonSequentialMemberIdsInReplicaSet.test_scale_up_first_cluster( + self, mongodb_multi, member_cluster_clients + ) + + def test_change_project(self, mongodb_multi: MongoDB, new_project_configmap: str): + testhelper.TestNonSequentialMemberIdsInReplicaSet.test_change_project( + self, mongodb_multi, new_project_configmap + ) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_scale_up_cluster_new_cluster.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_scale_up_cluster_new_cluster.py new file mode 100644 index 000000000..c2b180e41 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_scale_up_cluster_new_cluster.py @@ -0,0 +1,130 @@ +from typing import Callable, List + +import kubernetes +import pytest +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_local +from kubetester.mongodb_multi import MongoDB +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_scale_up_cluster_new_cluster as testhelper + +RESOURCE_NAME = "multi-replica-set" +BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" + + +@pytest.fixture(scope="module") +def mongodb_multi_unmarshalled( + namespace: str, + 
multi_cluster_issuer_ca_configmap: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi.yaml"), RESOURCE_NAME, namespace) + resource.set_version(custom_mdb_version) + # ensure certs are created for the members during scale up + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + resource["spec"]["security"] = { + "certsSecretPrefix": "prefix", + "tls": { + "ca": multi_cluster_issuer_ca_configmap, + }, + } + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource + + +@pytest.fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDB, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + return create_multi_cluster_mongodb_tls_certs( + multi_cluster_issuer, + BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + ) + + +@pytest.fixture(scope="module") +def mongodb_multi(mongodb_multi_unmarshalled: MongoDB, server_certs: str) -> MongoDB: + mongodb_multi_unmarshalled["spec"]["clusterSpecList"].pop() + return mongodb_multi_unmarshalled.create() + + +@pytest.mark.e2e_mongodb_multi_cluster_scale_up_cluster_new_cluster +def test_deploy_operator( + install_multi_cluster_operator_set_members_fn: Callable[[List[str]], Operator], + member_cluster_names: List[str], + namespace: str, +): + testhelper.test_deploy_operator(install_multi_cluster_operator_set_members_fn, member_cluster_names, namespace) + + +@pytest.mark.e2e_mongodb_multi_cluster_scale_up_cluster_new_cluster +def test_create_mongodb_multi(mongodb_multi: MongoDB): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_scale_up_cluster_new_cluster +def test_statefulsets_have_been_created_correctly( + 
mongodb_multi: MongoDB, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulsets_have_been_created_correctly(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodb_multi_cluster_scale_up_cluster_new_cluster +def test_ops_manager_has_been_updated_correctly_before_scaling(): + testhelper.test_ops_manager_has_been_updated_correctly_before_scaling() + + +@pytest.mark.e2e_mongodb_multi_cluster_scale_up_cluster_new_cluster +def test_delete_deployment(namespace: str, central_cluster_client: kubernetes.client.ApiClient): + testhelper.test_delete_deployment(namespace, central_cluster_client) + + +@pytest.mark.e2e_mongodb_multi_cluster_scale_up_cluster_new_cluster +def test_re_deploy_operator( + install_multi_cluster_operator_set_members_fn: Callable[[List[str]], Operator], + member_cluster_names: List[str], + namespace: str, +): + testhelper.test_re_deploy_operator(install_multi_cluster_operator_set_members_fn, member_cluster_names, namespace) + + +@pytest.mark.e2e_mongodb_multi_cluster_scale_up_cluster_new_cluster +def test_add_new_cluster_to_mongodb_multi_resource( + mongodb_multi: MongoDB, member_cluster_clients: List[MultiClusterClient] +): + testhelper.test_add_new_cluster_to_mongodb_multi_resource(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodb_multi_cluster_scale_up_cluster_new_cluster +def test_statefulsets_have_been_created_correctly_after_cluster_addition( + mongodb_multi: MongoDB, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulsets_have_been_created_correctly_after_cluster_addition( + mongodb_multi, member_cluster_clients + ) + + +@pytest.mark.e2e_mongodb_multi_cluster_scale_up_cluster_new_cluster +def test_ops_manager_has_been_updated_correctly_after_scaling(): + testhelper.test_ops_manager_has_been_updated_correctly_after_scaling() + + +@skip_if_local +@pytest.mark.e2e_mongodb_multi_cluster_scale_up_cluster_new_cluster +def 
test_replica_set_is_reachable(mongodb_multi: MongoDB, ca_path: str): + testhelper.test_replica_set_is_reachable(mongodb_multi, ca_path) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_scram.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_scram.py new file mode 100644 index 000000000..fcce82c03 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_scram.py @@ -0,0 +1,144 @@ +from typing import List + +import kubernetes +import pytest +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDB +from kubetester.mongodb_user import MongoDBUser +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_scram as testhelper + +MDB_RESOURCE = "multi-replica-set-scram" +USER_NAME = "my-user-1" +USER_RESOURCE = "multi-replica-set-scram-user" +PASSWORD_SECRET_NAME = "mms-user-1-password" + + +@pytest.fixture(scope="function") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names, + custom_mdb_version: str, +) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + resource.set_version(custom_mdb_version) + + resource["spec"]["security"] = { + "authentication": { + "agents": {"mode": "MONGODB-CR"}, + "enabled": True, + "modes": ["SCRAM-SHA-1", "SCRAM-SHA-256", "MONGODB-CR"], + } + } + + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + return resource + + +@pytest.fixture(scope="function") +def mongodb_user(central_cluster_client: kubernetes.client.ApiClient, namespace: str) -> MongoDBUser: + resource = 
MongoDBUser.from_yaml(yaml_fixture("scram-sha-user.yaml"), USER_RESOURCE, namespace) + + resource["spec"]["username"] = USER_NAME + resource["spec"]["passwordSecretKeyRef"] = { + "name": PASSWORD_SECRET_NAME, + "key": "password", + } + resource["spec"]["mongodbResourceRef"]["name"] = MDB_RESOURCE + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + return resource + + +@pytest.mark.e2e_mongodb_multi_cluster_scram +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodb_multi_cluster_scram +def test_create_mongodb_user( + central_cluster_client: kubernetes.client.ApiClient, + mongodb_user: MongoDBUser, + namespace: str, +): + testhelper.test_create_mongodb_user(central_cluster_client, mongodb_user, namespace) + + +@pytest.mark.e2e_mongodb_multi_cluster_scram +def test_create_mongodb_multi_with_scram(mongodb_multi: MongoDB): + testhelper.test_create_mongodb_multi_with_scram(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_scram +def test_user_reaches_updated( + central_cluster_client: kubernetes.client.ApiClient, + mongodb_user: MongoDBUser, +): + testhelper.test_user_reaches_updated(central_cluster_client, mongodb_user) + + +@pytest.mark.e2e_mongodb_multi_cluster_scram +def test_replica_set_connectivity_using_user_password(mongodb_multi: MongoDB): + testhelper.test_replica_set_connectivity_using_user_password(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_scram +def test_change_password_and_check_connectivity( + namespace: str, + mongodb_multi: MongoDB, + central_cluster_client: kubernetes.client.ApiClient, +): + testhelper.test_change_password_and_check_connectivity(namespace, mongodb_multi, central_cluster_client) + + +@pytest.mark.e2e_mongodb_multi_cluster_scram +def test_user_cannot_authenticate_with_old_password(mongodb_multi: MongoDB): + testhelper.test_user_cannot_authenticate_with_old_password(mongodb_multi) + + 
+@pytest.mark.e2e_mongodb_multi_cluster_scram +def test_connection_string_secret_was_created( + namespace: str, + mongodb_multi: MongoDB, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_connection_string_secret_was_created(namespace, mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodb_multi_cluster_scram +def test_om_configured_correctly(): + testhelper.test_om_configured_correctly() + + +@pytest.mark.e2e_mongodb_multi_cluster_scram +def test_replica_set_connectivity(mongodb_multi: MongoDB): + testhelper.test_replica_set_connectivity(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_scram +def test_replica_set_connectivity_from_connection_string_standard( + namespace: str, + mongodb_multi: MongoDB, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_replica_set_connectivity_from_connection_string_standard( + namespace, mongodb_multi, member_cluster_clients + ) + + +@pytest.mark.e2e_mongodb_multi_cluster_scram +def test_replica_set_connectivity_from_connection_string_standard_srv( + namespace: str, + mongodb_multi: MongoDB, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_replica_set_connectivity_from_connection_string_standard_srv( + namespace, mongodb_multi, member_cluster_clients + ) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_split_horizon.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_split_horizon.py new file mode 100644 index 000000000..9bebc7ac1 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_split_horizon.py @@ -0,0 +1,121 @@ +from typing import List + +import kubernetes +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_local +from kubetester.mongodb_multi import MongoDB +from 
kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from pytest import fixture, mark + +from ..shared import multi_cluster_split_horizon as testhelper + +CERT_SECRET_PREFIX = "clustercert" +MDB_RESOURCE = "multi-cluster-replica-set" +BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" + +# This test will set up an environment which will configure a resource with split horizon enabled. +# Steps to run this test. + +# 1. Change the nodenames under "additional_domains" +# 2. Run this test with: `make e2e test=e2e_mongodb_multi_cluster_split_horizon light=true local=true`. +# 3. Wait for the test to pass (this means the environment is set up.) +# 4. Exec into any database pod and note the contents of the files referenced by the fields +# * net.tls.certificateKeyFile +# * net.tlsCAFile +# from the /data/automation-mongod.conf file. + +# 5. Test the connection +# Testing the connection can be done from either the worker node or from your local machine (note that accessing traffic from a pod inside the cluster would work irrespective of whether SH is configured correctly or not) +# 1. Accessing from worker node +# * ssh into any worker node +# * Install the mongo shell +# * Create files from the two files mentioned above. (server.pem and ca.crt) +# * Run "mongo "mongodb://${WORKER_NODE}:30100,${WORKER_NODE}:30101,${WORKER_NODE}:30102/?replicaSet=test-tls-base-rs-external-access" --tls --tlsCertificateKeyFile server.pem --tlsCAFile ca.crt" +# 2. Accessing from local machine +# * Install the mongo shell +# * Create files from the two files mentioned above.
(server.pem and ca.crt) +# * Open access to KOPS nodes from your local machine by following these steps (by default KOPS doesn't expose traffic from all ports to the internet) +# : https://stackoverflow.com/questions/45543694/kubernetes-cluster-on-aws-with-kops-nodeport-service-unavailable/45561848#45561848 +# * Run "mongo "mongodb://${WORKER_NODE1}:30100,${WORKER_NODE2}:30101,${WORKER_NODE3}:30102/?replicaSet=test-tls-base-rs-external-access" --tls --tlsCertificateKeyFile server.pem --tlsCAFile ca.crt" +# When split horizon is not configured, specifying the replicaset name should fail. +# When split horizon is configured, it will successfully connect to the primary. + +# Example: mongo "mongodb://ec2-35-178-71-70.eu-west-2.compute.amazonaws.com:30100,ec2-52-56-69-123.eu-west-2.compute.amazonaws.com:30100,ec2-3-10-22-163.eu-west-2.compute.amazonaws.com:30100" --tls --tlsCertificateKeyFile server.pem --tlsCAFile ca.crt + +# 6. Clean the namespace +# * This test creates node ports, which we should delete.
+ + +@fixture(scope="module") +def mongodb_multi_unmarshalled(namespace: str) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi-split-horizon.yaml"), MDB_RESOURCE, namespace) + return resource + + +@fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDB, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + + return create_multi_cluster_mongodb_tls_certs( + multi_cluster_issuer, + BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + additional_domains=[ + "*", + "ec2-35-178-71-70.eu-west-2.compute.amazonaws.com", + "ec2-52-56-69-123.eu-west-2.compute.amazonaws.com", + "ec2-3-10-22-163.eu-west-2.compute.amazonaws.com", + ], + ) + + +@fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + server_certs: str, + mongodb_multi_unmarshalled: MongoDB, + multi_cluster_issuer_ca_configmap: str, +) -> MongoDB: + + resource = mongodb_multi_unmarshalled + resource["spec"]["security"] = { + "certsSecretPrefix": CERT_SECRET_PREFIX, + "tls": { + "enabled": True, + "ca": multi_cluster_issuer_ca_configmap, + }, + } + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource.create() + + +@mark.e2e_mongodb_multi_cluster_split_horizon +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@mark.e2e_mongodb_multi_cluster_split_horizon +def test_deploy_mongodb_multi_with_tls( + mongodb_multi: MongoDB, + namespace: str, +): + testhelper.test_deploy_mongodb_multi_with_tls(mongodb_multi, namespace) + + +@mark.e2e_mongodb_multi_cluster_split_horizon +def test_create_node_ports(mongodb_multi: MongoDB, member_cluster_clients: List[MultiClusterClient]): + testhelper.test_create_node_ports(mongodb_multi, member_cluster_clients) + + +@skip_if_local 
+@mark.e2e_mongodb_multi_cluster_split_horizon +def test_tls_connectivity(mongodb_multi: MongoDB, ca_path: str): + testhelper.test_tls_connectivity(mongodb_multi, ca_path) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_sts_override.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_sts_override.py new file mode 100644 index 000000000..70445df63 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_sts_override.py @@ -0,0 +1,59 @@ +from typing import List + +import kubernetes +import pytest +from kubernetes import client +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDB +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator + +from ..shared import multi_cluster_sts_override as testhelper + +MDB_RESOURCE = "multi-replica-set-sts-override" + + +@pytest.fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDB: + resource = MongoDB.from_yaml( + yaml_fixture("mongodb-multi-sts-override.yaml"), + MDB_RESOURCE, + namespace, + ) + resource.set_version(custom_mdb_version) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource.update() + + +@pytest.mark.e2e_mongodb_multi_sts_override +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodb_multi_sts_override +def test_create_mongodb_multi(mongodb_multi: MongoDB): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_sts_override +def test_statefulset_overrides(mongodb_multi: MongoDB, member_cluster_clients: List[MultiClusterClient]): + testhelper.test_statefulset_overrides(mongodb_multi, 
member_cluster_clients) + + +@pytest.mark.e2e_mongodb_multi_sts_override +def test_access_modes_pvc( + mongodb_multi: MongoDB, + member_cluster_clients: List[MultiClusterClient], + namespace: str, +): + testhelper.test_access_modes_pvc(mongodb_multi, member_cluster_clients, namespace) + + +def assert_container_in_sts(container_name: str, sts: client.V1StatefulSet): + testhelper.assert_container_in_sts(container_name, sts) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_tls_no_mesh.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_tls_no_mesh.py new file mode 100644 index 000000000..1c2510947 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_tls_no_mesh.py @@ -0,0 +1,196 @@ +from typing import List + +import kubernetes +from kubernetes import client +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDB +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from pytest import fixture, mark +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_tls_no_mesh as testhelper + +CERT_SECRET_PREFIX = "clustercert" +MDB_RESOURCE = "multi-cluster-replica-set" +BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" + + +@fixture(scope="module") +def mongodb_multi_unmarshalled(namespace: str, member_cluster_names: List[str], custom_mdb_version: str) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + resource.set_version(custom_mdb_version) + resource["spec"]["persistent"] = False + # These domains map 1:1 to the CoreDNS file. Please be mindful when updating them. 
+ resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 2, 2]) + + resource["spec"]["externalAccess"] = {} + resource["spec"]["clusterSpecList"][0]["externalAccess"] = { + "externalDomain": "kind-e2e-cluster-1.interconnected", + "externalService": { + "spec": { + "type": "LoadBalancer", + "publishNotReadyAddresses": False, + "ports": [ + { + "name": "mongodb", + "port": 27017, + }, + { + "name": "backup", + "port": 27018, + }, + { + "name": "testing0", + "port": 27019, + }, + ], + } + }, + } + resource["spec"]["clusterSpecList"][1]["externalAccess"] = { + "externalDomain": "kind-e2e-cluster-2.interconnected", + "externalService": { + "spec": { + "type": "LoadBalancer", + "publishNotReadyAddresses": False, + "ports": [ + { + "name": "mongodb", + "port": 27017, + }, + { + "name": "backup", + "port": 27018, + }, + { + "name": "testing1", + "port": 27019, + }, + ], + } + }, + } + resource["spec"]["clusterSpecList"][2]["externalAccess"] = { + "externalDomain": "kind-e2e-cluster-3.interconnected", + "externalService": { + "spec": { + "type": "LoadBalancer", + "publishNotReadyAddresses": False, + "ports": [ + { + "name": "mongodb", + "port": 27017, + }, + { + "name": "backup", + "port": 27018, + }, + { + "name": "testing2", + "port": 27019, + }, + ], + } + }, + } + + return resource + + +@fixture(scope="module") +def disable_istio( + multi_cluster_operator: Operator, + namespace: str, + member_cluster_clients: List[MultiClusterClient], +): + for mcc in member_cluster_clients: + api = client.CoreV1Api(api_client=mcc.api_client) + labels = {"istio-injection": "disabled"} + ns = api.read_namespace(name=namespace) + ns.metadata.labels.update(labels) + api.replace_namespace(name=namespace, body=ns) + return None + + +@fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + disable_istio, + namespace: str, + mongodb_multi_unmarshalled: MongoDB, + multi_cluster_issuer_ca_configmap: str, +) -> MongoDB: + 
mongodb_multi_unmarshalled["spec"]["security"] = { + "certsSecretPrefix": CERT_SECRET_PREFIX, + "tls": { + "ca": multi_cluster_issuer_ca_configmap, + }, + } + mongodb_multi_unmarshalled.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + return mongodb_multi_unmarshalled.update() + + +@fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDB, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + return create_multi_cluster_mongodb_tls_certs( + multi_cluster_issuer, + BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + ) + + +@mark.e2e_mongodb_multi_cluster_tls_no_mesh +def test_update_coredns(cluster_clients: dict[str, kubernetes.client.ApiClient]): + testhelper.test_update_coredns(cluster_clients) + + +@mark.e2e_mongodb_multi_cluster_tls_no_mesh +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@mark.e2e_mongodb_multi_cluster_tls_no_mesh +def test_create_mongodb_multi( + mongodb_multi: MongoDB, + namespace: str, + server_certs: str, + multi_cluster_issuer_ca_configmap: str, + member_cluster_clients: List[MultiClusterClient], + member_cluster_names: List[str], +): + testhelper.test_create_mongodb_multi( + mongodb_multi, + namespace, + server_certs, + multi_cluster_issuer_ca_configmap, + member_cluster_clients, + member_cluster_names, + ) + + +@mark.e2e_mongodb_multi_cluster_tls_no_mesh +def test_service_overrides( + namespace: str, + mongodb_multi: MongoDB, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_service_overrides(namespace, mongodb_multi, member_cluster_clients) + + +@mark.e2e_mongodb_multi_cluster_tls_no_mesh +def test_placeholders_in_external_services( + namespace: str, + mongodb_multi: MongoDB, + member_cluster_clients: List[MultiClusterClient], +): + 
testhelper.test_placeholders_in_external_services(namespace, mongodb_multi, member_cluster_clients) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_tls_with_scram.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_tls_with_scram.py new file mode 100644 index 000000000..dd3018026 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_tls_with_scram.py @@ -0,0 +1,175 @@ +from typing import List + +import kubernetes +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import ensure_ent_version +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_local +from kubetester.mongodb_multi import MongoDB +from kubetester.mongodb_user import MongoDBUser +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from pytest import fixture, mark +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_tls_with_scram as testhelper + +CERT_SECRET_PREFIX = "clustercert" +MDB_RESOURCE = "multi-cluster-replica-set" +BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" +USER_NAME = "my-user-1" +USER_RESOURCE = "multi-replica-set-scram-user" +PASSWORD_SECRET_NAME = "mms-user-1-password" + + +@fixture(scope="module") +def mongodb_multi_unmarshalled( + namespace: str, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + resource.set_version(ensure_ent_version(custom_mdb_version)) + resource["spec"]["clusterSpecList"] = cluster_spec_list( + member_cluster_names=member_cluster_names, members=[2, 1, 2] + ) + + return resource + + +@fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDB, + 
member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + + return create_multi_cluster_mongodb_tls_certs( + multi_cluster_issuer, + BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + ) + + +@fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + server_certs: str, + mongodb_multi_unmarshalled: MongoDB, + multi_cluster_issuer_ca_configmap: str, +) -> MongoDB: + + resource = mongodb_multi_unmarshalled + resource["spec"]["security"] = { + "certsSecretPrefix": CERT_SECRET_PREFIX, + "tls": { + "ca": multi_cluster_issuer_ca_configmap, + }, + } + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource.create() + + +@fixture(scope="module") +def mongodb_user(central_cluster_client: kubernetes.client.ApiClient, namespace: str) -> MongoDBUser: + resource = MongoDBUser.from_yaml(yaml_fixture("mongodb-user.yaml"), USER_RESOURCE, namespace) + + resource["spec"]["username"] = USER_NAME + resource["spec"]["passwordSecretKeyRef"] = { + "name": PASSWORD_SECRET_NAME, + "key": "password", + } + resource["spec"]["mongodbResourceRef"]["name"] = MDB_RESOURCE + resource["spec"]["mongodbResourceRef"]["namespace"] = namespace + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource.create() + + +@mark.e2e_mongodb_multi_cluster_tls_with_scram +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@mark.e2e_mongodb_multi_cluster_tls_with_scram +def test_deploy_mongodb_multi_with_tls( + mongodb_multi: MongoDB, + namespace: str, + member_cluster_clients: List[MultiClusterClient], +): + + testhelper.test_deploy_mongodb_multi_with_tls(mongodb_multi, namespace, member_cluster_clients) + + +@mark.e2e_mongodb_multi_cluster_tls_with_scram +def test_update_mongodb_multi_tls_with_scram( + mongodb_multi: MongoDB, 
+ namespace: str, +): + + testhelper.test_update_mongodb_multi_tls_with_scram(mongodb_multi, namespace) + + +@mark.e2e_mongodb_multi_cluster_tls_with_scram +def test_create_mongodb_user( + central_cluster_client: kubernetes.client.ApiClient, + mongodb_user: MongoDBUser, + namespace: str, +): + testhelper.test_create_mongodb_user(central_cluster_client, mongodb_user, namespace) + + +@skip_if_local +@mark.e2e_mongodb_multi_cluster_tls_with_scram +def test_tls_connectivity(mongodb_multi: MongoDB, ca_path: str): + testhelper.test_tls_connectivity(mongodb_multi, ca_path) + + +@skip_if_local +@mark.e2e_mongodb_multi_cluster_tls_with_scram +def test_replica_set_connectivity_with_scram_and_tls(mongodb_multi: MongoDB, ca_path: str): + testhelper.test_replica_set_connectivity_with_scram_and_tls(mongodb_multi, ca_path) + + +@skip_if_local +@mark.e2e_mongodb_multi_cluster_tls_with_scram +def test_replica_set_connectivity_from_connection_string_standard( + namespace: str, + mongodb_multi: MongoDB, + member_cluster_clients: List[MultiClusterClient], + ca_path: str, +): + testhelper.test_replica_set_connectivity_from_connection_string_standard( + namespace, mongodb_multi, member_cluster_clients, ca_path + ) + + +@skip_if_local +@mark.e2e_mongodb_multi_cluster_tls_with_scram +def test_replica_set_connectivity_from_connection_string_standard_srv( + namespace: str, + mongodb_multi: MongoDB, + member_cluster_clients: List[MultiClusterClient], + ca_path: str, +): + testhelper.test_replica_set_connectivity_from_connection_string_standard_srv( + namespace, mongodb_multi, member_cluster_clients, ca_path + ) + + +@mark.e2e_mongodb_multi_cluster_tls_with_scram +def test_mongodb_multi_tls_enable_x509( + mongodb_multi: MongoDB, + namespace: str, +): + testhelper.test_mongodb_multi_tls_enable_x509(mongodb_multi, namespace) + + +@mark.e2e_mongodb_multi_cluster_tls_with_scram +def test_mongodb_multi_tls_automation_config_was_updated( + mongodb_multi: MongoDB, + namespace: str, +): + 
testhelper.test_mongodb_multi_tls_automation_config_was_updated(mongodb_multi, namespace) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_tls_with_x509.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_tls_with_x509.py new file mode 100644 index 000000000..4a32fc590 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_tls_with_x509.py @@ -0,0 +1,173 @@ +from typing import List + +import kubernetes +from kubetester.certs_mongodb_multi import ( + create_multi_cluster_mongodb_x509_tls_certs, + create_multi_cluster_x509_agent_certs, +) +from kubetester.kubetester import ensure_ent_version +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_local +from kubetester.mongodb_multi import MongoDB +from kubetester.mongodb_user import MongoDBUser +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from pytest import fixture, mark +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_tls_with_x509 as testhelper + +# TODO This test needs to re-introduce certificate rotation and enabling authentication step by step +# See https://jira.mongodb.org/browse/CLOUDP-311366 + +CERT_SECRET_PREFIX = "clustercert" +MDB_RESOURCE = "multi-cluster-replica-set" +BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" +AGENT_BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-agent-certs" +CLUSTER_BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-clusterfile" + + +@fixture(scope="module") +def mongodb_multi_unmarshalled(namespace: str, member_cluster_names, custom_mdb_version: str) -> MongoDB: + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + resource.set_version(ensure_ent_version(custom_mdb_version)) + + resource["spec"]["clusterSpecList"] = 
cluster_spec_list(member_cluster_names, [2, 1, 2]) + resource["spec"]["additionalMongodConfig"] = {"net": {"tls": {"mode": "requireTLS"}}} + + return resource + + +@fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDB, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + + return create_multi_cluster_mongodb_x509_tls_certs( + multi_cluster_issuer, + BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + ) + + +@fixture(scope="module") +def cluster_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDB, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + + return create_multi_cluster_mongodb_x509_tls_certs( + multi_cluster_issuer, + CLUSTER_BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + ) + + +@fixture(scope="module") +def agent_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDB, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + return create_multi_cluster_x509_agent_certs( + multi_cluster_issuer, + AGENT_BUNDLE_SECRET_NAME, + central_cluster_client, + mongodb_multi_unmarshalled, + ) + + +@fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + server_certs: str, + agent_certs: str, + cluster_certs: str, + mongodb_multi_unmarshalled: MongoDB, + multi_cluster_issuer_ca_configmap: str, + member_cluster_names, +) -> MongoDB: + + resource = mongodb_multi_unmarshalled + resource["spec"]["security"] = { + "certsSecretPrefix": CERT_SECRET_PREFIX, + "tls": { + "ca": multi_cluster_issuer_ca_configmap, + }, + "authentication": { + "enabled": True, + "modes": ["X509", "SCRAM"], + "agents": {"mode": "X509"}, + "internalCluster": "X509", + }, + } + + 
resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + resource.update() + + return resource + + +@fixture(scope="module") +def mongodb_x509_user(central_cluster_client: kubernetes.client.ApiClient, namespace: str) -> MongoDBUser: + resource = MongoDBUser.from_yaml(yaml_fixture("mongodb-x509-user.yaml"), "multi-replica-set-x509-user", namespace) + resource["spec"]["mongodbResourceRef"]["name"] = MDB_RESOURCE + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + resource.update() + + return resource + + +@mark.e2e_mongodb_multi_cluster_tls_with_x509 +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@mark.e2e_mongodb_multi_cluster_tls_with_x509 +def test_deploy_mongodb_multi_with_tls_and_authentication(mongodb_multi: MongoDB, namespace: str): + testhelper.test_deploy_mongodb_multi_with_tls_and_authentication(mongodb_multi, namespace) + + +@mark.e2e_mongodb_multi_cluster_tls_with_x509 +def test_ops_manager_state_was_updated_correctly(mongodb_multi: MongoDB): + testhelper.test_ops_manager_state_was_updated_correctly(mongodb_multi) + + +@mark.e2e_mongodb_multi_cluster_tls_with_x509 +def test_create_mongodb_x509_user( + central_cluster_client: kubernetes.client.ApiClient, + mongodb_x509_user: MongoDBUser, + namespace: str, +): + testhelper.test_create_mongodb_x509_user(central_cluster_client, mongodb_x509_user, namespace) + + +@skip_if_local +@mark.e2e_mongodb_multi_cluster_tls_with_x509 +def test_x509_user_connectivity( + mongodb_multi: MongoDB, + central_cluster_client: kubernetes.client.ApiClient, + multi_cluster_issuer: str, + namespace: str, + ca_path: str, +): + testhelper.test_x509_user_connectivity( + mongodb_multi, central_cluster_client, multi_cluster_issuer, namespace, ca_path + ) + + +# TODO Replace and use this method to check that certificate rotation after enabling TLS and authentication mechanisms +# keeps the resources reachable and in 
Running state. +def assert_certificate_rotation(central_cluster_client, mongodb_multi, namespace, certificate_name): + testhelper.assert_certificate_rotation(central_cluster_client, mongodb_multi, namespace, certificate_name) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_upgrade_downgrade.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_upgrade_downgrade.py new file mode 100644 index 000000000..5d4df6186 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_upgrade_downgrade.py @@ -0,0 +1,85 @@ +import kubernetes +import pymongo +import pytest +from kubetester import try_load +from kubetester.kubetester import ensure_ent_version +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDB +from kubetester.mongotester import MongoDBBackgroundTester +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_upgrade_downgrade as testhelper + +MDBM_RESOURCE = "multi-replica-set-upgrade" + + +@pytest.fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names: list[str], + custom_mdb_prev_version: str, +) -> MongoDB: + + resource = MongoDB.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDBM_RESOURCE, namespace) + resource.set_version(ensure_ent_version(custom_mdb_prev_version)) + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + try_load(resource) + return resource + + +@pytest.fixture(scope="module") +def mdb_health_checker(mongodb_multi: MongoDB) -> MongoDBBackgroundTester: + return MongoDBBackgroundTester( + mongodb_multi.tester(), + allowed_sequential_failures=1, + health_function_params={ + "attempts": 1, + 
"write_concern": pymongo.WriteConcern(w="majority"), + }, + ) + + +@pytest.mark.e2e_mongodb_multi_cluster_upgrade_downgrade +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodb_multi_cluster_upgrade_downgrade +def test_create_mongodb_multi_running(mongodb_multi: MongoDB, custom_mdb_prev_version: str): + testhelper.test_create_mongodb_multi_running(mongodb_multi, custom_mdb_prev_version) + + +@pytest.mark.e2e_mongodb_multi_cluster_upgrade_downgrade +def test_start_background_checker(mdb_health_checker: MongoDBBackgroundTester): + testhelper.test_start_background_checker(mdb_health_checker) + + +@pytest.mark.e2e_mongodb_multi_cluster_upgrade_downgrade +def test_mongodb_multi_upgrade(mongodb_multi: MongoDB, custom_mdb_prev_version: str, custom_mdb_version: str): + testhelper.test_mongodb_multi_upgrade(mongodb_multi, custom_mdb_prev_version, custom_mdb_version) + + +@pytest.mark.e2e_mongodb_multi_cluster_upgrade_downgrade +def test_upgraded_replica_set_is_reachable(mongodb_multi: MongoDB): + testhelper.test_upgraded_replica_set_is_reachable(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_upgrade_downgrade +def test_mongodb_multi_downgrade(mongodb_multi: MongoDB, custom_mdb_prev_version: str): + testhelper.test_mongodb_multi_downgrade(mongodb_multi, custom_mdb_prev_version) + + +@pytest.mark.e2e_mongodb_multi_cluster_upgrade_downgrade +def test_downgraded_replica_set_is_reachable(mongodb_multi: MongoDB): + testhelper.test_downgraded_replica_set_is_reachable(mongodb_multi) + + +@pytest.mark.e2e_mongodb_multi_cluster_upgrade_downgrade +def test_mdb_healthy_throughout_change_version( + mdb_health_checker: MongoDBBackgroundTester, +): + testhelper.test_mdb_healthy_throughout_change_version(mdb_health_checker) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_validation.py 
b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_validation.py new file mode 100644 index 000000000..9057500c1 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodb/mongodb_multi_cluster_validation.py @@ -0,0 +1,22 @@ +import kubernetes +import pytest +from kubetester.kubetester import KubernetesTester +from kubetester.operator import Operator +from tests.multicluster.shared import multi_cluster_validation as testhelper + +MDBM_RESOURCE = "mongodb-multi-cluster.yaml" + + +@pytest.mark.e2e_mongodb_multi_cluster_validation +class TestWebhookValidation(KubernetesTester): + def test_deploy_operator(self, multi_cluster_operator: Operator): + testhelper.TestWebhookValidation.test_deploy_operator(self, multi_cluster_operator) + + def test_unique_cluster_names(self, central_cluster_client: kubernetes.client.ApiClient): + testhelper.TestWebhookValidation.test_unique_cluster_names(self, central_cluster_client, MDBM_RESOURCE) + + def test_only_one_schema(self, central_cluster_client: kubernetes.client.ApiClient): + testhelper.TestWebhookValidation.test_only_one_schema(self, central_cluster_client, MDBM_RESOURCE) + + def test_non_empty_clusterspec_list(self, central_cluster_client: kubernetes.client.ApiClient): + testhelper.TestWebhookValidation.test_non_empty_clusterspec_list(self, central_cluster_client, MDBM_RESOURCE) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/__init__.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodb-user.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodb-user.yaml new file mode 100644 index 000000000..bda215813 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodb-user.yaml @@ 
-0,0 +1,20 @@ +apiVersion: mongodb.com/v1 +kind: MongoDBUser +metadata: + name: mms-user-1 +spec: + passwordSecretKeyRef: + name: mms-user-1-password + key: password + username: "mms-user-1" + db: "admin" + mongodbResourceRef: + name: "multi-replica-set" + namespace: + roles: + - db: "admin" + name: "clusterAdmin" + - db: "admin" + name: "userAdminAnyDatabase" + - db: "admin" + name: "readWrite" diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodb-x509-user.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodb-x509-user.yaml new file mode 100644 index 000000000..3e7f09b9e --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodb-x509-user.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: mongodb.com/v1 +kind: MongoDBUser +metadata: + name: test-x509-user +spec: + username: 'CN=x509-testing-user' + db: '$external' + mongodbResourceRef: + name: "multi-replica-set" + roles: + - db: "admin" + name: "clusterAdmin" + - db: "admin" + name: "userAdminAnyDatabase" + - db: "admin" + name: "readWrite" diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-central-sts-override.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-central-sts-override.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-central-sts-override.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-central-sts-override.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-cluster.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-cluster.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-cluster.yaml 
rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-cluster.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-dr.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-dr.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-dr.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-dr.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-pvc-resize.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-pvc-resize.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-pvc-resize.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-pvc-resize.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-split-horizon.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-split-horizon.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-split-horizon.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-split-horizon.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-sts-override.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-sts-override.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-sts-override.yaml rename to 
docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-sts-override.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-split-horizon-node-port.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-split-horizon-node-port.yaml new file mode 100644 index 000000000..0fa05bc18 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-split-horizon-node-port.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: my-service + labels: + controller: mongodb-enterprise-operator +spec: + type: NodePort + selector: + controller: mongodb-enterprise-operator + ports: + - port: 27017 + targetPort: 27017 + nodePort: 30007 diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/oidc/mongodb-multi-m2m-group.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/oidc/mongodbmulticluster-multi-m2m-group.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/oidc/mongodb-multi-m2m-group.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/oidc/mongodbmulticluster-multi-m2m-group.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/oidc/mongodb-multi-m2m-user.yaml 
b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/oidc/mongodbmulticluster-multi-m2m-user.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/oidc/mongodb-multi-m2m-user.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/oidc/mongodbmulticluster-multi-m2m-user.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/oidc/oidc-user-multi.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/oidc/oidc-user-multi.yaml new file mode 100644 index 000000000..ebaedbc7f --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/oidc/oidc-user-multi.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: mongodb.com/v1 +kind: MongoDBUser +metadata: + name: oidc-user-1 +spec: + username: "" + db: "$external" + mongodbResourceRef: + name: oidc-multi-replica-set + roles: + - db: "admin" + name: "readWriteAnyDatabase" diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/split-horizon-node-ports/split-horizon-node-port.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/split-horizon-node-ports/mongodbmulticluster-split-horizon-node-port.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/split-horizon-node-ports/split-horizon-node-port.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/split-horizon-node-ports/mongodbmulticluster-split-horizon-node-port.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py similarity index 91% rename from 
docker/mongodb-kubernetes-tests/tests/multicluster/manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py index 0cb42037d..111d90c1e 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py @@ -19,14 +19,14 @@ from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase from pytest import fixture from tests.multicluster.conftest import cluster_spec_list +from ..shared import manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke as testhelper + CERT_SECRET_PREFIX = "clustercert" MDB_RESOURCE = "multi-cluster-rs" BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" -BUNDLE_PEM_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert-pem" @fixture(scope="module") @@ -39,7 +39,7 @@ def cert_additional_domains() -> list[str]: @fixture(scope="module") def mongodb_multi_unmarshalled(namespace: str, member_cluster_names: List[str]) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) resource["spec"]["persistent"] = False # These domains map 1:1 to the CoreDNS file. Please be mindful when updating them. 
resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1]) @@ -130,7 +130,7 @@ def server_certs( def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) def test_create_mongodb_multi( @@ -141,4 +141,11 @@ def test_create_mongodb_multi( member_cluster_clients: List[MultiClusterClient], member_cluster_names: List[str], ): - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=2400) + testhelper.test_create_mongodb_multi( + mongodb_multi, + namespace, + server_certs, + multi_cluster_issuer_ca_configmap, + member_cluster_clients, + member_cluster_names, + ) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_2_cluster_clusterwide_replicaset.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_2_cluster_clusterwide_replicaset.py similarity index 66% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_2_cluster_clusterwide_replicaset.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_2_cluster_clusterwide_replicaset.py index 277f3fc9f..0a4131db8 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_2_cluster_clusterwide_replicaset.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_2_cluster_clusterwide_replicaset.py @@ -4,9 +4,6 @@ import pytest from kubetester import ( create_or_update_configmap, - create_or_update_secret, - read_configmap, - read_secret, ) from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs from kubetester.kubetester import ensure_ent_version @@ -14,14 +11,12 @@ from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase +from 
tests.multicluster.conftest import cluster_spec_list -from . import prepare_multi_cluster_namespaces -from .conftest import cluster_spec_list, create_namespace +from ..shared import multi_2_cluster_clusterwide_replicaset as testhelper CERT_SECRET_PREFIX = "clustercert" -MDB_RESOURCE = "multi-cluster-replica-set" -BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" +MDB_RESOURCE = "multi-replica-set" @pytest.fixture(scope="module") @@ -41,7 +36,7 @@ def mongodb_multi_a_unmarshalled( member_cluster_names: List[str], custom_mdb_version: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", mdba_ns) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, mdba_ns) resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1]) resource.set_version(ensure_ent_version(custom_mdb_version)) @@ -58,7 +53,7 @@ def mongodb_multi_b_unmarshalled( member_cluster_names: List[str], custom_mdb_version: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", mdbb_ns) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), "multi-replica-set", mdbb_ns) resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1]) resource.set_version(ensure_ent_version(custom_mdb_version)) @@ -161,16 +156,12 @@ def mongodb_multi_b( return resource -@pytest.mark.e2e_multi_cluster_2_clusters_clusterwide +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide def test_create_kube_config_file(cluster_clients: Dict, member_cluster_names: List[str]): - clients = cluster_clients + testhelper.test_create_kube_config_file(cluster_clients, member_cluster_names) - assert len(clients) == 2 - assert member_cluster_names[0] in clients - assert member_cluster_names[1] in clients - -@pytest.mark.e2e_multi_cluster_2_clusters_clusterwide 
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide def test_create_namespaces( namespace: str, mdba_ns: str, @@ -180,34 +171,23 @@ def test_create_namespaces( evergreen_task_id: str, multi_cluster_operator_installation_config: Dict[str, str], ): - image_pull_secret_name = multi_cluster_operator_installation_config["registry.imagePullSecrets"] - image_pull_secret_data = read_secret(namespace, image_pull_secret_name, api_client=central_cluster_client) - - create_namespace( - central_cluster_client, - member_cluster_clients, - evergreen_task_id, + testhelper.test_create_namespaces( + namespace, mdba_ns, - image_pull_secret_name, - image_pull_secret_data, - ) - - create_namespace( + mdbb_ns, central_cluster_client, member_cluster_clients, evergreen_task_id, - mdbb_ns, - image_pull_secret_name, - image_pull_secret_data, + multi_cluster_operator_installation_config, ) -@pytest.mark.e2e_multi_cluster_2_clusters_clusterwide +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide def test_deploy_operator(multi_cluster_operator_clustermode: Operator): - multi_cluster_operator_clustermode.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator_clustermode) -@pytest.mark.e2e_multi_cluster_2_clusters_clusterwide +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide def test_prepare_namespace( multi_cluster_operator_installation_config: Dict[str, str], member_cluster_clients: List[MultiClusterClient], @@ -215,24 +195,12 @@ def test_prepare_namespace( mdba_ns: str, mdbb_ns: str, ): - prepare_multi_cluster_namespaces( - mdba_ns, - multi_cluster_operator_installation_config, - member_cluster_clients, - central_cluster_name, - skip_central_cluster=False, - ) - - prepare_multi_cluster_namespaces( - mdbb_ns, - multi_cluster_operator_installation_config, - member_cluster_clients, - central_cluster_name, - skip_central_cluster=False, + testhelper.test_prepare_namespace( + 
multi_cluster_operator_installation_config, member_cluster_clients, central_cluster_name, mdba_ns, mdbb_ns ) -@pytest.mark.e2e_multi_cluster_2_clusters_clusterwide +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide def test_copy_configmap_and_secret_across_ns( namespace: str, central_cluster_client: kubernetes.client.ApiClient, @@ -240,35 +208,21 @@ def test_copy_configmap_and_secret_across_ns( mdba_ns: str, mdbb_ns: str, ): - data = read_configmap(namespace, "my-project", api_client=central_cluster_client) - data["projectName"] = mdba_ns - create_or_update_configmap(mdba_ns, "my-project", data, api_client=central_cluster_client) - - data["projectName"] = mdbb_ns - create_or_update_configmap(mdbb_ns, "my-project", data, api_client=central_cluster_client) - - data = read_secret(namespace, "my-credentials", api_client=central_cluster_client) - create_or_update_secret(mdba_ns, "my-credentials", data, api_client=central_cluster_client) - create_or_update_secret(mdbb_ns, "my-credentials", data, api_client=central_cluster_client) + testhelper.test_copy_configmap_and_secret_across_ns( + namespace, central_cluster_client, multi_cluster_operator_installation_config, mdba_ns, mdbb_ns + ) -@pytest.mark.e2e_multi_cluster_2_clusters_clusterwide +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide def test_create_mongodb_multi_nsa(mongodb_multi_a: MongoDBMulti): - mongodb_multi_a.assert_reaches_phase(Phase.Running, timeout=800) + testhelper.test_create_mongodb_multi_nsa(mongodb_multi_a) -@pytest.mark.e2e_multi_cluster_2_clusters_clusterwide +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide def test_enable_mongodb_multi_nsa_auth(mongodb_multi_a: MongoDBMulti): - mongodb_multi_a.reload() - mongodb_multi_a["spec"]["authentication"] = ( - { - "agents": {"mode": "SCRAM"}, - "enabled": True, - "modes": ["SCRAM"], - }, - ) + testhelper.test_enable_mongodb_multi_nsa_auth(mongodb_multi_a) 
-@pytest.mark.e2e_multi_cluster_2_clusters_clusterwide +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide def test_create_mongodb_multi_nsb(mongodb_multi_b: MongoDBMulti): - mongodb_multi_b.assert_reaches_phase(Phase.Running, timeout=800) + testhelper.test_create_mongodb_multi_nsb(mongodb_multi_b) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_2_cluster_replicaset.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_2_cluster_replicaset.py similarity index 65% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_2_cluster_replicaset.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_2_cluster_replicaset.py index fd3d273aa..3198d8cce 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_2_cluster_replicaset.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_2_cluster_replicaset.py @@ -7,12 +7,11 @@ from kubetester.kubetester import fixture as yaml_fixture from kubetester.kubetester import skip_if_local from kubetester.mongodb_multi import MongoDBMulti -from kubetester.mongotester import with_tls from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase +from tests.multicluster.conftest import cluster_spec_list -from .conftest import cluster_spec_list +from ..shared import multi_2_cluster_replicaset as testhelper CERT_SECRET_PREFIX = "clustercert" MDB_RESOURCE = "multi-cluster-replica-set" @@ -23,7 +22,7 @@ def mongodb_multi_unmarshalled( namespace: str, member_cluster_names: List[str], custom_mdb_version: str ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) 
resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1]) resource.set_version(ensure_ent_version(custom_mdb_version)) return resource @@ -66,42 +65,30 @@ def mongodb_multi( return resource.create() -@pytest.mark.e2e_multi_cluster_2_clusters_replica_set +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_replica_set def test_create_kube_config_file(cluster_clients: Dict, member_cluster_names: List[str]): - clients = cluster_clients + testhelper.test_create_kube_config_file(cluster_clients, member_cluster_names) - assert len(clients) == 2 - assert member_cluster_names[0] in clients - assert member_cluster_names[1] in clients - -@pytest.mark.e2e_multi_cluster_2_clusters_replica_set +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_replica_set def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) -@pytest.mark.e2e_multi_cluster_2_clusters_replica_set +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_replica_set def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + testhelper.test_create_mongodb_multi(mongodb_multi) -@pytest.mark.e2e_multi_cluster_2_clusters_replica_set +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_replica_set def test_statefulset_is_created_across_multiple_clusters( mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient], ): - statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients) - cluster_one_client = member_cluster_clients[0] - cluster_one_sts = statefulsets[cluster_one_client.cluster_name] - assert cluster_one_sts.status.ready_replicas == 2 - - cluster_two_client = member_cluster_clients[1] - cluster_two_sts = statefulsets[cluster_two_client.cluster_name] - assert cluster_two_sts.status.ready_replicas == 1 + 
testhelper.test_statefulset_is_created_across_multiple_clusters(mongodb_multi, member_cluster_clients) @skip_if_local -@pytest.mark.e2e_multi_cluster_2_clusters_replica_set +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_replica_set def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str): - tester = mongodb_multi.tester() - tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) + testhelper.test_replica_set_is_reachable(mongodb_multi, ca_path) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_agent_flags.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_agent_flags.py new file mode 100644 index 000000000..b267cc19f --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_agent_flags.py @@ -0,0 +1,54 @@ +from typing import List + +import kubernetes +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from pytest import fixture, mark +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_agent_flags as testhelper + +MDB_RESOURCE = "multi-replica-set" + + +@fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi-cluster.yaml"), MDB_RESOURCE, namespace) + resource.set_version(custom_mdb_version) + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + # override agent startup flags + resource["spec"]["agent"] = {"startupOptions": {"logFile": 
"/var/log/mongodb-mms-automation/customLogFile"}} + resource["spec"]["agent"]["logLevel"] = "DEBUG" + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource.update() + + +@mark.e2e_mongodbmulticluster_multi_cluster_agent_flags +def test_create_mongodb_multi(multi_cluster_operator: Operator, mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(multi_cluster_operator, mongodb_multi) + + +@mark.e2e_mongodbmulticluster_multi_cluster_agent_flags +def test_multi_replicaset_has_agent_flags( + namespace: str, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_multi_replicaset_has_agent_flags(namespace, member_cluster_clients) + + +@mark.e2e_mongodbmulticluster_multi_cluster_agent_flags +def test_placeholders_in_external_services( + namespace: str, + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_placeholders_in_external_services(namespace, mongodb_multi, member_cluster_clients) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_automated_disaster_recovery.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_automated_disaster_recovery.py new file mode 100644 index 000000000..207495ce9 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_automated_disaster_recovery.py @@ -0,0 +1,104 @@ +from typing import List + +import kubernetes +from kubeobject import CustomObject +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from pytest import fixture, mark +from tests.multicluster.conftest import ( + cluster_spec_list, +) + +from ..shared import multi_cluster_automated_disaster_recovery as 
testhelper + +MDB_RESOURCE = "multi-replica-set" + + +@fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) + resource.set_version(custom_mdb_version) + resource["spec"]["persistent"] = False + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + return resource + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +def test_label_namespace(namespace: str, central_cluster_client: kubernetes.client.ApiClient): + testhelper.test_label_namespace(namespace, central_cluster_client) + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +def test_create_service_entry(service_entries: List[CustomObject]): + testhelper.test_create_service_entry(service_entries) + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +@mark.e2e_mongodbmulticluster_multi_cluster_multi_disaster_recovery +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +@mark.e2e_mongodbmulticluster_multi_cluster_multi_disaster_recovery +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +def test_update_service_entry_block_failed_cluster_traffic( + namespace: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names: List[str], +): + testhelper.test_update_service_entry_block_failed_cluster_traffic( + namespace, central_cluster_client, member_cluster_names + ) + + 
+@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +def test_mongodb_multi_leaves_running_state( + mongodb_multi: MongoDBMulti, +): + testhelper.test_mongodb_multi_leaves_running_state(mongodb_multi) + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +def test_delete_database_statefulset_in_failed_cluster(mongodb_multi: MongoDBMulti, member_cluster_names: list[str]): + testhelper.test_delete_database_statefulset_in_failed_cluster(mongodb_multi, member_cluster_names) + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +@mark.e2e_mongodbmulticluster_multi_cluster_multi_disaster_recovery +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti): + testhelper.test_replica_set_is_reachable(mongodb_multi) + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +def test_replica_reaches_running(mongodb_multi: MongoDBMulti): + testhelper.test_replica_reaches_running(mongodb_multi) + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +@mark.e2e_mongodbmulticluster_multi_cluster_multi_disaster_recovery +def test_number_numbers_in_ac(mongodb_multi: MongoDBMulti): + testhelper.test_number_numbers_in_ac(mongodb_multi) + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +def test_sts_count_in_member_cluster( + mongodb_multi: MongoDBMulti, + member_cluster_names: list[str], + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_sts_count_in_member_cluster(mongodb_multi, member_cluster_names, member_cluster_clients) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_backup_restore.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore.py similarity index 66% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_backup_restore.py rename to 
docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore.py index 981df49d4..3d7ce4bf6 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_backup_restore.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore.py @@ -1,17 +1,12 @@ -import datetime -import time from typing import Dict, List, Optional import kubernetes import kubernetes.client import pymongo import pytest -from kubernetes import client from kubetester import ( create_or_update_configmap, create_or_update_secret, - get_default_storage_class, - read_service, try_load, ) from kubetester.certs import create_ops_manager_tls_certs @@ -25,20 +20,14 @@ from kubetester.omtester import OMTester from kubetester.operator import Operator from kubetester.opsmanager import MongoDBOpsManager -from kubetester.phase import Phase from pytest import fixture, mark from tests.conftest import ( - assert_data_got_restored, - update_coredns_hosts, wait_for_primary, ) -TEST_DATA = {"_id": "unique_id", "name": "John", "address": "Highway 37", "age": 30} +from ..shared import multi_cluster_backup_restore as testhelper MONGODB_PORT = 30000 - - -HEAD_PATH = "/head/" OPLOG_RS_NAME = "my-mongodb-oplog" BLOCKSTORE_RS_NAME = "my-mongodb-blockstore" USER_PASSWORD = "/qwerty@!#:" @@ -67,18 +56,6 @@ def ops_manager_certs( ) -def create_project_config_map(om: MongoDBOpsManager, mdb_name, project_name, client, custom_ca): - name = f"{mdb_name}-config" - data = { - "baseUrl": om.om_status().get_url(), - "projectName": project_name, - "sslMMSCAConfigMap": custom_ca, - "orgId": "", - } - - create_or_update_configmap(om.namespace, name, data, client) - - def new_om_data_store( mdb: MongoDB, id: str, @@ -139,7 +116,7 @@ def oplog_replica_set( name=OPLOG_RS_NAME, ) - create_project_config_map( + testhelper.create_project_config_map( om=ops_manager, project_name="development", 
mdb_name=OPLOG_RS_NAME, @@ -172,7 +149,7 @@ def blockstore_replica_set( name=BLOCKSTORE_RS_NAME, ) - create_project_config_map( + testhelper.create_project_config_map( om=ops_manager, project_name="blockstore", mdb_name=BLOCKSTORE_RS_NAME, @@ -241,12 +218,12 @@ def oplog_user( yield resource.update() -@mark.e2e_multi_cluster_backup_restore +@mark.e2e_mongodbmulticluster_multi_cluster_backup_restore def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) -@mark.e2e_multi_cluster_backup_restore +@mark.e2e_mongodbmulticluster_multi_cluster_backup_restore class TestOpsManagerCreation: """ name: Ops Manager successful creation with backup and oplog stores enabled @@ -259,47 +236,23 @@ def test_create_om( self, ops_manager: MongoDBOpsManager, ): - ops_manager["spec"]["backup"]["headDB"]["storageClass"] = get_default_storage_class() - ops_manager["spec"]["backup"]["members"] = 1 - - ops_manager.update() - - ops_manager.backup_status().assert_reaches_phase( - Phase.Pending, - msg_regexp="The MongoDB object .+ doesn't exist", - timeout=1800, - ) + testhelper.TestOpsManagerCreation.test_create_om(self, ops_manager) def test_daemon_statefulset( self, ops_manager: MongoDBOpsManager, ): - def stateful_set_becomes_ready(): - stateful_set = ops_manager.read_backup_statefulset() - return stateful_set.status.ready_replicas == 1 and stateful_set.status.current_replicas == 1 - - KubernetesTester.wait_until(stateful_set_becomes_ready, timeout=300) - - stateful_set = ops_manager.read_backup_statefulset() - # pod template has volume mount request - assert (HEAD_PATH, "head") in ( - (mount.mount_path, mount.name) for mount in stateful_set.spec.template.spec.containers[0].volume_mounts - ) + testhelper.TestOpsManagerCreation.test_daemon_statefulset(self, ops_manager) def test_backup_daemon_services_created( self, namespace, central_cluster_client: kubernetes.client.ApiClient, ): - 
"""Backup creates two additional services for queryable backup""" - services = client.CoreV1Api(api_client=central_cluster_client).list_namespaced_service(namespace).items - - backup_services = [s for s in services if s.metadata.name.startswith("om-backup")] - - assert len(backup_services) >= 3 + testhelper.TestOpsManagerCreation.test_backup_daemon_services_created(self, namespace, central_cluster_client) -@mark.e2e_multi_cluster_backup_restore +@mark.e2e_mongodbmulticluster_multi_cluster_backup_restore class TestBackupDatabasesAdded: """name: Creates mongodb resources for oplog and blockstore and waits until OM resource gets to running state""" @@ -309,33 +262,16 @@ def test_backup_mdbs_created( oplog_replica_set: MongoDB, blockstore_replica_set: MongoDB, ): - """Creates mongodb databases all at once""" - oplog_replica_set.assert_reaches_phase(Phase.Running) - blockstore_replica_set.assert_reaches_phase(Phase.Running) + testhelper.TestBackupDatabasesAdded.test_backup_mdbs_created(self, oplog_replica_set, blockstore_replica_set) def test_oplog_user_created(self, oplog_user: MongoDBUser): - oplog_user.assert_reaches_phase(Phase.Updated) + testhelper.TestBackupDatabasesAdded.test_oplog_user_created(self, oplog_user) def test_om_failed_oplog_no_user_ref(self, ops_manager: MongoDBOpsManager): - """Waits until Backup is in failed state as blockstore doesn't have reference to the user""" - ops_manager.backup_status().assert_reaches_phase( - Phase.Failed, - msg_regexp=".*is configured to use SCRAM-SHA authentication mode, the user " - "must be specified using 'mongodbUserRef'", - ) + testhelper.TestBackupDatabasesAdded.test_om_failed_oplog_no_user_ref(self, ops_manager) def test_fix_om(self, ops_manager: MongoDBOpsManager, oplog_user: MongoDBUser): - ops_manager.load() - ops_manager["spec"]["backup"]["opLogStores"][0]["mongodbUserRef"] = {"name": oplog_user.name} - ops_manager.update() - - ops_manager.backup_status().assert_reaches_phase( - Phase.Running, - timeout=200, - 
ignore_errors=True, - ) - - assert ops_manager.backup_status().get_message() is None + testhelper.TestBackupDatabasesAdded.test_fix_om(self, ops_manager, oplog_user) class TestBackupForMongodb: @@ -395,7 +331,7 @@ def mongodb_multi_one( custom_mdb_version: str, ) -> MongoDBMulti: resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodb-multi.yaml"), + yaml_fixture("mongodbmulticluster-multi.yaml"), "multi-replica-set-one", namespace, # the project configmap should be created in the central cluster. @@ -429,88 +365,39 @@ def mongodb_multi_one( return resource.update() - @mark.e2e_multi_cluster_backup_restore + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore def test_setup_om_connection( self, ops_manager: MongoDBOpsManager, central_cluster_client: kubernetes.client.ApiClient, member_cluster_clients: List[MultiClusterClient], ): - """ - The base_url makes OM accessible from member clusters via a special interconnected dns address. - """ - ops_manager.load() - external_svc_name = ops_manager.external_svc_name() - svc = read_service(ops_manager.namespace, external_svc_name, api_client=central_cluster_client) - # we have no hostName, but the ip is resolvable. - ip = svc.status.load_balancer.ingress[0].ip - - interconnected_field = f"om-backup.{ops_manager.namespace}.interconnected" - - # let's make sure that every client can connect to OM. - for c in member_cluster_clients: - update_coredns_hosts( - host_mappings=[(ip, interconnected_field)], - api_client=c.api_client, - cluster_name=c.cluster_name, - ) - - # let's make sure that the operator can connect to OM via that given address. 
- update_coredns_hosts( - host_mappings=[(ip, interconnected_field)], - api_client=central_cluster_client, - cluster_name="central-cluster", + testhelper.TestBackupForMongodb.test_setup_om_connection( + self, ops_manager, central_cluster_client, member_cluster_clients ) - new_address = f"https://{interconnected_field}:8443" - # updating the central url app setting to point at the external address, - # this allows agents in other clusters to communicate correctly with this OM instance. - ops_manager["spec"]["configuration"]["mms.centralUrl"] = new_address - ops_manager.update() - - @mark.e2e_multi_cluster_backup_restore + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore def test_mongodb_multi_one_running_state(self, mongodb_multi_one: MongoDBMulti): - # we might fail connection in the beginning since we set a custom dns in coredns - mongodb_multi_one.assert_reaches_phase(Phase.Running, ignore_errors=True, timeout=1200) + testhelper.TestBackupForMongodb.test_mongodb_multi_one_running_state(self, mongodb_multi_one) @skip_if_local - @mark.e2e_multi_cluster_backup_restore + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore @pytest.mark.flaky(reruns=100, reruns_delay=6) def test_add_test_data(self, mongodb_multi_one_collection): - mongodb_multi_one_collection.insert_one(TEST_DATA) + testhelper.TestBackupForMongodb.test_add_test_data(self, mongodb_multi_one_collection) - @mark.e2e_multi_cluster_backup_restore + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore def test_mdb_backed_up(self, project_one: OMTester): - project_one.wait_until_backup_snapshots_are_ready(expected_count=1) + testhelper.TestBackupForMongodb.test_mdb_backed_up(self, project_one) - @mark.e2e_multi_cluster_backup_restore + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore def test_change_mdb_data(self, mongodb_multi_one_collection): - now_millis = time_to_millis(datetime.datetime.now()) - print("\nCurrent time (millis): {}".format(now_millis)) - time.sleep(30) - 
mongodb_multi_one_collection.insert_one({"foo": "bar"}) + testhelper.TestBackupForMongodb.test_change_mdb_data(self, mongodb_multi_one_collection) - @mark.e2e_multi_cluster_backup_restore + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore def test_pit_restore(self, project_one: OMTester): - now_millis = time_to_millis(datetime.datetime.now()) - print("\nCurrent time (millis): {}".format(now_millis)) - - backup_completion_time = project_one.get_latest_backup_completion_time() - print("\nbackup_completion_time: {}".format(backup_completion_time)) - - pit_millis = backup_completion_time + 1500 - - print(f"Restoring back to: {pit_millis}") + testhelper.TestBackupForMongodb.test_pit_restore(self, project_one) - project_one.create_restore_job_pit(pit_millis) - - @mark.e2e_multi_cluster_backup_restore + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore def test_data_got_restored(self, mongodb_multi_one_collection, mdb_client): - assert_data_got_restored(TEST_DATA, mongodb_multi_one_collection, timeout=1200) - - -def time_to_millis(date_time) -> int: - """https://stackoverflow.com/a/11111177/614239""" - epoch = datetime.datetime.utcfromtimestamp(0) - pit_millis = (date_time - epoch).total_seconds() * 1000 - return pit_millis + testhelper.TestBackupForMongodb.test_data_got_restored(self, mongodb_multi_one_collection, mdb_client) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_backup_restore_no_mesh.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore_no_mesh.py similarity index 67% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_backup_restore_no_mesh.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore_no_mesh.py index 41707aa04..2f34a04e5 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_backup_restore_no_mesh.py +++ 
b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore_no_mesh.py @@ -1,9 +1,7 @@ # This test sets up ops manager in a multicluster "no-mesh" environment. # It tests the back-up functionality with a multi-cluster replica-set when the replica-set is deployed outside of a service-mesh context. -import datetime -import time -from typing import List, Optional, Tuple +from typing import List, Tuple import kubernetes import kubernetes.client @@ -12,8 +10,6 @@ from kubetester import ( create_or_update_configmap, create_or_update_secret, - get_default_storage_class, - read_service, try_load, ) from kubetester.certs import create_ops_manager_tls_certs @@ -27,14 +23,11 @@ from kubetester.omtester import OMTester from kubetester.operator import Operator from kubetester.opsmanager import MongoDBOpsManager -from kubetester.phase import Phase from pytest import fixture, mark -from tests.conftest import assert_data_got_restored, update_coredns_hosts -TEST_DATA = {"_id": "unique_id", "name": "John", "address": "Highway 37", "age": 30} +from ..shared import multi_cluster_backup_restore_no_mesh as testhelper MONGODB_PORT = 30000 -HEAD_PATH = "/head/" OPLOG_RS_NAME = "my-mongodb-oplog" BLOCKSTORE_RS_NAME = "my-mongodb-blockstore" USER_PASSWORD = "/qwerty@!#:" @@ -63,33 +56,6 @@ def ops_manager_certs( ) -def create_project_config_map(om: MongoDBOpsManager, mdb_name, project_name, client, custom_ca): - name = f"{mdb_name}-config" - data = { - "baseUrl": om.om_status().get_url(), - "projectName": project_name, - "sslMMSCAConfigMap": custom_ca, - "orgId": "", - } - - create_or_update_configmap(om.namespace, name, data, client) - - -def new_om_data_store( - mdb: MongoDB, - id: str, - assignment_enabled: bool = True, - user_name: Optional[str] = None, - password: Optional[str] = None, -) -> dict: - return { - "id": id, - "uri": mdb.mongo_uri(user_name=user_name, password=password), - "ssl": mdb.is_tls_enabled(), - 
"assignmentEnabled": assignment_enabled, - } - - @fixture(scope="module") def ops_manager( namespace: str, @@ -135,7 +101,7 @@ def oplog_replica_set( name=OPLOG_RS_NAME, ) - create_project_config_map( + testhelper.create_project_config_map( om=ops_manager, project_name="development", mdb_name=OPLOG_RS_NAME, @@ -168,7 +134,7 @@ def blockstore_replica_set( name=BLOCKSTORE_RS_NAME, ) - create_project_config_map( + testhelper.create_project_config_map( om=ops_manager, project_name="blockstore", mdb_name=BLOCKSTORE_RS_NAME, @@ -297,114 +263,58 @@ def disable_istio( return None -@mark.e2e_multi_cluster_backup_restore_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_update_coredns( replica_set_external_hosts: List[Tuple[str, str]], cluster_clients: dict[str, kubernetes.client.ApiClient], ): - """ - This test updates the coredns config in the member clusters to allow connecting to the other replica set members - through an external address. - """ - for cluster_name, cluster_api in cluster_clients.items(): - update_coredns_hosts(replica_set_external_hosts, cluster_name, api_client=cluster_api) + testhelper.test_update_coredns(replica_set_external_hosts, cluster_clients) -@mark.e2e_multi_cluster_backup_restore_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) -@mark.e2e_multi_cluster_backup_restore_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh class TestOpsManagerCreation: - """ - name: Ops Manager successful creation with backup and oplog stores enabled - description: | - Creates an Ops Manager instance with backup enabled. 
The OM is expected to get to 'Pending' state - eventually as it will wait for oplog db to be created - """ - def test_create_om( self, ops_manager: MongoDBOpsManager, ): - ops_manager["spec"]["backup"]["headDB"]["storageClass"] = get_default_storage_class() - ops_manager["spec"]["backup"]["members"] = 1 - - ops_manager.update() - - ops_manager.backup_status().assert_reaches_phase( - Phase.Pending, - msg_regexp="The MongoDB object .+ doesn't exist", - timeout=1800, - ) + testhelper.TestOpsManagerCreation.test_create_om(self, ops_manager) def test_daemon_statefulset( self, ops_manager: MongoDBOpsManager, ): - def stateful_set_becomes_ready(): - stateful_set = ops_manager.read_backup_statefulset() - return stateful_set.status.ready_replicas == 1 and stateful_set.status.current_replicas == 1 - - KubernetesTester.wait_until(stateful_set_becomes_ready, timeout=300) - - stateful_set = ops_manager.read_backup_statefulset() - # pod template has volume mount request - assert (HEAD_PATH, "head") in ( - (mount.mount_path, mount.name) for mount in stateful_set.spec.template.spec.containers[0].volume_mounts - ) + testhelper.TestOpsManagerCreation.test_daemon_statefulset(self, ops_manager) def test_backup_daemon_services_created( self, namespace, central_cluster_client: kubernetes.client.ApiClient, ): - """Backup creates two additional services for queryable backup""" - services = client.CoreV1Api(api_client=central_cluster_client).list_namespaced_service(namespace).items - - backup_services = [s for s in services if s.metadata.name.startswith("om-backup")] - - assert len(backup_services) >= 3 + testhelper.TestOpsManagerCreation.test_backup_daemon_services_created(self, namespace, central_cluster_client) -@mark.e2e_multi_cluster_backup_restore_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh class TestBackupDatabasesAdded: - """name: Creates mongodb resources for oplog and blockstore and waits until OM resource gets to - running state""" - def 
test_backup_mdbs_created( self, oplog_replica_set: MongoDB, blockstore_replica_set: MongoDB, ): - """Creates mongodb databases all at once""" - oplog_replica_set.assert_reaches_phase(Phase.Running) - blockstore_replica_set.assert_reaches_phase(Phase.Running) + testhelper.TestBackupDatabasesAdded.test_backup_mdbs_created(self, oplog_replica_set, blockstore_replica_set) def test_oplog_user_created(self, oplog_user: MongoDBUser): - oplog_user.assert_reaches_phase(Phase.Updated) + testhelper.TestBackupDatabasesAdded.test_oplog_user_created(self, oplog_user) def test_om_failed_oplog_no_user_ref(self, ops_manager: MongoDBOpsManager): - """Waits until Backup is in failed state as blockstore doesn't have reference to the user""" - ops_manager.backup_status().assert_reaches_phase( - Phase.Failed, - msg_regexp=".*is configured to use SCRAM-SHA authentication mode, the user " - "must be specified using 'mongodbUserRef'", - ) + testhelper.TestBackupDatabasesAdded.test_om_failed_oplog_no_user_ref(self, ops_manager) def test_fix_om(self, ops_manager: MongoDBOpsManager, oplog_user: MongoDBUser): - ops_manager.load() - ops_manager["spec"]["backup"]["opLogStores"][0]["mongodbUserRef"] = {"name": oplog_user.name} - ops_manager.update() - - ops_manager.backup_status().assert_reaches_phase( - Phase.Running, - timeout=200, - ignore_errors=True, - ) - - assert ops_manager.backup_status().get_message() is None + testhelper.TestBackupDatabasesAdded.test_fix_om(self, ops_manager, oplog_user) class TestBackupForMongodb: @@ -469,7 +379,7 @@ def mongodb_multi_one( custom_mdb_version: str, ) -> MongoDBMulti: resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodb-multi.yaml"), + yaml_fixture("mongodbmulticluster-multi.yaml"), "multi-replica-set-one", namespace, # the project configmap should be created in the central cluster. 
@@ -574,7 +484,7 @@ def mongodb_multi_one( return resource.update() - @mark.e2e_multi_cluster_backup_restore_no_mesh + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_setup_om_connection( self, replica_set_external_hosts: List[Tuple[str, str]], @@ -582,96 +492,35 @@ def test_setup_om_connection( central_cluster_client: kubernetes.client.ApiClient, member_cluster_clients: List[MultiClusterClient], ): - """ - test_setup_om_connection makes OM accessible from member clusters via a special interconnected dns address. - """ - ops_manager.load() - external_svc_name = ops_manager.external_svc_name() - svc = read_service(ops_manager.namespace, external_svc_name, api_client=central_cluster_client) - # we have no hostName, but the ip is resolvable. - ip = svc.status.load_balancer.ingress[0].ip - - interconnected_field = f"om-backup.{ops_manager.namespace}.interconnected" - - # let's make sure that every client can connect to OM. - hosts = replica_set_external_hosts[:] - hosts.append((ip, interconnected_field)) - - for c in member_cluster_clients: - update_coredns_hosts( - host_mappings=hosts, - api_client=c.api_client, - cluster_name=c.cluster_name, - ) - - # let's make sure that the operator can connect to OM via that given address. - update_coredns_hosts( - host_mappings=[(ip, interconnected_field)], - api_client=central_cluster_client, - cluster_name="central-cluster", + testhelper.TestBackupForMongodb.test_setup_om_connection( + self, replica_set_external_hosts, ops_manager, central_cluster_client, member_cluster_clients ) - new_address = f"https://{interconnected_field}:8443" - # updating the central url app setting to point at the external address, - # this allows agents in other clusters to communicate correctly with this OM instance. 
- ops_manager["spec"]["configuration"]["mms.centralUrl"] = new_address - ops_manager.update() - - @mark.e2e_multi_cluster_backup_restore_no_mesh + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_mongodb_multi_one_running_state(self, mongodb_multi_one: MongoDBMulti): - # we might fail connection in the beginning since we set a custom dns in coredns - mongodb_multi_one.assert_reaches_phase(Phase.Running, ignore_errors=True, timeout=1500) + testhelper.TestBackupForMongodb.test_mongodb_multi_one_running_state(self, mongodb_multi_one) @skip_if_local - @mark.e2e_multi_cluster_backup_restore_no_mesh + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_add_test_data(self, mongodb_multi_one_collection): - max_attempts = 100 - while max_attempts > 0: - try: - mongodb_multi_one_collection.insert_one(TEST_DATA) - return - except Exception as e: - print(e) - max_attempts -= 1 - time.sleep(6) - - @mark.e2e_multi_cluster_backup_restore_no_mesh + testhelper.TestBackupForMongodb.test_add_test_data(self, mongodb_multi_one_collection) + + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_mdb_backed_up(self, project_one: OMTester): - project_one.wait_until_backup_snapshots_are_ready(expected_count=1) + testhelper.TestBackupForMongodb.test_mdb_backed_up(self, project_one) - @mark.e2e_multi_cluster_backup_restore_no_mesh + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_change_mdb_data(self, mongodb_multi_one_collection): - now_millis = time_to_millis(datetime.datetime.now()) - print("\nCurrent time (millis): {}".format(now_millis)) - time.sleep(30) - mongodb_multi_one_collection.insert_one({"foo": "bar"}) + testhelper.TestBackupForMongodb.test_change_mdb_data(self, mongodb_multi_one_collection) - @mark.e2e_multi_cluster_backup_restore_no_mesh + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_pit_restore(self, project_one: OMTester): - now_millis = 
time_to_millis(datetime.datetime.now()) - print("\nCurrent time (millis): {}".format(now_millis)) - - pit_datetme = datetime.datetime.now() - datetime.timedelta(seconds=15) - pit_millis = time_to_millis(pit_datetme) - print("Restoring back to the moment 15 seconds ago (millis): {}".format(pit_millis)) + testhelper.TestBackupForMongodb.test_pit_restore(self, project_one) - project_one.create_restore_job_pit(pit_millis) - - @mark.e2e_multi_cluster_backup_restore_no_mesh + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_mdb_ready(self, mongodb_multi_one: MongoDBMulti): - # Note: that we are not waiting for the restore jobs to get finished as PIT restore jobs get FINISHED status - # right away. - # But the agent might still do work on the cluster, so we need to wait for that to happen. - mongodb_multi_one.assert_reaches_phase(Phase.Pending) - mongodb_multi_one.assert_reaches_phase(Phase.Running) + testhelper.TestBackupForMongodb.test_mdb_ready(self, mongodb_multi_one) - @mark.e2e_multi_cluster_backup_restore_no_mesh + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_data_got_restored(self, mongodb_multi_one_collection): - assert_data_got_restored(TEST_DATA, mongodb_multi_one_collection, timeout=900) - - -def time_to_millis(date_time) -> int: - """https://stackoverflow.com/a/11111177/614239""" - epoch = datetime.datetime.utcfromtimestamp(0) - pit_millis = (date_time - epoch).total_seconds() * 1000 - return pit_millis + testhelper.TestBackupForMongodb.test_data_got_restored(self, mongodb_multi_one_collection) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_cli_recover.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_cli_recover.py similarity index 56% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_cli_recover.py rename to 
docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_cli_recover.py index 75e4e20da..b4dbc3e97 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_cli_recover.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_cli_recover.py @@ -7,14 +7,10 @@ from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase -from tests.conftest import ( - MULTI_CLUSTER_OPERATOR_NAME, - run_kube_config_creation_tool, - run_multi_cluster_recovery_tool, -) from tests.multicluster.conftest import cluster_spec_list +from ..shared import multi_cluster_cli_recover as testhelper + RESOURCE_NAME = "multi-replica-set" BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" @@ -27,7 +23,7 @@ def mongodb_multi_unmarshalled( member_cluster_names: List[str], custom_mdb_version: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), RESOURCE_NAME, namespace) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), RESOURCE_NAME, namespace) resource.set_version(custom_mdb_version) # ensure certs are created for the members during scale up resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) @@ -64,74 +60,43 @@ def mongodb_multi(mongodb_multi_unmarshalled: MongoDBMulti, server_certs: str) - return mongodb_multi_unmarshalled -@pytest.mark.e2e_multi_cluster_recover +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_recover def test_deploy_operator( install_multi_cluster_operator_set_members_fn: Callable[[List[str]], Operator], member_cluster_names: List[str], namespace: str, ): - run_kube_config_creation_tool(member_cluster_names[:-1], namespace, namespace, member_cluster_names) - # deploy the operator without the final cluster - operator = 
install_multi_cluster_operator_set_members_fn(member_cluster_names[:-1]) - operator.assert_is_running() + testhelper.test_deploy_operator(install_multi_cluster_operator_set_members_fn, member_cluster_names, namespace) -@pytest.mark.e2e_multi_cluster_recover +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_recover def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) + testhelper.test_create_mongodb_multi(mongodb_multi) -@pytest.mark.e2e_multi_cluster_recover +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_recover def test_recover_operator_add_cluster( member_cluster_names: List[str], namespace: str, central_cluster_client: kubernetes.client.ApiClient, ): - return_code = run_multi_cluster_recovery_tool(member_cluster_names, namespace, namespace) - assert return_code == 0 - operator = Operator( - name=MULTI_CLUSTER_OPERATOR_NAME, - namespace=namespace, - api_client=central_cluster_client, - ) - operator._wait_for_operator_ready() - operator.assert_is_running() + testhelper.test_recover_operator_add_cluster(member_cluster_names, namespace, central_cluster_client) -@pytest.mark.e2e_multi_cluster_recover +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_recover def test_mongodb_multi_recovers_adding_cluster(mongodb_multi: MongoDBMulti, member_cluster_names: List[str]): - mongodb_multi.load() + testhelper.test_mongodb_multi_recovers_adding_cluster(mongodb_multi, member_cluster_names) - mongodb_multi["spec"]["clusterSpecList"].append({"clusterName": member_cluster_names[-1], "members": 2}) - mongodb_multi.update() - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) - -@pytest.mark.e2e_multi_cluster_recover +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_recover def test_recover_operator_remove_cluster( member_cluster_names: List[str], namespace: str, central_cluster_client: kubernetes.client.ApiClient, ): - return_code = 
run_multi_cluster_recovery_tool(member_cluster_names[1:], namespace, namespace) - assert return_code == 0 - operator = Operator( - name=MULTI_CLUSTER_OPERATOR_NAME, - namespace=namespace, - api_client=central_cluster_client, - ) - operator._wait_for_operator_ready() - operator.assert_is_running() + testhelper.test_recover_operator_remove_cluster(member_cluster_names, namespace, central_cluster_client) -@pytest.mark.e2e_multi_cluster_recover +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_recover def test_mongodb_multi_recovers_removing_cluster(mongodb_multi: MongoDBMulti, member_cluster_names: List[str]): - mongodb_multi.load() - - last_transition_time = mongodb_multi.get_status_last_transition_time() - - mongodb_multi["spec"]["clusterSpecList"].pop(0) - mongodb_multi.update() - mongodb_multi.assert_state_transition_happens(last_transition_time) - - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1500) + testhelper.test_mongodb_multi_recovers_removing_cluster(mongodb_multi, member_cluster_names) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_clusterwide.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_clusterwide.py similarity index 60% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_clusterwide.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_clusterwide.py index 0041bb414..d173b70d4 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_clusterwide.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_clusterwide.py @@ -1,25 +1,23 @@ import os -import time from typing import Dict, List import kubernetes from kubernetes import client -from kubetester import create_or_update_configmap, create_or_update_secret, read_secret -from kubetester.kubetester import KubernetesTester from 
kubetester.kubetester import fixture as yaml_fixture from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase from pytest import fixture, mark from tests.conftest import ( - MULTI_CLUSTER_OPERATOR_NAME, _install_multi_cluster_operator, run_kube_config_creation_tool, ) +from tests.constants import MULTI_CLUSTER_OPERATOR_NAME +from tests.multicluster.conftest import cluster_spec_list -from . import prepare_multi_cluster_namespaces -from .conftest import cluster_spec_list, create_namespace +from ..shared import multi_cluster_clusterwide as testhelper + +MDB_RESOURCE = "multi-replica-set" @fixture(scope="module") @@ -44,7 +42,7 @@ def mongodb_multi_a( member_cluster_names: List[str], custom_mdb_version: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", mdba_ns) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, mdba_ns) resource.set_version(custom_mdb_version) resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) @@ -61,7 +59,7 @@ def mongodb_multi_b( member_cluster_names: List[str], custom_mdb_version: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", mdbb_ns) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, mdbb_ns) resource.set_version(custom_mdb_version) resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) @@ -75,7 +73,7 @@ def unmanaged_mongodb_multi( unmanaged_mdb_ns: str, member_cluster_names: List[str], ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", unmanaged_mdb_ns) + resource = 
MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, unmanaged_mdb_ns) resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) @@ -114,7 +112,7 @@ def install_operator( ) -@mark.e2e_multi_cluster_specific_namespaces +@mark.e2e_mongodbmulticluster_multi_cluster_specific_namespaces def test_create_namespaces( namespace: str, mdba_ns: str, @@ -125,38 +123,19 @@ def test_create_namespaces( evergreen_task_id: str, multi_cluster_operator_installation_config: Dict[str, str], ): - image_pull_secret_name = multi_cluster_operator_installation_config["registry.imagePullSecrets"] - image_pull_secret_data = read_secret(namespace, image_pull_secret_name) - - create_namespace( - central_cluster_client, - member_cluster_clients, - evergreen_task_id, + testhelper.test_create_namespaces( + namespace, mdba_ns, - image_pull_secret_name, - image_pull_secret_data, - ) - - create_namespace( - central_cluster_client, - member_cluster_clients, - evergreen_task_id, mdbb_ns, - image_pull_secret_name, - image_pull_secret_data, - ) - - create_namespace( + unmanaged_mdb_ns, central_cluster_client, member_cluster_clients, evergreen_task_id, - unmanaged_mdb_ns, - image_pull_secret_name, - image_pull_secret_data, + multi_cluster_operator_installation_config, ) -@mark.e2e_multi_cluster_specific_namespaces +@mark.e2e_mongodbmulticluster_multi_cluster_specific_namespaces def test_prepare_namespace( multi_cluster_operator_installation_config: Dict[str, str], member_cluster_clients: List[MultiClusterClient], @@ -164,32 +143,22 @@ def test_prepare_namespace( mdba_ns: str, mdbb_ns: str, ): - prepare_multi_cluster_namespaces( - mdba_ns, - multi_cluster_operator_installation_config, - member_cluster_clients, - central_cluster_name, - ) - - prepare_multi_cluster_namespaces( - mdbb_ns, - multi_cluster_operator_installation_config, - member_cluster_clients, - 
central_cluster_name, + testhelper.test_prepare_namespace( + multi_cluster_operator_installation_config, member_cluster_clients, central_cluster_name, mdba_ns, mdbb_ns ) -@mark.e2e_multi_cluster_clusterwide +@mark.e2e_mongodbmulticluster_multi_cluster_clusterwide def test_deploy_operator(multi_cluster_operator_clustermode: Operator): - multi_cluster_operator_clustermode.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator_clustermode) -@mark.e2e_multi_cluster_specific_namespaces +@mark.e2e_mongodbmulticluster_multi_cluster_specific_namespaces def test_deploy_operator(install_operator: Operator): - install_operator.assert_is_running() + testhelper.test_deploy_operator(install_operator) -@mark.e2e_multi_cluster_specific_namespaces +@mark.e2e_mongodbmulticluster_multi_cluster_specific_namespaces def test_copy_configmap_and_secret_across_ns( namespace: str, central_cluster_client: client.ApiClient, @@ -197,35 +166,21 @@ def test_copy_configmap_and_secret_across_ns( mdba_ns: str, mdbb_ns: str, ): - data = KubernetesTester.read_configmap(namespace, "my-project", api_client=central_cluster_client) - data["projectName"] = mdba_ns - create_or_update_configmap(mdba_ns, "my-project", data, api_client=central_cluster_client) - - data["projectName"] = mdbb_ns - create_or_update_configmap(mdbb_ns, "my-project", data, api_client=central_cluster_client) - - data = read_secret(namespace, "my-credentials", api_client=central_cluster_client) - create_or_update_secret(mdba_ns, "my-credentials", data, api_client=central_cluster_client) - create_or_update_secret(mdbb_ns, "my-credentials", data, api_client=central_cluster_client) + testhelper.test_copy_configmap_and_secret_across_ns( + namespace, central_cluster_client, multi_cluster_operator_installation_config, mdba_ns, mdbb_ns + ) -@mark.e2e_multi_cluster_specific_namespaces +@mark.e2e_mongodbmulticluster_multi_cluster_specific_namespaces def test_create_mongodb_multi_nsa(mongodb_multi_a: MongoDBMulti): - 
mongodb_multi_a.assert_reaches_phase(Phase.Running, timeout=800) + testhelper.test_create_mongodb_multi_nsa(mongodb_multi_a) -@mark.e2e_multi_cluster_specific_namespaces +@mark.e2e_mongodbmulticluster_multi_cluster_specific_namespaces def test_create_mongodb_multi_nsb(mongodb_multi_b: MongoDBMulti): - mongodb_multi_b.assert_reaches_phase(Phase.Running, timeout=800) + testhelper.test_create_mongodb_multi_nsb(mongodb_multi_b) -@mark.e2e_multi_cluster_specific_namespaces +@mark.e2e_mongodbmulticluster_multi_cluster_specific_namespaces def test_create_mongodb_multi_unmanaged(unmanaged_mongodb_multi: MongoDBMulti): - """ - For an unmanaged resource, the status should not be updated! - """ - for i in range(10): - time.sleep(5) - - unmanaged_mongodb_multi.reload() - assert "status" not in unmanaged_mongodb_multi + testhelper.test_create_mongodb_multi_unmanaged(unmanaged_mongodb_multi) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_dr_connect.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_dr_connect.py new file mode 100644 index 000000000..863499e1e --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_dr_connect.py @@ -0,0 +1,69 @@ +from typing import Dict + +import kubernetes +import pytest +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.operator import Operator + +from ..shared import multi_cluster_dr_connect as testhelper + +MDB_RESOURCE = "multi-replica-set" + + +# this test is intended to run locally, using telepresence. Make sure to configure the cluster_context to api-server mapping +# in the "cluster_host_mapping" fixture before running it. 
It is intented to be run locally with the command: make e2e-telepresence test=e2e_mongodbmulticluster_multi_cluster_dr local=true +@pytest.fixture(scope="module") +def mongodb_multi(central_cluster_client: kubernetes.client.ApiClient, namespace: str) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi-dr.yaml"), MDB_RESOURCE, namespace) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + # return resource.load() + return resource.create() + + +@pytest.fixture(scope="module") +def mongodb_multi_collection(mongodb_multi: MongoDBMulti): + collection = mongodb_multi.tester().client["testdb"] + return collection["testcollection"] + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_dr +def test_create_kube_config_file(cluster_clients: Dict): + testhelper.test_create_kube_config_file(cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_dr +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_dr +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_dr +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti): + testhelper.test_replica_set_is_reachable(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_dr +@pytest.mark.flaky(reruns=100, reruns_delay=6) +def test_add_test_data(mongodb_multi_collection): + testhelper.test_add_test_data(mongodb_multi_collection) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_dr +def test_delete_member_3_cluster(): + testhelper.test_delete_member_3_cluster() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_dr +def test_replica_set_is_reachable_after_deletetion(mongodb_multi: MongoDBMulti): + testhelper.test_replica_set_is_reachable_after_deletetion(mongodb_multi) + + 
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_dr +def test_add_test_data_after_deletion(mongodb_multi_collection, capsys): + testhelper.test_add_test_data_after_deletion(mongodb_multi_collection, capsys) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_enable_tls.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_enable_tls.py similarity index 64% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_enable_tls.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_enable_tls.py index 86ae862d7..ef31eaff6 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_enable_tls.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_enable_tls.py @@ -1,29 +1,25 @@ from typing import List import kubernetes -from kubetester import read_secret from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs from kubetester.kubetester import ensure_ent_version from kubetester.kubetester import fixture as yaml_fixture from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase from pytest import fixture, mark from tests.multicluster.conftest import cluster_spec_list +from ..shared import multi_cluster_enable_tls as testhelper + CERT_SECRET_PREFIX = "clustercert" MDB_RESOURCE = "multi-cluster-replica-set" BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" -BUNDLE_PEM_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert-pem" -USER_NAME = "my-user-1" -PASSWORD_SECRET_NAME = "mms-user-1-password" -USER_PASSWORD = "my-password" @fixture(scope="module") def mongodb_multi_unmarshalled(namespace: str, member_cluster_names, custom_mdb_version: str) -> MongoDBMulti: - 
resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) resource.set_version(ensure_ent_version(custom_mdb_version)) resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) return resource @@ -57,17 +53,17 @@ def mongodb_multi( return resource.create() -@mark.e2e_multi_cluster_enable_tls +@mark.e2e_mongodbmulticluster_multi_cluster_enable_tls def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) -@mark.e2e_multi_cluster_enable_tls +@mark.e2e_mongodbmulticluster_multi_cluster_enable_tls def test_create_mongodb_multi(mongodb_multi: MongoDBMulti, namespace: str): - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + testhelper.test_create_mongodb_multi(mongodb_multi, namespace) -@mark.e2e_multi_cluster_enable_tls +@mark.e2e_mongodbmulticluster_multi_cluster_enable_tls def test_enabled_tls_mongodb_multi( mongodb_multi: MongoDBMulti, namespace: str, @@ -75,20 +71,6 @@ def test_enabled_tls_mongodb_multi( multi_cluster_issuer_ca_configmap: str, member_cluster_clients: List[MultiClusterClient], ): - mongodb_multi.load() - mongodb_multi["spec"]["security"] = { - "certsSecretPrefix": CERT_SECRET_PREFIX, - "tls": { - "ca": multi_cluster_issuer_ca_configmap, - }, - } - mongodb_multi.update() - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1300) - - # assert the presence of the generated pem certificates in each member cluster - for client in member_cluster_clients: - read_secret( - namespace=namespace, - name=BUNDLE_PEM_SECRET_NAME, - api_client=client.api_client, - ) + testhelper.test_enabled_tls_mongodb_multi( + mongodb_multi, namespace, server_certs, multi_cluster_issuer_ca_configmap, member_cluster_clients + ) diff --git 
a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_ldap.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_ldap.py similarity index 57% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_ldap.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_ldap.py index af460bbbe..bdd3f69fa 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_ldap.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_ldap.py @@ -1,29 +1,25 @@ -import time from typing import Dict, List import kubernetes -from kubetester import create_secret, wait_until -from kubetester.automation_config_tester import AutomationConfigTester +from kubetester import create_secret from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs -from kubetester.kubetester import KubernetesTester, ensure_ent_version +from kubetester.kubetester import ensure_ent_version from kubetester.kubetester import fixture as yaml_fixture from kubetester.kubetester import skip_if_static_containers -from kubetester.ldap import LDAP_AUTHENTICATION_MECHANISM, LDAPUser, OpenLDAP +from kubetester.ldap import LDAPUser, OpenLDAP from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongodb_user import MongoDBUser, Role, generic_user from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase from pytest import fixture, mark from tests.conftest import get_multi_cluster_operator_installation_config from tests.multicluster.conftest import cluster_spec_list +from ..shared import multi_cluster_ldap as testhelper + CERT_SECRET_PREFIX = "clustercert" MDB_RESOURCE = "multi-replica-set-ldap" BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" -USER_NAME = "mms-user-1" -PASSWORD = 
"my-password" -LDAP_NAME = "openldap" @fixture(scope="module") @@ -39,7 +35,7 @@ def mongodb_multi_unmarshalled( member_cluster_names, custom_mdb_version: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) # This test has always been tested with 5.0.5-ent. After trying to unify its variant and upgrading it # to MDB 6 we realized that our EVG hosts contain outdated docker and seccomp libraries in the host which # cause MDB process to exit. It might be a good idea to try uncommenting it after migrating to newer EVG hosts. @@ -168,179 +164,86 @@ def user_ldap( @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_mongodb_multi_pending(mongodb_multi: MongoDBMulti): - """ - This function tests CLOUDP-229222. The resource needs to enter the "Pending" state and without the automatic - recovery, it would stay like this forever (since we wouldn't push the new AC with a fix). - """ - mongodb_multi.assert_reaches_phase(Phase.Pending, timeout=100) + testhelper.test_mongodb_multi_pending(mongodb_multi) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_turn_tls_on_CLOUDP_229222(mongodb_multi: MongoDBMulti): - """ - This function tests CLOUDP-229222. The user attempts to fix the AutomationConfig. - Before updating the AutomationConfig, we need to ensure the operator pushed the wrong one to Ops Manager. 
- """ - - def wait_for_ac_exists() -> bool: - ac = mongodb_multi.get_automation_config_tester().automation_config - try: - _ = ac["ldap"]["transportSecurity"] - _ = ac["version"] - return True - except KeyError: - return False - - wait_until(wait_for_ac_exists, timeout=200) - current_version = mongodb_multi.get_automation_config_tester().automation_config["version"] - - def wait_for_ac_pushed() -> bool: - ac = mongodb_multi.get_automation_config_tester().automation_config - try: - transport_security = ac["ldap"]["transportSecurity"] - new_version = ac["version"] - if transport_security != "none": - return False - if new_version <= current_version: - return False - return True - except KeyError: - return False - - wait_until(wait_for_ac_pushed, timeout=500) - - resource = mongodb_multi.load() - - resource["spec"]["security"]["authentication"]["ldap"]["transportSecurity"] = "tls" - resource.update() + testhelper.test_turn_tls_on_CLOUDP_229222(mongodb_multi) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_multi_replicaset_CLOUDP_229222(mongodb_multi: MongoDBMulti): - """ - This function tests CLOUDP-229222. The recovery mechanism kicks in and pushes Automation Config. The ReplicaSet - goes into running state. - """ - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1900) + testhelper.test_multi_replicaset_CLOUDP_229222(mongodb_multi) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_restore_mongodb_multi_ldap_configuration(mongodb_multi: MongoDBMulti): - """ - This function restores the initial desired security configuration to carry on with the next tests normally. 
- """ - resource = mongodb_multi.load() - - resource["spec"]["security"]["authentication"]["modes"] = ["LDAP"] - resource["spec"]["security"]["authentication"]["ldap"]["transportSecurity"] = "tls" - resource["spec"]["security"]["authentication"]["agents"]["mode"] = "LDAP" - - resource.update() - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + testhelper.test_restore_mongodb_multi_ldap_configuration(mongodb_multi) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_create_ldap_user(mongodb_multi: MongoDBMulti, user_ldap: MongoDBUser): - user_ldap.assert_reaches_phase(Phase.Updated) - ac = AutomationConfigTester(KubernetesTester.get_automation_config()) - ac.assert_authentication_mechanism_enabled(LDAP_AUTHENTICATION_MECHANISM, active_auth_mechanism=True) - ac.assert_expected_users(1) + testhelper.test_create_ldap_user(mongodb_multi, user_ldap) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_ldap_user_created_and_can_authenticate(mongodb_multi: MongoDBMulti, user_ldap: MongoDBUser, ca_path: str): - tester = mongodb_multi.tester() - tester.assert_ldap_authentication( - username=user_ldap["spec"]["username"], - password=user_ldap.password, - tls_ca_file=ca_path, - attempts=10, - ) + testhelper.test_ldap_user_created_and_can_authenticate(mongodb_multi, user_ldap, ca_path) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_ops_manager_state_correctly_updated(mongodb_multi: MongoDBMulti, user_ldap: MongoDBUser): - expected_roles = { - ("admin", "clusterAdmin"), - ("admin", "readWriteAnyDatabase"), - ("admin", "dbAdminAnyDatabase"), - } - ac = AutomationConfigTester(KubernetesTester.get_automation_config()) - ac.assert_expected_users(1) - ac.assert_has_user(user_ldap["spec"]["username"]) - 
ac.assert_user_has_roles(user_ldap["spec"]["username"], expected_roles) - ac.assert_authentication_mechanism_enabled("PLAIN", active_auth_mechanism=True) - ac.assert_authentication_enabled(expected_num_deployment_auth_mechanisms=1) - - assert "userCacheInvalidationInterval" in ac.automation_config["ldap"] - assert "timeoutMS" in ac.automation_config["ldap"] - assert ac.automation_config["ldap"]["userCacheInvalidationInterval"] == 60 - assert ac.automation_config["ldap"]["timeoutMS"] == 12345 + testhelper.test_ops_manager_state_correctly_updated(mongodb_multi, user_ldap) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_deployment_is_reachable_with_ldap_agent(mongodb_multi: MongoDBMulti): - tester = mongodb_multi.tester() - tester.assert_deployment_reachable() + testhelper.test_deployment_is_reachable_with_ldap_agent(mongodb_multi) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti, member_cluster_names): - mongodb_multi.reload() - mongodb_multi["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - mongodb_multi.update() - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + testhelper.test_scale_mongodb_multi(mongodb_multi, member_cluster_names) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_new_ldap_user_can_authenticate_after_scaling( mongodb_multi: MongoDBMulti, user_ldap: MongoDBUser, ca_path: str ): - tester = mongodb_multi.tester() - tester.assert_ldap_authentication( - username=user_ldap["spec"]["username"], - password=user_ldap.password, - tls_ca_file=ca_path, - attempts=10, - ) + testhelper.test_new_ldap_user_can_authenticate_after_scaling(mongodb_multi, user_ldap, ca_path) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap 
+@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_disable_agent_auth(mongodb_multi: MongoDBMulti): - mongodb_multi.reload() - mongodb_multi["spec"]["security"]["authentication"]["enabled"] = False - mongodb_multi["spec"]["security"]["authentication"]["agents"]["enabled"] = False - mongodb_multi.update() - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + testhelper.test_disable_agent_auth(mongodb_multi) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_mongodb_multi_connectivity_with_no_auth(mongodb_multi: MongoDBMulti): - tester = mongodb_multi.tester() - tester.assert_connectivity() + testhelper.test_mongodb_multi_connectivity_with_no_auth(mongodb_multi) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_deployment_is_reachable_with_no_auth(mongodb_multi: MongoDBMulti): - tester = mongodb_multi.tester() - tester.assert_deployment_reachable() + testhelper.test_deployment_is_reachable_with_no_auth(mongodb_multi) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_ldap_custom_roles.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_ldap_custom_roles.py similarity index 71% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_ldap_custom_roles.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_ldap_custom_roles.py index 6f472e6f2..89f1937f8 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_ldap_custom_roles.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_ldap_custom_roles.py @@ -2,31 +2,27 @@ import kubernetes from kubetester import create_secret -from kubetester.automation_config_tester import AutomationConfigTester from 
kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs -from kubetester.kubetester import KubernetesTester, ensure_ent_version from kubetester.kubetester import fixture as yaml_fixture from kubetester.kubetester import skip_if_static_containers -from kubetester.ldap import LDAP_AUTHENTICATION_MECHANISM, LDAPUser, OpenLDAP +from kubetester.ldap import LDAPUser, OpenLDAP from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongodb_user import MongoDBUser, generic_user from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase from pytest import fixture, mark from tests.multicluster.conftest import cluster_spec_list +from ..shared import multi_cluster_ldap_custom_roles as testhelper + CERT_SECRET_PREFIX = "clustercert" MDB_RESOURCE = "multi-replica-set-ldap" BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" -USER_NAME = "mms-user-1" -PASSWORD = "my-password" -LDAP_NAME = "openldap" @fixture(scope="module") def mongodb_multi_unmarshalled(namespace: str, member_cluster_names, custom_mdb_version: str) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) # This test has always been tested with 5.0.5-ent. 
After trying to unify its variant and upgrading it # to MDB 6 we realized that our EVG hosts contain outdated docker and seccomp libraries in the host which @@ -151,84 +147,44 @@ def user_ldap( @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap_custom_roles +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap_custom_roles def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap_custom_roles +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap_custom_roles def test_create_mongodb_multi_with_ldap(mongodb_multi: MongoDBMulti): - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + testhelper.test_create_mongodb_multi_with_ldap(mongodb_multi) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap_custom_roles +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap_custom_roles def test_create_ldap_user(mongodb_multi: MongoDBMulti, user_ldap: MongoDBUser): - user_ldap.assert_reaches_phase(Phase.Updated) - ac = AutomationConfigTester(KubernetesTester.get_automation_config()) - ac.assert_authentication_mechanism_enabled(LDAP_AUTHENTICATION_MECHANISM, active_auth_mechanism=False) - ac.assert_expected_users(1) + testhelper.test_create_ldap_user(mongodb_multi, user_ldap) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap_custom_roles +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap_custom_roles def test_ldap_user_can_write_to_database(mongodb_multi: MongoDBMulti, user_ldap: MongoDBUser, ca_path: str): - tester = mongodb_multi.tester() - tester.assert_ldap_authentication( - username=user_ldap["spec"]["username"], - password=user_ldap.password, - tls_ca_file=ca_path, - db="foo", - collection="foo", - attempts=10, - ) + testhelper.test_ldap_user_can_write_to_database(mongodb_multi, user_ldap, ca_path) @skip_if_static_containers 
-@mark.e2e_multi_cluster_with_ldap_custom_roles +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap_custom_roles @mark.xfail(reason="The user should not be able to write to a database/collection it is not authorized to write on") def test_ldap_user_can_write_to_other_collection(mongodb_multi: MongoDBMulti, user_ldap: MongoDBUser, ca_path: str): - tester = mongodb_multi.tester() - tester.assert_ldap_authentication( - username=user_ldap["spec"]["username"], - password=user_ldap.password, - tls_ca_file=ca_path, - db="foo", - collection="foo2", - attempts=10, - ) + testhelper.test_ldap_user_can_write_to_other_collection(mongodb_multi, user_ldap, ca_path) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap_custom_roles +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap_custom_roles @mark.xfail(reason="The user should not be able to write to a database/collection it is not authorized to write on") def test_ldap_user_can_write_to_other_database(mongodb_multi: MongoDBMulti, user_ldap: MongoDBUser, ca_path: str): - tester = mongodb_multi.tester() - tester.assert_ldap_authentication( - username=user_ldap["spec"]["username"], - password=user_ldap.password, - tls_ca_file=ca_path, - db="foo2", - collection="foo", - attempts=10, - ) + testhelper.test_ldap_user_can_write_to_other_database(mongodb_multi, user_ldap, ca_path) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap_custom_roles +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap_custom_roles def test_automation_config_has_roles(mongodb_multi: MongoDBMulti): - tester = mongodb_multi.get_automation_config_tester() - role = { - "role": "cn=users,ou=groups,dc=example,dc=org", - "db": "admin", - "privileges": [ - {"actions": ["insert"], "resource": {"collection": "foo", "db": "foo"}}, - { - "actions": ["insert", "find"], - "resource": {"collection": "", "db": "admin"}, - }, - ], - "authenticationRestrictions": [], - } - tester.assert_expected_role(role_index=0, expected_value=role) + 
testhelper.test_automation_config_has_roles(mongodb_multi) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_oidc_m2m_group.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_oidc_m2m_group.py similarity index 57% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_oidc_m2m_group.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_oidc_m2m_group.py index ef8a2c582..ccaae8106 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_oidc_m2m_group.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_oidc_m2m_group.py @@ -2,15 +2,14 @@ import kubetester.oidc as oidc import pytest from kubetester import try_load -from kubetester.automation_config_tester import AutomationConfigTester -from kubetester.kubetester import KubernetesTester, ensure_ent_version +from kubetester.kubetester import KubernetesTester from kubetester.kubetester import fixture as yaml_fixture -from kubetester.mongodb import MongoDB, Phase -from kubetester.mongodb_multi import MongoDBMulti, MultiClusterClient -from kubetester.mongotester import ReplicaSetTester +from kubetester.mongodb_multi import MongoDBMulti from kubetester.operator import Operator from pytest import fixture +from ..shared import multi_cluster_oidc_m2m_group as testhelper + MDB_RESOURCE = "oidc-multi-replica-set" @@ -21,7 +20,9 @@ def mongodb_multi( member_cluster_names, custom_mdb_version: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("oidc/mongodb-multi-m2m-group.yaml"), MDB_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml( + yaml_fixture("oidc/mongodbmulticluster-multi-m2m-group.yaml"), MDB_RESOURCE, namespace + ) if try_load(resource): return resource @@ -38,21 +39,16 @@ def mongodb_multi( return resource.update() 
-@pytest.mark.e2e_multi_cluster_oidc_m2m_group +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_oidc_m2m_group class TestOIDCMultiCluster(KubernetesTester): def test_deploy_operator(self, multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.TestOIDCMultiCluster.test_deploy_operator(self, multi_cluster_operator) def test_create_oidc_replica_set(self, mongodb_multi: MongoDBMulti): - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + testhelper.TestOIDCMultiCluster.test_create_oidc_replica_set(self, mongodb_multi) def test_assert_connectivity(self, mongodb_multi: MongoDBMulti): - tester = mongodb_multi.tester() - tester.assert_oidc_authentication() + testhelper.TestOIDCMultiCluster.test_assert_connectivity(self, mongodb_multi) def test_ops_manager_state_updated_correctly(self, mongodb_multi: MongoDBMulti): - tester = mongodb_multi.get_automation_config_tester() - tester.assert_authentication_mechanism_enabled("MONGODB-OIDC", active_auth_mechanism=False) - tester.assert_authentication_enabled(2) - tester.assert_expected_users(0) - tester.assert_authoritative_set(True) + testhelper.TestOIDCMultiCluster.test_ops_manager_state_updated_correctly(self, mongodb_multi) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_oidc_m2m_user.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_oidc_m2m_user.py similarity index 65% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_oidc_m2m_user.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_oidc_m2m_user.py index 3faa266f4..c5fc0b9bd 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_oidc_m2m_user.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_oidc_m2m_user.py @@ -2,16 +2,15 @@ import kubetester.oidc as oidc 
import pytest from kubetester import try_load -from kubetester.automation_config_tester import AutomationConfigTester -from kubetester.kubetester import KubernetesTester, ensure_ent_version +from kubetester.kubetester import KubernetesTester from kubetester.kubetester import fixture as yaml_fixture -from kubetester.mongodb import MongoDB, Phase from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongodb_user import MongoDBUser -from kubetester.mongotester import ReplicaSetTester from kubetester.operator import Operator from pytest import fixture +from ..shared import multi_cluster_oidc_m2m_user as testhelper + MDB_RESOURCE = "oidc-multi-replica-set" @@ -22,7 +21,9 @@ def mongodb_multi( member_cluster_names, custom_mdb_version: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("oidc/mongodb-multi-m2m-user.yaml"), MDB_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml( + yaml_fixture("oidc/mongodbmulticluster-multi-m2m-user.yaml"), MDB_RESOURCE, namespace + ) if try_load(resource): return resource @@ -49,24 +50,19 @@ def oidc_user(namespace) -> MongoDBUser: return resource.update() -@pytest.mark.e2e_multi_cluster_oidc_m2m_user +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_oidc_m2m_user class TestOIDCMultiCluster(KubernetesTester): def test_deploy_operator(self, multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.TestOIDCMultiCluster.test_deploy_operator(self, multi_cluster_operator) def test_create_oidc_replica_set(self, mongodb_multi: MongoDBMulti): - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + testhelper.TestOIDCMultiCluster.test_create_oidc_replica_set(self, mongodb_multi) def test_create_user(self, oidc_user: MongoDBUser): - oidc_user.assert_reaches_phase(Phase.Updated, timeout=800) + testhelper.TestOIDCMultiCluster.test_create_user(self, oidc_user) def test_assert_connectivity(self, mongodb_multi: MongoDBMulti): - tester = mongodb_multi.tester() - 
tester.assert_oidc_authentication() + testhelper.TestOIDCMultiCluster.test_assert_connectivity(self, mongodb_multi) def test_ops_manager_state_updated_correctly(self, mongodb_multi: MongoDBMulti): - tester = mongodb_multi.get_automation_config_tester() - tester.assert_authentication_mechanism_enabled("MONGODB-OIDC", active_auth_mechanism=False) - tester.assert_authentication_enabled(2) - tester.assert_expected_users(1) - tester.assert_authoritative_set(True) + testhelper.TestOIDCMultiCluster.test_ops_manager_state_updated_correctly(self, mongodb_multi) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_pvc_resize.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_pvc_resize.py new file mode 100644 index 000000000..b88229e62 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_pvc_resize.py @@ -0,0 +1,55 @@ +from typing import List + +import kubernetes +import pytest +from kubetester import try_load +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_pvc_resize as testhelper + +RESOURCE_NAME = "multi-replica-set-pvc-resize" + + +@pytest.fixture(scope="module") +def mongodb_multi( + namespace: str, + multi_cluster_issuer_ca_configmap: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names: list[str], +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml( + yaml_fixture("mongodbmulticluster-multi-pvc-resize.yaml"), RESOURCE_NAME, namespace + ) + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + try_load(resource) + + resource["spec"]["clusterSpecList"] = 
cluster_spec_list(member_cluster_names, [2, 1, 2]) + + return resource + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_pvc_resize +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_pvc_resize +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_pvc_resize +def test_mongodb_multi_resize_pvc_state_changes(mongodb_multi: MongoDBMulti): + testhelper.test_mongodb_multi_resize_pvc_state_changes(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_pvc_resize +def test_mongodb_multi_resize_finished( + mongodb_multi: MongoDBMulti, namespace: str, member_cluster_clients: List[MultiClusterClient] +): + testhelper.test_mongodb_multi_resize_finished(mongodb_multi, namespace, member_cluster_clients) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_recover_clusterwide.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_recover_clusterwide.py new file mode 100644 index 000000000..aec36d2cb --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_recover_clusterwide.py @@ -0,0 +1,236 @@ +import os +from typing import Dict, List + +import kubernetes +from kubeobject import CustomObject +from kubernetes import client +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from pytest import fixture, mark +from tests.conftest import ( + _install_multi_cluster_operator, + run_kube_config_creation_tool, +) +from tests.constants import MULTI_CLUSTER_OPERATOR_NAME, 
OPERATOR_NAME +from tests.multicluster.conftest import ( + cluster_spec_list, +) + +from ..shared import multi_cluster_recover_clusterwide as testhelper + +MDB_RESOURCE = "multi-replica-set" + + +@fixture(scope="module") +def mdba_ns(namespace: str): + return "{}-mdb-ns-a".format(namespace) + + +@fixture(scope="module") +def mdbb_ns(namespace: str): + return "{}-mdb-ns-b".format(namespace) + + +@fixture(scope="module") +def mongodb_multi_a( + central_cluster_client: kubernetes.client.ApiClient, + mdba_ns: str, + member_cluster_names: List[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, mdba_ns) + resource.set_version(custom_mdb_version) + + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + resource.update() + return resource + + +@fixture(scope="module") +def mongodb_multi_b( + central_cluster_client: kubernetes.client.ApiClient, + mdbb_ns: str, + member_cluster_names: List[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, mdbb_ns) + resource.set_version(custom_mdb_version) + + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + resource.update() + return resource + + +@fixture(scope="module") +def install_operator( + namespace: str, + central_cluster_name: str, + multi_cluster_operator_installation_config: Dict[str, str], + central_cluster_client: client.ApiClient, + member_cluster_clients: List[MultiClusterClient], + member_cluster_names: List[str], + mdba_ns: str, + mdbb_ns: str, +) -> Operator: + os.environ["HELM_KUBECONTEXT"] = central_cluster_name + member_cluster_namespaces = mdba_ns + "," + mdbb_ns + 
run_kube_config_creation_tool( + member_cluster_names, + namespace, + namespace, + member_cluster_names, + True, + service_account_name=MULTI_CLUSTER_OPERATOR_NAME, + operator_name=OPERATOR_NAME, + ) + + return _install_multi_cluster_operator( + namespace, + multi_cluster_operator_installation_config, + central_cluster_client, + member_cluster_clients, + { + "operator.deployment_name": MULTI_CLUSTER_OPERATOR_NAME, + "operator.name": MULTI_CLUSTER_OPERATOR_NAME, + "operator.createOperatorServiceAccount": "false", + "operator.watchNamespace": member_cluster_namespaces, + "multiCluster.performFailOver": "false", + }, + central_cluster_name, + operator_name=MULTI_CLUSTER_OPERATOR_NAME, + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_label_operator_namespace(namespace: str, central_cluster_client: kubernetes.client.ApiClient): + testhelper.test_label_operator_namespace(namespace, central_cluster_client) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_create_namespaces( + namespace: str, + mdba_ns: str, + mdbb_ns: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_clients: List[MultiClusterClient], + evergreen_task_id: str, + multi_cluster_operator_installation_config: Dict[str, str], +): + testhelper.test_create_namespaces( + namespace, + mdba_ns, + mdbb_ns, + central_cluster_client, + member_cluster_clients, + evergreen_task_id, + multi_cluster_operator_installation_config, + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_create_service_entry(service_entries: List[CustomObject]): + testhelper.test_create_service_entry(service_entries) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_delete_cluster_role_and_binding( + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_delete_cluster_role_and_binding(central_cluster_client, 
member_cluster_clients) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_deploy_operator(install_operator: Operator): + testhelper.test_deploy_operator(install_operator) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_prepare_namespace( + multi_cluster_operator_installation_config: Dict[str, str], + member_cluster_clients: List[MultiClusterClient], + central_cluster_name: str, + mdba_ns: str, + mdbb_ns: str, +): + testhelper.test_prepare_namespace( + multi_cluster_operator_installation_config, member_cluster_clients, central_cluster_name, mdba_ns, mdbb_ns + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_copy_configmap_and_secret_across_ns( + namespace: str, + central_cluster_client: client.ApiClient, + mdba_ns: str, + mdbb_ns: str, +): + testhelper.test_copy_configmap_and_secret_across_ns(namespace, central_cluster_client, mdba_ns, mdbb_ns) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_create_mongodb_multi_nsa_nsb(mongodb_multi_a: MongoDBMulti, mongodb_multi_b: MongoDBMulti): + testhelper.test_create_mongodb_multi_nsa_nsb(mongodb_multi_a, mongodb_multi_b) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_update_service_entry_block_failed_cluster_traffic( + namespace: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names: List[str], +): + testhelper.test_update_service_entry_block_failed_cluster_traffic( + namespace, central_cluster_client, member_cluster_names + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_delete_database_statefulsets_in_failed_cluster( + mongodb_multi_a: MongoDBMulti, + mongodb_multi_b: MongoDBMulti, + mdba_ns: str, + mdbb_ns: str, + member_cluster_names: list[str], + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_delete_database_statefulsets_in_failed_cluster( + mongodb_multi_a, mongodb_multi_b, mdba_ns, 
mdbb_ns, member_cluster_names, member_cluster_clients + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_mongodb_multi_nsa_enters_failed_stated(mongodb_multi_a: MongoDBMulti): + testhelper.test_mongodb_multi_nsa_enters_failed_stated(mongodb_multi_a) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_mongodb_multi_nsb_enters_failed_stated(mongodb_multi_b: MongoDBMulti): + testhelper.test_mongodb_multi_nsb_enters_failed_stated(mongodb_multi_b) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_recover_operator_remove_cluster( + member_cluster_names: List[str], + namespace: str, + mdba_ns: str, + mdbb_ns: str, + central_cluster_client: kubernetes.client.ApiClient, +): + testhelper.test_recover_operator_remove_cluster( + member_cluster_names, namespace, mdba_ns, mdbb_ns, central_cluster_client + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_mongodb_multi_nsa_recovers_removing_cluster(mongodb_multi_a: MongoDBMulti): + testhelper.test_mongodb_multi_nsa_recovers_removing_cluster(mongodb_multi_a) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_mongodb_multi_nsb_recovers_removing_cluster(mongodb_multi_b: MongoDBMulti): + testhelper.test_mongodb_multi_nsb_recovers_removing_cluster(mongodb_multi_b) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_recover_network_partition.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_recover_network_partition.py new file mode 100644 index 000000000..e4d2f322a --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_recover_network_partition.py @@ -0,0 +1,92 @@ +from typing import List + +import kubernetes +from kubeobject import CustomObject +from kubernetes import client +from 
kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.operator import Operator +from pytest import fixture, mark +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_recover_network_partition as testhelper + +RESOURCE_NAME = "multi-replica-set" + + +@fixture(scope="module") +def mongodb_multi( + central_cluster_client: client.ApiClient, + namespace: str, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), RESOURCE_NAME, namespace) + resource.set_version(custom_mdb_version) + resource["spec"]["persistent"] = False + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + resource.api = client.CustomObjectsApi(central_cluster_client) + + return resource + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_network_partition +def test_label_namespace(namespace: str, central_cluster_client: client.ApiClient): + testhelper.test_label_namespace(namespace, central_cluster_client) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_network_partition +def test_create_service_entry(service_entries: List[CustomObject]): + testhelper.test_create_service_entry(service_entries) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_network_partition +def test_deploy_operator(multi_cluster_operator_manual_remediation: Operator): + testhelper.test_deploy_operator(multi_cluster_operator_manual_remediation) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_network_partition +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_network_partition +def test_update_service_entry_block_failed_cluster_traffic( + namespace: str, + central_cluster_client: kubernetes.client.ApiClient, + 
member_cluster_names: List[str], +): + testhelper.test_update_service_entry_block_failed_cluster_traffic( + namespace, central_cluster_client, member_cluster_names + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_network_partition +def test_delete_database_statefulset_in_failed_cluster( + mongodb_multi: MongoDBMulti, + member_cluster_names: list[str], +): + testhelper.test_delete_database_statefulset_in_failed_cluster(mongodb_multi, member_cluster_names) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_network_partition +def test_mongodb_multi_enters_failed_state( + mongodb_multi: MongoDBMulti, + namespace: str, + central_cluster_client: client.ApiClient, +): + testhelper.test_mongodb_multi_enters_failed_state(mongodb_multi, namespace, central_cluster_client) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_network_partition +def test_recover_operator_remove_cluster( + member_cluster_names: List[str], + namespace: str, + central_cluster_client: client.ApiClient, +): + testhelper.test_recover_operator_remove_cluster(member_cluster_names, namespace, central_cluster_client) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_network_partition +def test_mongodb_multi_recovers_removing_cluster(mongodb_multi: MongoDBMulti, member_cluster_names: List[str]): + testhelper.test_mongodb_multi_recovers_removing_cluster(mongodb_multi, member_cluster_names) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set.py new file mode 100644 index 000000000..607250b58 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set.py @@ -0,0 +1,138 @@ +from typing import Dict, List + +import kubernetes +import pytest +from kubernetes import client +from kubetester.kubetester import 
fixture as yaml_fixture +from kubetester.kubetester import skip_if_local +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.conftest import ( + setup_log_rotate_for_agents, +) +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_replica_set as testhelper + +MONGODB_PORT = 30000 +MDB_RESOURCE = "multi-replica-set" + + +@pytest.fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names, + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml( + yaml_fixture("mongodbmulticluster-multi-central-sts-override.yaml"), + MDB_RESOURCE, + namespace, + ) + resource.set_version(custom_mdb_version) + resource["spec"]["persistent"] = False + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + additional_mongod_config = { + "systemLog": {"logAppend": True, "verbosity": 4}, + "operationProfiling": {"mode": "slowOp"}, + "net": {"port": MONGODB_PORT}, + } + + resource["spec"]["additionalMongodConfig"] = additional_mongod_config + setup_log_rotate_for_agents(resource) + + # TODO: incorporate this into the base class. 
+ resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + resource.set_architecture_annotation() + + resource.update() + return resource + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_create_kube_config_file(cluster_clients: Dict, central_cluster_name: str, member_cluster_names: str): + testhelper.test_create_kube_config_file(cluster_clients, central_cluster_name, member_cluster_names) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_statefulset_is_created_across_multiple_clusters( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulset_is_created_across_multiple_clusters(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_pvc_not_created( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + namespace: str, +): + testhelper.test_pvc_not_created(mongodb_multi, member_cluster_clients, namespace) + + +@skip_if_local +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti): + testhelper.test_replica_set_is_reachable(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_statefulset_overrides(mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient]): + testhelper.test_statefulset_overrides(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_headless_service_creation( + 
mongodb_multi: MongoDBMulti, + namespace: str, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_headless_service_creation(mongodb_multi, namespace, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_mongodb_options(mongodb_multi: MongoDBMulti): + testhelper.test_mongodb_options(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_update_additional_options(mongodb_multi: MongoDBMulti, central_cluster_client: kubernetes.client.ApiClient): + testhelper.test_update_additional_options(mongodb_multi, central_cluster_client) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_mongodb_options_were_updated(mongodb_multi: MongoDBMulti): + testhelper.test_mongodb_options_were_updated(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_delete_member_cluster_sts( + namespace: str, + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_delete_member_cluster_sts(namespace, mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_cleanup_on_mdbm_delete(mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient]): + testhelper.test_cleanup_on_mdbm_delete(mongodb_multi, member_cluster_clients) + + +def assert_container_in_sts(container_name: str, sts: client.V1StatefulSet): + testhelper.assert_container_in_sts(container_name, sts) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_deletion.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_deletion.py new file mode 100644 index 000000000..760edc3ad --- /dev/null +++ 
b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_deletion.py @@ -0,0 +1,63 @@ +from typing import List + +import kubernetes +import pytest +from kubetester import try_load +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_replica_set_deletion as testhelper + +MDB_RESOURCE = "multi-replica-set" + + +@pytest.fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) + + if try_load(resource): + return resource + + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + return resource.update() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_deletion +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_deletion +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_deletion +def test_automation_config_has_been_updated(mongodb_multi: MongoDBMulti): + testhelper.test_automation_config_has_been_updated(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_deletion +def test_delete_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_delete_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_deletion +def 
test_deployment_has_been_removed_from_automation_config(): + testhelper.test_deployment_has_been_removed_from_automation_config() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_deletion +def test_kubernetes_resources_have_been_cleaned_up( + mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient] +): + testhelper.test_kubernetes_resources_have_been_cleaned_up(mongodb_multi, member_cluster_clients) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_ignore_unknown_users.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_ignore_unknown_users.py similarity index 56% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_ignore_unknown_users.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_ignore_unknown_users.py index 6178377ea..363eada59 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_ignore_unknown_users.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_ignore_unknown_users.py @@ -1,13 +1,14 @@ import kubernetes -from kubetester.automation_config_tester import AutomationConfigTester -from kubetester.kubetester import KubernetesTester from kubetester.kubetester import fixture as yaml_fixture from kubetester.mongodb_multi import MongoDBMulti from kubetester.operator import Operator -from kubetester.phase import Phase from pytest import fixture, mark from tests.multicluster.conftest import cluster_spec_list +from ..shared import multi_cluster_replica_set_ignore_unknown_users as testhelper + +MDB_RESOURCE = "multi-replica-set" + @fixture(scope="module") def mongodb_multi( @@ -18,8 +19,8 @@ def mongodb_multi( ) -> MongoDBMulti: resource = MongoDBMulti.from_yaml( - 
yaml_fixture("mongodb-multi.yaml"), - "multi-replica-set", + yaml_fixture("mongodbmulticluster-multi.yaml"), + MDB_RESOURCE, namespace, ) resource.set_version(custom_mdb_version) @@ -34,26 +35,21 @@ def mongodb_multi( return resource.update() -@mark.e2e_multi_cluster_replica_set_ignore_unknown_users +@mark.e2e_mongodbmulticluster_multi_cluster_replica_set_ignore_unknown_users def test_replica_set(multi_cluster_operator: Operator, mongodb_multi: MongoDBMulti): - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + testhelper.test_replica_set(multi_cluster_operator, mongodb_multi) -@mark.e2e_multi_cluster_replica_set_ignore_unknown_users +@mark.e2e_mongodbmulticluster_multi_cluster_replica_set_ignore_unknown_users def test_authoritative_set_false(mongodb_multi: MongoDBMulti): - tester = AutomationConfigTester(KubernetesTester.get_automation_config()) - tester.assert_authoritative_set(False) + testhelper.test_authoritative_set_false(mongodb_multi) -@mark.e2e_multi_cluster_replica_set_ignore_unknown_users +@mark.e2e_mongodbmulticluster_multi_cluster_replica_set_ignore_unknown_users def test_set_ignore_unknown_users_false(mongodb_multi: MongoDBMulti): - mongodb_multi.load() - mongodb_multi["spec"]["security"]["authentication"]["ignoreUnknownUsers"] = False - mongodb_multi.update() - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + testhelper.test_set_ignore_unknown_users_false(mongodb_multi) -@mark.e2e_multi_cluster_replica_set_ignore_unknown_users +@mark.e2e_mongodbmulticluster_multi_cluster_replica_set_ignore_unknown_users def test_authoritative_set_true(mongodb_multi: MongoDBMulti): - tester = AutomationConfigTester(KubernetesTester.get_automation_config()) - tester.assert_authoritative_set(True) + testhelper.test_authoritative_set_true(mongodb_multi) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_member_options.py 
b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_member_options.py new file mode 100644 index 000000000..c7e29d666 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_member_options.py @@ -0,0 +1,125 @@ +from typing import Dict + +import kubernetes +import pytest +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_replica_set_member_options as testhelper + +MDB_RESOURCE = "multi-replica-set" + + +@pytest.fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names, + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml( + yaml_fixture("mongodbmulticluster-multi.yaml"), + MDB_RESOURCE, + namespace, + ) + resource.set_version(custom_mdb_version) + member_options = [ + [ + { + "votes": 1, + "priority": "0.3", + "tags": { + "cluster": "cluster-1", + "region": "weur", + }, + }, + { + "votes": 1, + "priority": "0.7", + "tags": { + "cluster": "cluster-1", + "region": "eeur", + }, + }, + ], + [ + { + "votes": 1, + "priority": "0.2", + "tags": { + "cluster": "cluster-2", + "region": "apac", + }, + }, + ], + [ + { + "votes": 1, + "priority": "1.3", + "tags": { + "cluster": "cluster-3", + "region": "nwus", + }, + }, + { + "votes": 1, + "priority": "2.7", + "tags": { + "cluster": "cluster-3", + "region": "seus", + }, + }, + ], + ] + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2], member_options) + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + resource.update() + return resource + + 
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_member_options +def test_create_kube_config_file(cluster_clients: Dict, central_cluster_name: str, member_cluster_names: str): + testhelper.test_create_kube_config_file(cluster_clients, central_cluster_name, member_cluster_names) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_member_options +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_member_options +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_member_options +def test_mongodb_multi_member_options_ac(mongodb_multi: MongoDBMulti): + testhelper.test_mongodb_multi_member_options_ac(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_member_options +def test_mongodb_multi_update_member_options(mongodb_multi: MongoDBMulti): + testhelper.test_mongodb_multi_update_member_options(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_member_options +def test_mongodb_multi_set_member_votes_to_0(mongodb_multi: MongoDBMulti): + testhelper.test_mongodb_multi_set_member_votes_to_0(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_member_options +def test_mongodb_multi_set_invalid_votes_and_priority(mongodb_multi: MongoDBMulti): + testhelper.test_mongodb_multi_set_invalid_votes_and_priority(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_member_options +def test_mongodb_multi_set_recover_valid_member_options(mongodb_multi: MongoDBMulti): + testhelper.test_mongodb_multi_set_recover_valid_member_options(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_member_options +def 
test_mongodb_multi_set_only_one_vote_per_member(mongodb_multi: MongoDBMulti): + testhelper.test_mongodb_multi_set_only_one_vote_per_member(mongodb_multi) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_migration.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_migration.py similarity index 53% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_migration.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_migration.py index 5f43629d0..fc52981d5 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_migration.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_migration.py @@ -4,9 +4,7 @@ import pymongo import pytest from kubetester import try_load -from kubetester.kubetester import assert_statefulset_architecture from kubetester.kubetester import fixture as yaml_fixture -from kubetester.kubetester import get_default_architecture from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongotester import MongoDBBackgroundTester from kubetester.multicluster_client import MultiClusterClient @@ -14,6 +12,8 @@ from kubetester.phase import Phase from tests.multicluster.conftest import cluster_spec_list +from ..shared import multi_cluster_replica_set_migration as testhelper + MDBM_RESOURCE = "multi-replica-set-migration" @@ -25,7 +25,7 @@ def mongodb_multi( custom_mdb_version, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDBM_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDBM_RESOURCE, namespace) resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) resource["spec"]["version"] = 
custom_mdb_version resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) @@ -46,46 +46,28 @@ def mdb_health_checker(mongodb_multi: MongoDBMulti) -> MongoDBBackgroundTester: ) -@pytest.mark.e2e_multi_cluster_replica_set_migration +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_migration def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) -@pytest.mark.e2e_multi_cluster_replica_set_migration +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_migration def test_create_mongodb_multi_running(mongodb_multi: MongoDBMulti): - mongodb_multi.update() - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) + testhelper.test_create_mongodb_multi_running(mongodb_multi) -@pytest.mark.e2e_multi_cluster_replica_set_migration +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_migration def test_start_background_checker(mdb_health_checker: MongoDBBackgroundTester): - mdb_health_checker.start() + testhelper.test_start_background_checker(mdb_health_checker) -@pytest.mark.e2e_multi_cluster_replica_set_migration +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_migration def test_migrate_architecture(mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient]): - """ - If the E2E is running with default architecture as non-static, - then the test will migrate to static and vice versa. 
- """ - original_default_architecture = get_default_architecture() - target_architecture = "non-static" if original_default_architecture == "static" else "static" - - mongodb_multi.trigger_architecture_migration() - - mongodb_multi.load() - assert mongodb_multi["metadata"]["annotations"]["mongodb.com/v1.architecture"] == target_architecture - - mongodb_multi.assert_abandons_phase(Phase.Running, timeout=1800) - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1800) - - statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients) - for statefulset in statefulsets.values(): - assert_statefulset_architecture(statefulset, target_architecture) + testhelper.test_migrate_architecture(mongodb_multi, member_cluster_clients) -@pytest.mark.e2e_multi_cluster_replica_set_migration +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_migration def test_mdb_healthy_throughout_change_version( mdb_health_checker: MongoDBBackgroundTester, ): - mdb_health_checker.assert_healthiness() + testhelper.test_mdb_healthy_throughout_change_version(mdb_health_checker) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_scale_down.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_scale_down.py new file mode 100644 index 000000000..960ced828 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_scale_down.py @@ -0,0 +1,112 @@ +from typing import List + +import kubernetes +import pytest +from kubetester import try_load +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_local +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from 
kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_replica_set_scale_down as testhelper + +RESOURCE_NAME = "multi-replica-set" +BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" + + +@pytest.fixture(scope="module") +def mongodb_multi_unmarshalled( + namespace: str, + multi_cluster_issuer_ca_configmap: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), RESOURCE_NAME, namespace) + resource.set_version(custom_mdb_version) + # start at one member in each cluster + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + resource["spec"]["security"] = { + "certsSecretPrefix": "prefix", + "tls": { + "ca": multi_cluster_issuer_ca_configmap, + }, + } + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource + + +@pytest.fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + + return create_multi_cluster_mongodb_tls_certs( + multi_cluster_issuer, + BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + ) + + +@pytest.fixture(scope="module") +def mongodb_multi(mongodb_multi_unmarshalled: MongoDBMulti, server_certs: str) -> MongoDBMulti: + if try_load(mongodb_multi_unmarshalled): + return mongodb_multi_unmarshalled + + return mongodb_multi_unmarshalled.update() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down 
+def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down +def test_statefulsets_have_been_created_correctly( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulsets_have_been_created_correctly(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down +def test_ops_manager_has_been_updated_correctly_before_scaling(): + testhelper.test_ops_manager_has_been_updated_correctly_before_scaling() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down +def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_scale_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down +def test_statefulsets_have_been_scaled_down_correctly( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulsets_have_been_scaled_down_correctly(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down +def test_ops_manager_has_been_updated_correctly_after_scaling(): + testhelper.test_ops_manager_has_been_updated_correctly_after_scaling() + + +@skip_if_local +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str): + testhelper.test_replica_set_is_reachable(mongodb_multi, ca_path) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_scale_up.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_scale_up.py new file mode 100644 index 000000000..cebe85d8e --- /dev/null +++ 
b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_scale_up.py @@ -0,0 +1,115 @@ +from typing import List + +import kubernetes +import kubetester +import pytest +from kubetester.automation_config_tester import AutomationConfigTester +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_local +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongotester import with_tls +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from kubetester.phase import Phase +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_replica_set_scale_up as testhelper + +RESOURCE_NAME = "multi-replica-set" +BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" + + +@pytest.fixture(scope="module") +def mongodb_multi_unmarshalled( + namespace: str, + multi_cluster_issuer_ca_configmap: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names: List[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), RESOURCE_NAME, namespace) + resource.set_version(custom_mdb_version) + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + resource["spec"]["security"] = { + "certsSecretPrefix": "prefix", + "tls": { + "ca": multi_cluster_issuer_ca_configmap, + }, + } + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource + + +@pytest.fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + + return create_multi_cluster_mongodb_tls_certs( + 
multi_cluster_issuer, + BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + ) + + +@pytest.fixture(scope="module") +def mongodb_multi(mongodb_multi_unmarshalled: MongoDBMulti, server_certs: str) -> MongoDBMulti: + # we have created certs for all 5 members, but want to start at only 3. + mongodb_multi_unmarshalled["spec"]["clusterSpecList"][0]["members"] = 1 + mongodb_multi_unmarshalled["spec"]["clusterSpecList"][1]["members"] = 1 + mongodb_multi_unmarshalled["spec"]["clusterSpecList"][2]["members"] = 1 + return mongodb_multi_unmarshalled.create() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up +def test_statefulsets_have_been_created_correctly( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulsets_have_been_created_correctly(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up +def test_ops_manager_has_been_updated_correctly_before_scaling(): + testhelper.test_ops_manager_has_been_updated_correctly_before_scaling() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up +def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_scale_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up +def test_statefulsets_have_been_scaled_up_correctly( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + 
testhelper.test_statefulsets_have_been_scaled_up_correctly(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up +def test_ops_manager_has_been_updated_correctly_after_scaling(): + testhelper.test_ops_manager_has_been_updated_correctly_after_scaling() + + +@skip_if_local +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str): + testhelper.test_replica_set_is_reachable(mongodb_multi, ca_path) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_test_mtls.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_test_mtls.py new file mode 100644 index 000000000..2f593dc62 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_test_mtls.py @@ -0,0 +1,86 @@ +from typing import List + +import kubernetes +import pytest +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_replica_set_test_mtls as testhelper + +MDB_RESOURCE = "multi-replica-set" + + +@pytest.fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) + resource.set_version(custom_mdb_version) + + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + # TODO: incorporate this into the base class. 
+ resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + resource.update() + return resource + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_mtls_test +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_mtls_test +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_mtls_test +def test_create_mongo_pod_in_separate_namespace( + member_cluster_clients: List[MultiClusterClient], + evergreen_task_id: str, + namespace: str, +): + testhelper.test_create_mongo_pod_in_separate_namespace(member_cluster_clients, evergreen_task_id, namespace) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_mtls_test +def test_connectivity_fails_from_second_namespace( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + namespace: str, +): + testhelper.test_connectivity_fails_from_second_namespace(mongodb_multi, member_cluster_clients, namespace) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_mtls_test +def test_enable_istio_injection( + member_cluster_clients: List[MultiClusterClient], + namespace: str, +): + testhelper.test_enable_istio_injection(member_cluster_clients, namespace) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_mtls_test +def test_delete_existing_mongo_pod(member_cluster_clients: List[MultiClusterClient], namespace: str): + testhelper.test_delete_existing_mongo_pod(member_cluster_clients, namespace) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_mtls_test +def test_create_pod_with_istio_sidecar(member_cluster_clients: List[MultiClusterClient], namespace: str): + testhelper.test_create_pod_with_istio_sidecar(member_cluster_clients, namespace) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_mtls_test +def 
test_connectivity_succeeds_from_second_namespace( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + namespace: str, +): + testhelper.test_connectivity_succeeds_from_second_namespace(mongodb_multi, member_cluster_clients, namespace) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_down_cluster.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_down_cluster.py new file mode 100644 index 000000000..f7f3e9620 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_down_cluster.py @@ -0,0 +1,106 @@ +from typing import List + +import kubernetes +import pytest +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_local +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_scale_down_cluster as testhelper + +RESOURCE_NAME = "multi-replica-set" +BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" + + +@pytest.fixture(scope="module") +def mongodb_multi_unmarshalled( + namespace: str, + multi_cluster_issuer_ca_configmap: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), RESOURCE_NAME, namespace) + resource.set_version(custom_mdb_version) + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + resource["spec"]["security"] = { + "certsSecretPrefix": "prefix", + "tls": { + "ca": 
multi_cluster_issuer_ca_configmap, + }, + } + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource + + +@pytest.fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + + return create_multi_cluster_mongodb_tls_certs( + multi_cluster_issuer, + BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + ) + + +@pytest.fixture(scope="module") +def mongodb_multi(mongodb_multi_unmarshalled: MongoDBMulti, server_certs: str) -> MongoDBMulti: + return mongodb_multi_unmarshalled.create() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_down_cluster +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_down_cluster +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_down_cluster +def test_statefulsets_have_been_created_correctly( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulsets_have_been_created_correctly(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_down_cluster +def test_ops_manager_has_been_updated_correctly_before_scaling(): + testhelper.test_ops_manager_has_been_updated_correctly_before_scaling() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_down_cluster +def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_scale_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_down_cluster +def test_statefulsets_have_been_scaled_down_correctly( + mongodb_multi: 
MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulsets_have_been_scaled_down_correctly(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_down_cluster +def test_ops_manager_has_been_updated_correctly_after_scaling(): + testhelper.test_ops_manager_has_been_updated_correctly_after_scaling() + + +@skip_if_local +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_down_cluster +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str): + testhelper.test_replica_set_is_reachable(mongodb_multi, ca_path) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scale_up_cluster.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_up_cluster.py similarity index 60% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scale_up_cluster.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_up_cluster.py index 3acc73dff..b5146c652 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scale_up_cluster.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_up_cluster.py @@ -8,20 +8,18 @@ random_k8s_name, read_configmap, try_load, - wait_until, ) -from kubetester.automation_config_tester import AutomationConfigTester from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs from kubetester.kubetester import KubernetesTester from kubetester.kubetester import fixture as yaml_fixture from kubetester.kubetester import skip_if_local from kubetester.mongodb_multi import MongoDBMulti -from kubetester.mongotester import with_tls from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase from 
tests.multicluster.conftest import cluster_spec_list +from ..shared import multi_cluster_scale_up_cluster as testhelper + RESOURCE_NAME = "multi-replica-set" BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" @@ -54,7 +52,7 @@ def mongodb_multi_unmarshalled( member_cluster_names: list[str], custom_mdb_version: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), RESOURCE_NAME, namespace) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), RESOURCE_NAME, namespace) resource.set_version(custom_mdb_version) # ensure certs are created for the members during scale up resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [3, 1, 2]) @@ -97,97 +95,68 @@ def mongodb_multi(mongodb_multi_unmarshalled: MongoDBMulti, server_certs: str) - return mongodb_multi_unmarshalled -@pytest.mark.e2e_multi_cluster_scale_up_cluster +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) -@pytest.mark.e2e_multi_cluster_scale_up_cluster +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): - mongodb_multi.update() - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) + testhelper.test_create_mongodb_multi(mongodb_multi) -@pytest.mark.e2e_multi_cluster_scale_up_cluster +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster def test_statefulsets_have_been_created_correctly( mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient], ): - # read all statefulsets except the last one - mongodb_multi.assert_statefulsets_are_ready(member_cluster_clients[:-1]) + testhelper.test_statefulsets_have_been_created_correctly(mongodb_multi, member_cluster_clients) -@pytest.mark.e2e_multi_cluster_scale_up_cluster 
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster def test_ops_manager_has_been_updated_correctly_before_scaling(): - ac = AutomationConfigTester() - ac.assert_processes_size(3) + testhelper.test_ops_manager_has_been_updated_correctly_before_scaling() -@pytest.mark.e2e_multi_cluster_scale_up_cluster +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient]): - mongodb_multi["spec"]["clusterSpecList"].append( - {"members": 2, "clusterName": member_cluster_clients[2].cluster_name} - ) - mongodb_multi.update() - mongodb_multi.assert_abandons_phase(Phase.Running, timeout=120) - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1800) + testhelper.test_scale_mongodb_multi(mongodb_multi, member_cluster_clients) -@pytest.mark.e2e_multi_cluster_scale_up_cluster +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster def test_statefulsets_have_been_scaled_up_correctly( mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient], ): - mongodb_multi.assert_statefulsets_are_ready(member_cluster_clients, timeout=60) + testhelper.test_statefulsets_have_been_scaled_up_correctly(mongodb_multi, member_cluster_clients) -@pytest.mark.e2e_multi_cluster_scale_up_cluster +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster def test_ops_manager_has_been_updated_correctly_after_scaling(): - ac = AutomationConfigTester() - ac.assert_processes_size(5) + testhelper.test_ops_manager_has_been_updated_correctly_after_scaling() @skip_if_local -@pytest.mark.e2e_multi_cluster_scale_up_cluster +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str): - tester = mongodb_multi.tester() - tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) + testhelper.test_replica_set_is_reachable(mongodb_multi, 
ca_path) # From here on, the tests are for verifying that we can change the project of the MongoDBMulti resource even with # non-sequential member ids in the replicaset. -@pytest.mark.e2e_multi_cluster_scale_up_cluster +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster class TestNonSequentialMemberIdsInReplicaSet(KubernetesTester): def test_scale_up_first_cluster( self, mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient] ): - # Scale up the first cluster to 3 members. This will lead to non-sequential member ids in the replicaset. - # multi-replica-set-0-0 : 0 - # multi-replica-set-0-1 : 1 - # multi-replica-set-0-2 : 5 - # multi-replica-set-1-0 : 2 - # multi-replica-set-2-0 : 3 - # multi-replica-set-2-1 : 4 - - mongodb_multi["spec"]["clusterSpecList"][0]["members"] = 3 - mongodb_multi.update() - - mongodb_multi.assert_statefulsets_are_ready(member_cluster_clients) - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) + testhelper.TestNonSequentialMemberIdsInReplicaSet.test_scale_up_first_cluster( + self, mongodb_multi, member_cluster_clients + ) def test_change_project(self, mongodb_multi: MongoDBMulti, new_project_configmap: str): - oldRsMembers = mongodb_multi.get_automation_config_tester().get_replica_set_members(mongodb_multi.name) - - mongodb_multi["spec"]["opsManager"]["configMapRef"]["name"] = new_project_configmap - mongodb_multi.update() - - mongodb_multi.assert_abandons_phase(phase=Phase.Running, timeout=300) - mongodb_multi.assert_reaches_phase(phase=Phase.Running, timeout=600) - - newRsMembers = mongodb_multi.get_automation_config_tester().get_replica_set_members(mongodb_multi.name) - - # Assert that the replica set member ids have not changed after changing the project. 
- assert oldRsMembers == newRsMembers + testhelper.TestNonSequentialMemberIdsInReplicaSet.test_change_project( + self, mongodb_multi, new_project_configmap + ) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster.py new file mode 100644 index 000000000..1fd805bbf --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster.py @@ -0,0 +1,130 @@ +from typing import Callable, List + +import kubernetes +import pytest +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_local +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_scale_up_cluster_new_cluster as testhelper + +RESOURCE_NAME = "multi-replica-set" +BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" + + +@pytest.fixture(scope="module") +def mongodb_multi_unmarshalled( + namespace: str, + multi_cluster_issuer_ca_configmap: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), RESOURCE_NAME, namespace) + resource.set_version(custom_mdb_version) + # ensure certs are created for the members during scale up + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + resource["spec"]["security"] = { + "certsSecretPrefix": "prefix", + "tls": { + "ca": 
multi_cluster_issuer_ca_configmap, + }, + } + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource + + +@pytest.fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + return create_multi_cluster_mongodb_tls_certs( + multi_cluster_issuer, + BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + ) + + +@pytest.fixture(scope="module") +def mongodb_multi(mongodb_multi_unmarshalled: MongoDBMulti, server_certs: str) -> MongoDBMulti: + mongodb_multi_unmarshalled["spec"]["clusterSpecList"].pop() + return mongodb_multi_unmarshalled.create() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster +def test_deploy_operator( + install_multi_cluster_operator_set_members_fn: Callable[[List[str]], Operator], + member_cluster_names: List[str], + namespace: str, +): + testhelper.test_deploy_operator(install_multi_cluster_operator_set_members_fn, member_cluster_names, namespace) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster +def test_statefulsets_have_been_created_correctly( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulsets_have_been_created_correctly(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster +def test_ops_manager_has_been_updated_correctly_before_scaling(): + testhelper.test_ops_manager_has_been_updated_correctly_before_scaling() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster 
+def test_delete_deployment(namespace: str, central_cluster_client: kubernetes.client.ApiClient): + testhelper.test_delete_deployment(namespace, central_cluster_client) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster +def test_re_deploy_operator( + install_multi_cluster_operator_set_members_fn: Callable[[List[str]], Operator], + member_cluster_names: List[str], + namespace: str, +): + testhelper.test_re_deploy_operator(install_multi_cluster_operator_set_members_fn, member_cluster_names, namespace) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster +def test_add_new_cluster_to_mongodb_multi_resource( + mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient] +): + testhelper.test_add_new_cluster_to_mongodb_multi_resource(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster +def test_statefulsets_have_been_created_correctly_after_cluster_addition( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulsets_have_been_created_correctly_after_cluster_addition( + mongodb_multi, member_cluster_clients + ) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster +def test_ops_manager_has_been_updated_correctly_after_scaling(): + testhelper.test_ops_manager_has_been_updated_correctly_after_scaling() + + +@skip_if_local +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str): + testhelper.test_replica_set_is_reachable(mongodb_multi, ca_path) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scram.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scram.py new file mode 100644 index 000000000..d9941c298 
--- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scram.py @@ -0,0 +1,144 @@ +from typing import List + +import kubernetes +import pytest +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongodb_user import MongoDBUser +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_scram as testhelper + +MDB_RESOURCE = "multi-replica-set-scram" +USER_NAME = "my-user-1" +USER_RESOURCE = "multi-replica-set-scram-user" +PASSWORD_SECRET_NAME = "mms-user-1-password" + + +@pytest.fixture(scope="function") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names, + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) + resource.set_version(custom_mdb_version) + + resource["spec"]["security"] = { + "authentication": { + "agents": {"mode": "MONGODB-CR"}, + "enabled": True, + "modes": ["SCRAM-SHA-1", "SCRAM-SHA-256", "MONGODB-CR"], + } + } + + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + return resource + + +@pytest.fixture(scope="function") +def mongodb_user(central_cluster_client: kubernetes.client.ApiClient, namespace: str) -> MongoDBUser: + resource = MongoDBUser.from_yaml(yaml_fixture("scram-sha-user.yaml"), USER_RESOURCE, namespace) + + resource["spec"]["username"] = USER_NAME + resource["spec"]["passwordSecretKeyRef"] = { + "name": PASSWORD_SECRET_NAME, + "key": "password", + } + resource["spec"]["mongodbResourceRef"]["name"] = MDB_RESOURCE + resource.api = 
kubernetes.client.CustomObjectsApi(central_cluster_client) + + return resource + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_create_mongodb_user( + central_cluster_client: kubernetes.client.ApiClient, + mongodb_user: MongoDBUser, + namespace: str, +): + testhelper.test_create_mongodb_user(central_cluster_client, mongodb_user, namespace) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_create_mongodb_multi_with_scram(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi_with_scram(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_user_reaches_updated( + central_cluster_client: kubernetes.client.ApiClient, + mongodb_user: MongoDBUser, +): + testhelper.test_user_reaches_updated(central_cluster_client, mongodb_user) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_replica_set_connectivity_using_user_password(mongodb_multi: MongoDBMulti): + testhelper.test_replica_set_connectivity_using_user_password(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_change_password_and_check_connectivity( + namespace: str, + mongodb_multi: MongoDBMulti, + central_cluster_client: kubernetes.client.ApiClient, +): + testhelper.test_change_password_and_check_connectivity(namespace, mongodb_multi, central_cluster_client) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_user_cannot_authenticate_with_old_password(mongodb_multi: MongoDBMulti): + testhelper.test_user_cannot_authenticate_with_old_password(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_connection_string_secret_was_created( + namespace: str, + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + 
testhelper.test_connection_string_secret_was_created(namespace, mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_om_configured_correctly(): + testhelper.test_om_configured_correctly() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_replica_set_connectivity(mongodb_multi: MongoDBMulti): + testhelper.test_replica_set_connectivity(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_replica_set_connectivity_from_connection_string_standard( + namespace: str, + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_replica_set_connectivity_from_connection_string_standard( + namespace, mongodb_multi, member_cluster_clients + ) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_replica_set_connectivity_from_connection_string_standard_srv( + namespace: str, + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_replica_set_connectivity_from_connection_string_standard_srv( + namespace, mongodb_multi, member_cluster_clients + ) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_split_horizon.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_split_horizon.py similarity index 71% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_split_horizon.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_split_horizon.py index c32b8c38a..7d8be4df1 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_split_horizon.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_split_horizon.py @@ -1,30 +1,25 @@ from typing import List import kubernetes -import yaml from kubetester.certs_mongodb_multi 
import create_multi_cluster_mongodb_tls_certs -from kubetester.kubetester import KubernetesTester from kubetester.kubetester import fixture as yaml_fixture from kubetester.kubetester import skip_if_local from kubetester.mongodb_multi import MongoDBMulti -from kubetester.mongotester import with_tls from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase from pytest import fixture, mark +from ..shared import multi_cluster_split_horizon as testhelper + CERT_SECRET_PREFIX = "clustercert" MDB_RESOURCE = "multi-cluster-replica-set" BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" -USER_NAME = "my-user-1" -PASSWORD_SECRET_NAME = "mms-user-1-password" -USER_PASSWORD = "my-password" # This test will set up an environment which will configure a resource with split horizon enabled. # Steps to run this test. # 1. Change the nodenames under "additional_domains" -# 2. Run this test with: `make e2e test=e2e_multi_cluster_split_horizon light=true local=true`. +# 2. Run this test with: `make e2e test=e2e_mongodbmulticluster_multi_cluster_split_horizon light=true local=true`. # 3. Wait for the test to pass (this means the environment is set up.) # 4. 
Exec into any database pod and note the contents of the files referenced by the fields # * net.tls.certificateKeyFile @@ -55,7 +50,9 @@ @fixture(scope="module") def mongodb_multi_unmarshalled(namespace: str) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi-split-horizon.yaml"), MDB_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml( + yaml_fixture("mongodbmulticluster-multi-split-horizon.yaml"), MDB_RESOURCE, namespace + ) return resource @@ -102,48 +99,25 @@ def mongodb_multi( return resource.create() -@mark.e2e_multi_cluster_split_horizon +@mark.e2e_mongodbmulticluster_multi_cluster_split_horizon def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) -@mark.e2e_multi_cluster_split_horizon +@mark.e2e_mongodbmulticluster_multi_cluster_split_horizon def test_deploy_mongodb_multi_with_tls( mongodb_multi: MongoDBMulti, namespace: str, ): - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + testhelper.test_deploy_mongodb_multi_with_tls(mongodb_multi, namespace) -@mark.e2e_multi_cluster_split_horizon +@mark.e2e_mongodbmulticluster_multi_cluster_split_horizon def test_create_node_ports(mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient]): - for mcc in member_cluster_clients: - with open( - yaml_fixture(f"split-horizon-node-ports/split-horizon-node-port.yaml"), - "r", - ) as f: - service_body = yaml.safe_load(f.read()) - - # configure labels and selectors - service_body["metadata"]["labels"][ - "mongodbmulticluster" - ] = f"{mongodb_multi.namespace}-{mongodb_multi.name}" - service_body["metadata"]["labels"][ - "statefulset.kubernetes.io/pod-name" - ] = f"{mongodb_multi.name}-{mcc.cluster_index}-0" - service_body["spec"]["selector"][ - "statefulset.kubernetes.io/pod-name" - ] = f"{mongodb_multi.name}-{mcc.cluster_index}-0" - - KubernetesTester.create_service( - mongodb_multi.namespace, - 
body=service_body, - api_client=mcc.api_client, - ) + testhelper.test_create_node_ports(mongodb_multi, member_cluster_clients) @skip_if_local -@mark.e2e_multi_cluster_split_horizon +@mark.e2e_mongodbmulticluster_multi_cluster_split_horizon def test_tls_connectivity(mongodb_multi: MongoDBMulti, ca_path: str): - tester = mongodb_multi.tester() - tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) + testhelper.test_tls_connectivity(mongodb_multi, ca_path) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_sts_override.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_sts_override.py new file mode 100644 index 000000000..1d21aff50 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_sts_override.py @@ -0,0 +1,59 @@ +from typing import List + +import kubernetes +import pytest +from kubernetes import client +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator + +from ..shared import multi_cluster_sts_override as testhelper + +MDB_RESOURCE = "multi-replica-set-sts-override" + + +@pytest.fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml( + yaml_fixture("mongodbmulticluster-multi-sts-override.yaml"), + MDB_RESOURCE, + namespace, + ) + resource.set_version(custom_mdb_version) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource.update() + + +@pytest.mark.e2e_mongodbmulticluster_multi_sts_override +def test_deploy_operator(multi_cluster_operator: Operator): + 
testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_sts_override +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_sts_override +def test_statefulset_overrides(mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient]): + testhelper.test_statefulset_overrides(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_sts_override +def test_access_modes_pvc( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + namespace: str, +): + testhelper.test_access_modes_pvc(mongodb_multi, member_cluster_clients, namespace) + + +def assert_container_in_sts(container_name: str, sts: client.V1StatefulSet): + testhelper.assert_container_in_sts(container_name, sts) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_no_mesh.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_no_mesh.py new file mode 100644 index 000000000..0747f020d --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_no_mesh.py @@ -0,0 +1,198 @@ +from typing import List + +import kubernetes +from kubernetes import client +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from pytest import fixture, mark +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_tls_no_mesh as testhelper + +CERT_SECRET_PREFIX = "clustercert" +MDB_RESOURCE = "multi-cluster-replica-set" +BUNDLE_SECRET_NAME = 
f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" + + +@fixture(scope="module") +def mongodb_multi_unmarshalled( + namespace: str, member_cluster_names: List[str], custom_mdb_version: str +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) + resource.set_version(custom_mdb_version) + resource["spec"]["persistent"] = False + # These domains map 1:1 to the CoreDNS file. Please be mindful when updating them. + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 2, 2]) + + resource["spec"]["externalAccess"] = {} + resource["spec"]["clusterSpecList"][0]["externalAccess"] = { + "externalDomain": "kind-e2e-cluster-1.interconnected", + "externalService": { + "spec": { + "type": "LoadBalancer", + "publishNotReadyAddresses": False, + "ports": [ + { + "name": "mongodb", + "port": 27017, + }, + { + "name": "backup", + "port": 27018, + }, + { + "name": "testing0", + "port": 27019, + }, + ], + } + }, + } + resource["spec"]["clusterSpecList"][1]["externalAccess"] = { + "externalDomain": "kind-e2e-cluster-2.interconnected", + "externalService": { + "spec": { + "type": "LoadBalancer", + "publishNotReadyAddresses": False, + "ports": [ + { + "name": "mongodb", + "port": 27017, + }, + { + "name": "backup", + "port": 27018, + }, + { + "name": "testing1", + "port": 27019, + }, + ], + } + }, + } + resource["spec"]["clusterSpecList"][2]["externalAccess"] = { + "externalDomain": "kind-e2e-cluster-3.interconnected", + "externalService": { + "spec": { + "type": "LoadBalancer", + "publishNotReadyAddresses": False, + "ports": [ + { + "name": "mongodb", + "port": 27017, + }, + { + "name": "backup", + "port": 27018, + }, + { + "name": "testing2", + "port": 27019, + }, + ], + } + }, + } + + return resource + + +@fixture(scope="module") +def disable_istio( + multi_cluster_operator: Operator, + namespace: str, + member_cluster_clients: List[MultiClusterClient], +): + for mcc in 
member_cluster_clients: + api = client.CoreV1Api(api_client=mcc.api_client) + labels = {"istio-injection": "disabled"} + ns = api.read_namespace(name=namespace) + ns.metadata.labels.update(labels) + api.replace_namespace(name=namespace, body=ns) + return None + + +@fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + disable_istio, + namespace: str, + mongodb_multi_unmarshalled: MongoDBMulti, + multi_cluster_issuer_ca_configmap: str, +) -> MongoDBMulti: + mongodb_multi_unmarshalled["spec"]["security"] = { + "certsSecretPrefix": CERT_SECRET_PREFIX, + "tls": { + "ca": multi_cluster_issuer_ca_configmap, + }, + } + mongodb_multi_unmarshalled.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + return mongodb_multi_unmarshalled.update() + + +@fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + return create_multi_cluster_mongodb_tls_certs( + multi_cluster_issuer, + BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_tls_no_mesh +def test_update_coredns(cluster_clients: dict[str, kubernetes.client.ApiClient]): + testhelper.test_update_coredns(cluster_clients) + + +@mark.e2e_mongodbmulticluster_multi_cluster_tls_no_mesh +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@mark.e2e_mongodbmulticluster_multi_cluster_tls_no_mesh +def test_create_mongodb_multi( + mongodb_multi: MongoDBMulti, + namespace: str, + server_certs: str, + multi_cluster_issuer_ca_configmap: str, + member_cluster_clients: List[MultiClusterClient], + member_cluster_names: List[str], +): + testhelper.test_create_mongodb_multi( + mongodb_multi, + namespace, + server_certs, + 
multi_cluster_issuer_ca_configmap, + member_cluster_clients, + member_cluster_names, + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_tls_no_mesh +def test_service_overrides( + namespace: str, + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_service_overrides(namespace, mongodb_multi, member_cluster_clients) + + +@mark.e2e_mongodbmulticluster_multi_cluster_tls_no_mesh +def test_placeholders_in_external_services( + namespace: str, + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_placeholders_in_external_services(namespace, mongodb_multi, member_cluster_clients) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_with_scram.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_with_scram.py new file mode 100644 index 000000000..43ac7a25c --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_with_scram.py @@ -0,0 +1,175 @@ +from typing import List + +import kubernetes +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import ensure_ent_version +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_local +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongodb_user import MongoDBUser +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from pytest import fixture, mark +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_tls_with_scram as testhelper + +CERT_SECRET_PREFIX = "clustercert" +MDB_RESOURCE = "multi-cluster-replica-set" +BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" +USER_NAME = "my-user-1" +USER_RESOURCE = 
"multi-replica-set-scram-user" +PASSWORD_SECRET_NAME = "mms-user-1-password" + + +@fixture(scope="module") +def mongodb_multi_unmarshalled( + namespace: str, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) + resource.set_version(ensure_ent_version(custom_mdb_version)) + resource["spec"]["clusterSpecList"] = cluster_spec_list( + member_cluster_names=member_cluster_names, members=[2, 1, 2] + ) + + return resource + + +@fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + + return create_multi_cluster_mongodb_tls_certs( + multi_cluster_issuer, + BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + ) + + +@fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + server_certs: str, + mongodb_multi_unmarshalled: MongoDBMulti, + multi_cluster_issuer_ca_configmap: str, +) -> MongoDBMulti: + + resource = mongodb_multi_unmarshalled + resource["spec"]["security"] = { + "certsSecretPrefix": CERT_SECRET_PREFIX, + "tls": { + "ca": multi_cluster_issuer_ca_configmap, + }, + } + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource.create() + + +@fixture(scope="module") +def mongodb_user(central_cluster_client: kubernetes.client.ApiClient, namespace: str) -> MongoDBUser: + resource = MongoDBUser.from_yaml(yaml_fixture("mongodb-user.yaml"), USER_RESOURCE, namespace) + + resource["spec"]["username"] = USER_NAME + resource["spec"]["passwordSecretKeyRef"] = { + "name": PASSWORD_SECRET_NAME, + "key": "password", + } + resource["spec"]["mongodbResourceRef"]["name"] = MDB_RESOURCE + resource["spec"]["mongodbResourceRef"]["namespace"] = namespace 
+ resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource.create() + + +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram +def test_deploy_mongodb_multi_with_tls( + mongodb_multi: MongoDBMulti, + namespace: str, + member_cluster_clients: List[MultiClusterClient], +): + + testhelper.test_deploy_mongodb_multi_with_tls(mongodb_multi, namespace, member_cluster_clients) + + +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram +def test_update_mongodb_multi_tls_with_scram( + mongodb_multi: MongoDBMulti, + namespace: str, +): + + testhelper.test_update_mongodb_multi_tls_with_scram(mongodb_multi, namespace) + + +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram +def test_create_mongodb_user( + central_cluster_client: kubernetes.client.ApiClient, + mongodb_user: MongoDBUser, + namespace: str, +): + testhelper.test_create_mongodb_user(central_cluster_client, mongodb_user, namespace) + + +@skip_if_local +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram +def test_tls_connectivity(mongodb_multi: MongoDBMulti, ca_path: str): + testhelper.test_tls_connectivity(mongodb_multi, ca_path) + + +@skip_if_local +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram +def test_replica_set_connectivity_with_scram_and_tls(mongodb_multi: MongoDBMulti, ca_path: str): + testhelper.test_replica_set_connectivity_with_scram_and_tls(mongodb_multi, ca_path) + + +@skip_if_local +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram +def test_replica_set_connectivity_from_connection_string_standard( + namespace: str, + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + ca_path: str, +): + testhelper.test_replica_set_connectivity_from_connection_string_standard( + namespace, mongodb_multi, 
member_cluster_clients, ca_path + ) + + +@skip_if_local +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram +def test_replica_set_connectivity_from_connection_string_standard_srv( + namespace: str, + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + ca_path: str, +): + testhelper.test_replica_set_connectivity_from_connection_string_standard_srv( + namespace, mongodb_multi, member_cluster_clients, ca_path + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram +def test_mongodb_multi_tls_enable_x509( + mongodb_multi: MongoDBMulti, + namespace: str, +): + testhelper.test_mongodb_multi_tls_enable_x509(mongodb_multi, namespace) + + +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram +def test_mongodb_multi_tls_automation_config_was_updated( + mongodb_multi: MongoDBMulti, + namespace: str, +): + testhelper.test_mongodb_multi_tls_automation_config_was_updated(mongodb_multi, namespace) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_tls_with_x509.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_with_x509.py similarity index 71% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_tls_with_x509.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_with_x509.py index c0c421b3f..57d3a4866 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_tls_with_x509.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_with_x509.py @@ -1,24 +1,22 @@ -import tempfile from typing import List import kubernetes -from kubetester.automation_config_tester import AutomationConfigTester -from kubetester.certs import Certificate, create_multi_cluster_x509_user_cert from kubetester.certs_mongodb_multi import ( create_multi_cluster_mongodb_x509_tls_certs, 
create_multi_cluster_x509_agent_certs, ) -from kubetester.kubetester import KubernetesTester, ensure_ent_version +from kubetester.kubetester import ensure_ent_version from kubetester.kubetester import fixture as yaml_fixture from kubetester.kubetester import skip_if_local from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongodb_user import MongoDBUser from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase from pytest import fixture, mark from tests.multicluster.conftest import cluster_spec_list +from ..shared import multi_cluster_tls_with_x509 as testhelper + # TODO This test needs to re-introduce certificate rotation and enabling authentication step by step # See https://jira.mongodb.org/browse/CLOUDP-311366 @@ -31,7 +29,7 @@ @fixture(scope="module") def mongodb_multi_unmarshalled(namespace: str, member_cluster_names, custom_mdb_version: str) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) resource.set_version(ensure_ent_version(custom_mdb_version)) resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) @@ -131,35 +129,32 @@ def mongodb_x509_user(central_cluster_client: kubernetes.client.ApiClient, names return resource -@mark.e2e_multi_cluster_tls_with_x509 +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_x509 def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) -@mark.e2e_multi_cluster_tls_with_x509 +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_x509 def test_deploy_mongodb_multi_with_tls_and_authentication(mongodb_multi: MongoDBMulti, namespace: str): - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + 
testhelper.test_deploy_mongodb_multi_with_tls_and_authentication(mongodb_multi, namespace) -@mark.e2e_multi_cluster_tls_with_x509 +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_x509 def test_ops_manager_state_was_updated_correctly(mongodb_multi: MongoDBMulti): - ac_tester = AutomationConfigTester(KubernetesTester.get_automation_config()) - ac_tester.assert_authentication_enabled(expected_num_deployment_auth_mechanisms=2) - ac_tester.assert_authentication_mechanism_enabled("MONGODB-X509") - ac_tester.assert_internal_cluster_authentication_enabled() + testhelper.test_ops_manager_state_was_updated_correctly(mongodb_multi) -@mark.e2e_multi_cluster_tls_with_x509 +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_x509 def test_create_mongodb_x509_user( central_cluster_client: kubernetes.client.ApiClient, mongodb_x509_user: MongoDBUser, namespace: str, ): - mongodb_x509_user.assert_reaches_phase(Phase.Updated, timeout=100) + testhelper.test_create_mongodb_x509_user(central_cluster_client, mongodb_x509_user, namespace) @skip_if_local -@mark.e2e_multi_cluster_tls_with_x509 +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_x509 def test_x509_user_connectivity( mongodb_multi: MongoDBMulti, central_cluster_client: kubernetes.client.ApiClient, @@ -167,23 +162,12 @@ def test_x509_user_connectivity( namespace: str, ca_path: str, ): - with tempfile.NamedTemporaryFile(delete=False, mode="w") as cert_file: - create_multi_cluster_x509_user_cert( - multi_cluster_issuer, namespace, central_cluster_client, path=cert_file.name - ) - tester = mongodb_multi.tester() - tester.assert_x509_authentication(cert_file_name=cert_file.name, tlsCAFile=ca_path) + testhelper.test_x509_user_connectivity( + mongodb_multi, central_cluster_client, multi_cluster_issuer, namespace, ca_path + ) # TODO Replace and use this method to check that certificate rotation after enabling TLS and authentication mechanisms # keeps the resources reachable and in Running state. 
def assert_certificate_rotation(central_cluster_client, mongodb_multi, namespace, certificate_name): - cert = Certificate(name=certificate_name, namespace=namespace) - cert.api = kubernetes.client.CustomObjectsApi(api_client=central_cluster_client) - cert.load() - cert["spec"]["dnsNames"].append("foo") # Append DNS to cert to rotate the certificate - cert.update() - # FIXME the assertions below need to be replaced with a robust check that the agents are ready - # and the TLS certificates are rotated. - mongodb_multi.assert_abandons_phase(Phase.Running, timeout=100) - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1500) + testhelper.assert_certificate_rotation(central_cluster_client, mongodb_multi, namespace, certificate_name) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_upgrade_downgrade.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_upgrade_downgrade.py similarity index 52% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_upgrade_downgrade.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_upgrade_downgrade.py index 4aba05a9c..947d72785 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_upgrade_downgrade.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_upgrade_downgrade.py @@ -2,14 +2,15 @@ import pymongo import pytest from kubetester import try_load -from kubetester.kubetester import ensure_ent_version, fcv_from_version +from kubetester.kubetester import ensure_ent_version from kubetester.kubetester import fixture as yaml_fixture from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongotester import MongoDBBackgroundTester from kubetester.operator import Operator -from kubetester.phase import Phase from tests.multicluster.conftest import cluster_spec_list +from 
..shared import multi_cluster_upgrade_downgrade as testhelper + MDBM_RESOURCE = "multi-replica-set-upgrade" @@ -21,7 +22,7 @@ def mongodb_multi( custom_mdb_prev_version: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDBM_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDBM_RESOURCE, namespace) resource.set_version(ensure_ent_version(custom_mdb_prev_version)) resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) @@ -42,60 +43,43 @@ def mdb_health_checker(mongodb_multi: MongoDBMulti) -> MongoDBBackgroundTester: ) -@pytest.mark.e2e_multi_cluster_upgrade_downgrade +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) -@pytest.mark.e2e_multi_cluster_upgrade_downgrade +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade def test_create_mongodb_multi_running(mongodb_multi: MongoDBMulti, custom_mdb_prev_version: str): - mongodb_multi.update() - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) - mongodb_multi.tester().assert_version(ensure_ent_version(custom_mdb_prev_version)) + testhelper.test_create_mongodb_multi_running(mongodb_multi, custom_mdb_prev_version) -@pytest.mark.e2e_multi_cluster_upgrade_downgrade +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade def test_start_background_checker(mdb_health_checker: MongoDBBackgroundTester): - mdb_health_checker.start() + testhelper.test_start_background_checker(mdb_health_checker) -@pytest.mark.e2e_multi_cluster_upgrade_downgrade +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade def test_mongodb_multi_upgrade(mongodb_multi: MongoDBMulti, custom_mdb_prev_version: str, 
custom_mdb_version: str): - mongodb_multi.load() - mongodb_multi["spec"]["version"] = ensure_ent_version(custom_mdb_version) - mongodb_multi["spec"]["featureCompatibilityVersion"] = fcv_from_version(custom_mdb_prev_version) - mongodb_multi.update() - - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) + testhelper.test_mongodb_multi_upgrade(mongodb_multi, custom_mdb_prev_version, custom_mdb_version) - mongodb_multi.tester().assert_version(ensure_ent_version(custom_mdb_version)) - -@pytest.mark.e2e_multi_cluster_upgrade_downgrade +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade def test_upgraded_replica_set_is_reachable(mongodb_multi: MongoDBMulti): - tester = mongodb_multi.tester() - tester.assert_connectivity() + testhelper.test_upgraded_replica_set_is_reachable(mongodb_multi) -@pytest.mark.e2e_multi_cluster_upgrade_downgrade +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade def test_mongodb_multi_downgrade(mongodb_multi: MongoDBMulti, custom_mdb_prev_version: str): - mongodb_multi.load() - mongodb_multi["spec"]["version"] = ensure_ent_version(custom_mdb_prev_version) - mongodb_multi["spec"]["featureCompatibilityVersion"] = fcv_from_version(custom_mdb_prev_version) - mongodb_multi.update() - - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) - mongodb_multi.tester().assert_version(ensure_ent_version(custom_mdb_prev_version)) + testhelper.test_mongodb_multi_downgrade(mongodb_multi, custom_mdb_prev_version) -@pytest.mark.e2e_multi_cluster_upgrade_downgrade +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade def test_downgraded_replica_set_is_reachable(mongodb_multi: MongoDBMulti): - tester = mongodb_multi.tester() - tester.assert_connectivity() + testhelper.test_downgraded_replica_set_is_reachable(mongodb_multi) -@pytest.mark.e2e_multi_cluster_upgrade_downgrade +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade def test_mdb_healthy_throughout_change_version( 
mdb_health_checker: MongoDBBackgroundTester, ): - mdb_health_checker.assert_healthiness() + testhelper.test_mdb_healthy_throughout_change_version(mdb_health_checker) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_validation.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_validation.py new file mode 100644 index 000000000..bba02ec46 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_validation.py @@ -0,0 +1,22 @@ +import kubernetes +import pytest +from kubetester.kubetester import KubernetesTester +from kubetester.operator import Operator +from tests.multicluster.shared import multi_cluster_validation as testhelper + +MDBM_RESOURCE = "mongodbmulticluster-multi-cluster.yaml" + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_validation +class TestWebhookValidation(KubernetesTester): + def test_deploy_operator(self, multi_cluster_operator: Operator): + testhelper.TestWebhookValidation.test_deploy_operator(self, multi_cluster_operator) + + def test_unique_cluster_names(self, central_cluster_client: kubernetes.client.ApiClient): + testhelper.TestWebhookValidation.test_unique_cluster_names(self, central_cluster_client, MDBM_RESOURCE) + + def test_only_one_schema(self, central_cluster_client: kubernetes.client.ApiClient): + testhelper.TestWebhookValidation.test_only_one_schema(self, central_cluster_client, MDBM_RESOURCE) + + def test_non_empty_clusterspec_list(self, central_cluster_client: kubernetes.client.ApiClient): + testhelper.TestWebhookValidation.test_non_empty_clusterspec_list(self, central_cluster_client, MDBM_RESOURCE) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_reconcile_races.py b/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_reconcile_races.py index f94035895..5e4dc5e6e 100644 --- 
a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_reconcile_races.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_reconcile_races.py @@ -15,12 +15,11 @@ from kubetester.opsmanager import MongoDBOpsManager from kubetester.phase import Phase from tests.conftest import ( - MULTI_CLUSTER_OPERATOR_NAME, - TELEMETRY_CONFIGMAP_NAME, get_central_cluster_client, get_custom_mdb_version, get_member_cluster_names, ) +from tests.constants import MULTI_CLUSTER_OPERATOR_NAME, TELEMETRY_CONFIGMAP_NAME from tests.multicluster.conftest import cluster_spec_list @@ -74,7 +73,7 @@ def get_replica_set(ops_manager, namespace: str, idx: int) -> MongoDB: def get_mdbmc(ops_manager, namespace: str, idx: int) -> MongoDBMulti: name = f"mdb-{idx}-mc" resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodb-multi-cluster.yaml"), + yaml_fixture("mongodbmulticluster-multi-cluster.yaml"), namespace=namespace, name=name, ).configure(ops_manager, name, api_client=get_central_cluster_client()) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/__init__.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py new file mode 100644 index 000000000..3679de73d --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py @@ -0,0 +1,22 @@ +from typing import List + +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from kubetester.phase import Phase + + +def test_deploy_operator(multi_cluster_operator: Operator): + 
multi_cluster_operator.assert_is_running() + + +def test_create_mongodb_multi( + mongodb_multi: MongoDBMulti | MongoDB, + namespace: str, + server_certs: str, + multi_cluster_issuer_ca_configmap: str, + member_cluster_clients: List[MultiClusterClient], + member_cluster_names: List[str], +): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=2400) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_2_cluster_clusterwide_replicaset.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_2_cluster_clusterwide_replicaset.py new file mode 100644 index 000000000..a786f9951 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_2_cluster_clusterwide_replicaset.py @@ -0,0 +1,121 @@ +from typing import Dict, List + +import kubernetes +from kubetester import ( + create_or_update_configmap, + create_or_update_secret, + read_configmap, + read_secret, +) +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from kubetester.phase import Phase +from tests.multicluster import prepare_multi_cluster_namespaces +from tests.multicluster.conftest import create_namespace + + +def test_create_kube_config_file(cluster_clients: Dict, member_cluster_names: List[str]): + clients = cluster_clients + + assert len(clients) == 2 + assert member_cluster_names[0] in clients + assert member_cluster_names[1] in clients + + +def test_create_namespaces( + namespace: str, + mdba_ns: str, + mdbb_ns: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_clients: List[MultiClusterClient], + evergreen_task_id: str, + multi_cluster_operator_installation_config: Dict[str, str], +): + image_pull_secret_name = multi_cluster_operator_installation_config["registry.imagePullSecrets"] + image_pull_secret_data = read_secret(namespace, image_pull_secret_name, 
api_client=central_cluster_client) + + create_namespace( + central_cluster_client, + member_cluster_clients, + evergreen_task_id, + mdba_ns, + image_pull_secret_name, + image_pull_secret_data, + ) + + create_namespace( + central_cluster_client, + member_cluster_clients, + evergreen_task_id, + mdbb_ns, + image_pull_secret_name, + image_pull_secret_data, + ) + + +def test_deploy_operator(multi_cluster_operator_clustermode: Operator): + multi_cluster_operator_clustermode.assert_is_running() + + +def test_prepare_namespace( + multi_cluster_operator_installation_config: Dict[str, str], + member_cluster_clients: List[MultiClusterClient], + central_cluster_name: str, + mdba_ns: str, + mdbb_ns: str, +): + prepare_multi_cluster_namespaces( + mdba_ns, + multi_cluster_operator_installation_config, + member_cluster_clients, + central_cluster_name, + skip_central_cluster=False, + ) + + prepare_multi_cluster_namespaces( + mdbb_ns, + multi_cluster_operator_installation_config, + member_cluster_clients, + central_cluster_name, + skip_central_cluster=False, + ) + + +def test_copy_configmap_and_secret_across_ns( + namespace: str, + central_cluster_client: kubernetes.client.ApiClient, + multi_cluster_operator_installation_config: Dict[str, str], + mdba_ns: str, + mdbb_ns: str, +): + data = read_configmap(namespace, "my-project", api_client=central_cluster_client) + data["projectName"] = mdba_ns + create_or_update_configmap(mdba_ns, "my-project", data, api_client=central_cluster_client) + + data["projectName"] = mdbb_ns + create_or_update_configmap(mdbb_ns, "my-project", data, api_client=central_cluster_client) + + data = read_secret(namespace, "my-credentials", api_client=central_cluster_client) + create_or_update_secret(mdba_ns, "my-credentials", data, api_client=central_cluster_client) + create_or_update_secret(mdbb_ns, "my-credentials", data, api_client=central_cluster_client) + + +def test_create_mongodb_multi_nsa(mongodb_multi_a: MongoDBMulti | MongoDB): + 
mongodb_multi_a.assert_reaches_phase(Phase.Running, timeout=800) + + +def test_enable_mongodb_multi_nsa_auth(mongodb_multi_a: MongoDBMulti | MongoDB): + mongodb_multi_a.reload() + mongodb_multi_a["spec"]["authentication"] = { + "agents": {"mode": "SCRAM"}, + "enabled": True, + "modes": ["SCRAM"], + } + # persist the change so the operator reconciles SCRAM auth + mongodb_multi_a.update() + + +def test_create_mongodb_multi_nsb(mongodb_multi_b: MongoDBMulti | MongoDB): + mongodb_multi_b.assert_reaches_phase(Phase.Running, timeout=800) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_2_cluster_replicaset.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_2_cluster_replicaset.py new file mode 100644 index 000000000..402c69947 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_2_cluster_replicaset.py @@ -0,0 +1,43 @@ +from typing import Dict, List + +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongotester import with_tls +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from kubetester.phase import Phase + + +def test_create_kube_config_file(cluster_clients: Dict, member_cluster_names: List[str]): + clients = cluster_clients + + assert len(clients) == 2 + assert member_cluster_names[0] in clients + assert member_cluster_names[1] in clients + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + + +def test_statefulset_is_created_across_multiple_clusters( + mongodb_multi: MongoDBMulti | MongoDB, + member_cluster_clients: List[MultiClusterClient], +): + statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients) + cluster_one_client = member_cluster_clients[0] + cluster_one_sts = statefulsets[cluster_one_client.cluster_name] + assert
cluster_one_sts.status.ready_replicas == 2 + + cluster_two_client = member_cluster_clients[1] + cluster_two_sts = statefulsets[cluster_two_client.cluster_name] + assert cluster_two_sts.status.ready_replicas == 1 + + +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB, ca_path: str): + tester = mongodb_multi.tester() + tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_agent_flags.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_agent_flags.py similarity index 71% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_agent_flags.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_agent_flags.py index 1a552f39c..5b887aa53 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_agent_flags.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_agent_flags.py @@ -1,43 +1,19 @@ from typing import List -import kubernetes from kubetester import client from kubetester.kubetester import KubernetesTester -from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from pytest import fixture, mark from tests.common.placeholders import placeholders -from tests.multicluster.conftest import cluster_spec_list -@fixture(scope="module") -def mongodb_multi( - central_cluster_client: kubernetes.client.ApiClient, - namespace: str, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi-cluster.yaml"), "multi-replica-set", namespace) - resource.set_version(custom_mdb_version) - resource["spec"]["clusterSpecList"] = 
cluster_spec_list(member_cluster_names, [2, 1, 2]) - - # override agent startup flags - resource["spec"]["agent"] = {"startupOptions": {"logFile": "/var/log/mongodb-mms-automation/customLogFile"}} - resource["spec"]["agent"]["logLevel"] = "DEBUG" - - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - return resource.update() - - -@mark.e2e_multi_cluster_agent_flags -def test_create_mongodb_multi(multi_cluster_operator: Operator, mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(multi_cluster_operator: Operator, mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) -@mark.e2e_multi_cluster_agent_flags def test_multi_replicaset_has_agent_flags( namespace: str, member_cluster_clients: List[MultiClusterClient], @@ -58,10 +34,9 @@ def test_multi_replicaset_has_agent_flags( assert result != "0" -@mark.e2e_multi_cluster_agent_flags def test_placeholders_in_external_services( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): for cluster_spec_item in mongodb_multi["spec"]["clusterSpecList"]: diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_automated_disaster_recovery.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_automated_disaster_recovery.py similarity index 66% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_automated_disaster_recovery.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_automated_disaster_recovery.py index 57928b907..c7511d639 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_automated_disaster_recovery.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_automated_disaster_recovery.py @@ -5,39 +5,20 @@ from kubernetes import client from kubetester import delete_statefulset, statefulset_is_deleted from 
kubetester.automation_config_tester import AutomationConfigTester -from kubetester.kubetester import KubernetesTester -from kubetester.kubetester import fixture as yaml_fixture -from kubetester.kubetester import run_periodically +from kubetester.kubetester import KubernetesTester, run_periodically +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from pytest import fixture, mark from tests.conftest import get_member_cluster_api_client - -from .conftest import cluster_spec_list, create_service_entries_objects +from tests.multicluster.conftest import ( + create_service_entries_objects, +) FAILED_MEMBER_CLUSTER_NAME = "kind-e2e-cluster-3" -@fixture(scope="module") -def mongodb_multi( - central_cluster_client: kubernetes.client.ApiClient, - namespace: str, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", namespace) - resource.set_version(custom_mdb_version) - resource["spec"]["persistent"] = False - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - - return resource - - -@mark.e2e_multi_cluster_disaster_recovery def test_label_namespace(namespace: str, central_cluster_client: kubernetes.client.ApiClient): api = client.CoreV1Api(api_client=central_cluster_client) @@ -48,26 +29,20 @@ def test_label_namespace(namespace: str, central_cluster_client: kubernetes.clie api.replace_namespace(name=namespace, body=ns) -@mark.e2e_multi_cluster_disaster_recovery def test_create_service_entry(service_entries: List[CustomObject]): for service_entry in service_entries: service_entry.update() -@mark.e2e_multi_cluster_disaster_recovery 
-@mark.e2e_multi_cluster_multi_disaster_recovery def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@mark.e2e_multi_cluster_disaster_recovery -@mark.e2e_multi_cluster_multi_disaster_recovery -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.update() mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) -@mark.e2e_multi_cluster_disaster_recovery def test_update_service_entry_block_failed_cluster_traffic( namespace: str, central_cluster_client: kubernetes.client.ApiClient, @@ -82,16 +57,16 @@ def test_update_service_entry_block_failed_cluster_traffic( service_entry.update() -@mark.e2e_multi_cluster_disaster_recovery def test_mongodb_multi_leaves_running_state( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, ): mongodb_multi.load() mongodb_multi.assert_abandons_phase(Phase.Running, timeout=300) -@mark.e2e_multi_cluster_disaster_recovery -def test_delete_database_statefulset_in_failed_cluster(mongodb_multi: MongoDBMulti, member_cluster_names: list[str]): +def test_delete_database_statefulset_in_failed_cluster( + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_names: list[str] +): failed_cluster_idx = member_cluster_names.index(FAILED_MEMBER_CLUSTER_NAME) sts_name = f"{mongodb_multi.name}-{failed_cluster_idx}" try: @@ -115,22 +90,17 @@ def test_delete_database_statefulset_in_failed_cluster(mongodb_multi: MongoDBMul ) -@mark.e2e_multi_cluster_disaster_recovery -@mark.e2e_multi_cluster_multi_disaster_recovery -def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti): +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB): tester = mongodb_multi.tester() tester.assert_connectivity() -@mark.e2e_multi_cluster_disaster_recovery -def test_replica_reaches_running(mongodb_multi: MongoDBMulti): +def test_replica_reaches_running(mongodb_multi: MongoDBMulti | MongoDB): 
mongodb_multi.load() mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) -@mark.e2e_multi_cluster_disaster_recovery -@mark.e2e_multi_cluster_multi_disaster_recovery -def test_number_numbers_in_ac(mongodb_multi: MongoDBMulti): +def test_number_numbers_in_ac(mongodb_multi: MongoDBMulti | MongoDB): tester = AutomationConfigTester(KubernetesTester.get_automation_config()) desiredmembers = 0 for c in mongodb_multi["spec"]["clusterSpecList"]: @@ -140,9 +110,8 @@ def test_number_numbers_in_ac(mongodb_multi: MongoDBMulti): assert len(processes) == desiredmembers -@mark.e2e_multi_cluster_disaster_recovery def test_sts_count_in_member_cluster( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_names: list[str], member_cluster_clients: List[MultiClusterClient], ): diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore.py new file mode 100644 index 000000000..d118e2b66 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore.py @@ -0,0 +1,219 @@ +import datetime +import time +from typing import List + +import kubernetes +import kubernetes.client +from kubernetes import client +from kubetester import ( + create_or_update_configmap, + get_default_storage_class, + read_service, +) +from kubetester.kubetester import KubernetesTester +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongodb_user import MongoDBUser +from kubetester.multicluster_client import MultiClusterClient +from kubetester.omtester import OMTester +from kubetester.operator import Operator +from kubetester.opsmanager import MongoDBOpsManager +from kubetester.phase import Phase +from tests.conftest import ( + assert_data_got_restored, + update_coredns_hosts, +) + +TEST_DATA = {"_id": "unique_id", "name": "John", "address": 
"Highway 37", "age": 30} + +MONGODB_PORT = 30000 + +HEAD_PATH = "/head/" +OPLOG_RS_NAME = "my-mongodb-oplog" +BLOCKSTORE_RS_NAME = "my-mongodb-blockstore" +USER_PASSWORD = "/qwerty@!#:" + + +def create_project_config_map(om: MongoDBOpsManager, mdb_name, project_name, client, custom_ca): + name = f"{mdb_name}-config" + data = { + "baseUrl": om.om_status().get_url(), + "projectName": project_name, + "sslMMSCAConfigMap": custom_ca, + "orgId": "", + } + + create_or_update_configmap(om.namespace, name, data, client) + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +class TestOpsManagerCreation: + """ + name: Ops Manager successful creation with backup and oplog stores enabled + description: | + Creates an Ops Manager instance with backup enabled. The OM is expected to get to 'Pending' state + eventually as it will wait for oplog db to be created + """ + + def test_create_om( + self, + ops_manager: MongoDBOpsManager, + ): + ops_manager["spec"]["backup"]["headDB"]["storageClass"] = get_default_storage_class() + ops_manager["spec"]["backup"]["members"] = 1 + + ops_manager.update() + + ops_manager.backup_status().assert_reaches_phase( + Phase.Pending, + msg_regexp="The MongoDB object .+ doesn't exist", + timeout=1800, + ) + + def test_daemon_statefulset( + self, + ops_manager: MongoDBOpsManager, + ): + def stateful_set_becomes_ready(): + stateful_set = ops_manager.read_backup_statefulset() + return stateful_set.status.ready_replicas == 1 and stateful_set.status.current_replicas == 1 + + KubernetesTester.wait_until(stateful_set_becomes_ready, timeout=300) + + stateful_set = ops_manager.read_backup_statefulset() + # pod template has volume mount request + assert (HEAD_PATH, "head") in ( + (mount.mount_path, mount.name) for mount in stateful_set.spec.template.spec.containers[0].volume_mounts + ) + + def test_backup_daemon_services_created( + self, + namespace, + central_cluster_client: 
kubernetes.client.ApiClient, + ): + """Backup creates two additional services for queryable backup""" + services = client.CoreV1Api(api_client=central_cluster_client).list_namespaced_service(namespace).items + + backup_services = [s for s in services if s.metadata.name.startswith("om-backup")] + + assert len(backup_services) >= 3 + + +class TestBackupDatabasesAdded: + """name: Creates mongodb resources for oplog and blockstore and waits until OM resource gets to + running state""" + + def test_backup_mdbs_created( + self, + oplog_replica_set: MongoDB, + blockstore_replica_set: MongoDB, + ): + """Creates mongodb databases all at once""" + oplog_replica_set.assert_reaches_phase(Phase.Running) + blockstore_replica_set.assert_reaches_phase(Phase.Running) + + def test_oplog_user_created(self, oplog_user: MongoDBUser): + oplog_user.assert_reaches_phase(Phase.Updated) + + def test_om_failed_oplog_no_user_ref(self, ops_manager: MongoDBOpsManager): + """Waits until Backup is in failed state as blockstore doesn't have reference to the user""" + ops_manager.backup_status().assert_reaches_phase( + Phase.Failed, + msg_regexp=".*is configured to use SCRAM-SHA authentication mode, the user " + "must be specified using 'mongodbUserRef'", + ) + + def test_fix_om(self, ops_manager: MongoDBOpsManager, oplog_user: MongoDBUser): + ops_manager.load() + ops_manager["spec"]["backup"]["opLogStores"][0]["mongodbUserRef"] = {"name": oplog_user.name} + ops_manager.update() + + ops_manager.backup_status().assert_reaches_phase( + Phase.Running, + timeout=200, + ignore_errors=True, + ) + + assert ops_manager.backup_status().get_message() is None + + +class TestBackupForMongodb: + def test_setup_om_connection( + self, + ops_manager: MongoDBOpsManager, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_clients: List[MultiClusterClient], + ): + """ + The base_url makes OM accessible from member clusters via a special interconnected dns address. 
+ """ + ops_manager.load() + external_svc_name = ops_manager.external_svc_name() + svc = read_service(ops_manager.namespace, external_svc_name, api_client=central_cluster_client) + # we have no hostName, but the ip is resolvable. + ip = svc.status.load_balancer.ingress[0].ip + + interconnected_field = f"om-backup.{ops_manager.namespace}.interconnected" + + # let's make sure that every client can connect to OM. + for c in member_cluster_clients: + update_coredns_hosts( + host_mappings=[(ip, interconnected_field)], + api_client=c.api_client, + cluster_name=c.cluster_name, + ) + + # let's make sure that the operator can connect to OM via that given address. + update_coredns_hosts( + host_mappings=[(ip, interconnected_field)], + api_client=central_cluster_client, + cluster_name="central-cluster", + ) + + new_address = f"https://{interconnected_field}:8443" + # updating the central url app setting to point at the external address, + # this allows agents in other clusters to communicate correctly with this OM instance. 
+ ops_manager["spec"]["configuration"]["mms.centralUrl"] = new_address + ops_manager.update() + + def test_mongodb_multi_one_running_state(self, mongodb_multi_one: MongoDBMulti | MongoDB): + # we might fail connection in the beginning since we set a custom dns in coredns + mongodb_multi_one.assert_reaches_phase(Phase.Running, ignore_errors=True, timeout=1200) + + def test_add_test_data(self, mongodb_multi_one_collection): + mongodb_multi_one_collection.insert_one(TEST_DATA) + + def test_mdb_backed_up(self, project_one: OMTester): + project_one.wait_until_backup_snapshots_are_ready(expected_count=1) + + def test_change_mdb_data(self, mongodb_multi_one_collection): + now_millis = time_to_millis(datetime.datetime.now()) + print("\nCurrent time (millis): {}".format(now_millis)) + time.sleep(30) + mongodb_multi_one_collection.insert_one({"foo": "bar"}) + + def test_pit_restore(self, project_one: OMTester): + now_millis = time_to_millis(datetime.datetime.now()) + print("\nCurrent time (millis): {}".format(now_millis)) + + backup_completion_time = project_one.get_latest_backup_completion_time() + print("\nbackup_completion_time: {}".format(backup_completion_time)) + + pit_millis = backup_completion_time + 1500 + + print(f"Restoring back to: {pit_millis}") + + project_one.create_restore_job_pit(pit_millis) + + def test_data_got_restored(self, mongodb_multi_one_collection, mdb_client): + assert_data_got_restored(TEST_DATA, mongodb_multi_one_collection, timeout=1200) + + +def time_to_millis(date_time) -> int: + """https://stackoverflow.com/a/11111177/614239""" + epoch = datetime.datetime.utcfromtimestamp(0) + pit_millis = (date_time - epoch).total_seconds() * 1000 + return pit_millis diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore_no_mesh.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore_no_mesh.py new file mode 100644 index 000000000..68bbaa38d --- /dev/null +++ 
b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore_no_mesh.py @@ -0,0 +1,258 @@ +# This test sets up ops manager in a multicluster "no-mesh" environment. +# It tests the back-up functionality with a multi-cluster replica-set when the replica-set is deployed outside of a service-mesh context. + +import datetime +import time +from typing import List, Optional, Tuple + +import kubernetes +import kubernetes.client +from kubernetes import client +from kubetester import ( + create_or_update_configmap, + get_default_storage_class, + read_service, +) +from kubetester.kubetester import KubernetesTester +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongodb_user import MongoDBUser +from kubetester.multicluster_client import MultiClusterClient +from kubetester.omtester import OMTester +from kubetester.operator import Operator +from kubetester.opsmanager import MongoDBOpsManager +from kubetester.phase import Phase +from tests.conftest import assert_data_got_restored, update_coredns_hosts + +TEST_DATA = {"_id": "unique_id", "name": "John", "address": "Highway 37", "age": 30} + +HEAD_PATH = "/head/" + + +def create_project_config_map(om: MongoDBOpsManager, mdb_name, project_name, client, custom_ca): + name = f"{mdb_name}-config" + data = { + "baseUrl": om.om_status().get_url(), + "projectName": project_name, + "sslMMSCAConfigMap": custom_ca, + "orgId": "", + } + + create_or_update_configmap(om.namespace, name, data, client) + + +def new_om_data_store( + mdb: MongoDB, + id: str, + assignment_enabled: bool = True, + user_name: Optional[str] = None, + password: Optional[str] = None, +) -> dict: + return { + "id": id, + "uri": mdb.mongo_uri(user_name=user_name, password=password), + "ssl": mdb.is_tls_enabled(), + "assignmentEnabled": assignment_enabled, + } + + +def test_update_coredns( + replica_set_external_hosts: List[Tuple[str, str]], + cluster_clients: dict[str, 
kubernetes.client.ApiClient], +): + """ + This test updates the coredns config in the member clusters to allow connecting to the other replica set members + through an external address. + """ + for cluster_name, cluster_api in cluster_clients.items(): + update_coredns_hosts(replica_set_external_hosts, cluster_name, api_client=cluster_api) + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +class TestOpsManagerCreation: + """ + name: Ops Manager successful creation with backup and oplog stores enabled + description: | + Creates an Ops Manager instance with backup enabled. The OM is expected to get to 'Pending' state + eventually as it will wait for oplog db to be created + """ + + def test_create_om( + self, + ops_manager: MongoDBOpsManager, + ): + ops_manager["spec"]["backup"]["headDB"]["storageClass"] = get_default_storage_class() + ops_manager["spec"]["backup"]["members"] = 1 + + ops_manager.update() + + ops_manager.backup_status().assert_reaches_phase( + Phase.Pending, + msg_regexp="The MongoDB object .+ doesn't exist", + timeout=1800, + ) + + def test_daemon_statefulset( + self, + ops_manager: MongoDBOpsManager, + ): + def stateful_set_becomes_ready(): + stateful_set = ops_manager.read_backup_statefulset() + return stateful_set.status.ready_replicas == 1 and stateful_set.status.current_replicas == 1 + + KubernetesTester.wait_until(stateful_set_becomes_ready, timeout=300) + + stateful_set = ops_manager.read_backup_statefulset() + # pod template has volume mount request + assert (HEAD_PATH, "head") in ( + (mount.mount_path, mount.name) for mount in stateful_set.spec.template.spec.containers[0].volume_mounts + ) + + def test_backup_daemon_services_created( + self, + namespace, + central_cluster_client: kubernetes.client.ApiClient, + ): + """Backup creates two additional services for queryable backup""" + services = 
client.CoreV1Api(api_client=central_cluster_client).list_namespaced_service(namespace).items + + backup_services = [s for s in services if s.metadata.name.startswith("om-backup")] + + assert len(backup_services) >= 3 + + +class TestBackupDatabasesAdded: + """name: Creates mongodb resources for oplog and blockstore and waits until OM resource gets to + running state""" + + def test_backup_mdbs_created( + self, + oplog_replica_set: MongoDB, + blockstore_replica_set: MongoDB, + ): + """Creates mongodb databases all at once""" + oplog_replica_set.assert_reaches_phase(Phase.Running) + blockstore_replica_set.assert_reaches_phase(Phase.Running) + + def test_oplog_user_created(self, oplog_user: MongoDBUser): + oplog_user.assert_reaches_phase(Phase.Updated) + + def test_om_failed_oplog_no_user_ref(self, ops_manager: MongoDBOpsManager): + """Waits until Backup is in failed state as blockstore doesn't have reference to the user""" + ops_manager.backup_status().assert_reaches_phase( + Phase.Failed, + msg_regexp=".*is configured to use SCRAM-SHA authentication mode, the user " + "must be specified using 'mongodbUserRef'", + ) + + def test_fix_om(self, ops_manager: MongoDBOpsManager, oplog_user: MongoDBUser): + ops_manager.load() + ops_manager["spec"]["backup"]["opLogStores"][0]["mongodbUserRef"] = {"name": oplog_user.name} + ops_manager.update() + + ops_manager.backup_status().assert_reaches_phase( + Phase.Running, + timeout=200, + ignore_errors=True, + ) + + assert ops_manager.backup_status().get_message() is None + + +class TestBackupForMongodb: + + def test_setup_om_connection( + self, + replica_set_external_hosts: List[Tuple[str, str]], + ops_manager: MongoDBOpsManager, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_clients: List[MultiClusterClient], + ): + """ + test_setup_om_connection makes OM accessible from member clusters via a special interconnected dns address. 
+ """ + ops_manager.load() + external_svc_name = ops_manager.external_svc_name() + svc = read_service(ops_manager.namespace, external_svc_name, api_client=central_cluster_client) + # we have no hostName, but the ip is resolvable. + ip = svc.status.load_balancer.ingress[0].ip + + interconnected_field = f"om-backup.{ops_manager.namespace}.interconnected" + + # let's make sure that every client can connect to OM. + hosts = replica_set_external_hosts[:] + hosts.append((ip, interconnected_field)) + + for c in member_cluster_clients: + update_coredns_hosts( + host_mappings=hosts, + api_client=c.api_client, + cluster_name=c.cluster_name, + ) + + # let's make sure that the operator can connect to OM via that given address. + update_coredns_hosts( + host_mappings=[(ip, interconnected_field)], + api_client=central_cluster_client, + cluster_name="central-cluster", + ) + + new_address = f"https://{interconnected_field}:8443" + # updating the central url app setting to point at the external address, + # this allows agents in other clusters to communicate correctly with this OM instance. 
+ ops_manager["spec"]["configuration"]["mms.centralUrl"] = new_address + ops_manager.update() + + def test_mongodb_multi_one_running_state(self, mongodb_multi_one: MongoDBMulti | MongoDB): + # we might fail connection in the beginning since we set a custom dns in coredns + mongodb_multi_one.assert_reaches_phase(Phase.Running, ignore_errors=True, timeout=1500) + + def test_add_test_data(self, mongodb_multi_one_collection): + max_attempts = 100 + while max_attempts > 0: + try: + mongodb_multi_one_collection.insert_one(TEST_DATA) + return + except Exception as e: + print(e) + max_attempts -= 1 + time.sleep(6) + + def test_mdb_backed_up(self, project_one: OMTester): + project_one.wait_until_backup_snapshots_are_ready(expected_count=1) + + def test_change_mdb_data(self, mongodb_multi_one_collection): + now_millis = time_to_millis(datetime.datetime.now()) + print("\nCurrent time (millis): {}".format(now_millis)) + time.sleep(30) + mongodb_multi_one_collection.insert_one({"foo": "bar"}) + + def test_pit_restore(self, project_one: OMTester): + now_millis = time_to_millis(datetime.datetime.now()) + print("\nCurrent time (millis): {}".format(now_millis)) + + pit_datetme = datetime.datetime.now() - datetime.timedelta(seconds=15) + pit_millis = time_to_millis(pit_datetme) + print("Restoring back to the moment 15 seconds ago (millis): {}".format(pit_millis)) + + project_one.create_restore_job_pit(pit_millis) + + def test_mdb_ready(self, mongodb_multi_one: MongoDBMulti | MongoDB): + # Note: that we are not waiting for the restore jobs to get finished as PIT restore jobs get FINISHED status + # right away. + # But the agent might still do work on the cluster, so we need to wait for that to happen. 
+ mongodb_multi_one.assert_reaches_phase(Phase.Pending) + mongodb_multi_one.assert_reaches_phase(Phase.Running) + + def test_data_got_restored(self, mongodb_multi_one_collection): + assert_data_got_restored(TEST_DATA, mongodb_multi_one_collection, timeout=900) + + +def time_to_millis(date_time) -> int: + """https://stackoverflow.com/a/11111177/614239""" + epoch = datetime.datetime.utcfromtimestamp(0) + pit_millis = (date_time - epoch).total_seconds() * 1000 + return pit_millis diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_cli_recover.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_cli_recover.py new file mode 100644 index 000000000..5116d323d --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_cli_recover.py @@ -0,0 +1,81 @@ +from typing import Callable, List + +import kubernetes +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.operator import Operator +from kubetester.phase import Phase +from tests.conftest import ( + run_kube_config_creation_tool, + run_multi_cluster_recovery_tool, +) +from tests.constants import MULTI_CLUSTER_OPERATOR_NAME + + +def test_deploy_operator( + install_multi_cluster_operator_set_members_fn: Callable[[List[str]], Operator], + member_cluster_names: List[str], + namespace: str, +): + run_kube_config_creation_tool(member_cluster_names[:-1], namespace, namespace, member_cluster_names) + # deploy the operator without the final cluster + operator = install_multi_cluster_operator_set_members_fn(member_cluster_names[:-1]) + operator.assert_is_running() + + +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) + + +def test_recover_operator_add_cluster( + member_cluster_names: List[str], + namespace: str, + central_cluster_client: kubernetes.client.ApiClient, +): + return_code = 
run_multi_cluster_recovery_tool(member_cluster_names, namespace, namespace) + assert return_code == 0 + operator = Operator( + name=MULTI_CLUSTER_OPERATOR_NAME, + namespace=namespace, + api_client=central_cluster_client, + ) + operator._wait_for_operator_ready() + operator.assert_is_running() + + +def test_mongodb_multi_recovers_adding_cluster(mongodb_multi: MongoDBMulti | MongoDB, member_cluster_names: List[str]): + mongodb_multi.load() + + mongodb_multi["spec"]["clusterSpecList"].append({"clusterName": member_cluster_names[-1], "members": 2}) + mongodb_multi.update() + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) + + +def test_recover_operator_remove_cluster( + member_cluster_names: List[str], + namespace: str, + central_cluster_client: kubernetes.client.ApiClient, +): + return_code = run_multi_cluster_recovery_tool(member_cluster_names[1:], namespace, namespace) + assert return_code == 0 + operator = Operator( + name=MULTI_CLUSTER_OPERATOR_NAME, + namespace=namespace, + api_client=central_cluster_client, + ) + operator._wait_for_operator_ready() + operator.assert_is_running() + + +def test_mongodb_multi_recovers_removing_cluster( + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_names: List[str] +): + mongodb_multi.load() + + last_transition_time = mongodb_multi.get_status_last_transition_time() + + mongodb_multi["spec"]["clusterSpecList"].pop(0) + mongodb_multi.update() + mongodb_multi.assert_state_transition_happens(last_transition_time) + + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1500) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_clusterwide.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_clusterwide.py new file mode 100644 index 000000000..3ab1c88b1 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_clusterwide.py @@ -0,0 +1,123 @@ +import time +from typing import Dict, List + +import kubernetes +from 
kubernetes import client +from kubetester import create_or_update_configmap, create_or_update_secret, read_secret +from kubetester.kubetester import KubernetesTester +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from kubetester.phase import Phase +from tests.multicluster import prepare_multi_cluster_namespaces +from tests.multicluster.conftest import create_namespace + + +def test_create_namespaces( + namespace: str, + mdba_ns: str, + mdbb_ns: str, + unmanaged_mdb_ns: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_clients: List[MultiClusterClient], + evergreen_task_id: str, + multi_cluster_operator_installation_config: Dict[str, str], +): + image_pull_secret_name = multi_cluster_operator_installation_config["registry.imagePullSecrets"] + image_pull_secret_data = read_secret(namespace, image_pull_secret_name) + + create_namespace( + central_cluster_client, + member_cluster_clients, + evergreen_task_id, + mdba_ns, + image_pull_secret_name, + image_pull_secret_data, + ) + + create_namespace( + central_cluster_client, + member_cluster_clients, + evergreen_task_id, + mdbb_ns, + image_pull_secret_name, + image_pull_secret_data, + ) + + create_namespace( + central_cluster_client, + member_cluster_clients, + evergreen_task_id, + unmanaged_mdb_ns, + image_pull_secret_name, + image_pull_secret_data, + ) + + +def test_prepare_namespace( + multi_cluster_operator_installation_config: Dict[str, str], + member_cluster_clients: List[MultiClusterClient], + central_cluster_name: str, + mdba_ns: str, + mdbb_ns: str, +): + prepare_multi_cluster_namespaces( + mdba_ns, + multi_cluster_operator_installation_config, + member_cluster_clients, + central_cluster_name, + ) + + prepare_multi_cluster_namespaces( + mdbb_ns, + multi_cluster_operator_installation_config, + member_cluster_clients, + central_cluster_name, 
+    )
+
+
+def test_deploy_operator_clustermode(multi_cluster_operator_clustermode: Operator):
+    multi_cluster_operator_clustermode.assert_is_running()
+
+
+def test_deploy_operator(install_operator: Operator):
+    install_operator.assert_is_running()
+
+
+def test_copy_configmap_and_secret_across_ns(
+    namespace: str,
+    central_cluster_client: client.ApiClient,
+    multi_cluster_operator_installation_config: Dict[str, str],
+    mdba_ns: str,
+    mdbb_ns: str,
+):
+    data = KubernetesTester.read_configmap(namespace, "my-project", api_client=central_cluster_client)
+    data["projectName"] = mdba_ns
+    create_or_update_configmap(mdba_ns, "my-project", data, api_client=central_cluster_client)
+
+    data["projectName"] = mdbb_ns
+    create_or_update_configmap(mdbb_ns, "my-project", data, api_client=central_cluster_client)
+
+    data = read_secret(namespace, "my-credentials", api_client=central_cluster_client)
+    create_or_update_secret(mdba_ns, "my-credentials", data, api_client=central_cluster_client)
+    create_or_update_secret(mdbb_ns, "my-credentials", data, api_client=central_cluster_client)
+
+
+def test_create_mongodb_multi_nsa(mongodb_multi_a: MongoDBMulti | MongoDB):
+    mongodb_multi_a.assert_reaches_phase(Phase.Running, timeout=800)
+
+
+def test_create_mongodb_multi_nsb(mongodb_multi_b: MongoDBMulti | MongoDB):
+    mongodb_multi_b.assert_reaches_phase(Phase.Running, timeout=800)
+
+
+def test_create_mongodb_multi_unmanaged(unmanaged_mongodb_multi: MongoDBMulti | MongoDB):
+    """
+    For an unmanaged resource, the status should not be updated!
+ """ + for i in range(10): + time.sleep(5) + + unmanaged_mongodb_multi.reload() + assert "status" not in unmanaged_mongodb_multi diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_dr_connect.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_dr_connect.py similarity index 55% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_dr_connect.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_dr_connect.py index c2cc0d988..bb7d7d467 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_dr_connect.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_dr_connect.py @@ -2,9 +2,7 @@ import time from typing import Dict -import kubernetes -import pytest -from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.operator import Operator from kubetester.phase import Phase @@ -14,52 +12,28 @@ CLUSTER_TO_DELETE = "member-3a" -# this test is intended to run locally, using telepresence. Make sure to configure the cluster_context to api-server mapping -# in the "cluster_host_mapping" fixture before running it. 
It is intented to be run locally with the command: make e2e-telepresence test=e2e_multi_cluster_dr local=true -@pytest.fixture(scope="module") -def mongodb_multi(central_cluster_client: kubernetes.client.ApiClient, namespace: str) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi-dr.yaml"), "multi-replica-set", namespace) - - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - # return resource.load() - return resource.create() - - -@pytest.fixture(scope="module") -def mongodb_multi_collection(mongodb_multi: MongoDBMulti): - collection = mongodb_multi.tester().client["testdb"] - return collection["testcollection"] - - -@pytest.mark.e2e_multi_cluster_dr def test_create_kube_config_file(cluster_clients: Dict): clients = cluster_clients assert len(clients) == 4 -@pytest.mark.e2e_multi_cluster_dr def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_dr -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) -@pytest.mark.e2e_multi_cluster_dr -def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti): +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB): tester = mongodb_multi.tester() tester.assert_connectivity() -@pytest.mark.e2e_multi_cluster_dr -@pytest.mark.flaky(reruns=100, reruns_delay=6) def test_add_test_data(mongodb_multi_collection): mongodb_multi_collection.insert_one(TEST_DATA) -@pytest.mark.e2e_multi_cluster_dr def test_delete_member_3_cluster(): # delete 3rd cluster with gcloud command # gcloud container clusters delete member-3a --zone us-west1-a @@ -79,13 +53,11 @@ def test_delete_member_3_cluster(): ) -@pytest.mark.e2e_multi_cluster_dr -def test_replica_set_is_reachable_after_deletetion(mongodb_multi: MongoDBMulti): +def 
test_replica_set_is_reachable_after_deletetion(mongodb_multi: MongoDBMulti | MongoDB): tester = mongodb_multi.tester() tester.assert_connectivity() -@pytest.mark.e2e_multi_cluster_dr def test_add_test_data_after_deletion(mongodb_multi_collection, capsys): max_attempts = 100 while max_attempts > 0: diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_enable_tls.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_enable_tls.py new file mode 100644 index 000000000..3f83581e8 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_enable_tls.py @@ -0,0 +1,46 @@ +from typing import List + +from kubetester import read_secret +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from kubetester.phase import Phase + +CERT_SECRET_PREFIX = "clustercert" +MDB_RESOURCE = "multi-cluster-replica-set" +BUNDLE_PEM_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert-pem" + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB, namespace: str): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + + +def test_enabled_tls_mongodb_multi( + mongodb_multi: MongoDBMulti | MongoDB, + namespace: str, + server_certs: str, + multi_cluster_issuer_ca_configmap: str, + member_cluster_clients: List[MultiClusterClient], +): + mongodb_multi.load() + mongodb_multi["spec"]["security"] = { + "certsSecretPrefix": CERT_SECRET_PREFIX, + "tls": { + "ca": multi_cluster_issuer_ca_configmap, + }, + } + mongodb_multi.update() + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1300) + + # assert the presence of the generated pem certificates in each member cluster + for client in member_cluster_clients: + read_secret( + 
namespace=namespace, + name=BUNDLE_PEM_SECRET_NAME, + api_client=client.api_client, + ) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_ldap.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_ldap.py new file mode 100644 index 000000000..edae362ed --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_ldap.py @@ -0,0 +1,163 @@ +from kubetester import wait_until +from kubetester.automation_config_tester import AutomationConfigTester +from kubetester.kubetester import KubernetesTester +from kubetester.ldap import LDAP_AUTHENTICATION_MECHANISM +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongodb_user import MongoDBUser +from kubetester.operator import Operator +from kubetester.phase import Phase +from tests.multicluster.conftest import cluster_spec_list + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +def test_mongodb_multi_pending(mongodb_multi: MongoDBMulti | MongoDB): + """ + This function tests CLOUDP-229222. The resource needs to enter the "Pending" state and without the automatic + recovery, it would stay like this forever (since we wouldn't push the new AC with a fix). + """ + mongodb_multi.assert_reaches_phase(Phase.Pending, timeout=100) + + +def test_turn_tls_on_CLOUDP_229222(mongodb_multi: MongoDBMulti | MongoDB): + """ + This function tests CLOUDP-229222. The user attempts to fix the AutomationConfig. + Before updating the AutomationConfig, we need to ensure the operator pushed the wrong one to Ops Manager. 
+ """ + + def wait_for_ac_exists() -> bool: + ac = mongodb_multi.get_automation_config_tester().automation_config + try: + _ = ac["ldap"]["transportSecurity"] + _ = ac["version"] + return True + except KeyError: + return False + + wait_until(wait_for_ac_exists, timeout=200) + current_version = mongodb_multi.get_automation_config_tester().automation_config["version"] + + def wait_for_ac_pushed() -> bool: + ac = mongodb_multi.get_automation_config_tester().automation_config + try: + transport_security = ac["ldap"]["transportSecurity"] + new_version = ac["version"] + if transport_security != "none": + return False + if new_version <= current_version: + return False + return True + except KeyError: + return False + + wait_until(wait_for_ac_pushed, timeout=500) + + resource = mongodb_multi.load() + + resource["spec"]["security"]["authentication"]["ldap"]["transportSecurity"] = "tls" + resource.update() + + +def test_multi_replicaset_CLOUDP_229222(mongodb_multi: MongoDBMulti | MongoDB): + """ + This function tests CLOUDP-229222. The recovery mechanism kicks in and pushes Automation Config. The ReplicaSet + goes into running state. + """ + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1900) + + +def test_restore_mongodb_multi_ldap_configuration(mongodb_multi: MongoDBMulti | MongoDB): + """ + This function restores the initial desired security configuration to carry on with the next tests normally. 
+ """ + resource = mongodb_multi.load() + + resource["spec"]["security"]["authentication"]["modes"] = ["LDAP"] + resource["spec"]["security"]["authentication"]["ldap"]["transportSecurity"] = "tls" + resource["spec"]["security"]["authentication"]["agents"]["mode"] = "LDAP" + + resource.update() + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + + +def test_create_ldap_user(mongodb_multi: MongoDBMulti | MongoDB, user_ldap: MongoDBUser): + user_ldap.assert_reaches_phase(Phase.Updated) + ac = AutomationConfigTester(KubernetesTester.get_automation_config()) + ac.assert_authentication_mechanism_enabled(LDAP_AUTHENTICATION_MECHANISM, active_auth_mechanism=True) + ac.assert_expected_users(1) + + +def test_ldap_user_created_and_can_authenticate( + mongodb_multi: MongoDBMulti | MongoDB, user_ldap: MongoDBUser, ca_path: str +): + tester = mongodb_multi.tester() + tester.assert_ldap_authentication( + username=user_ldap["spec"]["username"], + password=user_ldap.password, + tls_ca_file=ca_path, + attempts=10, + ) + + +def test_ops_manager_state_correctly_updated(mongodb_multi: MongoDBMulti | MongoDB, user_ldap: MongoDBUser): + expected_roles = { + ("admin", "clusterAdmin"), + ("admin", "readWriteAnyDatabase"), + ("admin", "dbAdminAnyDatabase"), + } + ac = AutomationConfigTester(KubernetesTester.get_automation_config()) + ac.assert_expected_users(1) + ac.assert_has_user(user_ldap["spec"]["username"]) + ac.assert_user_has_roles(user_ldap["spec"]["username"], expected_roles) + ac.assert_authentication_mechanism_enabled("PLAIN", active_auth_mechanism=True) + ac.assert_authentication_enabled(expected_num_deployment_auth_mechanisms=1) + + assert "userCacheInvalidationInterval" in ac.automation_config["ldap"] + assert "timeoutMS" in ac.automation_config["ldap"] + assert ac.automation_config["ldap"]["userCacheInvalidationInterval"] == 60 + assert ac.automation_config["ldap"]["timeoutMS"] == 12345 + + +def test_deployment_is_reachable_with_ldap_agent(mongodb_multi: 
MongoDBMulti | MongoDB): + tester = mongodb_multi.tester() + tester.assert_deployment_reachable() + + +def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB, member_cluster_names): + mongodb_multi.reload() + mongodb_multi["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + mongodb_multi.update() + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + + +def test_new_ldap_user_can_authenticate_after_scaling( + mongodb_multi: MongoDBMulti | MongoDB, user_ldap: MongoDBUser, ca_path: str +): + tester = mongodb_multi.tester() + tester.assert_ldap_authentication( + username=user_ldap["spec"]["username"], + password=user_ldap.password, + tls_ca_file=ca_path, + attempts=10, + ) + + +def test_disable_agent_auth(mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.reload() + mongodb_multi["spec"]["security"]["authentication"]["enabled"] = False + mongodb_multi["spec"]["security"]["authentication"]["agents"]["enabled"] = False + mongodb_multi.update() + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + + +def test_mongodb_multi_connectivity_with_no_auth(mongodb_multi: MongoDBMulti | MongoDB): + tester = mongodb_multi.tester() + tester.assert_connectivity() + + +def test_deployment_is_reachable_with_no_auth(mongodb_multi: MongoDBMulti | MongoDB): + tester = mongodb_multi.tester() + tester.assert_deployment_reachable() diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_ldap_custom_roles.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_ldap_custom_roles.py new file mode 100644 index 000000000..db3b9582c --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_ldap_custom_roles.py @@ -0,0 +1,80 @@ +from kubetester.automation_config_tester import AutomationConfigTester +from kubetester.kubetester import KubernetesTester +from kubetester.ldap import LDAP_AUTHENTICATION_MECHANISM +from kubetester.mongodb import 
MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongodb_user import MongoDBUser +from kubetester.operator import Operator +from kubetester.phase import Phase + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +def test_create_mongodb_multi_with_ldap(mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + + +def test_create_ldap_user(mongodb_multi: MongoDBMulti | MongoDB, user_ldap: MongoDBUser): + user_ldap.assert_reaches_phase(Phase.Updated) + ac = AutomationConfigTester(KubernetesTester.get_automation_config()) + ac.assert_authentication_mechanism_enabled(LDAP_AUTHENTICATION_MECHANISM, active_auth_mechanism=False) + ac.assert_expected_users(1) + + +def test_ldap_user_can_write_to_database(mongodb_multi: MongoDBMulti | MongoDB, user_ldap: MongoDBUser, ca_path: str): + tester = mongodb_multi.tester() + tester.assert_ldap_authentication( + username=user_ldap["spec"]["username"], + password=user_ldap.password, + tls_ca_file=ca_path, + db="foo", + collection="foo", + attempts=10, + ) + + +def test_ldap_user_can_write_to_other_collection( + mongodb_multi: MongoDBMulti | MongoDB, user_ldap: MongoDBUser, ca_path: str +): + tester = mongodb_multi.tester() + tester.assert_ldap_authentication( + username=user_ldap["spec"]["username"], + password=user_ldap.password, + tls_ca_file=ca_path, + db="foo", + collection="foo2", + attempts=10, + ) + + +def test_ldap_user_can_write_to_other_database( + mongodb_multi: MongoDBMulti | MongoDB, user_ldap: MongoDBUser, ca_path: str +): + tester = mongodb_multi.tester() + tester.assert_ldap_authentication( + username=user_ldap["spec"]["username"], + password=user_ldap.password, + tls_ca_file=ca_path, + db="foo2", + collection="foo", + attempts=10, + ) + + +def test_automation_config_has_roles(mongodb_multi: MongoDBMulti | MongoDB): + tester = mongodb_multi.get_automation_config_tester() + role = { + 
"role": "cn=users,ou=groups,dc=example,dc=org", + "db": "admin", + "privileges": [ + {"actions": ["insert"], "resource": {"collection": "foo", "db": "foo"}}, + { + "actions": ["insert", "find"], + "resource": {"collection": "", "db": "admin"}, + }, + ], + "authenticationRestrictions": [], + } + tester.assert_expected_role(role_index=0, expected_value=role) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_oidc_m2m_group.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_oidc_m2m_group.py new file mode 100644 index 000000000..28620bb2c --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_oidc_m2m_group.py @@ -0,0 +1,23 @@ +from kubetester.kubetester import KubernetesTester +from kubetester.mongodb import MongoDB, Phase +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.operator import Operator + + +class TestOIDCMultiCluster(KubernetesTester): + def test_deploy_operator(self, multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + def test_create_oidc_replica_set(self, mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + + def test_assert_connectivity(self, mongodb_multi: MongoDBMulti | MongoDB): + tester = mongodb_multi.tester() + tester.assert_oidc_authentication() + + def test_ops_manager_state_updated_correctly(self, mongodb_multi: MongoDBMulti | MongoDB): + tester = mongodb_multi.get_automation_config_tester() + tester.assert_authentication_mechanism_enabled("MONGODB-OIDC", active_auth_mechanism=False) + tester.assert_authentication_enabled(2) + tester.assert_expected_users(0) + tester.assert_authoritative_set(True) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_oidc_m2m_user.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_oidc_m2m_user.py new file mode 100644 index 000000000..ee01979e0 --- /dev/null 
+++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_oidc_m2m_user.py @@ -0,0 +1,27 @@ +from kubetester.kubetester import KubernetesTester +from kubetester.mongodb import MongoDB, Phase +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongodb_user import MongoDBUser +from kubetester.operator import Operator + + +class TestOIDCMultiCluster(KubernetesTester): + def test_deploy_operator(self, multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + def test_create_oidc_replica_set(self, mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + + def test_create_user(self, oidc_user: MongoDBUser): + oidc_user.assert_reaches_phase(Phase.Updated, timeout=800) + + def test_assert_connectivity(self, mongodb_multi: MongoDBMulti | MongoDB): + tester = mongodb_multi.tester() + tester.assert_oidc_authentication() + + def test_ops_manager_state_updated_correctly(self, mongodb_multi: MongoDBMulti | MongoDB): + tester = mongodb_multi.get_automation_config_tester() + tester.assert_authentication_mechanism_enabled("MONGODB-OIDC", active_auth_mechanism=False) + tester.assert_authentication_enabled(2) + tester.assert_expected_users(1) + tester.assert_authoritative_set(True) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_pvc_resize.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_pvc_resize.py similarity index 57% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_pvc_resize.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_pvc_resize.py index adc9cc668..2fdcb2b54 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_pvc_resize.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_pvc_resize.py @@ -1,50 +1,26 @@ from typing import List -import kubernetes -import pytest from kubernetes import client 
-from kubetester import get_statefulset, try_load -from kubetester.kubetester import fixture as yaml_fixture -from kubetester.mongodb_multi import MongoDBMulti +from kubetester import get_statefulset +from kubetester.mongodb_multi import MongoDB, MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from tests.multicluster.conftest import cluster_spec_list RESOURCE_NAME = "multi-replica-set-pvc-resize" RESIZED_STORAGE_SIZE = "2Gi" -@pytest.fixture(scope="module") -def mongodb_multi( - namespace: str, - multi_cluster_issuer_ca_configmap: str, - central_cluster_client: kubernetes.client.ApiClient, - member_cluster_names: list[str], -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi-pvc-resize.yaml"), RESOURCE_NAME, namespace) - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - - try_load(resource) - - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - return resource - - -@pytest.mark.e2e_multi_cluster_pvc_resize def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_pvc_resize -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.update() mongodb_multi.assert_reaches_phase(Phase.Running, timeout=2000) -@pytest.mark.e2e_multi_cluster_pvc_resize -def test_mongodb_multi_resize_pvc_state_changes(mongodb_multi: MongoDBMulti): +def test_mongodb_multi_resize_pvc_state_changes(mongodb_multi: MongoDBMulti | MongoDB): # Update the resource mongodb_multi.load() mongodb_multi["spec"]["statefulSet"]["spec"]["volumeClaimTemplates"][0]["spec"]["resources"]["requests"][ @@ -55,9 +31,8 @@ def test_mongodb_multi_resize_pvc_state_changes(mongodb_multi: MongoDBMulti): mongodb_multi.assert_reaches_phase(Phase.Running, 
timeout=1200) -@pytest.mark.e2e_multi_cluster_pvc_resize def test_mongodb_multi_resize_finished( - mongodb_multi: MongoDBMulti, namespace: str, member_cluster_clients: List[MultiClusterClient] + mongodb_multi: MongoDBMulti | MongoDB, namespace: str, member_cluster_clients: List[MultiClusterClient] ): statefulsets = [] for i, c in enumerate(member_cluster_clients): diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_recover_clusterwide.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_recover_clusterwide.py similarity index 66% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_recover_clusterwide.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_recover_clusterwide.py index 2751be8d6..87bc17ae6 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_recover_clusterwide.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_recover_clusterwide.py @@ -1,4 +1,3 @@ -import os from typing import Dict, List import kubernetes @@ -13,113 +12,26 @@ read_secret, statefulset_is_deleted, ) -from kubetester.kubetester import KubernetesTester -from kubetester.kubetester import fixture as yaml_fixture -from kubetester.kubetester import run_periodically +from kubetester.kubetester import KubernetesTester, run_periodically +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from pytest import fixture, mark from tests.conftest import ( - MULTI_CLUSTER_OPERATOR_NAME, - OPERATOR_NAME, - _install_multi_cluster_operator, - run_kube_config_creation_tool, run_multi_cluster_recovery_tool, ) +from tests.constants import MULTI_CLUSTER_OPERATOR_NAME +from tests.multicluster import prepare_multi_cluster_namespaces +from tests.multicluster.conftest import ( + 
create_service_entries_objects, +) -from . import prepare_multi_cluster_namespaces -from .conftest import cluster_spec_list, create_service_entries_objects from .multi_cluster_clusterwide import create_namespace FAILED_MEMBER_CLUSTER_NAME = "kind-e2e-cluster-3" -@fixture(scope="module") -def mdba_ns(namespace: str): - return "{}-mdb-ns-a".format(namespace) - - -@fixture(scope="module") -def mdbb_ns(namespace: str): - return "{}-mdb-ns-b".format(namespace) - - -@fixture(scope="module") -def mongodb_multi_a( - central_cluster_client: kubernetes.client.ApiClient, - mdba_ns: str, - member_cluster_names: List[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", mdba_ns) - resource.set_version(custom_mdb_version) - - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - resource.update() - return resource - - -@fixture(scope="module") -def mongodb_multi_b( - central_cluster_client: kubernetes.client.ApiClient, - mdbb_ns: str, - member_cluster_names: List[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", mdbb_ns) - resource.set_version(custom_mdb_version) - - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - resource.update() - return resource - - -@fixture(scope="module") -def install_operator( - namespace: str, - central_cluster_name: str, - multi_cluster_operator_installation_config: Dict[str, str], - central_cluster_client: client.ApiClient, - member_cluster_clients: List[MultiClusterClient], - member_cluster_names: List[str], - mdba_ns: str, - mdbb_ns: str, -) -> Operator: - os.environ["HELM_KUBECONTEXT"] = central_cluster_name - 
member_cluster_namespaces = mdba_ns + "," + mdbb_ns - run_kube_config_creation_tool( - member_cluster_names, - namespace, - namespace, - member_cluster_names, - True, - service_account_name=MULTI_CLUSTER_OPERATOR_NAME, - operator_name=OPERATOR_NAME, - ) - - return _install_multi_cluster_operator( - namespace, - multi_cluster_operator_installation_config, - central_cluster_client, - member_cluster_clients, - { - "operator.deployment_name": MULTI_CLUSTER_OPERATOR_NAME, - "operator.name": MULTI_CLUSTER_OPERATOR_NAME, - "operator.createOperatorServiceAccount": "false", - "operator.watchNamespace": member_cluster_namespaces, - "multiCluster.performFailOver": "false", - }, - central_cluster_name, - operator_name=MULTI_CLUSTER_OPERATOR_NAME, - ) - - -@mark.e2e_multi_cluster_recover_clusterwide def test_label_operator_namespace(namespace: str, central_cluster_client: kubernetes.client.ApiClient): api = client.CoreV1Api(api_client=central_cluster_client) @@ -130,7 +42,6 @@ def test_label_operator_namespace(namespace: str, central_cluster_client: kubern api.replace_namespace(name=namespace, body=ns) -@mark.e2e_multi_cluster_recover_clusterwide def test_create_namespaces( namespace: str, mdba_ns: str, @@ -162,13 +73,11 @@ def test_create_namespaces( ) -@mark.e2e_multi_cluster_recover_clusterwide def test_create_service_entry(service_entries: List[CustomObject]): for service_entry in service_entries: service_entry.update() -@mark.e2e_multi_cluster_recover_clusterwide def test_delete_cluster_role_and_binding( central_cluster_client: kubernetes.client.ApiClient, member_cluster_clients: List[MultiClusterClient], @@ -189,12 +98,10 @@ def test_delete_cluster_role_and_binding( delete_cluster_role_binding(name, client.api_client) -@mark.e2e_multi_cluster_recover_clusterwide def test_deploy_operator(install_operator: Operator): install_operator.assert_is_running() -@mark.e2e_multi_cluster_recover_clusterwide def test_prepare_namespace( multi_cluster_operator_installation_config: 
Dict[str, str], member_cluster_clients: List[MultiClusterClient], @@ -217,7 +124,6 @@ def test_prepare_namespace( ) -@mark.e2e_multi_cluster_recover_clusterwide def test_copy_configmap_and_secret_across_ns( namespace: str, central_cluster_client: client.ApiClient, @@ -236,13 +142,11 @@ def test_copy_configmap_and_secret_across_ns( create_or_update_secret(mdbb_ns, "my-credentials", data, api_client=central_cluster_client) -@mark.e2e_multi_cluster_recover_clusterwide -def test_create_mongodb_multi_nsa_nsb(mongodb_multi_a: MongoDBMulti, mongodb_multi_b: MongoDBMulti): +def test_create_mongodb_multi_nsa_nsb(mongodb_multi_a: MongoDBMulti | MongoDB, mongodb_multi_b: MongoDBMulti | MongoDB): mongodb_multi_a.assert_reaches_phase(Phase.Running, timeout=1500) mongodb_multi_b.assert_reaches_phase(Phase.Running, timeout=1500) -@mark.e2e_multi_cluster_recover_clusterwide def test_update_service_entry_block_failed_cluster_traffic( namespace: str, central_cluster_client: kubernetes.client.ApiClient, @@ -259,10 +163,9 @@ def test_update_service_entry_block_failed_cluster_traffic( service_entry.update() -@mark.e2e_multi_cluster_recover_clusterwide def test_delete_database_statefulsets_in_failed_cluster( - mongodb_multi_a: MongoDBMulti, - mongodb_multi_b: MongoDBMulti, + mongodb_multi_a: MongoDBMulti | MongoDB, + mongodb_multi_b: MongoDBMulti | MongoDB, mdba_ns: str, mdbb_ns: str, member_cluster_names: list[str], @@ -308,19 +211,16 @@ def test_delete_database_statefulsets_in_failed_cluster( ) -@mark.e2e_multi_cluster_recover_clusterwide -def test_mongodb_multi_nsa_enters_failed_stated(mongodb_multi_a: MongoDBMulti): +def test_mongodb_multi_nsa_enters_failed_stated(mongodb_multi_a: MongoDBMulti | MongoDB): mongodb_multi_a.load() mongodb_multi_a.assert_reaches_phase(Phase.Failed, timeout=100) -@mark.e2e_multi_cluster_recover_clusterwide -def test_mongodb_multi_nsb_enters_failed_stated(mongodb_multi_b: MongoDBMulti): +def test_mongodb_multi_nsb_enters_failed_stated(mongodb_multi_b: 
MongoDBMulti | MongoDB): mongodb_multi_b.load() mongodb_multi_b.assert_reaches_phase(Phase.Failed, timeout=100) -@mark.e2e_multi_cluster_recover_clusterwide def test_recover_operator_remove_cluster( member_cluster_names: List[str], namespace: str, @@ -339,8 +239,7 @@ def test_recover_operator_remove_cluster( operator.assert_is_running() -@mark.e2e_multi_cluster_recover_clusterwide -def test_mongodb_multi_nsa_recovers_removing_cluster(mongodb_multi_a: MongoDBMulti): +def test_mongodb_multi_nsa_recovers_removing_cluster(mongodb_multi_a: MongoDBMulti | MongoDB): mongodb_multi_a.load() mongodb_multi_a["metadata"]["annotations"]["failedClusters"] = None @@ -350,8 +249,7 @@ def test_mongodb_multi_nsa_recovers_removing_cluster(mongodb_multi_a: MongoDBMul mongodb_multi_a.assert_reaches_phase(Phase.Running, timeout=1500) -@mark.e2e_multi_cluster_recover_clusterwide -def test_mongodb_multi_nsb_recovers_removing_cluster(mongodb_multi_b: MongoDBMulti): +def test_mongodb_multi_nsb_recovers_removing_cluster(mongodb_multi_b: MongoDBMulti | MongoDB): mongodb_multi_b.load() mongodb_multi_b["metadata"]["annotations"]["failedClusters"] = None diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_recover_network_partition.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_recover_network_partition.py similarity index 71% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_recover_network_partition.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_recover_network_partition.py index 88ca7bba3..8e25bf490 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_recover_network_partition.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_recover_network_partition.py @@ -4,41 +4,23 @@ from kubeobject import CustomObject from kubernetes import client from kubetester import delete_statefulset, statefulset_is_deleted -from 
kubetester.kubetester import fixture as yaml_fixture from kubetester.kubetester import run_periodically +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.operator import Operator from kubetester.phase import Phase -from pytest import fixture, mark from tests.conftest import ( - MULTI_CLUSTER_OPERATOR_NAME, get_member_cluster_api_client, run_multi_cluster_recovery_tool, ) - -from .conftest import cluster_spec_list, create_service_entries_objects +from tests.constants import MULTI_CLUSTER_OPERATOR_NAME +from tests.multicluster.conftest import ( + create_service_entries_objects, +) FAILED_MEMBER_CLUSTER_NAME = "kind-e2e-cluster-3" -RESOURCE_NAME = "multi-replica-set" -@fixture(scope="module") -def mongodb_multi( - central_cluster_client: client.ApiClient, - namespace: str, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), RESOURCE_NAME, namespace) - resource.set_version(custom_mdb_version) - resource["spec"]["persistent"] = False - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - resource.api = client.CustomObjectsApi(central_cluster_client) - - return resource - - -@mark.e2e_multi_cluster_recover_network_partition def test_label_namespace(namespace: str, central_cluster_client: client.ApiClient): api = client.CoreV1Api(api_client=central_cluster_client) @@ -50,24 +32,20 @@ def test_label_namespace(namespace: str, central_cluster_client: client.ApiClien api.replace_namespace(name=namespace, body=ns) -@mark.e2e_multi_cluster_recover_network_partition def test_create_service_entry(service_entries: List[CustomObject]): for service_entry in service_entries: service_entry.update() -@mark.e2e_multi_cluster_recover_network_partition def test_deploy_operator(multi_cluster_operator_manual_remediation: Operator): multi_cluster_operator_manual_remediation.assert_is_running() 
-@mark.e2e_multi_cluster_recover_network_partition -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.update() mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) -@mark.e2e_multi_cluster_recover_network_partition def test_update_service_entry_block_failed_cluster_traffic( namespace: str, central_cluster_client: kubernetes.client.ApiClient, @@ -86,9 +64,8 @@ def test_update_service_entry_block_failed_cluster_traffic( service_entry.update() -@mark.e2e_multi_cluster_recover_network_partition def test_delete_database_statefulset_in_failed_cluster( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_names: list[str], ): failed_cluster_idx = member_cluster_names.index(FAILED_MEMBER_CLUSTER_NAME) @@ -114,9 +91,8 @@ def test_delete_database_statefulset_in_failed_cluster( ) -@mark.e2e_multi_cluster_recover_network_partition def test_mongodb_multi_enters_failed_state( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, namespace: str, central_cluster_client: client.ApiClient, ): @@ -124,7 +100,6 @@ def test_mongodb_multi_enters_failed_state( mongodb_multi.assert_reaches_phase(Phase.Failed, timeout=100) -@mark.e2e_multi_cluster_recover_network_partition def test_recover_operator_remove_cluster( member_cluster_names: List[str], namespace: str, @@ -141,8 +116,9 @@ def test_recover_operator_remove_cluster( operator.assert_is_running() -@mark.e2e_multi_cluster_recover_network_partition -def test_mongodb_multi_recovers_removing_cluster(mongodb_multi: MongoDBMulti, member_cluster_names: List[str]): +def test_mongodb_multi_recovers_removing_cluster( + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_names: List[str] +): mongodb_multi.load() last_transition_time = mongodb_multi.get_status_last_transition_time() diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set.py 
b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set.py similarity index 73% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set.py index 599015d46..175eaf857 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set.py @@ -6,57 +6,18 @@ from kubernetes.client.rest import ApiException from kubetester import delete_statefulset, get_statefulset, wait_until from kubetester.kubetester import KubernetesTester -from kubetester.kubetester import fixture as yaml_fixture -from kubetester.kubetester import skip_if_local +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase from tests.conftest import ( assert_log_rotation_process, - member_cluster_clients, - setup_log_rotate_for_agents, ) -from tests.multicluster.conftest import cluster_spec_list MONGODB_PORT = 30000 -@pytest.fixture(scope="module") -def mongodb_multi( - central_cluster_client: kubernetes.client.ApiClient, - namespace: str, - member_cluster_names, - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodb-multi-central-sts-override.yaml"), - "multi-replica-set", - namespace, - ) - resource.set_version(custom_mdb_version) - resource["spec"]["persistent"] = False - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - additional_mongod_config = { - "systemLog": {"logAppend": True, "verbosity": 4}, - "operationProfiling": {"mode": "slowOp"}, - "net": {"port": MONGODB_PORT}, - } - - resource["spec"]["additionalMongodConfig"] = additional_mongod_config - 
setup_log_rotate_for_agents(resource) - - # TODO: incorporate this into the base class. - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - - resource.set_architecture_annotation() - - resource.update() - return resource - - -@pytest.mark.e2e_multi_cluster_replica_set def test_create_kube_config_file(cluster_clients: Dict, central_cluster_name: str, member_cluster_names: str): clients = cluster_clients @@ -66,19 +27,16 @@ def test_create_kube_config_file(cluster_clients: Dict, central_cluster_name: st assert central_cluster_name in clients -@pytest.mark.e2e_multi_cluster_replica_set def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_replica_set -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=2000) -@pytest.mark.e2e_multi_cluster_replica_set def test_statefulset_is_created_across_multiple_clusters( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): def statefulsets_are_ready(): @@ -105,9 +63,8 @@ def statefulsets_are_ready(): wait_until(statefulsets_are_ready, timeout=600) -@pytest.mark.e2e_multi_cluster_replica_set def test_pvc_not_created( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], namespace: str, ): @@ -118,15 +75,12 @@ def test_pvc_not_created( assert e.value.reason == "Not Found" -@skip_if_local -@pytest.mark.e2e_multi_cluster_replica_set -def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti): +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB): tester = mongodb_multi.tester(port=MONGODB_PORT) tester.assert_connectivity() -@pytest.mark.e2e_multi_cluster_replica_set -def test_statefulset_overrides(mongodb_multi: MongoDBMulti, 
member_cluster_clients: List[MultiClusterClient]): +def test_statefulset_overrides(mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient]): statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients) # assert sts.podspec override in cluster1 cluster_one_client = member_cluster_clients[0] @@ -134,9 +88,8 @@ def test_statefulset_overrides(mongodb_multi: MongoDBMulti, member_cluster_clien assert_container_in_sts("sidecar1", cluster_one_sts) -@pytest.mark.e2e_multi_cluster_replica_set def test_headless_service_creation( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, namespace: str, member_cluster_clients: List[MultiClusterClient], ): @@ -157,8 +110,7 @@ def test_headless_service_creation( assert len(ep_two.subsets[0].addresses) == mongodb_multi.get_item_spec(cluster_two_client.cluster_name)["members"] -@pytest.mark.e2e_multi_cluster_replica_set -def test_mongodb_options(mongodb_multi: MongoDBMulti): +def test_mongodb_options(mongodb_multi: MongoDBMulti | MongoDB): automation_config_tester = mongodb_multi.get_automation_config_tester() for process in automation_config_tester.get_replica_set_processes(mongodb_multi.name): assert process["args2_6"]["systemLog"]["verbosity"] == 4 @@ -168,8 +120,9 @@ def test_mongodb_options(mongodb_multi: MongoDBMulti): assert_log_rotation_process(process) -@pytest.mark.e2e_multi_cluster_replica_set -def test_update_additional_options(mongodb_multi: MongoDBMulti, central_cluster_client: kubernetes.client.ApiClient): +def test_update_additional_options( + mongodb_multi: MongoDBMulti | MongoDB, central_cluster_client: kubernetes.client.ApiClient +): mongodb_multi["spec"]["additionalMongodConfig"]["systemLog"]["verbosity"] = 2 mongodb_multi["spec"]["additionalMongodConfig"]["net"]["maxIncomingConnections"] = 100 # update uses json merge+patch which means that deleting keys is done by setting them to None @@ -180,8 +133,7 @@ def test_update_additional_options(mongodb_multi: 
MongoDBMulti, central_cluster_ mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) -@pytest.mark.e2e_multi_cluster_replica_set -def test_mongodb_options_were_updated(mongodb_multi: MongoDBMulti): +def test_mongodb_options_were_updated(mongodb_multi: MongoDBMulti | MongoDB): automation_config_tester = mongodb_multi.get_automation_config_tester() for process in automation_config_tester.get_replica_set_processes(mongodb_multi.name): assert process["args2_6"]["systemLog"]["verbosity"] == 2 @@ -192,10 +144,9 @@ def test_mongodb_options_were_updated(mongodb_multi: MongoDBMulti): assert "mode" not in process["args2_6"]["operationProfiling"] -@pytest.mark.e2e_multi_cluster_replica_set def test_delete_member_cluster_sts( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): sts_name = "{}-0".format(mongodb_multi.name) @@ -223,8 +174,9 @@ def check_if_sts_was_recreated() -> bool: mongodb_multi.assert_reaches_phase(Phase.Running, timeout=400) -@pytest.mark.e2e_multi_cluster_replica_set -def test_cleanup_on_mdbm_delete(mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient]): +def test_cleanup_on_mdbm_delete( + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient] +): statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients) cluster_one_client = member_cluster_clients[0] cluster_one_sts = statefulsets[cluster_one_client.cluster_name] diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_deletion.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_deletion.py similarity index 69% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_deletion.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_deletion.py index fd387cdda..55b769a83 100644 --- 
a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_deletion.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_deletion.py @@ -1,57 +1,34 @@ from typing import List import kubernetes -import pytest -from kubetester import try_load, wait_until +from kubetester import wait_until from kubetester.automation_config_tester import AutomationConfigTester from kubetester.kubetester import KubernetesTester -from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase from tests import test_logger -from tests.multicluster.conftest import cluster_spec_list logger = test_logger.get_test_logger(__name__) -@pytest.fixture(scope="module") -def mongodb_multi( - central_cluster_client: kubernetes.client.ApiClient, - namespace: str, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", namespace) - - if try_load(resource): - return resource - - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - return resource.update() - - -@pytest.mark.e2e_multi_cluster_replica_set_deletion def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_replica_set_deletion -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) -@pytest.mark.e2e_multi_cluster_replica_set_deletion -def test_automation_config_has_been_updated(mongodb_multi: MongoDBMulti): +def test_automation_config_has_been_updated(mongodb_multi: MongoDBMulti | MongoDB): tester 
= AutomationConfigTester(KubernetesTester.get_automation_config()) processes = tester.get_replica_set_processes(mongodb_multi.name) assert len(processes) == 5 -@pytest.mark.e2e_multi_cluster_replica_set_deletion -def test_delete_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_delete_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.delete() def wait_for_deleted() -> bool: @@ -68,7 +45,6 @@ def wait_for_deleted() -> bool: wait_until(wait_for_deleted, timeout=60) -@pytest.mark.e2e_multi_cluster_replica_set_deletion def test_deployment_has_been_removed_from_automation_config(): def wait_until_automation_config_is_clean() -> bool: tester = AutomationConfigTester(KubernetesTester.get_automation_config()) @@ -82,9 +58,8 @@ def wait_until_automation_config_is_clean() -> bool: wait_until(wait_until_automation_config_is_clean, timeout=60) -@pytest.mark.e2e_multi_cluster_replica_set_deletion def test_kubernetes_resources_have_been_cleaned_up( - mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient] + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient] ): def wait_until_secrets_are_removed() -> bool: try: diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_ignore_unknown_users.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_ignore_unknown_users.py new file mode 100644 index 000000000..a9e3c17ff --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_ignore_unknown_users.py @@ -0,0 +1,27 @@ +from kubetester.automation_config_tester import AutomationConfigTester +from kubetester.kubetester import KubernetesTester +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.operator import Operator +from kubetester.phase import Phase + + +def test_replica_set(multi_cluster_operator: Operator, mongodb_multi: MongoDBMulti 
| MongoDB): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + + +def test_authoritative_set_false(mongodb_multi: MongoDBMulti | MongoDB): + tester = AutomationConfigTester(KubernetesTester.get_automation_config()) + tester.assert_authoritative_set(False) + + +def test_set_ignore_unknown_users_false(mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.load() + mongodb_multi["spec"]["security"]["authentication"]["ignoreUnknownUsers"] = False + mongodb_multi.update() + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + + +def test_authoritative_set_true(mongodb_multi: MongoDBMulti | MongoDB): + tester = AutomationConfigTester(KubernetesTester.get_automation_config()) + tester.assert_authoritative_set(True) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_member_options.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_member_options.py similarity index 63% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_member_options.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_member_options.py index 7990943e8..5ca347cbb 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_member_options.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_member_options.py @@ -1,83 +1,11 @@ from typing import Dict -import kubernetes -import pytest -from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.operator import Operator from kubetester.phase import Phase -from tests.multicluster.conftest import cluster_spec_list - - -@pytest.fixture(scope="module") -def mongodb_multi( - central_cluster_client: kubernetes.client.ApiClient, - namespace: str, - member_cluster_names, - custom_mdb_version: str, -) -> 
MongoDBMulti: - resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodb-multi.yaml"), - "multi-replica-set", - namespace, - ) - resource.set_version(custom_mdb_version) - member_options = [ - [ - { - "votes": 1, - "priority": "0.3", - "tags": { - "cluster": "cluster-1", - "region": "weur", - }, - }, - { - "votes": 1, - "priority": "0.7", - "tags": { - "cluster": "cluster-1", - "region": "eeur", - }, - }, - ], - [ - { - "votes": 1, - "priority": "0.2", - "tags": { - "cluster": "cluster-2", - "region": "apac", - }, - }, - ], - [ - { - "votes": 1, - "priority": "1.3", - "tags": { - "cluster": "cluster-3", - "region": "nwus", - }, - }, - { - "votes": 1, - "priority": "2.7", - "tags": { - "cluster": "cluster-3", - "region": "seus", - }, - }, - ], - ] - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2], member_options) - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - - resource.update() - return resource - - -@pytest.mark.e2e_multi_cluster_replica_set_member_options + + def test_create_kube_config_file(cluster_clients: Dict, central_cluster_name: str, member_cluster_names: str): clients = cluster_clients @@ -87,18 +15,15 @@ def test_create_kube_config_file(cluster_clients: Dict, central_cluster_name: st assert central_cluster_name in clients -@pytest.mark.e2e_multi_cluster_replica_set_member_options def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_replica_set_member_options -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) -@pytest.mark.e2e_multi_cluster_replica_set_member_options -def test_mongodb_multi_member_options_ac(mongodb_multi: MongoDBMulti): +def test_mongodb_multi_member_options_ac(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.load() config = 
mongodb_multi.get_automation_config_tester().automation_config rs = config["replicaSets"] @@ -129,8 +54,7 @@ def test_mongodb_multi_member_options_ac(mongodb_multi: MongoDBMulti): assert member5["tags"] == {"cluster": "cluster-3", "region": "seus"} -@pytest.mark.e2e_multi_cluster_replica_set_member_options -def test_mongodb_multi_update_member_options(mongodb_multi: MongoDBMulti): +def test_mongodb_multi_update_member_options(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.load() mongodb_multi["spec"]["clusterSpecList"][0]["memberConfig"][0] = { @@ -158,8 +82,7 @@ def test_mongodb_multi_update_member_options(mongodb_multi: MongoDBMulti): } -@pytest.mark.e2e_multi_cluster_replica_set_member_options -def test_mongodb_multi_set_member_votes_to_0(mongodb_multi: MongoDBMulti): +def test_mongodb_multi_set_member_votes_to_0(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.load() mongodb_multi["spec"]["clusterSpecList"][1]["memberConfig"][0]["votes"] = 0 @@ -175,8 +98,7 @@ def test_mongodb_multi_set_member_votes_to_0(mongodb_multi: MongoDBMulti): assert updated_member["priority"] == 0.0 -@pytest.mark.e2e_multi_cluster_replica_set_member_options -def test_mongodb_multi_set_invalid_votes_and_priority(mongodb_multi: MongoDBMulti): +def test_mongodb_multi_set_invalid_votes_and_priority(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.load() mongodb_multi["spec"]["clusterSpecList"][1]["memberConfig"][0]["votes"] = 0 @@ -188,8 +110,7 @@ def test_mongodb_multi_set_invalid_votes_and_priority(mongodb_multi: MongoDBMult ) -@pytest.mark.e2e_multi_cluster_replica_set_member_options -def test_mongodb_multi_set_recover_valid_member_options(mongodb_multi: MongoDBMulti): +def test_mongodb_multi_set_recover_valid_member_options(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.load() # A member with priority 0.0 could still be a voting member. It cannot become primary and cannot trigger elections. 
# https://www.mongodb.com/docs/v5.0/core/replica-set-priority-0-member/#priority-0-replica-set-members @@ -199,8 +120,7 @@ def test_mongodb_multi_set_recover_valid_member_options(mongodb_multi: MongoDBMu mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) -@pytest.mark.e2e_multi_cluster_replica_set_member_options -def test_mongodb_multi_set_only_one_vote_per_member(mongodb_multi: MongoDBMulti): +def test_mongodb_multi_set_only_one_vote_per_member(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.load() mongodb_multi["spec"]["clusterSpecList"][2]["memberConfig"][1]["votes"] = 3 diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_migration.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_migration.py new file mode 100644 index 000000000..50d4c8efb --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_migration.py @@ -0,0 +1,52 @@ +from typing import List + +from kubetester.kubetester import ( + assert_statefulset_architecture, + get_default_architecture, +) +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongotester import MongoDBBackgroundTester +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from kubetester.phase import Phase + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +def test_create_mongodb_multi_running(mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.update() + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) + + +def test_start_background_checker(mdb_health_checker: MongoDBBackgroundTester): + mdb_health_checker.start() + + +def test_migrate_architecture(mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient]): + """ + If the E2E is running with default architecture as 
non-static, + then the test will migrate to static and vice versa. + """ + original_default_architecture = get_default_architecture() + target_architecture = "non-static" if original_default_architecture == "static" else "static" + + mongodb_multi.trigger_architecture_migration() + + mongodb_multi.load() + assert mongodb_multi["metadata"]["annotations"]["mongodb.com/v1.architecture"] == target_architecture + + mongodb_multi.assert_abandons_phase(Phase.Running, timeout=1800) + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1800) + + statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients) + for statefulset in statefulsets.values(): + assert_statefulset_architecture(statefulset, target_architecture) + + +def test_mdb_healthy_throughout_change_version( + mdb_health_checker: MongoDBBackgroundTester, +): + mdb_health_checker.assert_healthiness() diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_scale_down.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_scale_down.py similarity index 50% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_scale_down.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_scale_down.py index 7ba868397..2c6500e67 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_scale_down.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_scale_down.py @@ -1,84 +1,24 @@ from typing import List -import kubernetes -import pytest -from kubetester import try_load from kubetester.automation_config_tester import AutomationConfigTester -from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs -from kubetester.kubetester import fixture as yaml_fixture -from kubetester.kubetester import skip_if_local +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import 
MongoDBMulti from kubetester.mongotester import with_tls from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from tests.multicluster.conftest import cluster_spec_list - -RESOURCE_NAME = "multi-replica-set" -BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" - - -@pytest.fixture(scope="module") -def mongodb_multi_unmarshalled( - namespace: str, - multi_cluster_issuer_ca_configmap: str, - central_cluster_client: kubernetes.client.ApiClient, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), RESOURCE_NAME, namespace) - resource.set_version(custom_mdb_version) - # start at one member in each cluster - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - resource["spec"]["security"] = { - "certsSecretPrefix": "prefix", - "tls": { - "ca": multi_cluster_issuer_ca_configmap, - }, - } - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - return resource - - -@pytest.fixture(scope="module") -def server_certs( - multi_cluster_issuer: str, - mongodb_multi_unmarshalled: MongoDBMulti, - member_cluster_clients: List[MultiClusterClient], - central_cluster_client: kubernetes.client.ApiClient, -): - - return create_multi_cluster_mongodb_tls_certs( - multi_cluster_issuer, - BUNDLE_SECRET_NAME, - member_cluster_clients, - central_cluster_client, - mongodb_multi_unmarshalled, - ) - - -@pytest.fixture(scope="module") -def mongodb_multi(mongodb_multi_unmarshalled: MongoDBMulti, server_certs: str) -> MongoDBMulti: - if try_load(mongodb_multi_unmarshalled): - return mongodb_multi_unmarshalled - - return mongodb_multi_unmarshalled.update() -@pytest.mark.e2e_multi_cluster_replica_set_scale_down def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() 
-@pytest.mark.e2e_multi_cluster_replica_set_scale_down -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) -@pytest.mark.e2e_multi_cluster_replica_set_scale_down def test_statefulsets_have_been_created_correctly( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients) @@ -95,14 +35,12 @@ def test_statefulsets_have_been_created_correctly( assert cluster_three_sts.status.ready_replicas == 2 -@pytest.mark.e2e_multi_cluster_replica_set_scale_down def test_ops_manager_has_been_updated_correctly_before_scaling(): ac = AutomationConfigTester() ac.assert_processes_size(5) -@pytest.mark.e2e_multi_cluster_replica_set_scale_down -def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.load() mongodb_multi["spec"]["clusterSpecList"][0]["members"] = 1 # Testing scaling down to zero is required to test fix for https://jira.mongodb.org/browse/CLOUDP-324655 @@ -113,9 +51,8 @@ def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1800) -@pytest.mark.e2e_multi_cluster_replica_set_scale_down def test_statefulsets_have_been_scaled_down_correctly( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients) @@ -132,14 +69,11 @@ def test_statefulsets_have_been_scaled_down_correctly( assert cluster_three_sts.status.ready_replicas == 2 -@pytest.mark.e2e_multi_cluster_replica_set_scale_down def test_ops_manager_has_been_updated_correctly_after_scaling(): ac = AutomationConfigTester() ac.assert_processes_size(3) -@skip_if_local 
-@pytest.mark.e2e_multi_cluster_replica_set_scale_down -def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str): +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB, ca_path: str): tester = mongodb_multi.tester() tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_scale_up.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_scale_up.py similarity index 63% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_scale_up.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_scale_up.py index 7640c2c4a..4b18b23b0 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_scale_up.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_scale_up.py @@ -1,84 +1,25 @@ from typing import List -import kubernetes import kubetester -import pytest from kubetester.automation_config_tester import AutomationConfigTester -from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs -from kubetester.kubetester import fixture as yaml_fixture -from kubetester.kubetester import skip_if_local +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongotester import with_tls from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from tests.multicluster.conftest import cluster_spec_list - -RESOURCE_NAME = "multi-replica-set" -BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" - - -@pytest.fixture(scope="module") -def mongodb_multi_unmarshalled( - namespace: str, - multi_cluster_issuer_ca_configmap: str, - central_cluster_client: kubernetes.client.ApiClient, - member_cluster_names: List[str], - 
custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), RESOURCE_NAME, namespace) - resource.set_version(custom_mdb_version) - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - resource["spec"]["security"] = { - "certsSecretPrefix": "prefix", - "tls": { - "ca": multi_cluster_issuer_ca_configmap, - }, - } - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - return resource - - -@pytest.fixture(scope="module") -def server_certs( - multi_cluster_issuer: str, - mongodb_multi_unmarshalled: MongoDBMulti, - member_cluster_clients: List[MultiClusterClient], - central_cluster_client: kubernetes.client.ApiClient, -): - - return create_multi_cluster_mongodb_tls_certs( - multi_cluster_issuer, - BUNDLE_SECRET_NAME, - member_cluster_clients, - central_cluster_client, - mongodb_multi_unmarshalled, - ) - - -@pytest.fixture(scope="module") -def mongodb_multi(mongodb_multi_unmarshalled: MongoDBMulti, server_certs: str) -> MongoDBMulti: - # we have created certs for all 5 members, but want to start at only 3. 
- mongodb_multi_unmarshalled["spec"]["clusterSpecList"][0]["members"] = 1 - mongodb_multi_unmarshalled["spec"]["clusterSpecList"][1]["members"] = 1 - mongodb_multi_unmarshalled["spec"]["clusterSpecList"][2]["members"] = 1 - return mongodb_multi_unmarshalled.create() -@pytest.mark.e2e_multi_cluster_replica_set_scale_up def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_replica_set_scale_up -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) -@pytest.mark.e2e_multi_cluster_replica_set_scale_up def test_statefulsets_have_been_created_correctly( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): # Even though we already verified, in previous test, that the MongoDBMultiCluster resource's phase is running (that would mean all STSs are ready); @@ -107,14 +48,12 @@ def fn(): kubetester.wait_until(fn, timeout=60, message="Verifying sts has correct number of replicas in cluster three") -@pytest.mark.e2e_multi_cluster_replica_set_scale_up def test_ops_manager_has_been_updated_correctly_before_scaling(): ac = AutomationConfigTester() ac.assert_processes_size(3) -@pytest.mark.e2e_multi_cluster_replica_set_scale_up -def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.load() mongodb_multi["spec"]["clusterSpecList"][0]["members"] = 2 mongodb_multi["spec"]["clusterSpecList"][1]["members"] = 1 @@ -124,9 +63,8 @@ def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1800) -@pytest.mark.e2e_multi_cluster_replica_set_scale_up def test_statefulsets_have_been_scaled_up_correctly( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | 
MongoDB, member_cluster_clients: List[MultiClusterClient], ): # Even though we already verified, in previous test, that the MongoDBMultiCluster resource's phase is running (that would mean all STSs are ready); @@ -161,14 +99,11 @@ def fn(): ) -@pytest.mark.e2e_multi_cluster_replica_set_scale_up def test_ops_manager_has_been_updated_correctly_after_scaling(): ac = AutomationConfigTester() ac.assert_processes_size(5) -@skip_if_local -@pytest.mark.e2e_multi_cluster_replica_set_scale_up -def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str): +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB, ca_path: str): tester = mongodb_multi.tester() tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_test_mtls.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_test_mtls.py similarity index 84% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_test_mtls.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_test_mtls.py index 5c720264c..03a7fbbf5 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_test_mtls.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_test_mtls.py @@ -1,46 +1,23 @@ from typing import List import kubernetes -import pytest from kubetester import wait_until from kubetester.kubetester import KubernetesTester, create_testing_namespace -from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from tests.multicluster.conftest import cluster_spec_list -@pytest.fixture(scope="module") -def 
mongodb_multi( - central_cluster_client: kubernetes.client.ApiClient, - namespace: str, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", namespace) - resource.set_version(custom_mdb_version) - - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - # TODO: incorporate this into the base class. - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - resource.update() - return resource - - -@pytest.mark.e2e_multi_cluster_mtls_test def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_mtls_test -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) -@pytest.mark.e2e_multi_cluster_mtls_test def test_create_mongo_pod_in_separate_namespace( member_cluster_clients: List[MultiClusterClient], evergreen_task_id: str, @@ -96,9 +73,8 @@ def pod_is_ready() -> bool: wait_until(pod_is_ready, timeout=60) -@pytest.mark.e2e_multi_cluster_mtls_test def test_connectivity_fails_from_second_namespace( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], namespace: str, ): @@ -126,7 +102,6 @@ def test_connectivity_fails_from_second_namespace( ], f"no expected failure messages found in result: {result}" -@pytest.mark.e2e_multi_cluster_mtls_test def test_enable_istio_injection( member_cluster_clients: List[MultiClusterClient], namespace: str, @@ -138,7 +113,6 @@ def test_enable_istio_injection( corev1.patch_namespace(f"{namespace}-mongo", ns) -@pytest.mark.e2e_multi_cluster_mtls_test def test_delete_existing_mongo_pod(member_cluster_clients: List[MultiClusterClient], namespace: str): cluster_1_client = 
member_cluster_clients[0] corev1 = kubernetes.client.CoreV1Api(api_client=cluster_1_client.api_client) @@ -154,7 +128,6 @@ def pod_is_deleted() -> bool: wait_until(pod_is_deleted, timeout=120) -@pytest.mark.e2e_multi_cluster_mtls_test def test_create_pod_with_istio_sidecar(member_cluster_clients: List[MultiClusterClient], namespace: str): cluster_1_client = member_cluster_clients[0] corev1 = kubernetes.client.CoreV1Api(api_client=cluster_1_client.api_client) @@ -191,9 +164,8 @@ def two_containers_are_present() -> bool: wait_until(two_containers_are_present, timeout=60) -@pytest.mark.e2e_multi_cluster_mtls_test def test_connectivity_succeeds_from_second_namespace( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], namespace: str, ): diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scale_down_cluster.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_down_cluster.py similarity index 54% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scale_down_cluster.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_down_cluster.py index 38350027f..fe0b26b95 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scale_down_cluster.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_down_cluster.py @@ -3,76 +3,24 @@ import kubernetes import pytest from kubetester.automation_config_tester import AutomationConfigTester -from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs -from kubetester.kubetester import fixture as yaml_fixture -from kubetester.kubetester import skip_if_local +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongotester import with_tls from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import 
Operator from kubetester.phase import Phase -from tests.multicluster.conftest import cluster_spec_list - -RESOURCE_NAME = "multi-replica-set" -BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" - - -@pytest.fixture(scope="module") -def mongodb_multi_unmarshalled( - namespace: str, - multi_cluster_issuer_ca_configmap: str, - central_cluster_client: kubernetes.client.ApiClient, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), RESOURCE_NAME, namespace) - resource.set_version(custom_mdb_version) - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - resource["spec"]["security"] = { - "certsSecretPrefix": "prefix", - "tls": { - "ca": multi_cluster_issuer_ca_configmap, - }, - } - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - return resource - - -@pytest.fixture(scope="module") -def server_certs( - multi_cluster_issuer: str, - mongodb_multi_unmarshalled: MongoDBMulti, - member_cluster_clients: List[MultiClusterClient], - central_cluster_client: kubernetes.client.ApiClient, -): - - return create_multi_cluster_mongodb_tls_certs( - multi_cluster_issuer, - BUNDLE_SECRET_NAME, - member_cluster_clients, - central_cluster_client, - mongodb_multi_unmarshalled, - ) - - -@pytest.fixture(scope="module") -def mongodb_multi(mongodb_multi_unmarshalled: MongoDBMulti, server_certs: str) -> MongoDBMulti: - return mongodb_multi_unmarshalled.create() -@pytest.mark.e2e_multi_cluster_scale_down_cluster def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_scale_down_cluster -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) -@pytest.mark.e2e_multi_cluster_scale_down_cluster def 
test_statefulsets_have_been_created_correctly( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients) @@ -92,14 +40,12 @@ def test_statefulsets_have_been_created_correctly( assert cluster_three_sts.status.ready_replicas == 2 -@pytest.mark.e2e_multi_cluster_scale_down_cluster def test_ops_manager_has_been_updated_correctly_before_scaling(): ac = AutomationConfigTester() ac.assert_processes_size(5) -@pytest.mark.e2e_multi_cluster_scale_down_cluster -def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.load() # remove first and last cluster mongodb_multi["spec"]["clusterSpecList"] = [mongodb_multi["spec"]["clusterSpecList"][1]] @@ -108,9 +54,8 @@ def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1800, ignore_errors=True) -@pytest.mark.e2e_multi_cluster_scale_down_cluster def test_statefulsets_have_been_scaled_down_correctly( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): statefulsets = mongodb_multi.read_statefulsets([member_cluster_clients[1]]) @@ -130,15 +75,12 @@ def test_statefulsets_have_been_scaled_down_correctly( assert e.value.reason == "Not Found" -@pytest.mark.e2e_multi_cluster_scale_down_cluster def test_ops_manager_has_been_updated_correctly_after_scaling(): ac = AutomationConfigTester() ac.assert_processes_size(1) -@skip_if_local -@pytest.mark.e2e_multi_cluster_scale_down_cluster -def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str): +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB, ca_path: str): # there should only be one member in cluster 2 so there is just a single service. 
tester = mongodb_multi.tester(service_names=[f"{mongodb_multi.name}-1-0-svc"]) tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_up_cluster.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_up_cluster.py new file mode 100644 index 000000000..c2a8da08b --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_up_cluster.py @@ -0,0 +1,96 @@ +from typing import List + +from kubetester.automation_config_tester import AutomationConfigTester +from kubetester.kubetester import KubernetesTester +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongotester import with_tls +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from kubetester.phase import Phase + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.update() + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) + + +def test_statefulsets_have_been_created_correctly( + mongodb_multi: MongoDBMulti | MongoDB, + member_cluster_clients: List[MultiClusterClient], +): + # read all statefulsets except the last one + mongodb_multi.assert_statefulsets_are_ready(member_cluster_clients[:-1]) + + +def test_ops_manager_has_been_updated_correctly_before_scaling(): + ac = AutomationConfigTester() + ac.assert_processes_size(3) + + +def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient]): + mongodb_multi["spec"]["clusterSpecList"].append( + {"members": 2, "clusterName": member_cluster_clients[2].cluster_name} + ) + mongodb_multi.update() + mongodb_multi.assert_abandons_phase(Phase.Running, timeout=120) + 
mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1800) + + +def test_statefulsets_have_been_scaled_up_correctly( + mongodb_multi: MongoDBMulti | MongoDB, + member_cluster_clients: List[MultiClusterClient], +): + mongodb_multi.assert_statefulsets_are_ready(member_cluster_clients, timeout=60) + + +def test_ops_manager_has_been_updated_correctly_after_scaling(): + ac = AutomationConfigTester() + ac.assert_processes_size(5) + + +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB, ca_path: str): + tester = mongodb_multi.tester() + tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) + + +# From here on, the tests are for verifying that we can change the project of the MongoDBMulti or MongoDB resource even with +# non-sequential member ids in the replicaset. + + +class TestNonSequentialMemberIdsInReplicaSet(KubernetesTester): + + def test_scale_up_first_cluster( + self, mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient] + ): + # Scale up the first cluster to 3 members. This will lead to non-sequential member ids in the replicaset.
+ # multi-replica-set-0-0 : 0 + # multi-replica-set-0-1 : 1 + # multi-replica-set-0-2 : 5 + # multi-replica-set-1-0 : 2 + # multi-replica-set-2-0 : 3 + # multi-replica-set-2-1 : 4 + + mongodb_multi["spec"]["clusterSpecList"][0]["members"] = 3 + mongodb_multi.update() + + mongodb_multi.assert_statefulsets_are_ready(member_cluster_clients) + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) + + def test_change_project(self, mongodb_multi: MongoDBMulti | MongoDB, new_project_configmap: str): + oldRsMembers = mongodb_multi.get_automation_config_tester().get_replica_set_members(mongodb_multi.name) + + mongodb_multi["spec"]["opsManager"]["configMapRef"]["name"] = new_project_configmap + mongodb_multi.update() + + mongodb_multi.assert_abandons_phase(phase=Phase.Running, timeout=300) + mongodb_multi.assert_reaches_phase(phase=Phase.Running, timeout=600) + + newRsMembers = mongodb_multi.get_automation_config_tester().get_replica_set_members(mongodb_multi.name) + + # Assert that the replica set member ids have not changed after changing the project. 
+ assert oldRsMembers == newRsMembers diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scale_up_cluster_new_cluster.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_up_cluster_new_cluster.py similarity index 56% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scale_up_cluster_new_cluster.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_up_cluster_new_cluster.py index 87c46add2..575ded0d0 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scale_up_cluster_new_cluster.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_up_cluster_new_cluster.py @@ -1,70 +1,18 @@ from typing import Callable, List import kubernetes -import pytest from kubernetes import client from kubetester.automation_config_tester import AutomationConfigTester -from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs -from kubetester.kubetester import fixture as yaml_fixture -from kubetester.kubetester import skip_if_local +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongotester import with_tls from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from tests.conftest import MULTI_CLUSTER_OPERATOR_NAME, run_kube_config_creation_tool -from tests.multicluster.conftest import cluster_spec_list +from tests.conftest import run_kube_config_creation_tool +from tests.constants import MULTI_CLUSTER_OPERATOR_NAME -RESOURCE_NAME = "multi-replica-set" -BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" - -@pytest.fixture(scope="module") -def mongodb_multi_unmarshalled( - namespace: str, - multi_cluster_issuer_ca_configmap: str, - central_cluster_client: kubernetes.client.ApiClient, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> 
MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), RESOURCE_NAME, namespace) - resource.set_version(custom_mdb_version) - # ensure certs are created for the members during scale up - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - resource["spec"]["security"] = { - "certsSecretPrefix": "prefix", - "tls": { - "ca": multi_cluster_issuer_ca_configmap, - }, - } - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - return resource - - -@pytest.fixture(scope="module") -def server_certs( - multi_cluster_issuer: str, - mongodb_multi_unmarshalled: MongoDBMulti, - member_cluster_clients: List[MultiClusterClient], - central_cluster_client: kubernetes.client.ApiClient, -): - return create_multi_cluster_mongodb_tls_certs( - multi_cluster_issuer, - BUNDLE_SECRET_NAME, - member_cluster_clients, - central_cluster_client, - mongodb_multi_unmarshalled, - ) - - -@pytest.fixture(scope="module") -def mongodb_multi(mongodb_multi_unmarshalled: MongoDBMulti, server_certs: str) -> MongoDBMulti: - mongodb_multi_unmarshalled["spec"]["clusterSpecList"].pop() - return mongodb_multi_unmarshalled.create() - - -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster def test_deploy_operator( install_multi_cluster_operator_set_members_fn: Callable[[List[str]], Operator], member_cluster_names: List[str], @@ -76,14 +24,12 @@ def test_deploy_operator( operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster def test_statefulsets_have_been_created_correctly( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): clients = {c.cluster_name: c 
for c in member_cluster_clients} @@ -99,20 +45,17 @@ def test_statefulsets_have_been_created_correctly( assert cluster_two_sts.status.ready_replicas == 1 -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster def test_ops_manager_has_been_updated_correctly_before_scaling(): ac = AutomationConfigTester() ac.assert_processes_size(3) -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster def test_delete_deployment(namespace: str, central_cluster_client: kubernetes.client.ApiClient): client.AppsV1Api(api_client=central_cluster_client).delete_namespaced_deployment( MULTI_CLUSTER_OPERATOR_NAME, namespace ) -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster def test_re_deploy_operator( install_multi_cluster_operator_set_members_fn: Callable[[List[str]], Operator], member_cluster_names: List[str], @@ -125,9 +68,8 @@ def test_re_deploy_operator( operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster def test_add_new_cluster_to_mongodb_multi_resource( - mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient] + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient] ): mongodb_multi.load() mongodb_multi["spec"]["clusterSpecList"].append( @@ -137,9 +79,8 @@ def test_add_new_cluster_to_mongodb_multi_resource( mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster def test_statefulsets_have_been_created_correctly_after_cluster_addition( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): clients = {c.cluster_name: c for c in member_cluster_clients} @@ -158,14 +99,11 @@ def test_statefulsets_have_been_created_correctly_after_cluster_addition( assert cluster_three_sts.status.ready_replicas == 2 -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster def test_ops_manager_has_been_updated_correctly_after_scaling(): ac = 
AutomationConfigTester() ac.assert_processes_size(5) -@skip_if_local -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster -def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str): +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB, ca_path: str): tester = mongodb_multi.tester() tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scram.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scram.py similarity index 69% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scram.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scram.py index 402355fa1..b71812c41 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scram.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scram.py @@ -1,20 +1,17 @@ from typing import List import kubernetes -import pytest from kubetester import create_or_update_secret, read_secret from kubetester.automation_config_tester import AutomationConfigTester from kubetester.kubetester import KubernetesTester -from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongodb_user import MongoDBUser from kubetester.mongotester import with_scram from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from tests.multicluster.conftest import cluster_spec_list -MDB_RESOURCE = "multi-replica-set-scram" USER_NAME = "my-user-1" USER_RESOURCE = "multi-replica-set-scram-user" USER_DATABASE = "admin" @@ -23,52 +20,10 @@ NEW_USER_PASSWORD = "my-new-password7" -@pytest.fixture(scope="function") -def mongodb_multi( - central_cluster_client: kubernetes.client.ApiClient, - namespace: str, - 
member_cluster_names, - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) - resource.set_version(custom_mdb_version) - - resource["spec"]["security"] = { - "authentication": { - "agents": {"mode": "MONGODB-CR"}, - "enabled": True, - "modes": ["SCRAM-SHA-1", "SCRAM-SHA-256", "MONGODB-CR"], - } - } - - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - - return resource - - -@pytest.fixture(scope="function") -def mongodb_user(central_cluster_client: kubernetes.client.ApiClient, namespace: str) -> MongoDBUser: - resource = MongoDBUser.from_yaml(yaml_fixture("scram-sha-user.yaml"), USER_RESOURCE, namespace) - - resource["spec"]["username"] = USER_NAME - resource["spec"]["passwordSecretKeyRef"] = { - "name": PASSWORD_SECRET_NAME, - "key": "password", - } - resource["spec"]["mongodbResourceRef"]["name"] = MDB_RESOURCE - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - - return resource - - -@pytest.mark.e2e_multi_cluster_scram def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_scram def test_create_mongodb_user( central_cluster_client: kubernetes.client.ApiClient, mongodb_user: MongoDBUser, @@ -85,13 +40,11 @@ def test_create_mongodb_user( mongodb_user.assert_reaches_phase(Phase.Pending, timeout=100) -@pytest.mark.e2e_multi_cluster_scram -def test_create_mongodb_multi_with_scram(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi_with_scram(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.update() mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) -@pytest.mark.e2e_multi_cluster_scram def test_user_reaches_updated( central_cluster_client: kubernetes.client.ApiClient, mongodb_user: MongoDBUser, @@ -99,16 +52,14 @@ def 
test_user_reaches_updated( mongodb_user.assert_reaches_phase(Phase.Updated, timeout=100) -@pytest.mark.e2e_multi_cluster_scram -def test_replica_set_connectivity_using_user_password(mongodb_multi: MongoDBMulti): +def test_replica_set_connectivity_using_user_password(mongodb_multi: MongoDBMulti | MongoDB): tester = mongodb_multi.tester() tester.assert_connectivity(db="admin", opts=[with_scram(USER_NAME, USER_PASSWORD)]) -@pytest.mark.e2e_multi_cluster_scram def test_change_password_and_check_connectivity( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, central_cluster_client: kubernetes.client.ApiClient, ): create_or_update_secret( @@ -125,8 +76,7 @@ def test_change_password_and_check_connectivity( ) -@pytest.mark.e2e_multi_cluster_scram -def test_user_cannot_authenticate_with_old_password(mongodb_multi: MongoDBMulti): +def test_user_cannot_authenticate_with_old_password(mongodb_multi: MongoDBMulti | MongoDB): tester = mongodb_multi.tester() tester.assert_scram_sha_authentication_fails( password=USER_PASSWORD, @@ -135,10 +85,9 @@ def test_user_cannot_authenticate_with_old_password(mongodb_multi: MongoDBMulti) ) -@pytest.mark.e2e_multi_cluster_scram def test_connection_string_secret_was_created( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): for client in member_cluster_clients: @@ -153,7 +102,6 @@ def test_connection_string_secret_was_created( assert "connectionString.standardSrv" in secret_data -@pytest.mark.e2e_multi_cluster_scram def test_om_configured_correctly(): expected_roles = { ("admin", "clusterAdmin"), @@ -170,16 +118,14 @@ def test_om_configured_correctly(): tester.assert_authentication_mechanism_enabled("MONGODB-CR", active_auth_mechanism=False) -@pytest.mark.e2e_multi_cluster_scram -def test_replica_set_connectivity(mongodb_multi: MongoDBMulti): +def test_replica_set_connectivity(mongodb_multi: MongoDBMulti | MongoDB): 
tester = mongodb_multi.tester() tester.assert_connectivity(db="admin", opts=[with_scram(USER_NAME, NEW_USER_PASSWORD)]) -@pytest.mark.e2e_multi_cluster_scram def test_replica_set_connectivity_from_connection_string_standard( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): secret_data = read_secret( @@ -195,10 +141,9 @@ def test_replica_set_connectivity_from_connection_string_standard( ) -@pytest.mark.e2e_multi_cluster_scram def test_replica_set_connectivity_from_connection_string_standard_srv( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): secret_data = read_secret( diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_split_horizon.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_split_horizon.py new file mode 100644 index 000000000..7a0a70e2e --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_split_horizon.py @@ -0,0 +1,53 @@ +from typing import List + +import yaml +from kubetester.kubetester import KubernetesTester +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongotester import with_tls +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from kubetester.phase import Phase + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +def test_deploy_mongodb_multi_with_tls( + mongodb_multi: MongoDBMulti | MongoDB, + namespace: str, +): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + + +def test_create_node_ports(mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient]): + for mcc in member_cluster_clients: + 
with open( + yaml_fixture(f"split-horizon-node-ports/mongodbmulticluster-split-horizon-node-port.yaml"), + "r", + ) as f: + service_body = yaml.safe_load(f.read()) + + # configure labels and selectors + service_body["metadata"]["labels"][ + "mongodbmulticluster" + ] = f"{mongodb_multi.namespace}-{mongodb_multi.name}" + service_body["metadata"]["labels"][ + "statefulset.kubernetes.io/pod-name" + ] = f"{mongodb_multi.name}-{mcc.cluster_index}-0" + service_body["spec"]["selector"][ + "statefulset.kubernetes.io/pod-name" + ] = f"{mongodb_multi.name}-{mcc.cluster_index}-0" + + KubernetesTester.create_service( + mongodb_multi.namespace, + body=service_body, + api_client=mcc.api_client, + ) + + +def test_tls_connectivity(mongodb_multi: MongoDBMulti | MongoDB, ca_path: str): + tester = mongodb_multi.tester() + tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_sts_override.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_sts_override.py similarity index 61% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_sts_override.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_sts_override.py index 3a762c580..e05fb74f1 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_sts_override.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_sts_override.py @@ -1,45 +1,22 @@ from typing import List -import kubernetes -import pytest from kubernetes import client -from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -@pytest.fixture(scope="module") -def mongodb_multi( - central_cluster_client: 
kubernetes.client.ApiClient, - namespace: str, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodb-multi-sts-override.yaml"), - "multi-replica-set-sts-override", - namespace, - ) - resource.set_version(custom_mdb_version) - - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - return resource.update() - - -@pytest.mark.e2e_multi_sts_override def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_sts_override -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) -@pytest.mark.e2e_multi_sts_override -def test_statefulset_overrides(mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient]): +def test_statefulset_overrides(mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient]): statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients) # assert sts.podspec override in cluster1 @@ -54,9 +31,8 @@ def test_statefulset_overrides(mongodb_multi: MongoDBMulti, member_cluster_clien assert_container_in_sts("sidecar2", cluster_two_sts) -@pytest.mark.e2e_multi_sts_override def test_access_modes_pvc( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], namespace: str, ): diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_tls_no_mesh.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_tls_no_mesh.py similarity index 52% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_tls_no_mesh.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_tls_no_mesh.py index 9932aac29..5b79a3734 100644 --- 
a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_tls_no_mesh.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_tls_no_mesh.py @@ -3,158 +3,15 @@ import kubernetes from kubernetes import client from kubetester import get_service -from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs -from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from pytest import fixture, mark from tests.common.placeholders import placeholders from tests.conftest import update_coredns_hosts -from tests.multicluster.conftest import cluster_spec_list -CERT_SECRET_PREFIX = "clustercert" -MDB_RESOURCE = "multi-cluster-replica-set" -BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" -BUNDLE_PEM_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert-pem" - -@fixture(scope="module") -def mongodb_multi_unmarshalled( - namespace: str, member_cluster_names: List[str], custom_mdb_version: str -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) - resource.set_version(custom_mdb_version) - resource["spec"]["persistent"] = False - # These domains map 1:1 to the CoreDNS file. Please be mindful when updating them. 
- resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 2, 2]) - - resource["spec"]["externalAccess"] = {} - resource["spec"]["clusterSpecList"][0]["externalAccess"] = { - "externalDomain": "kind-e2e-cluster-1.interconnected", - "externalService": { - "spec": { - "type": "LoadBalancer", - "publishNotReadyAddresses": False, - "ports": [ - { - "name": "mongodb", - "port": 27017, - }, - { - "name": "backup", - "port": 27018, - }, - { - "name": "testing0", - "port": 27019, - }, - ], - } - }, - } - resource["spec"]["clusterSpecList"][1]["externalAccess"] = { - "externalDomain": "kind-e2e-cluster-2.interconnected", - "externalService": { - "spec": { - "type": "LoadBalancer", - "publishNotReadyAddresses": False, - "ports": [ - { - "name": "mongodb", - "port": 27017, - }, - { - "name": "backup", - "port": 27018, - }, - { - "name": "testing1", - "port": 27019, - }, - ], - } - }, - } - resource["spec"]["clusterSpecList"][2]["externalAccess"] = { - "externalDomain": "kind-e2e-cluster-3.interconnected", - "externalService": { - "spec": { - "type": "LoadBalancer", - "publishNotReadyAddresses": False, - "ports": [ - { - "name": "mongodb", - "port": 27017, - }, - { - "name": "backup", - "port": 27018, - }, - { - "name": "testing2", - "port": 27019, - }, - ], - } - }, - } - - return resource - - -@fixture(scope="module") -def disable_istio( - multi_cluster_operator: Operator, - namespace: str, - member_cluster_clients: List[MultiClusterClient], -): - for mcc in member_cluster_clients: - api = client.CoreV1Api(api_client=mcc.api_client) - labels = {"istio-injection": "disabled"} - ns = api.read_namespace(name=namespace) - ns.metadata.labels.update(labels) - api.replace_namespace(name=namespace, body=ns) - return None - - -@fixture(scope="module") -def mongodb_multi( - central_cluster_client: kubernetes.client.ApiClient, - disable_istio, - namespace: str, - mongodb_multi_unmarshalled: MongoDBMulti, - multi_cluster_issuer_ca_configmap: str, -) -> 
MongoDBMulti: - mongodb_multi_unmarshalled["spec"]["security"] = { - "certsSecretPrefix": CERT_SECRET_PREFIX, - "tls": { - "ca": multi_cluster_issuer_ca_configmap, - }, - } - mongodb_multi_unmarshalled.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - - return mongodb_multi_unmarshalled.update() - - -@fixture(scope="module") -def server_certs( - multi_cluster_issuer: str, - mongodb_multi_unmarshalled: MongoDBMulti, - member_cluster_clients: List[MultiClusterClient], - central_cluster_client: kubernetes.client.ApiClient, -): - return create_multi_cluster_mongodb_tls_certs( - multi_cluster_issuer, - BUNDLE_SECRET_NAME, - member_cluster_clients, - central_cluster_client, - mongodb_multi_unmarshalled, - ) - - -@mark.e2e_multi_cluster_tls_no_mesh def test_update_coredns(cluster_clients: dict[str, kubernetes.client.ApiClient]): hosts = [ ("172.18.255.211", "test.kind-e2e-cluster-1.interconnected"), @@ -202,14 +59,12 @@ def test_update_coredns(cluster_clients: dict[str, kubernetes.client.ApiClient]) update_coredns_hosts(hosts, cluster_name, api_client=cluster_api) -@mark.e2e_multi_cluster_tls_no_mesh def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@mark.e2e_multi_cluster_tls_no_mesh def test_create_mongodb_multi( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, namespace: str, server_certs: str, multi_cluster_issuer_ca_configmap: str, @@ -219,10 +74,9 @@ def test_create_mongodb_multi( mongodb_multi.assert_reaches_phase(Phase.Running, timeout=2400, ignore_errors=True) -@mark.e2e_multi_cluster_tls_no_mesh def test_service_overrides( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): for cluster_idx, member_cluster_client in enumerate(member_cluster_clients): @@ -250,10 +104,9 @@ def test_service_overrides( assert ports[2].port == 27019 -@mark.e2e_multi_cluster_tls_no_mesh def 
test_placeholders_in_external_services( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): for cluster_spec_item in mongodb_multi["spec"]["clusterSpecList"]: diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_tls_with_scram.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_tls_with_scram.py similarity index 55% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_tls_with_scram.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_tls_with_scram.py index 978ae0832..39f1ebcc5 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_tls_with_scram.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_tls_with_scram.py @@ -3,22 +3,15 @@ import kubernetes from kubetester import create_secret, read_secret from kubetester.automation_config_tester import AutomationConfigTester -from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs -from kubetester.kubetester import KubernetesTester, ensure_ent_version -from kubetester.kubetester import fixture as yaml_fixture -from kubetester.kubetester import skip_if_local +from kubetester.kubetester import KubernetesTester +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongodb_user import MongoDBUser from kubetester.mongotester import with_scram, with_tls from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from pytest import fixture, mark -from tests.multicluster.conftest import cluster_spec_list -CERT_SECRET_PREFIX = "clustercert" -MDB_RESOURCE = "multi-cluster-replica-set" -BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" USER_NAME = "my-user-1" USER_RESOURCE = "multi-replica-set-scram-user" USER_DATABASE 
= "admin" @@ -26,80 +19,12 @@ USER_PASSWORD = "my-password" -@fixture(scope="module") -def mongodb_multi_unmarshalled( - namespace: str, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) - resource.set_version(ensure_ent_version(custom_mdb_version)) - resource["spec"]["clusterSpecList"] = cluster_spec_list( - member_cluster_names=member_cluster_names, members=[2, 1, 2] - ) - - return resource - - -@fixture(scope="module") -def server_certs( - multi_cluster_issuer: str, - mongodb_multi_unmarshalled: MongoDBMulti, - member_cluster_clients: List[MultiClusterClient], - central_cluster_client: kubernetes.client.ApiClient, -): - - return create_multi_cluster_mongodb_tls_certs( - multi_cluster_issuer, - BUNDLE_SECRET_NAME, - member_cluster_clients, - central_cluster_client, - mongodb_multi_unmarshalled, - ) - - -@fixture(scope="module") -def mongodb_multi( - central_cluster_client: kubernetes.client.ApiClient, - server_certs: str, - mongodb_multi_unmarshalled: MongoDBMulti, - multi_cluster_issuer_ca_configmap: str, -) -> MongoDBMulti: - - resource = mongodb_multi_unmarshalled - resource["spec"]["security"] = { - "certsSecretPrefix": CERT_SECRET_PREFIX, - "tls": { - "ca": multi_cluster_issuer_ca_configmap, - }, - } - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - return resource.create() - - -@fixture(scope="module") -def mongodb_user(central_cluster_client: kubernetes.client.ApiClient, namespace: str) -> MongoDBUser: - resource = MongoDBUser.from_yaml(yaml_fixture("mongodb-user.yaml"), USER_RESOURCE, namespace) - - resource["spec"]["username"] = USER_NAME - resource["spec"]["passwordSecretKeyRef"] = { - "name": PASSWORD_SECRET_NAME, - "key": "password", - } - resource["spec"]["mongodbResourceRef"]["name"] = MDB_RESOURCE - resource["spec"]["mongodbResourceRef"]["namespace"] = namespace - resource.api = 
kubernetes.client.CustomObjectsApi(central_cluster_client) - return resource.create() - - -@mark.e2e_multi_cluster_tls_with_scram def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@mark.e2e_multi_cluster_tls_with_scram def test_deploy_mongodb_multi_with_tls( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, namespace: str, member_cluster_clients: List[MultiClusterClient], ): @@ -107,9 +32,8 @@ def test_deploy_mongodb_multi_with_tls( mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) -@mark.e2e_multi_cluster_tls_with_scram def test_update_mongodb_multi_tls_with_scram( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, namespace: str, ): mongodb_multi.load() @@ -118,7 +42,6 @@ def test_update_mongodb_multi_tls_with_scram( mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) -@mark.e2e_multi_cluster_tls_with_scram def test_create_mongodb_user( central_cluster_client: kubernetes.client.ApiClient, mongodb_user: MongoDBUser, @@ -134,16 +57,12 @@ def test_create_mongodb_user( mongodb_user.assert_reaches_phase(Phase.Updated, timeout=100) -@skip_if_local -@mark.e2e_multi_cluster_tls_with_scram -def test_tls_connectivity(mongodb_multi: MongoDBMulti, ca_path: str): +def test_tls_connectivity(mongodb_multi: MongoDBMulti | MongoDB, ca_path: str): tester = mongodb_multi.tester() tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) -@skip_if_local -@mark.e2e_multi_cluster_tls_with_scram -def test_replica_set_connectivity_with_scram_and_tls(mongodb_multi: MongoDBMulti, ca_path: str): +def test_replica_set_connectivity_with_scram_and_tls(mongodb_multi: MongoDBMulti | MongoDB, ca_path: str): tester = mongodb_multi.tester() tester.assert_connectivity( db="admin", @@ -154,11 +73,9 @@ def test_replica_set_connectivity_with_scram_and_tls(mongodb_multi: MongoDBMulti ) -@skip_if_local -@mark.e2e_multi_cluster_tls_with_scram def 
test_replica_set_connectivity_from_connection_string_standard( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ca_path: str, ): @@ -178,11 +95,9 @@ def test_replica_set_connectivity_from_connection_string_standard( ) -@skip_if_local -@mark.e2e_multi_cluster_tls_with_scram def test_replica_set_connectivity_from_connection_string_standard_srv( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ca_path: str, ): @@ -202,9 +117,8 @@ def test_replica_set_connectivity_from_connection_string_standard_srv( ) -@mark.e2e_multi_cluster_tls_with_scram def test_mongodb_multi_tls_enable_x509( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, namespace: str, ): mongodb_multi.load() @@ -219,9 +133,8 @@ def test_mongodb_multi_tls_enable_x509( mongodb_multi.assert_reaches_phase(Phase.Running, ignore_errors=True, timeout=1000) -@mark.e2e_multi_cluster_tls_with_scram def test_mongodb_multi_tls_automation_config_was_updated( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, namespace: str, ): tester = AutomationConfigTester(KubernetesTester.get_automation_config()) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_tls_with_x509.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_tls_with_x509.py new file mode 100644 index 000000000..2c3e9dc31 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_tls_with_x509.py @@ -0,0 +1,63 @@ +import tempfile + +import kubernetes +from kubetester.automation_config_tester import AutomationConfigTester +from kubetester.certs import Certificate, create_multi_cluster_x509_user_cert +from kubetester.kubetester import KubernetesTester +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from 
kubetester.mongodb_user import MongoDBUser +from kubetester.operator import Operator +from kubetester.phase import Phase + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +def test_deploy_mongodb_multi_with_tls_and_authentication(mongodb_multi: MongoDBMulti | MongoDB, namespace: str): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + + +def test_ops_manager_state_was_updated_correctly(mongodb_multi: MongoDBMulti | MongoDB): + ac_tester = AutomationConfigTester(KubernetesTester.get_automation_config()) + ac_tester.assert_authentication_enabled(expected_num_deployment_auth_mechanisms=2) + ac_tester.assert_authentication_mechanism_enabled("MONGODB-X509") + ac_tester.assert_internal_cluster_authentication_enabled() + + +def test_create_mongodb_x509_user( + central_cluster_client: kubernetes.client.ApiClient, + mongodb_x509_user: MongoDBUser, + namespace: str, +): + mongodb_x509_user.assert_reaches_phase(Phase.Updated, timeout=100) + + +def test_x509_user_connectivity( + mongodb_multi: MongoDBMulti | MongoDB, + central_cluster_client: kubernetes.client.ApiClient, + multi_cluster_issuer: str, + namespace: str, + ca_path: str, +): + with tempfile.NamedTemporaryFile(delete=False, mode="w") as cert_file: + create_multi_cluster_x509_user_cert( + multi_cluster_issuer, namespace, central_cluster_client, path=cert_file.name + ) + tester = mongodb_multi.tester() + tester.assert_x509_authentication(cert_file_name=cert_file.name, tlsCAFile=ca_path) + + +# TODO Replace and use this method to check that certificate rotation after enabling TLS and authentication mechanisms +# keeps the resources reachable and in Running state. 
+def assert_certificate_rotation(central_cluster_client, mongodb_multi, namespace, certificate_name): + cert = Certificate(name=certificate_name, namespace=namespace) + cert.api = kubernetes.client.CustomObjectsApi(api_client=central_cluster_client) + cert.load() + cert["spec"]["dnsNames"].append("foo") # Append DNS to cert to rotate the certificate + cert.update() + # FIXME the assertions below need to be replaced with a robust check that the agents are ready + # and the TLS certificates are rotated. + mongodb_multi.assert_abandons_phase(Phase.Running, timeout=100) + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1500) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_upgrade_downgrade.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_upgrade_downgrade.py new file mode 100644 index 000000000..b85c08789 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_upgrade_downgrade.py @@ -0,0 +1,59 @@ +from kubetester.kubetester import ensure_ent_version, fcv_from_version +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongotester import MongoDBBackgroundTester +from kubetester.operator import Operator +from kubetester.phase import Phase + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +def test_create_mongodb_multi_running(mongodb_multi: MongoDBMulti | MongoDB, custom_mdb_prev_version: str): + mongodb_multi.update() + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) + mongodb_multi.tester().assert_version(ensure_ent_version(custom_mdb_prev_version)) + + +def test_start_background_checker(mdb_health_checker: MongoDBBackgroundTester): + mdb_health_checker.start() + + +def test_mongodb_multi_upgrade( + mongodb_multi: MongoDBMulti | MongoDB, custom_mdb_prev_version: str, custom_mdb_version: str +): + mongodb_multi.load() + 
mongodb_multi["spec"]["version"] = ensure_ent_version(custom_mdb_version) + mongodb_multi["spec"]["featureCompatibilityVersion"] = fcv_from_version(custom_mdb_prev_version) + mongodb_multi.update() + + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) + + mongodb_multi.tester().assert_version(ensure_ent_version(custom_mdb_version)) + + +def test_upgraded_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB): + tester = mongodb_multi.tester() + tester.assert_connectivity() + + +def test_mongodb_multi_downgrade(mongodb_multi: MongoDBMulti | MongoDB, custom_mdb_prev_version: str): + mongodb_multi.load() + mongodb_multi["spec"]["version"] = ensure_ent_version(custom_mdb_prev_version) + mongodb_multi["spec"]["featureCompatibilityVersion"] = fcv_from_version(custom_mdb_prev_version) + mongodb_multi.update() + + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) + mongodb_multi.tester().assert_version(ensure_ent_version(custom_mdb_prev_version)) + + +def test_downgraded_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB): + tester = mongodb_multi.tester() + tester.assert_connectivity() + + +def test_mdb_healthy_throughout_change_version( + mdb_health_checker: MongoDBBackgroundTester, +): + mdb_health_checker.assert_healthiness() diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_validation.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_validation.py similarity index 79% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_validation.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_validation.py index d24fecf3c..1d4e34986 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_validation.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_validation.py @@ -1,18 +1,16 @@ import kubernetes -import pytest import yaml from kubetester.kubetester import 
KubernetesTester from kubetester.kubetester import fixture as yaml_fixture from kubetester.operator import Operator -@pytest.mark.e2e_multi_cluster_validation class TestWebhookValidation(KubernetesTester): def test_deploy_operator(self, multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() - def test_unique_cluster_names(self, central_cluster_client: kubernetes.client.ApiClient): - resource = yaml.safe_load(open(yaml_fixture("mongodb-multi-cluster.yaml"))) + def test_unique_cluster_names(self, central_cluster_client: kubernetes.client.ApiClient, fixture: str): + resource = yaml.safe_load(open(yaml_fixture(fixture))) resource["spec"]["clusterSpecList"].append({"clusterName": "kind-e2e-cluster-1", "members": 1}) self.create_custom_resource_from_object( @@ -22,8 +20,8 @@ def test_unique_cluster_names(self, central_cluster_client: kubernetes.client.Ap api_client=central_cluster_client, ) - def test_only_one_schema(self, central_cluster_client: kubernetes.client.ApiClient): - resource = yaml.safe_load(open(yaml_fixture("mongodb-multi-cluster.yaml"))) + def test_only_one_schema(self, central_cluster_client: kubernetes.client.ApiClient, fixture: str): + resource = yaml.safe_load(open(yaml_fixture(fixture))) resource["spec"]["cloudManager"] = {"configMapRef": {"name": " my-project"}} self.create_custom_resource_from_object( @@ -33,8 +31,8 @@ def test_only_one_schema(self, central_cluster_client: kubernetes.client.ApiClie api_client=central_cluster_client, ) - def test_non_empty_clusterspec_list(self, central_cluster_client: kubernetes.client.ApiClient): - resource = yaml.safe_load(open(yaml_fixture("mongodb-multi-cluster.yaml"))) + def test_non_empty_clusterspec_list(self, central_cluster_client: kubernetes.client.ApiClient, fixture: str): + resource = yaml.safe_load(open(yaml_fixture(fixture))) resource["spec"]["clusterSpecList"] = [] self.create_custom_resource_from_object( diff --git 
a/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_appdb_s3_based_backup_restore.py b/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_mongodbmulticluster_appdb_s3_based_backup_restore.py similarity index 66% rename from docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_appdb_s3_based_backup_restore.py rename to docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_mongodbmulticluster_appdb_s3_based_backup_restore.py index b46744278..dddb83999 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_appdb_s3_based_backup_restore.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_mongodbmulticluster_appdb_s3_based_backup_restore.py @@ -22,8 +22,12 @@ from tests.common.ops_manager.multi_cluster import ( ops_manager_multi_cluster_with_tls_s3_backups, ) -from tests.conftest import AWS_REGION, assert_data_got_restored +from tests.conftest import assert_data_got_restored +from tests.constants import AWS_REGION from tests.multicluster.conftest import cluster_spec_list +from tests.multicluster_appdb.shared import ( + multicluster_appdb_s3_based_backup_restore as testhelper, +) @fixture(scope="module") @@ -52,7 +56,7 @@ def multi_cluster_s3_replica_set( custom_mdb_version: str, ) -> MongoDBMulti: resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodb-multi-cluster.yaml"), "multi-replica-set", namespace + yaml_fixture("mongodbmulticluster-multi-cluster.yaml"), "multi-replica-set", namespace ).configure(ops_manager, "s3metadata", api_client=central_cluster_client) resource["spec"]["clusterSpecList"] = cluster_spec_list(appdb_member_cluster_names, [1, 2]) @@ -93,7 +97,7 @@ def ops_manager( @mark.usefixtures("multi_cluster_operator") -@mark.e2e_multi_cluster_appdb_s3_based_backup_restore +@mark.e2e_mongodbmulticluster_multi_cluster_appdb_s3_based_backup_restore class TestOpsManagerCreation: """ name: Ops Manager successful creation with backup 
and oplog stores enabled @@ -105,51 +109,31 @@ def test_create_om( self, ops_manager: MongoDBOpsManager, ): - ops_manager["spec"]["backup"]["members"] = 1 - ops_manager.update() - - ops_manager.appdb_status().assert_reaches_phase(Phase.Running) - ops_manager.om_status().assert_reaches_phase(Phase.Running) + testhelper.TestOpsManagerCreation.test_create_om(self, ops_manager) def test_om_is_running( self, ops_manager: MongoDBOpsManager, central_cluster_client: kubernetes.client.ApiClient, ): - # at this point AppDB is used as the "metadatastore" - ops_manager.backup_status().assert_reaches_phase(Phase.Running, ignore_errors=True) - om_tester = ops_manager.get_om_tester(api_client=central_cluster_client) - om_tester.assert_healthiness() + testhelper.TestOpsManagerCreation.test_om_is_running(self, ops_manager, central_cluster_client) def test_add_metadatastore( self, multi_cluster_s3_replica_set: MongoDBMulti, ops_manager: MongoDBOpsManager, ): - multi_cluster_s3_replica_set.assert_reaches_phase(Phase.Running, timeout=1000) - - # configure metadatastore in om, use dedicate MDB instead of AppDB - ops_manager.load() - ops_manager["spec"]["backup"]["s3Stores"][0]["mongodbResourceRef"] = {"name": multi_cluster_s3_replica_set.name} - ops_manager["spec"]["backup"]["s3OpLogStores"][0]["mongodbResourceRef"] = { - "name": multi_cluster_s3_replica_set.name - } - ops_manager.update() - - ops_manager.om_status().assert_reaches_phase(Phase.Running) - ops_manager.backup_status().assert_reaches_phase(Phase.Running, ignore_errors=True) + testhelper.TestOpsManagerCreation.test_add_metadatastore(self, multi_cluster_s3_replica_set, ops_manager) def test_om_s3_stores( self, ops_manager: MongoDBOpsManager, central_cluster_client: kubernetes.client.ApiClient, ): - om_tester = ops_manager.get_om_tester(api_client=central_cluster_client) - om_tester.assert_s3_stores([{"id": S3_BLOCKSTORE_NAME, "s3RegionOverride": AWS_REGION}]) - om_tester.assert_oplog_s3_stores([{"id": S3_OPLOG_NAME, 
"s3RegionOverride": AWS_REGION}]) + testhelper.TestOpsManagerCreation.test_om_s3_stores(self, ops_manager, central_cluster_client) -@mark.e2e_multi_cluster_appdb_s3_based_backup_restore +@mark.e2e_mongodbmulticluster_multi_cluster_appdb_s3_based_backup_restore class TestBackupForMongodb: @fixture(scope="module") def project_one( @@ -183,7 +167,7 @@ def mongodb_multi_one( custom_mdb_version: str, ) -> MongoDBMulti: resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodb-multi.yaml"), + yaml_fixture("mongodbmulticluster-multi.yaml"), "multi-replica-set-one", namespace, # the project configmap should be created in the central cluster. @@ -201,38 +185,20 @@ def mongodb_multi_one( return resource.update() def test_mongodb_multi_one_running_state(self, mongodb_multi_one: MongoDBMulti): - # we might fail connection in the beginning since we set a custom dns in coredns - mongodb_multi_one.assert_reaches_phase(Phase.Running, ignore_errors=True, timeout=600) + testhelper.TestBackupForMongodb.test_mongodb_multi_one_running_state(self, mongodb_multi_one) @pytest.mark.flaky(reruns=100, reruns_delay=6) def test_add_test_data(self, mongodb_multi_one_collection): - mongodb_multi_one_collection.insert_one(TEST_DATA) + testhelper.TestBackupForMongodb.test_add_test_data(self, mongodb_multi_one_collection) def test_mdb_backed_up(self, project_one: OMTester): - project_one.wait_until_backup_snapshots_are_ready(expected_count=1) + testhelper.TestBackupForMongodb.test_mdb_backed_up(self, project_one) def test_change_mdb_data(self, mongodb_multi_one_collection): - now_millis = time_to_millis(datetime.datetime.now()) - print("\nCurrent time (millis): {}".format(now_millis)) - time.sleep(30) - mongodb_multi_one_collection.insert_one({"foo": "bar"}) + testhelper.TestBackupForMongodb.test_change_mdb_data(self, mongodb_multi_one_collection) def test_pit_restore(self, project_one: OMTester): - now_millis = time_to_millis(datetime.datetime.now()) - print("\nCurrent time (millis): 
{}".format(now_millis)) - - pit_datetme = datetime.datetime.now() - datetime.timedelta(seconds=15) - pit_millis = time_to_millis(pit_datetme) - print("Restoring back to the moment 15 seconds ago (millis): {}".format(pit_millis)) - - project_one.create_restore_job_pit(pit_millis) + testhelper.TestBackupForMongodb.test_pit_restore(self, project_one) def test_data_got_restored(self, mongodb_multi_one_collection): - assert_data_got_restored(TEST_DATA, mongodb_multi_one_collection, timeout=1200) - - -def time_to_millis(date_time) -> int: - """https://stackoverflow.com/a/11111177/614239""" - epoch = datetime.datetime.utcfromtimestamp(0) - pit_millis = (date_time - epoch).total_seconds() * 1000 - return pit_millis + testhelper.TestBackupForMongodb.test_data_got_restored(self, mongodb_multi_one_collection) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/shared/__init__.py b/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/shared/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/shared/multicluster_appdb_s3_based_backup_restore.py b/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/shared/multicluster_appdb_s3_based_backup_restore.py new file mode 100644 index 000000000..1190abdf0 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/shared/multicluster_appdb_s3_based_backup_restore.py @@ -0,0 +1,125 @@ +import datetime +import time + +import kubernetes.client +import pymongo +import pytest +from kubetester import create_or_update_configmap +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.omtester import OMTester +from kubetester.opsmanager import MongoDBOpsManager +from kubetester.phase import Phase +from tests.common.constants import ( + S3_BLOCKSTORE_NAME, + S3_OPLOG_NAME, + TEST_DATA, +) +from tests.conftest import assert_data_got_restored +from tests.constants import AWS_REGION + + +def 
create_project_config_map(om: MongoDBOpsManager, mdb_name, project_name, client, custom_ca): + name = f"{mdb_name}-config" + data = { + "baseUrl": om.om_status().get_url(), + "projectName": project_name, + "sslMMSCAConfigMap": custom_ca, + "orgId": "", + } + + create_or_update_configmap(om.namespace, name, data, client) + + +class TestOpsManagerCreation: + """ + name: Ops Manager successful creation with backup and oplog stores enabled + description: | + Creates an Ops Manager instance with backup enabled. + """ + + def test_create_om( + self, + ops_manager: MongoDBOpsManager, + ): + ops_manager["spec"]["backup"]["members"] = 1 + ops_manager.update() + + ops_manager.appdb_status().assert_reaches_phase(Phase.Running) + ops_manager.om_status().assert_reaches_phase(Phase.Running) + + def test_om_is_running( + self, + ops_manager: MongoDBOpsManager, + central_cluster_client: kubernetes.client.ApiClient, + ): + # at this point AppDB is used as the "metadatastore" + ops_manager.backup_status().assert_reaches_phase(Phase.Running, ignore_errors=True) + om_tester = ops_manager.get_om_tester(api_client=central_cluster_client) + om_tester.assert_healthiness() + + def test_add_metadatastore( + self, + multi_cluster_s3_replica_set: MongoDBMulti, + ops_manager: MongoDBOpsManager, + ): + multi_cluster_s3_replica_set.assert_reaches_phase(Phase.Running, timeout=1000) + + # configure metadatastore in om, use dedicate MDB instead of AppDB + ops_manager.load() + ops_manager["spec"]["backup"]["s3Stores"][0]["mongodbResourceRef"] = {"name": multi_cluster_s3_replica_set.name} + ops_manager["spec"]["backup"]["s3OpLogStores"][0]["mongodbResourceRef"] = { + "name": multi_cluster_s3_replica_set.name + } + ops_manager.update() + + ops_manager.om_status().assert_reaches_phase(Phase.Running) + ops_manager.backup_status().assert_reaches_phase(Phase.Running, ignore_errors=True) + + def test_om_s3_stores( + self, + ops_manager: MongoDBOpsManager, + central_cluster_client: 
kubernetes.client.ApiClient, + ): + om_tester = ops_manager.get_om_tester(api_client=central_cluster_client) + om_tester.assert_s3_stores([{"id": S3_BLOCKSTORE_NAME, "s3RegionOverride": AWS_REGION}]) + om_tester.assert_oplog_s3_stores([{"id": S3_OPLOG_NAME, "s3RegionOverride": AWS_REGION}]) + + +class TestBackupForMongodb: + + def test_mongodb_multi_one_running_state(self, mongodb_multi_one: MongoDBMulti): + # we might fail connection in the beginning since we set a custom dns in coredns + mongodb_multi_one.assert_reaches_phase(Phase.Running, ignore_errors=True, timeout=600) + + @pytest.mark.flaky(reruns=100, reruns_delay=6) + def test_add_test_data(self, mongodb_multi_one_collection): + mongodb_multi_one_collection.insert_one(TEST_DATA) + + def test_mdb_backed_up(self, project_one: OMTester): + project_one.wait_until_backup_snapshots_are_ready(expected_count=1) + + def test_change_mdb_data(self, mongodb_multi_one_collection): + now_millis = time_to_millis(datetime.datetime.now()) + print("\nCurrent time (millis): {}".format(now_millis)) + time.sleep(30) + mongodb_multi_one_collection.insert_one({"foo": "bar"}) + + def test_pit_restore(self, project_one: OMTester): + now_millis = time_to_millis(datetime.datetime.now()) + print("\nCurrent time (millis): {}".format(now_millis)) + + pit_datetme = datetime.datetime.now() - datetime.timedelta(seconds=15) + pit_millis = time_to_millis(pit_datetme) + print("Restoring back to the moment 15 seconds ago (millis): {}".format(pit_millis)) + + project_one.create_restore_job_pit(pit_millis) + + def test_data_got_restored(self, mongodb_multi_one_collection): + assert_data_got_restored(TEST_DATA, mongodb_multi_one_collection, timeout=1200) + + +def time_to_millis(date_time) -> int: + """https://stackoverflow.com/a/11111177/614239""" + epoch = datetime.datetime.utcfromtimestamp(0) + pit_millis = (date_time - epoch).total_seconds() * 1000 + return pit_millis diff --git 
a/docker/mongodb-kubernetes-tests/tests/upgrades/meko_mck_upgrade.py b/docker/mongodb-kubernetes-tests/tests/upgrades/meko_mck_upgrade.py index 77432c4f2..6a4d45d5b 100644 --- a/docker/mongodb-kubernetes-tests/tests/upgrades/meko_mck_upgrade.py +++ b/docker/mongodb-kubernetes-tests/tests/upgrades/meko_mck_upgrade.py @@ -53,7 +53,7 @@ def replica_set( ) -> MongoDB: if is_multi_cluster(): resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodb-multi-cluster.yaml"), + yaml_fixture("mongodbmulticluster-multi-cluster.yaml"), "multi-replica-set", namespace, ) @@ -99,26 +99,26 @@ def replica_set( # Installs the latest officially released version of MEKO, from Quay -@mark.e2e_meko_mck_upgrade +@mark.e2e_mongodbmulticluster_meko_mck_upgrade def test_install_latest_official_operator(official_meko_operator: Operator, namespace: str): official_meko_operator.assert_is_running() # Dumping deployments in logs ensures we are using the correct operator version log_deployments_info(namespace) -@mark.e2e_meko_mck_upgrade +@mark.e2e_mongodbmulticluster_meko_mck_upgrade def test_install_replicaset(replica_set: MongoDB): replica_set.assert_reaches_phase(phase=Phase.Running, timeout=1000 if is_multi_cluster() else 600) -@mark.e2e_meko_mck_upgrade +@mark.e2e_mongodbmulticluster_meko_mck_upgrade def test_downscale_latest_official_operator(namespace: str): deployment_name = LEGACY_MULTI_CLUSTER_OPERATOR_NAME if is_multi_cluster() else LEGACY_OPERATOR_NAME downscale_operator_deployment(deployment_name, namespace) # Upgrade to MCK -@mark.e2e_meko_mck_upgrade +@mark.e2e_mongodbmulticluster_meko_mck_upgrade def test_upgrade_operator( namespace: str, operator_installation_config, @@ -150,19 +150,19 @@ def test_upgrade_operator( log_deployments_info(namespace) -@mark.e2e_meko_mck_upgrade +@mark.e2e_mongodbmulticluster_meko_mck_upgrade def test_replicaset_reconciled(replica_set: MongoDB): replica_set.assert_abandons_phase(phase=Phase.Running, timeout=300) 
replica_set.assert_reaches_phase(phase=Phase.Running, timeout=800) -@mark.e2e_meko_mck_upgrade +@mark.e2e_mongodbmulticluster_meko_mck_upgrade def test_uninstall_latest_official_operator(namespace: str): helm_uninstall(LEGACY_MULTI_CLUSTER_OPERATOR_NAME if is_multi_cluster() else LEGACY_OPERATOR_NAME) log_deployments_info(namespace) -@mark.e2e_meko_mck_upgrade +@mark.e2e_mongodbmulticluster_meko_mck_upgrade def test_operator_still_running(namespace: str, central_cluster_client: client.ApiClient, member_cluster_names): operator_name = MULTI_CLUSTER_OPERATOR_NAME if is_multi_cluster() else OPERATOR_NAME operator_instance = Operator(