From fb1178843eff5f377f3fca2f1bb7609435a54d14 Mon Sep 17 00:00:00 2001
From: Bipul Adhikari
Date: Tue, 17 Oct 2023 14:03:06 +0545
Subject: [PATCH] Removes Ceph static plugin from OCP console repository

Signed-off-by: Bipul Adhikari
---
 frontend/package.json | 2 -
 .../packages/ceph-storage-plugin/.eslintrc | 29 -
 frontend/packages/ceph-storage-plugin/OWNERS | 8 -
 .../packages/ceph-storage-plugin/README.md | 89 --
 .../integration-tests-cypress/.eslintrc | 15 -
 .../integration-tests-cypress/consts.ts | 110 ---
 .../cypress-ceph.json | 20 -
 .../helpers/index.ts | 1 -
 .../helpers/installation.ts | 56 --
 .../integration-tests-cypress/helpers/pvc.ts | 24 -
 .../helpers/vault.ts | 345 --------
 .../mocks/bucket-class.ts | 41 -
 .../mocks/deploymentData.ts | 35 -
 .../mocks/install.ts | 26 -
 .../mocks/storageclass.ts | 48 --
 .../reporter-config.json | 14 -
 .../support/index.ts | 55 --
 .../support/vault-standalone.ts | 75 --
 .../tests/add-capacity.spec.ts | 155 ----
 .../tests/block-pool-create.spec.ts | 21 -
 .../tests/block-pool-delete.spec.ts | 71 --
 .../tests/block-pool-update.spec.ts | 53 --
 .../tests/bucket-class-spec.ts | 101 ---
 .../tests/create-backing-store.spec.ts | 52 --
 .../tests/expand-pvc.spec.ts | 30 -
 .../tests/kms-encryption-sc.ts | 20 -
 .../tests/multiple-pool.spec.ts | 67 --
 .../multiple-storageclass-selection.spec.ts | 97 ---
 .../tests/namespace-store.spec.ts | 47 --
 .../tests/noobaa-sso.spec.ts | 28 -
 .../tests/obc-test.spec.ts | 125 ---
 .../tests/object-service-dashboards.spec.ts | 82 --
 .../tests/ocs-presistent-dashboard.spec.ts | 66 --
 .../integration-tests-cypress/utils/consts.ts | 32 -
 .../integration-tests-cypress/views/bc.ts | 214 -----
 .../views/block-pool.ts | 96 ---
 .../integration-tests-cypress/views/common.ts | 29 -
 .../views/install.ts | 117 ---
 .../views/multiple-storageclass.ts | 67 --
 .../views/obcPage.ts | 63 --
 .../integration-tests-cypress/views/pvc.ts | 25 -
 .../views/storage-class.ts | 47 --
 .../integration-tests-cypress/views/store.ts | 75 --
 .../integration-tests/OWNERS | 17 -
 .../mocks/expand-test-mocks.ts | 120 ---
 .../independent-external-cluster-data.ts | 57 --
 .../integration-tests/mocks/storage-class.ts | 16 -
 .../integration-tests/mocks/storage-pool.ts | 17 -
 .../integration-tests/mocks/testFile.json | 57 --
 .../tests/1-install/installFlow.scenario.ts | 385 ---------
 .../tests/2-tests/add-capacity.scenario.ts | 298 -------
 .../tests/2-tests/multiple-pool.scenario.ts | 99 ---
 .../tests/2-tests/noobaa-sso-scenario.ts | 39 -
 ...ocp-dashboard-card-healthcheck.scenario.ts | 42 -
 .../tests/2-tests/pvc.scenario.ts | 105 ---
 .../2-tests/storage-dashboard.scenario.ts | 63 --
 .../tests/2-tests/test-expand.scenario.ts | 81 --
 ...ltiple-storage-class-selection.scenario.ts | 113 ---
 .../tests/3-tests/upgrade.scenario.ts | 58 --
 .../integration-tests/utils/consts.ts | 110 ---
 .../integration-tests/utils/helpers.ts | 204 -----
 .../views/add-capacity.view.ts | 71 --
 .../views/installFlow.view.ts | 284 -------
 .../views/multiple-pool.view.ts | 80 --
 .../views/noobaa-sso.view.ts | 9 -
 .../ocp-dashboard-card-healthcheck.view.ts | 23 -
 .../integration-tests/views/pvc.view.ts | 85 --
 .../views/storage-dashboard.view.ts | 21 -
 .../integration-tests/views/upgrade.view.ts | 101 ---
 .../ceph-storage-plugin/locales/OWNERS | 2 -
 .../locales/en/ceph-storage-plugin.json | 758 ------------------
 .../locales/en/console-shared.json | 7 -
 .../locales/ja/ceph-storage-plugin.json | 757 -----------------
 .../locales/ja/console-shared.json | 7 -
 .../locales/ko/ceph-storage-plugin.json | 757 -----------------
 .../locales/ko/console-shared.json | 7 -
 .../locales/zh/ceph-storage-plugin.json | 757 -----------------
 .../locales/zh/console-shared.json | 7 -
 .../packages/ceph-storage-plugin/package.json | 46 --
 .../src/__mocks__/breakdown-data.ts | 48 --
 .../independent-mode-dashboard-data.ts | 70 --
 .../src/__tests__/breakdown-body.spec.tsx | 83 --
 .../src/__tests__/breakdown-chart.spec.tsx | 64 --
 .../src/__tests__/flexible-scaling.spec.ts | 63 --
 ...ependent-dashboard-breakdown-card.spec.tsx | 69 --
 ...ndependent-dashboard-details-card.spec.tsx | 43 -
 .../src/actions/actions.ts | 54 --
 .../src/actions/csv-actions.ts | 38 -
 .../ceph-storage-plugin/src/actions/index.ts | 1 -
 .../attach-obc/attach-obc-deployment.scss | 4 -
 .../attach-obc/attach-obc-deployment.tsx | 141 ----
 .../block-pool/block-pool-details-page.tsx | 89 --
 .../block-pool/block-pool-list-page.tsx | 334 --------
 .../block-pool/block-pool-menu-action.ts | 46 --
 .../src/components/block-pool/body.scss | 16 -
 .../src/components/block-pool/body.tsx | 286 -------
 .../block-pool/create-block-pool.scss | 5 -
 .../block-pool/create-block-pool.tsx | 137 ----
 .../src/components/block-pool/footer.tsx | 48 --
 .../bucket-class/_backingstore-table.scss | 30 -
 .../bucket-class/backingstore-table.tsx | 304 -------
 .../components/bucket-class/create-bc.scss | 93 ---
 .../src/components/bucket-class/create-bc.tsx | 322 --------
 .../bucket-class/modals/_bs-modal.scss | 36 -
 .../modals/edit-backingstore-modal.tsx | 291 -------
 .../components/bucket-class/review-utils.tsx | 22 -
 .../src/components/bucket-class/state.ts | 102 ---
 .../wizard-pages/backingstore-page.tsx | 64 --
 .../wizard-pages/general-page.tsx | 142 ----
 .../wizard-pages/namespace-policy-page.tsx | 56 --
 .../cache-namespace-store.tsx | 132 ---
 .../multi-namespace-store.tsx | 122 ---
 .../single-namespace-store.tsx | 48 --
 .../wizard-pages/placement-policy-page.tsx | 154 ----
 .../bucket-class/wizard-pages/review-page.tsx | 159 ----
 .../backing-store-dropdown.tsx | 117 ---
 .../create-bs-modal.tsx | 34 -
 .../create-bs-page.tsx | 61 --
 .../create-backingstore-page/create-bs.tsx | 238 ------
 .../create-backingstore-page/reducer.ts | 60 --
 .../advanced-subscription.scss | 3 -
 .../advanced-subscription.tsx | 32 -
 .../create-storage-system/create-steps.tsx | 248 ------
 .../backing-storage-step.scss | 12 -
 .../backing-storage-step.tsx | 289 -------
 .../select-deployment.tsx | 68 --
 .../capacity-and-nodes-step.tsx | 345 --------
 .../capacity-and-nodes.scss | 10 -
 .../selected-nodes-table.tsx | 94 ---
 .../stretch-cluster.tsx | 113 ---
 .../connection-details-step.tsx | 42 -
 .../create-local-volume-set-step/body.scss | 51 --
 .../create-local-volume-set-step/body.tsx | 352 --------
 .../create-local-volume-set-step.scss | 21 -
 .../create-local-volume-set-step.tsx | 378 ---------
 .../disk-list-modal.tsx | 104 ---
 .../selected-capacity.scss | 39 -
 .../selected-capacity.tsx | 227 ------
 .../create-storage-class-step.scss | 3 -
 .../create-storage-class-step.tsx | 73 --
 .../create-storage-system-steps/index.ts | 8 -
 .../review-and-create-step.scss | 3 -
 .../review-and-create-step.tsx | 184 -----
 .../security-and-network-step/encryption.scss | 31 -
 .../security-and-network-step/encryption.tsx | 256 ------
 .../security-and-network-step.tsx | 57 --
 .../create-storage-system.scss | 10 -
 .../create-storage-system.tsx | 83 --
 .../create-storage-system/error-handler.tsx | 64 --
 .../external-storage/README.md | 224 ------
 .../ibm-flashsystem/index.tsx | 200 -----
 .../ibm-flashsystem/models.ts | 13 -
 .../external-storage/ibm-flashsystem/type.ts | 41 -
 .../external-storage/index.ts | 36 -
 .../red-hat-ceph-storage/index.scss | 11 -
 .../red-hat-ceph-storage/index.tsx | 174 ----
 .../external-storage/types.ts | 111 ---
 .../create-storage-system/footer.tsx | 284 -------
 .../create-storage-system/header.tsx | 21 -
 .../create-storage-system/payloads.ts | 160 ----
 .../create-storage-system/reducer.ts | 352 --------
 .../select-nodes-table-footer.tsx | 38 -
 .../select-nodes-table.scss | 11 -
 .../select-nodes-table/select-nodes-table.tsx | 34 -
 .../create-storage-system/use-fetch-csv.tsx | 49 --
 .../block-pool-dashboard-context.tsx | 9 -
 .../block-pool/block-pool-dashboard.tsx | 36 -
 .../block-pool/compression-details-card.tsx | 111 ---
 .../dashboards/block-pool/details-card.tsx | 35 -
 .../dashboards/block-pool/inventory-card.tsx | 67 --
 .../block-pool/mirroring-card-body.tsx | 10 -
 .../block-pool/mirroring-card-item.tsx | 50 --
 .../dashboards/block-pool/mirroring-card.scss | 27 -
 .../dashboards/block-pool/mirroring-card.tsx | 199 -----
 .../block-pool/raw-capacity-card.tsx | 46 --
 .../dashboards/block-pool/states.tsx | 52 --
 .../dashboards/block-pool/status-card.tsx | 26 -
 .../block-pool/utilization-card.tsx | 46 --
 .../common/breakdown-card/breakdown-body.tsx | 94 ---
 .../breakdown-card/breakdown-capacity.tsx | 21 -
 .../common/breakdown-card/breakdown-card.scss | 57 --
 .../common/breakdown-card/breakdown-chart.tsx | 142 ----
 .../breakdown-card/breakdown-dropdown.tsx | 55 --
 .../breakdown-card/breakdown-loading.tsx | 18 -
 .../common/breakdown-card/consts.ts | 17 -
 .../common/breakdown-card/utils.tsx | 145 ----
 .../capacity-breakdown/breakdown-body.tsx | 94 ---
 .../capacity-breakdown/breakdown-capacity.tsx | 21 -
 .../capacity-breakdown/breakdown-card.scss | 57 --
 .../capacity-breakdown/breakdown-chart.tsx | 147 ----
 .../capacity-breakdown/breakdown-dropdown.tsx | 55 --
 .../capacity-breakdown/breakdown-loading.tsx | 18 -
 .../common/capacity-breakdown/consts.ts | 19 -
 .../common/capacity-breakdown/utils.tsx | 145 ----
 .../common/capacity-card/capacity-card.scss | 98 ---
 .../common/capacity-card/capacity-card.tsx | 188 -----
 .../data-resiliency-activity.tsx | 27 -
 .../storage-efficiency-card-item.tsx | 41 -
 .../storage-efficiency-card.scss | 18 -
 .../activity-card/activity-card.scss | 3 -
 .../activity-card/activity-card.tsx | 141 ----
 .../data-resiliency-activity.scss | 3 -
 .../data-resiliency-activity.tsx | 31 -
 .../buckets-card/buckets-card-item.tsx | 44 -
 .../buckets-card/buckets-card.scss | 22 -
 .../buckets-card/buckets-card.tsx | 154 ----
 .../object-service/buckets-card/utils.ts | 20 -
 .../capacity-breakdown-card.scss | 20 -
 .../capacity-breakdown-card.tsx | 268 -------
 .../data-consumption-card-dropdown.tsx | 184 -----
 .../data-consumption-card-utils.ts | 202 -----
 .../data-consumption-card.scss | 56 --
 .../data-consumption-card.tsx | 125 ---
 .../data-consumption-graph.tsx | 145 ----
 .../performance-graph.tsx | 131 ---
 .../details-card/details-card.scss | 3 -
 .../details-card/details-card.tsx | 153 ----
 .../resource-providers-card-body.tsx | 33 -
 .../resource-providers-card-item.tsx | 46 --
 .../resource-providers-card.scss | 37 -
 .../resource-providers-card.tsx | 131 ---
 .../status-card/object-service-health.tsx | 71 --
 .../status-card/status-card.scss | 4 -
 .../status-card/status-card.tsx | 191 -----
 .../object-service/status-card/statuses.tsx | 73 --
 .../storage-efficiency-card.tsx | 113 ---
 .../dashboards/ocs-system-dashboard.tsx | 215 -----
 .../dashboards/odf-system-dashboard.tsx | 101 ---
 .../persistent-external/breakdown-card.tsx | 104 ---
 .../persistent-external/details-card.tsx | 120 ---
 .../persistent-external/status-card.tsx | 39 -
 .../persistent-external/utilization-card.tsx | 43 -
 .../activity-card/activity-card.scss | 3 -
 .../activity-card/activity-card.tsx | 164 ----
 .../activity-card/cluster-expand-activity.tsx | 13 -
 .../activity-card/ocs-upgrade-activity.tsx | 18 -
 .../capacity-breakdown-card.scss | 18 -
 .../capacity-breakdown-card.tsx | 110 ---
 .../persistent-internal/details-card.tsx | 135 ----
 .../persistent-internal/inventory-card.tsx | 123 ---
 .../raw-capacity-card/raw-capacity-card.tsx | 40 -
 .../status-card/healthchecks.scss | 6 -
 .../status-card/status-card.tsx | 171 ----
 .../persistent-internal/status-card/utils.ts | 63 --
 .../status-card/whitelisted-health-checks.ts | 4 -
 .../storage-efficiency-card.tsx | 103 ---
 .../top-consumers-card-body.tsx | 119 ---
 .../top-consumers-card/utils.ts | 45 --
 .../utilization-card/area-chart.tsx | 163 ----
 .../multi-utilization-item.tsx | 63 --
 .../prometheus-multi-utilization-item.tsx | 97 ---
 .../prometheus-utilization-item.tsx | 137 ----
 .../utilization-card/utilization-card.scss | 45 --
 .../utilization-card/utilization-card.tsx | 100 ---
 .../utilization-card/utilization-item.tsx | 218 -----
 .../utilization-card/utils.tsx | 23 -
 .../disk-inventory/ocs-disks-list.tsx | 303 -------
 .../disk-inventory/ocs-kebab-options.tsx | 43 -
 .../disk-inventory/ocs-status-column.scss | 3 -
 .../disk-inventory/ocs-status-column.tsx | 117 ---
 .../disk-inventory/state-reducer.ts | 132 ---
 .../src/components/kms-config/hpcs-config.tsx | 163 ----
 .../src/components/kms-config/kms-config.scss | 28 -
 .../src/components/kms-config/kms-config.tsx | 87 --
 .../src/components/kms-config/providers.ts | 39 -
 .../src/components/kms-config/utils.tsx | 430 ----------
 .../kms-config/vault-auth-methods.tsx | 102 ---
 .../components/kms-config/vault-config.tsx | 366 ---------
 .../add-capacity-modal.scss | 50 --
 .../add-capacity-modal/add-capacity-modal.tsx | 311 -------
 .../advanced-ibm-kms-modal.tsx | 84 --
 .../advanced-kms-modal.scss | 5 -
 .../advanced-vault-modal.tsx | 297 -------
 .../modals/attach-deployment-obc-modal.tsx | 131 ---
 .../create-block-pool-modal.tsx | 147 ----
 .../delete-block-pool-modal.tsx | 168 ----
 .../modals/block-pool-modal/modal-footer.tsx | 194 -----
 .../update-block-pool-modal.tsx | 155 ----
 .../modals/disk-replacement-modal.tsx | 204 -----
 .../create-namespace-store.tsx | 52 --
 .../namespace-store-dropdown.tsx | 131 ---
 .../namespace-store/namespace-store-form.tsx | 247 ------
 .../namespace-store/namespace-store-modal.tsx | 35 -
 .../namespace-store-table.scss | 52 --
 .../namespace-store/namespace-store-table.tsx | 164 ----
 .../src/components/namespace-store/reducer.ts | 47 --
 .../gcp-endpoint-type.tsx | 183 -----
 .../noobaa-provider-endpoints.scss | 65 --
 .../pvc-endpoint-type.tsx | 98 ---
 .../s3-endpoint-type.tsx | 179 -----
 .../object-bucket-claim-page/create-obc.scss | 3 -
 .../object-bucket-claim-page/create-obc.tsx | 195 -----
 .../object-bucket-claim-page/menu-actions.ts | 19 -
 .../object-bucket-claim.tsx | 241 ------
 .../object-bucket-claim-page/secret.tsx | 115 ---
 .../object-bucket-page/create-ob.tsx | 122 ---
 .../object-bucket-page/object-bucket.tsx | 187 -----
 .../components/object-bucket-page/state.ts | 61 --
 .../attached-devices.scss | 110 ---
 .../install-wizard-steps/configure-step.tsx | 49 --
 .../create-storage-class-step.tsx | 187 -----
 .../create-storage-class/disk-list-modal.tsx | 104 ---
 .../selected-capacity.tsx | 253 ------
 .../discover-disks-step.tsx | 91 ---
 .../review-and-create-step.tsx | 154 ----
 .../storage-and-nodes-step.tsx | 233 ------
 .../attached-devices-mode/install-wizard.tsx | 299 -------
 .../attached-devices-mode/install.tsx | 89 --
 .../attached-devices-mode/reducer.ts | 253 ------
 .../attached-devices-mode/sc-node-list.tsx | 119 ---
 .../ocs-install/existing-cluster-modal.tsx | 80 --
 .../ocs-install/external-mode/fileUpload.scss | 30 -
 .../ocs-install/external-mode/fileUpload.tsx | 32 -
 .../ocs-install/external-mode/install.scss | 43 -
 .../ocs-install/external-mode/install.tsx | 257 ------
 .../ocs-install/external-mode/utils.spec.ts | 106 ---
 .../ocs-install/external-mode/utils.ts | 139 ----
 .../components/ocs-install/install-page.scss | 22 -
 .../components/ocs-install/install-page.tsx | 202 -----
 .../install-wizard/_capacity-and-nodes.scss | 11 -
 .../install-wizard/_configure.scss | 20 -
 .../install-wizard/capacity-and-nodes.tsx | 235 ------
 .../ocs-install/install-wizard/configure.tsx | 340 --------
 .../install-wizard/install-wizard.scss | 91 ---
 .../install-wizard/review-and-create.tsx | 110 ---
 .../install-wizard-steps/configure.tsx | 52 --
 .../install-wizard-steps/index.ts | 3 -
 .../review-and-create.tsx | 154 ----
 .../select-capacity-and-nodes-step.tsx | 164 ----
 .../internal-mode/install-wizard.tsx | 189 -----
 .../ocs-install/internal-mode/reducer.ts | 187 -----
 .../src/components/ocs-install/node-list.tsx | 140 ----
 .../components/ocs-install/ocs-install.scss | 92 ---
 .../ocs-install/ocs-request-data.ts | 225 ------
 .../ocs-install/pvs-available-capacity.scss | 4 -
 .../ocs-install/pvs-available-capacity.tsx | 58 --
 .../ocs-install/subscription-icon.scss | 3 -
 .../ocs-install/subscription-icon.tsx | 32 -
 .../src/components/ocs-install/types.ts | 0
 .../ocs-storage-class-form.scss | 58 --
 .../ocs-storage-class-form.tsx | 642 ---------------
 .../ocs-thick-provisioner.tsx | 42 -
 .../src/components/odf-resources/link.tsx | 20 -
 .../odf-resources/resource-details-page.tsx | 121 ---
 .../odf-resources/resource-list-page.tsx | 201 -----
 .../src/components/odf-system/actions.tsx | 25 -
 .../components/odf-system/odf-system-list.tsx | 225 ------
 .../src/components/odf-system/system-link.tsx | 23 -
 .../src/components/odf-system/utils.ts | 75 --
 .../src/components/storage-popover.tsx | 60 --
 .../src/constants/bucket-class.ts | 66 --
 .../src/constants/capacity-breakdown.ts | 32 -
 .../src/constants/common.ts | 45 --
 .../src/constants/create-storage-system.ts | 36 -
 .../src/constants/data-consumption.ts | 55 --
 .../src/constants/disks.ts | 3 -
 .../src/constants/index.ts | 12 -
 .../ceph-storage-plugin/src/constants/kms.ts | 95 ---
 .../src/constants/network.ts | 11 -
 .../src/constants/ocs-install.ts | 59 --
 .../ceph-storage-plugin/src/constants/pool.ts | 5 -
 .../src/constants/providers.ts | 78 --
 .../src/constants/status.ts | 11 -
 .../src/constants/storage-pool-const.ts | 20 -
 .../src/constants/tooltips.tsx | 36 -
 .../src/extensions/dashboards.ts | 29 -
 .../ceph-storage-plugin/src/features.ts | 266 ------
 .../packages/ceph-storage-plugin/src/index.ts | 1 -
 .../ceph-storage-plugin/src/models.ts | 149 ----
 .../ceph-storage-plugin/src/plugin.ts | 14 -
 .../src/queries/ceph-queries.ts | 255 ------
 .../ceph-storage-plugin/src/queries/index.ts | 2 -
 .../src/queries/object-storage-queries.ts | 189 -----
 .../src/queries/odf-queries.ts | 15 -
 .../ceph-storage-plugin/src/resources.ts | 119 ---
 .../src/selectors/index.ts | 86 --
 .../packages/ceph-storage-plugin/src/types.ts | 491 ------------
 .../src/utils/add-capacity.ts | 4 -
 .../src/utils/alert-action-path.tsx | 20 -
 .../src/utils/block-pool.tsx | 312 -------
 .../src/utils/bucket-class.ts | 24 -
 .../src/utils/common-ocs-install-el.tsx | 208 -----
 .../ceph-storage-plugin/src/utils/common.tsx | 37 -
 .../src/utils/create-storage-system.ts | 121 ---
 .../src/utils/dashboard.ts | 15 -
 .../ceph-storage-plugin/src/utils/events.ts | 36 -
 .../ceph-storage-plugin/src/utils/index.ts | 3 -
 .../ceph-storage-plugin/src/utils/install.ts | 184 -----
 .../src/utils/noobaa-utils.ts | 206 -----
 .../src/utils/odf-provisioners.ts | 6 -
 .../src/utils/osd-size-dropdown.tsx | 88 --
 .../src/utils/popover-helper.tsx | 62 --
 .../src/utils/table-filters.ts | 49 --
 .../src/utils/time-duration-dropdown.tsx | 62 --
 frontend/packages/console-app/package.json | 1 -
 frontend/public/i18n.js | 1 -
 frontend/webpack.config.ts | 1 -
 397 files changed, 40645 deletions(-)
 delete mode 100644 frontend/packages/ceph-storage-plugin/.eslintrc
 delete mode 100644 frontend/packages/ceph-storage-plugin/OWNERS
 delete mode 100644 frontend/packages/ceph-storage-plugin/README.md
 delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/.eslintrc
 delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/consts.ts
 delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/cypress-ceph.json
 delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/helpers/index.ts
 delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/helpers/installation.ts
 delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/helpers/pvc.ts
 delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/helpers/vault.ts
 delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/mocks/bucket-class.ts
 delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/mocks/deploymentData.ts
 delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/mocks/install.ts
 delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/mocks/storageclass.ts
 delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/reporter-config.json
 delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/support/index.ts
 delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/support/vault-standalone.ts
 delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/add-capacity.spec.ts
 delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/block-pool-create.spec.ts
 delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/block-pool-delete.spec.ts
 delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/block-pool-update.spec.ts
 delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/bucket-class-spec.ts
 delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/create-backing-store.spec.ts
 delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/expand-pvc.spec.ts
 delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/kms-encryption-sc.ts
 delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/multiple-pool.spec.ts
delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/multiple-storageclass-selection.spec.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/namespace-store.spec.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/noobaa-sso.spec.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/obc-test.spec.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/object-service-dashboards.spec.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/ocs-presistent-dashboard.spec.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/utils/consts.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/bc.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/block-pool.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/common.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/install.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/multiple-storageclass.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/obcPage.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/pvc.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/storage-class.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/store.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests/OWNERS delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests/mocks/expand-test-mocks.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests/mocks/independent-external-cluster-data.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests/mocks/storage-class.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests/mocks/storage-pool.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests/mocks/testFile.json delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests/tests/1-install/installFlow.scenario.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests/tests/2-tests/add-capacity.scenario.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests/tests/2-tests/multiple-pool.scenario.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests/tests/2-tests/noobaa-sso-scenario.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests/tests/2-tests/ocp-dashboard-card-healthcheck.scenario.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests/tests/2-tests/pvc.scenario.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests/tests/2-tests/storage-dashboard.scenario.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests/tests/2-tests/test-expand.scenario.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests/tests/3-tests/multiple-storage-class-selection.scenario.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests/tests/3-tests/upgrade.scenario.ts delete mode 100644 
frontend/packages/ceph-storage-plugin/integration-tests/utils/consts.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests/utils/helpers.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests/views/add-capacity.view.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests/views/installFlow.view.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests/views/multiple-pool.view.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests/views/noobaa-sso.view.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests/views/ocp-dashboard-card-healthcheck.view.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests/views/pvc.view.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests/views/storage-dashboard.view.ts delete mode 100644 frontend/packages/ceph-storage-plugin/integration-tests/views/upgrade.view.ts delete mode 100644 frontend/packages/ceph-storage-plugin/locales/OWNERS delete mode 100644 frontend/packages/ceph-storage-plugin/locales/en/ceph-storage-plugin.json delete mode 100644 frontend/packages/ceph-storage-plugin/locales/en/console-shared.json delete mode 100644 frontend/packages/ceph-storage-plugin/locales/ja/ceph-storage-plugin.json delete mode 100644 frontend/packages/ceph-storage-plugin/locales/ja/console-shared.json delete mode 100644 frontend/packages/ceph-storage-plugin/locales/ko/ceph-storage-plugin.json delete mode 100644 frontend/packages/ceph-storage-plugin/locales/ko/console-shared.json delete mode 100644 frontend/packages/ceph-storage-plugin/locales/zh/ceph-storage-plugin.json delete mode 100644 frontend/packages/ceph-storage-plugin/locales/zh/console-shared.json delete mode 100644 frontend/packages/ceph-storage-plugin/package.json delete mode 100644 frontend/packages/ceph-storage-plugin/src/__mocks__/breakdown-data.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/__mocks__/independent-mode-dashboard-data.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/__tests__/breakdown-body.spec.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/__tests__/breakdown-chart.spec.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/__tests__/flexible-scaling.spec.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/__tests__/independent-dashboard-breakdown-card.spec.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/__tests__/independent-dashboard-details-card.spec.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/actions/actions.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/actions/csv-actions.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/actions/index.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/attach-obc/attach-obc-deployment.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/attach-obc/attach-obc-deployment.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/block-pool/block-pool-details-page.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/block-pool/block-pool-list-page.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/block-pool/block-pool-menu-action.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/block-pool/body.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/block-pool/body.tsx delete mode 
100644 frontend/packages/ceph-storage-plugin/src/components/block-pool/create-block-pool.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/block-pool/create-block-pool.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/block-pool/footer.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/bucket-class/_backingstore-table.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/bucket-class/backingstore-table.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/bucket-class/create-bc.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/bucket-class/create-bc.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/bucket-class/modals/_bs-modal.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/bucket-class/modals/edit-backingstore-modal.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/bucket-class/review-utils.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/bucket-class/state.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/bucket-class/wizard-pages/backingstore-page.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/bucket-class/wizard-pages/general-page.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/bucket-class/wizard-pages/namespace-policy-page.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/bucket-class/wizard-pages/namespace-store-pages/cache-namespace-store.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/bucket-class/wizard-pages/namespace-store-pages/multi-namespace-store.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/bucket-class/wizard-pages/namespace-store-pages/single-namespace-store.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/bucket-class/wizard-pages/placement-policy-page.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/bucket-class/wizard-pages/review-page.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-backingstore-page/backing-store-dropdown.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-backingstore-page/create-bs-modal.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-backingstore-page/create-bs-page.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-backingstore-page/create-bs.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-backingstore-page/reducer.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/advanced-subscription/advanced-subscription.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/advanced-subscription/advanced-subscription.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/create-steps.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/create-storage-system-steps/backing-storage-step/backing-storage-step.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/create-storage-system-steps/backing-storage-step/backing-storage-step.tsx delete mode 100644 
frontend/packages/ceph-storage-plugin/src/components/create-storage-system/create-storage-system-steps/backing-storage-step/select-deployment.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/create-storage-system-steps/capacity-and-nodes-step/capacity-and-nodes-step.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/create-storage-system-steps/capacity-and-nodes-step/capacity-and-nodes.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/create-storage-system-steps/capacity-and-nodes-step/selected-nodes-table.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/create-storage-system-steps/capacity-and-nodes-step/stretch-cluster.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/create-storage-system-steps/connection-details-step.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/create-storage-system-steps/create-local-volume-set-step/body.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/create-storage-system-steps/create-local-volume-set-step/body.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/create-storage-system-steps/create-local-volume-set-step/create-local-volume-set-step.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/create-storage-system-steps/create-local-volume-set-step/create-local-volume-set-step.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/create-storage-system-steps/create-local-volume-set-step/disk-list-modal.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/create-storage-system-steps/create-local-volume-set-step/selected-capacity.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/create-storage-system-steps/create-local-volume-set-step/selected-capacity.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/create-storage-system-steps/create-storage-class-step/create-storage-class-step.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/create-storage-system-steps/create-storage-class-step/create-storage-class-step.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/create-storage-system-steps/index.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/create-storage-system-steps/review-and-create-step/review-and-create-step.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/create-storage-system-steps/review-and-create-step/review-and-create-step.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/create-storage-system-steps/security-and-network-step/encryption.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/create-storage-system-steps/security-and-network-step/encryption.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/create-storage-system-steps/security-and-network-step/security-and-network-step.tsx delete mode 100644 
frontend/packages/ceph-storage-plugin/src/components/create-storage-system/create-storage-system.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/create-storage-system.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/error-handler.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/external-storage/README.md delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/external-storage/ibm-flashsystem/index.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/external-storage/ibm-flashsystem/models.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/external-storage/ibm-flashsystem/type.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/external-storage/index.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/external-storage/red-hat-ceph-storage/index.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/external-storage/red-hat-ceph-storage/index.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/external-storage/types.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/footer.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/header.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/payloads.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/reducer.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/select-nodes-table/select-nodes-table-footer.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/select-nodes-table/select-nodes-table.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/select-nodes-table/select-nodes-table.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/create-storage-system/use-fetch-csv.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/block-pool/block-pool-dashboard-context.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/block-pool/block-pool-dashboard.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/block-pool/compression-details-card.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/block-pool/details-card.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/block-pool/inventory-card.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/block-pool/mirroring-card-body.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/block-pool/mirroring-card-item.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/block-pool/mirroring-card.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/block-pool/mirroring-card.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/block-pool/raw-capacity-card.tsx delete mode 100644 
frontend/packages/ceph-storage-plugin/src/components/dashboards/block-pool/states.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/block-pool/status-card.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/block-pool/utilization-card.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/common/breakdown-card/breakdown-body.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/common/breakdown-card/breakdown-capacity.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/common/breakdown-card/breakdown-card.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/common/breakdown-card/breakdown-chart.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/common/breakdown-card/breakdown-dropdown.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/common/breakdown-card/breakdown-loading.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/common/breakdown-card/consts.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/common/breakdown-card/utils.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/common/capacity-breakdown/breakdown-body.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/common/capacity-breakdown/breakdown-capacity.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/common/capacity-breakdown/breakdown-card.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/common/capacity-breakdown/breakdown-chart.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/common/capacity-breakdown/breakdown-dropdown.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/common/capacity-breakdown/breakdown-loading.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/common/capacity-breakdown/consts.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/common/capacity-breakdown/utils.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/common/capacity-card/capacity-card.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/common/capacity-card/capacity-card.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/common/data-resiliency/data-resiliency-activity.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/common/storage-efficiency/storage-efficiency-card-item.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/common/storage-efficiency/storage-efficiency-card.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/object-service/activity-card/activity-card.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/object-service/activity-card/activity-card.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/object-service/activity-card/data-resiliency-activity/data-resiliency-activity.scss delete mode 100644 
frontend/packages/ceph-storage-plugin/src/components/dashboards/object-service/activity-card/data-resiliency-activity/data-resiliency-activity.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/object-service/buckets-card/buckets-card-item.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/object-service/buckets-card/buckets-card.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/object-service/buckets-card/buckets-card.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/object-service/buckets-card/utils.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/object-service/capacity-breakdown/capacity-breakdown-card.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/object-service/capacity-breakdown/capacity-breakdown-card.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/object-service/data-consumption-card/data-consumption-card-dropdown.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/object-service/data-consumption-card/data-consumption-card-utils.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/object-service/data-consumption-card/data-consumption-card.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/object-service/data-consumption-card/data-consumption-card.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/object-service/data-consumption-card/data-consumption-graph.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/object-service/data-consumption-card/performance-graph.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/object-service/details-card/details-card.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/object-service/details-card/details-card.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/object-service/resource-providers-card/resource-providers-card-body.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/object-service/resource-providers-card/resource-providers-card-item.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/object-service/resource-providers-card/resource-providers-card.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/object-service/resource-providers-card/resource-providers-card.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/object-service/status-card/object-service-health.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/object-service/status-card/status-card.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/object-service/status-card/status-card.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/object-service/status-card/statuses.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/object-service/storage-efficiency-card/storage-efficiency-card.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/ocs-system-dashboard.tsx delete mode 100644 
frontend/packages/ceph-storage-plugin/src/components/dashboards/odf-system-dashboard.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/persistent-external/breakdown-card.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/persistent-external/details-card.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/persistent-external/status-card.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/persistent-external/utilization-card.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/persistent-internal/activity-card/activity-card.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/persistent-internal/activity-card/activity-card.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/persistent-internal/activity-card/cluster-expand-activity.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/persistent-internal/activity-card/ocs-upgrade-activity.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/persistent-internal/capacity-breakdown-card/capacity-breakdown-card.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/persistent-internal/capacity-breakdown-card/capacity-breakdown-card.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/persistent-internal/details-card.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/persistent-internal/inventory-card.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/persistent-internal/raw-capacity-card/raw-capacity-card.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/persistent-internal/status-card/healthchecks.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/persistent-internal/status-card/status-card.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/persistent-internal/status-card/utils.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/persistent-internal/status-card/whitelisted-health-checks.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/persistent-internal/storage-efficiency-card/storage-efficiency-card.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/persistent-internal/top-consumers-card/top-consumers-card-body.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/persistent-internal/top-consumers-card/utils.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/persistent-internal/utilization-card/area-chart.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/persistent-internal/utilization-card/multi-utilization-item.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/persistent-internal/utilization-card/prometheus-multi-utilization-item.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/persistent-internal/utilization-card/prometheus-utilization-item.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/persistent-internal/utilization-card/utilization-card.scss delete mode 100644 
frontend/packages/ceph-storage-plugin/src/components/dashboards/persistent-internal/utilization-card/utilization-card.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/persistent-internal/utilization-card/utilization-item.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/dashboards/persistent-internal/utilization-card/utils.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/disk-inventory/ocs-disks-list.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/disk-inventory/ocs-kebab-options.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/disk-inventory/ocs-status-column.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/disk-inventory/ocs-status-column.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/disk-inventory/state-reducer.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/kms-config/hpcs-config.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/kms-config/kms-config.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/kms-config/kms-config.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/kms-config/providers.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/kms-config/utils.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/kms-config/vault-auth-methods.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/kms-config/vault-config.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/modals/add-capacity-modal/add-capacity-modal.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/modals/add-capacity-modal/add-capacity-modal.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/modals/advanced-kms-modal/advanced-ibm-kms-modal.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/modals/advanced-kms-modal/advanced-kms-modal.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/modals/advanced-kms-modal/advanced-vault-modal.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/modals/attach-deployment-obc-modal.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/modals/block-pool-modal/create-block-pool-modal.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/modals/block-pool-modal/delete-block-pool-modal.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/modals/block-pool-modal/modal-footer.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/modals/block-pool-modal/update-block-pool-modal.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/modals/disk-replacement-modal.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/namespace-store/create-namespace-store.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/namespace-store/namespace-store-dropdown.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/namespace-store/namespace-store-form.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/namespace-store/namespace-store-modal.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/namespace-store/namespace-store-table.scss delete mode 100644 
frontend/packages/ceph-storage-plugin/src/components/namespace-store/namespace-store-table.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/namespace-store/reducer.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/noobaa-provider-endpoints/gcp-endpoint-type.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/noobaa-provider-endpoints/noobaa-provider-endpoints.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/noobaa-provider-endpoints/pvc-endpoint-type.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/noobaa-provider-endpoints/s3-endpoint-type.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/object-bucket-claim-page/create-obc.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/object-bucket-claim-page/create-obc.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/object-bucket-claim-page/menu-actions.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/object-bucket-claim-page/object-bucket-claim.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/object-bucket-claim-page/secret.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/object-bucket-page/create-ob.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/object-bucket-page/object-bucket.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/object-bucket-page/state.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/attached-devices-mode/attached-devices.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/attached-devices-mode/install-wizard-steps/configure-step.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/attached-devices-mode/install-wizard-steps/create-storage-class/create-storage-class-step.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/attached-devices-mode/install-wizard-steps/create-storage-class/disk-list-modal.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/attached-devices-mode/install-wizard-steps/create-storage-class/selected-capacity.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/attached-devices-mode/install-wizard-steps/discover-disks-step.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/attached-devices-mode/install-wizard-steps/review-and-create-step.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/attached-devices-mode/install-wizard-steps/storage-and-nodes-step.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/attached-devices-mode/install-wizard.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/attached-devices-mode/install.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/attached-devices-mode/reducer.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/attached-devices-mode/sc-node-list.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/existing-cluster-modal.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/external-mode/fileUpload.scss delete mode 100644 
frontend/packages/ceph-storage-plugin/src/components/ocs-install/external-mode/fileUpload.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/external-mode/install.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/external-mode/install.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/external-mode/utils.spec.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/external-mode/utils.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/install-page.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/install-page.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/install-wizard/_capacity-and-nodes.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/install-wizard/_configure.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/install-wizard/capacity-and-nodes.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/install-wizard/configure.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/install-wizard/install-wizard.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/install-wizard/review-and-create.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/internal-mode/install-wizard-steps/configure.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/internal-mode/install-wizard-steps/index.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/internal-mode/install-wizard-steps/review-and-create.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/internal-mode/install-wizard-steps/select-capacity-and-nodes-step.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/internal-mode/install-wizard.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/internal-mode/reducer.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/node-list.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/ocs-install.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/ocs-request-data.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/pvs-available-capacity.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/pvs-available-capacity.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/subscription-icon.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/subscription-icon.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-install/types.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-storage-class-form/ocs-storage-class-form.scss delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-storage-class-form/ocs-storage-class-form.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/ocs-storage-class-form/ocs-thick-provisioner.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/odf-resources/link.tsx delete mode 100644 
frontend/packages/ceph-storage-plugin/src/components/odf-resources/resource-details-page.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/odf-resources/resource-list-page.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/odf-system/actions.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/odf-system/odf-system-list.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/odf-system/system-link.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/odf-system/utils.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/components/storage-popover.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/constants/bucket-class.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/constants/capacity-breakdown.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/constants/common.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/constants/create-storage-system.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/constants/data-consumption.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/constants/disks.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/constants/index.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/constants/kms.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/constants/network.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/constants/ocs-install.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/constants/pool.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/constants/providers.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/constants/status.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/constants/storage-pool-const.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/constants/tooltips.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/extensions/dashboards.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/features.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/index.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/models.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/plugin.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/queries/ceph-queries.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/queries/index.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/queries/object-storage-queries.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/queries/odf-queries.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/resources.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/selectors/index.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/types.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/utils/add-capacity.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/utils/alert-action-path.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/utils/block-pool.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/utils/bucket-class.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/utils/common-ocs-install-el.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/utils/common.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/utils/create-storage-system.ts delete mode 100644 
frontend/packages/ceph-storage-plugin/src/utils/dashboard.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/utils/events.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/utils/index.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/utils/install.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/utils/noobaa-utils.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/utils/odf-provisioners.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/utils/osd-size-dropdown.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/utils/popover-helper.tsx delete mode 100644 frontend/packages/ceph-storage-plugin/src/utils/table-filters.ts delete mode 100644 frontend/packages/ceph-storage-plugin/src/utils/time-duration-dropdown.tsx diff --git a/frontend/package.json b/frontend/package.json index 5562c77da52a..2965b49c30fb 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -38,8 +38,6 @@ "test-cypress-dev-console": "cd packages/dev-console/integration-tests && yarn run test-cypress", "test-cypress-dev-console-headless": "cd packages/dev-console/integration-tests && yarn run test-cypress-headless", "test-cypress-dev-console-nightly": "cd packages/dev-console/integration-tests && yarn run test-cypress-nightly", - "test-cypress-ceph": "cd packages/ceph-storage-plugin/integration-tests-cypress && ../../../node_modules/.bin/cypress open --config-file cypress-ceph.json --env openshift=true", - "test-cypress-ceph-headless": "cd packages/ceph-storage-plugin/integration-tests-cypress && node --max-old-space-size=4096 ../../../node_modules/.bin/cypress run --config-file cypress-ceph.json --env openshift=true --browser ${BRIDGE_E2E_BROWSER_NAME:=chrome}", "test-cypress-pipelines": "cd packages/pipelines-plugin/integration-tests && yarn run test-cypress", "test-cypress-pipelines-headless": "cd packages/pipelines-plugin/integration-tests && yarn run test-cypress-headless", "test-cypress-pipelines-nightly": "cd packages/pipelines-plugin/integration-tests && yarn run test-cypress-headless-all", diff --git a/frontend/packages/ceph-storage-plugin/.eslintrc b/frontend/packages/ceph-storage-plugin/.eslintrc deleted file mode 100644 index a9bb80237614..000000000000 --- a/frontend/packages/ceph-storage-plugin/.eslintrc +++ /dev/null @@ -1,29 +0,0 @@ -{ - "root": true, - extends: ['plugin:console/react-typescript-prettier'], - "rules": { - "import/order": [ - "error", - { - "pathGroups": [ - { - "pattern": "@*/**", - "group": "external", - "position": "after", - }, - ], - "pathGroupsExcludedImportTypes": ["builtin"], - "groups": [ - "builtin", - "external", - "internal", - "index", - "sibling", - "parent", - "object", - "unknown", - ], - }, - ], - }, -} diff --git a/frontend/packages/ceph-storage-plugin/OWNERS b/frontend/packages/ceph-storage-plugin/OWNERS deleted file mode 100644 index dbb7eab0a7c9..000000000000 --- a/frontend/packages/ceph-storage-plugin/OWNERS +++ /dev/null @@ -1,8 +0,0 @@ -reviewers: - - bipuladh - - cloudbehl -approvers: - - bipuladh - - cloudbehl -labels: - - component/ceph diff --git a/frontend/packages/ceph-storage-plugin/README.md b/frontend/packages/ceph-storage-plugin/README.md deleted file mode 100644 index 82fa82b30f43..000000000000 --- a/frontend/packages/ceph-storage-plugin/README.md +++ /dev/null @@ -1,89 +0,0 @@ -# OCS UI Features - -The OCS UI requires some annotations in the OCS Operator CSV and Storage Cluster CR to perform various actions. 
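For orientation, the sketch below shows where such an annotation sits on the Operator CSV; the CSV name and the chosen feature flags here are illustrative, not taken from this patch:

    "metadata": {
      "name": "ocs-operator.v4.8.0",
      "annotations": {
        "features.ocs.openshift.io/enabled": "[\"multus\", \"arbiter\"]"
      }
    }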
- -The following table maps each annotation to its use case and accepted values: -|Annotation Name|Purpose|Accepted Values|CR/CSV| -|---------------------------------------|---------------------------|--------|----------------| -| `features.ocs.openshift.io/enabled`| Activates Optional Features | "multus", "flexible-scaling", "kms", "arbiter", "taint-nodes", "pool-management", "thick-provision" | Operator CSV -| `cluster.ocs.openshift.io/local-devices`| Activates disk replacement and OCS status column in disk inventory | "true" | Storage Cluster CR -| `external.features.ocs.openshift.io/validation`| Minimum required keys to be supplied by the admin to connect to an external cluster | Array of keys that need to be validated in the UI | Operator CSV - -## Enabling Features in UI - -UI features are activated based on the annotations. The following table maps each feature to the respective annotation key-value pair required to activate it. -| Feature | Annotation Key | Annotation Value | -|------------------------------|--------------------------|---------| -| Flexible scaling |`features.ocs.openshift.io/enabled` | `flexible-scaling` | -| KMS encryption |`features.ocs.openshift.io/enabled` | `kms` | -| Arbiter |`features.ocs.openshift.io/enabled` | `arbiter` | -| Multus |`features.ocs.openshift.io/enabled` | `multus` | -| Taint nodes |`features.ocs.openshift.io/enabled` | `taint-nodes` | -| Pool management |`features.ocs.openshift.io/enabled` | `pool-management` | -| Thick Provision |`features.ocs.openshift.io/enabled` | `thick-provision` | -| MCG standalone deployment |`features.ocs.openshift.io/enabled` | `mcg-standalone` | -| Disk Replacement Action| `cluster.ocs.openshift.io/local-devices` | `true`| -| Disk Inventory OCS Status Column | `cluster.ocs.openshift.io/local-devices` | `true`| - -#### Example - - "features.ocs.openshift.io/enabled": `["multus"]` - -## JSON validation (Independent Mode) - -This is used to perform validation of the JSON file uploaded by the user during Independent mode installation. The value of the `external.features.ocs.openshift.io/validation` annotation needs to be a stringified JSON object with the following keys and values: -| Key | Accepted Values | Validation Performed | -|------|-----------|-------------| -| `secrets` | Array of String| JSON should contain all the `secrets` defined in the array. The `data.userKey`/`data.adminKey` field should be Base64 encoded and non-empty. -|`configMaps`| Array of String| JSON should contain all the `configMaps` defined in the array. The `data` field should be non-empty. -| `storageClasses`| Array of String| JSON should contain all the `storageClasses` defined in the array. The `data` field should be non-empty. - -#### Example (CSV Annotation) - - external.features.ocs.openshift.io/validation: '{"secrets":["rook-ceph-operator-creds", - - "rook-csi-rbd-node", "rook-csi-rbd-provisioner", "rook-csi-cephfs-node", "rook-csi-cephfs-provisioner"], - - "configMaps": ["rook-ceph-mon-endpoints", "rook-ceph-mon"], "storageClasses": - - ["rook-ceph-retain-bucket"]}' - -#### Example (User Uploaded JSON) - -Excerpt of the JSON uploaded by the user: 
- - [ - { - "kind": "ConfigMap", - "data": { - "maxMonId": "0", - "data": "a=10.106.31.93:6789", - "mapping": {} - }, - "name": "rook-ceph-mon-endpoints" - }, - { - "kind": "Secret", - "data": { - "userKey": "AQBI8bteZd52HxAAAgHS3TJGEfgZurN+gVvDNQ==", - "userID": "client.aaaa" - }, - "name": "rook-ceph-operator-creds" - }, - { - "kind": "Secret", - "data": { - "adminID": "csi-cephfs-provisioner", - "adminKey": "AQBV66pefnqmERAAKYhoO2XK5mUIGKSN4J/URw==" - }, - "name": "rook-csi-cephfs-provisioner" - }, - { - "kind": "StorageClass", - "data": { - "pool": "device_health_metrics" - }, - "name": "ceph-rbd" - }, - ] diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/.eslintrc b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/.eslintrc deleted file mode 100644 index 4c0d2143d9d5..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/.eslintrc +++ /dev/null @@ -1,15 +0,0 @@ -{ - "env": { - "cypress/globals": true, - "node": true - }, - "extends": ["../../.eslintrc", "plugin:cypress/recommended"], - "plugins": ["cypress"], - "rules": { - "no-console": "off", - "no-namespace": "off", - "no-redeclare": "off", - "promise/catch-or-return": "off", - "promise/no-nesting": "off" - } -} diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/consts.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/consts.ts deleted file mode 100644 index 3adb558016e2..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/consts.ts +++ /dev/null @@ -1,110 +0,0 @@ -export const OCS_OP = 'OpenShift Container Storage'; -export const NS = 'openshift-storage'; - -export const SECOND = 1000; -export const MINUTE = 60 * SECOND; - -export enum POD_NAME_PATTERNS { - OCS = 'ocs-operator-', - ROOK = 'rook-ceph-operator-', - NOOBA_OPERATOR = 'noobaa-operator-', - NOOBAA_CORE = 'noobaa-core-', - ROOK_CEPH_MON = 'rook-ceph-mon', - ROOK_CEPH_MGR = 'rook-ceph-mgr', - CSI_CEPHFS = 'csi-cephfsplugin-', - CSI_RBD = 'csi-rbdplugin-', - ROOK_CEPH_MDS = 'rook-ceph-mds-ocs-storagecluster-cephfilesystem', - ROOK_CEPH_OSD = 'rook-ceph-osd-', - ROOK_CEPH_OSD_PREPARE = 'rook-ceph-osd-prepare-', -} - -export enum STORAGE_CLASS_PATTERNS { - RBD = 'ocs-storagecluster-ceph-rbd', - FS = 'ocs-storagecluster-cephfs', - NOOBAA = 'noobaa.io', -} - -export enum CLUSTER_STATUS { - READY = 'Ready', - PROGRESSING = 'Progressing', - HEALTH_ERROR = 'HEALTH_ERR', -} - -export const OCS_NODE_LABEL = 'cluster.ocs.openshift.io/openshift-storage'; -export const CATALOG_SRC = 'redhat-operators'; - -export const KIND = 'storagecluster'; -export const EXPAND_WAIT = 15 * MINUTE; -export const CAPACITY_UNIT = 'TiB'; -export const CAPACITY_VALUE = '2'; -export const OCS_OPERATOR_NAME = 'ocs-operatorv4'; -export const STORAGE_CLUSTER_NAME = 'ocs-storagecluster'; -export const HOST = 'host'; -export const ZONE = 'zone'; -export const OSD = 'osd'; - -export const SUCCESS = 'Succeeded'; -export const READY_FOR_USE = 'ready for use'; - -export const ocsTaint = Object.freeze({ - key: 'node.ocs.openshift.io/storage', - value: 'true', - effect: 'NoSchedule', -}); - -export enum VOLUME_ACCESS_MODES { - RWO = 'ReadWriteOnce', - RWX = 'ReadWriteMany', - ROX = 'ReadOnlyMany', -} - -export enum SIZE_UNITS { - MI = 'Mi', - GI = 'Gi', - TI = 'Ti', -} - -export enum PVC_STATUS { - PENDING = 'Pending', - BOUND = 'Bound', -} - -export enum OCP_TEXT_STATUS { - HEALTHY = 'healthy', - DEGRADED = 'health is degraded', - NOT_AVAILABLE = 'is not available', -} - 
-export enum OCP_HEALTH_ICON_COLORS { - GREEN = '#486b00', - YELLOW = '#f0ab00', - RED = '#c9190b', - GREEN46 = '#3e8635', -} - -export const EXAMPLE_PVC = { - name: 'example-pvc', - namespace: NS, - size: '5', - sizeUnits: SIZE_UNITS.GI, - storageClass: STORAGE_CLASS_PATTERNS.RBD, - accessMode: VOLUME_ACCESS_MODES.RWO, -}; - -export const CHANNEL_43 = 'stable-4.3'; -export const CHANNEL_44 = 'stable-4.4'; -export const CHANNEL_45 = 'stable-4.5'; - -export const LSO_INFO_MSG = 'Local Storage Operator Not Installed'; -export const SC_STEPS_NAME = { - DISCOVERY: 'Discover Disks', - STORAGECLASS: 'Create Storage Class', - STORAGECLUSTER: 'Create Storage Cluster', -}; -export const CONFIRM_MODAL_TITLE = 'Create Storage Class'; - -export const OSD_SIZES_MAP = { - '512Gi': 0.5, - '2Ti': 2, - '4Ti': 4, -}; diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/cypress-ceph.json b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/cypress-ceph.json deleted file mode 100644 index 273e2f7ea9d0..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/cypress-ceph.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "integrationFolder": "tests", - "viewportWidth": 1920, - "viewportHeight": 1080, - "screenshotsFolder": "../../../gui_test_screenshots/cypress/screenshots", - "videosFolder": "../../../gui_test_screenshots/cypress/videos", - "video": true, - "reporter": "../../../node_modules/cypress-multi-reporters", - "reporterOptions": { - "configFile": "reporter-config.json" - }, - "supportFile": "./support/index.ts", - "pluginsFile": "../../integration-tests-cypress/plugins/index.js", - "fixturesFolder": false, - "defaultCommandTimeout": 30000, - "retries": { - "runMode": 1, - "openMode": 0 - } -} diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/helpers/index.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/helpers/index.ts deleted file mode 100644 index c2ed95076f45..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/helpers/index.ts +++ /dev/null @@ -1 +0,0 @@ -export * from './installation'; diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/helpers/installation.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/helpers/installation.ts deleted file mode 100644 index c090a09772ba..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/helpers/installation.ts +++ /dev/null @@ -1,56 +0,0 @@ -import * as _ from 'lodash'; -import { NodeKind } from 'public/module/k8s'; - -export const SIZE_MAP = { - '512Gi': 0.5, - '2Ti': 2, - '4Ti': 4, -}; - -export const getPodName = (pod) => pod.metadata.name; - -export const getPodRestartCount = (pod) => pod.status.containerStatuses[0].restartCount; - -export const getPresentPod = (pods, podName: string) => - pods.items.find((pod) => getPodName(pod) === podName); - -export const getIds = (nodes, type: string): number[] => - nodes.filter((node) => node.type === type).map((node) => node.id); - -export const getNewOSDIds = (nodes, osds: number[]): number[] => - nodes - .filter((node) => node.type === 'osd' && osds.indexOf(node.id) === -1) - .map((node) => node.id); - -export const createOSDTreeMap = (nodes) => - nodes.reduce((acc, curr) => Object.assign(acc, { [curr.id]: curr }), {}); - -export const verifyZoneOSDMapping = (zones: number[], osds: number[], osdtree): boolean => { - let filteredOsds = [...osds]; - zones.forEach((zone) => { - const hostId = 
osdtree[zone].children[0]; - const len = osdtree[hostId].children.length; - filteredOsds = filteredOsds.filter((osd) => osd !== osdtree[hostId].children[len - 1]); - }); - - return filteredOsds.length === 0; -}; - -export const verifyNodeOSDMapping = (nodes: number[], osds: number[], osdtree): boolean => { - let filteredOsds = [...osds]; - nodes.forEach((node) => { - const len = osdtree[node].children.length; - filteredOsds = filteredOsds.filter((osd) => osd !== osdtree[node].children[len - 1]); - }); - - return filteredOsds.length === 0; -}; - -export const isNodeReady = (node: NodeKind): boolean => { - const conditions = node.status?.conditions ?? []; - const readyState: any = _.find(conditions, { type: 'Ready' }); - - return readyState && readyState.status === 'True'; -}; - -export const getDeviceCount = (storageCluster) => storageCluster?.spec?.storageDeviceSets[0].count; diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/helpers/pvc.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/helpers/pvc.ts deleted file mode 100644 index c70ec39f6a4a..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/helpers/pvc.ts +++ /dev/null @@ -1,24 +0,0 @@ -export const getPVCJSON = ( - name: string, - namespace: string, - storageClassName: string, - size: string = '5Gi', - volumeMode: string = 'Filesystem', -) => ({ - apiVersion: 'v1', - kind: 'PersistentVolumeClaim', - metadata: { - name, - namespace, - }, - spec: { - accessModes: ['ReadWriteOnce'], - resources: { - requests: { - storage: size, - }, - }, - storageClassName, - volumeMode, - }, -}); diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/helpers/vault.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/helpers/vault.ts deleted file mode 100644 index b6aecfc32773..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/helpers/vault.ts +++ /dev/null @@ -1,345 +0,0 @@ -export const roleBindingJSON = { - apiVersion: 'rbac.authorization.k8s.io/v1', - kind: 'ClusterRoleBinding', - metadata: { - labels: { - 'app.kubernetes.io/name': 'vault', - 'app.kubernetes.io/instance': 'vault', - }, - name: 'vault-server-binding', - }, - roleRef: { - apiGroup: 'rbac.authorization.k8s.io', - kind: 'ClusterRole', - name: 'system:auth-delegator', - }, - subjects: [ - { - kind: 'ServiceAccount', - name: 'vault', - namespace: 'hashicorp', - }, - ], -}; - -export const serviceAccountJSON = { - apiVersion: 'v1', - kind: 'ServiceAccount', - metadata: { - labels: { - 'app.kubernetes.io/instance': 'vault', - 'app.kubernetes.io/name': 'vault', - }, - name: 'vault', - }, -}; - -export const getPVCJSON = { - kind: 'PersistentVolumeClaim', - apiVersion: 'v1', - metadata: { - name: 'vault-storage', - labels: { - 'app.kubernetes.io/instance': 'vault', - 'app.kubernetes.io/name': 'vault', - }, - }, - spec: { - accessModes: ['ReadWriteOnce'], - resources: { - requests: { - storage: '10Gi', - }, - }, - }, -}; - -export const configMapJSON = { - kind: 'ConfigMap', - apiVersion: 'v1', - metadata: { - name: 'vault-config', - labels: { - 'app.kubernetes.io/instance': 'vault', - 'app.kubernetes.io/name': 'vault', - }, - }, - data: { - 'vault-config': - '{"backend": {"file": {"path": "/vault/data"}},' + - '"default_lease_ttl": "168h","max_lease_ttl": "720h","disable_mlock": true,' + - '"ui": true,"listener": {"tcp": {"address": "0.0.0.0:8200","tls_disable" : true}}}', - }, -}; - -export const serviceJSON = { - apiVersion: 'v1', - kind: 
'Service', - metadata: { - name: 'vault', - annotations: { - 'service.alpha.openshift.io/serving-cert-secret-name': 'vault-cert', - }, - labels: { - 'app.kubernetes.io/instance': 'vault', - 'app.kubernetes.io/name': 'vault', - }, - }, - spec: { - ports: [ - { - name: 'vault', - port: 8200, - }, - ], - selector: { - 'app.kubernetes.io/instance': 'vault', - 'app.kubernetes.io/name': 'vault', - }, - publishNotReadyAddresses: true, - }, -}; - -export const routeJSON = { - kind: 'Route', - apiVersion: 'route.openshift.io/v1', - metadata: { - name: 'vault', - labels: { - 'app.kubernetes.io/instance': 'vault', - 'app.kubernetes.io/name': 'vault', - }, - }, - spec: { - to: { - kind: 'Service', - name: 'vault', - weight: 100, - }, - port: { - targetPort: 8200, - }, - }, -}; - -export const networkPolicyJSON = { - apiVersion: 'networking.k8s.io/v1', - kind: 'NetworkPolicy', - metadata: { - name: 'vault', - labels: { - 'app.kubernetes.io/name': 'vault', - 'app.kubernetes.io/instance': 'vault', - }, - }, - spec: { - podSelector: { - matchLabels: { - 'app.kubernetes.io/instance': 'vault', - 'app.kubernetes.io/name': 'vault', - }, - }, - ingress: [ - { - from: [ - { - namespaceSelector: {}, - }, - ], - ports: [ - { - port: 8200, - protocol: 'TCP', - }, - ], - }, - ], - }, -}; - -export const deploymentJson = { - apiVersion: 'apps/v1', - kind: 'Deployment', - metadata: { - labels: { - 'app.kubernetes.io/instance': 'vault', - 'app.kubernetes.io/name': 'vault', - }, - name: 'vault', - }, - spec: { - selector: { - matchLabels: { - 'app.kubernetes.io/instance': 'vault', - 'app.kubernetes.io/name': 'vault', - }, - }, - template: { - metadata: { - labels: { - 'app.kubernetes.io/instance': 'vault', - 'app.kubernetes.io/name': 'vault', - }, - }, - spec: { - containers: [ - { - image: 'vault:1.3.5', - name: 'vault', - ports: [ - { - containerPort: 8200, - name: 'vaultport', - protocol: 'TCP', - }, - ], - args: ['server', '-log-level=debug'], - env: [ - { - name: 'SKIP_SETCAP', - value: 'true', - }, - { - name: 'SKIP_CHOWN', - value: 'true', - }, - { - name: 'VAULT_LOCAL_CONFIG', - valueFrom: { - configMapKeyRef: { - name: 'vault-config', - key: 'vault-config', - }, - }, - }, - { - name: 'VAULT_ADDR', - value: 'http://127.0.0.1:8200', - }, - ], - volumeMounts: [ - { - name: 'data', - mountPath: '/vault/data', - readOnly: false, - }, - { - name: 'config', - mountPath: '/vault/config', - }, - { - name: 'cert', - mountPath: '/var/run/secrets/kubernetes.io/certs', - }, - ], - livenessProbe: { - httpGet: { - path: 'v1/sys/health?standbyok=true&standbycode=200&sealedcode=200&uninitcode=200', - port: 8200, - scheme: 'HTTP', - }, - }, - readinessProbe: { - exec: { - command: ['/bin/sh', '-ec', 'vault status'], - }, - failureThreshold: 2, - initialDelaySeconds: 5, - periodSeconds: 3, - successThreshold: 1, - timeoutSeconds: 5, - }, - resources: { - requests: { - memory: '256Mi', - cpu: '250m', - }, - limits: { - memory: '256Mi', - cpu: '250m', - }, - }, - lifecycle: { - preStop: { - exec: { - command: ['/bin/sh', '-c', 'sleep 5 && kill -SIGTERM $(pidof vault)'], - }, - }, - }, - }, - ], - serviceAccount: 'vault', - serviceAccountName: 'vault', - volumes: [ - { - name: 'data', - persistentVolumeClaim: { - claimName: 'vault-storage', - }, - }, - { - name: 'config', - emptyDir: {}, - }, - { - name: 'cert', - secret: { - secretName: 'vault-cert', - }, - }, - ], - }, - }, - }, -}; - -export const testDeploymentJSON = { - apiVersion: 'apps/v1', - kind: 'Deployment', - metadata: { - name: 'test-vault-deployment', - namespace: 
'default', - }, - spec: { - selector: { - matchLabels: { - app: 'hello-openshift', - }, - }, - replicas: 1, - template: { - metadata: { - labels: { - app: 'hello-openshift', - }, - }, - spec: { - containers: [ - { - name: 'hello-openshift', - image: 'openshift/hello-openshift', - ports: [ - { - containerPort: 5555, - }, - ], - volumeMounts: [ - { - name: 'task-pv-storage', - mountPath: '/usr/share/nginx/html', - }, - ], - }, - ], - volumes: [ - { - name: 'task-pv-storage', - persistentVolumeClaim: { - claimName: 'encrypted-pvc', - }, - }, - ], - }, - }, - }, -}; diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/mocks/bucket-class.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/mocks/bucket-class.ts deleted file mode 100644 index 91a11547d85f..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/mocks/bucket-class.ts +++ /dev/null @@ -1,41 +0,0 @@ -import { StoreType } from '../views/store'; - -export const bucketStore = (storeName: string) => ({ - apiVersion: 'noobaa.io/v1alpha1', - kind: 'BackingStore', - metadata: { - name: storeName, - }, - spec: { - pvPool: { - numVolumes: 1, - storageClass: 'gp2', - resources: { - requests: { - storage: '50Gi', - }, - }, - }, - type: 'pv-pool', - }, -}); - -export const namespaceStore = (name: string, type: StoreType) => ({ - apiVersion: 'noobaa.io/v1alpha1', - kind: type === StoreType.NamespaceStore ? 'NamespaceStore' : 'BackingStore', - metadata: { - name, - namespace: 'openshift-storage', - }, - spec: { - awsS3: { - region: 'us-east-1', - secret: { - name: `${name}-secret`, - namespace: 'openshift-storage', - }, - targetBucket: 'target', - }, - type: 'aws-s3', - }, -}); diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/mocks/deploymentData.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/mocks/deploymentData.ts deleted file mode 100644 index bceddbabdb40..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/mocks/deploymentData.ts +++ /dev/null @@ -1,35 +0,0 @@ -export const deployment = { - apiVersion: 'apps/v1', - kind: 'Deployment', - metadata: { - name: 'test-deployment', - }, - spec: { - selector: { - matchLabels: { - app: 'hello-openshift', - }, - }, - replicas: 1, - template: { - metadata: { - labels: { - app: 'hello-openshift', - }, - }, - spec: { - containers: [ - { - name: 'hello-openshift', - image: 'openshift/hello-openshift', - ports: [ - { - containerPort: 8080, - }, - ], - }, - ], - }, - }, - }, -}; diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/mocks/install.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/mocks/install.ts deleted file mode 100644 index c6b43841e21b..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/mocks/install.ts +++ /dev/null @@ -1,26 +0,0 @@ -export const PULL_SECRET_PATH = '/var/run/operator-secret/dockerconfig'; - -export const CATALOG = { - NAMESPACE: 'openshift-marketplace', - SECRET: 'ocs-secret', - IMAGE: 'quay.io/rhceph-dev/ocs-registry:latest-stable-4.8', -}; - -export const ocsCatalogSource = { - apiVersion: 'operators.coreos.com/v1alpha1', - kind: 'CatalogSource', - metadata: { - labels: { - 'ocs-operator-internal': 'true', - }, - namespace: CATALOG.NAMESPACE, - name: 'ocs-catalogsource', - }, - spec: { - sourceType: 'grpc', - image: CATALOG.IMAGE, - secrets: [CATALOG.SECRET], - displayName: 'OpenShift Container Storage', - publisher: 'Red Hat', - }, -}; diff --git 
a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/mocks/storageclass.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/mocks/storageclass.ts deleted file mode 100644 index 7418ed593289..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/mocks/storageclass.ts +++ /dev/null @@ -1,48 +0,0 @@ -export const testNoProvisionerSC = { - apiVersion: 'storage.k8s.io/v1', - kind: 'StorageClass', - metadata: { name: 'test-no-prov-sc' }, - provisioner: 'kubernetes.io/no-provisioner', - reclaimPolicy: 'Delete', -}; - -export const testEbsSC = { - apiVersion: 'storage.k8s.io/v1', - kind: 'StorageClass', - metadata: { name: 'test-ebs-sc' }, - provisioner: 'kubernetes.io/aws-ebs', - parameters: { type: 'io1' }, - reclaimPolicy: 'Retain', -}; - -export const getPVJSON = (id: number, nodeName: string, scName: string) => { - return { - kind: 'PersistentVolume', - apiVersion: 'v1', - metadata: { name: `test-pv-${id}` }, - spec: { - capacity: { - storage: '10Mi', - }, - local: { path: `/mnt/local-storage/test-${id}/` }, - accessModes: ['ReadWriteOnce'], - persistentVolumeReclaimPolicy: 'Delete', - storageClassName: scName, - nodeAffinity: { - required: { - nodeSelectorTerms: [ - { - matchExpressions: [ - { - key: 'kubernetes.io/hostname', - operator: 'In', - values: [nodeName], - }, - ], - }, - ], - }, - }, - }, - }; -}; diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/reporter-config.json b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/reporter-config.json deleted file mode 100644 index 1e3860324f64..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/reporter-config.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "reporterEnabled": "mocha-junit-reporter, mochawesome", - "mochaJunitReporterReporterOptions": { - "mochaFile": "../../../gui_test_screenshots/junit_cypress-[hash].xml", - "toConsole": false - }, - "mochawesomeReporterOptions": { - "reportDir": "../../../gui_test_screenshots/", - "reportFilename": "cypress_report_ceph", - "overwrite": false, - "html": false, - "json": true - } -} diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/support/index.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/support/index.ts deleted file mode 100644 index 82459fbac52e..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/support/index.ts +++ /dev/null @@ -1,55 +0,0 @@ -import '../../../integration-tests-cypress/support/index.ts'; -import { OCS_OP } from '../consts'; -import { CATALOG } from '../mocks/install'; -import { NS } from '../utils/consts'; -import { - createImagePullSecret, - createCustomCatalogSource, - subscribeToOperator, - linkPullSecretToPods, - createInternalStorageCluster, - verifyMonitoring, - verifyNodeLabels, - verifyClusterReadiness, -} from '../views/install'; - -declare global { - namespace Cypress { - interface Chainable { - install(encrypted?: boolean): Chainable; - } - } -} - -Cypress.Commands.add('install', (encrypted = false) => { - cy.exec(`oc get storagecluster ocs-storagecluster -n ${NS}`, { - failOnNonZeroExit: false, - }).then(({ code }) => { - // Only run Installation if the Storage Cluster doesn't already exist - if (code !== 0) { - cy.log('Perform OCS Operator installation and StorageCluster creation'); - createImagePullSecret(CATALOG.NAMESPACE); - createCustomCatalogSource(); - subscribeToOperator(); - cy.byTestID('view-installed-operators-btn').click(); - cy.log('Wait for 
operator phase to be `Installing`'); - cy.byLegacyTestID('item-filter').type(`${OCS_OP}`); - cy.byTestID('status-text', { timeout: 120000 }).should('have.text', 'Installing'); - /** - * Waiting for 30 seconds to make sure service accounts are loaded by the operator. - * It is a safer time lapse to ensure everything is ready for the next step. - * */ - // eslint-disable-next-line cypress/no-unnecessary-waiting - cy.wait(30000); - linkPullSecretToPods(); - cy.log('Check operator installation is successful'); - cy.byTestID('success-icon', { timeout: 180000 }).should('be.visible'); - createInternalStorageCluster(encrypted); - verifyNodeLabels(); - verifyMonitoring(); - verifyClusterReadiness(); - } else { - cy.log('OCS Storage Cluster is already installed. Proceeding without installation.'); - } - }); -}); diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/support/vault-standalone.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/support/vault-standalone.ts deleted file mode 100644 index a975eedba78d..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/support/vault-standalone.ts +++ /dev/null @@ -1,75 +0,0 @@ -import { - serviceAccountJSON, - roleBindingJSON, - getPVCJSON, - configMapJSON, - deploymentJson, - serviceJSON, - routeJSON, - networkPolicyJSON, - testDeploymentJSON, -} from '../helpers/vault'; -import { commandPoll } from '../views/common'; - -export const configureVault = () => { - cy.exec('oc get project hashicorp', { - failOnNonZeroExit: false, - }).then(({ code }) => { - // Deploy vault only if it doesn't already exist - if (code !== 0) { - cy.log('Create a new project for internal vault'); - cy.exec('oc new-project hashicorp'); - - cy.log('Creating CRs to configure vault'); - cy.exec(`echo '${JSON.stringify(serviceAccountJSON)}' | oc apply -f -`); - cy.exec(`echo '${JSON.stringify(roleBindingJSON)}' | oc apply -f -`); - cy.exec(`echo '${JSON.stringify(getPVCJSON)}' | oc apply -f -`); - cy.exec(`echo '${JSON.stringify(configMapJSON)}' | oc apply -f -`); - - cy.log('Deploying vault'); - cy.exec(`echo '${JSON.stringify(deploymentJson)}' | oc apply -f -`); - cy.exec(`echo '${JSON.stringify(serviceJSON)}' | oc apply -f -`); - - cy.log('Generating vault keys and token'); - cy.exec('oc get pods --no-headers -o custom-columns=":metadata.name"').then((pod) => { - const podName: string = pod.stdout; - - cy.log('Checking vault pod rsh is possible'); - commandPoll(`oc exec -ti ${podName} hostname`, podName, false); - - cy.exec( - `oc exec -ti ${podName} -- vault operator init --key-shares=1 --key-threshold=1 --format=json`, - ).then((vault) => { - const vaultObj = JSON.parse(vault.stdout); - const vaultKeys = vaultObj?.unseal_keys_b64; - const vaultToken = vaultObj?.root_token; - cy.log('Unsealing Vault'); - cy.exec(`oc exec -ti ${podName} -- vault operator unseal ${vaultKeys[0]}`); - cy.log('Enabling a key/value secrets engine'); - cy.exec( - `oc exec -ti ${podName} -- /bin/sh -c 'export VAULT_TOKEN=${vaultToken} && vault secrets enable -path=secret kv'`, - ); - cy.log(`vault token = ${vaultToken}`); - cy.exec( - `oc create secret generic ceph-csi-kms-token --from-literal=token=${vaultToken} -n default`, - ); - cy.exec(`echo '${JSON.stringify(testDeploymentJSON)}' | oc apply -f -`); - }); - }); - - cy.log('Configuring router'); - cy.exec(`echo '${JSON.stringify(routeJSON)}' | oc apply -f -`); - cy.exec(`echo '${JSON.stringify(networkPolicyJSON)}' | oc apply -f -`); - } else { - cy.log('Vault is already deployed'); - } 
- }); -}; - -export const isPodRunningWithEncryptedPV = () => { - cy.log('Checking pod is up and running with encrypted PV'); - commandPoll( - `oc get Deployment ${testDeploymentJSON.metadata.name} -n default -ojsonpath='{.status.availableReplicas}'`, - '1', - ); -}; diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/add-capacity.spec.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/add-capacity.spec.ts deleted file mode 100644 index 77ba9bd12ff0..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/add-capacity.spec.ts +++ /dev/null @@ -1,155 +0,0 @@ -import * as _ from 'lodash'; -import { checkErrors } from '../../../integration-tests-cypress/support'; -import { modal } from '../../../integration-tests-cypress/views/modal'; -import { CLUSTER_STATUS } from '../../integration-tests/utils/consts'; -import { - createOSDTreeMap, - getDeviceCount, - getIds, - getNewOSDIds, - getPodRestartCount, - isNodeReady, - SIZE_MAP, - verifyNodeOSDMapping, - getPresentPod, - getPodName, -} from '../helpers'; -import { commonFlows } from '../views/common'; - -xdescribe('OCS Operator Expansion of Storage Class Test', () => { - before(() => { - cy.login(); - cy.visit('/'); - cy.install(); - }); - - beforeEach(() => { - cy.visit('/'); - }); - - afterEach(() => { - checkErrors(); - }); - - after(() => { - cy.logout(); - }); - - it.only('Add additional capacity to Storage Cluster', () => { - const initialState = { - storageCluster: null, - cephCluster: null, - osdTree: null, - pods: null, - formattedOSDTree: null, - osdIDs: null, - }; - - cy.exec('oc get storagecluster ocs-storagecluster -n openshift-storage -o json').then((res) => { - const storageCluster = JSON.parse(res.stdout); - _.set(initialState, 'storageCluster', storageCluster); - }); - cy.exec('oc get cephCluster ocs-storagecluster-cephcluster -n openshift-storage -o json').then( - (res) => { - const cephCluster = JSON.parse(res.stdout); - _.set(initialState, 'cephCluster', cephCluster); - - cy.log('Check if ceph cluster is healthy before expansion'); - expect(cephCluster.status.ceph.health).not.toBe(CLUSTER_STATUS.HEALTH_ERROR); - }, - ); - cy.exec( - `oc -n openshift-storage rsh $(oc get po -n openshift-storage | grep ceph-operator | awk '{print$1}') ceph --conf=/var/lib/rook/openshift-storage/openshift-storage.config osd tree --format=json`, - { timeout: 120000 }, - ).then((res) => { - const osdTree = JSON.parse(res.stdout); - _.set(initialState, 'osdTree', osdTree); - - const formattedOSDTree = createOSDTreeMap(osdTree.nodes); - _.set(initialState, 'formattedOSDTree', formattedOSDTree); - - const osdIDs = getIds(osdTree.nodes, 'osd'); - _.set(initialState, 'osdIDs', osdIDs); - }); - cy.exec('oc get po -n openshift-storage -o json').then((res) => { - const pods = JSON.parse(res.stdout); - _.set(initialState, 'pods', pods); - - commonFlows.navigateToOCS(); - - cy.byLegacyTestID('horizontal-link-Storage Cluster').click(); - cy.byLegacyTestID('kebab-button').click(); - cy.byTestActionID('Add Capacity').click(); - modal.shouldBeOpened(); - - const initialCapacity = - SIZE_MAP[ - initialState.storageCluster?.spec?.storageDeviceSets?.[0]?.dataPVCTemplate?.spec - ?.resources?.requests?.storage - ]; - cy.byLegacyTestID('requestSize').should('have.value', String(initialCapacity)); - cy.byTestID('provisioned-capacity').contains( - `${String((initialCapacity * 3).toFixed(2))} TiB`, - ); - cy.byTestID('add-cap-sc-dropdown', { timeout: 10000 }).should('be.visible'); - 
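      // Illustrative aside (not part of the original spec): submitting the modal below is
      // what bumps spec.storageDeviceSets[0].count on the StorageCluster CR by one; the
      // later assertions re-read the CR via getDeviceCount and compare before/after, e.g.:
      //   oc get storagecluster ocs-storagecluster -n openshift-storage -o jsonpath='{.spec.storageDeviceSets[0].count}'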
modal.submit(); - modal.shouldBeClosed(); - - // Wait for the storage cluster to reach Ready - // Storage Cluster CR flickers so wait for 10 seconds - // Disabling until ocs-operator fixes the above issue - // eslint-disable-next-line cypress/no-unnecessary-waiting - cy.wait(10000); - cy.byTestOperandLink('ocs-storagecluster').click(); - cy.byTestID('resource-status').contains('Ready', { timeout: 900000 }); - }); - cy.exec('oc get storagecluster ocs-storagecluster -n openshift-storage -o json').then((res) => { - const storageCluster = JSON.parse(res.stdout); - // Assertion of increment of device count - cy.log('Check cluster device set count has increased'); - expect(getDeviceCount(initialState.storageCluster)).toEqual( - getDeviceCount(storageCluster) - 1, - ); - }); - cy.exec('oc get cephCluster ocs-storagecluster-cephcluster -n openshift-storage -o json').then( - (res) => { - const cephCluster = JSON.parse(res.stdout); - - cy.log('Check if ceph cluster is healthy after expansion'); - expect(cephCluster.status.ceph.health).not.toBe(CLUSTER_STATUS.HEALTH_ERROR); - }, - ); - cy.exec('oc get po -n openshift-storage -o json').then((res) => { - const pods = JSON.parse(res.stdout); - - cy.log('Check Pods have not restarted unexpectedly'); - initialState.pods.items.forEach((pod) => { - const initialRestarts = getPodRestartCount(pod); - const updatedPod = getPresentPod(pods, getPodName(pod)); - if (updatedPod) { - const currentRestarts = getPodRestartCount(updatedPod); - expect(initialRestarts).toEqual(currentRestarts); - } - }); - }); - cy.exec( - `oc -n openshift-storage rsh $(oc get po -n openshift-storage | grep ceph-operator | awk '{print$1}') ceph --conf=/var/lib/rook/openshift-storage/openshift-storage.config osd tree --format=json`, - { timeout: 120000 }, - ).then((res) => { - const osdTree = JSON.parse(res.stdout); - const formattedOSDTree = createOSDTreeMap(osdTree.nodes); - const newOSDIds = getNewOSDIds(osdTree.nodes, initialState.osdIDs); - - cy.log('New OSDs are added correctly to the right nodes', () => { - const nodes = getIds(osdTree.nodes, 'host'); - expect(verifyNodeOSDMapping(nodes, newOSDIds, formattedOSDTree)).toBeTruthy(); - }); - }); - cy.exec('oc get nodes -o json').then((res) => { - const nodes = JSON.parse(res.stdout); - const allNodesReady = nodes.items.every(isNodeReady); - cy.log('No Nodes should go to Not Ready state'); - expect(allNodesReady).toBeTruthy(); - }); - }); -}); diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/block-pool-create.spec.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/block-pool-create.spec.ts deleted file mode 100644 index 6091ea266779..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/block-pool-create.spec.ts +++ /dev/null @@ -1,21 +0,0 @@ -import { checkErrors } from '../../../integration-tests-cypress/support'; -import { createBlockPool, deleteBlockPoolFromCli } from '../views/block-pool'; - -/** @deprecated test cases are moved to odf-console */ -xdescribe('Test block pool creation under OCS UI', () => { - before(() => { - cy.login(); - cy.visit('/'); - cy.install(); - }); - - after(() => { - checkErrors(); - cy.logout(); - }); - - it('Check for a new pool creation', () => { - createBlockPool(); - deleteBlockPoolFromCli(); - }); -}); diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/block-pool-delete.spec.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/block-pool-delete.spec.ts deleted 
file mode 100644 index 792d28aff91c..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/block-pool-delete.spec.ts +++ /dev/null @@ -1,71 +0,0 @@ -import { checkErrors } from '../../../integration-tests-cypress/support'; -import { modal } from '../../../integration-tests-cypress/views/modal'; -import { POOL_PROGRESS } from '../../src/constants/storage-pool-const'; -import { - poolName, - scName, - navigateToBlockPool, - verifyFooterActions, - poolMessage, -} from '../views/block-pool'; -import { pvc } from '../views/pvc'; -import { createStorageClass } from '../views/storage-class'; - -const pvcName: string = 'testing-pvc'; - -/** @deprecated test cases are moved to odf-console */ -xdescribe('Test block pool deletion under OCS UI', () => { - before(() => { - cy.login(); - cy.visit('/'); - cy.install(); - // Todo(bipuladh): Enable after downstream builds are available with v1 CSIDrivers - // cy.log('Creating a test pool'); - // createBlockPool(); - }); - - after(() => { - // Todo(bipuladh): Enable after downstream builds are available with v1 CSIDrivers - // deleteStorageClassFromCli(scName); - checkErrors(); - cy.logout(); - }); - - // Todo(bipuladh): Enable after downstream builds are available with v1 CSIDrivers - it('deletion of a non-default block pool is successful', () => { - cy.log('Create storage class using newly created pool'); - createStorageClass(scName, poolName); - - cy.log('Create PVC using newly created storage class'); - cy.clickNavLink(['PersistentVolumeClaims']); - pvc.createPVC(pvcName, '1', scName); - cy.visit('/'); - - cy.log('Delete a newly created block pool'); - navigateToBlockPool(); - cy.byLegacyTestID('kebab-button').first().click(); - cy.byTestActionID('Delete BlockPool').click(); - - modal.modalTitleShouldContain('Delete BlockPool'); - cy.byTestID('pool-bound-message').contains(poolMessage[POOL_PROGRESS.BOUNDED]); - cy.byTestID('pool-storage-classes').contains(scName); - verifyFooterActions(POOL_PROGRESS.BOUNDED); - - cy.log('Delete pvc and try pool deletion'); - cy.exec(`oc delete PersistentVolumeClaim ${pvcName} -n openshift-storage`); - - cy.byLegacyTestID('kebab-button').first().click(); - cy.byTestActionID('Delete BlockPool').click(); - verifyFooterActions('delete'); - }); - - it('Deleting the default block pools should fail', () => { - navigateToBlockPool(); - cy.log('Click delete kebab action'); - cy.byLegacyTestID('kebab-button').last().click(); - cy.byTestActionID('Delete BlockPool').click(); - cy.log('Deletion not allowed message is visible'); - cy.byTestID('empty-state-body').contains(poolMessage[POOL_PROGRESS.NOTALLOWED]); - verifyFooterActions(POOL_PROGRESS.NOTALLOWED); - }); -}); diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/block-pool-update.spec.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/block-pool-update.spec.ts deleted file mode 100644 index 8e6c6231c9be..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/block-pool-update.spec.ts +++ /dev/null @@ -1,53 +0,0 @@ -import { checkErrors } from '@console/cypress-integration-tests/support'; -import { modal } from '../../../integration-tests-cypress/views/modal'; -import { POOL_PROGRESS } from '../../src/constants/storage-pool-const'; -import { - createBlockPool, - deleteBlockPoolFromCli, - verifyFooterActions, - verifyBlockPoolJSON, - poolMessage, -} from '../views/block-pool'; - -/** @deprecated test cases are moved to odf-console */ -xdescribe('Test 
block pool update under OCS UI', () => { - before(() => { - cy.login(); - cy.visit('/'); - cy.install(); - cy.log('Creating a test pool'); - createBlockPool(); - }); - - after(() => { - deleteBlockPoolFromCli(); - checkErrors(); - cy.logout(); - }); - - it('Test editing a non-default block pool is successful', () => { - cy.log('Updating a newly created block pool'); - cy.byLegacyTestID('kebab-button').first().click(); - cy.byTestActionID('Edit BlockPool').click(); - - modal.modalTitleShouldContain('Edit BlockPool'); - cy.byTestID('replica-dropdown').click(); - cy.byLegacyTestID('replica-dropdown-item').contains('3-way Replication').click(); - cy.byTestID('compression-checkbox').uncheck(); - - cy.log('Updating pool'); - verifyFooterActions('update'); - - cy.log('Verify pool update'); - verifyBlockPoolJSON(false, '3'); - }); - - it('Test editing a default block pool is not allowed', () => { - cy.log('Click edit kebab action'); - cy.byLegacyTestID('kebab-button').last().click(); - cy.byTestActionID('Edit BlockPool').click(); - cy.log('Editing not allowed message is visible'); - cy.byTestID('empty-state-body').contains(poolMessage[POOL_PROGRESS.NOTALLOWED]); - verifyFooterActions(POOL_PROGRESS.NOTALLOWED); - }); -}); diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/bucket-class-spec.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/bucket-class-spec.ts deleted file mode 100644 index 93599b028878..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/bucket-class-spec.ts +++ /dev/null @@ -1,101 +0,0 @@ -import { checkErrors } from '../../../integration-tests-cypress/support'; -import { - createBucketClass, - Tier, - BucketClassType, - StandardBucketClassConfig, - verifyBucketClass, - NamespaceBucketClassConfig, - NamespacePolicyType, - deleteBucketClass, - visitBucketClassPage, -} from '../views/bc'; - -xdescribe('Tests creation of Standard Bucket Class', () => { - const backingStoreResources = ['test-store1', 'test-store2', 'test-store3', 'test-store4']; - const config = new StandardBucketClassConfig(backingStoreResources, BucketClassType.STANDARD); - before(() => { - cy.login(); - cy.visit('/'); - cy.install(); - config.setup(); - }); - - beforeEach(() => { - visitBucketClassPage(); - }); - - afterEach(() => { - verifyBucketClass(); - deleteBucketClass(); - checkErrors(); - }); - - after(() => { - config.cleanup(); - cy.logout(); - }); - - it('Create a 1 Tier(Spread) Bucket Class', () => { - config.tiers = [Tier.SPREAD]; - createBucketClass(config); - }); - - it('Create a 1 Tier(Mirror) Bucket Class', () => { - config.tiers = [Tier.MIRROR]; - createBucketClass(config); - }); - - it('Create a 2 Tier(Spread, Spread) Bucket Class', () => { - config.tiers = [Tier.SPREAD, Tier.SPREAD]; - createBucketClass(config); - }); - - it('Create a 2 Tier(Spread, Mirror) Bucket Class', () => { - config.tiers = [Tier.SPREAD, Tier.MIRROR]; - createBucketClass(config); - }); -}); - -xdescribe('Tests creation of Namespace Bucket Class', () => { - const config = new NamespaceBucketClassConfig( - ['ns1', 'ns2', 'ns3', 'ns4'], - BucketClassType.NAMESPACE, - ); - before(() => { - cy.login(); - cy.visit('/'); - cy.install(); - config.setup(); - }); - - beforeEach(() => { - visitBucketClassPage(); - }); - - afterEach(() => { - verifyBucketClass(); - deleteBucketClass(); - checkErrors(); - }); - - after(() => { - config.cleanup(); - cy.logout(); - }); - - it('Create a Single Namespace Bucket Class', () => { - 
config.namespacePolicyType = NamespacePolicyType.SINGLE; - createBucketClass(config); - }); - - it('Create a Multi Namespace Bucket Class', () => { - config.namespacePolicyType = NamespacePolicyType.MULTI; - createBucketClass(config); - }); - - it('Create a Cache Namespace Bucket Class', () => { - config.namespacePolicyType = NamespacePolicyType.CACHE; - createBucketClass(config); - }); -}); diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/create-backing-store.spec.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/create-backing-store.spec.ts deleted file mode 100644 index 243f8b03aa11..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/create-backing-store.spec.ts +++ /dev/null @@ -1,52 +0,0 @@ -import { checkErrors } from '../../../integration-tests-cypress/support'; -import { commonFlows } from '../views/common'; -import { createStore, Providers, testName } from '../views/store'; - -xdescribe('Tests creation of Backing Stores', () => { - before(() => { - cy.login(); - cy.visit('/'); - cy.install(); - }); - - after(() => { - cy.logout(); - }); - - afterEach(() => { - cy.byLegacyTestID('actions-menu-button').click(); - cy.byTestActionID('Delete Backing Store').click(); - cy.byTestID('confirm-action').click(); - checkErrors(); - }); - - beforeEach(() => { - cy.visit('/'); - commonFlows.navigateToOCS(); - cy.byLegacyTestID('horizontal-link-Backing Store').click(); - cy.byTestID('item-create').click(); - }); - - it('Test creation of AWS backing store', () => { - createStore(Providers.AWS); - cy.byLegacyTestID('resource-title').contains(testName); - cy.exec(`oc delete secrets ${testName}-secret -n openshift-storage`); - }); - - it('Test creation of Azure backing store', () => { - createStore(Providers.AZURE); - cy.byLegacyTestID('resource-title').contains(testName); - cy.exec(`oc delete secrets ${testName}-secret -n openshift-storage`); - }); - - it('Test creation of S3 Endpoint Type', () => { - createStore(Providers.S3); - cy.byLegacyTestID('resource-title').contains(testName); - cy.exec(`oc delete secrets ${testName}-secret -n openshift-storage`); - }); - - it('Test creation of PVC Endpoint Type', () => { - createStore(Providers.PVC); - cy.byLegacyTestID('resource-title').contains(testName); - }); -}); diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/expand-pvc.spec.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/expand-pvc.spec.ts deleted file mode 100644 index 7bfdc2277792..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/expand-pvc.spec.ts +++ /dev/null @@ -1,30 +0,0 @@ -import { pvc } from '../views/pvc'; - -xdescribe('Tests Expansion of a PVC', () => { - before(() => { - cy.login(); - cy.visit('/'); - cy.install(); - }); - - after(() => { - cy.logout(); - }); - - beforeEach(() => { - cy.visit('/'); - cy.clickNavLink(['Storage', 'PersistentVolumeClaims']); - }); - - it('Test expansion of a CephFS PVC', () => { - pvc.createPVC('testpvcfs', '5', 'ocs-storagecluster-cephfs'); - pvc.expandPVC('10'); - cy.byTestID('pvc-requested-capacity').contains('10 GiB'); - }); - - it('Test expansion of a RBD PVC', () => { - pvc.createPVC('testpvcrbd', '5', 'ocs-storagecluster-ceph-rbd', 'Block'); - pvc.expandPVC('10'); - cy.byTestID('pvc-requested-capacity').contains('10 GiB'); - }); -}); diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/kms-encryption-sc.ts 
b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/kms-encryption-sc.ts deleted file mode 100644 index e705ccfffb89..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/kms-encryption-sc.ts +++ /dev/null @@ -1,20 +0,0 @@ -import { configureVault, isPodRunningWithEncryptedPV } from '../support/vault-standalone'; -import { pvc } from '../views/pvc'; -import { createStorageClass } from '../views/storage-class'; - -// Todo(bipuladh): Enable after downstream builds are available with v1 CSIDrivers -xdescribe('Test Ceph pool creation', () => { - before(() => { - configureVault(); - cy.login(); - cy.visit('/'); - cy.install(); - }); - it('SC KMS encryption', () => { - const scName: string = 'sc-encrypt'; - createStorageClass(scName, '', true); - cy.clickNavLink(['PersistentVolumeClaims']); - pvc.createPVC('encrypted-pvc', '1', scName); - isPodRunningWithEncryptedPV(); - }); -}); diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/multiple-pool.spec.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/multiple-pool.spec.ts deleted file mode 100644 index 81497aeefd69..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/multiple-pool.spec.ts +++ /dev/null @@ -1,67 +0,0 @@ -import { checkErrors } from '../../../integration-tests-cypress/support'; -import { modal } from '../../../integration-tests-cypress/views/modal'; -import { POOL_PROGRESS } from '../../src/constants/storage-pool-const'; -import { - poolName, - populateBlockPoolForm, - verifyFooterActions, - deleteBlockPoolFromCli, - verifyBlockPoolJSON, - poolMessage, -} from '../views/block-pool'; - -const prepareStorageClassForm = () => { - cy.log('Selecting provisioner'); - cy.byTestID('storage-class-provisioner-dropdown').click(); - cy.byLegacyTestID('dropdown-text-filter').type('openshift-storage.rbd.csi.ceph.com'); - cy.byTestID('dropdown-menu-item-link').contains('openshift-storage.rbd.csi.ceph.com'); - cy.byTestID('dropdown-menu-item-link').click(); - - cy.log('Creating a new block pool'); - cy.byTestID('pool-dropdown-toggle').click(); - cy.byTestID('create-new-pool-button').click(); -}; - -const createBlockPool = (poolCreationAction: string) => { - cy.log('Make sure the storage pool creation form is open'); - modal.shouldBeOpened(); - modal.modalTitleShouldContain('Create BlockPool'); - populateBlockPoolForm(); - verifyFooterActions('create'); - - cy.log('Verify a new block pool creation'); - cy.byTestID('empty-state-body').contains(poolMessage[poolCreationAction]); - verifyFooterActions(poolCreationAction); - cy.byTestID('pool-dropdown-toggle').contains(poolName); - verifyBlockPoolJSON(); -}; - -/** @deprecated test cases are moved to odf-console */ -xdescribe('Test Ceph pool creation', () => { - before(() => { - cy.login(); - cy.visit('/'); - cy.install(); - }); - - after(() => { - checkErrors(); - cy.logout(); - }); - - it('Check for a new pool creation', () => { - cy.visit('/'); - cy.clickNavLink(['Storage', 'StorageClasses']); - cy.byTestID('item-create').click(); - - cy.log('Test creation of a new pool'); - prepareStorageClassForm(); - createBlockPool(POOL_PROGRESS.CREATED); - - cy.log('Try to create a new pool with already existing name'); - prepareStorageClassForm(); - createBlockPool(POOL_PROGRESS.FAILED); - - deleteBlockPoolFromCli(); - }); -}); diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/multiple-storageclass-selection.spec.ts 
b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/multiple-storageclass-selection.spec.ts deleted file mode 100644 index 04a4709fb686..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/multiple-storageclass-selection.spec.ts +++ /dev/null @@ -1,97 +0,0 @@ -import { K8sResourceKind } from '@console/internal/module/k8s'; -import { getCurrentDeviceSetIndex } from '../../src/utils/add-capacity'; -import { testEbsSC, testNoProvisionerSC, getPVJSON } from '../mocks/storageclass'; -import { commonFlows } from '../views/common'; -import { - withJSONResult, - fetchStorageClusterJson, - fetchWorkerNodesJson, - addCapacity, - newStorageClassTests, - existingStorageClassTests, - IndexAndDeviceSet, - UidAndDeviceSet, -} from '../views/multiple-storageclass'; - -xdescribe('Add capacity using multiple storage classes', () => { - const beforeCapacityAddition: UidAndDeviceSet = { - deviceSets: null, - uid: null, - portability: null, - devicesCount: null, - }; - - before(() => { - cy.login(); - cy.visit('/'); - cy.install(); - cy.exec(`echo '${JSON.stringify(testEbsSC)}' | kubectl apply -f -`); - cy.exec(`echo '${JSON.stringify(testNoProvisionerSC)}' | kubectl apply -f -`); - fetchWorkerNodesJson().then((res) => { - const nodes = JSON.parse(res.stdout); - const { name: scName } = testNoProvisionerSC.metadata; - nodes.items.forEach((node, id) => { - const nodeName = node.metadata.name; - cy.exec(`echo '${JSON.stringify(getPVJSON(id, nodeName, scName))}' | kubectl apply -f -`); - }); - }); - commonFlows.navigateToOCS(); - cy.byLegacyTestID('horizontal-link-Storage Cluster').click(); - }); - - beforeEach(() => { - fetchStorageClusterJson().then((res) => { - const json: K8sResourceKind = JSON.parse(res.stdout); - beforeCapacityAddition.deviceSets = json.spec.storageDeviceSets; - beforeCapacityAddition.uid = json.metadata.uid; - }); - }); - - after(() => { - cy.exec(`echo '${JSON.stringify(testEbsSC)}' | kubectl delete -f -`); - cy.exec(`echo '${JSON.stringify(testNoProvisionerSC)}' | kubectl delete -f -`); - fetchWorkerNodesJson().then((res) => { - const nodes = JSON.parse(res.stdout); - const { name: scName } = testNoProvisionerSC.metadata; - nodes.items.forEach((node, id) => { - const nodeName = node.metadata.name; - cy.exec(`echo '${JSON.stringify(getPVJSON(id, nodeName, scName))}' | kubectl delete -f -`); - }); - }); - cy.logout(); - }); - - it('Add capacity with a new storage class having EBS as provisioner', () => { - const { name: scName } = testEbsSC.metadata; - const iAndD: IndexAndDeviceSet = { index: 0, deviceSets: [] }; - addCapacity(beforeCapacityAddition.uid, scName); - fetchStorageClusterJson().then((res) => { - withJSONResult(res, scName, iAndD); - newStorageClassTests(beforeCapacityAddition, iAndD, true); - }); - }); - - it('Add capacity with an existing storage class having EBS as provisioner', () => { - const { name: scName } = testEbsSC.metadata; - const iAndD: IndexAndDeviceSet = { index: 0, deviceSets: [] }; - const { deviceSets } = beforeCapacityAddition; - const index = getCurrentDeviceSetIndex(deviceSets, scName); - beforeCapacityAddition.portability = deviceSets[index].portable; - beforeCapacityAddition.devicesCount = deviceSets[index].count; - addCapacity(beforeCapacityAddition.uid, scName); - fetchStorageClusterJson().then((res) => { - withJSONResult(res, scName, iAndD); - existingStorageClassTests(beforeCapacityAddition, iAndD); - }); - }); - - it(`Add capacity with a new storage class having NO-PROVISIONER as 
provisioner`, () => { - const { name: scName } = testNoProvisionerSC.metadata; - const iAndD: IndexAndDeviceSet = { index: 0, deviceSets: [] }; - addCapacity(beforeCapacityAddition.uid, scName); - fetchStorageClusterJson().then((res) => { - withJSONResult(res, scName, iAndD); - newStorageClassTests(beforeCapacityAddition, iAndD, false); - }); - }); -}); diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/namespace-store.spec.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/namespace-store.spec.ts deleted file mode 100644 index 9d4df95e1298..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/namespace-store.spec.ts +++ /dev/null @@ -1,47 +0,0 @@ -import { checkErrors } from '../../../integration-tests-cypress/support'; -import { commonFlows } from '../views/common'; -import { createStore, Providers, testName, StoreType } from '../views/store'; - -xdescribe('Tests creation of Namespace Stores', () => { - before(() => { - cy.login(); - cy.visit('/'); - cy.install(); - }); - - after(() => { - cy.logout(); - }); - - afterEach(() => { - cy.byLegacyTestID('actions-menu-button').click(); - cy.log('Deleting namespace store'); - cy.byTestActionID('Delete Namespace Store').click(); - cy.byTestID('confirm-action').click(); - cy.log('Deleting secrets'); - cy.exec(`oc delete secrets ${testName}-secret -n openshift-storage`); - checkErrors(); - }); - - beforeEach(() => { - cy.visit('/'); - commonFlows.navigateToOCS(); - cy.byLegacyTestID('horizontal-link-Namespace Store').click(); - cy.byTestID('item-create').click(); - }); - - it('Test creation of AWS namespace store', () => { - createStore(Providers.AWS, StoreType.NamespaceStore); - cy.byLegacyTestID('resource-title').contains(testName); - }); - - it('Test creation of Azure namespace store', () => { - createStore(Providers.AZURE, StoreType.NamespaceStore); - cy.byLegacyTestID('resource-title').contains(testName); - }); - - it('Test creation of S3 Endpoint Type', () => { - createStore(Providers.S3, StoreType.NamespaceStore); - cy.byLegacyTestID('resource-title').contains(testName); - }); -}); diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/noobaa-sso.spec.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/noobaa-sso.spec.ts deleted file mode 100644 index a5b328093d42..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/noobaa-sso.spec.ts +++ /dev/null @@ -1,28 +0,0 @@ -import { checkErrors } from '../../../integration-tests-cypress/support'; - -xdescribe('Check noobaa link in object service dashboard and perform SSO', () => { - before(() => { - cy.login(); - cy.install(); - cy.visit('/ocs-dashboards/object'); - }); - - afterEach(() => { - checkErrors(); - }); - - after(() => { - cy.logout(); - }); - - it.skip('Check that the noobaa dashboard opens and its links are available.', () => { - cy.byLegacyTestID('system-name-mcg') - .invoke('attr', 'href') - .then((href) => { - cy.request(href).then((response) => { - expect(response.status).toEqual(200); - expect(response.body).toContain('NooBaa Management Console'); - }); - }); - }); -}); diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/obc-test.spec.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/obc-test.spec.ts deleted file mode 100644 index 1144332ec63f..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/obc-test.spec.ts +++ 
/dev/null @@ -1,125 +0,0 @@ -import { expect } from 'chai'; -import { testName, checkErrors } from '../../../integration-tests-cypress/support/index'; -import { detailsPage } from '../../../integration-tests-cypress/views/details-page'; -import { listPage } from '../../../integration-tests-cypress/views/list-page'; -import { modal } from '../../../integration-tests-cypress/views/modal'; -import { deployment } from '../mocks/deploymentData'; -import { - ACCESS_KEY, - BOUND, - MASK, - MINUTE, - NO_ANNOTATIONS, - NOOBAA_LABEL, - NS, - OBC_NAME, - OBC_STORAGE_CLASS, - OBC_STORAGE_CLASS_EXACT, - SECRET_KEY, -} from '../utils/consts'; -import { CreateOBCHandler } from '../views/obcPage'; - -xdescribe('Test Object Bucket Claim resource', () => { - let obcHandler; - let obcUrl; - - before(() => { - cy.login(); - cy.visit('/'); - cy.install(); - obcHandler = new CreateOBCHandler(OBC_NAME, testName, OBC_STORAGE_CLASS); - obcHandler.createBucketClaim(); - cy.url().then((url) => { - obcUrl = url; - }); - }); - - afterEach(() => { - checkErrors(); - }); - - after(() => { - cy.visit(obcUrl); - obcHandler.deleteBucketClaim(); - cy.logout(); - }); - - it('Test if Object Bucket Claim details page is rendered correctly', () => { - cy.log('Test if OBC is bound'); - cy.byTestID('resource-status').contains(BOUND, { timeout: MINUTE }); - - cy.log('Test if owner and creation date are shown correctly'); - cy.byTestSelector('details-item-value__Owner').scrollIntoView().contains('No owner'); - - cy.log('Test if secret data is masked'); - cy.contains('Reveal Values'); - cy.byTestID('copy-to-clipboard').as('secrets'); - cy.get('@secrets').should(($el) => { - const childCount = $el.length; - const hiddenText = 'Value hidden'; - const elText = $el.text(); - const secrets = elText.split(hiddenText).slice(1); - secrets.forEach((arrEl: string) => { - expect(arrEl).to.equal(MASK); - }); - expect(secrets.length).to.equal(childCount); - }); - - cy.log('Test if secret data can be revealed'); - obcHandler.revealHiddenValues(); - cy.byTestID('secret-data').should(($h) => { - expect($h[0].innerText).to.equal('Endpoint'); - expect($h[2].innerText).to.equal('Access Key'); - expect($h[3].innerText).to.equal('Secret Key'); - }); - cy.byTestID('copy-to-clipboard').then(($el) => { - expect($el[0].innerText).to.include(NS); - expect($el[2].innerText).to.match(new RegExp(ACCESS_KEY)); - expect($el[3].innerText).to.match(new RegExp(SECRET_KEY)); - }); - - cy.log('Test if secret data can be hidden again'); - obcHandler.hideValues(); - cy.byTestID('copy-to-clipboard').as('secrets'); - cy.get('@secrets').should(($el) => { - const childCount = $el.length; - const hiddenText = 'Value hidden'; - const elText = $el.text(); - const secrets = elText.split(hiddenText).slice(1); - secrets.forEach((arrEl) => { - expect(arrEl).to.equal(MASK); - }); - expect(secrets.length).to.equal(childCount); - }); - - cy.log('Test if labels and annotations are shown correctly'); - cy.byTestID('label-list').contains(NOOBAA_LABEL); - cy.byTestSelector('details-item-value__Annotations').contains(NO_ANNOTATIONS); - - cy.log('Test if namespace and secret are shown correctly'); - obcHandler.assertNamespaceExists(); - cy.byLegacyTestID(OBC_NAME).contains(OBC_NAME); - - cy.log('Test if status and storage class are shown correctly'); - cy.byTestID('status-text').contains(BOUND); - cy.byLegacyTestID('openshift-storage.noobaa.io').contains(OBC_STORAGE_CLASS_EXACT); - - cy.log('Test if Object Bucket is created'); - cy.byTestID('ob-link').click(); - detailsPage.isLoaded(); - 
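// For reference: the 'ob-link' above leads to the ObjectBucket generated for this
// claim, so the title and status assertions that follow run against the OB details
// page, which should report the same Bound phase as the claim itself.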
cy.byLegacyTestID('resource-title').should('be.visible'); - cy.byTestID('resource-status').contains(BOUND); - }); - - it('Test attachment to a Deployment', () => { - cy.exec(`echo '${JSON.stringify(deployment)}' | kubectl create -n ${testName} -f -`); - cy.clickNavLink(['Storage', 'Object Bucket Claims']); - listPage.rows.shouldBeLoaded(); - listPage.rows.clickKebabAction(OBC_NAME, 'Attach to Deployment'); - cy.byTestID('dropdown-selectbox').click(); - cy.contains(deployment.metadata.name).click(); - modal.submit(); - obcHandler.deploymentReady(deployment.metadata.name); - cy.exec(`echo '${JSON.stringify(deployment)}' | kubectl delete -n ${testName} -f -`); - }); -}); diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/object-service-dashboards.spec.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/object-service-dashboards.spec.ts deleted file mode 100644 index f5e73c4112bc..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/object-service-dashboards.spec.ts +++ /dev/null @@ -1,82 +0,0 @@ -import { testBucket } from '../utils/consts'; - -xdescribe('Tests Buckets, Status, Object Storage Efficiency, and Resource Providers Cards', () => { - before(() => { - cy.login(); - cy.visit('/'); - cy.install(); - }); - - beforeEach(() => { - cy.visit('/ocs-dashboards'); - cy.byLegacyTestID('horizontal-link-Object').click(); - }); - - it('Tests Buckets Cards', () => { - // TODO add test for "at least one Noobaa bucket is present" (using Prometheus APIs) - - cy.log('Create an Object Bucket Claim and test equality'); - cy.exec(`kubectl get ObjectBucketClaims -A | wc -l`).then(({ stdout }) => { - // "-1" excludes the first heading row from the initial OBC count. - let initCount = parseInt(stdout, 10); - initCount = initCount ? initCount - 1 : initCount; - cy.exec(`echo '${JSON.stringify(testBucket)}' | kubectl create -f -`); - const newCount = initCount + 1; - cy.byTestID('resource-inventory-item-obc').contains( - `${newCount} Object Bucket Claim${newCount > 1 ? 
's' : ''}`, - ); - cy.exec(`echo '${JSON.stringify(testBucket)}' | kubectl delete -f -`); - }); - }); - - it('Test Status Cards', () => { - cy.log('Check if Multi Cloud Gateway is in a healthy state'); - cy.byTestID('Object Service-health-item-icon').within(() => { - cy.byTestID('success-icon'); - }); - - cy.log('Check if Data Resiliency of MCG is in a healthy state'); - cy.byTestID('Data Resiliency-health-item-icon').within(() => { - cy.byTestID('success-icon'); - }); - }); - - it('Test Object Storage Efficiency Card', () => { - cy.log('Check if Efficiency Ratio is in acceptable data range'); - cy.byTestID('Compression ratio-efficiency-card-status') - .invoke('text') - .should('not.eq', '') - .then((text) => { - const [ratioA, ratioB] = text.split(':'); - const [numA, numB] = [Number(ratioA), Number(ratioB)]; - if (Number.isNaN(numA) || Number.isNaN(numB)) { - expect(text).toEqual('Not available'); - } else { - expect(numA).toBeGreaterThan(0); - expect(numB).toEqual(1); - } - }); - - cy.log('Check for savings value to be in acceptable data range'); - cy.byTestID('Savings-efficiency-card-status') - .invoke('text') - .then((text) => { - const [savDigits] = text.split(' '); - const numSav = Number(savDigits); - if (Number.isNaN(numSav)) { - expect(text.trim()).toEqual('Not available'); - } else { - expect(numSav).toBeGreaterThanOrEqual(0); - } - }); - }); - - it('Test Resource Providers card', () => { - cy.log('Check if resource provider has at least 1 provider'); - cy.byTestID('nb-resource-providers-card') - .invoke('text') - .then((text) => { - expect(text).toBeDefined(); - }); - }); -}); diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/ocs-presistent-dashboard.spec.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/ocs-presistent-dashboard.spec.ts deleted file mode 100644 index 2ce6d2e59c2e..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/tests/ocs-presistent-dashboard.spec.ts +++ /dev/null @@ -1,66 +0,0 @@ -import { checkErrors } from '../../../integration-tests-cypress/support'; -import { getPVCJSON } from '../helpers/pvc'; - -xdescribe('Check OCS Dashboards', () => { - before(() => { - cy.login(); - cy.visit('/'); - cy.install(); - cy.visit('/ocs-dashboards/block-file'); - }); - - afterEach(() => { - checkErrors(); - }); - - after(() => { - cy.logout(); - }); - - it('Check Status Card is Healthy', () => { - cy.log('Check if OCS Cluster is Healthy'); - cy.byTestID('success-icon').first().should('be.visible'); - cy.log('Check if Data Resiliency is Healthy'); - cy.byTestID('success-icon').last().should('be.visible'); - }); - - it('Check Details card is correct', () => { - cy.byTestID('ocs-link') - .contains('OpenShift Container Storage') - .scrollIntoView() - .should('be.visible'); - cy.contains('ocs-storagecluster').scrollIntoView().should('be.visible'); - }); - - it('Check Inventory card is correct', () => { - cy.log('Check the total number of OCS nodes'); - cy.get('.skeleton-activity').should('not.exist'); - cy.byTestID('inventory-nodes') - .invoke('text') - .then((text) => { - cy.exec( - `oc get nodes -l cluster.ocs.openshift.io/openshift-storage -o json | jq '.items | length'`, - ).then(({ stdout }) => { - expect(text).toEqual(`${stdout.trim()} Nodes`); - }); - }); - - cy.log('Check that the number of PVCs and PVs is updated after successful PVC creation'); - cy.byTestID('inventory-pvc') - .invoke('text') - .then((pvcText) => { - const [numberPVC] = pvcText.split(' '); - const initialPVC = 
Number(numberPVC); - cy.exec( - ` echo '${JSON.stringify( - getPVCJSON('dummy-pvc', 'openshift-storage', 'ocs-storagecluster-ceph-rbd', '5Gi'), - )}' | oc create -f -`, - ).then(() => { - cy.byTestID('inventory-pvc').contains( - `${(initialPVC + 1).toString()} PersistentVolumeClaims`, - ); - cy.byTestID('inventory-pv').contains(`${(initialPVC + 1).toString()} PersistentVolumes`); - }); - }); - }); -}); diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/utils/consts.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/utils/consts.ts deleted file mode 100644 index fd68b7b89345..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/utils/consts.ts +++ /dev/null @@ -1,32 +0,0 @@ -// OCS namespace -export const NS = 'openshift-storage'; -export const ACCESS_KEY = '[a-zA-Z0-9]{20}'; -export const ATTACH_TO_DEPLOYMENT = 'Attach to Deployment'; -export const BOUND = 'Bound'; -export const DEPLOYMENT_REPLICAS_STATUS = 'MinimumReplicasAvailable'; -export const MASK = '•••••'; -export const MINUTE = 60 * 1000; -export const NOOBAA_LABEL = 'app=noobaa'; -export const NO_ANNOTATIONS = '0 annotations'; -export const OBC_NAME = 'test-obc'; -export const OBC_RESOURCE_PATH = 'objectbucket.io~v1alpha1~ObjectBucketClaim'; -export const OBC_STORAGE_CLASS = 'openshift-storage\\.noobaa\\.io'; -export const OBC_STORAGE_CLASS_EXACT = 'openshift-storage.noobaa.io'; -export const SECOND = 1000; -export const SECRET_KEY = '[a-zA-Z0-9/+]{40}'; -export const testName = `test-${Math.random() - .toString(36) - .replace(/[^a-z]+/g, '') - .substr(0, 5)}`; -export const testBucket = { - apiVersion: 'objectbucket.io/v1alpha1', - kind: 'ObjectBucketClaim', - metadata: { - namespace: 'openshift-storage', - name: testName, - }, - spec: { - storageClassName: 'openshift-storage.noobaa.io', - generateBucketName: 'test-bucket', - }, -}; diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/bc.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/bc.ts deleted file mode 100644 index c213e2ca19e7..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/bc.ts +++ /dev/null @@ -1,214 +0,0 @@ -import { bucketStore, namespaceStore } from '../mocks/bucket-class'; -import { commonFlows } from './common'; -import { StoreType } from './store'; - -export const bcName = 'test-bucketclass'; -const bcDescription = - 'test-bucketClass is a bucket class being used for testing purposes. 
Please do not use it for real storage purposes in case the test fails and the class is not deleted'; - -export enum Tier { - SPREAD = 'SPREAD', - MIRROR = 'MIRROR', -} -const TierCountMap = Object.freeze({ - [Tier.SPREAD]: 1, - [Tier.MIRROR]: 2, -}); - -export enum BucketClassType { - STANDARD = 'STANDARD', - NAMESPACE = 'NAMESPACE', -} - -export enum NamespacePolicyType { - SINGLE = 'Single', - MULTI = 'Multi', - CACHE = 'Cache', -} - -abstract class BucketClassConfig { - public abstract setup: () => void; - - public abstract cleanup: () => void; - - constructor(public resources: string[], public type: BucketClassType) {} -} - -export class StandardBucketClassConfig extends BucketClassConfig { - tiers: Tier[]; - - private createPVCBackingStore = (storeName: string) => { - cy.log(`Creating a Backing Store resource named ${storeName}`); - cy.exec( - `echo '${JSON.stringify(bucketStore(storeName))}' | kubectl create -n openshift-storage -f -`, - ); - }; - - setup = () => this.resources.forEach(this.createPVCBackingStore); - - cleanup = () => { - cy.log('Deleting backing stores'); - cy.exec(`oc delete backingstore ${this.resources.join(' ')} -n openshift-storage`); - }; -} - -export class NamespaceBucketClassConfig extends BucketClassConfig { - namespacePolicyType: NamespacePolicyType; - - readonly testBackingStore: string = 'backingstore-test'; - - createAWSStore = (name: string, type: StoreType) => { - cy.log( - `Creating a ${ - type === StoreType.NamespaceStore ? 'Namespace' : 'Backing' - } Store resource named ${name}`, - ); - - cy.exec( - `echo '${JSON.stringify( - namespaceStore(name, type), - )}' | kubectl create -n openshift-storage -f -`, - ); - }; - - setup = () => { - this.resources.forEach((testResource) => - this.createAWSStore(testResource, StoreType.NamespaceStore), - ); - this.createAWSStore(this.testBackingStore, StoreType.BackingStore); - }; - - cleanup = () => { - cy.log('Deleting namespace stores and backing store'); - cy.exec(`oc delete namespacestores ${this.resources.join(' ')} -n openshift-storage`); - cy.exec(`oc delete backingstore ${this.testBackingStore} -n openshift-storage`); - }; -} - -const tierLevelToButton = (level: number, tier: Tier) => - level === 1 - ? tier === Tier.SPREAD - ? cy.byTestID('placement-policy-spread1') - : cy.byTestID('placement-policy-mirror1') - : tier === Tier.SPREAD - ? 
cy.byTestID('placement-policy-spread2') - : cy.byTestID('placement-policy-mirror2'); - -const setGeneralData = (type: BucketClassType) => { - // be.visible check added to wait for the page to load - cy.byTestID(`${type.toLowerCase()}-radio`).click(); - cy.byTestID('bucket-class-name').scrollIntoView().should('be.visible'); - cy.byTestID('bucket-class-name').type(bcName); - cy.byTestID('bucket-class-description').type(bcDescription); -}; - -const setPlacementPolicy = (tiers: Tier[]) => { - tierLevelToButton(1, tiers[0]).click(); - if (tiers.length > 1) { - cy.byTestID('add-tier-btn').click(); - tierLevelToButton(2, tiers[1]).click(); - } -}; - -const selectStoreFromTable = (storeNo: number, name: string) => { - cy.byLegacyTestID(name) - .eq(storeNo - 1) - .parent() - .parent() - .parent() - .find('input[type="checkbox"]') - .first() - .click(); -}; - -const setBackingStores = (tiers: Tier[]) => { - const tests = ['test-store4', 'test-store3', 'test-store2', 'test-store1']; - if (tiers.length > 1) { - cy.byLegacyTestID('item-filter').should(($items) => { - expect($items).toHaveLength(2); - }); - } - selectStoreFromTable(1, tests.pop()); - if (TierCountMap[tiers[0]] > 1) { - selectStoreFromTable(1, tests.pop()); - } - // Select tier 2 Backing Stores - if (tiers.length > 1) { - selectStoreFromTable(2, tests.pop()); - if (TierCountMap[tiers[1]] > 1) { - selectStoreFromTable(2, tests.pop()); - } - } -}; - -const selectItemFromStoreDropdown = (name: string, type: StoreType) => { - cy.byTestID(`${type === StoreType.NamespaceStore ? 'nns' : 'nbs'}-dropdown-toggle`).click(); - cy.byTestID(`${name}-dropdown-item`).click(); -}; - -const configureNamespaceBucketClass = ( - namespacePolicyType: NamespacePolicyType, - config: NamespaceBucketClassConfig, -) => { - switch (namespacePolicyType) { - case NamespacePolicyType.SINGLE: - selectItemFromStoreDropdown(config.resources[0], StoreType.NamespaceStore); - break; - case NamespacePolicyType.MULTI: - selectStoreFromTable(1, config.resources[0]); - selectStoreFromTable(1, config.resources[1]); - selectItemFromStoreDropdown(config.resources[0], StoreType.NamespaceStore); - break; - case NamespacePolicyType.CACHE: - selectItemFromStoreDropdown(config.resources[0], StoreType.NamespaceStore); - selectItemFromStoreDropdown(config.testBackingStore, StoreType.BackingStore); - cy.byTestID('time-to-live-input').type('2'); - break; - default: - } -}; - -export const createBucketClass = (config: BucketClassConfig) => { - cy.log('Select bucket class type'); - setGeneralData(config.type); - cy.contains('Next').click(); - if (config.type === BucketClassType.STANDARD) { - const { tiers } = config as StandardBucketClassConfig; - cy.log('Select Placement policy'); - setPlacementPolicy(tiers); - cy.contains('Next').click(); - cy.log('Select Backing Store'); - setBackingStores(tiers); - } else { - const { namespacePolicyType } = config as NamespaceBucketClassConfig; - cy.log('Select Namespace policy'); - cy.byTestID(`${namespacePolicyType.toLowerCase()}-radio`).click(); - cy.contains('Next').click(); - cy.log('Select Namespace Store'); - configureNamespaceBucketClass(namespacePolicyType, config as NamespaceBucketClassConfig); - } - cy.contains('Next').click(); - cy.log('Create bucket class'); - cy.contains('button', 'Create BucketClass').click(); -}; - -export const verifyBucketClass = () => { - cy.log('Verifying bucket class'); - cy.byTestSelector('details-item-value__Name').contains(bcName); - cy.byLegacyTestID('resource-title').contains(bcName); -}; - -export const 
deleteBucketClass = () => { - cy.log('Deleting bucket class'); - cy.byLegacyTestID('actions-menu-button').click(); - cy.byTestActionID('Delete Bucket Class').click(); - cy.byTestID('confirm-action').click(); - cy.byTestID('item-create').should('be.visible'); -}; - -export const visitBucketClassPage = () => { - cy.visit('/'); - commonFlows.navigateToOCS(); - cy.byLegacyTestID('horizontal-link-Bucket Class').click(); - cy.byTestID('item-create').click(); -}; diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/block-pool.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/block-pool.ts deleted file mode 100644 index b8b8e732cb53..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/block-pool.ts +++ /dev/null @@ -1,96 +0,0 @@ -import { POOL_PROGRESS } from '../../src/constants/storage-pool-const'; -import { NS } from '../utils/consts'; -import { commonFlows } from './common'; - -// Pool var -export const poolName: string = 'example.pool'; -export const replicaCount: string = '2'; -export const volumeType: string = 'ssd'; -export const scName: string = 'testing-sc'; - -export const poolMessage: { - [key in POOL_PROGRESS]?: string; -} = { - [POOL_PROGRESS.FAILED]: `Pool "${poolName}" already exists`, - [POOL_PROGRESS.CREATED]: `Pool ${poolName} was successfully created`, - [POOL_PROGRESS.NOTALLOWED]: - "Pool management tasks are not supported for default pool and OpenShift Container Storage's external mode.", - [POOL_PROGRESS.BOUNDED]: `${poolName} cannot be deleted. When a pool is bounded to PVC it cannot be deleted. Please detach all the resources from StorageClass(es):`, -}; - -export const navigateToBlockPool = () => { - commonFlows.navigateToOCS(); - cy.byLegacyTestID('horizontal-link-Block Pools').click(); -}; - -export const populateBlockPoolForm = () => { - cy.byTestID('new-pool-name-textbox').type(poolName); - cy.byTestID('replica-dropdown').click(); - cy.byLegacyTestID('replica-dropdown-item').contains(`${replicaCount}-way Replication`).click(); - cy.byTestID('volume-type-dropdown').click(); - cy.byTestID('volume-type-dropdown-item').contains(volumeType.toLocaleUpperCase()).click(); - cy.byTestID('compression-checkbox').check(); -}; - -export enum Actions { - created = 'created', - failed = 'failed', - notAllowed = 'notAllowed', - bound = 'bounded', -} - -export const verifyFooterActions = (action: string) => { - switch (action) { - case Actions.failed: - cy.log('Check try-again-action and finish-action are enabled'); - cy.byLegacyTestID('modal-try-again-action').should('be.visible'); - cy.byLegacyTestID('modal-finish-action').click(); - break; - case Actions.created: - cy.log('Check finish-action is enabled'); - cy.byLegacyTestID('modal-finish-action').click(); - break; - case Actions.notAllowed: - cy.log('Check close-action is enabled'); - cy.byLegacyTestID('modal-close-action').click(); - break; - case Actions.bound: - cy.log('Check go-to-pvc-list-action and close-action are enabled'); - cy.byLegacyTestID('modal-go-to-pvc-list-action').should('be.visible'); - cy.byLegacyTestID('modal-close-action').click(); - break; - default: - cy.log(`Invoke ${action} action`); - cy.byLegacyTestID('confirm-action').scrollIntoView().click(); - } -}; - -export const verifyBlockPoolJSON = ( - compressionEnabled: boolean = true, - replica: string = replicaCount, -) => - cy.exec(`oc get cephBlockPool ${poolName} -n ${NS} -o json`).then((res) => { - const blockPool = JSON.parse(res.stdout); - 
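// Rough shape of the CephBlockPool CR fields asserted below, inferred from these
// assertions and the values set in populateBlockPoolForm (illustrative only):
// spec:
//   replicated: { size: 2 }
//   compressionMode: 'aggressive'
//   parameters: { compression_mode: 'aggressive' }
//   deviceClass: 'ssd'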
expect(blockPool.spec?.replicated?.size).toEqual(Number(replica)); - expect(blockPool.spec?.compressionMode).toEqual(compressionEnabled ? 'aggressive' : 'none'); - expect(blockPool.spec?.parameters?.compression_mode).toEqual( - compressionEnabled ? 'aggressive' : 'none', - ); - expect(blockPool.spec?.deviceClass).toEqual(volumeType); - }); - -export const createBlockPool = () => { - navigateToBlockPool(); - cy.byTestID('item-create').click(); - populateBlockPoolForm(); - verifyFooterActions('create'); - cy.log('Verify a new block pool creation'); - cy.byTestID('status-text').contains('Ready'); - verifyBlockPoolJSON(); - cy.byLegacyTestID('breadcrumb-link-1').click(); -}; - -export const deleteBlockPoolFromCli = () => { - cy.log('Deleting a pool'); - cy.exec(`oc delete CephBlockPool ${poolName} -n ${NS}`); -}; diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/common.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/common.ts deleted file mode 100644 index b4e593f6d180..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/common.ts +++ /dev/null @@ -1,29 +0,0 @@ -export const commonFlows = { - navigateToOCS: (isOperatorsOpen = false) => { - const path = isOperatorsOpen ? ['Installed Operators'] : ['Operators', 'Installed Operators']; - cy.clickNavLink(path); - cy.byLegacyTestID('item-filter').type('ocs'); - cy.byTestOperatorRow('OpenShift Container Storage').click(); - }, - checkAll: () => cy.get('input[name=check-all]'), -}; - -export const commandPoll = ( - cmd: string, - expected: string, - failOnNonZeroExit: boolean = true, - retry: number = 300, -) => { - cy.exec(cmd, { failOnNonZeroExit }).then((res) => { - if (res.stdout === expected) { - assert(true); - return; - } - if (retry <= 0) { - assert(false); - return; - } - - commandPoll(cmd, expected, failOnNonZeroExit, retry - 1); - }); -}; diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/install.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/install.ts deleted file mode 100644 index bf2658eaa119..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/install.ts +++ /dev/null @@ -1,117 +0,0 @@ -import { wizard } from '@console/cypress-integration-tests/views/wizard'; -import { ServiceAccountKind } from '@console/internal/module/k8s'; -import '../../../integration-tests-cypress/support/index.ts'; -import { CATALOG, PULL_SECRET_PATH, ocsCatalogSource } from '../mocks/install'; -import { NS } from '../utils/consts'; -import { commonFlows } from './common'; - -export const createImagePullSecret = (namespace: string) => { - cy.log(`Create ${CATALOG.SECRET} in ${namespace}`); - cy.exec( - `oc create secret generic ocs-secret --from-file=.dockerconfigjson=${PULL_SECRET_PATH} --type=kubernetes.io/dockerconfigjson -n ${namespace}`, - ); -}; - -export const createCustomCatalogSource = () => { - cy.log('Create custom catalog source with latest stable image of OCS'); - cy.exec(`echo '${JSON.stringify(ocsCatalogSource)}' | kubectl apply -f -`); -}; - -export const subscribeToOperator = () => { - cy.log('Search in Operator Hub'); - cy.clickNavLink(['Operators', 'OperatorHub']); - cy.byTestID('search-operatorhub').type('Openshift Container Storage'); - cy.byTestID('ocs-operator-ocs-catalogsource-openshift-marketplace', { timeout: 120000 }).click(); - cy.log('Subscribe to OCS Operator'); - cy.byLegacyTestID('operator-install-btn').click({ force: true }); - 
cy.byTestID('Operator recommended Namespace:-radio-input').should('be.checked'); - cy.byTestID('install-operator').click(); -}; - -export const linkPullSecretToPods = () => { - createImagePullSecret(NS); - cy.log(`Add ${CATALOG.SECRET} to all service accounts in ${NS} namespace`); - cy.exec(`oc get serviceaccounts -n ${NS} -o json`).then((res) => { - const { items: saList } = JSON.parse(res.stdout); - saList.forEach((sa: ServiceAccountKind) => { - cy.log(`Linking ${CATALOG.SECRET} to ${sa.metadata.name}`); - cy.exec(`oc secrets link ${sa.metadata.name} ${CATALOG.SECRET} -n ${NS} --for=pull`); - }); - }); - cy.log(`Rolling out secret update to pods`); - cy.exec(`oc delete pods --all -n ${NS}`); -}; - -export const verifyMonitoring = () => { - cy.log(`Verify monitoring enablement in ${NS} namespace`); - cy.exec(`oc get project ${NS} -o json`).then((res) => { - const obj = JSON.parse(res.stdout); - expect(obj.metadata.labels?.['openshift.io/cluster-monitoring']).toEqual('true'); - }); -}; - -export const verifyNodeLabels = () => { - cy.log('Verify all worker nodes are labelled'); - cy.exec('oc get nodes -o json').then((res) => { - const { items } = JSON.parse(res.stdout); - items - .map((item) => item.metadata.labels) - .filter((item) => item.hasOwnProperty('node-role.kubernetes.io/worker')) - .forEach((item) => - expect(item.hasOwnProperty('cluster.ocs.openshift.io/openshift-storage')).toBeTruthy(), - ); - }); -}; - -export const verifyClusterReadiness = () => { - // Wait for the storage cluster to reach Ready - // Storage Cluster CR flickers so wait for 10 seconds - // Disabling until ocs-operator fixes the above issue - // eslint-disable-next-line cypress/no-unnecessary-waiting - cy.wait(10000); - cy.log('Verify Storage cluster is `Ready`'); - cy.byTestID('resource-status').contains('Ready', { timeout: 900000 }); - cy.byLegacyTestID('horizontal-link-Resources').click(); - cy.log('Verify ceph cluster is `Ready`'); - cy.byTestOperandLink('ocs-storagecluster-cephcluster').click(); - cy.byTestID('resource-status').contains('Ready', { timeout: 900000 }); - cy.go('back'); - cy.log('Verify noobaa system is `Ready`'); - cy.byTestOperandLink('noobaa').click(); - cy.byTestID('resource-status').contains('Ready', { timeout: 900000 }); -}; - -export const createInternalStorageCluster = (encrypted: boolean) => { - const mode = 'Internal'; - // Make changes to this once we add annotation - cy.log(`Install OCS in ${mode} Mode`); - // Reload because StorageCluster CRD is not registered in the UI; hence getting 404 Error - cy.visit('/'); - cy.reload(true); - commonFlows.navigateToOCS(); - cy.byLegacyTestID('horizontal-link-Storage Cluster').click(); - cy.byTestID('item-create').click(); - - cy.log(`Select ${mode}`); - cy.byTestID('Internal-radio-input').should('be.checked'); - - // Step 1 - // Select all worker Nodes - commonFlows.checkAll().check(); - commonFlows.checkAll().should('be.checked'); - // Two dropdowns in the same page. 
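// A minimal sketch of the parameterized helper the Todo below asks for
// (hypothetical helper and test id; assumes each dropdown gets its own data-test id):
const selectFromDropdown = (dropdownTestId: string, option: string) => {
  cy.byTestID(dropdownTestId).click();
  cy.byTestDropDownMenu(option).click();
};
// e.g. selectFromDropdown('requested-capacity-dropdown', '512Gi') would replace the
// positional .first() click below.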
- // (Todo: )make dropdown data-test-id be something that can be passed as a prop - cy.byLegacyTestID('dropdown-button').first().click(); - cy.byTestDropDownMenu('512Gi').click(); - wizard.next(); - - // Step 2 - if (encrypted) { - cy.log('Enabling Encryption'); - cy.byTestID('encryption-checkbox').click(); - } - wizard.next(); - - // Final Step - wizard.create(); -}; diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/multiple-storageclass.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/multiple-storageclass.ts deleted file mode 100644 index 617395b7b2ca..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/multiple-storageclass.ts +++ /dev/null @@ -1,67 +0,0 @@ -import { K8sResourceKind } from '@console/internal/module/k8s'; -import { OCS_INTERNAL_CR_NAME } from '../../src/constants'; -import { DeviceSet } from '../../src/types'; -import { getCurrentDeviceSetIndex } from '../../src/utils/add-capacity'; -import { NS } from '../consts'; - -export const withJSONResult = (res: Cypress.Exec, scName: string, iAndD: IndexAndDeviceSet) => { - const jsonOut: K8sResourceKind = JSON.parse(res.stdout); - iAndD.deviceSets = jsonOut.spec.storageDeviceSets; - iAndD.index = getCurrentDeviceSetIndex(iAndD.deviceSets, scName); -}; - -export const fetchStorageClusterJson = () => - cy.exec(`kubectl get --ignore-not-found storagecluster ${OCS_INTERNAL_CR_NAME} -n ${NS} -o json`); - -export const fetchWorkerNodesJson = () => - cy.exec('oc get nodes -l "node-role.kubernetes.io/worker" -o json'); - -export const addCapacity = (uid: string, scName: string) => { - cy.byLegacyTestID('kebab-button').click(); // 'data-test-id' - cy.byTestActionID('Add Capacity').click(); // 'data-test-action' - cy.byTestID('add-cap-sc-dropdown').click(); // 'data-test' - cy.byTestID('dropdown-menu-item-link').contains(scName).click(); - cy.byTestID('confirm-action').click(); -}; - -export const newStorageClassTests = ( - beforeCapacityAddition: UidAndDeviceSet, - iAndD: IndexAndDeviceSet, - portability: boolean, -) => { - const portabilityStatus = portability ? 
'enabled' : 'disabled'; - cy.log('New device set is created'); - expect(iAndD.deviceSets.length).toBe(beforeCapacityAddition.deviceSets.length + 1); - - cy.log('Device count is 1 in the new device set'); - expect(iAndD.deviceSets[iAndD.index].count).toBe(1); - - cy.log(`Osd portability is ${portabilityStatus} in the new device set`); - expect(iAndD.deviceSets[iAndD.index].portable).toBe(portability); -}; - -export const existingStorageClassTests = ( - beforeCapacityAddition: UidAndDeviceSet, - iAndD: IndexAndDeviceSet, -) => { - cy.log('New device set is not created'); - expect(iAndD.deviceSets.length).toBe(beforeCapacityAddition.deviceSets.length); - - cy.log('Devices count is incremented by 1 in the corresponding device set'); - expect(iAndD.deviceSets[iAndD.index].count).toBe(beforeCapacityAddition.devicesCount + 1); - - cy.log('Osd portability is not modified in the corresponding device set'); - expect(iAndD.deviceSets[iAndD.index].portable).toBe(beforeCapacityAddition.portability); -}; - -export interface IndexAndDeviceSet { - index: number; - deviceSets: DeviceSet[]; -} - -export interface UidAndDeviceSet { - uid: string; - deviceSets: DeviceSet[]; - portability?: boolean; - devicesCount?: number; -} diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/obcPage.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/obcPage.ts deleted file mode 100644 index 07e8473cdcd1..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/obcPage.ts +++ /dev/null @@ -1,63 +0,0 @@ -import { projectNameSpace } from '../../../dev-console/integration-tests/support/pages/app'; -import { modal } from '../../../integration-tests-cypress/views/modal'; -import { DEPLOYMENT_REPLICAS_STATUS, MINUTE } from '../utils/consts'; - -export class CreateOBCHandler { - name: string; - - namespace: string; - - storageclass: string; - - constructor(name: string, namespace: string, storageclass: string) { - this.name = name; - this.namespace = namespace; - this.storageclass = storageclass; - } - - createBucketClaim() { - cy.clickNavLink(['Storage', 'Object Bucket Claims']); - projectNameSpace.selectOrCreateProject(this.namespace); - cy.clickNavLink(['Storage', 'Object Bucket Claims']); - cy.byLegacyTestID('namespace-bar-dropdown').contains('Project').click(); - cy.contains(this.namespace); - cy.byTestID('item-create').click(); - cy.byTestID('obc-name').type(this.name); - cy.byTestID('sc-dropdown').should('be.visible').click(); - cy.contains('openshift-storage.noobaa.io').click(); - modal.submit(); - cy.byLegacyTestID('resource-title').contains(this.name, { timeout: MINUTE }); - } - - revealHiddenValues() { - cy.contains('Reveal Values').click(); - } - - hideValues() { - cy.contains('Hide Values').click(); - } - - assertNamespaceExists() { - cy.byTestSelector('details-item-value__Namespace').contains(this.namespace); - } - - deploymentReady(deploymentName: string) { - cy.byLegacyTestID('horizontal-link-Details').click(); - cy.contains(DEPLOYMENT_REPLICAS_STATUS, { timeout: MINUTE }); - cy.byTestSelector('details-item-value__Name').should('be.visible').contains(deploymentName); - } - - deleteBucketClaim() { - cy.byTestID('loading-indicator').should('not.exist'); - cy.byLegacyTestID('details-actions') - .byLegacyTestID('actions-menu-button') - .should('be.visible') - .click(); - cy.byLegacyTestID('details-actions').byLegacyTestID('action-items').should('be.visible'); - cy.byTestActionID('Delete Object Bucket Claim') - .should('be.visible') - 
.should('be.enabled') - .click(); - cy.byTestID('confirm-action').should('be.visible').click(); - } -} diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/pvc.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/pvc.ts deleted file mode 100644 index bf28b981b7f1..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/pvc.ts +++ /dev/null @@ -1,25 +0,0 @@ -export const pvc = { - createPVC: ( - name: string, - size: string, - storageClass: string, - mode: 'Block' | 'Filesystem' = 'Filesystem', - ) => { - cy.byTestID('item-create').click(); - cy.byTestID('storageclass-dropdown').click(); - cy.get(`#${storageClass}-link`).click(); - cy.byTestID('pvc-name').type(name); - cy.byTestID('pvc-size').type(size); - if (mode === 'Block') { - cy.byTestID('Block-radio-input').click(); - } - cy.byTestID('create-pvc').click(); - cy.contains('Bound'); - }, - expandPVC: (expansionSize) => { - cy.byLegacyTestID('actions-menu-button').click(); - cy.byTestActionID('Expand PVC').click(); - cy.byTestID('pvc-expand-size-input').clear().type(expansionSize); - cy.byTestID('confirm-action').click(); - }, -}; diff --git a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/storage-class.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/storage-class.ts deleted file mode 100644 index 3c134e63d2db..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/storage-class.ts +++ /dev/null @@ -1,47 +0,0 @@ -const configureKms = () => { - cy.byTestID('storage-class-encryption').check(); - cy.byTestID('kms-service-name-text').type('vault'); - cy.exec('echo http://$(oc get route vault --no-headers -o custom-columns=HOST:.spec.host)').then( - (hostname) => { - cy.byTestID('kms-address-text').type(hostname.stdout); - }, - ); - cy.byTestID('kms-address-port-text').type('80'); - cy.byTestID('kms-advanced-settings-link').click(); - cy.byTestID('kms-service-backend-path').type('secret'); - - // save - cy.byTestID('confirm-action').click(); - cy.byTestID('save-action').click(); - cy.byTestID('kms-connection-dropdown').should('contain', 'vault'); -}; - -export const createStorageClass = (scName: string, poolName?: string, encrypted?: boolean) => { - cy.clickNavLink(['Storage', 'StorageClasses']); - cy.byTestID('item-create').click(); - cy.byLegacyTestID('storage-class-form').get('input#storage-class-name').type(scName); - - cy.log('Selecting Ceph RBD provisioner'); - cy.byTestID('storage-class-provisioner-dropdown').click(); - cy.byLegacyTestID('dropdown-text-filter').type('openshift-storage.rbd.csi.ceph.com'); - cy.byTestID('dropdown-menu-item-link').should('contain', 'openshift-storage.rbd.csi.ceph.com'); - cy.byTestID('dropdown-menu-item-link').click(); - - cy.log('Enable encryption'); - encrypted && configureKms(); - - cy.log(`Selecting block pool ${poolName}`); - cy.byTestID('pool-dropdown-toggle').click(); - cy.byTestID(poolName || 'ocs-storagecluster-cephblockpool').click(); - - cy.log('Creating new StorageClass'); - cy.byTestID('storage-class-volume-binding-mode').click(); - cy.byTestDropDownMenu('Immediate').click(); - cy.byLegacyTestID('storage-class-form').get('button#save-changes').click(); - cy.byLegacyTestID('resource-title').contains(scName); -}; - -export const deleteStorageClassFromCli = (scName: string) => { - cy.log('Deleting a storage class'); - cy.exec(`oc delete StorageClass ${scName}`); -}; diff --git 
a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/store.ts b/frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/store.ts deleted file mode 100644 index 97ab97bbc342..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests-cypress/views/store.ts +++ /dev/null @@ -1,75 +0,0 @@ -export enum Providers { - AWS = 'AWS S3', - AZURE = 'Azure Blob', - S3 = 'S3 Compatible', - PVC = 'PVC', -} - -// Used to identify data-test ids, values are based on data-test fields -export enum StoreType { - BackingStore = 'backingstore', - NamespaceStore = 'namespacestore', -} - -export const testName = 'test-bucket'; - -const inputCustomSecrets = (storeType: StoreType) => { - cy.log('Set custom secrets'); - cy.byTestID('switch-to-creds').click(); - cy.byTestID(`${storeType}-access-key`).type('my_dummy_test_key'); - cy.byTestID(`${storeType}-secret-key`).type('my_dummy_sec_key'); - cy.byTestID(`${storeType}-target-bucket`).type('my_dummy_target'); -}; - -const setupAWS = (storeType: StoreType) => { - cy.log('Setting up AWS provider'); - cy.byTestDropDownMenu(Providers.AWS).click(); - cy.byTestID(`${storeType}-aws-region-dropdown`).click(); - cy.byTestDropDownMenu('us-east-1').click(); - inputCustomSecrets(storeType); -}; - -const setupAzureBlob = (storeType: StoreType) => { - cy.log('Setting up Azure provider'); - cy.byTestDropDownMenu(Providers.AZURE).click(); - inputCustomSecrets(storeType); -}; - -const setupS3Endpoint = (storeType: StoreType) => { - cy.log('Setting up s3 endpoint provider'); - const ENDPOINT = 'http://test-endpoint.com'; - cy.byTestDropDownMenu('S3 Compatible').click(); - cy.byTestID(`${storeType}-s3-endpoint`).type(ENDPOINT); - inputCustomSecrets(storeType); -}; - -const setupPVC = () => { - cy.log('Setting up PVC provider'); - cy.byTestDropDownMenu('PVC').click(); -}; -const setupProvider = (provider: Providers, storeType: StoreType) => { - cy.byTestID(`${storeType}-provider`).click(); - switch (provider) { - case Providers.AWS: - setupAWS(storeType); - break; - case Providers.AZURE: - setupAzureBlob(storeType); - break; - case Providers.S3: - setupS3Endpoint(storeType); - break; - case Providers.PVC: - setupPVC(); - break; - default: - break; - } -}; - -export const createStore = (provider: Providers, storeType: StoreType = StoreType.BackingStore) => { - cy.log(`Creating ${storeType}`); - cy.byTestID(`${storeType}-name`).type(testName); - setupProvider(provider, storeType); - cy.byTestID(`${storeType}-create-button`).click(); -}; diff --git a/frontend/packages/ceph-storage-plugin/integration-tests/OWNERS b/frontend/packages/ceph-storage-plugin/integration-tests/OWNERS deleted file mode 100644 index ed84004b03b1..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests/OWNERS +++ /dev/null @@ -1,17 +0,0 @@ -reviewers: - - a2batic - - afreen23 - - bipuladh - - cloudbehl - - gnehapk - - rawagner - - shyRozen -approvers: - - afreen23 - - bipuladh - - cloudbehl - - gnehapk - - rawagner - - shyRozen -labels: - - component/ceph-integration-tests diff --git a/frontend/packages/ceph-storage-plugin/integration-tests/mocks/expand-test-mocks.ts b/frontend/packages/ceph-storage-plugin/integration-tests/mocks/expand-test-mocks.ts deleted file mode 100644 index c361274013c5..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests/mocks/expand-test-mocks.ts +++ /dev/null @@ -1,120 +0,0 @@ -import { testName } from '@console/internal-integration-tests/protractor.conf'; -import { SIZE_UNITS, STORAGE_CLASS_PATTERNS 
} from '../utils/consts'; - -export const testDeployment = { - apiVersion: 'apps/v1', - kind: 'Deployment', - metadata: { - name: 'example', - namespace: testName, - }, - spec: { - selector: { - matchLabels: { - app: 'hello-openshift', - }, - }, - replicas: 1, - template: { - metadata: { - labels: { - app: 'hello-openshift', - }, - }, - spec: { - volumes: [ - { - name: `${testName}-pvc`, - persistentVolumeClaim: { - claimName: `${testName}-pvc`, - }, - }, - ], - containers: [ - { - name: 'hello-openshift', - image: 'openshift/hello-openshift', - ports: [ - { - containerPort: 8080, - }, - ], - volumeMounts: [ - { - name: `${testName}-pvc`, - mountPath: '/data', - }, - ], - }, - ], - }, - }, - }, -}; - -export const testDeploymentRbd = { - apiVersion: 'apps/v1', - kind: 'Deployment', - metadata: { - name: 'example2', - namespace: testName, - }, - spec: { - selector: { - matchLabels: { - app: 'hello-openshift', - }, - }, - replicas: 1, - template: { - metadata: { - labels: { - app: 'hello-openshift', - }, - }, - spec: { - volumes: [ - { - name: `${testName}-rbdpvc`, - persistentVolumeClaim: { - claimName: `${testName}-rbdpvc`, - }, - }, - ], - containers: [ - { - name: 'my-container', - image: 'nginx', - securityContext: { - capabilities: { - add: ['SYS_ADMIN'], - }, - }, - volumeDevices: [ - { - name: `${testName}-rbdpvc`, - devicePath: '/dev/rbdblock', - }, - ], - }, - ], - }, - }, - }, -}; - -export const testPVC = { - name: `${testName}-pvc`, - namespace: testName, - size: '5', - sizeUnits: SIZE_UNITS.MI, - storageClass: STORAGE_CLASS_PATTERNS.FS, -}; - -export const testRbdPVC = { - name: `${testName}-rbdpvc`, - namespace: testName, - size: '5', - sizeUnits: SIZE_UNITS.MI, - storageClass: STORAGE_CLASS_PATTERNS.RBD, -}; diff --git a/frontend/packages/ceph-storage-plugin/integration-tests/mocks/independent-external-cluster-data.ts b/frontend/packages/ceph-storage-plugin/integration-tests/mocks/independent-external-cluster-data.ts deleted file mode 100644 index ee33ea2f7ee9..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests/mocks/independent-external-cluster-data.ts +++ /dev/null @@ -1,57 +0,0 @@ -export const ClusterMetadata = [ - { - kind: 'ConfigMap', - data: { maxMonId: '0', data: 'a=10.97.47.226:6789', mapping: {} }, - name: 'rook-ceph-mon-endpoints', - }, - { - kind: 'Secret', - data: { - 'mon-secret': 'mon-secret', - fsid: 'b99e887c-9f73-47e7-96cc-33c6f3f39aa9', - 'cluster-name': 'openshift-storage', - 'admin-secret': 'admin-secret', - }, - name: 'rook-ceph-mon', - }, - { - kind: 'Secret', - data: { - userKey: 'AQAwA85ezIXyORAA8YDlVMoyo5+tByxcCZigoQ==', - userID: 'client.healthchecker', - }, - name: 'rook-ceph-operator-creds', - }, - { - kind: 'Secret', - data: { userKey: 'AQDtAc5e/S7JJBAAhh2k1cjM5KszMObC0Wugyg==', userID: 'csi-rbd-node' }, - name: 'rook-csi-rbd-node', - }, - { - kind: 'Secret', - data: { - userKey: 'AQDtAc5ebRXHFBAAAGwy0lHtt3gYt9Q/9jhzXg==', - userID: 'csi-rbd-provisioner', - }, - name: 'rook-csi-rbd-provisioner', - }, - { - kind: 'Secret', - data: { - adminID: 'csi-cephfs-node', - adminKey: 'AQDuAc5esOp8DRAAi2cI3nGVBWH++9cOoE9b9g==', - }, - name: 'rook-csi-cephfs-node', - }, - { - kind: 'Secret', - data: { - adminID: 'csi-cephfs-provisioner', - adminKey: 'AQDtAc5eHe3DNRAAIe6HHnQRPSABuhj3GzEeoA==', - }, - name: 'rook-csi-cephfs-provisioner', - }, - { kind: 'StorageClass', data: { pool: 'myfs-data0' }, name: 'ceph-rbd' }, - { kind: 'StorageClass', data: { pool: 'myfs-data0', fsName: 'myfs' }, name: 'cephfs' }, - { kind: 'StorageClass', data: 
{ endpoint: '10.10.212.122:9000' }, name: 'ceph-rgw' }, -]; diff --git a/frontend/packages/ceph-storage-plugin/integration-tests/mocks/storage-class.ts b/frontend/packages/ceph-storage-plugin/integration-tests/mocks/storage-class.ts deleted file mode 100644 index ae614959cc7b..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests/mocks/storage-class.ts +++ /dev/null @@ -1,16 +0,0 @@ -export const testNoProvisionerSC = { - apiVersion: 'storage.k8s.io/v1', - kind: 'StorageClass', - metadata: { name: 'test-no-prov-sc' }, - provisioner: 'kubernetes.io/no-provisioner', - reclaimPolicy: 'Delete', -}; - -export const testEbsSC = { - apiVersion: 'storage.k8s.io/v1', - kind: 'StorageClass', - metadata: { name: 'test-ebs-sc' }, - provisioner: 'kubernetes.io/aws-ebs', - parameters: { type: 'io1' }, - reclaimPolicy: 'Retain', -}; diff --git a/frontend/packages/ceph-storage-plugin/integration-tests/mocks/storage-pool.ts b/frontend/packages/ceph-storage-plugin/integration-tests/mocks/storage-pool.ts deleted file mode 100644 index 4fbc8d46945b..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests/mocks/storage-pool.ts +++ /dev/null @@ -1,17 +0,0 @@ -import { StoragePoolKind } from '../../src/types'; -import { NS } from '../utils/consts'; - -export const poolData: StoragePoolKind = { - apiVersion: 'ceph.rook.io/v1', - kind: 'CephBlockPool', - metadata: { - name: 'foo', - namespace: NS, - }, - spec: { - compressionMode: '', - replicated: { - size: 2, - }, - }, -}; diff --git a/frontend/packages/ceph-storage-plugin/integration-tests/mocks/testFile.json b/frontend/packages/ceph-storage-plugin/integration-tests/mocks/testFile.json deleted file mode 100644 index 44f559d57079..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests/mocks/testFile.json +++ /dev/null @@ -1,57 +0,0 @@ -[ - { - "kind": "ConfigMap", - "data": { "maxMonId": "0", "data": "a=10.97.47.226:6789", "mapping": {} }, - "name": "rook-ceph-mon-endpoints" - }, - { - "kind": "Secret", - "data": { - "mon-secret": "mon-secret", - "fsid": "b99e887c-9f73-47e7-96cc-33c6f3f39aa9", - "cluster-name": "openshift-storage", - "admin-secret": "admin-secret" - }, - "name": "rook-ceph-mon" - }, - { - "kind": "Secret", - "data": { - "userKey": "AQAwA85ezIXyORAA8YDlVMoyo5+tByxcCZigoQ==", - "userID": "client.healthchecker" - }, - "name": "rook-ceph-operator-creds" - }, - { - "kind": "Secret", - "data": { "userKey": "AQDtAc5e/S7JJBAAhh2k1cjM5KszMObC0Wugyg==", "userID": "csi-rbd-node" }, - "name": "rook-csi-rbd-node" - }, - { - "kind": "Secret", - "data": { - "userKey": "AQDtAc5ebRXHFBAAAGwy0lHtt3gYt9Q/9jhzXg==", - "userID": "csi-rbd-provisioner" - }, - "name": "rook-csi-rbd-provisioner" - }, - { - "kind": "Secret", - "data": { - "adminID": "csi-cephfs-node", - "adminKey": "AQDuAc5esOp8DRAAi2cI3nGVBWH++9cOoE9b9g==" - }, - "name": "rook-csi-cephfs-node" - }, - { - "kind": "Secret", - "data": { - "adminID": "csi-cephfs-provisioner", - "adminKey": "AQDtAc5eHe3DNRAAIe6HHnQRPSABuhj3GzEeoA==" - }, - "name": "rook-csi-cephfs-provisioner" - }, - { "kind": "StorageClass", "data": { "pool": "myfs-data0" }, "name": "ceph-rbd" }, - { "kind": "StorageClass", "data": { "pool": "myfs-data0", "fsName": "myfs" }, "name": "cephfs" }, - { "kind": "StorageClass", "data": { "endpoint": "10.10.212.122:9000" }, "name": "ceph-rgw" } -] diff --git a/frontend/packages/ceph-storage-plugin/integration-tests/tests/1-install/installFlow.scenario.ts 
b/frontend/packages/ceph-storage-plugin/integration-tests/tests/1-install/installFlow.scenario.ts deleted file mode 100644 index 1a3a8cc58f3e..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests/tests/1-install/installFlow.scenario.ts +++ /dev/null @@ -1,385 +0,0 @@ -import { execSync } from 'child_process'; -import { browser, ExpectedConditions as until } from 'protractor'; -import * as _ from 'lodash'; -import { Base64 } from 'js-base64'; -import * as crudView from '@console/internal-integration-tests/views/crud.view'; -import { click } from '@console/shared/src/test-utils/utils'; -import { DISCOVERY_CR_NAME } from '@console/local-storage-operator-plugin/src/constants'; -import { - MINUTE, - OCS_NODE_LABEL, - POD_NAME_PATTERNS, - STORAGE_CLASS_PATTERNS, - STORAGE_CLUSTER_NAME, - SECOND, - NS as OCS_NS, - LSO_INFO_MSG, - SC_STEPS_NAME, - CONFIRM_MODAL_TITLE, -} from '../../utils/consts'; -import { - InstallCluster, - filterInput, - goToStorageClasses, - TEST_PLATFORM, - MODE, - Platform, - Mode, - currentSelectors, -} from '../../views/installFlow.view'; -import { - checkIfClusterIsReady, - getDataFromRowAndCol, - getPodData, - getPodPhase, - podNameFilter, - sendKeys, - testPodIsRunning, - verifyNodeLabels, - testPodIsSucceeded, -} from '../../utils/helpers'; -import { ClusterMetadata } from '../../mocks/independent-external-cluster-data'; -import { testNoProvisionerSC } from '../../mocks/storage-class'; -import { MINIMUM_NODES } from '../../../src/constants'; -import { goToInstalledOperators } from '../../views/add-capacity.view'; - -const Installer = new InstallCluster(); - -/** - * - Tests the namespace creation (Remove this) - * - Installs OCS Operator from Operator Hub - * - Tests for various resources associated with OCS Operator to be in acceptable state - * - Creates Storage Cluster - * - Tests for resources associated with Storage Cluster to be in acceptable state - */ - -describe('Testing OCS Subscription', () => { - it( - 'tests subscription flow for OCS Operator', - async () => { - await Installer.subscribeToOperator(); - await Installer.checkOCSOperatorInstallation(); - }, - 3 * MINUTE, - ); - - it('tests for presence of 3 operator pods', async () => { - const podList = JSON.parse( - execSync('kubectl get po -n openshift-storage -o json').toString('utf-8'), - ); - const pods = podList.items; - let pod = getPodData(pods, POD_NAME_PATTERNS.OCS); - let phase = getPodPhase(pod); - expect(phase).toBe('Running'); - pod = getPodData(pods, POD_NAME_PATTERNS.ROOK); - phase = getPodPhase(pod); - expect(phase).toBe('Running'); - pod = getPodData(pods, POD_NAME_PATTERNS.NOOBA_OPERATOR); - expect(getPodPhase(pod)).toBe('Running'); - }); -}); - -if (TEST_PLATFORM === Platform.OCP || (TEST_PLATFORM === Platform.OCS && MODE === Mode.CONVERGED)) { - describe('Test creation of Converged Storage Cluster', () => { - it( - 'creates a storage cluster', - async () => { - const { selectedNodes } = await Installer.createConvergedStorageCluster(); - browser.sleep(2 * SECOND); - const text = await crudView.resourceTitle.getText(); - expect(text).toEqual(STORAGE_CLUSTER_NAME); - // Verify all the nodes have the required labels - // Wait for 5 seconds for label to apply - await browser.sleep(5 * SECOND); - let nodes: string[] = await Promise.all(selectedNodes); - // Data syntax Node\nN\n - nodes = nodes.map((node) => node.split('\n')[2]); - nodes.forEach((node) => expect(verifyNodeLabels(node, OCS_NODE_LABEL)).toBeTruthy()); - const storageCR = JSON.parse( - execSync( - `kubectl 
get storageclusters ${STORAGE_CLUSTER_NAME} -n ${OCS_NS} -o json`, - ).toString(), - ); - const scFromCR = - storageCR?.spec?.storageDeviceSets?.[0]?.dataPVCTemplate?.spec?.storageClassName; - const sizeFromCR = - storageCR?.spec?.storageDeviceSets?.[0]?.dataPVCTemplate?.spec?.resources?.requests - ?.storage; - const defaultSC = execSync(`kubectl get storageclasses | grep -Po '\\w+(?=.*default)'`) - .toString() - .trim(); - expect(sizeFromCR).toEqual('512Gi'); - expect(defaultSC).toEqual(scFromCR); - }, - 16 * MINUTE, - ); - }); -} - -if (TEST_PLATFORM === Platform.OCP || (TEST_PLATFORM === Platform.OCS && MODE === Mode.EXTERNAL)) { - describe('Test creation of External Storage Cluster', () => { - beforeAll(async () => { - await Installer.createExternalStorageCluster(); - }); - - it('Test secret is created with required data', async () => { - const secret = JSON.parse( - execSync( - `kubectl get secret rook-ceph-external-cluster-details -n openshift-storage -o json`, - ).toString(), - ); - const fileData = JSON.parse(Base64.decode(secret.data.external_cluster_details)); - expect(_.isEqual(fileData, ClusterMetadata)).toEqual(true); - }); - - it('Test Storage Cluster CR is created with externalStorage set to true', async () => { - const storageCluster = JSON.parse( - execSync( - `kubectl get storagecluster ocs-independent-storagecluster -n openshift-storage -o json`, - ).toString(), - ); - expect(storageCluster.spec.externalStorage.enable).toEqual(true); - }); - }); -} - -if (TEST_PLATFORM === Platform.OCS && MODE === Mode.ATTACHED_DEVICES) { - describe('Test creation of Attached Storage Cluster', () => { - it('Info Alert should be shown as LSO will not be installed', async () => { - await Installer.createAttachedStorageCluster(); - const msg = await currentSelectors.LSOAlert.getText(); - expect(msg.includes(LSO_INFO_MSG)).toBe(true); - }); - - it( - 'tests subscription flow for LSO Operator', - async () => { - await Installer.subscribeToLSOOperator(); - }, - 3 * MINUTE, - ); - - it('Create Storage Class Wizard should be present once LSO is installed and no storage class is present', async () => { - await goToInstalledOperators(); - await Installer.selectOCSOperator(); - await Installer.createAttachedStorageCluster(); - expect(currentSelectors.LSOWizard.isPresent()).toBeTruthy(); - }); - - it('Create Storage Cluster View should be present when LSO is installed and storage class is present', async () => { - execSync(`echo '${JSON.stringify(testNoProvisionerSC)}' | oc apply -f -`); - await goToInstalledOperators(); - await Installer.selectOCSOperator(); - await Installer.createAttachedStorageCluster(); - expect(currentSelectors.LSOWizard.isPresent()).toBeFalsy(); - - // installation page should not be present - const msg = await currentSelectors.LSOAlert.getText(); - expect(msg.includes(LSO_INFO_MSG)).toBe(false); - execSync(`echo '${JSON.stringify(testNoProvisionerSC)}' | oc delete -f -`); - }); - - it('Should show error message on Create Storage Cluster View, if storage class does not contain minimum 3 nodes ', async () => { - execSync(`echo '${JSON.stringify(testNoProvisionerSC)}' | oc apply -f -`); - await goToInstalledOperators(); - await Installer.selectOCSOperator(); - await Installer.createAttachedStorageCluster(); - await click(currentSelectors.scDropdown); - await click(currentSelectors.selectSC(testNoProvisionerSC.metadata.name)); - await browser.wait(until.visibilityOf(currentSelectors.createNewSCBtn)); - await browser.wait(until.and(crudView.untilNoLoadersPresent)); - 
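// Per the test title: the no-provisioner storage class here has no backing nodes/PVs,
// so it cannot satisfy the minimum of 3 nodes (MINIMUM_NODES) and the wizard is
// expected to surface an error alert.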
expect(currentSelectors.errorAlert.isPresent()).toBeTruthy(); - execSync(`echo '${JSON.stringify(testNoProvisionerSC)}' | oc delete -f -`); - }); - - describe('Should be able to create storage class via the wizard, when no storage class is present for attached devices', async () => { - const scName = 'lvs'; - let step = ''; - - beforeAll(async () => { - await goToInstalledOperators(); - await Installer.selectOCSOperator(); - await Installer.createAttachedStorageCluster(); - }); - - it('Should see the 1st step - Discover Disks', async () => { - await browser.wait(until.and(crudView.untilNoLoadersPresent)); - step = await currentSelectors.currentStep.getText(); - expect(step.includes(SC_STEPS_NAME.DISCOVERY)).toBeTruthy(); - // next btn - await click(currentSelectors.primaryButton); - }); - - it('Should see the 2nd step - Create Local Volume Set and all the required conditions should be met', async () => { - // 2nd step - await browser.wait(until.and(crudView.untilNoLoadersPresent)); - - // verify discovery CR got created - const discoveryCR = JSON.parse( - execSync(`kubectl get LocalVolumeDiscovery ${DISCOVERY_CR_NAME} -A -o json`).toString(), - ); - const numOfNodes = - discoveryCR?.spec?.nodeSelector?.nodeSelectorTerms?.[0]?.matchExpressions?.[0]?.values - ?.length; - expect(numOfNodes).toBe(MINIMUM_NODES); - - await browser.wait(until.visibilityOf(currentSelectors.localVolumeSetView)); - step = await currentSelectors.currentStep.getText(); - expect(step.includes(SC_STEPS_NAME.STORAGECLASS)).toBeTruthy(); - let classes = await currentSelectors.primaryButton.getAttribute('class'); - // next btn should be disabled as lvs name is not yet entered - expect(classes).toContain('pf-m-disabled'); - - await currentSelectors.volumeSetName.sendKeys(scName); - classes = await currentSelectors.primaryButton.getAttribute('class'); - // next btn should be enabled now as lvs name is entered - expect(classes).not.toContain('pf-m-disabled'); - const nodesCnt = await currentSelectors.nodesCntOnLVS.getText(); - expect(nodesCnt.includes(MINIMUM_NODES)).toBeTruthy(); - // next btn - await click(currentSelectors.primaryButton); - - // confirm Modal - const modalTitle = await currentSelectors.confirmModal.getText(); - expect(modalTitle.includes(CONFIRM_MODAL_TITLE)).toBe(true); - await click(currentSelectors.confirmBtn); - }); - - it('Should see the 3rd step - Create storage cluster and all the required conditions should be met', async () => { - // 3rd step - await browser.wait(until.and(crudView.untilNoLoadersPresent)); - await browser.wait(until.visibilityOf(currentSelectors.createStorageClusterView)); - step = await currentSelectors.currentStep.getText(); - expect(step.includes(SC_STEPS_NAME.STORAGECLUSTER)).toBe(true); - // need to wait for the nodes to get associated properly to storage class - await browser.sleep(40 * SECOND); - await click(currentSelectors.scDropdown); - await click(currentSelectors.selectSC(scName)); - await currentSelectors.nodeListHandler(); - - const nodeNames = await currentSelectors.nodeNamesForAD; - const selectedNodes = nodeNames.map((nodeName) => nodeName.getText()); - - await click(currentSelectors.primaryButton); - await browser.wait(until.and(crudView.untilNoLoadersPresent)); - - const text = await crudView.resourceTitle.getText(); - expect(text).toEqual(STORAGE_CLUSTER_NAME); - // Verify all the nodes have the required labels - // Wait for 5 seconds for label to apply - await browser.sleep(5 * SECOND); - let nodes: string[] = await Promise.all(selectedNodes); - // Data syntax
Node\nN\n - nodes = nodes.map((node) => node.split('\n')[2]); - nodes.forEach((node) => expect(verifyNodeLabels(node, OCS_NODE_LABEL)).toBeTruthy()); - const storageCR = JSON.parse( - execSync( - `kubectl get storageclusters ${STORAGE_CLUSTER_NAME} -n ${OCS_NS} -o json`, - ).toString(), - ); - const scFromCR = - storageCR?.spec?.storageDeviceSets?.[0]?.dataPVCTemplate?.spec?.storageClassName; - const monDataDirHostPath = storageCR?.spec?.monDataDirHostPath; - const portable = storageCR?.spec?.storageDeviceSets?.[0]?.portable; - expect(scFromCR).toEqual(scName); - expect(monDataDirHostPath).toBe('/var/lib/rook'); - expect(portable).toBeFalsy(); - }); - }); - }); -} - -if (TEST_PLATFORM === 'OCS') { - describe('Tests for pods and storage classes', () => { - let pods = null; - - beforeAll(async () => { - // Wait for cluster to come to ready state - await checkIfClusterIsReady(); - const podList = JSON.parse( - execSync('kubectl get po -n openshift-storage -o json').toString('utf-8'), - ); - pods = podList.items; - }); - - it('tests if ocs-operator is running', () => { - const pod = getPodData(pods, POD_NAME_PATTERNS.OCS); - testPodIsRunning(getPodPhase(pod)); - }); - - it('tests if rook-ceph-operator is running', () => { - const pod = getPodData(pods, POD_NAME_PATTERNS.ROOK); - testPodIsRunning(getPodPhase(pod)); - }); - - it('tests if noobaa-operator is running', () => { - const pod = getPodData(pods, POD_NAME_PATTERNS.NOOBA_OPERATOR); - testPodIsRunning(getPodPhase(pod)); - }); - - it('tests if noobaa-core is running', () => { - const pod = getPodData(pods, POD_NAME_PATTERNS.NOOBAA_CORE); - testPodIsRunning(getPodPhase(pod)); - }); - - it("tests if 3 rook-ceph-mon's are running", () => { - const podList = getPodData(pods, POD_NAME_PATTERNS.ROOK_CEPH_MON); - expect(podList.length).toBe(3); - podList.forEach((pod) => { - testPodIsRunning(getPodPhase(pod)); - }); - }); - - it('tests if rook-ceph-mgr is running', () => { - const pod = getPodData(pods, POD_NAME_PATTERNS.ROOK_CEPH_MGR); - testPodIsRunning(getPodPhase(pod)); - }); - - // 3 cephfsplugin-* 2 csi-cephfsplugin-provisioner-* - it('tests if 5 csi-cephfsplugin are running', () => { - const podList = getPodData(pods, POD_NAME_PATTERNS.CSI_CEPHFS); - expect(podList.length).toBe(5); - podList.forEach((pod) => { - testPodIsRunning(getPodPhase(pod)); - }); - }); - - // 2 csi-rbdplugin-provisioner-* 3 csi-rbd-plugin-* - it('tests if 5 csi-rbdplugin are running', () => { - const podList = getPodData(pods, POD_NAME_PATTERNS.CSI_RBD); - expect(podList.length).toBe(5); - podList.forEach((pod) => { - testPodIsRunning(getPodPhase(pod)); - }); - }); - - it('tests if 2 rook-ceph-mds-ocs-storagecluster-cephfilesystem pods are running', () => { - const podList = getPodData(pods, POD_NAME_PATTERNS.ROOK_CEPH_MDS); - expect(podList.length).toBe(2); - podList.forEach((pod) => { - testPodIsRunning(getPodPhase(pod)); - }); - }); - - it('tests if 3 rook-ceph-osd-prepare-ocs-deviceset have succeeded', () => { - const podList = getPodData(pods, POD_NAME_PATTERNS.ROOK_CEPH_OSD_PREPARE); - expect(podList.length).toBe(3); - podList.forEach((pod) => { - testPodIsSucceeded(getPodPhase(pod)); - }); - }); - - it('tests if all ceph-rbd, cephfs, noobaa storage classes are shown', async () => { - await goToStorageClasses(); - await sendKeys(filterInput, STORAGE_CLASS_PATTERNS.RBD); - const rbdClass = await getDataFromRowAndCol(0, 1, podNameFilter); - expect(rbdClass.includes(STORAGE_CLASS_PATTERNS.RBD)).toBe(true); - await sendKeys(filterInput, STORAGE_CLASS_PATTERNS.FS);
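- // after filtering, the first row should hold the cephfs storage class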
- const fsClass = await getDataFromRowAndCol(0, 1, podNameFilter); - expect(fsClass.includes(STORAGE_CLASS_PATTERNS.FS)).toBe(true); - await sendKeys(filterInput, STORAGE_CLASS_PATTERNS.NOOBAA); - const noobaaClass = await getDataFromRowAndCol(0, 1, podNameFilter); - expect(noobaaClass.includes(STORAGE_CLASS_PATTERNS.NOOBAA)).toBe(true); - }); - }); -} diff --git a/frontend/packages/ceph-storage-plugin/integration-tests/tests/2-tests/add-capacity.scenario.ts b/frontend/packages/ceph-storage-plugin/integration-tests/tests/2-tests/add-capacity.scenario.ts deleted file mode 100644 index 0c021e58936e..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests/tests/2-tests/add-capacity.scenario.ts +++ /dev/null @@ -1,298 +0,0 @@ -import { execSync } from 'child_process'; -import { browser, ExpectedConditions as until } from 'protractor'; -import { click } from '@console/shared/src/test-utils/utils'; -import { isNodeReady } from '@console/shared/src/selectors/node'; -import { PodKind } from '@console/internal/module/k8s'; -import { getName } from '@console/shared/src/selectors/common'; -import { DeviceSet } from '../../../src/types'; -import { verifyFields, selectSCDropdown, currentACSelector } from '../../views/add-capacity.view'; -import { - CLUSTER_STATUS, - EXPAND_WAIT, - HOST, - KIND, - NS, - OSD, - POD_NAME_PATTERNS, - SECOND, - ZONE, - MINUTE, - STORAGE_CLUSTER_NAME, - OSD_SIZES_MAP, -} from '../../utils/consts'; -import { - createOSDTreeMap, - getIds, - getNewOSDIds, - getPodName, - getPodPhase, - getPodRestartCount, - isPodPresent, - NodeType, - FormattedOsdTreeType, - testPodIsSucceeded, - verifyNodeOSDMapping, - verifyZoneOSDMapping, -} from '../../utils/helpers'; -import { TEST_PLATFORM } from '../../views/installFlow.view'; -import { testNoProvisionerSC } from '../../mocks/storage-class'; - -const storageCluster = JSON.parse(execSync(`kubectl get -o json -n ${NS} ${KIND}`).toString()); -const cephValue = JSON.parse(execSync(`kubectl get cephCluster -n ${NS} -o json`).toString()); -const clusterStatus = storageCluster.items[0]; -const cephHealth = cephValue.items[0]; - -const expansionObjects: ExpansionObjectsType = { - clusterJSON: {}, - previousCnt: 0, - updatedCnt: 0, - updatedClusterJSON: {}, - previousPods: { items: [] }, - updatedPods: { items: [] }, - previousOSDTree: { nodes: [] }, - updatedOSDTree: { nodes: [] }, - formattedOSDTree: {}, - previousOSDIds: [], - newOSDIds: [], - uid: '', - defaultSC: '', - name: '', -}; - -describe('Check add capacity functionality for ocs service', () => { - describe('For common test cases', () => { - beforeAll(async () => { - [expansionObjects.clusterJSON] = storageCluster.items; - expansionObjects.name = getName(expansionObjects.clusterJSON); - const initialDeviceSet: DeviceSet = - expansionObjects.clusterJSON?.spec?.storageDeviceSets?.[0]; - expansionObjects.previousCnt = initialDeviceSet?.count; - - expansionObjects.uid = expansionObjects.clusterJSON?.metadata?.uid; - expansionObjects.previousPods = JSON.parse( - execSync(`kubectl get pods -n ${NS} -o json`).toString(), - ); - - expansionObjects.previousOSDTree = JSON.parse( - execSync( - `oc -n ${NS} rsh $(oc -n ${NS} get pod | grep ceph-operator| awk '{print$1}') ceph --conf=/var/lib/rook/${NS}/${NS}.config osd tree --format=json`, - ).toString(), - ); - expansionObjects.previousOSDIds = getIds(expansionObjects.previousOSDTree.nodes, OSD); - const initialClusterCapacity = - initialDeviceSet?.dataPVCTemplate?.spec?.resources?.requests?.storage; - - await 
selectSCDropdown(expansionObjects.uid); - - // eslint-disable-next-line no-useless-escape - expansionObjects.defaultSC = execSync( - `kubectl get storageclasses | grep -Po '\\w+(?=.*default)'`, - ) - .toString() - .trim(); - await click(currentACSelector.getSCOption(expansionObjects.defaultSC)); - await verifyFields(OSD_SIZES_MAP[initialClusterCapacity]); - await click(currentACSelector.confirmButton); - - await browser.sleep(5 * SECOND); - - expansionObjects.updatedClusterJSON = JSON.parse( - execSync(`kubectl get -o json -n ${NS} ${KIND} ${expansionObjects.name}`).toString(), - ); - expansionObjects.updatedCnt = - expansionObjects?.updatedClusterJSON?.spec?.storageDeviceSets?.[0]?.count; - }); - - it('Newly added capacity should take effect at the storage level', () => { - expect(expansionObjects.updatedCnt - expansionObjects.previousCnt).toEqual(1); - }); - - it('Selected storage class should be sent in the YAML', () => { - const storageCR = JSON.parse( - execSync(`kubectl get storageclusters ${STORAGE_CLUSTER_NAME} -n ${NS} -o json`).toString(), - ); - const scFromYAML = - storageCR?.spec?.storageDeviceSets?.[0]?.dataPVCTemplate?.spec?.storageClassName; - expect(expansionObjects.defaultSC).toEqual(scFromYAML); - }); - }); - - describe('Additional tests for Baremetal infra', () => { - beforeAll(async () => { - await selectSCDropdown(expansionObjects.uid); - execSync(`echo '${JSON.stringify(testNoProvisionerSC)}' | oc apply -f -`); - // need to wait for some time in order to reflect the storage class - await browser.sleep(40 * SECOND); - await click(currentACSelector.getSCOption(testNoProvisionerSC.metadata.name)); - }); - - afterAll(async () => { - execSync(`echo '${JSON.stringify(testNoProvisionerSC)}' | oc delete -f -`); - }); - - it('Raw Capacity field should be hidden', () => { - expect(currentACSelector.capacityValueInput.isPresent()).toBe(false); - expect(currentACSelector.totalRequestedcapacity.isPresent()).toBe(false); - }); - }); -}); - -if (TEST_PLATFORM === 'OCS') { - describe('Check availability of ocs cluster', () => { - if (clusterStatus) { - it('Should check if the ocs cluster is Ready for expansion', () => { - expect(clusterStatus?.status?.phase).toBe(CLUSTER_STATUS.READY); - }); - } else { - it('Should state that ocs cluster is not ready for expansion', () => { - expect(clusterStatus).toBeUndefined(); - }); - } - }); - - describe('Check availability of Ceph cluster', () => { - if (cephHealth) { - it('Check if the Ceph cluster is healthy before expansion', () => { - expect(cephHealth.status.ceph.health).not.toBe(CLUSTER_STATUS.HEALTH_ERROR); - }); - } else { - it('Should state that Ceph cluster does not exist', () => { - expect(cephHealth).toBeUndefined(); - }); - } - }); - - if (clusterStatus && cephHealth) { - describe('Check for remaining tests for add capacity functionality for ocs service', () => { - beforeAll(async () => { - const statusCol = currentACSelector - .storageClusterRow(expansionObjects.uid) - .$('td:nth-child(3)'); - // need to wait as the cluster state fluctuates for some time.
Waiting for 2 secs for the same - await browser.sleep(2 * SECOND); - - await browser.wait( - until.textToBePresentInElement( - currentACSelector.getProgressingStateEl(statusCol), - CLUSTER_STATUS.PROGRESSING, - ), - ); - await browser.wait( - until.textToBePresentInElement( - currentACSelector.getReadyStateEl(statusCol), - CLUSTER_STATUS.READY, - ), - ); - - expansionObjects.updatedPods = JSON.parse( - execSync(`kubectl get pod -o json -n ${NS}`).toString(), - ); - // need to wait to get the new osds reflected in osd tree - await browser.sleep(1 * MINUTE); - expansionObjects.updatedOSDTree = JSON.parse( - execSync( - `oc -n ${NS} rsh $(oc -n ${NS} get pod | grep ceph-operator| awk '{print$1}') ceph --conf=/var/lib/rook/${NS}/${NS}.config osd tree --format=json`, - ).toString(), - ); - expansionObjects.formattedOSDTree = createOSDTreeMap( - expansionObjects.updatedOSDTree.nodes, - ) as FormattedOsdTreeType; - expansionObjects.newOSDIds = getNewOSDIds( - expansionObjects.updatedOSDTree.nodes, - expansionObjects.previousOSDIds, - ); - }, EXPAND_WAIT); - - it('No ocs pods should get restarted unexpectedly', () => { - expansionObjects.previousPods.items.forEach((pod) => { - const prevRestartCnt = getPodRestartCount(pod); - const updatedpod = isPodPresent(expansionObjects.updatedPods, getPodName(pod)); - if (updatedpod) { - const updatedRestartCnt = getPodRestartCount(updatedpod); - expect(prevRestartCnt).toBe(updatedRestartCnt); - } - }); - }); - - it('No ocs nodes should go to NotReady state', () => { - const nodes = JSON.parse(execSync(`kubectl get nodes -o json`).toString()); - const areAllNodes = nodes.items.every((node) => isNodeReady(node)); - - expect(areAllNodes).toEqual(true); - }); - - it('Ceph cluster should be healthy after expansion', () => { - const cephValueAfter = JSON.parse( - execSync(`kubectl get cephCluster -n ${NS} -o json`).toString(), - ); - const cephHealthAfter = cephValueAfter.items[0]; - expect(cephHealthAfter.status.ceph.health).not.toBe(CLUSTER_STATUS.HEALTH_ERROR); - }); - - it('New osds are added correctly to the availability zones/failure domains', () => { - const zones = getIds(expansionObjects.updatedOSDTree.nodes, ZONE); - expect( - verifyZoneOSDMapping( - zones, - expansionObjects.newOSDIds, - expansionObjects.formattedOSDTree, - ), - ).toEqual(true); - }); - - it('New osds are added correctly to the right nodes', () => { - const nodes = getIds(expansionObjects.updatedOSDTree.nodes, HOST); - expect( - verifyNodeOSDMapping( - nodes, - expansionObjects.newOSDIds, - expansionObjects.formattedOSDTree, - ), - ).toEqual(true); - }); - }); - - it('New osd pods corresponding to the additional capacity should be in running state', () => { - const newOSDPods = [] as PodKind[]; - const newOSDPreparePods = [] as PodKind[]; - - expansionObjects.updatedPods.items.forEach((pod) => { - const podName = getPodName(pod); - if (!isPodPresent(expansionObjects.previousPods, podName)) { - if (podName.includes(POD_NAME_PATTERNS.ROOK_CEPH_OSD_PREPARE)) { - newOSDPreparePods.push(pod); - } else if (podName.includes(POD_NAME_PATTERNS.ROOK_CEPH_OSD)) { - newOSDPods.push(pod); - } - } - }); - - expect(newOSDPods.length).toEqual(3); - expect(newOSDPreparePods.length).toEqual(3); - - newOSDPreparePods.forEach((pod) => { - testPodIsSucceeded(getPodPhase(pod)); - }); - }); - } -} -type PodType = { - items: PodKind[]; -}; - -export type ExpansionObjectsType = { - clusterJSON: any; - previousCnt: number; - updatedCnt: number; - updatedClusterJSON: any; - previousPods: PodType; - updatedPods: 
PodType; - previousOSDTree: { nodes: NodeType[] }; - updatedOSDTree: { nodes: NodeType[] }; - formattedOSDTree: FormattedOsdTreeType; - previousOSDIds: number[]; - newOSDIds: number[]; - uid: string; - defaultSC: string; - name: string; -}; diff --git a/frontend/packages/ceph-storage-plugin/integration-tests/tests/2-tests/multiple-pool.scenario.ts b/frontend/packages/ceph-storage-plugin/integration-tests/tests/2-tests/multiple-pool.scenario.ts deleted file mode 100644 index 9cd219941b3f..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests/tests/2-tests/multiple-pool.scenario.ts +++ /dev/null @@ -1,99 +0,0 @@ -import { execSync } from 'child_process'; -import { browser, ExpectedConditions as until } from 'protractor'; -import { click } from '@console/shared/src/test-utils/utils'; -import { NS, CLUSTER_STATUS } from '../../utils/consts'; -import { - prepareStorageClassForm, - allowExpand, - poolDropdownButton, - createPoolDropdown, - createPool, - poolForm, - poolStatusCheck, - poolMessage, - POOL_STATUS, - finishButton, - dropdownPoolName, - poolDescription, - modalPresence, - showProvisioner, -} from '../../views/multiple-pool.view'; - -const cephValue = JSON.parse(execSync(`kubectl get cephCluster -n ${NS} -o json`).toString()); -const cephStatus = cephValue.items[0]; - -if (cephStatus?.status?.phase !== CLUSTER_STATUS.READY) { - describe('Check for pool creation if ceph cluster is not in ready state', () => { - beforeAll(async () => { - prepareStorageClassForm('openshift-storage.rbd.csi.ceph.com'); - }); - - it('Should show that provisioner supports expanding', () => { - expect(allowExpand.getText()).toEqual('Allow persistent volume claims to be expanded'); - }); - - it('Should open a modal when clicked on create new pool and report the ceph cluster is not ready', async () => { - await click(createPoolDropdown); - expect(poolStatusCheck.getText()).toEqual(poolMessage.PROGRESS); - }); - }); -} - -if (cephStatus?.status?.phase === CLUSTER_STATUS.READY) { - describe('Check for pool creation if ceph cluster is in ready state', () => { - beforeAll(async () => { - prepareStorageClassForm('openshift-storage.rbd.csi.ceph.com'); - await click(poolDropdownButton); - await click(createPoolDropdown); - }); - - afterAll(async () => { - execSync(`kubectl delete CephBlockPool foo -n ${NS}`); - }); - - it('Should show that provisioner supports expanding', () => { - expect(allowExpand.getText()).toEqual('Allow persistent volume claims to be expanded'); - }); - - it('Should show the pool form', () => { - expect(poolForm.getText()).toEqual('Pool Name'); - }); - - it('Should initiate pool creation', async () => { - createPool(); - await browser.wait(until.presenceOf(poolStatusCheck)); - expect(poolStatusCheck.getText()).toEqual(poolMessage.POOL_START); - }); - - it('Should successfully create a pool', async () => { - await browser.wait(until.textToBePresentInElement(poolStatusCheck, poolMessage.POOL_CREATED)); - const poolValue = JSON.parse( - execSync(`kubectl get Cephblockpool foo -n ${NS} -o json`).toString(), - ); - expect(poolValue.status.phase).toEqual(POOL_STATUS.READY); - expect(poolStatusCheck.getText()).toEqual(poolMessage.POOL_CREATED); - }); - - it('Should successfully close the modal', async () => { - await click(finishButton); - expect(modalPresence.isPresent()).toBe(false); - }); - - it('Should add the pool to the dropdown', async () => { - await browser.refresh(); - showProvisioner('openshift-storage.rbd.csi.ceph.com'); - await click(poolDropdownButton); - 
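// after the refresh, the pool created above ('foo') should be listed in the dropdown with its replica and compression summary -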
expect(dropdownPoolName.getText()).toEqual('foo'); - expect(poolDescription.getText()).toEqual('Replica 2, no compression'); - }); - - it('Should throw an error if duplicate pool is created', async () => { - await click(createPoolDropdown); - createPool(); - await browser.wait( - until.textToBePresentInElement(poolStatusCheck, poolMessage.POOL_DUPLICATED), - ); - expect(poolStatusCheck.getText()).toEqual(poolMessage.POOL_DUPLICATED); - }); - }); -} diff --git a/frontend/packages/ceph-storage-plugin/integration-tests/tests/2-tests/noobaa-sso-scenario.ts b/frontend/packages/ceph-storage-plugin/integration-tests/tests/2-tests/noobaa-sso-scenario.ts deleted file mode 100644 index ebb9d030d3e8..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests/tests/2-tests/noobaa-sso-scenario.ts +++ /dev/null @@ -1,39 +0,0 @@ -import { browser } from 'protractor'; -import { appHost } from '@console/internal-integration-tests/protractor.conf'; -import { isLoaded } from '@console/shared/src/test-views/dashboard-shared.view'; -import { click } from '@console/shared/src/test-utils/utils'; -import { - noobaaAddStorageResource, - noobaaAddStorageResourceModal, - noobaaExternalLink, - objectServiceLink, - overviewLink, -} from '../../views/noobaa-sso.view'; -import { SECOND } from '../../utils/consts'; - -describe('Check noobaa link in object service dashboard and perform SSO.', () => { - beforeAll(async () => { - await browser.get(`${appHost}/dashboards`); - await isLoaded(); - }); - - it('Check that the noobaa dashboard opens and its links are available.', async () => { - await click(objectServiceLink); - const parentGUID = await browser.getWindowHandle(); - await click(noobaaExternalLink); - await browser.sleep(2 * SECOND); - for (const guid of await browser.getAllWindowHandles()) { - if (guid !== parentGUID) { - await browser.switchTo().window(guid); - break; - } - } - - await click(noobaaAddStorageResource); - await browser.sleep(1 * SECOND); - expect(noobaaAddStorageResourceModal.isPresent()).toBe(true); - await browser.close(); - await browser.switchTo().window(parentGUID); - expect(overviewLink.isPresent()).toBe(true); - }); -}); diff --git a/frontend/packages/ceph-storage-plugin/integration-tests/tests/2-tests/ocp-dashboard-card-healthcheck.scenario.ts b/frontend/packages/ceph-storage-plugin/integration-tests/tests/2-tests/ocp-dashboard-card-healthcheck.scenario.ts deleted file mode 100644 index 823f33beced5..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests/tests/2-tests/ocp-dashboard-card-healthcheck.scenario.ts +++ /dev/null @@ -1,42 +0,0 @@ -import { execSync } from 'child_process'; -import { browser, ExpectedConditions as until } from 'protractor'; -import { appHost } from '@console/internal-integration-tests/protractor.conf'; -import { isLoaded as dashboardIsLoaded } from '@console/shared/src/test-views/dashboard-shared.view'; -import { - mainHealtGreenSvg, - mainHealtRedSvg, - mainHealtYellowSvg, - noOutChange, -} from '../../views/ocp-dashboard-card-healthcheck.view'; -import { SECOND } from '../../utils/consts'; - -describe('Check health data on main OCP dashboard', () => { - beforeAll(async () => { - await browser.get(`${appHost}/dashboards`); - await dashboardIsLoaded(); - }); - - it('Check main dashboard health is green', async () => { - await browser.sleep(7 * SECOND); - expect(mainHealtGreenSvg.isPresent()).toBe(true); - }); - - it('Check main dashboard health icon is yellow and cluster health is degraded.', async () => { - await
browser.wait(until.presenceOf(mainHealtGreenSvg)); - noOutChange('set'); - await browser.wait(until.not(until.presenceOf(mainHealtGreenSvg)), 60 * SECOND); - await browser.sleep(2 * SECOND); - expect(mainHealtYellowSvg.isPresent()).toBe(true); - noOutChange('unset'); - await browser.wait(until.presenceOf(mainHealtGreenSvg), 120 * SECOND); - }); - - xit('Check main dashboard health icon is red and cluster is NA', async () => { - execSync('kubectl -n openshift-storage scale deployment/rook-ceph-mgr-a --replicas=0'); - await browser.wait(until.not(until.presenceOf(mainHealtGreenSvg))); - await browser.sleep(2 * SECOND); - expect(mainHealtRedSvg.isPresent()).toBe(true); - execSync('kubectl -n openshift-storage scale deployment/rook-ceph-mgr-a --replicas=1'); - await browser.wait(until.presenceOf(mainHealtGreenSvg)); - }); -}); diff --git a/frontend/packages/ceph-storage-plugin/integration-tests/tests/2-tests/pvc.scenario.ts b/frontend/packages/ceph-storage-plugin/integration-tests/tests/2-tests/pvc.scenario.ts deleted file mode 100644 index 8e7d12c693aa..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests/tests/2-tests/pvc.scenario.ts +++ /dev/null @@ -1,105 +0,0 @@ -import { browser } from 'protractor'; -import { appHost } from '@console/internal-integration-tests/protractor.conf'; -import { resourceRowsPresent } from '@console/internal-integration-tests/views/crud.view'; -import { - createNewPersistentVolumeClaim, - deletePersistentVolumeClaim, - goToPersistentVolumeClaims, - pvcStatus, - pvcSize, -} from '../../views/pvc.view'; -import { - NS, - PVC_STATUS, - SIZE_UNITS, - STORAGE_CLASS_PATTERNS, - VOLUME_ACCESS_MODES, -} from '../../utils/consts'; - -describe('Test PVC creation with options.', () => { - beforeAll(async () => { - await browser.get(`${appHost}/`); - }); - - it('Test RBD PVC is created and gets bound', async () => { - const testPvc = { - name: 'rbdpvc', - namespace: NS, - size: '5', - sizeUnits: SIZE_UNITS.GI, - storageClass: STORAGE_CLASS_PATTERNS.RBD, - accessMode: VOLUME_ACCESS_MODES.RWO, - }; - await createNewPersistentVolumeClaim(testPvc, true); - expect(pvcStatus.getText()).toEqual(PVC_STATUS.BOUND); - await goToPersistentVolumeClaims(); - await resourceRowsPresent(); - await deletePersistentVolumeClaim('rbdpvc', NS); - }); - - it('Test PVC size is rounded', async () => { - // PVC size of 1.5 should be rounded to 2 - // https://bugzilla.redhat.com/show_bug.cgi?id=1746156 - const testPvc = { - name: 'rbdpvc', - namespace: NS, - size: '1.5', - sizeUnits: SIZE_UNITS.GI, - storageClass: STORAGE_CLASS_PATTERNS.RBD, - accessMode: VOLUME_ACCESS_MODES.RWO, - }; - await createNewPersistentVolumeClaim(testPvc, true); - expect(pvcSize.getText()).toEqual('2 GiB'); - await goToPersistentVolumeClaims(); - await resourceRowsPresent(); - await deletePersistentVolumeClaim('rbdpvc', NS); - }); - - it('Test cephFS PVC is created and gets bound', async () => { - const testPvc = { - name: 'cephfspvc', - namespace: NS, - size: '1', - sizeUnits: SIZE_UNITS.TI, - storageClass: STORAGE_CLASS_PATTERNS.FS, - accessMode: VOLUME_ACCESS_MODES.RWO, - }; - await createNewPersistentVolumeClaim(testPvc, true); - expect(pvcStatus.getText()).toEqual(PVC_STATUS.BOUND); - await goToPersistentVolumeClaims(); - await resourceRowsPresent(); - await deletePersistentVolumeClaim('cephfspvc', NS); - }); - - it('Test RWX RBD PVC is created and gets bound', async () => { - const testPvc = { - name: 'rwxrbdpvc', - namespace: NS, - size: '512', - sizeUnits: SIZE_UNITS.MI, - storageClass: 
STORAGE_CLASS_PATTERNS.RBD, - accessMode: VOLUME_ACCESS_MODES.RWX, - }; - await createNewPersistentVolumeClaim(testPvc, true); - expect(pvcStatus.getText()).toEqual(PVC_STATUS.BOUND); - await goToPersistentVolumeClaims(); - await resourceRowsPresent(); - await deletePersistentVolumeClaim('rwxrbdpvc', NS); - }); - - it('Test RWX CephFS PVC is created and gets bound', async () => { - const testPvc = { - name: 'rwxcephfspvc', - namespace: NS, - size: '5', - sizeUnits: SIZE_UNITS.GI, - storageClass: STORAGE_CLASS_PATTERNS.FS, - accessMode: VOLUME_ACCESS_MODES.RWX, - }; - await createNewPersistentVolumeClaim(testPvc, true); - expect(pvcStatus.getText()).toEqual(PVC_STATUS.BOUND); - await goToPersistentVolumeClaims(); - await resourceRowsPresent(); - await deletePersistentVolumeClaim('rwxcephfspvc', NS); - }); -}); diff --git a/frontend/packages/ceph-storage-plugin/integration-tests/tests/2-tests/storage-dashboard.scenario.ts b/frontend/packages/ceph-storage-plugin/integration-tests/tests/2-tests/storage-dashboard.scenario.ts deleted file mode 100644 index 1e2ebc09df4e..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests/tests/2-tests/storage-dashboard.scenario.ts +++ /dev/null @@ -1,63 +0,0 @@ -import { execSync } from 'child_process'; -import { - allNodes, - allPvcs, - allPvs, - clusterHealth, - clusterName, - goToStorageDashboard, - serviceName, -} from '../../views/storage-dashboard.view'; -import { createNewPersistentVolumeClaim, deletePersistentVolumeClaim } from '../../views/pvc.view'; -import { EXAMPLE_PVC, OCP_HEALTH_ICON_COLORS } from '../../utils/consts'; - -const OCS_SERVICE_NAME = 'OpenShift Container Storage'; - -describe('Check data on Persistent Storage Dashboard.', () => { - beforeAll(async () => { - await goToStorageDashboard(); - }); - - it('Check cluster is healthy', () => { - expect([OCP_HEALTH_ICON_COLORS.GREEN, OCP_HEALTH_ICON_COLORS.GREEN46]).toContain( - clusterHealth.getAttribute('fill'), - ); - }); - - it('Check service name is OCS', () => { - expect(serviceName.getText()).toContain(OCS_SERVICE_NAME); - }); - - it('Check if cluster name is correct', async () => { - const cephClusterName = execSync( - "kubectl get storagecluster -n openshift-storage -o jsonpath='{.items..metadata.name}'", - ); - expect(clusterName.getText()).toEqual(cephClusterName.toString().trim()); - }); - - it('Check the total number of OCS nodes', async () => { - const ocsNodesNumber = execSync( - "kubectl get nodes -l cluster.ocs.openshift.io/openshift-storage -o json | jq '.items | length'", - ); - expect(allNodes.getText()).toEqual(`${ocsNodesNumber.toString().trim()} Nodes`); - }); - - it('Check that number of PVCs is updated after successful PVC creation', async () => { - const pvcsNumber = Number(allPvcs.getText()); - await createNewPersistentVolumeClaim(EXAMPLE_PVC, true); - await goToStorageDashboard(); - const newPvcsNumber = Number(allPvcs.getText()); - await deletePersistentVolumeClaim(EXAMPLE_PVC.name, EXAMPLE_PVC.namespace); - expect(newPvcsNumber).toEqual(pvcsNumber + 1); - }); - - it('Check that number of PVs is updated after successful PVC creation', async () => { - await goToStorageDashboard(); - const pvsNumber = Number(allPvs.getText()); - await createNewPersistentVolumeClaim(EXAMPLE_PVC, true); - await goToStorageDashboard(); - const newPvsNumber = Number(allPvs.getText()); - await deletePersistentVolumeClaim(EXAMPLE_PVC.name, EXAMPLE_PVC.namespace); - expect(newPvsNumber).toEqual(pvsNumber + 1); - }); -}); diff --git 
a/frontend/packages/ceph-storage-plugin/integration-tests/tests/2-tests/test-expand.scenario.ts b/frontend/packages/ceph-storage-plugin/integration-tests/tests/2-tests/test-expand.scenario.ts deleted file mode 100644 index f44e90a3022d..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests/tests/2-tests/test-expand.scenario.ts +++ /dev/null @@ -1,81 +0,0 @@ -import { execSync } from 'child_process'; -import { browser, ExpectedConditions as until } from 'protractor'; -import { clickKebabAction } from '@console/internal-integration-tests/views/crud.view'; -import { click } from '@console/shared/src/test-utils/utils'; -import { testName } from '@console/internal-integration-tests/protractor.conf'; -import { - testPVC, - testRbdPVC, - testDeployment, - testDeploymentRbd, -} from '../../mocks/expand-test-mocks'; -import { - goToPersistentVolumeClaims, - expandButton, - createNewPersistentVolumeClaim, - expandSizeOption, - capacityUnitDropdown, - inputPVCSize, -} from '../../views/pvc.view'; -import { SECOND, SIZE_UNITS } from '../../utils/consts'; -import { createObjectFromJson, sendKeys } from '../../utils/helpers'; - -const expandValue = String(Number(testPVC.size) + 1); - -const createDeployment = () => - execSync(`echo '${JSON.stringify(testDeployment)}' | kubectl create -f -`); - -const expandPVC = async (pvcName: string, value: string, sizeUnit: SIZE_UNITS) => { - await goToPersistentVolumeClaims(); - await clickKebabAction(pvcName, 'Expand PVC'); - await browser.wait(until.visibilityOf(inputPVCSize)); - await inputPVCSize.clear(); - await sendKeys(inputPVCSize, value); - await click(capacityUnitDropdown); - await click(expandSizeOption(sizeUnit)); - await click(expandButton); -}; - -const getPVCRequestedStorage = (pvcName: string) => { - const pvcJSON = JSON.parse( - execSync(`kubectl get pvc ${pvcName} -n ${testName} -o json`).toString(), - ); - return pvcJSON.spec.resources.requests.storage; -}; - -describe('Tests Expand flow for PVC', () => { - beforeAll(async () => { - await createNewPersistentVolumeClaim(testPVC, true, createDeployment); - }); - - it('Test PVC can be expanded (In MiBs)', async () => { - await expandPVC(testPVC.name, expandValue, SIZE_UNITS.MI); - await browser.sleep(5 * SECOND); - const requestedStorage = getPVCRequestedStorage(testPVC.name); - expect(requestedStorage.trim()).toEqual(`${expandValue}${SIZE_UNITS.MI}`); - }); - - it('Test PVC can be expanded (In GiBs)', async () => { - await expandPVC(testPVC.name, expandValue, SIZE_UNITS.GI); - await browser.sleep(5 * SECOND); - const requestedStorage = getPVCRequestedStorage(testPVC.name); - expect(requestedStorage.trim()).toEqual(`${expandValue}${SIZE_UNITS.GI}`); - }); - - it('Test PVC can be expanded (In TiBs)', async () => { - await expandPVC(testPVC.name, '1', SIZE_UNITS.TI); - await browser.sleep(5 * SECOND); - const requestedStorage = getPVCRequestedStorage(testPVC.name); - expect(requestedStorage.trim()).toEqual(`1${SIZE_UNITS.TI}`); - }); - - it('Test RBD PVC can be expanded (In MiBs)', async () => { - await createNewPersistentVolumeClaim(testRbdPVC, true); - await createObjectFromJson(testDeploymentRbd); - await browser.sleep(5 * SECOND); - await expandPVC(testRbdPVC.name, expandValue, SIZE_UNITS.MI); - await browser.sleep(5 * SECOND); - const requestedStorage = getPVCRequestedStorage(testRbdPVC.name); - expect(requestedStorage.trim()).toEqual(`${expandValue}${SIZE_UNITS.MI}`); - }); -}); diff --git 
a/frontend/packages/ceph-storage-plugin/integration-tests/tests/3-tests/multiple-storage-class-selection.scenario.ts b/frontend/packages/ceph-storage-plugin/integration-tests/tests/3-tests/multiple-storage-class-selection.scenario.ts deleted file mode 100644 index c5c611bf462e..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests/tests/3-tests/multiple-storage-class-selection.scenario.ts +++ /dev/null @@ -1,113 +0,0 @@ -import { execSync } from 'child_process'; -import { ExpectedConditions as until, browser } from 'protractor'; -import { K8sResourceKind } from '@console/internal/module/k8s'; -import { appHost } from '@console/internal-integration-tests/protractor.conf'; -import { click } from '@console/shared/src/test-utils/utils'; -import { DeviceSet } from '../../../src/types'; -import { OCS_INTERNAL_CR_NAME } from '../../../src/constants'; -import { getCurrentDeviceSetIndex } from '../../../src/utils/add-capacity'; -import { testEbsSC, testNoProvisionerSC } from '../../mocks/storage-class'; -import { - goToInstalledOperators, - currentACSelector, - clickKebabAction, -} from '../../views/add-capacity.view'; -import { NS } from '../../utils/consts'; - -const fetchStorageClusterJson = () => - JSON.parse( - execSync( - `kubectl get --ignore-not-found storagecluster ${OCS_INTERNAL_CR_NAME} -n ${NS} -o json`, - { - encoding: 'utf8', - }, - ), - ); - -const addCapacity = async (uid: string, scName: string) => { - await clickKebabAction(uid, 'Add Capacity'); - await click(currentACSelector.scDropdown); - await click(currentACSelector.getSCOption(scName)); - await click(currentACSelector.confirmButton); -}; - -describe('Add capacity using multiple storage classes', () => { - const beforeCapacityAddition = { - deviceSets: null, - portability: null, - devicesCount: null, - }; - beforeAll(async () => { - execSync(`echo '${JSON.stringify(testEbsSC)}' | kubectl apply -f -`); - execSync(`echo '${JSON.stringify(testNoProvisionerSC)}' | kubectl apply -f -`); - await browser.get(`${appHost}/`); - await goToInstalledOperators(); - await click(currentACSelector.ocsOp); - await browser.wait(until.presenceOf(currentACSelector.storageClusterNav)); - await click(currentACSelector.storageClusterNav); - }); - afterAll(() => { - execSync(`echo '${JSON.stringify(testEbsSC)}' | kubectl delete -f -`); - execSync(`echo '${JSON.stringify(testNoProvisionerSC)}' | kubectl delete -f -`); - }); - describe('Add capacity with a new storage class having EBS as provisioner', async () => { - const { name: scName } = testEbsSC.metadata; - let index: number; - let deviceSets: DeviceSet[]; - beforeAll(async () => { - const json: K8sResourceKind = fetchStorageClusterJson(); - beforeCapacityAddition.deviceSets = json.spec.storageDeviceSets.length; - await addCapacity(json.metadata.uid, scName); - const latestJson: K8sResourceKind = fetchStorageClusterJson(); - deviceSets = latestJson.spec.storageDeviceSets; - index = getCurrentDeviceSetIndex(deviceSets, scName); - }); - it('New device set is created', () => - expect(deviceSets.length).toBe(beforeCapacityAddition.deviceSets + 1)); - it('Device count is 1 in the new device set', () => expect(deviceSets[index].count).toBe(1)); - it('Osd portability is enabled in the new device set', () => - expect(deviceSets[index].portable).toBe(true)); - }); - describe('Add capacity with an existing storage class having EBS as provisioner', async () => { - const { name: scName } = testEbsSC.metadata; - let latestDeviceSets: DeviceSet[]; - let latestIndex: number; - 
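// snapshot the device set count, portability and device count before adding capacity so they can be compared afterwards -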
beforeAll(async () => { - const json: K8sResourceKind = fetchStorageClusterJson(); - const deviceSets: DeviceSet[] = json.spec.storageDeviceSets; - const index = getCurrentDeviceSetIndex(deviceSets, scName); - beforeCapacityAddition.deviceSets = deviceSets.length; - beforeCapacityAddition.portability = deviceSets[index].portable; - beforeCapacityAddition.devicesCount = deviceSets[index].count; - await addCapacity(json.metadata.uid, scName); - const latestJson: K8sResourceKind = fetchStorageClusterJson(); - latestDeviceSets = latestJson.spec.storageDeviceSets; - latestIndex = getCurrentDeviceSetIndex(latestDeviceSets, scName); - }); - - it('New device set is not created', () => - expect(latestDeviceSets.length).toBe(beforeCapacityAddition.deviceSets)); - it('Devices count is incremented by 1 in the corresponding device set', () => - expect(latestDeviceSets[latestIndex].count).toBe(beforeCapacityAddition.devicesCount + 1)); - it('Osd portability is not modified in the corresponding device set', () => - expect(latestDeviceSets[latestIndex].portable).toBe(beforeCapacityAddition.portability)); - }); - describe(`Add capacity with a new storage class having NO-PROVISIONER as provisioner`, async () => { - const { name: scName } = testNoProvisionerSC.metadata; - let deviceSets: DeviceSet[]; - let index: number; - beforeAll(async () => { - const json: K8sResourceKind = fetchStorageClusterJson(); - beforeCapacityAddition.deviceSets = json.spec.storageDeviceSets.length; - await addCapacity(json.metadata.uid, scName); - const latestJson: K8sResourceKind = fetchStorageClusterJson(); - deviceSets = latestJson.spec.storageDeviceSets; - index = getCurrentDeviceSetIndex(deviceSets, scName); - }); - it('New device set is created', () => - expect(deviceSets.length).toBe(beforeCapacityAddition.deviceSets + 1)); - it('Device count is 1 in the new device set', () => expect(deviceSets[index].count).toBe(1)); - it('Osd portability is disabled in the new device set', () => - expect(deviceSets[index].portable).toBe(false)); - }); -}); diff --git a/frontend/packages/ceph-storage-plugin/integration-tests/tests/3-tests/upgrade.scenario.ts b/frontend/packages/ceph-storage-plugin/integration-tests/tests/3-tests/upgrade.scenario.ts deleted file mode 100644 index ae6af127ac99..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests/tests/3-tests/upgrade.scenario.ts +++ /dev/null @@ -1,58 +0,0 @@ -import { browser } from 'protractor'; -import { appHost } from '@console/internal-integration-tests/protractor.conf'; -import { - changeChannel, - changeCatalogSourceImage, - channelChangeButton, - channel43, - channel44, - channel45, - operatorVersion, - waitUntilStorageClusterReady, -} from '../../views/upgrade.view'; -import { CHANNEL_43, CHANNEL_44, CHANNEL_45, MINUTE } from '../../utils/consts'; - -describe('Test OCS version upgrade.', () => { - beforeAll(async () => { - await browser.get(`${appHost}/`); - jasmine.DEFAULT_TIMEOUT_INTERVAL += 120 * MINUTE; - }); - - afterAll(() => { - jasmine.DEFAULT_TIMEOUT_INTERVAL -= 120 * MINUTE; - }); - - it('Test channel change from 4.2 to 4.3', async () => { - // Pre-requisites: OCS 4.2 is installed - await changeCatalogSourceImage('latest-stable-4.3.0'); - await changeChannel(channel43); - expect(channelChangeButton.getText()).toEqual(CHANNEL_43); - }); - - it('Test that OCS operator version changed to 4.3', async () => { - await waitUntilStorageClusterReady(); - expect(operatorVersion()).toContain('4.3.'); - }); - - it('Test channel change from 4.3 to 4.4', async () 
=> { - await changeCatalogSourceImage('latest-stable-4.4.0'); - await changeChannel(channel44); - expect(channelChangeButton.getText()).toEqual(CHANNEL_44); - }); - - it('Test that OCS operator version changed to 4.4', async () => { - await waitUntilStorageClusterReady(); - expect(operatorVersion()).toContain('4.4.'); - }); - - xit('Test channel change from 4.4 to 4.5', async () => { - // await changeCatalogSourceImage('latest-stable-4.5.0'); - await changeChannel(channel45); - expect(channelChangeButton.getText()).toEqual(CHANNEL_45); - }); - - xit('Test that OCS operator version changed to 4.5', async () => { - await waitUntilStorageClusterReady(); - expect(operatorVersion()).toContain('4.5.'); - }); -}); diff --git a/frontend/packages/ceph-storage-plugin/integration-tests/utils/consts.ts b/frontend/packages/ceph-storage-plugin/integration-tests/utils/consts.ts deleted file mode 100644 index 3adb558016e2..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests/utils/consts.ts +++ /dev/null @@ -1,110 +0,0 @@ -export const OCS_OP = 'OpenShift Container Storage'; -export const NS = 'openshift-storage'; - -export const SECOND = 1000; -export const MINUTE = 60 * SECOND; - -export enum POD_NAME_PATTERNS { - OCS = 'ocs-operator-', - ROOK = 'rook-ceph-operator-', - NOOBA_OPERATOR = 'noobaa-operator-', - NOOBAA_CORE = 'noobaa-core-', - ROOK_CEPH_MON = 'rook-ceph-mon', - ROOK_CEPH_MGR = 'rook-ceph-mgr', - CSI_CEPHFS = 'csi-cephfsplugin-', - CSI_RBD = 'csi-rbdplugin-', - ROOK_CEPH_MDS = 'rook-ceph-mds-ocs-storagecluster-cephfilesystem', - ROOK_CEPH_OSD = 'rook-ceph-osd-', - ROOK_CEPH_OSD_PREPARE = 'rook-ceph-osd-prepare-', -} - -export enum STORAGE_CLASS_PATTERNS { - RBD = 'ocs-storagecluster-ceph-rbd', - FS = 'ocs-storagecluster-cephfs', - NOOBAA = 'noobaa.io', -} - -export enum CLUSTER_STATUS { - READY = 'Ready', - PROGRESSING = 'Progressing', - HEALTH_ERROR = 'HEALTH_ERR', -} - -export const OCS_NODE_LABEL = 'cluster.ocs.openshift.io/openshift-storage'; -export const CATALOG_SRC = 'redhat-operators'; - -export const KIND = 'storagecluster'; -export const EXPAND_WAIT = 15 * MINUTE; -export const CAPACITY_UNIT = 'TiB'; -export const CAPACITY_VALUE = '2'; -export const OCS_OPERATOR_NAME = 'ocs-operatorv4'; -export const STORAGE_CLUSTER_NAME = 'ocs-storagecluster'; -export const HOST = 'host'; -export const ZONE = 'zone'; -export const OSD = 'osd'; - -export const SUCCESS = 'Succeeded'; -export const READY_FOR_USE = 'ready for use'; - -export const ocsTaint = Object.freeze({ - key: 'node.ocs.openshift.io/storage', - value: 'true', - effect: 'NoSchedule', -}); - -export enum VOLUME_ACCESS_MODES { - RWO = 'ReadWriteOnce', - RWX = 'ReadWriteMany', - ROX = 'ReadOnlyMany', -} - -export enum SIZE_UNITS { - MI = 'Mi', - GI = 'Gi', - TI = 'Ti', -} - -export enum PVC_STATUS { - PENDING = 'Pending', - BOUND = 'Bound', -} - -export enum OCP_TEXT_STATUS { - HEALTHY = 'healthy', - DEGRADED = 'health is degraded', - NOT_AVAILABLE = 'is not available', -} - -export enum OCP_HEALTH_ICON_COLORS { - GREEN = '#486b00', - YELLOW = '#f0ab00', - RED = '#c9190b', - GREEN46 = '#3e8635', -} - -export const EXAMPLE_PVC = { - name: 'example-pvc', - namespace: NS, - size: '5', - sizeUnits: SIZE_UNITS.GI, - storageClass: STORAGE_CLASS_PATTERNS.RBD, - accessMode: VOLUME_ACCESS_MODES.RWO, -}; - -export const CHANNEL_43 = 'stable-4.3'; -export const CHANNEL_44 = 'stable-4.4'; -export const CHANNEL_45 = 'stable-4.5'; - -export const LSO_INFO_MSG = 'Local Storage Operator Not Installed'; -export const
SC_STEPS_NAME = { - DISCOVERY: 'Discover Disks', - STORAGECLASS: 'Create Storage Class', - STORAGECLUSTER: 'Create Storage Cluster', -}; -export const CONFIRM_MODAL_TITLE = 'Create Storage Class'; - -export const OSD_SIZES_MAP = { - '512Gi': 0.5, - '2Ti': 2, - '4Ti': 4, -}; diff --git a/frontend/packages/ceph-storage-plugin/integration-tests/utils/helpers.ts b/frontend/packages/ceph-storage-plugin/integration-tests/utils/helpers.ts deleted file mode 100644 index 5567c7bc8e5e..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests/utils/helpers.ts +++ /dev/null @@ -1,204 +0,0 @@ -import { execSync } from 'child_process'; -import * as _ from 'lodash'; -import { ExpectedConditions as until, browser, $ } from 'protractor'; -import * as crudView from '@console/internal-integration-tests/views/crud.view'; -import { OSD, SECOND, ocsTaint } from './consts'; - -export const checkIfClusterIsReady = async () => { - let stillLoading = true; - while (stillLoading) { - const scRes = JSON.parse( - execSync('kubectl get -o json -n openshift-storage storagecluster').toString(), - ); - if (scRes?.items?.[0]?.status?.phase === 'Ready') { - stillLoading = false; - } - /* eslint-disable no-await-in-loop */ - await browser.sleep(10 * SECOND); - } -}; - -export const waitUntil = async (functor, expected, count = 1) => { - const value = await functor(); - if (value < expected) { - await browser.sleep(2 * SECOND); - await waitUntil(functor, expected, count - 1); - } - return true; -}; - -export const waitFor = async (element, text, count = 1) => { - let rowNumber = 0; - while (rowNumber !== count) { - await browser.wait(until.visibilityOf(element)); - const elemText = await element.getText(); - if (elemText.includes(text)) { - rowNumber += 1; - } else { - rowNumber = 0; - } - /* eslint-disable no-await-in-loop */ - await browser.sleep(5 * SECOND); - } -}; - -export const getPodData = (list, pattern: string) => { - const pods = []; - list.forEach((item) => { - if (item.metadata.name.includes(pattern)) pods.push(item); - }); - if (pods.length === 1) return pods[0]; - return pods; -}; - -export const getPodPhase = (pod) => { - return pod.status.phase; -}; - -export const getPodRestartCount = (pod) => { - return pod.status.containerStatuses[0].restartCount; -}; - -export const getPodName = (pod) => { - return pod.metadata.name; -}; - -export const testPodIsRunning = (podPhase: string) => expect(podPhase).toBe('Running'); -export const testPodIsSucceeded = (podPhase: string) => expect(podPhase).toBe('Succeeded'); - -export const getDataFromRowAndCol = async ( - row: number, - col: number, - filter: Function, -): Promise => { - /** - * Row is the data-row you want to parse ( row count starts from 0 ) - * Col is the col you want to parse (Col count starts from 1 ) - * filter is applied to getText value of the (row, col) - */ - await browser.wait(until.visibilityOf($(`tr[data-index="${row}"] td:nth-child(${col})`))); - await browser.wait(until.and(crudView.untilNoLoadersPresent)); - const text = await $(`tr[data-index="${row}"] td:nth-child(${col})`).getText(); - const filtered = filter(text); - return filtered; -}; - -export const podNameFilter = (name) => name.split('\n')[2]; -// Works for status and readiness -export const statusFilter = (stat) => stat.split('\n')[0]; - -export const sendKeys = async (element, keys: string) => { - await browser.wait(until.visibilityOf(element)); - await element.clear(); - await element.sendKeys(keys); - await browser.sleep(200); -}; - -export const verifyNodeLabels 
= (nodeName: string, label: string): boolean => { - const node = JSON.parse(execSync(`oc get node ${nodeName} -o json`).toString('utf-8')); - const labels = Object.keys(node.metadata.labels); - if (labels.includes(label)) return true; - return false; -}; - -export const isPodPresent = (pods, podName) => { - const podObj = pods.items.find((pod) => getPodName(pod) === podName); - return podObj || ''; -}; - -export const refreshIfNotVisible = async (element, maxTimes = 1) => { - let isVisible = await element.isPresent(); - let count = 0; - while (count < maxTimes) { - if (!isVisible) { - /* eslint-disable no-await-in-loop */ - await browser.refresh(); - await browser.sleep(5 * SECOND); - isVisible = await element.isPresent(); - } - count += 1; - } -}; - -export const getIds = (nodes: NodeType[], type: string): number[] => - nodes.filter((node) => node.type === type).map((node) => node.id); - -export const getNewOSDIds = (nodes: NodeType[], osds: number[]): number[] => - nodes.filter((node) => node.type === OSD && osds.indexOf(node.id) === -1).map((node) => node.id); - -// create a dictionary for faster access O(1) -export const createOSDTreeMap = (nodes: NodeType[]): FormattedOsdTreeType => { - const tree = {}; - nodes.forEach((node) => { - tree[node.id] = node; - }); - return tree; -}; - -export const verifyZoneOSDMapping = ( - zones: number[], - osds: number[], - osdtree: FormattedOsdTreeType, -): boolean => { - let filteredOsds = [...osds]; - zones.forEach((zone) => { - const hostId = osdtree[zone].children[0]; - const len = osdtree[hostId].children.length; - filteredOsds = filteredOsds.filter((osd) => osd !== osdtree[hostId].children[len - 1]); - }); - - return filteredOsds.length === 0; -}; - -export const verifyNodeOSDMapping = ( - nodes: number[], - osds: number[], - osdtree: FormattedOsdTreeType, -): boolean => { - let filteredOsds = [...osds]; - nodes.forEach((node) => { - const len = osdtree[node].children.length; - filteredOsds = filteredOsds.filter((osd) => osd !== osdtree[node].children[len - 1]); - }); - - return filteredOsds.length === 0; -}; - -export const hasNoTaints = (node) => _.isEmpty(node.spec?.taints); - -export const hasOCSTaint = (node) => { - const taints = node?.spec?.taints || []; - return taints.some((taint) => _.isEqual(taint, ocsTaint)); -}; - -export const createObjectFromJson = (objectJson) => - execSync(`echo '${JSON.stringify(objectJson)}' | kubectl create -f -`); - -export type NodeType = { - id: number; - name: string; - type: string; - type_id: number; - children: number[]; - pool_weights?: {}; - device_class?: string; - crush_weight?: number; - depth?: number; - exists?: number; - status?: string; - reweight?: number; - primary_affinity?: number; -}; - -export type FormattedOsdTreeType = { - [key: string]: NodeType; -}; - -export type PvcType = { - name: string; - namespace: string; - size: string; - sizeUnits: string; - storageClass?: string; - accessMode?: string; -}; diff --git a/frontend/packages/ceph-storage-plugin/integration-tests/views/add-capacity.view.ts b/frontend/packages/ceph-storage-plugin/integration-tests/views/add-capacity.view.ts deleted file mode 100644 index 8c46501aaf1b..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests/views/add-capacity.view.ts +++ /dev/null @@ -1,71 +0,0 @@ -import { $, ExpectedConditions as until, browser } from 'protractor'; -import * as crudView from '@console/internal-integration-tests/views/crud.view'; -import * as sideNavView from
'@console/internal-integration-tests/views/sidenav.view'; -import { click } from '@console/shared/src/test-utils/utils'; -import { currentSelectors, VERSION } from './installFlow.view'; -import { CAPACITY_UNIT, OCS_OP } from '../utils/consts'; - -/** - * All generic selectors go into Defaults - * All OCP X.Y selectors that are not compatible with OCP > X.(Y + 1) go into their own object. - * Everything else in DEFAULTS - */ - -const DEFAULTS = { - ocsOp: $(`a[data-test-operator-row='${OCS_OP}']`), - actionForLabel: (label: string) => $(`button[data-test-action='${label}']`), - confirmButton: $('#confirm-action'), - storageClusterRow: (uid: string) => $(`tr[data-id='${uid}']`), - getSCOption: (scName: string) => $(`a[id='${scName}-link']`), - capacityValueInput: $('input.ceph-add-capacity__input'), - totalRequestedcapacity: $('div.ceph-add-capacity__input--info-text strong'), - scDropdown: $('button[id="ceph-sc-dropdown"]'), - storageClusterNav: $('a[data-test-id="horizontal-link-Storage Cluster"]'), - getProgressingStateEl: (statusCol) => statusCol.$('span.co-icon-and-text'), - getReadyStateEl: (statusCol) => statusCol.$('span.co-icon-and-text span.co-icon-and-text span'), -}; - -// TODO: NEHA, add support for other versions -export const currentACSelector = (() => { - switch (VERSION) { - default: - return DEFAULTS; - } -})(); - -export const verifyFields = async (size: number) => { - await browser.wait(until.presenceOf(currentACSelector.capacityValueInput)); - await browser.wait(until.presenceOf(currentACSelector.totalRequestedcapacity)); - expect(currentACSelector.capacityValueInput.getAttribute('value')).toBe(String(size)); - expect(currentACSelector.totalRequestedcapacity.getText()).toEqual( - `${(size * 3).toFixed(2)} ${CAPACITY_UNIT}`, - ); -}; - -export const clickKebabAction = async (uid: string, actionLabel: string) => { - await browser.wait(until.presenceOf(currentACSelector.storageClusterRow(uid))); - const kebabMenu = currentACSelector - .storageClusterRow(uid) - .$('button[data-test-id="kebab-button"]'); - await click(kebabMenu); - await browser.wait(until.presenceOf(currentACSelector.actionForLabel(actionLabel))); - await click(currentACSelector.actionForLabel(actionLabel)); -}; - -export const goToInstalledOperators = async () => { - await browser.wait(until.and(crudView.untilNoLoadersPresent)); - await sideNavView.clickNavLink(['Operators', 'Installed Operators']); - await browser.wait(until.and(crudView.untilNoLoadersPresent)); - await click(currentSelectors.namespaceDropdown); - await click(currentSelectors.openshiftStorageItem); - await browser.wait(until.and(crudView.untilNoLoadersPresent)); -}; - -export const selectSCDropdown = async (uid: string) => { - await goToInstalledOperators(); - await click(currentACSelector.ocsOp); - await browser.wait(until.presenceOf(currentACSelector.storageClusterNav)); - await click(currentACSelector.storageClusterNav); - await clickKebabAction(uid, 'Add Capacity'); - await click(currentACSelector.scDropdown); -}; diff --git a/frontend/packages/ceph-storage-plugin/integration-tests/views/installFlow.view.ts b/frontend/packages/ceph-storage-plugin/integration-tests/views/installFlow.view.ts deleted file mode 100644 index 7b781fe6d87d..000000000000 --- a/frontend/packages/ceph-storage-plugin/integration-tests/views/installFlow.view.ts +++ /dev/null @@ -1,284 +0,0 @@ -import * as path from 'path'; -import { $, ExpectedConditions as until, browser, $$ } from 'protractor'; -import * as crudView from
'@console/internal-integration-tests/views/crud.view'; -import * as sideNavView from '@console/internal-integration-tests/views/sidenav.view'; -import { click, getOperatorHubCardIndex } from '@console/shared/src/test-utils/utils'; -import { OCS_OP, SECOND, OCS_OPERATOR_NAME, SUCCESS, READY_FOR_USE } from '../utils/consts'; -import { waitFor, refreshIfNotVisible, waitUntil } from '../utils/helpers'; - -enum Version { - OCP_44 = 'OCP_4.4', - OCP_45 = 'OCP_4.5', - LATEST = 'LATEST', -} - -export enum Platform { - OCP = 'OCP', - OCS = 'OCS', -} - -export enum Mode { - CONVERGED = 'CONVERGED', - EXTERNAL = 'EXTERNAL', - ATTACHED_DEVICES = 'ATTACHED_DEVICES', -} - -/** - * Env vars affect what selectors are activated and what tests are run - */ -export const LIVE = process.env.OCS_LIVE; -export const VERSION = process.env.OCP_VERSION || Version.LATEST; -export const TEST_PLATFORM = process.env.TEST_PLATFORM || Platform.OCP; -export const MODE = process.env.MODE || Mode.CONVERGED; - -/** - * All generic selectors go into Defaults - * All OCP X.Y selectors that are not compatible with OCP > X.(Y + 1) go into their own object. - * Everything else in DEFAULTS - */ - -const DEFAULTS = { - primaryButton: $('.pf-m-primary'), - - // Operator Hub & Installed Operators - ocsOperator: $('a[data-test-operator-row="OpenShift Container Storage"]'), - ocsOperatorStatus: $('.co-clusterserviceversion-row__status'), - createLink: $('.pf-c-card__footer a'), - searchInputOperatorHub: $('input[placeholder="Filter by keyword..."]'), - searchInputOperators: $('[data-test-id="list-page-search-input"]'), - ocsOperatorInstallHeading: $('.co-clusterserviceversion-install__heading'), - - // Subscription Page - dropdownForNamespace: $('#dropdown-selectbox'), - customNamespaceRadio: $('input[value="OwnNamespace"]'), - selectNamespace: (namespace: string) => $(`#${namespace}-Project-link`), - - // Create storage cluster page - selectAllBtn: $('[data-key="0"] input'), - - CATALOG_SRC: LIVE !== '1' ?
-  CATALOG_SRC: LIVE !== '1' ? 'redhat-operators' : 'ocs-catalogsource',
-  OCS_NAME: 'ocs-operator',
-  ocsLink: (elem, catalogSource) =>
-    $(`a[data-test="${elem}-${catalogSource}-openshift-marketplace"]`),
-
-  // General Items
-  namespaceDropdown: $('.co-namespace-selector button'),
-  openshiftStorageItem: $('#openshift-storage-link'),
-
-  // Size Dropdown
-  sizeDropdown: $('button[id="ocs-service-capacity-dropdown"]'),
-  optionSmallSize: $('button[id="512Gi-link"]'),
-
-  // Namespace
-  label: `openshift.io/cluster-monitoring=true`,
-
-  nodeListHandler: async () => {
-    // Node list fluctuates
-    await browser.wait(until.and(crudView.untilNoLoadersPresent));
-    await browser.wait(until.visibilityOf($('[aria-label="Node Table"] tbody tr')));
-    const rowCount = async () => $$('[aria-label="Node Table"] tbody tr').count();
-    await waitUntil(rowCount, 3, 5);
-  },
-
-  getStorageClusterLink: async () => {
-    const index = await getOperatorHubCardIndex('Storage Cluster');
-    return $(`article:nth-child(${index + 1}) a`);
-  },
-
-  // Node names in the Node List Table
-  nodeNames: $$('tbody [data-key="1"]'),
-  // Node locations in the Node List Table
-  nodeLocations: $$('tbody [data-key="3"]'),
-
-  // Select Installation Mode
-  independentModeButton: $('input[value="External"]'),
-
-  // Select Attached Devices Mode
-  attachedDevicesMode: $('input[value="Internal - Attached Devices"]'),
-
-  // attached devices
-  LSOAlert: $('.pf-c-alert__title'),
-  LSOWizard: $('.ceph-create-sc-wizard'),
-  scDropdown: $('#ceph-sc-dropdown'),
-  selectSC: (sc: string) => $(`#${sc}-link`),
-  createNewSCBtn: $('.ceph-ocs-install__create-new-sc-btn'),
-  currentStep: $('.ceph-create-sc-wizard .pf-m-current'),
-  volumeSetName: $('#create-lvs-volume-set-name'),
-  confirmModal: $('.pf-c-modal-box__title'),
-  localVolumeSetView: $('.ceph-ocs-install__form-wrapper'),
-  createStorageClusterView: $('.co-m-pane__form'),
-  confirmBtn: $('.pf-c-modal-box__footer .pf-m-primary'),
-  nodeList: $('.ceph-node-list__max-height'),
-  errorAlert: $('.pf-m-danger'),
-  nodesCntOnLVS: $('.ceph-ocs-install__stats div:first-child'),
-  nodeNamesForAD: $$('tbody [data-key="0"]'),
-
-  fileUploadButton: $('#inputButton'),
-};
-
-const OCP_44 = {
-  ocsOperator: $('a[data-test-operator-row="OpenShift Container Storage"]'),
-  ocsOperatorStatus: $('.co-clusterserviceversion-row__status'),
-  createLink: $('.pf-c-card__footer a'),
-  searchInputOperators: $('input[placeholder="Filter by name..."]'),
-};
-
-const OCP_45 = {
-  independentModeButton: $('input[name="independent-mode"]'),
-};
-
-export const currentSelectors = (() => {
-  switch (VERSION) {
-    case Version.OCP_44:
-      return Object.assign(DEFAULTS, OCP_44);
-    case Version.OCP_45:
-      return Object.assign(DEFAULTS, OCP_45);
-    default:
-      return DEFAULTS;
-  }
-})();
-
-// Navigation
-export const goToInstalledOperators = async () => {
-  await sideNavView.clickNavLink(['Operators', 'Installed Operators']);
-  await browser.wait(until.and(crudView.untilNoLoadersPresent));
-};
-
-export const goToOperatorHub = async () => {
-  await sideNavView.clickNavLink(['Operators', 'OperatorHub']);
-  await browser.wait(until.and(crudView.untilNoLoadersPresent));
-};
-
-export const searchInOperatorHub = async (searchParam, catalogSource) => {
-  await browser.wait(until.visibilityOf(currentSelectors.searchInputOperatorHub));
-  await currentSelectors.searchInputOperatorHub.sendKeys(searchParam);
-  const ocs = await currentSelectors.ocsLink(currentSelectors.OCS_NAME, catalogSource);
-  await browser.wait(until.visibilityOf(ocs));
-  return ocs;
-};
-
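nodeListHandler above copes with a node table that re-renders while rows stream in: it polls the row count until it settles on the expected value. The waitUntil helper it calls lives in ../utils/helpers and is not part of this hunk; a plausible sketch of its contract, assuming a (producer, expected value, retry count) signature:

```ts
// Hedged sketch of waitUntil(fn, expected, retries) from '../utils/helpers';
// the real implementation is outside this hunk, so the signature and the
// polling interval are assumptions.
const sleep = (ms: number) => new Promise<void>((resolve) => setTimeout(resolve, ms));

export const waitUntil = async <T>(
  fn: () => Promise<T>,
  expected: T,
  retries = 5,
  intervalMs = 1000,
): Promise<void> => {
  for (let i = 0; i < retries; i += 1) {
    if ((await fn()) === expected) return; // value settled, stop polling
    await sleep(intervalMs);
  }
  throw new Error(`Value did not settle on ${String(expected)} after ${retries} retries`);
};
```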
-export const goToWorkLoads = async () => {
-  await browser.wait(until.and(crudView.untilNoLoadersPresent));
-  await sideNavView.clickNavLink(['Workloads', 'Pods']);
-  await browser.wait(until.and(crudView.untilNoLoadersPresent));
-  await browser.wait(until.visibilityOf(currentSelectors.namespaceDropdown));
-  await $('.co-namespace-selector button').click();
-  await browser.wait(until.elementToBeClickable(currentSelectors.openshiftStorageItem));
-  await $('#openshift-storage-link').click();
-  await browser.wait(until.and(crudView.untilNoLoadersPresent));
-};
-
-// Operators page
-export const selectWorkerRows = async () => {
-  const isAllSelected = await currentSelectors.selectAllBtn.isSelected();
-  if (isAllSelected === false) await click(currentSelectors.selectAllBtn);
-  const nodeNames = await currentSelectors.nodeNames;
-  const nodesLocations = await currentSelectors.nodeLocations;
-  const selectedNodes = nodeNames.map((nodeName) => nodeName.getText());
-  const workersAZ = nodesLocations.map((nodeName) => nodeName.getText());
-  return { selectedNodes, workersAZ };
-};
-
-export const filterInput = $('[placeholder="Filter by name..."]');
-export const goToStorageClasses = async () => {
-  await sideNavView.clickNavLink(['Storage', 'Storage Classes']);
-  await browser.wait(until.and(crudView.untilNoLoadersPresent));
-};
-
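The InstallCluster class below waits for five consecutive 'Succeeded' reads of the CSV status before trusting it, because the status flaps while installation progresses. waitFor comes from ../utils/helpers and is not shown in this patch; a sketch of the consecutive-stable-reads idea under that assumed (element, text, count) signature:

```ts
// Assumed shape of waitFor(element, text, count): the text must be observed
// `count` times in a row; any flap resets the streak. maxAttempts is an
// assumption to keep the sketch bounded.
import { ElementFinder, browser } from 'protractor';

export const waitForStableText = async (
  element: ElementFinder,
  text: string,
  count = 1,
  maxAttempts = 300,
): Promise<void> => {
  let streak = 0;
  for (let attempt = 0; attempt < maxAttempts && streak < count; attempt += 1) {
    const current = await element.getText();
    streak = current.includes(text) ? streak + 1 : 0;
    await browser.sleep(1000);
  }
  if (streak < count) throw new Error(`"${text}" did not stay stable for ${count} reads`);
};
```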
-export class InstallCluster {
-  async subscribeToOperator(catalogSource = currentSelectors.CATALOG_SRC) {
-    await goToOperatorHub();
-    const ocsOp = await searchInOperatorHub(OCS_OP, catalogSource);
-    await click(ocsOp);
-    await browser.sleep(2 * SECOND);
-    await click(currentSelectors.primaryButton);
-    await browser.refresh();
-    await browser.wait(until.and(crudView.untilNoLoadersPresent), 100 * SECOND);
-    await click(currentSelectors.primaryButton);
-  }
-
-  async checkOCSOperatorInstallation() {
-    if (VERSION === 'LATEST') {
-      await this.checkOCSOperatorInstallationCommon();
-    } else {
-      await browser.wait(until.visibilityOf(currentSelectors.searchInputOperators));
-      await currentSelectors.searchInputOperators.sendKeys(OCS_OPERATOR_NAME);
-      // The operator status can flap a few times, so wait for 5 consecutive
-      // Succeeded reads to be sure the operator is installed properly.
-      await waitFor(currentSelectors.ocsOperatorStatus, SUCCESS, 5);
-      const text = await currentSelectors.ocsOperatorStatus.getText();
-      // Operator is installed successfully
-      expect(text.includes(SUCCESS)).toBe(true);
-    }
-  }
-
-  async checkOCSOperatorInstallationCommon() {
-    await browser.wait(until.and(crudView.untilNoLoadersPresent));
-    await browser.wait(until.presenceOf(currentSelectors.ocsOperatorInstallHeading));
-    await waitFor(currentSelectors.ocsOperatorInstallHeading, READY_FOR_USE);
-    const text = await currentSelectors.ocsOperatorInstallHeading.getText();
-    // Operator is installed successfully
-    expect(text.includes(READY_FOR_USE)).toBe(true);
-    await click(currentSelectors.primaryButton);
-  }
-
-  async subscribeToLSOOperator() {
-    await click(currentSelectors.primaryButton);
-    await browser.wait(until.and(crudView.untilNoLoadersPresent));
-    await click(currentSelectors.primaryButton);
-    await browser.sleep(5 * SECOND);
-    await browser.refresh();
-    await browser.wait(until.and(crudView.untilNoLoadersPresent), 100 * SECOND);
-    await click(currentSelectors.primaryButton);
-    await this.checkOCSOperatorInstallationCommon();
-  }
-
-  async storageClusterCreationCommon() {
-    if (VERSION !== 'LATEST') {
-      await click(currentSelectors.ocsOperator);
-    }
-    // In fresh clusters the APIs are not shown yet (last seen in OCP 4.3)
-    try {
-      await browser.wait(until.visibilityOf(currentSelectors.createLink), 10 * SECOND);
-    } catch {
-      await refreshIfNotVisible(currentSelectors.createLink, 5);
-    }
-    const storageClusterLink = await currentSelectors.getStorageClusterLink();
-    await click(storageClusterLink);
-  }
-
-  async createConvergedStorageCluster() {
-    await this.storageClusterCreationCommon();
-    await currentSelectors.nodeListHandler();
-    const { selectedNodes, workersAZ } = await selectWorkerRows();
-    await click(currentSelectors.sizeDropdown);
-    await click(currentSelectors.optionSmallSize);
-    await click(currentSelectors.primaryButton);
-    await browser.wait(until.and(crudView.untilNoLoadersPresent));
-    return { selectedNodes, workersAZ };
-  }
-
-  async selectOCSOperator() {
-    await browser.wait(
-      until.visibilityOf($('.co-clusterserviceversion-logo__name__clusterserviceversion')),
-    );
-    await click(currentSelectors.ocsOperator);
-  }
-
-  async createAttachedStorageCluster() {
-    await this.storageClusterCreationCommon();
-    await click(currentSelectors.attachedDevicesMode);
-    await browser.wait(until.and(crudView.untilNoLoadersPresent));
-  }
-
-  async createExternalStorageCluster() {
-    const UPLOAD_FILE_PATH = path.resolve(__dirname, '../mocks/testFile.json');
-    await this.storageClusterCreationCommon();
-    await click(currentSelectors.independentModeButton);
-    await browser.wait(until.and(crudView.untilNoLoadersPresent));
-    await currentSelectors.fileUploadButton.sendKeys(UPLOAD_FILE_PATH);
-    await click(currentSelectors.primaryButton);
-    await browser.wait(until.and(crudView.untilNoLoadersPresent));
-  }
-}
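The class above exposes one creation path per install mode; the Mode enum and the MODE env default are defined at the top of this file. A hypothetical dispatcher over those exports (the scenario files in this patch call the methods directly instead):

```ts
// Hypothetical MODE-driven dispatcher over the InstallCluster methods above.
import { InstallCluster, Mode, MODE } from './installFlow.view';

export const createClusterForMode = async (installer: InstallCluster) => {
  switch (MODE) {
    case Mode.EXTERNAL:
      return installer.createExternalStorageCluster();
    case Mode.ATTACHED_DEVICES:
      return installer.createAttachedStorageCluster();
    case Mode.CONVERGED:
    default:
      return installer.createConvergedStorageCluster();
  }
};
```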
diff --git a/frontend/packages/ceph-storage-plugin/integration-tests/views/multiple-pool.view.ts b/frontend/packages/ceph-storage-plugin/integration-tests/views/multiple-pool.view.ts
deleted file mode 100644
index 1afd18daf02c..000000000000
--- a/frontend/packages/ceph-storage-plugin/integration-tests/views/multiple-pool.view.ts
+++ /dev/null
@@ -1,80 +0,0 @@
-import { $, ExpectedConditions as until, browser } from 'protractor';
-import * as sideNavView from '@console/internal-integration-tests/views/sidenav.view';
-import * as crudView from '@console/internal-integration-tests/views/crud.view';
-import { click } from '@console/shared/src/test-utils/utils';
-import { SECOND } from '../utils/consts';
-import { sendKeys } from '../utils/helpers';
-import { poolData } from '../mocks/storage-pool';
-
-export const poolMessage = {
-  PROGRESS:
-    'The creation of a StorageCluster is still in progress or has failed. Try again after the StorageCluster is ready to use.',
-  POOL_START: 'Pool "foo" creation in progress',
-  POOL_TIMEOUT:
-    'Pool "foo" creation timed out. Please check if odf operator and rook operator are running',
-  POOL_DUPLICATED: 'Pool "foo" already exists',
-  POOL_CREATED: 'Pool "foo" was successfully created',
-};
-
-export enum POOL_STATUS {
-  READY = 'Ready',
-}
-
-export const inputProvisioner = $('button[id=storage-class-provisioner]');
-export const provisionerDropdown = $('#storage-class-provisioner');
-export const selectInput = $('[data-test-id=dropdown-text-filter]');
-export const createPoolDropdown = $('button[class=pf-c-dropdown__menu-item]');
-export const poolModal = $('.modal-content');
-export const allowExpand = $('div[class=checkbox]');
-export const poolDropdownButton = $('button[id=pool-dropdown-id]');
-
-export const cancelButton = $('button[data-test-id=modal-cancel-action]');
-export const createButton = $('button[data-test=confirm-action]');
-export const finishButton = $('button[id=confirm-action]');
-export const replicaDropdown = $('button[id=replica-dropdown]');
-export const poolName = $('input[name=newPoolName]');
-export const replicaSelect = $('button[data-test-id="2"]');
-export const poolForm = $('label[for=pool-name]');
-export const modalPresence = $('div[class=modal-content]');
-export const poolStatusCheck = $('div[class=pf-c-empty-state__body]');
-export const dropdownPoolName = $('div[class=pf-c-dropdown__menu-item-main]');
-export const poolDescription = $('div[class=pf-c-dropdown__menu-item-description]');
-
-export const goToStorageClassView = async () => {
-  await sideNavView.clickNavLink(['Storage', 'Storage Classes']);
-  await crudView.isLoaded();
-};
-
-export const selectItemFromDropdown = async (item: string) => {
-  await click(provisionerDropdown);
-  await selectInput.sendKeys(item);
-  await click($(`a[id="${item}-link"]`));
-};
-
-export const prepareStorageClassForm = async (provisioner: string) => {
-  await goToStorageClassView();
-  await click(crudView.createYAMLButton);
-  await browser.wait(
-    until.textToBePresentInElement($('.co-m-pane__heading'), 'Create StorageClass'),
-  );
-  await browser.wait(until.and(crudView.untilNoLoadersPresent));
-  await selectItemFromDropdown(provisioner);
-  await browser.sleep(2 * SECOND);
-};
-
-export const showProvisioner = async (provisioner: string) => {
-  await selectItemFromDropdown(provisioner);
-  await browser.sleep(2 * SECOND);
-};
-
-export const openPoolDropdown = async () => {
-  await click(poolDropdownButton);
-  await browser.sleep(2 * SECOND);
-};
-
-export const createPool = async () => {
-  await sendKeys(poolName, poolData.metadata.name);
-  await click(replicaDropdown);
-  await click(replicaSelect);
-  await click(createButton);
-};
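Taken together, the helpers above script the BlockPool modal end to end. An illustrative spec-side sequence; the RBD provisioner id is an assumption about what these tests select, and poolStatusCheck is assumed to carry the final status message:

```ts
// Illustrative usage of the multiple-pool helpers above; the provisioner id
// and the element carrying the final message are assumptions.
import { browser, ExpectedConditions as until } from 'protractor';
import { click } from '@console/shared/src/test-utils/utils';
import {
  prepareStorageClassForm,
  openPoolDropdown,
  createPoolDropdown,
  createPool,
  poolStatusCheck,
  poolMessage,
} from './multiple-pool.view';

export const createPoolViaStorageClassForm = async () => {
  await prepareStorageClassForm('openshift-storage.rbd.csi.ceph.com'); // assumed id
  await openPoolDropdown();
  await click(createPoolDropdown);
  await createPool();
  await browser.wait(until.textToBePresentInElement(poolStatusCheck, poolMessage.POOL_CREATED));
};
```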
diff --git a/frontend/packages/ceph-storage-plugin/integration-tests/views/noobaa-sso.view.ts b/frontend/packages/ceph-storage-plugin/integration-tests/views/noobaa-sso.view.ts
deleted file mode 100644
index eb99e43deb80..000000000000
--- a/frontend/packages/ceph-storage-plugin/integration-tests/views/noobaa-sso.view.ts
+++ /dev/null
@@ -1,9 +0,0 @@
-import { $, element, by, $$ } from 'protractor';
-
-export const objectServiceLink = element(by.cssContainingText('a', 'Object Service'));
-export const overviewLink = element(by.cssContainingText('a', 'Overview'));
-export const noobaaExternalLink = $('[data-test-id="system-name-mcg"]');
-export const noobaaAddStorageResource = $$('button.btn.overview-btn').get(0);
-export const noobaaAddStorageResourceModal = $(
-  '.modal.column.pf-u-text-align-left.pop-centered.card-shadow.modal-small.add-resources-modal',
-);
diff --git a/frontend/packages/ceph-storage-plugin/integration-tests/views/ocp-dashboard-card-healthcheck.view.ts b/frontend/packages/ceph-storage-plugin/integration-tests/views/ocp-dashboard-card-healthcheck.view.ts
deleted file mode 100644
index 17c9f6e85ea3..000000000000
--- a/frontend/packages/ceph-storage-plugin/integration-tests/views/ocp-dashboard-card-healthcheck.view.ts
+++ /dev/null
@@ -1,23 +0,0 @@
-import { execSync } from 'child_process';
-import { $ } from 'protractor';
-import { getPodData } from '../utils/helpers';
-import { OCP_HEALTH_ICON_COLORS } from '../utils/consts';
-
-export const mainHealthCardStatus = $('.co-status-card__health-body');
-export const smallDivInside = mainHealthCardStatus.$$('.co-dashboard-icon').get(3);
-export const mainHealthGreenSvg = smallDivInside.$(`svg[fill="${OCP_HEALTH_ICON_COLORS.GREEN46}"]`);
-
-export const mainHealthYellowSvg = smallDivInside.$(`svg[fill="${OCP_HEALTH_ICON_COLORS.YELLOW}"]`);
-export const noOutChange = (setNoOut: string) => {
-  const podsList = JSON.parse(
-    execSync('kubectl get po -n openshift-storage -o json').toString('utf-8'),
-  );
-  const pods = podsList.items;
-  const opPod = getPodData(pods, 'ceph-operator');
-  const opPodName = opPod.metadata.name;
-  execSync(`oc -n openshift-storage rsh ${opPodName} \
-  ceph --conf=/var/lib/rook/openshift-storage/openshift-storage.config \
-  osd ${setNoOut} noout`);
-};
-
-export const mainHealthRedSvg = smallDivInside.$(`svg[fill="${OCP_HEALTH_ICON_COLORS.RED}"]`);
diff --git a/frontend/packages/ceph-storage-plugin/integration-tests/views/pvc.view.ts b/frontend/packages/ceph-storage-plugin/integration-tests/views/pvc.view.ts
deleted file mode 100644
index f548fd34380a..000000000000
--- a/frontend/packages/ceph-storage-plugin/integration-tests/views/pvc.view.ts
+++ /dev/null
@@ -1,85 +0,0 @@
-import { $, $$, browser, ExpectedConditions as until } from 'protractor';
-import * as crudView from '@console/internal-integration-tests/views/crud.view';
-import * as sideNavView from '@console/internal-integration-tests/views/sidenav.view';
-import { click } from '@console/shared/src/test-utils/utils';
-import { PVC_STATUS, SECOND } from '../utils/consts';
-import { PvcType } from '../utils/helpers';
-
-export const selectItemFromDropdown = async (item, dropdownElement) => {
-  await click(dropdownElement);
-  await click($(`#${item}-link`));
-};
-
-// create pvc
-export const namespaceDropdown = $$('[class="pf-c-dropdown__toggle pf-m-plain"]').get(0);
-export const storageclassDropdown = $('#storageclass-dropdown');
-export const inputPVCName = $('#pvc-name');
-export const selectAccessMode = (accessMode) => $(`input[value=${accessMode}]`);
-export const inputPVCSize = $('[name=requestSizeValue]');
-export const sizeUnitsDropdown = $('[data-test-id=dropdown-button]');
-
-// expand pvc
-export const expandButton = $('#confirm-action');
-export const capacityUnitDropdown = $('.modal-body [data-test-id="dropdown-button"]');
-export const expandSizeOption = (size: string) => $(`[data-test-dropdown-menu="${size}"]`);
-
-// pvc details
-export const pvcName = $('[data-test-id=pvc-name]');
-export const pvcStatus = $('[data-test-id=pvc-status]');
-export const pvcSize = $('[data-test-id=pvc-capacity]');
-export const pvcAccessMode = $('[data-test-id=pvc-access-mode]');
-export const pvcVolumeMode = $('[data-test-id=pvc-volume-mode]');
-export const pvcStorageClass = $('[data-test-id=pvc-storageclass]');
-export const pvcPersistentVolume = $('[data-test-id=persistent-volume]');
-export const actionsButton = $('[data-test-id=actions-menu-button]');
-export const deletePvc = $('[data-test-action="Delete PersistentVolumeClaim"]');
-
-// list of PVCs
-export const nameInTable = (name) => $(`a[data-test-id=${name}]`);
-
-export const goToPersistentVolumeClaims = async () => {
-  await sideNavView.clickNavLink(['Storage', 'Persistent Volume Claims']);
-  await crudView.isLoaded();
-};
-
-export const createNewPersistentVolumeClaim = async (
-  pvc: PvcType,
-  waitForBinding: boolean,
-  performBeforeWaiting?: Function,
-) => {
-  await goToPersistentVolumeClaims();
-  await selectItemFromDropdown(pvc.namespace, namespaceDropdown);
-  await click(crudView.createYAMLButton);
-  await browser.wait(
-    until.textToBePresentInElement($('.co-m-pane__heading'), 'Create PersistentVolumeClaim'),
-  );
-  await browser.wait(until.and(crudView.untilNoLoadersPresent));
-  // Selects the default StorageClass if none is provided
-  if (pvc.storageClass) {
-    await selectItemFromDropdown(pvc.storageClass, storageclassDropdown);
-  }
-  await inputPVCName.sendKeys(pvc.name);
-  // Selects RWO by default
-  if (pvc.accessMode) {
-    await click(selectAccessMode(pvc.accessMode));
-  }
-  await inputPVCSize.sendKeys(pvc.size);
-  // Units should be Mi, Gi or Ti
-  await selectItemFromDropdown(pvc.sizeUnits, sizeUnitsDropdown);
-  await click(crudView.saveChangesBtn);
-  if (performBeforeWaiting) {
-    await performBeforeWaiting();
-  }
-  if (waitForBinding)
-    await browser.wait(until.textToBePresentInElement(pvcStatus, PVC_STATUS.BOUND));
-};
-
-export const deletePersistentVolumeClaim = async (name: string, namespace: string) => {
-  await goToPersistentVolumeClaims();
-  await selectItemFromDropdown(namespace, namespaceDropdown);
-  await crudView.resourceRowsPresent();
-  await crudView.filterForName(name);
-  await crudView.isLoaded();
-  await crudView.deleteRow('PersistentVolumeClaim')(name);
-  await browser.sleep(2 * SECOND);
-};
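createNewPersistentVolumeClaim above drives the whole create form from a single PvcType value. PvcType itself lives in ../utils/helpers, so the literal below is inferred from the fields the helper dereferences; the storage class and access mode values are assumptions:

```ts
// Inferred round-trip usage of the PVC helpers above.
import { createNewPersistentVolumeClaim, deletePersistentVolumeClaim } from './pvc.view';

const examplePvc = {
  name: 'example-pvc',
  namespace: 'openshift-storage',
  storageClass: 'ocs-storagecluster-ceph-rbd', // optional; form default used if omitted
  accessMode: 'ReadWriteOnce', // optional; RWO is the form default
  size: '5',
  sizeUnits: 'Gi', // Mi, Gi or Ti, per the comment in the helper
};

export const roundTripPvc = async () => {
  await createNewPersistentVolumeClaim(examplePvc, true); // true: wait for Bound
  await deletePersistentVolumeClaim(examplePvc.name, examplePvc.namespace);
};
```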
diff --git a/frontend/packages/ceph-storage-plugin/integration-tests/views/storage-dashboard.view.ts b/frontend/packages/ceph-storage-plugin/integration-tests/views/storage-dashboard.view.ts
deleted file mode 100644
index 696c740d56ff..000000000000
--- a/frontend/packages/ceph-storage-plugin/integration-tests/views/storage-dashboard.view.ts
+++ /dev/null
@@ -1,21 +0,0 @@
-import { $, $$, browser } from 'protractor';
-import { appHost } from '@console/internal-integration-tests/protractor.conf';
-import { isLoaded } from '@console/shared/src/test-views/dashboard-shared.view';
-
-export const clusterHealth = $('div.co-status-card__health-item div svg');
-export const detailsCardStructure = $$('.co-details-card__body dt');
-const clusterDetails = $$('.co-details-card__body dd');
-export const serviceName = clusterDetails.get(0);
-export const clusterName = clusterDetails.get(1);
-export const provider = clusterDetails.get(2);
-export const ocsVersion = clusterDetails.get(3);
-const clusterInventory = $$('[class="co-inventory-card__item-title"]');
-export const allNodes = clusterInventory.get(0);
-export const allPvcs = clusterInventory.get(1);
-export const allPvs = clusterInventory.get(2);
-
-export const goToStorageDashboard = async () => {
-  await browser.get(`${appHost}/dashboards/persistent-storage`);
-  await isLoaded();
-};
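The upgrade view that follows checks StorageCluster readiness by shelling out to kubectl, and its waitUntilStorageClusterReady passes the check function to browser.wait so it is re-polled rather than evaluated once. For contexts outside Protractor, an equivalent standalone poll loop might look like this (interval and error handling are assumptions):

```ts
// Standalone readiness poll mirroring isStorageClusterReady below; usable
// without Protractor's browser.wait. Interval and timeout are assumptions.
import { execSync } from 'child_process';

const NS = 'openshift-storage'; // mirrors NS from '../utils/consts'

export const pollStorageClusterReady = async (timeoutMs = 20 * 60 * 1000): Promise<void> => {
  const deadline = Date.now() + timeoutMs;
  while (Date.now() < deadline) {
    const cluster = JSON.parse(
      execSync(`kubectl get storagecluster ocs-storagecluster -o json -n ${NS}`).toString(),
    );
    if (cluster.status && cluster.status.phase === 'Ready') return;
    await new Promise((resolve) => setTimeout(resolve, 30 * 1000)); // poll every 30s
  }
  throw new Error('Storage Cluster did not reach Ready state in time');
};
```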
diff --git a/frontend/packages/ceph-storage-plugin/integration-tests/views/upgrade.view.ts b/frontend/packages/ceph-storage-plugin/integration-tests/views/upgrade.view.ts
deleted file mode 100644
index 59ab8a1ef8b3..000000000000
--- a/frontend/packages/ceph-storage-plugin/integration-tests/views/upgrade.view.ts
+++ /dev/null
@@ -1,101 +0,0 @@
-import { execSync } from 'child_process';
-import { $, by, element, ExpectedConditions as until, browser } from 'protractor';
-import { click } from '@console/shared/src/test-utils/utils';
-import * as crudView from '@console/internal-integration-tests/views/crud.view';
-import { goToInstalledOperators, currentSelectors } from './installFlow.view';
-import { goToPersistentVolumeClaims, selectItemFromDropdown } from './pvc.view';
-import { MINUTE, NS } from '../utils/consts';
-
-export const image43Command =
-  'oc get -n openshift-marketplace catalogSource ocs-catalogsource -o json|sed \'s/ocs-olm-operator:.*"/ocs-olm-operator:latest-stable-4.3.0"/g\'|oc apply -f -';
-export const image44Command =
-  'oc get -n openshift-marketplace catalogSource ocs-catalogsource -o json|sed \'s/ocs-olm-operator:.*"/ocs-olm-operator:latest-stable-4.4.0"/g\'|oc apply -f -';
-export const image45Command =
-  'oc get -n openshift-marketplace catalogSource ocs-catalogsource -o json|sed \'s/ocs-olm-operator:.*"/ocs-olm-operator:latest-stable-4.5.0"/g\'|oc apply -f -';
-
-// Subscription tab
-export const channelChangeButton = element(by.cssContainingText('button', 'stable-'));
-export const channel42 = $('input[value="stable-4\\.2"]');
-export const channel43 = $('input[value="stable-4\\.3"]');
-export const channel44 = $('input[value="stable-4\\.4"]');
-export const channel45 = $('input[value="stable-4\\.5"]');
-const saveChange = $('#confirm-action');
-export const installedVersion = $('a[title*=ocs-operator]');
-
-// OCS Operator view
-export const subscription = element(by.partialLinkText('Subscription'));
-export const storageCluster = element(by.partialLinkText('Storage Cluster'));
-export const namespaceDropdown = $('[data-test-id="namespace-bar-dropdown"] div div button');
-
-export const goToOCSOperator = async () => {
-  await goToInstalledOperators();
-  await selectItemFromDropdown(NS, namespaceDropdown);
-  await browser.wait(until.and(crudView.untilNoLoadersPresent));
-  await click(currentSelectors.ocsOperator);
-  await browser.wait(until.and(crudView.untilNoLoadersPresent));
-};
-
-export const goToOCSSubscription = async () => {
-  await goToOCSOperator();
-  await click(subscription);
-  await browser.wait(until.and(crudView.untilNoLoadersPresent));
-};
-
-export const isStorageClusterReady = async () => {
-  const storageClusterInfo = JSON.parse(
-    execSync(`kubectl get storagecluster ocs-storagecluster -o json -n ${NS}`).toString(),
-  );
-  const storageClusterState = storageClusterInfo.status.phase;
-  return storageClusterState === 'Ready';
-};
-
-export const storageClusterVersion = async () => {
-  const storageClusterInfo = JSON.parse(
-    execSync(`kubectl get storagecluster ocs-storagecluster -o json -n ${NS}`).toString(),
-  );
-  return storageClusterInfo.spec.version;
-};
-
-export const operatorVersion = async () => {
-  await goToPersistentVolumeClaims();
-  await goToOCSSubscription();
-  return installedVersion.getText();
-};
-
-export const waitUntilStorageClusterReady = async () => {
-  await browser.wait(
-    isStorageClusterReady,
-    20 * MINUTE,
-    'Storage Cluster should reach Ready state in 20 minutes',
-  );
-};
-
-export const changeCatalogSourceImage = async (newValue: string) => {
-  const newImageCommand = `oc get -n openshift-marketplace catalogSource ocs-catalogsource -o json|sed 's/ocs-olm-operator:.*\\"/ocs-olm-operator:${newValue}\\"/g'|oc apply -f -`;
-  execSync(newImageCommand);
-  await browser.sleep(5 * MINUTE);
-  await waitUntilStorageClusterReady();
-};
-
-export const changeChannel = async (channel) => {
-  await goToOCSSubscription();
-  // refresh until the channel change button becomes clickable - https://bugzilla.redhat.com/show_bug.cgi?id=1822553
-  let clickSuccessful = false;
-  while (!clickSuccessful) {
-    try {
-      // eslint-disable-next-line no-await-in-loop
-      await click(channelChangeButton);
-      // eslint-disable-next-line no-await-in-loop
-      await click(channel);
-      clickSuccessful = true;
-    } catch (err) {
-      // eslint-disable-next-line no-await-in-loop
-      await browser.refresh();
-    }
-  }
-  await click(saveChange);
-  await browser.sleep(20 * MINUTE);
-  await waitUntilStorageClusterReady();
-  // the Subscription page shows a 404 by this time, so navigate to another page and back
-  await goToPersistentVolumeClaims();
-  await goToOCSSubscription();
-};
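changeCatalogSourceImage above rewrites the CatalogSource image through a get | sed | apply pipeline. The same change can be expressed as a JSON merge patch against spec.image; a hedged sketch (the registry path is an assumption, since the sed only rewrites the image tag):

```ts
// Sketch: swap the CatalogSource image with `oc patch` instead of the
// get | sed | apply pipeline above. The registry path is an assumption.
import { execSync } from 'child_process';

export const patchCatalogSourceImage = (tag: string) => {
  const patch = JSON.stringify({
    spec: { image: `quay.io/ocs-dev/ocs-olm-operator:${tag}` },
  });
  execSync(
    `oc patch catalogsource ocs-catalogsource -n openshift-marketplace --type merge -p '${patch}'`,
  );
};
```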
diff --git a/frontend/packages/ceph-storage-plugin/locales/OWNERS b/frontend/packages/ceph-storage-plugin/locales/OWNERS
deleted file mode 100644
index a94c4a4a8a5d..000000000000
--- a/frontend/packages/ceph-storage-plugin/locales/OWNERS
+++ /dev/null
@@ -1,2 +0,0 @@
-labels:
-  - kind/i18n
diff --git a/frontend/packages/ceph-storage-plugin/locales/en/ceph-storage-plugin.json b/frontend/packages/ceph-storage-plugin/locales/en/ceph-storage-plugin.json
deleted file mode 100644
index 8fa75a4c13a8..000000000000
--- a/frontend/packages/ceph-storage-plugin/locales/en/ceph-storage-plugin.json
+++ /dev/null
@@ -1,758 +0,0 @@
-{ - "Add Capacity": "Add Capacity", - "Edit BlockPool": "Edit BlockPool", - "Edit Bucket Class Resources": "Edit Bucket Class Resources", - "ObjectBucketClaim": "ObjectBucketClaim", - "Use existing claim": "Use existing claim", - "Select claim": "Select claim", - "Create new claim": "Create new claim", - "Create": "Create", - "Cancel": "Cancel", - "Overview": "Overview", - "StorageSystems": "StorageSystems", - "StorageSystem details": "StorageSystem details", - "Enabled": "Enabled", - "Disabled": "Disabled", - "Last synced": "Last synced", - "Default pool cannot be deleted": "Default pool cannot be deleted", - "BlockPool List": "BlockPool List", - "Delete BlockPool": "Delete BlockPool", - "{{replica}} Replication": "{{replica}} Replication", - "Pool name": "Pool name", - "my-block-pool": "my-block-pool", - "pool-name-help": "pool-name-help", - "Data protection policy": "Data protection policy", - "Select replication": "Select replication", - "Volume type": "Volume type", - "Select volume type": "Select volume type", - "Compression": "Compression", - "Enable compression": "Enable compression", - "Enabling compression may result in little or no space savings for encrypted or random data. Also, enabling compression may have an impact on I/O performance.": "Enabling compression may result in little or no space savings for encrypted or random data. Also, enabling compression may have an impact on I/O performance.", - "OpenShift Data Foundation's StorageCluster is not available. Try again after the StorageCluster is ready to use.": "OpenShift Data Foundation's StorageCluster is not available. Try again after the StorageCluster is ready to use.", - "Create BlockPool": "Create BlockPool", - "Close": "Close", - "Pool creation is not supported for OpenShift Data Foundation's external RHCS StorageSystem.": "Pool creation is not supported for OpenShift Data Foundation's external RHCS StorageSystem.", - "A BlockPool is a logical entity providing elastic capacity to applications and workloads. Pools provide a means of supporting policies for access data resilience and storage efficiency.": "A BlockPool is a logical entity providing elastic capacity to applications and workloads. Pools provide a means of supporting policies for access data resilience and storage efficiency.", - "BlockPool Creation Form": "BlockPool Creation Form", - "Name": "Name", - "Bucket Name": "Bucket Name", - "Type": "Type", - "Region": "Region", - "BackingStore Table": "BackingStore Table", - "Each BackingStore can be used for one tier at a time. Selecting a BackingStore in one tier will remove the resource from the second tier option and vice versa.": "Each BackingStore can be used for one tier at a time. Selecting a BackingStore in one tier will remove the resource from the second tier option and vice versa.", - "Bucket created for OpenShift Data Foundation's Service": "Bucket created for OpenShift Data Foundation's Service", - "Tier 1 - BackingStores": "Tier 1 - BackingStores", - "Create BackingStore ": "Create BackingStore ", - "Tier-1-Table": "Tier-1-Table", - "{{bs, number}} BackingStore_one": "{{bs, number}} BackingStore", - "{{bs, number}} BackingStore_other": "{{bs, number}} BackingStores", - "selected": "selected", - "Tier 2 - BackingStores": "Tier 2 - BackingStores", - "Tier-2-Table": "Tier-2-Table", - "General": "General", - "Placement Policy": "Placement Policy", - "Resources": "Resources", - "Review": "Review", - "Create BucketClass": "Create BucketClass", - "Create new BucketClass": "Create new BucketClass", - "BucketClass is a CRD representing a class for buckets that defines tiering policies and data placements for an OBC.": "BucketClass is a CRD representing a class for buckets that defines tiering policies and data placements for an OBC.", - "Next": "Next", - "Back": "Back", - "Edit BucketClass Resource": "Edit BucketClass Resource", - "{{storeType}} represents a storage target to be used as the underlying storage for the data in Multicloud Object Gateway buckets.": "{{storeType}} represents a storage target to be used as the underlying storage for the data in Multicloud Object Gateway buckets.", - "Cancel ": "Cancel ", - "Save": "Save", - "What is a BackingStore?": "What is a BackingStore?", - "BackingStore represents a storage target to be used as the underlying storage for the data in Multicloud Object Gateway buckets.": "BackingStore represents a storage target to be used as the underlying storage for the data in Multicloud Object Gateway buckets.", - "Multiple types of BackingStores are supported: asws-s3 s3-compatible google-cloud-storage azure-blob obc PVC.": "Multiple types of BackingStores are supported: aws-s3 s3-compatible google-cloud-storage azure-blob obc PVC.", - "Learn More": "Learn More", - "What is a BucketClass?": "What is a BucketClass?", - "A set of policies which would apply to all buckets (OBCs) created with the specific bucket class. 
These policies include placement, namespace and caching": "A set of policies which would apply to all buckets (OBCs) created with the specific bucket class. These policies include placement, namespace and caching", - "BucketClass type": "BucketClass type", - "3-63 chars": "3-63 chars", - "Starts and ends with lowercase number or letter": "Starts and ends with lowercase number or letter", - "Only lowercase letters, numbers, non-consecutive periods or hyphens": "Only lowercase letters, numbers, non-consecutive periods or hyphens", - "Avoid using the form of an IP address": "Avoid using the form of an IP address", - "Globally unique name": "Globally unique name", - "BucketClass name": "BucketClass name", - "A unique name for the bucket class within the project.": "A unique name for the bucket class within the project.", - "my-multi-cloud-mirror": "my-multi-cloud-mirror", - "BucketClass Name": "BucketClass Name", - "Description (Optional)": "Description (Optional)", - "Description of bucket class": "Description of bucket class", - "What is a Namespace Policy?": "What is a Namespace Policy?", - "Namespace policy can be set to one single read and write source, multi read sources or cached policy.": "Namespace policy can be set to one single read and write source, multi read sources or cached policy.", - "Namespace Policy Type": "Namespace Policy Type", - "What is Caching?": "What is Caching?", - "Caching is a policy that creates local copies of the data. It saves the copies locally to improve performance for frequently accessed data. Each cached copy has a TTL and is verified against the hub. Each non-read operation (upload, overwrite, delete) is performed on the hub": "Caching is a policy that creates local copies of the data. It saves the copies locally to improve performance for frequently accessed data. Each cached copy has a TTL and is verified against the hub. Each non-read operation (upload, overwrite, delete) is performed on the hub", - "Hub namespace store ": "Hub namespace store ", - "A single NamespaceStore that defines the read and write target of the namespace bucket.": "A single NamespaceStore that defines the read and write target of the namespace bucket.", - "NamespaceStore": "NamespaceStore", - "Cache data settings": "Cache data settings", - "The data will be temporarily copied on a backing store in order to later access it much more quickly.": "The data will be temporarily copied on a backing store in order to later access it much more quickly.", - "Backing store": "Backing store", - "a local backing store is recommended for better performance": "a local backing store is recommended for better performance", - "Time to live": "Time to live", - "Time to live is the time that an object is stored in a caching system before it is deleted or refreshed. Default: 0, Max: 24 hrs": "Time to live is the time that an object is stored in a caching system before it is deleted or refreshed. 
Default: 0, Max: 24 hrs", - "Read NamespaceStores": "Read NamespaceStores", - "Select a list of NamespaceStores that defines the read targets of the namespace bucket.": "Select a list of NamespaceStores that defines the read targets of the namespace bucket.", - "Create NamespaceStore": "Create NamespaceStore", - "{{nns, number}} namespace store_one": "{{nns, number}} namespace store", - "{{nns, number}} namespace store_other": "{{nns, number}} namespace stores", - " selected": " selected", - "Write NamespaceStore": "Write NamespaceStore", - "Select a single NamespaceStore that defines the write targets of the namespace bucket.": "Select a single NamespaceStore that defines the write targets of the namespace bucket.", - "Read and Write NamespaceStore ": "Read and Write NamespaceStore ", - "Select one NamespaceStore which defines the read and write targets of the namespace bucket.": "Select one NamespaceStore which defines the read and write targets of the namespace bucket.", - "What is a Placement Policy?": "What is a Placement Policy?", - "Data placement capabilities are built as a multi-layer structure here are the layers bottom-up:": "Data placement capabilities are built as a multi-layer structure here are the layers bottom-up:", - "Spread Tier - list of BackingStores aggregates the storage of multiple stores.": "Spread Tier - list of BackingStores aggregates the storage of multiple stores.", - "Mirroring Tier - list of spread-layers async-mirroring to all mirrors with locality optimization (will allocate on the closest region to the source endpoint). Mirroring requires at least two BackingStores.": "Mirroring Tier - list of spread-layers async-mirroring to all mirrors with locality optimization (will allocate on the closest region to the source endpoint). Mirroring requires at least two BackingStores.", - "The number of replicas can be configured via the NooBaa management console.": "The number of replicas can be configured via the NooBaa management console.", - "Tier 1 - Policy Type": "Tier 1 - Policy Type", - "Spread": "Spread", - "Spreading the data across the chosen resources. By default a replica of one copy is used and does not include failure tolerance in case of resource failure.": "Spreading the data across the chosen resources. By default a replica of one copy is used and does not include failure tolerance in case of resource failure.", - "Mirror": "Mirror", - "Full duplication of the data in each chosen resource. By default a replica of one copy per location is used. Includes failure tolerance in case of resource failure.": "Full duplication of the data in each chosen resource. By default a replica of one copy per location is used. 
Includes failure tolerance in case of resource failure.", - "Add Tier": "Add Tier", - "Tier 2 - Policy type": "Tier 2 - Policy type", - "Remove Tier": "Remove Tier", - "Spreading the data across the chosen resources does not include failure tolerance in case of resource failure.": "Spreading the data across the chosen resources does not include failure tolerance in case of resource failure.", - "Full duplication of the data in each chosen resource includes failure tolerance in cause of resource failure.": "Full duplication of the data in each chosen resource includes failure tolerance in case of resource failure.", - "Namespace Policy: ": "Namespace Policy: ", - "Read and write NamespaceStore : ": "Read and write NamespaceStore : ", - "Hub namespace store: ": "Hub namespace store: ", - "Cache backing store: ": "Cache backing store: ", - "Time to live: ": "Time to live: ", - "Resources ": "Resources ", - "Selected read namespace stores: ": "Selected read namespace stores: ", - "Selected write namespace store: ": "Selected write namespace store: ", - "Placement policy details ": "Placement policy details ", - "Tier 1: ": "Tier 1: ", - "Selected BackingStores": "Selected BackingStores", - "Tier 2: ": "Tier 2: ", - "Review BucketClass": "Review BucketClass", - "BucketClass type: ": "BucketClass type: ", - "BucketClass name: ": "BucketClass name: ", - "Description: ": "Description: ", - "Provider {{provider}}": "Provider {{provider}}", - "Create new BackingStore ": "Create new BackingStore ", - "An error has occured while fetching backing stores": "An error has occurred while fetching backing stores", - "Select a backing store": "Select a backing store", - "Storage targets that are used to store chunks of data on Multicloud Object Gateway buckets.": "Storage targets that are used to store chunks of data on Multicloud Object Gateway buckets.", - "A BackingStore represents a storage target to be used as the underlying storage layer in Multicloud Object Gateway buckets.": "A BackingStore represents a storage target to be used as the underlying storage layer in Multicloud Object Gateway buckets.", - "Multiple types of BackingStores are supported: AWS S3 S3 Compatible Google Cloud Storage Azure Blob PVC.": "Multiple types of BackingStores are supported: AWS S3 S3 Compatible Google Cloud Storage Azure Blob PVC.", - "BackingStore Name": "BackingStore Name", - "A unique name for the BackingStore within the project": "A unique name for the BackingStore within the project", - "Name can contain a max of 43 characters": "Name can contain a max of 43 characters", - "Provider": "Provider", - "Create BackingStore": "Create BackingStore", - "This is an Advanced subscription feature. It requires Advanced Edition subscription. 
Please contact the account team for more information.", - "Advanced Subscription": "Advanced Subscription", - "Storage platform": "Storage platform", - "Select a storage platform you wish to connect": "Select a storage platform you wish to connect", - "Select external system from list": "Select external system from list", - "Backing storage type": "Backing storage type", - "Use an existing StorageClass": "Use an existing StorageClass", - "OpenShift Data Foundation will use an existing StorageClass available on your hosting platform.": "OpenShift Data Foundation will use an existing StorageClass available on your hosting platform.", - "Create a new StorageClass using local storage devices": "Create a new StorageClass using local storage devices", - "OpenShift Data Foundation will use a StorageClass provided by the Local Storage Operator (LSO) on top of your attached drives. This option is available on any platform with devices attached to nodes.": "OpenShift Data Foundation will use a StorageClass provided by the Local Storage Operator (LSO) on top of your attached drives. This option is available on any platform with devices attached to nodes.", - "Connect an external storage platform": "Connect an external storage platform", - "OpenShift Data Foundation will create a dedicated StorageClass.": "OpenShift Data Foundation will create a dedicated StorageClass.", - "Deploys MultiCloud Object Gateway without block and file services.": "Deploys MultiCloud Object Gateway without block and file services.", - "Deploys OpenShift Data Foundation with block, shared fileSystem and object services.": "Deploys OpenShift Data Foundation with block, shared fileSystem and object services.", - "Deployment type": "Deployment type", - "Taint nodes": "Taint nodes", - "Selected nodes will be dedicated to OpenShift Data Foundation use only": "Selected nodes will be dedicated to OpenShift Data Foundation use only", - "Select capacity": "Select capacity", - "Requested capacity": "Requested capacity", - "Select nodes": "Select nodes", - "Select at least 3 nodes preferably in 3 different zones. It is recommended to start with at least 14 CPUs and 34 GiB per node.": "Select at least 3 nodes preferably in 3 different zones. 
It is recommended to start with at least 14 CPUs and 34 GiB per node.", - "PersistentVolumes are being provisioned on the selected nodes.": "PersistentVolumes are being provisioned on the selected nodes.", - "Error while loading PersistentVolumes.": "Error while loading PersistentVolumes.", - "Selected capacity": "Selected capacity", - "Available raw capacity": "Available raw capacity", - "The available capacity is based on all attached disks associated with the selected StorageClass <2>{{storageClassName}}": "The available capacity is based on all attached disks associated with the selected StorageClass <2>{{storageClassName}}", - "Selected nodes": "Selected nodes", - "Role": "Role", - "CPU": "CPU", - "Memory": "Memory", - "Zone": "Zone", - "Selected nodes table": "Selected nodes table", - "To support high availability when two data centers can be used, enable arbiter to get a valid quorum between the two data centers.": "To support high availability when two data centers can be used, enable arbiter to get a valid quorum between the two data centers.", - "Arbiter minimum requirements": "Arbiter minimum requirements", - "Stretch Cluster": "Stretch Cluster", - "Enable arbiter": "Enable arbiter", - "Arbiter zone": "Arbiter zone", - "An arbiter node will be automatically selected from this zone": "An arbiter node will be automatically selected from this zone", - "Select an arbiter zone": "Select an arbiter zone", - "Arbiter zone selection": "Arbiter zone selection", - "Connection details": "Connection details", - "Disks on all nodes": "Disks on all nodes", - "{{nodes, number}} node_one": "{{nodes, number}} node", - "{{nodes, number}} node_other": "{{nodes, number}} nodes", - "Please enter a positive Integer": "Please enter a positive Integer", - "LocalVolumeSet name": "LocalVolumeSet name", - "A LocalVolumeSet will be created to allow you to filter a set of disks, group them and create a dedicated StorageClass to consume storage from them.": "A LocalVolumeSet will be created to allow you to filter a set of disks, group them and create a dedicated StorageClass to consume storage from them.", - "StorageClass name": "StorageClass name", - "Filter disks by": "Filter disks by", - "Uses the available disks that match the selected filters on all nodes.": "Uses the available disks that match the selected filters on all nodes.", - "Disks on selected nodes": "Disks on selected nodes", - "Uses the available disks that match the selected filters only on selected nodes.": "Uses the available disks that match the selected filters only on selected nodes.", - "Disk type": "Disk type", - "Advanced": "Advanced", - "Volume mode": "Volume mode", - "Device type": "Device type", - "Select disk types": "Select disk types", - "Disk size": "Disk size", - "Minimum": "Minimum", - "Please enter a value less than or equal to max disk size": "Please enter a value less than or equal to max disk size", - "Maximum": "Maximum", - "Please enter a value greater than or equal to min disk size": "Please enter a value greater than or equal to min disk size", - "Units": "Units", - "Maximum disks limit": "Maximum disks limit", - "Disks limit will set the maximum number of PVs to create on a node. If the field is empty we will create PVs for all available disks on the matching nodes.": "Disks limit will set the maximum number of PVs to create on a node. 
If the field is empty we will create PVs for all available disks on the matching nodes.", - "All": "All", - "Local Storage Operator not installed": "Local Storage Operator not installed", - "Before we can create a StorageSystem, the Local Storage Operator needs to be installed. When installation is finished come back to OpenShift Data Foundation to create a StorageSystem.<1><0>Install": "Before we can create a StorageSystem, the Local Storage Operator needs to be installed. When installation is finished come back to OpenShift Data Foundation to create a StorageSystem.<1><0>Install", - "Checking Local Storage Operator installation": "Checking Local Storage Operator installation", - "Discovering disks on all hosts. This may take a few minutes.": "Discovering disks on all hosts. This may take a few minutes.", - "Minimum Node Requirement": "Minimum Node Requirement", - "A minimum of 3 nodes are required for the initial deployment. Only {{nodes}} node match to the selected filters. Please adjust the filters to include more nodes.": "A minimum of 3 nodes are required for the initial deployment. Only {{nodes}} node match to the selected filters. Please adjust the filters to include more nodes.", - "After the LocalVolumeSet is created you won't be able to edit it.": "After the LocalVolumeSet is created you won't be able to edit it.", - "Note:": "Note:", - "Create LocalVolumeSet": "Create LocalVolumeSet", - "Yes": "Yes", - "Are you sure you want to continue?": "Are you sure you want to continue?", - "Node": "Node", - "Model": "Model", - "Capacity": "Capacity", - "Selected Disks": "Selected Disks", - "Disk List": "Disk List", - "{{nodes, number}} Node_one": "{{nodes, number}} Node", - "{{nodes, number}} Node_other": "{{nodes, number}} Nodes", - "{{disks, number}} Disk_one": "{{disks, number}} Disk", - "{{disks, number}} Disk_other": "{{disks, number}} Disks", - "Selected versus Available Capacity": "Selected versus Available Capacity", - "Out of {{capacity}}": "Out of {{capacity}}", - "{{displayName}} connection details": "{{displayName}} connection details", - "Not connected": "Not connected", - "Backing storage": "Backing storage", - "Deployment type: {{deployment}}": "Deployment type: {{deployment}}", - "Backing storage type: {{name}}": "Backing storage type: {{name}}", - "External storage platform: {{storagePlatform}}": "External storage platform: {{storagePlatform}}", - "Capacity and nodes": "Capacity and nodes", - "Cluster capacity: {{capacity}}": "Cluster capacity: {{capacity}}", - "Selected nodes: {{nodeCount, number}} node_one": "Selected nodes: {{nodeCount, number}} node", - "Selected nodes: {{nodeCount, number}} node_other": "Selected nodes: {{nodeCount, number}} nodes", - "CPU and memory: {{cpu, number}} CPU and {{memory}} memory": "CPU and memory: {{cpu, number}} CPU and {{memory}} memory", - "Zone: {{zoneCount, number}} zone_one": "Zone: {{zoneCount, number}} zone", - "Zone: {{zoneCount, number}} zone_other": "Zone: {{zoneCount, number}} zones", - "Arbiter zone: {{zone}}": "Arbiter zone: {{zone}}", - "Taint nodes: {{ocsTaintsStatus}}": "Taint nodes: {{ocsTaintsStatus}}", - "Security": "Security", - "Encryption: Enabled": "Encryption: Enabled", - "External key management service: {{kmsStatus}}": "External key management service: {{kmsStatus}}", - "Security and network": "Security and network", - "Encryption: {{encryptionStatus}}": "Encryption: {{encryptionStatus}}", - "Network: {{networkType}}": "Network: {{networkType}}", - "Encryption level": "Encryption level", - "The StorageCluster 
encryption level can be set to include all components under the cluster (including StorageClass and PVs) or to include only StorageClass encryption. PV encryption can use an auth token that will be used with the KMS configuration to allow multi-tenancy.": "The StorageCluster encryption level can be set to include all components under the cluster (including StorageClass and PVs) or to include only StorageClass encryption. PV encryption can use an auth token that will be used with the KMS configuration to allow multi-tenancy.", - "Cluster-wide encryption": "Cluster-wide encryption", - "Encryption for the entire cluster (block and file)": "Encryption for the entire cluster (block and file)", - "StorageClass encryption": "StorageClass encryption", - "An encryption key will be generated for each persistent volume (block) created using an encryption enabled StorageClass.": "An encryption key will be generated for each persistent volume (block) created using an encryption enabled StorageClass.", - "Connection settings": "Connection settings", - "Connect to an external key management service": "Connect to an external key management service", - "Data encryption for block and file storage. MultiCloud Object Gateway is always encrypted.": "Data encryption for block and file storage. MultiCloud Object Gateway is always encrypted.", - "MultiCloud Object Gateway is always encrypted.": "MultiCloud Object Gateway is always encrypted.", - "Enable data encryption for block and file storage": "Enable data encryption for block and file storage", - "Enable encryption": "Enable encryption", - "Encryption": "Encryption", - "An error has occurred: {{error}}": "An error has occurred: {{error}}", - "IP address": "IP address", - "Rest API IP address of IBM FlashSystem.": "Rest API IP address of IBM FlashSystem.", - "The endpoint is not a valid IP address": "The endpoint is not a valid IP address", - "Username": "Username", - "Password": "Password", - "Hide password": "Hide password", - "Reveal password": "Reveal password", - "The uploaded file is not a valid JSON file": "The uploaded file is not a valid JSON file", - "External storage system metadata": "External storage system metadata", - "Download <1>{{SCRIPT_NAME}} script and run on the RHCS cluster, then upload the results (JSON) in the External storage system metadata field.": "Download <1>{{SCRIPT_NAME}} script and run on the RHCS cluster, then upload the results (JSON) in the External storage system metadata field.", - "Download script": "Download script", - "Browse": "Browse", - "Clear": "Clear", - "Upload helper script": "Upload helper script", - "An error has occurred": "An error has occurred", - "Create StorageSystem": "Create StorageSystem", - "Create a StorageSystem to represent your OpenShift Data Foundation system and all its required storage and computing resources.": "Create a StorageSystem to represent your OpenShift Data Foundation system and all its required storage and computing resources.", - "{{nodeCount, number}} node_one": "{{nodeCount, number}} node", - "{{nodeCount, number}} node_other": "{{nodeCount, number}} nodes", - "selected ({{cpu}} CPU and {{memory}} on ": "selected ({{cpu}} CPU and {{memory}} on ", - "{{zoneCount, number}} zone_one": "{{zoneCount, number}} zone", - "{{zoneCount, number}} zone_other": "{{zoneCount, number}} zones", - "Search by node name...": "Search by node name...", - "Search by node label...": "Search by node label...", - "Not found": "Not found", - "Compression eligibility": "Compression eligibility", - 
"Compression eligibility indicates the percentage of incoming data that is compressible": "Compression eligibility indicates the percentage of incoming data that is compressible", - "Compression savings": "Compression savings", - "Compression savings indicates the total savings gained from compression for this pool, including replicas": "Compression savings indicates the total savings gained from compression for this pool, including replicas", - "Compression ratio": "Compression ratio", - "Compression ratio indicates the achieved compression on eligible data for this pool": "Compression ratio indicates the achieved compression on eligible data for this pool", - "Compression status": "Compression status", - "Storage efficiency": "Storage efficiency", - "Details": "Details", - "Replicas": "Replicas", - "Inventory": "Inventory", - "Not available": "Not available", - "Image states info": "Image states info", - "What does each state mean?": "What does each state mean?", - "<0>Starting replay: Initiating image (PV) replication process.": "<0>Starting replay: Initiating image (PV) replication process.", - "<0>Replaying: Image (PV) replication is ongoing or idle between clusters.": "<0>Replaying: Image (PV) replication is ongoing or idle between clusters.", - "<0>Stopping replay: Image (PV) replication process is shutting down.": "<0>Stopping replay: Image (PV) replication process is shutting down.", - "<0>Stopped: Image (PV) replication process has shut down.": "<0>Stopped: Image (PV) replication process has shut down.", - "<0>Error: Image (PV) replication process stopped due to an error.": "<0>Error: Image (PV) replication process stopped due to an error.", - "<0>Unknown: Unable to determine image (PV) state due to an error. Check your network connection and remote cluster mirroring daemon.": "<0>Unknown: Unable to determine image (PV) state due to an error. 
Check your network connection and remote cluster mirroring daemon.", - "image states info": "image states info", - "Image States": "Image States", - "Mirroring": "Mirroring", - "Mirroring status": "Mirroring status", - "Overall image health": "Overall image health", - "Show image states": "Show image states", - "Last checked": "Last checked", - "Raw Capacity shows the total physical capacity from all storage media within the storage subsystem": "Raw Capacity shows the total physical capacity from all storage media within the storage subsystem", - "Start replay": "Start replay", - "Stop reply": "Stop replay", - "Replaying": "Replaying", - "Stopped": "Stopped", - "Error": "Error", - "Syncing": "Syncing", - "Unknown": "Unknown", - "Status": "Status", - "Performance": "Performance", - "IOPS": "IOPS", - "Throughput": "Throughput", - "Not enough usage data": "Not enough usage data", - "used": "used", - "available": "available", - "Other": "Other", - "All other capacity usage that are not a part of the top 5 consumers.": "All other capacity usage that are not a part of the top 5 consumers.", - "Available": "Available", - "Breakdown Chart": "Breakdown Chart", - "Warning": "Warning", - "Raw capacity": "Raw capacity", - "Used": "Used", - "Available versus Used Capacity": "Available versus Used Capacity", - "Used of {{capacity}}": "Used of {{capacity}}", - "Not Available": "Not Available", - "Rebuilding data resiliency": "Rebuilding data resiliency", - "{{formattedProgress, number}}%": "{{formattedProgress, number}}%", - "Activity": "Activity", - "Estimating {{formattedEta}} to completion": "Estimating {{formattedEta}} to completion", - "Object_one": "Object", - "Object_other": "Objects", - "Buckets": "Buckets", - "Buckets card represents the number of S3 buckets managed on Multicloud Object Gateway and the number of ObjectBucketClaims and the ObjectBuckets managed on both Multicloud Object Gateway and RGW (if deployed).": "Buckets card represents the number of S3 buckets managed on Multicloud Object Gateway and the number of ObjectBucketClaims and the ObjectBuckets managed on both Multicloud Object Gateway and RGW (if deployed).", - "NooBaa Bucket": "NooBaa Bucket", - "Break by": "Break by", - "Total": "Total", - "Projects": "Projects", - "BucketClasses": "BucketClasses", - "Service type": "Service type", - "Cluster-wide": "Cluster-wide", - "Any NON Object bucket claims that were created via an S3 client or via the NooBaa UI system.": "Any NON Object bucket claims that were created via an S3 client or via the NooBaa UI system.", - "Capacity breakdown": "Capacity breakdown", - "This card shows used capacity for different resources. The available capacity is based on cloud services therefore it cannot be shown.": "This card shows used capacity for different resources. The available capacity is based on cloud services therefore it cannot be shown.", - "Type: {{serviceType}}": "Type: {{serviceType}}", - "Service Type Dropdown": "Service Type Dropdown", - "Service Type Dropdown Toggle": "Service Type Dropdown Toggle", - "By: {{serviceType}}": "By: {{serviceType}}", - "Break By Dropdown": "Break By Dropdown", - "Providers": "Providers", - "Accounts": "Accounts", - "Metric": "Metric", - "I/O Operations": "I/O Operations", - "Logical Used Capacity": "Logical Used Capacity", - "Physical vs. Logical used capacity": "Physical vs. 
Logical used capacity", - "Egress": "Egress", - "Latency": "Latency", - "Bandwidth": "Bandwidth", - "Service Type": "Service Type", - "Type: {{selectedService}}": "Type: {{selectedService}}", - "{{selectedMetric}} by {{selectedBreakdown}}": "{{selectedMetric}} by {{selectedBreakdown}}", - "thousands": "thousands", - "millions": "millions", - "billions": "billions", - "Total Reads {{totalRead}}": "Total Reads {{totalRead}}", - "Total Writes {{totalWrite}}": "Total Writes {{totalWrite}}", - "Total Logical Used Capacity {{logicalCapacity}}": "Total Logical Used Capacity {{logicalCapacity}}", - "Total Physical Used Capacity {{physicalcapacity}}": "Total Physical Used Capacity {{physicalcapacity}}", - "Shows an overview of the data consumption per provider or account collected from the day of the entity creation.": "Shows an overview of the data consumption per provider or account collected from the day of the entity creation.", - "(in {{suffixLabel}})": "(in {{suffixLabel}})", - "Data Consumption Graph": "Data Consumption Graph", - "GET {{GETLatestValue}}": "GET {{GETLatestValue}}", - "PUT {{PUTLatestValue}}": "PUT {{PUTLatestValue}}", - "OpenShift Data Foundation": "OpenShift Data Foundation", - "OpenShift Container Storage": "OpenShift Container Storage", - "Service name": "Service name", - "System name": "System name", - "Multicloud Object Gateway": "Multicloud Object Gateway", - "RADOS Object Gateway": "RADOS Object Gateway", - "Version": "Version", - "Resource Providers": "Resource Providers", - "A list of all Multicloud Object Gateway resources that are currently in use. Those resources are used to store data according to the buckets' policies and can be a cloud-based resource or a bare metal resource.": "A list of all Multicloud Object Gateway resources that are currently in use. Those resources are used to store data according to the buckets' policies and can be a cloud-based resource or a bare metal resource.", - "Object Service": "Object Service", - "Data Resiliency": "Data Resiliency", - "Object Service Status": "Object Service Status", - "The object service includes 2 services.": "The object service includes 2 services.", - "The data resiliency includes 2 services": "The data resiliency includes 2 services", - "Services": "Services", - "Object Gateway (RGW)": "Object Gateway (RGW)", - "All resources are unhealthy": "All resources are unhealthy", - "Object Bucket has an issue": "Object Bucket has an issue", - "Many buckets have issues": "Many buckets have issues", - "Some buckets have issues": "Some buckets have issues", - "{{capacityRatio, number}}:1": "{{capacityRatio, number}}:1", - "OpenShift Data Foundation can be configured to use compression. The efficiency rate reflects the actual compression ratio when using such a configuration.": "OpenShift Data Foundation can be configured to use compression. 
The efficiency rate reflects the actual compression ratio when using such a configuration.", - "Savings": "Savings", - "Savings shows the uncompressed and non-deduped data that would have been stored without those techniques.": "Savings shows the uncompressed and non-deduped data that would have been stored without those techniques.", - "Storage Efficiency": "Storage Efficiency", - "OpenShift Container Storage Overview": "OpenShift Container Storage Overview", - "Block and File": "Block and File", - "Object": "Object", - "BlockPools": "BlockPools", - "Storage Classes": "Storage Classes", - "Pods": "Pods", - "{{metricType}}": "{{metricType}}", - "Break by dropdown": "Break by dropdown", - "Service Name": "Service Name", - "Cluster Name": "Cluster Name", - "Mode": "Mode", - "Storage Cluster": "Storage Cluster", - "Utilization": "Utilization", - "Used Capacity": "Used Capacity", - "Expanding StorageCluster": "Expanding StorageCluster", - "Upgrading OpenShift Data Foundation's Operator": "Upgrading OpenShift Data Foundation's Operator", - "Used Capacity Breakdown": "Used Capacity Breakdown", - "This card shows the used capacity for different Kubernetes resources. The figures shown represent the Usable storage, meaning that data replication is not taken into consideration.": "This card shows the used capacity for different Kubernetes resources. The figures shown represent the Usable storage, meaning that data replication is not taken into consideration.", - "Cluster name": "Cluster name", - "Internal": "Internal", - "Raw capacity is the absolute total disk space available to the array subsystem.": "Raw capacity is the absolute total disk space available to the array subsystem.", - "Troubleshoot": "Troubleshoot", - "Active health checks": "Active health checks", - "Progressing": "Progressing", - "The Compression Ratio represents the compressible data effectiveness metric inclusive of all compression-enabled pools.": "The Compression Ratio represents the compressible data effectiveness metric inclusive of all compression-enabled pools.", - "The Savings metric represents the actual disk capacity saved inclusive of all compression-enabled pools and associated replicas.": "The Savings metric represents the actual disk capacity saved inclusive of all compression-enabled pools and associated replicas.", - "Performance metrics over time showing IOPS, Latency and more. Each metric is a link to a detailed view of this metric.": "Performance metrics over time showing IOPS, Latency and more. 
Each metric is a link to a detailed view of this metric.", - "Recovery": "Recovery", - "Disk State": "Disk State", - "OpenShift Data Foundation status": "OpenShift Data Foundation status", - "Filesystem": "Filesystem", - "Disks List": "Disks List", - "Start Disk Replacement": "Start Disk Replacement", - "<0>{{diskName}} can be replaced with a disk of same type.": "<0>{{diskName}} can be replaced with a disk of same type.", - "Troubleshoot disk <1>{{diskName}}": "Troubleshoot disk <1>{{diskName}}", - "here": "here", - "Online": "Online", - "Offline": "Offline", - "NotResponding": "NotResponding", - "PreparingToReplace": "PreparingToReplace", - "ReplacementFailed": "ReplacementFailed", - "ReplacementReady": "ReplacementReady", - "Connection name": "Connection name", - "This is a required field": "This is a required field", - "A unique name for the key management service within the project.": "A unique name for the key management service within the project.", - "Service instance ID": "Service instance ID", - "Service API key": "Service API key", - "Customer root key": "Customer root key", - "IBM Base URL": "IBM Base URL", - "IBM Token URL": "IBM Token URL", - "Connect to a Key Management Service": "Connect to a Key Management Service", - "Key management service provider": "Key management service provider", - "kms-provider-name": "kms-provider-name", - "Token": "Token", - "Create a secret with the token for every namespace using encrypted PVCs.": "Create a secret with the token for every namespace using encrypted PVCs.", - "Hide token": "Hide token", - "Reveal token": "Reveal token", - "Authentication method": "Authentication method", - "authentication-method": "authentication-method", - "Please enter a URL": "Please enter a URL", - "Please enter a valid port": "Please enter a valid port", - "Address": "Address", - "Port": "Port", - "Advanced settings": "Advanced settings", - "Raw Capacity": "Raw Capacity", - "x {{ replica, number }} replicas =": "x {{ replica, number }} replicas =", - "No StorageClass selected": "No StorageClass selected", - "The Arbiter stretch cluster requires a minimum of 4 nodes (2 different zones, 2 nodes per zone). Please choose a different StorageClass or create a new LocalVolumeSet that matches the minimum node requirement.": "The Arbiter stretch cluster requires a minimum of 4 nodes (2 different zones, 2 nodes per zone). Please choose a different StorageClass or create a new LocalVolumeSet that matches the minimum node requirement.", - "The StorageCluster requires a minimum of 3 nodes. Please choose a different StorageClass or create a new LocalVolumeSet that matches the minimum node requirement.": "The StorageCluster requires a minimum of 3 nodes. Please choose a different StorageClass or create a new LocalVolumeSet that matches the minimum node requirement.", - "Adding capacity for <1>{{name}}, may increase your expenses.": "Adding capacity for <1>{{name}}, may increase your expenses.", - "StorageClass": "StorageClass", - "Currently Used:": "Currently Used:", - "Add": "Add", - "Key Management Service Advanced Settings": "Key Management Service Advanced Settings", - "Vault enterprise namespaces are isolated environments that functionally exist as Vaults within a Vault. They have separate login paths and support creating and managing data isolated to their namespace.": "Vault enterprise namespaces are isolated environments that functionally exist as Vaults within a Vault. 
They have separate login paths and support creating and managing data isolated to their namespace.", - "Maximum file size exceeded. File limit is 4MB.": "Maximum file size exceeded. File limit is 4MB.", - "A PEM-encoded CA certificate file used to verify the Vault server's SSL certificate.": "A PEM-encoded CA certificate file used to verify the Vault server's SSL certificate.", - "A PEM-encoded client certificate. This certificate is used for TLS communication with the Vault server.": "A PEM-encoded client certificate. This certificate is used for TLS communication with the Vault server.", - "An unencrypted, PEM-encoded private key which corresponds to the matching client certificate provided with VAULT_CLIENT_CERT.": "An unencrypted, PEM-encoded private key which corresponds to the matching client certificate provided with VAULT_CLIENT_CERT.", - "The name to use as the SNI host when OpenShift Data Foundation connecting via TLS to the Vault server": "The name to use as the SNI host when OpenShift Data Foundation connects via TLS to the Vault server", - "Backend Path": "Backend Path", - "path/": "path/", - "Authentication Path": "Authentication Path", - "Authentication Namespace": "Authentication Namespace", - "TLS Server Name": "TLS Server Name", - "Vault Enterprise Namespace": "Vault Enterprise Namespace", - "The name must be accurate and must match the service namespace": "The name must be accurate and must match the service namespace", - "CA Certificate": "CA Certificate", - "Upload a .PEM file here...": "Upload a .PEM file here...", - "Client Certificate": "Client Certificate", - "Client Private Key": "Client Private Key", - "Attach OBC to a Deployment": "Attach OBC to a Deployment", - "Deployment Name": "Deployment Name", - "Attach": "Attach", - "<0><0>{{poolName}} cannot be deleted. When a pool is bounded to PVC it cannot be deleted. Please detach all the resources from StorageClass(es):": "<0><0>{{poolName}} cannot be deleted. When a pool is bound to a PVC it cannot be deleted. Please detach all the resources from StorageClass(es):", - "<0>Deleting <1>{{poolName}} will remove all the saved data of this pool. Are you sure want to delete?": "<0>Deleting <1>{{poolName}} will remove all the saved data of this pool. 
Are you sure you want to delete?", - "BlockPool Delete Modal": "BlockPool Delete Modal", - "Try Again": "Try Again", - "Finish": "Finish", - "Go To Pvc List": "Go To Pvc List", - "BlockPool Update Form": "BlockPool Update Form", - "replacement disallowed: disk {{diskName}} is {{replacingDiskStatus}}": "replacement disallowed: disk {{diskName}} is {{replacingDiskStatus}}", - "replacement disallowed: disk {{diskName}} is {{replacementStatus}}": "replacement disallowed: disk {{diskName}} is {{replacementStatus}}", - "Disk Replacement": "Disk Replacement", - "This action will start preparing the disk for replacement.": "This action will start preparing the disk for replacement.", - "Data rebalancing is in progress": "Data rebalancing is in progress", - "See data resiliency status": "See data resiliency status", - "Are you sure you want to replace <1>{{diskName}}?": "Are you sure you want to replace <1>{{diskName}}?", - "Replace": "Replace", - "Create NamespaceStore ": "Create NamespaceStore ", - "Represents an underlying storage to be used as read or write target for the data in the namespace buckets.": "Represents an underlying storage to be used as read or write target for the data in the namespace buckets.", - "Provider {{provider}} | Region: {{region}}": "Provider {{provider}} | Region: {{region}}", - "Create new NamespaceStore ": "Create new NamespaceStore ", - "An error has occurred while fetching namespace stores": "An error has occurred while fetching namespace stores", - "Select a namespace store": "Select a namespace store", - "Namespace store name": "Namespace store name", - "A unique name for the namespace store within the project": "A unique name for the namespace store within the project", - "Persistent volume claim": "Persistent volume claim", - "Folder": "Folder", - "If the name you write exists, we will be using the existing folder if not we will create a new folder ": "If the name you write exists, we will use the existing folder; if not, we will create a new folder ", - "Namespace Store Table": "Namespace Store Table", - "Service account keys are needed for Google Cloud Storage authentication. The keys can be found in the service accounts page in the GCP console.": "Service account keys are needed for Google Cloud Storage authentication. 
The keys can be found in the service accounts page in the GCP console.", - "Learn more": "Learn more", - "Where can I find Google Cloud credentials?": "Where can I find Google Cloud credentials?", - "Upload a .json file with the service account keys provided by Google Cloud Storage.": "Upload a .json file with the service account keys provided by Google Cloud Storage.", - "Secret Key": "Secret Key", - "Upload JSON": "Upload JSON", - "Uploaded File Name": "Uploaded File Name", - "Upload File": "Upload File", - "Switch to Secret": "Switch to Secret", - "Select Secret": "Select Secret", - "Switch to upload JSON": "Switch to upload JSON", - "Cluster Metadata": "Cluster Metadata", - "Target Bucket": "Target Bucket", - "Number of Volumes": "Number of Volumes", - "Volume Size": "Volume Size", - "Target blob container": "Target blob container", - "Target bucket": "Target bucket", - "Account name": "Account name", - "Access key": "Access key", - "Account key": "Account key", - "Secret key": "Secret key", - "Region Dropdown": "Region Dropdown", - "Endpoint": "Endpoint", - "Endpoint Address": "Endpoint Address", - "Secret": "Secret", - "Switch to Credentials": "Switch to Credentials", - "Access Key Field": "Access Key Field", - "Secret Key Field": "Secret Key Field", - "ObjectBucketClaim Name": "ObjectBucketClaim Name", - "my-object-bucket": "my-object-bucket", - "If not provided a generic name will be generated.": "If not provided a generic name will be generated.", - "Defines the object-store service and the bucket provisioner.": "Defines the object-store service and the bucket provisioner.", - "BucketClass": "BucketClass", - "Select BucketClass": "Select BucketClass", - "Create ObjectBucketClaim": "Create ObjectBucketClaim", - "Edit YAML": "Edit YAML", - "Attach to Deployment": "Attach to Deployment", - "Disabled because the ObjectBucketClaim is being deleted.": "Disabled because the ObjectBucketClaim is being deleted.", - "Object Bucket Claim Details": "Object Bucket Claim Details", - "Object Bucket": "Object Bucket", - "Namespace": "Namespace", - "OBCTableHeader": "OBCTableHeader", - "Object Bucket Claims": "Object Bucket Claims", - "Object Bucket Claim Data": "Object Bucket Claim Data", - "Hide Values": "Hide Values", - "Reveal Values": "Reveal Values", - "Data": "Data", - "Create Object Bucket": "Create Object Bucket", - "Object Bucket Name": "Object Bucket Name", - "ob-name-help": "ob-name-help", - "The corresponding ObjectBucketClaim must be deleted first.": "The corresponding ObjectBucketClaim must be deleted first.", - "Object Bucket Details": "Object Bucket Details", - "Object Bucket Claim": "Object Bucket Claim", - "OBTableHeader": "OBTableHeader", - "Object Buckets": "Object Buckets", - "Uses the available disks that match the selected filters on all nodes selected in the previous step.": "Uses the available disks that match the selected filters on all nodes selected in the previous step.", - "A LocalVolumeSet allows you to filter a set of disks, group them and create a dedicated StorageClass to consume storage from them.": "A LocalVolumeSet allows you to filter a set of disks, group them and create a dedicated StorageClass to consume storage from them.", - "OpenShift Container Storage's StorageCluster requires a minimum of 3 nodes for the initial deployment. Only {{nodes}} node match to the selected filters. Please adjust the filters to include more nodes.": "OpenShift Container Storage's StorageCluster requires a minimum of 3 nodes for the initial deployment. 
Only {{nodes}} node match to the selected filters. Please adjust the filters to include more nodes.", - "After the LocalVolumeSet and StorageClass are created you won't be able to go back to this step.": "After the LocalVolumeSet and StorageClass are created you won't be able to go back to this step.", - "Create StorageClass": "Create StorageClass", - "Selected Capacity": "Selected Capacity", - "Selected Nodes": "Selected Nodes", - "Review StorageCluster": "Review StorageCluster", - "Storage and nodes": "Storage and nodes", - "Arbiter zone:": "Arbiter zone:", - "None": "None", - "selected based on the created StorageClass:": "selected based on the created StorageClass:", - "Total CPU and memory of {{cpu, number}} CPU and {{memory}}": "Total CPU and memory of {{cpu, number}} CPU and {{memory}}", - "Configure": "Configure", - "Enable Encryption": "Enable Encryption", - "Connect to external key management service: {{name}}": "Connect to external key management service: {{name}}", - "Encryption Level: {{level}}": "Encryption Level: {{level}}", - "Using {{networkLabel}}": "Using {{networkLabel}}", - "Discover disks": "Discover disks", - "Review and create": "Review and create", - "Info Alert": "Info Alert", - "Internal - Attached devices": "Internal - Attached devices", - "Can be used on any platform where there are attached devices to the nodes, using the Local Storage Operator. The infrastructure StorageClass is provided by Local Storage Operator, on top of the attached drives.": "Can be used on any platform where there are attached devices to the nodes, using the Local Storage Operator. The infrastructure StorageClass is provided by Local Storage Operator, on top of the attached drives.", - "Before we can create a StorageCluster, the Local Storage operator needs to be installed. When installation is finished come back to OpenShift Container Storage to create a StorageCluster.<1><0>Install": "Before we can create a StorageCluster, the Local Storage operator needs to be installed. 
When installation is finished, come back to OpenShift Container Storage to create a StorageCluster.<1><0>Install", - "Node Table": "Node Table", - "StorageCluster exists": "StorageCluster exists", - "Back to operator page": "Back to operator page", - "Go to cluster page": "Go to cluster page", - "<0>A StorageCluster <1>{{clusterName}} already exists.<3>You cannot create another StorageCluster.": "<0>A StorageCluster <1>{{clusterName}} already exists.<3>You cannot create another StorageCluster.", - "Connect to external cluster": "Connect to external cluster", - "Download <1>{{SCRIPT_NAME}} script and run on the RHCS cluster, then upload the results (JSON) in the External cluster metadata field.": "Download <1>{{SCRIPT_NAME}} script and run on the RHCS cluster, then upload the results (JSON) in the External cluster metadata field.", - "Download Script": "Download Script", - "A bucket will be created to provide the OpenShift Data Foundation's Service.": "A bucket will be created to provide the OpenShift Data Foundation's Service.", - "Bucket created for OpenShift Container Storage's Service": "Bucket created for OpenShift Container Storage's Service", - "Create External StorageCluster": "Create External StorageCluster", - "External cluster metadata": "External cluster metadata", - "Upload JSON File": "Upload JSON File", - "Upload Credentials file": "Upload Credentials file", - "JSON data": "JSON data", - "Create Button": "Create Button", - "Create StorageCluster": "Create StorageCluster", - "OpenShift Container Storage runs as a cloud-native service for optimal integration with applications in need of storage and handles the scenes such as provisioning and management.": "OpenShift Container Storage runs as a cloud-native service for optimal integration with applications in need of storage and handles behind-the-scenes tasks such as provisioning and management.", - "Select mode:": "Select mode:", - "If not labeled, the selected nodes are labeled <1>{{label}} to make them target hosts for OpenShift Data Foundation's components.": "If not labeled, the selected nodes are labeled <1>{{label}} to make them target hosts for OpenShift Data Foundation's components.", - "Mark nodes as dedicated": "Mark nodes as dedicated", - "This will taint the nodes with the<1>key: node.ocs.openshift.io/storage, <4>value: true, and <7>effect: NoSchedule": "This will taint the nodes with the<1>key: node.ocs.openshift.io/storage, <4>value: true, and <7>effect: NoSchedule", - "Selected nodes will be dedicated to OpenShift Container Storage use only": "Selected nodes will be dedicated to OpenShift Container Storage use only", - "OpenShift Container Storage deployment in two data centers, with an arbiter node to settle quorum decisions.": "OpenShift Container Storage deployment in two data centers, with an arbiter node to settle quorum decisions.", - "To support high availability when two data centers can be used, enable arbiter to get the valid quorum between two data centers.": "To support high availability when two data centers can be used, enable arbiter to get the valid quorum between two data centers.", - "Select arbiter zone": "Select arbiter zone", - "Network": "Network", - "The default SDN networking uses a single network for all data operations such read/write and also for control plane, such as data replication. 
Multus allows a network separation between the data operations and the control plane operations.": "The default SDN networking uses a single network for all data operations such as read/write and also for control plane, such as data replication. Multus allows a network separation between the data operations and the control plane operations.", - "Default (SDN)": "Default (SDN)", - "Custom (Multus)": "Custom (Multus)", - "Public Network Interface": "Public Network Interface", - "Select a network": "Select a network", - "Cluster Network Interface": "Cluster Network Interface", - "Requested Cluster Capacity:": "Requested Cluster Capacity:", - "StorageClass:": "StorageClass:", - "Select Capacity": "Select Capacity", - "Requested Capacity": "Requested Capacity", - "Select Nodes": "Select Nodes", - "create internal mode StorageCluster wizard": "create internal mode StorageCluster wizard", - "Can be used on any platform, except bare metal. It means that OpenShift Container Storage uses an infrastructure StorageClass, provided by the hosting platform. For example, gp2 on AWS, thin on VMWare, etc.": "Can be used on any platform, except bare metal. It means that OpenShift Container Storage uses an infrastructure StorageClass, provided by the hosting platform. For example, gp2 on AWS, thin on VMWare, etc.", - "{{title}} steps": "{{title}} steps", - "{{title}} content": "{{title}} content", - "{{availableCapacity}} / {{replica}} replicas": "{{availableCapacity}} / {{replica}} replicas", - "Available capacity:": "Available capacity:", - "Filesystem name": "Filesystem name", - "Enter filesystem name": "Enter filesystem name", - "CephFS filesystem name into which the volume shall be created": "CephFS filesystem name into which the volume shall be created", - "no compression": "no compression", - "with compression": "with compression", - "Replica {{poolSize}} {{compressionText}}": "Replica {{poolSize}} {{compressionText}}", - "Create New Pool": "Create New Pool", - "Storage Pool": "Storage Pool", - "Select a Pool": "Select a Pool", - "Storage pool into which volume data shall be stored": "Storage pool into which volume data shall be stored", - "Error retrieving Parameters": "Error retrieving Parameters", - "my-storage-pool": "my-storage-pool", - "An encryption key will be generated for each PersistentVolume created using this StorageClass.": "An encryption key will be generated for each PersistentVolume created using this StorageClass.", - "Key service": "Key service", - "Select an existing connection": "Select an existing connection", - "KMS service {{value}} already exist": "KMS service {{value}} already exists", - "Choose existing KMS connection": "Choose existing KMS connection", - "Create new KMS connection": "Create new KMS connection", - "PV expansion operation is not supported for encrypted PVs.": "PV expansion operation is not supported for encrypted PVs.", - "Enable Thick Provisioning": "Enable Thick Provisioning", - "By enabling thick-provisioning, volumes will allocate the requested capacity upon volume creation. Volume creation will be slower when thick-provisioning is enabled.": "By enabling thick-provisioning, volumes will allocate the requested capacity upon volume creation. 
Volume creation will be slower when thick-provisioning is enabled.", - "{{resource}} details": "{{resource}} details", - "Kind": "Kind", - "Labels": "Labels", - "Last updated": "Last updated", - "Storage Systems": "Storage Systems", - "Used capacity": "Used capacity", - "Storage status represents the health status of {{operatorName}}'s StorageCluster.": "Storage status represents the health status of {{operatorName}}'s StorageCluster.", - "Health": "Health", - "Standard": "Standard", - "Data will be consumed by a Multi-cloud object gateway, deduped, compressed, and encrypted. The encrypted chunks would be saved on the selected BackingStores. Best used when the applications would always use the OpenShift Data Foundation endpoints to access the data.": "Data will be consumed by a Multi-cloud object gateway, deduped, compressed, and encrypted. The encrypted chunks would be saved on the selected BackingStores. Best used when the applications would always use the OpenShift Data Foundation endpoints to access the data.", - "Data is stored on the NamespaceStores without performing de-duplication, compression, or encryption. BucketClasses of namespace type allow connecting to existing data and serving from them. These are best used for existing data or when other applications (and cloud-native services) need to access the data from outside OpenShift Data Foundation.": "Data is stored on the NamespaceStores without performing de-duplication, compression, or encryption. BucketClasses of namespace type allow connecting to existing data and serving from them. These are best used for existing data or when other applications (and cloud-native services) need to access the data from outside OpenShift Data Foundation.", - "Single NamespaceStore": "Single NamespaceStore", - "The namespace bucket will read and write its data to a selected namespace store": "The namespace bucket will read and write its data to a selected namespace store", - "Multi NamespaceStores": "Multi NamespaceStores", - "The namespace bucket will serve reads from several selected backing stores, creating a virtual namespace on top of them and will write to one of those as its chosen write target": "The namespace bucket will serve reads from several selected backing stores, creating a virtual namespace on top of them and will write to one of those as its chosen write target", - "Cache NamespaceStore": "Cache NamespaceStore", - "The caching bucket will serve data from a large raw data out of a local caching tiering.": "The caching bucket will serve data from a large raw data source out of a local caching tier.", - "Create storage class": "Create storage class", - "Create local volume set": "Create local volume set", - "Logical used capacity per account": "Logical used capacity per account", - "Egress Per Provider": "Egress Per Provider", - "I/O Operations count": "I/O Operations count", - "The StorageClass used by OpenShift Data Foundation to write its data and metadata.": "The StorageClass used by OpenShift Data Foundation to write its data and metadata.", - "Infrastructure StorageClass created by Local Storage Operator and used by OpenShift Container Storage to write its data and metadata.": "Infrastructure StorageClass created by Local Storage Operator and used by OpenShift Container Storage to write its data and metadata.", - "The amount of capacity that would be dynamically allocated on the selected StorageClass.": "The amount of capacity that would be dynamically allocated on the selected StorageClass.", - "If you wish to use the Arbiter 
stretch cluster, a minimum of 4 nodes (2 different zones, 2 nodes per zone) and 1 additional zone with 1 node is required. All nodes must be pre-labeled with zones in order to be validated on cluster creation.": "If you wish to use the Arbiter stretch cluster, a minimum of 4 nodes (2 different zones, 2 nodes per zone) and 1 additional zone with 1 node is required. All nodes must be pre-labeled with zones in order to be validated on cluster creation.", - "Selected nodes are based on the StorageClass <1>{{scName}} and with a recommended requirement of 14 CPU and 34 GiB RAM per node.": "Selected nodes are based on the StorageClass <1>{{scName}} with a recommended requirement of 14 CPU and 34 GiB RAM per node.", - "Selected nodes are based on the StorageClass <1>{{scName}} and fulfill the stretch cluster requirements with a recommended requirement of 14 CPU and 34 GiB RAM per node.": "Selected nodes are based on the StorageClass <1>{{scName}} and fulfill the stretch cluster requirements with a recommended requirement of 14 CPU and 34 GiB RAM per node.", - "Loading...": "Loading...", - "Pool {{name}} creation in progress": "Pool {{name}} creation in progress", - "Pool {{name}} was successfully created": "Pool {{name}} was successfully created", - "An error occurred. Pool {{name}} was not created": "An error occurred. Pool {{name}} was not created", - "Pool {{name}} creation timed out. Please check if odf operator and rook operator are running": "Pool {{name}} creation timed out. Please check if odf operator and rook operator are running", - "The creation of a StorageCluster is still in progress or has failed. Try again after the StorageCuster is ready to use.": "The creation of a StorageCluster is still in progress or has failed. Try again after the StorageCluster is ready to use.", - "Pool management tasks are not supported for default pool and OpenShift Container Storage's external mode.": "Pool management tasks are not supported for default pool and OpenShift Container Storage's external mode.", - "Pool {{name}} was created with errors.": "Pool {{name}} was created with errors.", - "Delete": "Delete", - "StorageClasses": "StorageClasses", - "hr": "hr", - "min": "min", - "A minimal cluster deployment will be performed.": "A minimal cluster deployment will be performed.", - "The selected nodes do not match OpenShift Data Foundation's StorageCluster requirement of an aggregated 30 CPUs and 72 GiB of RAM. If the selection cannot be modified a minimal cluster will be deployed.": "The selected nodes do not match OpenShift Data Foundation's StorageCluster requirement of an aggregated 30 CPUs and 72 GiB of RAM. If the selection cannot be modified a minimal cluster will be deployed.", - "Back to nodes selection": "Back to nodes selection", - "Select a StorageClass to continue": "Select a StorageClass to continue", - "This is a required field. The StorageClass will be used to request storage from the underlying infrastructure to create the backing PersistentVolumes that will be used to provide the OpenShift Data Foundation service.": "This is a required field. The StorageClass will be used to request storage from the underlying infrastructure to create the backing PersistentVolumes that will be used to provide the OpenShift Data Foundation service.", - "Create new StorageClass": "Create new StorageClass", - "This is a required field. 
The StorageClass will be used to request storage from the underlying infrastructure to create the backing persistent volumes that will be used to provide the OpenShift Data Foundation service.": "This is a required field. The StorageClass will be used to request storage from the underlying infrastructure to create the backing persistent volumes that will be used to provide the OpenShift Data Foundation service.", - "All required fields are not set": "All required fields are not set", - "In order to create the StorageCluster you must set the StorageClass, select at least 3 nodes (preferably in 3 different zones) and meet the minimum or recommended requirement": "In order to create the StorageCluster you must set the StorageClass, select at least 3 nodes (preferably in 3 different zones) and meet the minimum or recommended requirement", - "The StorageCluster requires a minimum of 3 nodes for the initial deployment. Please choose a different StorageClass or go to create a new LocalVolumeSet that matches the minimum node requirement.": "The StorageCluster requires a minimum of 3 nodes for the initial deployment. Please choose a different StorageClass or go to create a new LocalVolumeSet that matches the minimum node requirement.", - "Create new volume set instance": "Create new volume set instance", - "Select at least 1 encryption level or disable encryption.": "Select at least 1 encryption level or disable encryption.", - "Fill out the details in order to connect to key management system": "Fill out the details in order to connect to key management system", - "This is a required field.": "This is a required field.", - "Both public and cluster network attachment definition cannot be empty": "Both public and cluster network attachment definition cannot be empty", - "A public or cluster network attachment definition must be selected to use Multus.": "A public or cluster network attachment definition must be selected to use Multus.", - "The number of selected zones is less than the minimum requirement of 3. If not modified a host-based failure domain deployment will be enforced.": "The number of selected zones is less than the minimum requirement of 3. 
If not modified a host-based failure domain deployment will be enforced.", - "When the nodes in the selected StorageClass are spread across fewer than 3 availability zones, the StorageCluster will be deployed with the host based failure domain.": "When the nodes in the selected StorageClass are spread across fewer than 3 availability zones, the StorageCluster will be deployed with the host based failure domain.", - "Cluster-Wide and StorageClass": "Cluster-Wide and StorageClass", - "Cluster-Wide": "Cluster-Wide", - "Select at least 2 Backing Store resources": "Select at least 2 Backing Store resources", - "Select at least 1 Backing Store resource": "Select at least 1 Backing Store resource", - "x {{replica}} replicas = {{osdSize, number}} TiB": "x {{replica}} replicas = {{osdSize, number}} TiB", - "SmallScale": "SmallScale", - "0.5 TiB": "0.5 TiB", - "2 TiB": "2 TiB", - "LargeScale": "LargeScale", - "4 TiB": "4 TiB", - "{{osdSize, number}} TiB": "{{osdSize, number}} TiB", - "Help": "Help", - "Object_plural": "Object_plural" -} \ No newline at end of file diff --git a/frontend/packages/ceph-storage-plugin/locales/en/console-shared.json b/frontend/packages/ceph-storage-plugin/locales/en/console-shared.json deleted file mode 100644 index dd4fe441f2fb..000000000000 --- a/frontend/packages/ceph-storage-plugin/locales/en/console-shared.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "View {{title}} metrics in query browser": "View {{title}} metrics in query browser", - "Not available": "Not available", - "{{humanAvailable}} available of {{humanLimit}} total limit": "{{humanAvailable}} available of {{humanLimit}} total limit", - "{{humanAvailable}} available of {{humanMax}}": "{{humanAvailable}} available of {{humanMax}}", - "{{humanAvailable}} available": "{{humanAvailable}} available" -} \ No newline at end of file diff --git a/frontend/packages/ceph-storage-plugin/locales/ja/ceph-storage-plugin.json b/frontend/packages/ceph-storage-plugin/locales/ja/ceph-storage-plugin.json deleted file mode 100644 index 196a78e5fd3f..000000000000 --- a/frontend/packages/ceph-storage-plugin/locales/ja/ceph-storage-plugin.json +++ /dev/null @@ -1,757 +0,0 @@ -{ - "Add Capacity": "容量の追加", - "Edit BlockPool": "BlockPool の編集", - "Edit Bucket Class Resources": "バケットクラスリソースの編集", - "ObjectBucketClaim": "ObjectBucketClaim", - "Use existing claim": "既存の要求の使用", - "Select claim": "要求の選択", - "Create new claim": "新規要求の作成", - "Create": "作成", - "Cancel": "キャンセル", - "Overview": "概要", - "StorageSystems": "StorageSystem", - "StorageSystem details": "StorageSystem の詳細", - "Enabled": "有効化", - "Disabled": "無効", - "Last synced": "最終同期", - "Default pool cannot be deleted": "デフォルトのプールは削除できません", - "BlockPool List": "BlockPool の一覧", - "Delete BlockPool": "BlockPool の削除", - "{{replica}} Replication": "{{replica}} レプリケーション", - "Pool name": "プール名", - "my-block-pool": "my-block-pool", - "pool-name-help": "pool-name-help", - "Data protection policy": "データ保護ポリシー", - "Select replication": "レプリケーションの選択", - "Volume type": "ボリュームタイプ", - "Select volume type": "ボリュームモードの選択", - "Compression": "圧縮", - "Enable compression": "圧縮を有効にする", - "Enabling compression may result in little or no space savings for encrypted or random data. Also, enabling compression may have an impact on I/O performance.": "圧縮を有効にすると、暗号化データやランダムなデータ用に確保される領域がほぼなくなることがあります。また、圧縮を有効にすると、I/O パフォーマンスに影響する可能性があります。", - "OpenShift Data Foundation's StorageCluster is not available. 
Try again after the StorageCluster is ready to use.": "OpenShift Data Foundation の StorageCluster は利用できません。StorageCluster を使用する準備ができたら再試行してください。", - "Create BlockPool": "BlockPool の作成", - "Close": "閉じる", - "Pool creation is not supported for OpenShift Data Foundation's external RHCS StorageSystem.": "プールの作成は、OpenShift Data Foundation の外部 RHCS StorageSystem ではサポートされません。", - "A BlockPool is a logical entity providing elastic capacity to applications and workloads. Pools provide a means of supporting policies for access data resilience and storage efficiency.": "BlockPool は、アプリケーションとワークロードに柔軟性の高い容量を提供する論理エンティティーです。プールは、データアクセスの耐障害性およびストレージ効率を確保するためのポリシーをサポートする手段を提供します。", - "BlockPool Creation Form": "BlockPool の作成フォーム", - "Name": "名前", - "Bucket Name": "バケット名", - "Type": "タイプ", - "Region": "リージョン", - "BackingStore Table": "BackingStore テーブル", - "Each BackingStore can be used for one tier at a time. Selecting a BackingStore in one tier will remove the resource from the second tier option and vice versa.": "各 BackingStore は一度に 1 つの層に使用できます。ある層で BackingStore を選択すると、2 番目の層のオプションからリソースが削除されます (その逆も同様です)。", - "Bucket created for OpenShift Data Foundation's Service": "OpenShift Data Foundation サービス用に作成されるバケット", - "Tier 1 - BackingStores": "階層 1 - BackingStore", - "Create BackingStore ": "BackingStore の作成 ", - "Tier-1-Table": "階層 1 テーブル", - "{{bs, number}} BackingStore_one": "BackingStore {{bs, number}} 個", - "{{bs, number}} BackingStore_other": "BackingStore {{bs, number}} 個", - "selected": "選択済み", - "Tier 2 - BackingStores": "階層 2 - BackingStore", - "Tier-2-Table": "階層 2 テーブル", - "General": "一般", - "Placement Policy": "配置ポリシー", - "Resources": "リソース", - "Review": "確認", - "Create BucketClass": "BucketClass の作成", - "Create new BucketClass": "新規 BucketClass の作成", - "BucketClass is a CRD representing a class for buckets that defines tiering policies and data placements for an OBC.": "BucketClass は、OBC の階層ポリシーおよびデータ配置を定義するバケットのクラスを表す CRD です。", - "Next": "次へ", - "Back": "戻る", - "Edit BucketClass Resource": "BucketClass リソースの編集", - "{{storeType}} represents a storage target to be used as the underlying storage for the data in Multicloud Object Gateway buckets.": "{{storeType}} は、Multicloud Object Gateway バケットでデータの基礎となるストレージとして使用されるストレージターゲットを表します。", - "Cancel ": "キャンセル ", - "Save": "保存", - "What is a BackingStore?": "BackingStore とは", - "BackingStore represents a storage target to be used as the underlying storage for the data in Multicloud Object Gateway buckets.": "BackingStore は、Multicloud Object Gateway バケットでデータの基礎となるストレージとして使用されるストレージターゲットを表します。", - "Multiple types of BackingStores are supported: asws-s3 s3-compatible google-cloud-storage azure-blob obc PVC.": "複数のタイプの BackingStore がサポートされます (asws-s3 s3 と互換性のある google-cloud-storage azure-blob obc PVC)。", - "Learn More": "詳細を参照してください", - "What is a BucketClass?": "BucketClass とは", - "A set of policies which would apply to all buckets (OBCs) created with the specific bucket class. 
These policies include placement, namespace and caching": "特定のバケットクラスで作成されるすべてのバケット (OBC) に適用されるポリシーセット。これらのポリシーには配置、namespace、およびキャッシュが含まれます", - "BucketClass type": "BucketClass のタイプ", - "3-63 chars": "3-63 文字", - "Starts and ends with lowercase number or letter": "小文字の数字または文字で開始し、終了します", - "Only lowercase letters, numbers, non-consecutive periods or hyphens": "小文字、数字、連続しないピリオドまたはハイフンのみ", - "Avoid using the form of an IP address": "IP アドレスの形式は使用しないでください", - "Globally unique name": "グローバルに一意の名前", - "BucketClass name": "BucketClass 名", - "A unique name for the bucket class within the project.": "プロジェクト内のバケットクラスの一意の名前。", - "my-multi-cloud-mirror": "my-multi-cloud-mirror", - "BucketClass Name": "BucketClass 名", - "Description (Optional)": "説明 (オプション)", - "Description of bucket class": "バケットクラスの説明", - "What is a Namespace Policy?": "namespace ポリシーとは", - "Namespace policy can be set to one single read and write source, multi read sources or cached policy.": "namespace ポリシーは、単一の読み取り/書き込みソース、複数の読み取りソース、またはキャッシュポリシーに設定できます。", - "Namespace Policy Type": "namespace ポリシータイプ", - "What is Caching?": "キャッシングとは", - "Caching is a policy that creates local copies of the data. It saves the copies locally to improve performance for frequently accessed data. Each cached copy has a TTL and is verified against the hub. Each non-read operation (upload, overwrite, delete) is performed on the hub": "キャッシュは、データのローカルコピーを作成するポリシーです。これは、頻繁にアクセスされるデータのパフォーマンスを強化するためにコピーをローカルに保存します。キャッシュされる各コピーには TTL があり、ハブに対して検証されます。それぞれの読み取り以外の操作 (アップロード、上書き、削除) はハブで実行されます", - "Hub namespace store ": "ハブの namespace ストア ", - "A single NamespaceStore that defines the read and write target of the namespace bucket.": "namespace バケットの読み取りおよび書き込みターゲットを定義する単一の NamespaceStore。", - "NamespaceStore": "NamespaceStore", - "Cache data settings": "キャッシュデータの設定", - "The data will be temporarily copied on a backing store in order to later access it much more quickly.": "後により迅速にアクセスできるように、データはバッキングストアに一時的にコピーされます。", - "Backing store": "バッキングストア", - "a local backing store is recommended for better performance": "パフォーマンスを強化するために、ローカルバッキングストアの使用が推奨されます", - "Time to live": "有効期間 (Time to live)", - "Time to live is the time that an object is stored in a caching system before it is deleted or refreshed. 
Default: 0, Max: 24 hrs": "有効期間 (Time to live) は、オブジェクトが削除または更新される前にキャッシュシステムに保存される時間です。デフォルト: 0、最大: 24 時間", - "Read NamespaceStores": "NamespaceStore の読み取り", - "Select a list of NamespaceStores that defines the read targets of the namespace bucket.": "namespace バケットの読み取りターゲットを定義する NamespaceStore の一覧を選択します。", - "Create NamespaceStore": "NamespaceStore の作成", - "{{nns, number}} namespace store_one": "namespace ストア {{nns, number}} 個", - "{{nns, number}} namespace store_other": "namespace ストア {{nns, number}} 個", - " selected": " 選択済み", - "Write NamespaceStore": "NamespaceStore ストアへの書き込み", - "Select a single NamespaceStore that defines the write targets of the namespace bucket.": "namespace バケットの書き込みターゲットを定義する NamespaceStoreを 1 つ選択します。", - "Read and Write NamespaceStore ": "NamespaceStore の読み取りと書き込み ", - "Select one NamespaceStore which defines the read and write targets of the namespace bucket.": "namespace バケットの読み取りおよび書き込みターゲットを定義する NamespaceStore を 1 つ選択します。", - "What is a Placement Policy?": "配置ポリシーとは", - "Data placement capabilities are built as a multi-layer structure here are the layers bottom-up:": "データ配置機能は複数レイヤー構造としてビルドされ、階層はボトムアップで展開されます", - "Spread Tier - list of BackingStores aggregates the storage of multiple stores.": "分散階層: 複数ストアの保管場所を集約する BackingStore の一覧。", - "Mirroring Tier - list of spread-layers async-mirroring to all mirrors with locality optimization (will allocate on the closest region to the source endpoint). Mirroring requires at least two BackingStores.": "ミラーリング階層: ローカリティーの最適化 (ソースエンドポイントに最も近いリージョンでの割り当てを行う) によるすべてのミラーへの非同期のミラーリングを行う分散階層の一覧。ミラーリングには、2 つ以上の BackingStore が必要です。", - "The number of replicas can be configured via the NooBaa management console.": "レプリカの数は NooBaa 管理コンソールで設定できます。", - "Tier 1 - Policy Type": "階層 1 - ポリシータイプ", - "Spread": "分散", - "Spreading the data across the chosen resources. By default a replica of one copy is used and does not include failure tolerance in case of resource failure.": "選択されたリソース全体にデータを分散します。デフォルトでは、1 つのコピーのレプリカが使用され、これにはリソース障害の発生時の障害耐性は組み込まれません。", - "Mirror": "ミラー", - "Full duplication of the data in each chosen resource. By default a replica of one copy per location is used. 
Includes failure tolerance in case of resource failure.": "選択された各リソースのデータの完全な複製。デフォルトでは、場所ごとに 1 つのコピーのレプリカが使用されます。これには、リソース障害の発生時の障害耐性が組み込まれます。", - "Add Tier": "階層の追加", - "Tier 2 - Policy type": "階層 2 - ポリシータイプ", - "Remove Tier": "階層の削除", - "Spreading the data across the chosen resources does not include failure tolerance in case of resource failure.": "選択されたリソース全体へのデータの分散には、リソース障害の発生時の障害耐性は組み込まれません。", - "Full duplication of the data in each chosen resource includes failure tolerance in cause of resource failure.": "選択された各リソースのデータの完全な複製には、リソース障害による障害耐性が組み込まれます。", - "Namespace Policy: ": "namespace ポリシー: ", - "Read and write NamespaceStore : ": "NamespaceStore の読み取りと書き込み:", - "Hub namespace store: ": "ハブの namespace ストア: ", - "Cache backing store: ": "バッキングストアのキャッシュ: ", - "Time to live: ": "有効期間 (Time to live): ", - "Resources ": "リソース ", - "Selected read namespace stores: ": "選択された読み取り namespace ストア: ", - "Selected write namespace store: ": "選択された書き込み namespace ストア: ", - "Placement policy details ": "配置ポリシーの詳細 ", - "Tier 1: ": "階層 1: ", - "Selected BackingStores": "選択された BackingStore", - "Tier 2: ": "階層 2: ", - "Review BucketClass": "BucketClass の確認", - "BucketClass type: ": "BucketClass のタイプ: ", - "BucketClass name: ": "BucketClass 名: ", - "Description: ": "説明: ", - "Provider {{provider}}": "プロバイダー {{provider}}", - "Create new BackingStore ": "新規 BackingStore の作成 ", - "An error has occured while fetching backing stores": "バッキングストアの取得中にエラーが発生しました", - "Select a backing store": "バッキングストアの選択", - "Storage targets that are used to store chunks of data on Multicloud Object Gateway buckets.": "Multicloud Object Gateway バケットのデータのブロックを保存するために使用されるストレージターゲット。", - "A BackingStore represents a storage target to be used as the underlying storage layer in Multicloud Object Gateway buckets.": "BackingStore は、Multicloud Object Gateway バケットで基礎となるストレージ階層として使用されるストレージターゲットを表します。", - "Multiple types of BackingStores are supported: AWS S3 S3 Compatible Google Cloud Storage Azure Blob PVC.": "複数のタイプの BackingStore がサポートされます (AWS S3、S3 と互換性のある Google Cloud Storage、Azure Blob PVC)。", - "BackingStore Name": "BackingStore 名", - "A unique name for the BackingStore within the project": "プロジェクト内の BackingStore の一意の名前", - "Name can contain a max of 43 characters": "名前には最大 43 文字を含めることができます", - "Provider": "プロバイダー", - "Create BackingStore": "BackingStore の作成", - "This is an Advanced subscription feature. It requires Advanced Edition subscription. Please contact the account team for more information.": "これは高度なサブスクリプション機能です。これには Advanced Edition サブスクリプションが必要になります。詳細は、アカウントチームにお問い合わせください。", - "Advanced Subscription": "高度な Subscription", - "Storage platform": "ストレージプラットフォーム", - "Select a storage platform you wish to connect": "接続するストレージプラットフォームの選択", - "Select external system from list": "一覧からの外部システムの選択", - "Backing storage type": "バッキングストレージのタイプ", - "Use an existing StorageClass": "既存の StorageClass の使用", - "OpenShift Data Foundation will use an existing StorageClass available on your hosting platform.": "OpenShift Data Foundation は、ホストプラットフォームで利用可能な、既存の StorageClass を使用します。", - "Create a new StorageClass using local storage devices": "ローカルデバイスを使用した新規 StorageClass の作成", - "OpenShift Data Foundation will use a StorageClass provided by the Local Storage Operator (LSO) on top of your attached drives. 
This option is available on any platform with devices attached to nodes.": "OpenShift Data Foundation は、アタッチされたドライブの上層にあるローカルストレージ Operator (LSO) に同梱される StorageClass を使用します。このオプションは、ノードにアタッチされたデバイスがプラットフォームにある場合に利用できます。", - "Connect an external storage platform": "外部ストレージプラットフォームの接続", - "OpenShift Data Foundation will create a dedicated StorageClass.": "OpenShift Data Foundation は専用の StorageClass を作成します。", - "Deploys MultiCloud Object Gateway without block and file services.": "ブロックサービスおよびファイルサービスなしで MultiCloud Object Gateway をデプロイします。", - "Deploys OpenShift Data Foundation with block, shared fileSystem and object services.": "ブロックサービス、共有ファイルシステムサービス、およびオブジェクトサービスと共に OpenShift Data Foundation をデプロイします。", - "Deployment type": "Deployment タイプ", - "Taint nodes": "テイントノード", - "Selected nodes will be dedicated to OpenShift Data Foundation use only": "選択されたノードは OpenShift Data Foundation のみが使用します", - "Select capacity": "容量の選択", - "Requested capacity": "要求された容量", - "Select nodes": "ノードの選択", - "Select at least 3 nodes preferably in 3 different zones. It is recommended to start with at least 14 CPUs and 34 GiB per node.": "3 つ以上のノードを (可能であれば) 3 つの異なるゾーンで選択します。ノードごとに最低でも 14 の CPU および 34 GiB のメモリーから始めることが推奨されます。", - "PersistentVolumes are being provisioned on the selected nodes.": "PersistentVolume は選択したノードでプロビジョニングされます。", - "Error while loading PersistentVolumes.": "PersistentVolume の読み込み中にエラーが発生しました。", - "Selected capacity": "選択された容量", - "Available raw capacity": "利用可能な Raw 容量", - "The available capacity is based on all attached disks associated with the selected StorageClass <2>{{storageClassName}}": "利用可能な容量は、選択した StorageClass の <2>{{storageClassName}} に関連付けられたすべての割り当て済みディスクに基づいています", - "Selected nodes": "選択されたノード", - "Role": "Role", - "CPU": "CPU", - "Memory": "メモリー", - "Zone": "ゾーン", - "Selected nodes table": "選択されたノードのテーブル", - "To support high availability when two data centers can be used, enable arbiter to get a valid quorum between the two data centers.": "2 つのデータセンターを使用できる場合に高可用性をサポートするには、Arbiter が 2 つのデータセンター間で有効なクォーラム (定足数) を取得できるようにします。", - "Arbiter minimum requirements": "Arbiter の最小要件", - "Stretch Cluster": "クラスターの展開", - "Enable arbiter": "Arbiter の有効化", - "Arbiter zone": "Arbiter ゾーン", - "An arbiter node will be automatically selected from this zone": "Arbiter ノードがこのゾーンから自動的に選択されます", - "Select an arbiter zone": "Arbiter ゾーンの選択", - "Arbiter zone selection": "Arbiter ゾーンの選択", - "Connection details": "接続の詳細", - "Disks on all nodes": "すべてのノードのディスク", - "{{nodes, number}} node_one": "ノード {{nodes, number}} 個", - "{{nodes, number}} node_other": "ノード {{nodes, number}} 個", - "Please enter a positive Integer": "正の整数を入力してください", - "LocalVolumeSet name": "LocalVolumeSet 名", - "A LocalVolumeSet will be created to allow you to filter a set of disks, group them and create a dedicated StorageClass to consume storage from them.": "LocalVolumeSet が作成され、ディスクセットのフィルターやグループ化だけでなく、専用の StorageClass を作成してそこからストレージを使用できます。", - "StorageClass name": "StorageClass 名", - "Filter disks by": "ディスクのフィルター", - "Uses the available disks that match the selected filters on all nodes.": "全ノードに対して選択したフィルターと一致し、利用可能なディスクを使用します。", - "Disks on selected nodes": "選択されたノードのディスク", - "Uses the available disks that match the selected filters only on selected nodes.": "選択されたノードのみの選択されたフィルターに一致する利用可能なディスクを使用します。", - "Disk type": "ディスクタイプ", - "Advanced": "詳細", - "Volume mode": "ボリュームモード", - "Device type": "デバイスタイプ", - "Select disk types": "ディスクタイプの選択", - "Disk size": "ディスクサイズ", - "Minimum": "最小", - "Please enter a value less than or 
equal to max disk size": "最大ディスクサイズと同じか、それ以下の値を入力してください", - "Maximum": "最大", - "Please enter a value greater than or equal to min disk size": "最小ディスクサイズと同じか、それ以上の値を入力してください", - "Units": "単位", - "Maximum disks limit": "最大ディスク制限", - "Disks limit will set the maximum number of PVs to create on a node. If the field is empty we will create PVs for all available disks on the matching nodes.": "ディスク制限は、ノードで作成する PV の最大数を設定します。このフィールドが空の場合、一致するノードで利用可能なすべてのディスクについて PV を作成します。", - "All": "すべて", - "Local Storage Operator not installed": "ローカルストレージ Operator がインストールされていません", - "Before we can create a StorageSystem, the Local Storage Operator needs to be installed. When installation is finished come back to OpenShift Data Foundation to create a StorageSystem.<1><0>Install": "StorageSystem を作成する前に、ローカルストレージ Operator のインストールが必要になります。インストールの完了後に OpenShift Data Foundation に戻り、StorageSystem を作成します。<1><0>Install", - "Checking Local Storage Operator installation": "ローカルストレージ Operator がインストールされているかの確認", - "Discovering disks on all hosts. This may take a few minutes.": "全ホストでディスクを検索中です。これには数分かかる場合があります。", - "Minimum Node Requirement": "最小の Node 要件", - "A minimum of 3 nodes are required for the initial deployment. Only {{nodes}} node match to the selected filters. Please adjust the filters to include more nodes.": "初期デプロイメントには最低でも 3 ノードが必要です。{{nodes}} ノードのみが選択されたフィルターに一致します。ノードを追加するにはフィルターを調整してください。", - "After the LocalVolumeSet is created you won't be able to edit it.": "LocalVolumeSet の作成後は、編集することができません。", - "Note:": "注:", - "Create LocalVolumeSet": "LocalVolumeSet の作成", - "Yes": "Yes", - "Are you sure you want to continue?": "続行してよいですか?", - "Node": "ノード", - "Model": "モデル", - "Capacity": "容量", - "Selected Disks": "選択されたディスク", - "Disk List": "ディスク一覧", - "{{nodes, number}} Node_one": "ノード {{nodes, number}} 個", - "{{nodes, number}} Node_other": "ノード {{nodes, number}} 個", - "{{disks, number}} Disk_one": "ディスク {{disks, number}} 個", - "{{disks, number}} Disk_other": "ディスク {{disks, number}} 個", - "Selected versus Available Capacity": "選択された容量 vs. 
利用可能な容量", - "Out of {{capacity}}": "/ {{capacity}}", - "{{displayName}} connection details": "{{displayName}} の接続の詳細", - "Not connected": "未接続", - "Backing storage": "バッキングストレージ", - "Deployment type: {{deployment}}": "Deployment タイプ: {{deployment}}", - "Backing storage type: {{name}}": "バッキングストレージタイプ: {{name}}", - "External storage platform: {{storagePlatform}}": "外部ストレージプラットフォーム: {{storagePlatform}}", - "Capacity and nodes": "容量およびノード", - "Cluster capacity: {{capacity}}": "クラスター容量: {{capacity}}", - "Selected nodes: {{nodeCount, number}} node_one": "選択されたノード: ノード {{nodeCount, number}} 個", - "Selected nodes: {{nodeCount, number}} node_other": "選択されたノード: ノード {{nodeCount, number}} 個", - "CPU and memory: {{cpu, number}} CPU and {{memory}} memory": "CPU およびメモリー: CPU {{cpu, number}} 個およびメモリー {{memory}}", - "Zone: {{zoneCount, number}} zone_one": "ゾーン: ゾーン {{zoneCount, number}} 個", - "Zone: {{zoneCount, number}} zone_other": "ゾーン: ゾーン {{zoneCount, number}} 個", - "Arbiter zone: {{zone}}": "Arbiter ゾーン: {{zone}}", - "Taint nodes: {{ocsTaintsStatus}}": "テイントノード: {{ocsTaintsStatus}}", - "Security": "セキュリティー", - "Encryption: Enabled": "暗号化: 有効", - "External key management service: {{kmsStatus}}": "外部キー管理サービス: {{kmsStatus}}", - "Security and network": "セキュリティーおよびネットワーク", - "Encryption: {{encryptionStatus}}": "暗号化: {{encryptionStatus}}", - "Network: {{networkType}}": "ネットワーク: {{networkType}}", - "Encryption level": "暗号化レベル", - "The StorageCluster encryption level can be set to include all components under the cluster (including StorageClass and PVs) or to include only StorageClass encryption. PV encryption can use an auth token that will be used with the KMS configuration to allow multi-tenancy.": "StorageCluster の暗号化レベルは、クラスターの下にあるすべてのコンポーネント (StorageClass および PV を含む) を組み込むか、または StorageClass の暗号化のみを組み込むように設定できます。PV 暗号化は、KMS 設定と共に使用する認証トークンを使用して、マルチテナンシーを許可できます。", - "Cluster-wide encryption": "クラスター全体の暗号化", - "Encryption for the entire cluster (block and file)": "クラスター全体の暗号化 (ブロックおよびファイル)", - "StorageClass encryption": "StorageClass の暗号化", - "An encryption key will be generated for each persistent volume (block) created using an encryption enabled StorageClass.": "暗号化キーは、暗号化が有効な StorageClass を使用して作成される永続ボリューム (ブロックのみ) ごとに生成されます。", - "Connection settings": "接続設定", - "Connect to an external key management service": "外部キー管理サービスへの接続", - "Data encryption for block and file storage. 
MultiCloud Object Gateway is always encrypted.": "ブロックおよびファイルストレージのデータ暗号化。MultiCloud Object Gateway は常に暗号化されます。", - "MultiCloud Object Gateway is always encrypted.": "Multicloud Object Gateway は常に暗号化されます。", - "Enable data encryption for block and file storage": "ブロックおよびファイルストレージのデータ暗号化を有効にする", - "Enable encryption": "暗号の有効化", - "Encryption": "暗号化", - "An error has occurred: {{error}}": "エラーが発生しました: {{error}}", - "IP address": "IP アドレス", - "Rest API IP address of IBM FlashSystem.": "IBM FlashSystem の REST API IP アドレス。", - "The endpoint is not a valid IP address": "このエンドポイントは有効な IP アドレスではありません", - "Username": "ユーザー名", - "Password": "パスワード", - "Hide password": "パスワードの非表示", - "Reveal password": "パスワードの表示", - "The uploaded file is not a valid JSON file": "アップロードされたファイルは有効な JSON ファイルではありません", - "External storage system metadata": "外部ストレージシステムのメタデータ", - "Download <1>{{SCRIPT_NAME}} script and run on the RHCS cluster, then upload the results (JSON) in the External storage system metadata field.": "<1>{{SCRIPT_NAME}} スクリプトをダウンロードして RHCS クラスターで実行します。次に、外部ストレージシステムのメタデータフィールドに結果 (JSON) をアップロードします。", - "Download script": "スクリプトのダウンロード", - "Browse": "参照", - "Clear": "消去", - "Upload helper script": "ヘルパースクリプトのアップロード", - "An error has occurred": "エラーが発生しました", - "Create StorageSystem": "StorageSystem の作成", - "Create a StorageSystem to represent your OpenShift Data Foundation system and all its required storage and computing resources.": "StorageSystem を作成し、OpenShift Data Foundation システムとその必要なストレージおよびコンピューティングリソースすべてを表します。", - "{{nodeCount, number}} node_one": "ノード {{nodeCount, number}} 個", - "{{nodeCount, number}} node_other": "{{nodeCount, number}} 個", - "selected ({{cpu}} CPU and {{memory}} on ": "選択済み ({{cpu}} CPU および {{memory}} ", - "{{zoneCount, number}} zone_one": "ゾーン {{zoneCount, number}} 個", - "{{zoneCount, number}} zone_other": "ゾーン {{zoneCount, number}} 個", - "Search by node name...": "ノード名で検索...", - "Search by node label...": "ノードラベルで検索...", - "Not found": "見つかりません", - "Compression eligibility": "圧縮の適格性", - "Compression eligibility indicates the percentage of incoming data that is compressible": "圧縮の適格性では、圧縮可能な受信データの割合を示します", - "Compression savings": "圧縮による節約", - "Compression savings indicates the total savings gained from compression for this pool, including replicas": "圧縮による節約では、レプリカなど、このプールの圧縮から節約できた容量の合計を表します", - "Compression ratio": "圧縮比率", - "Compression ratio indicates the achieved compression on eligible data for this pool": "圧縮比率は、このプールで適格なデータで実現可能な圧縮率を表します", - "Compression status": "圧縮ステータス", - "Storage efficiency": "ストレージの効率性", - "Details": "詳細", - "Replicas": "レプリカ", - "Inventory": "インベントリー", - "Not available": "利用不可", - "Image states info": "イメージの状態情報", - "What does each state mean?": "各状態の意味", - "<0>Starting replay: Initiating image (PV) replication process.": "<0>再生の開始: イメージ (PV) のレプリケーションプロセスを開始します。", - "<0>Replaying: Image (PV) replication is ongoing or idle between clusters.": "<0>再生: イメージ (PV) のレプリケーションは、クラスター間で処理中またはアイドル状態になります。", - "<0>Stopping replay: Image (PV) replication process is shutting down.": "<0>再生の停止: イメージ (PV) のレプリケーションプロセスがシャットダウンされます。", - "<0>Stopped: Image (PV) replication process has shut down.": "<0>停止中: イメージ (PV) のレプリケーションプロセスがシャットダウンされました。", - "<0>Error: Image (PV) replication process stopped due to an error.": "<0>エラー: エラーが原因で、イメージ (PV) のレプリケーションプロセスが停止しました。", - "<0>Unknown: Unable to determine image (PV) state due to an error. 
Check your network connection and remote cluster mirroring daemon.": "<0>不明: エラーにより、イメージ (PV) の状態を判別できませんでした。ネットワーク接続およびリモートクラスターミラーリングデーモンを確認してください。", - "image states info": "イメージの状態情報", - "Image States": "イメージの状態", - "Mirroring": "ミラーリング", - "Mirroring status": "ミラーリングステータス", - "Overall image health": "全体的なイメージの正常性", - "Show image states": "イメージの状態表示", - "Last checked": "最終確認日時", - "Raw Capacity shows the total physical capacity from all storage media within the storage subsystem": "Raw 容量は、ストレージサブシステム内のすべてのストレージメディアからの合計物理容量を表示します", - "Start replay": "再生の開始", - "Stop reply": "再生の停止", - "Replaying": "再生", - "Stopped": "停止中", - "Error": "エラー", - "Syncing": "同期中", - "Unknown": "不明", - "Status": "ステータス", - "Performance": "パフォーマンス", - "IOPS": "IOPS", - "Throughput": "スループット", - "Not enough usage data": "使用データが十分にありません", - "used": "使用済み", - "available": "利用可能", - "Other": "その他", - "All other capacity usage that are not a part of the top 5 consumers.": "上位 5 コンシューマーの一部ではないその他すべての容量の使用量。", - "Available": "利用可能", - "Breakdown Chart": "内訳図", - "Warning": "警告", - "Raw capacity": "RAW 容量", - "Used": "使用済み", - "Available versus Used Capacity": "利用可能な容量 vs. 使用済みの容量", - "Used of {{capacity}}": "{{capacity}} の使用済み分", - "Not Available": "利用不可", - "Rebuilding data resiliency": "データ回復性の再構築", - "{{formattedProgress, number}}%": "{{formattedProgress, number}}%", - "Activity": "アクティビティー", - "Estimating {{formattedEta}} to completion": "{{formattedEta}} の完了までの予測", - "Object_one": "オブジェクト", - "Object_other": "オブジェクト", - "Buckets": "バケット", - "Buckets card represents the number of S3 buckets managed on Multicloud Object Gateway and the number of ObjectBucketClaims and the ObjectBuckets managed on both Multicloud Object Gateway and RGW (if deployed).": "バケットカードは、Multicloud Object Gateway で管理される S3 バケットの数と、Multicloud Object Gateway および RGW (デプロイされている場合) の両方で管理される ObjectBucketClaim と ObjectBucket の数を表します。", - "NooBaa Bucket": "NooBaa バケット", - "Break by": "Break by", - "Total": "合計", - "Projects": "プロジェクト", - "BucketClasses": "BucketClass", - "Service type": "Service タイプ", - "Cluster-wide": "クラスター全体", - "Any NON Object bucket claims that were created via an S3 client or via the NooBaa UI system.": "S3 クライアントまたは NooBaa UI システムで作成されたオブジェクトバケット要求以外の要求。", - "Capacity breakdown": "容量の内訳", - "This card shows used capacity for different resources. The available capacity is based on cloud services therefore it cannot be shown.": "このカードには、異なるリソースについて使用される容量が表示されます。利用可能な容量はクラウドサービスをベースとするため、表示されません。", - "Type: {{serviceType}}": "タイプ: {{serviceType}}", - "Service Type Dropdown": "Service タイプのドロップダウン", - "Service Type Dropdown Toggle": "Service タイプのドロップダウンのトグル", - "By: {{serviceType}}": ": {{serviceType}}", - "Break By Dropdown": "Break By ドロップダウン", - "Providers": "プロバイダー", - "Accounts": "アカウント", - "Metric": "メトリクス", - "I/O Operations": "I/O 操作", - "Logical Used Capacity": "論理使用容量", - "Physical vs. Logical used capacity": "物理 vs. 
論理使用容量", - "Egress": "Egress", - "Latency": "レイテンシー", - "Bandwidth": "帯域幅", - "Service Type": "Service タイプ", - "Type: {{selectedService}}": "タイプ: {{selectedService}}", - "{{selectedMetric}} by {{selectedBreakdown}}": "{{selectedMetric}} ({{selectedBreakdown}} 別)", - "thousands": "千", - "millions": "百万", - "billions": "十億", - "Total Reads {{totalRead}}": "合計読み取り {{totalRead}}", - "Total Writes {{totalWrite}}": "合計書き込み {{totalWrite}}", - "Total Logical Used Capacity {{logicalCapacity}}": "合計論理使用容量 {{logicalCapacity}}", - "Total Physical Used Capacity {{physicalcapacity}}": "合計物理使用容量 {{physicalcapacity}}", - "Shows an overview of the data consumption per provider or account collected from the day of the entity creation.": "エンティティーの作成時から収集されるプロバイダーまたはアカウントごとのデータ消費の概要を示します。", - "(in {{suffixLabel}})": "({{suffixLabel}})", - "Data Consumption Graph": "データ使用率のグラフ", - "GET {{GETLatestValue}}": "GET {{GETLatestValue}}", - "PUT {{PUTLatestValue}}": "PUT {{PUTLatestValue}}", - "OpenShift Data Foundation": "OpenShift Data Foundation", - "OpenShift Container Storage": "OpenShift Container Storage", - "Service name": "Service 名", - "System name": "システム名", - "Multicloud Object Gateway": "Multicloud Object Gateway", - "RADOS Object Gateway": "RADOS Object Gateway", - "Version": "バージョン", - "Resource Providers": "リソースプロバイダー", - "A list of all Multicloud Object Gateway resources that are currently in use. Those resources are used to store data according to the buckets' policies and can be a cloud-based resource or a bare metal resource.": "現在使用中のすべての Multicloud Object Gateway リソースの一覧。これらのリソースは、バケットポリシーに従ってデータを保存するために使用されます。これらは、クラウドベースのリソースまたはベアメタルリソースです。", - "Object Service": "オブジェクト Service", - "Data Resiliency": "データ回復性", - "Object Service Status": "オブジェクト Service のステータス", - "The object service includes 2 services.": "オブジェクトサービスには 2 つのサービスが含まれます。", - "The data resiliency includes 2 services": "データ回復性には 2 つのサービスが含まれます", - "Services": "Service", - "Object Gateway (RGW)": "Object Gateway (RGW)", - "All resources are unhealthy": "すべてのリソースが正常ではありません", - "Object Bucket has an issue": "オブジェクトバケットに問題があります", - "Many buckets have issues": "数多くのバケットに問題があります", - "Some buckets have issues": "一部のバケットに問題があります", - "{{capacityRatio, number}}:1": "{{capacityRatio, number}}:1", - "OpenShift Data Foundation can be configured to use compression. The efficiency rate reflects the actual compression ratio when using such a configuration.": "OpenShift Container Storage は圧縮を使用するように設定できます。圧縮効率は、この設定を使用する際の実際の圧縮率を反映しています。", - "Savings": "Savings (節約)", - "Savings shows the uncompressed and non-deduped data that would have been stored without those techniques.": "Savings (節約) では、これらのテクノロジーが使用されないない場合には保存されている非圧縮データおよび非重複排除データが表示されます。", - "Storage Efficiency": "ストレージの効率性", - "OpenShift Container Storage Overview": "OpenShift Container Storage の概要", - "Block and File": "ブロックおよびファイル", - "Object_0": "オブジェクト", - "BlockPools": "BlockPools", - "Storage Classes": "ストレージクラス", - "Pods": "Pods", - "{{metricType}}": "{{metricType}}", - "Break by dropdown": "内訳ドロップダウン", - "Service Name": "Service 名", - "Cluster Name": "クラスター名", - "Mode": "モード", - "Storage Cluster": "ストレージクラスター", - "Utilization": "使用状況", - "Used Capacity": "使用済みの容量", - "Expanding StorageCluster": "StorageCluster の拡張", - "Upgrading OpenShift Data Foundation's Operator": "OpenShift Data Foundation の Operator のアップグレード", - "Used Capacity Breakdown": "使用済み容量の内訳", - "This card shows the used capacity for different Kubernetes resources. 
The figures shown represent the Usable storage, meaning that data replication is not taken into consideration.": "このカードには、異なる Kubernetes リソースについて使用される容量が表示されます。表示される図は使用可能なストレージを示します。ここでは、データのレプリケーションは考慮されません。", - "Cluster name": "クラスター名", - "Internal": "内部", - "Raw capacity is the absolute total disk space available to the array subsystem.": "Raw 容量は、アレイサブシステムで利用可能な絶対的なディスク容量の合計です。", - "Troubleshoot": "トラブルシューティング", - "Active health checks": "有効なヘルスチェック", - "Progressing": "進行中", - "The Compression Ratio represents the compressible data effectiveness metric inclusive of all compression-enabled pools.": "Compression Ratio (圧縮率) は、圧縮可能なすべてのプールを含む圧縮可能なデータの有効性を示すメトリクスを表します。", - "The Savings metric represents the actual disk capacity saved inclusive of all compression-enabled pools and associated replicas.": "Savings (節約) メトリクスは、圧縮可能なすべてのプールと関連するレプリカを含む、保存された実際のディスク容量を表します。", - "Performance metrics over time showing IOPS, Latency and more. Each metric is a link to a detailed view of this metric.": "IOPS、レイテンシーなどを示す一定期間のパフォーマンスメトリクス。各メトリクスはこのメトリクスの詳細ビューへのリンクになります。", - "Recovery": "リカバリー", - "Disk State": "ディスクの状態", - "OpenShift Data Foundation status": "OpenShift Data Foundation のステータス", - "Filesystem": "ファイルシステム", - "Disks List": "ディスクの一覧", - "Start Disk Replacement": "ディスク置き換えの開始", - "<0>{{diskName}} can be replaced with a disk of same type.": "<0>{{diskName}} は同じタイプのディスクに置き換えることが可能です。", - "Troubleshoot disk <1>{{diskName}}": "ディスク <1>{{diskName}} のトラブルシューティング", - "here": "こちら", - "Online": "オンライン", - "Offline": "オフライン", - "NotResponding": "NotResponding", - "PreparingToReplace": "PreparingToReplace", - "ReplacementFailed": "ReplacementFailed", - "ReplacementReady": "ReplacementReady", - "Connection name": "接続名", - "This is a required field": "これは必須フィールドです", - "A unique name for the key management service within the project.": "プロジェクト内のキー管理サービスの一意の名前。", - "Service instance ID": "Service インスタンス ID", - "Service API key": "Service API キー", - "Customer root key": "顧客の root キー", - "IBM Base URL": "IBM ベース URL", - "IBM Token URL": "IBM トークン URL", - "Connect to a Key Management Service": "キー管理サービスに接続", - "Key management service provider": "キー管理サービスプロバイダー", - "kms-provider-name": "kms-provider-name", - "Token": "トークン", - "Create a secret with the token for every namespace using encrypted PVCs.": "暗号化された PVC を使用して、すべての namespace のトークンでシークレットを作成します。", - "Hide token": "トークンの非表示", - "Reveal token": "トークンの表示", - "Authentication method": "認証方法", - "authentication-method": "認証方法", - "Please enter a URL": "URL を入力してください", - "Please enter a valid port": "有効なポートを入力してください", - "Address": "アドレス", - "Port": "ポート", - "Advanced settings": "詳細設定", - "Raw Capacity": "RAW 容量", - "x {{ replica, number }} replicas =": "x {{ replica, number }} レプリカ =", - "No StorageClass selected": "StorageClass が選択されていません", - "The Arbiter stretch cluster requires a minimum of 4 nodes (2 different zones, 2 nodes per zone). Please choose a different StorageClass or create a new LocalVolumeSet that matches the minimum node requirement.": "Arbiter ストレッチクラスターには、少なくとも 4 つのノード (2 つの異なるゾーンのそれぞれにノードが 2 つずつ配置される) が必要です。異なる StorageClass を選択するか、または最小ノード要件に適合する新規の LocalVolumeSet を作成してください。", - "The StorageCluster requires a minimum of 3 nodes. 
Please choose a different StorageClass or create a new LocalVolumeSet that matches the minimum node requirement.": "StorageCluster には、少なくとも 3 つのノードが必要です。別の StorageClass を選択するか、または最小ノード要件に一致する新規の LocalVolumeSet を作成してください。", - "Adding capacity for <1>{{name}}, may increase your expenses.": "<1>{{name}} の容量を追加すると、費用の負担が増える可能性があります。", - "StorageClass": "StorageClass", - "Currently Used:": "現在使用中:", - "Add": "追加", - "Key Management Service Advanced Settings": "キー管理サービスの詳細設定", - "Vault enterprise namespaces are isolated environments that functionally exist as Vaults within a Vault. They have separate login paths and support creating and managing data isolated to their namespace.": "Vault enterprise namespace は、Vault 内の Vault として機能的に存在する分離された環境です。それらには個別のログインパスがあり、その namespace に分離されたデータの作成および管理をサポートします。", - "Maximum file size exceeded. File limit is 4MB.": "ファイルの最大サイズを超過しました。ファイルの上限は 4MB です。", - "A PEM-encoded CA certificate file used to verify the Vault server's SSL certificate.": "Vault サーバーの SSL 証明書の検証に使用される PEM エンコードされた CA 証明書ファイル。", - "A PEM-encoded client certificate. This certificate is used for TLS communication with the Vault server.": "PEM エンコードされたクライアント証明書。この証明書は、Vault サーバーとの TLS 通信に使用されます。", - "An unencrypted, PEM-encoded private key which corresponds to the matching client certificate provided with VAULT_CLIENT_CERT.": "VAULT_CLIENT_CERT で提供される一致するクライアント証明書に対応する PEM エンコードされたプライベートキー。", - "The name to use as the SNI host when OpenShift Data Foundation connecting via TLS to the Vault server": "OpenShift Data Foundation が TLS 経由で Vault サーバーに接続する際に SNI ホストとして使用する名前", - "Backend Path": "バックエンドパス", - "path/": "パス/", - "Authentication Path": "認証パス", - "Authentication Namespace": "Authentication namespace", - "TLS Server Name": "TLS サーバー名", - "Vault Enterprise Namespace": "Vault Enterprise namespace", - "The name must be accurate and must match the service namespace": "名前は正確で、サービスの namespace と一致する必要があります", - "CA Certificate": "CA 証明書", - "Upload a .PEM file here...": ".PEM ファイルをここにアップロードします...", - "Client Certificate": "クライアント証明書", - "Client Private Key": "クライアントプライベートキー", - "Attach OBC to a Deployment": "OBC の Deployment への割り当て", - "Deployment Name": "Deployment 名", - "Attach": "割り当て", - "<0><0>{{poolName}} cannot be deleted. When a pool is bounded to PVC it cannot be deleted. Please detach all the resources from StorageClass(es):": "<0><0>{{poolName}} は削除できません。プールが PVC にバインドされている場合は削除できません。すべてのリソースを StorageClass から切り離してください: ", - "<0>Deleting <1>{{poolName}} will remove all the saved data of this pool. 
Are you sure want to delete?": "<0><1>{{poolName}} を削除すると、このプールのすべての保存されたデータが削除されます。削除してもよいですか?", - "BlockPool Delete Modal": "BlockPool の削除モーダル", - "Try Again": "再試行", - "Finish": "終了", - "Go To Pvc List": "PVC 一覧に移動", - "BlockPool Update Form": "BlockPool 更新フォーム", - "replacement disallowed: disk {{diskName}} is {{replacingDiskStatus}}": "置き換え禁止: ディスク {{diskName}} は {{replacingDiskStatus}} です", - "replacement disallowed: disk {{diskName}} is {{replacementStatus}}": "置き換え禁止: ディスク {{diskName}} は {{replacementStatus}} です", - "Disk Replacement": "ディスクの置き換え", - "This action will start preparing the disk for replacement.": "このアクションにより、置き換え用のディスクの準備が開始します。", - "Data rebalancing is in progress": "データリバランスが進行中です", - "See data resiliency status": "データ回復性のステータスの表示", - "Are you sure you want to replace <1>{{diskName}}?": "<1>{{diskName}} を置き換えてもよいですか?", - "Replace": "置き換え", - "Create NamespaceStore ": "NamespaceStore の作成 ", - "Represents an underlying storage to be used as read or write target for the data in the namespace buckets.": "namespace バケットのデータの読み取りまたは書き込みターゲットとして使用する基礎となるストレージを表します。", - "Provider {{provider}} | Region: {{region}}": "プロバイダー {{provider}} | リージョン: {{region}}", - "Create new NamespaceStore ": "新規 NamespaceStore の作成 ", - "An error has occurred while fetching namespace stores": "namespace ストアの取得中にエラーが発生しました", - "Select a namespace store": "namespace ストアの選択", - "Namespace store name": "namespace ストア名", - "A unique name for the namespace store within the project": "プロジェクト内の namespace の一意の名前", - "Persistent volume claim": "永続ボリューム要求 (PVC)", - "Folder": "フォルダー", - "If the name you write exists, we will be using the existing folder if not we will create a new folder ": "指定した名前が存在する場合は、既存のフォルダーを使用します。存在しない場合には、新しいフォルダーを作成します", - "Namespace Store Table": "namespace ストアテーブル", - "Service account keys are needed for Google Cloud Storage authentication. 
The keys can be found in the service accounts page in the GCP console.": "Google Cloud Storage 認証にはサービスアカウントキーが必要です。キーは、GCP コンソールのサービスアカウントページで確認できます。", - "Learn more": "詳細はドキュメントを参照してください", - "Where can I find Google Cloud credentials?": "Google Cloud 認証情報の確認方法", - "Upload a .json file with the service account keys provided by Google Cloud Storage.": "Google Cloud Storage が提供するサービスアカウントキーで .json ファイルをアップロードします。", - "Secret Key": "シークレットキー", - "Upload JSON": "JSON のアップロード", - "Uploaded File Name": "アップロードしたファイル名", - "Upload File": "ファイルのアップロード", - "Switch to Secret": "シークレットへの切り替え", - "Select Secret": "シークレットの選択", - "Switch to upload JSON": "JSON アップロードへの切り替え", - "Cluster Metadata": "クラスターのメタデータ", - "Target Bucket": "ターゲットバケット", - "Number of Volumes": "ボリューム数", - "Volume Size": "ボリュームのサイズ", - "Target blob container": "ターゲット Blob コンテナー", - "Target bucket": "ターゲットバケット", - "Account name": "アカウント名", - "Access key": "アクセスキー", - "Account key": "アカウントキー", - "Secret key": "シークレットキー", - "Region Dropdown": "リージョンのドロップダウン", - "Endpoint": "エンドポイント", - "Endpoint Address": "Endpoint アドレス", - "Secret": "シークレット", - "Switch to Credentials": "認証情報への切り替え", - "Access Key Field": "アクセスキーフィールド", - "Secret Key Field": "シークレットキーフィールド", - "ObjectBucketClaim Name": "ObjectBucketClaim 名", - "my-object-bucket": "my-object-bucket", - "If not provided a generic name will be generated.": "指定されていない場合は、汎用的な名前が生成されます。", - "Defines the object-store service and the bucket provisioner.": "オブジェクトストアサービスおよびバケットプロビジョナーを定義します。", - "BucketClass": "BucketClass", - "Select BucketClass": "BucketClass の選択", - "Create ObjectBucketClaim": "ObjectBucketClaim の作成", - "Edit YAML": "YAML の編集", - "Attach to Deployment": "Deployment への割り当て", - "Disabled because the ObjectBucketClaim is being deleted.": "ObjectBucketClaim が削除されているため無効化されました。", - "Object Bucket Claim Details": "オブジェクトバケット要求の詳細", - "Object Bucket": "オブジェクトバケット", - "Namespace": "Namespace", - "OBCTableHeader": "OBCTableHeader", - "Object Bucket Claims": "オブジェクトバケット要求", - "Object Bucket Claim Data": "オブジェクトバケット要求データ", - "Hide Values": "値を非表示にする", - "Reveal Values": "値を表示する", - "Data": "データ", - "Create Object Bucket": "オブジェクトバケットの作成", - "Object Bucket Name": "オブジェクトバケット名", - "ob-name-help": "ob-name-help", - "The corresponding ObjectBucketClaim must be deleted first.": "最初に対応する ObjectBucketClaim を削除する必要があります。", - "Object Bucket Details": "オブジェクトバケットの詳細", - "Object Bucket Claim": "オブジェクトバケット要求", - "OBTableHeader": "OBTableHeader", - "Object Buckets": "オブジェクトバケット", - "Uses the available disks that match the selected filters on all nodes selected in the previous step.": "直前の手順で選択したすべてのノードの選択されたフィルターに一致する利用可能なディスクを使用します。", - "A LocalVolumeSet allows you to filter a set of disks, group them and create a dedicated StorageClass to consume storage from them.": "LocalVolumeSet を使用すると、ディスクのセットのフィルターやグループ化だけでなく、専用の StorageClass を作成してそこからストレージを使用できます。", - "OpenShift Container Storage's StorageCluster requires a minimum of 3 nodes for the initial deployment. Only {{nodes}} node match to the selected filters. 
Please adjust the filters to include more nodes.": "OpenShift Container Storage の StorageCluster では、初期デプロイメントには少なくとも 3 つのノードが必要です。{{nodes}} ノードのみが選択されたフィルターに一致します。ノードを追加するにはフィルターを調整してください。", - "After the LocalVolumeSet and StorageClass are created you won't be able to go back to this step.": "LocalVolumeSet と StorageClass の作成後は、この手順に戻ることができません。", - "Create StorageClass": "StorageClass の作成", - "Selected Capacity": "選択された容量", - "Selected Nodes": "選択された Node", - "Review StorageCluster": "StorageCluster の確認", - "Storage and nodes": "ストレージおよびノード", - "Arbiter zone:": "Arbiter ゾーン:", - "None": "なし", - "selected based on the created StorageClass:": "作成された StorageClass に基づいて選択されています", - "Total CPU and memory of {{cpu, number}} CPU and {{memory}}": "{{cpu, number}} CPU および {{memory}} の合計 CPU およびメモリー", - "Configure": "設定", - "Enable Encryption": "暗号の有効化", - "Connect to external key management service: {{name}}": "外部キー管理サービス {{name}} に接続", - "Encryption Level: {{level}}": "暗号化レベル: {{level}}", - "Using {{networkLabel}}": "{{networkLabel}} の使用", - "Discover disks": "ディスクの検出", - "Review and create": "確認および作成", - "Info Alert": "info 警告", - "Internal - Attached devices": "内部 - 割り当て済みデバイス", - "Can be used on any platform where there are attached devices to the nodes, using the Local Storage Operator. The infrastructure StorageClass is provided by Local Storage Operator, on top of the attached drives.": "ローカルストレージ Operator を使用して、ノードに割り当てられているデバイスがあるプラットフォームで使用できます。このインフラストラクチャーの StorageClass は、割り当てられたドライブ上でローカルストレージ Operator によって提供されます。", - "Before we can create a StorageCluster, the Local Storage operator needs to be installed. When installation is finished come back to OpenShift Container Storage to create a StorageCluster.<1><0>Install": "StorageCluster を作成する前に、ローカルストレージ Operator のインストールが必要になります。インストールの完了後に OpenShift Container Storage に戻り、StorageCluster を作成します。<1><0>Install", - "Node Table": "Node テーブル", - "StorageCluster exists": "StorageCluster があります", - "Back to operator page": "Operator ページに戻る", - "Go to cluster page": "クラスターページに移動する", - "<0>A StorageCluster <1>{{clusterName}} already exists.<3>You cannot create another StorageCluster.": "<0>StorageCluster <1>{{clusterName}} はすでに存在します。<3>別の StorageCluster を作成できません。", - "Connect to external cluster": "外部クラスターへの接続", - "Download <1>{{SCRIPT_NAME}} script and run on the RHCS cluster, then upload the results (JSON) in the External cluster metadata field.": "<1>{{SCRIPT_NAME}} スクリプトをダウンロードして RHCS クラスターで実行します。次に、External cluster metadata フィールドに結果 (JSON) をアップロードします。", - "Download Script": "スクリプトのダウンロード", - "A bucket will be created to provide the OpenShift Data Foundation's Service.": "バケットは、OpenShift Data Foundation のサービスを提供するために作成されます。", - "Bucket created for OpenShift Container Storage's Service": "OpenShift Container Storage サービス用に作成されるバケット", - "Create External StorageCluster": "外部 StorageCluster の作成", - "External cluster metadata": "外部クラスターのメタデータ", - "Upload JSON File": "JSON ファイルのアップロード", - "Upload Credentials file": "認証情報ファイルのアップロード", - "JSON data": "JSON データ", - "Create Button": "作成ボタン", - "Create StorageCluster": "StorageCluster の作成", - "OpenShift Container Storage runs as a cloud-native service for optimal integration with applications in need of storage and handles the scenes such as provisioning and management.": "OpenShift Container Storage はストレージを必要とするアプリケーションとの統合の最適化のためにクラウドネイティブサービスとして実行され、プロビジョニングや管理などを処理します。", - "Select mode:": "モードの選択", - "If not labeled, the selected nodes are labeled <1>{{label}} to make them target hosts for OpenShift 
Data Foundation's components.": "ラベルが指定されていない場合には、OpenShift Data Foundation のコンポーネントのターゲットホストとして設定するために、選択したノードに <1>{{label}} というラベルが付けられます。", - "Mark nodes as dedicated": "ノードを専用としてマークする", - "This will taint the nodes with the<1>key: node.ocs.openshift.io/storage, <4>value: true, and <7>effect: NoSchedule": "これにより、ノードに <1>key: node.ocs.openshift.io/storage、<4>value: true、および <7>effect: NoSchedule のテイントが付けられます", - "Selected nodes will be dedicated to OpenShift Container Storage use only": "選択されたノードは OpenShift Container Storage のみが使用します", - "OpenShift Container Storage deployment in two data centers, with an arbiter node to settle quorum decisions.": "クォーラムの決定に使用されるアービターノードを備えた 2 つのデータセンターの OpenShift Container Storage デプロイメント。", - "To support high availability when two data centers can be used, enable arbiter to get the valid quorum between two data centers.": "2 つのデータセンターを使用できる場合に高可用性をサポートするには、Arbiter が 2 つのデータセンター間で有効なクォーラム (定足数) を取得できるようにします。", - "Select arbiter zone": "Arbiter ゾーンの選択", - "Network": "Network", - "The default SDN networking uses a single network for all data operations such read/write and also for control plane, such as data replication. Multus allows a network separation between the data operations and the control plane operations.": "デフォルトの SDN ネットワークは、読み取り/書き込みなどのすべてのデータ操作や、データレプリケーションなどのコントロールプレーンに単一のネットワークを使用します。Multus は、データ操作とコントロールプレーンの操作間でのネットワーク分離を可能にします。", - "Default (SDN)": "デフォルト (SDN)", - "Custom (Multus)": "カスタム (Multus)", - "Public Network Interface": "パブリックネットワークインターフェイス", - "Select a network": "ネットワークの選択", - "Cluster Network Interface": "ネットワークインターフェイス", - "Requested Cluster Capacity:": "要求されたクラスター容量:", - "StorageClass:": "StorageClass:", - "Select Capacity": "容量の選択", - "Requested Capacity": "要求された容量", - "Select Nodes": "Node の選択", - "create internal mode StorageCluster wizard": "内部モード StorageCluster の作成ウィザード", - "Can be used on any platform, except bare metal. It means that OpenShift Container Storage uses an infrastructure StorageClass, provided by the hosting platform. 
For example, gp2 on AWS, thin on VMWare, etc.": "ベアメタル以外のすべてのプラットフォームで使用できるので、OpenShift Container Storage はホストプラットフォームが提供するインフラストラクチャーの StorageClass を使用できます (例: AWS の gp2、VMWare の thin など)。", - "{{title}} steps": "{{title}} の手順", - "{{title}} content": "{{title}} コンテンツ", - "{{availableCapacity}} / {{replica}} replicas": "{{availableCapacity}}/{{replica}} レプリカ", - "Available capacity:": "現在利用可能な容量:", - "Filesystem name": "ファイルシステム名", - "Enter filesystem name": "ファイルシステム名の入力", - "CephFS filesystem name into which the volume shall be created": "ボリュームを作成する CephFS ファイルシステム名", - "no compression": "圧縮なし", - "with compression": "圧縮あり", - "Replica {{poolSize}} {{compressionText}}": "レプリカ {{poolSize}} {{compressionText}}", - "Create New Pool": "新規プールの作成", - "Storage Pool": "ストレージプール", - "Select a Pool": "プールの選択", - "Storage pool into which volume data shall be stored": "ボリュームデータを保存するストレージプール", - "Error retrieving Parameters": "パラメーターの取得エラー", - "my-storage-pool": "my-storage-pool", - "An encryption key will be generated for each PersistentVolume created using this StorageClass.": "暗号化キーは、この StorageClass を使用して作成される PersistentVolume ごとに生成されます。", - "Key service": "キーサービス", - "Select an existing connection": "既存の接続の選択", - "KMS service {{value}} already exist": "KMS サービス {{value}} はすでに存在します", - "Choose existing KMS connection": "既存の KMS 接続の選択", - "Create new KMS connection": "新規 KMS 接続の作成", - "PV expansion operation is not supported for encrypted PVs.": "PV 拡張操作は、暗号化された PV ではサポートされません。", - "Enable Thick Provisioning": "シックプロビジョニングの有効化", - "By enabling thick-provisioning, volumes will allocate the requested capacity upon volume creation. Volume creation will be slower when thick-provisioning is enabled.": "シックプロビジョニングを有効にすると、ボリュームはボリュームの作成時に必要な容量を割り当てます。シックプロビジョニングが有効な場合はボリュームの作成速度が遅くなります。", - "{{resource}} details": "{{resource}} の詳細", - "Kind": "種類", - "Labels": "ラベル", - "Last updated": "最終更新", - "Storage Systems": "ストレージシステム", - "Used capacity": "使用された容量", - "Storage status represents the health status of {{operatorName}}'s StorageCluster.": "ストレージのステータスは、{{operatorName}} の StorageCluster の正常性のステータスを表します。", - "Health": "正常性", - "Standard": "標準", - "Data will be consumed by a Multi-cloud object gateway, deduped, compressed, and encrypted. The encrypted chunks would be saved on the selected BackingStores. Best used when the applications would always use the OpenShift Data Foundation endpoints to access the data.": "データは、マルチクラウドオブジェクトゲートウェイによって消費され、重複排除され、圧縮され、暗号化されます。暗号化されたチャンクは、選択された BackingStore に保存されます。アプリケーションが常に OpenShift Data Foundation エンドポイントを使用してデータにアクセスする場合に使用するのが最も適しています。", - "Data is stored on the NamespaceStores without performing de-duplication, compression, or encryption. BucketClasses of namespace type allow connecting to existing data and serving from them. 
These are best used for existing data or when other applications (and cloud-native services) need to access the data from outside OpenShift Data Foundation.": "データは、NamespaceStore に重複排除、圧縮、暗号化なしで保存されます。BucketClass の namespace タイプを使用すると、既存のデータに接続し、そこから送信できます。これらは既存データに使用するか、または他のアプリケーション (およびクラウドネイティブサービス) が OpenShift Data Foundation 外からデータにアクセスする必要がある場合に使用するのに最適です。", - "Single NamespaceStore": "単一の NamespaceStore", - "The namespace bucket will read and write its data to a selected namespace store": "namespace バケットは、そのデータを読み取り、選択した namespace ストアに書き込みます", - "Multi NamespaceStores": "複数の NamespaceStore", - "The namespace bucket will serve reads from several selected backing stores, creating a virtual namespace on top of them and will write to one of those as its chosen write target": "namespace バケットは、選択された複数のバッキングストアからの読み取りデータを提供し、それらの上に仮想 namespace を作成し、選択された書き込みターゲットとしてそれらのいずれかに書き込みます", - "Cache NamespaceStore": "NamespaceStore のキャッシュ", - "The caching bucket will serve data from a large raw data out of a local caching tiering.": "キャッシュバケットは、ローカルのキャッシュ階層から大規模な未加工データのデータを提供します。", - "Create storage class": "ストレージクラスの作成", - "Create local volume set": "ローカルボリュームセットの作成", - "Logical used capacity per account": "アカウントごとに使用される論理使用容量", - "Egress Per Provider": "プロバイダーごとの egress", - "I/O Operations count": "I/O 操作数", - "The StorageClass used by OpenShift Data Foundation to write its data and metadata.": "OpenShift Data Foundation がデータおよびメタデータを書き込むのに使用される StorageClass。", - "Infrastructure StorageClass created by Local Storage Operator and used by OpenShift Container Storage to write its data and metadata.": "データおよびメタデータを作成するために、ローカルストレージ Operator によって作成され、OpenShift Container Storage によって作成されるインフラストラクチャーの StorageClass。", - "The amount of capacity that would be dynamically allocated on the selected StorageClass.": "選択された StorageClass で動的に割り当てられる容量。", - "If you wish to use the Arbiter stretch cluster, a minimum of 4 nodes (2 different zones, 2 nodes per zone) and 1 additional zone with 1 node is required. All nodes must be pre-labeled with zones in order to be validated on cluster creation.": "Arbiter ストレッチクラスターを使用する場合は、少なくとも 4 つのノード (2 つの異なるゾーン、ゾーンあたり 2 つのノード) と 1 つのノードに 1 つの追加のゾーンが必要になります。クラスターの作成時に検証するには、すべてのノードにゾーンのラベルを事前に設定する必要があります。", - "Selected nodes are based on the StorageClass <1>{{scName}} and with a recommended requirement of 14 CPU and 34 GiB RAM per node.": "選択されたノードは StorageClass <1>{{scName}} をベースとしており、推奨要件 (ノードごとに 14 の CPU および 34 GiB の RAM) を満たしています。", - "Selected nodes are based on the StorageClass <1>{{scName}} and fulfill the stretch cluster requirements with a recommended requirement of 14 CPU and 34 GiB RAM per node.": "選択されたノードは StorageClass <1>{{scName}} をベースとしており、ストレッチクラスターの要件および推奨要件 (ノードごとに 14 の CPU および 34 GiB の RAM) を満たしています。", - "Loading...": "読み込み中...", - "Pool {{name}} creation in progress": "プール {{name}} の作成が進行中です", - "Pool {{name}} was successfully created": "プール {{name}} が正常に作成されました", - "An error occurred. Pool {{name}} was not created": "エラーが発生しました。プール {{name}} は作成されませんでした", - "Pool {{name}} creation timed out. Please check if odf operator and rook operator are running": "プール {{name}} の作成がタイムアウトしました。odf operator および rook Operator が実行されているかどうかを確認してください", - "The creation of a StorageCluster is still in progress or has failed. 
Try again after the StorageCuster is ready to use.": "StorageCluster の作成が進行中であるか、または失敗しました。StorageCluster を使用する準備ができたら再試行してください。", - "Pool management tasks are not supported for default pool and OpenShift Container Storage's external mode.": "プール管理タスクは、デフォルトのプールおよび OpenShift Container Storage の外部モードではサポートされません。", - "Pool {{name}} was created with errors.": "プール {{name}} がエラーを出して作成されました。", - "Delete": "削除", - "StorageClasses": "StorageClasses", - "hr": "時間", - "min": "分", - "A minimal cluster deployment will be performed.": "最小のクラスターデプロイメントが実行されます。", - "The selected nodes do not match OpenShift Data Foundation's StorageCluster requirement of an aggregated 30 CPUs and 72 GiB of RAM. If the selection cannot be modified a minimal cluster will be deployed.": "選択されたノードは、集約された 30 CPU および 72 GiB の RAM の OpenShift Data Foundation の StorageCluster の要件に適合しません。選択内容を変更できない場合は、最小クラスターがデプロイされます。", - "Back to nodes selection": "ノードの選択に戻る", - "Select a StorageClass to continue": "StorageClass を選択して続行します", - "This is a required field. The StorageClass will be used to request storage from the underlying infrastructure to create the backing PersistentVolumes that will be used to provide the OpenShift Data Foundation service.": "これは必須フィールドです。StorageClass は、基礎となるインフラストラクチャーからストレージを要求し、OpenShift Data Foundation サービスを提供するために使用されるバッキング PersistentVolume を作成するために使用されます。", - "Create new StorageClass": "新規 StorageClass の作成", - "This is a required field. The StorageClass will be used to request storage from the underlying infrastructure to create the backing persistent volumes that will be used to provide the OpenShift Data Foundation service.": "これは必須フィールドです。StorageClass は、基礎となるインフラストラクチャーからストレージを要求し、OpenShift Data Foundation サービスを提供するために使用されるバッキング永続ボリュームを作成するために使用されます。", - "All required fields are not set": "すべての必須フィールドは設定されていません", - "In order to create the StorageCluster you must set the StorageClass, select at least 3 nodes (preferably in 3 different zones) and meet the minimum or recommended requirement": "StorageCluster を作成するには、StorageClass を設定し、3 つ以上のノードを (できれば 3 つの異なるゾーンで) 選択し、最低または推奨される要件を満たす必要があります", - "The StorageCluster requires a minimum of 3 nodes for the initial deployment. Please choose a different StorageClass or go to create a new LocalVolumeSet that matches the minimum node requirement.": "StorageCluster には、初期デプロイメントに少なくとも 3 つのノードが必要です。別の StorageClass を選択するか、最小ノード要件に適合する新規の LocalVolumeSet を作成してください。", - "Create new volume set instance": "新規ボリュームセットインスタンスの作成", - "Select at least 1 encryption level or disable encryption.": "1 つ以上の暗号化レベルを選択するか、または暗号化を無効にします。", - "Fill out the details in order to connect to key management system": "キー管理システムに接続するための詳細を入力します", - "This is a required field.": "これは必須フィールドです。", - "Both public and cluster network attachment definition cannot be empty": "パブリックおよびクラスターネットワーク接続定義はどちらも空にできません", - "A public or cluster network attachment definition must be selected to use Multus.": "Multus ネットワークを使用するには、パブリックまたはクラスターネットワーク接続定義を選択する必要があります。", - "The number of selected zones is less than the minimum requirement of 3. 
If not modified a host-based failure domain deployment will be enforced.": "選択されたゾーンの数は最小要件の 3 未満になります。変更されない場合、ホストベースの障害ドメインのデプロイメントが実行されます。", - "When the nodes in the selected StorageClass are spread across fewer than 3 availability zones, the StorageCluster will be deployed with the host based failure domain.": "選択した StorageClass のノードが 3 つ未満のアベイラビリティーゾーンに分散される場合、StorageCluster はホストベースの障害ドメインを使用してデプロイされます。", - "Cluster-Wide and StorageClass": "クラスター全体および StorageClass", - "Cluster-Wide": "クラスター全体", - "Select at least 2 Backing Store resources": "2 つ以上のバッキングストアリソースの選択", - "Select at least 1 Backing Store resource": "1 つ以上のバッキングストアリソースの選択", - "x {{replica}} replicas = {{osdSize, number}} TiB": "x {{replica}} レプリカ = {{osdSize, number}} TiB", - "SmallScale": "SmallScale", - "0.5 TiB": "0.5 TiB", - "2 TiB": "2 TiB", - "LargeScale": "LargeScale", - "4 TiB": "4 TiB", - "{{osdSize, number}} TiB": "{{osdSize, number}} TiB", - "Help": "ヘルプ" -} \ No newline at end of file diff --git a/frontend/packages/ceph-storage-plugin/locales/ja/console-shared.json b/frontend/packages/ceph-storage-plugin/locales/ja/console-shared.json deleted file mode 100644 index aadb5e7abdab..000000000000 --- a/frontend/packages/ceph-storage-plugin/locales/ja/console-shared.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "View {{title}} metrics in query browser": "クエリーブラウザーの {{title}} メトリクスの表示", - "Not available": "利用不可", - "{{humanAvailable}} available of {{humanLimit}} total limit": "利用可能: {{humanAvailable}}/{{humanLimit}} 合計制限", - "{{humanAvailable}} available of {{humanMax}}": "利用可能: {{humanAvailable}}/{{humanMax}}", - "{{humanAvailable}} available": "利用可能: {{humanAvailable}}" -} \ No newline at end of file diff --git a/frontend/packages/ceph-storage-plugin/locales/ko/ceph-storage-plugin.json b/frontend/packages/ceph-storage-plugin/locales/ko/ceph-storage-plugin.json deleted file mode 100644 index ed5335208a34..000000000000 --- a/frontend/packages/ceph-storage-plugin/locales/ko/ceph-storage-plugin.json +++ /dev/null @@ -1,757 +0,0 @@ -{ - "Add Capacity": "용량 추가", - "Edit BlockPool": "블록 풀 편집", - "Edit Bucket Class Resources": "버킷 클래스 리소스 편집", - "ObjectBucketClaim": "개체 버킷 클레임", - "Use existing claim": "기존 클레임 사용", - "Select claim": "클레임 선택", - "Create new claim": "새 클레임 생성", - "Create": "만들기", - "Cancel": "취소", - "Overview": "개요", - "StorageSystems": "스토리지 시스템", - "StorageSystem details": "스토리지 시스템 세부 정보", - "Enabled": "활성화됨", - "Disabled": "비활성화됨", - "Last synced": "마지막으로 동기화됨", - "Default pool cannot be deleted": "기본 풀은 삭제할 수 없음", - "BlockPool List": "블록 풀 목록", - "Delete BlockPool": "블록 풀 삭제", - "{{replica}} Replication": "{{replica}} 복제", - "Pool name": "풀 이름", - "my-block-pool": "my-block-pool", - "pool-name-help": "pool-name-help", - "Data protection policy": "데이터 보호 정책", - "Select replication": "복제 선택", - "Volume type": "볼륨 유형", - "Select volume type": "볼륨 유형 선택", - "Compression": "압축", - "Enable compression": "압축 활성화", - "Enabling compression may result in little or no space savings for encrypted or random data. Also, enabling compression may have an impact on I/O performance.": "압축을 활성화하면 암호화된 데이터 또는 임의의 데이터 공간이 거의/전혀 절약되지 않을 수 있습니다. 또한 압축을 활성화하면 I/O 성능에 영향을 미칠 수 있습니다.", - "OpenShift Data Foundation's StorageCluster is not available. Try again after the StorageCluster is ready to use.": "OpenShift Data Foundation의 스토리지 클러스터를 사용할 수 없습니다. 
스토리지 클러스터가 사용할 준비가 되면 다시 시도하십시오.", - "Create BlockPool": "블록 풀 생성", - "Close": "닫기", - "Pool creation is not supported for OpenShift Data Foundation's external RHCS StorageSystem.": "OpenShift Data Foundation의 외부 RHCS 스토리지 시스템에는 풀 생성이 지원되지 않습니다.", - "A BlockPool is a logical entity providing elastic capacity to applications and workloads. Pools provide a means of supporting policies for access data resilience and storage efficiency.": "블록 풀은 애플리케이션과 워크로드에 탄력적인 용량을 제공하는 논리적 엔티티입니다. 풀은 액세스 데이터 복원력 및 스토리지 효율성을 위한 정책을 지원하는 수단을 제공합니다.", - "BlockPool Creation Form": "블록 풀 생성 양식", - "Name": "이름", - "Bucket Name": "버킷 이름", - "Type": "유형", - "Region": "리전", - "BackingStore Table": "백업 저장 테이블", - "Each BackingStore can be used for one tier at a time. Selecting a BackingStore in one tier will remove the resource from the second tier option and vice versa.": "각 백업 저장소는 한 번에 한 계층에 사용할 수 있습니다. 한 계층에서 백업 저장소를 선택하면 두 번째 계층 옵션에서 리소스가 제거되고 그 반대의 경우도 마찬가지입니다.", - "Bucket created for OpenShift Data Foundation's Service": "OpenShift Data Foundation 서비스 용으로 생성된 버킷", - "Tier 1 - BackingStores": "계층 1 - 백업 저장소", - "Create BackingStore ": "백업 저장소 만들기 ", - "Tier-1-Table": "계층 1-표", - "{{bs, number}} BackingStore_one": "백업 저장소 {{bs, number}}개", - "{{bs, number}} BackingStore_other": "백업 저장소{{bs, number}}개", - "selected": "선택", - "Tier 2 - BackingStores": "계층 2 - 백업 저장소", - "Tier-2-Table": "계층-2-표", - "General": "일반", - "Placement Policy": "배치 정책", - "Resources": "리소스", - "Review": "리뷰", - "Create BucketClass": "버킷 클래스 만들기", - "Create new BucketClass": "새 버킷 클래스 만들기", - "BucketClass is a CRD representing a class for buckets that defines tiering policies and data placements for an OBC.": "버킷 클래스는 OBC에 대한 계층화 정책 및 데이터 배치를 정의하는 버킷의 클래스를 나타내는 CRD입니다.", - "Next": "다음", - "Back": "이전", - "Edit BucketClass Resource": "버킷 클래스 리소스 편집", - "{{storeType}} represents a storage target to be used as the underlying storage for the data in Multicloud Object Gateway buckets.": "{{storeType}}은 Multicloud Object Gateway 버킷의 데이터에 대한 기본 스토리지로 사용할 스토리지 대상을 나타냅니다.", - "Cancel ": "취소 ", - "Save": "저장", - "What is a BackingStore?": "백업 저장소란 무엇입니까?", - "BackingStore represents a storage target to be used as the underlying storage for the data in Multicloud Object Gateway buckets.": "백업 저장소는 Multicloud Object Gateway 버킷의 데이터에 대한 기본 스토리지로 사용할 스토리지 대상을 나타냅니다.", - "Multiple types of BackingStores are supported: asws-s3 s3-compatible google-cloud-storage azure-blob obc PVC.": "여러 유형의 백업 저장소가 지원됩니다: asws-s3 s3-compatiblegoogle-cloud-storage azure-blob obc PVC.", - "Learn More": "더 알아보기", - "What is a BucketClass?": "버킷 클래스란 무엇입니까?", - "A set of policies which would apply to all buckets (OBCs) created with the specific bucket class. These policies include placement, namespace and caching": "특정 버킷 클래스로 생성 된 모든 버킷 (OBC)에 적용되는 정책 세트입니다. 
이러한 정책에는 배치, 네임 스페이스 및 캐싱이 포함됩니다.", - "BucketClass type": "버킷 클래스 유형", - "3-63 chars": "3-63 자", - "Starts and ends with lowercase number or letter": "소문자 숫자 또는 문자로 시작하고 끝냅니다.", - "Only lowercase letters, numbers, non-consecutive periods or hyphens": "소문자, 숫자, 연속되지 않는 마침표 또는 하이픈만 사용할 수 있습니다.", - "Avoid using the form of an IP address": "IP 주소 형식을 사용하지 마십시오.", - "Globally unique name": "전역 고유 이름", - "BucketClass name": "버킷 클래스 이름", - "A unique name for the bucket class within the project.": "프로젝트 내 버킷 클래스의 고유 이름입니다.", - "my-multi-cloud-mirror": "my-multi-cloud-mirror", - "BucketClass Name": "버킷 클래스 이름", - "Description (Optional)": "설명 (선택 사항)", - "Description of bucket class": "버킷 클래스 설명", - "What is a Namespace Policy?": "네임 스페이스 정책이란 무엇입니까?", - "Namespace policy can be set to one single read and write source, multi read sources or cached policy.": "네임 스페이스 정책은 단일 읽기 및 쓰기 소스, 다중 읽기 소스 또는 캐시 정책으로 설정할 수 있습니다.", - "Namespace Policy Type": "네임 스페이스 정책 유형", - "What is Caching?": "캐싱이란?", - "Caching is a policy that creates local copies of the data. It saves the copies locally to improve performance for frequently accessed data. Each cached copy has a TTL and is verified against the hub. Each non-read operation (upload, overwrite, delete) is performed on the hub": "캐싱은 데이터의 로컬 복사본을 만드는 정책입니다. 자주 액세스하는 데이터의 성능을 향상시키기 위해 복사본을 로컬에 저장합니다. 캐시된 각 사본에는 TTL이 있으며 허브에 대해 검증됩니다. 모든 읽기 이외의 작업 (업로드, 덮어 쓰기, 삭제)은 허브에서 수행됩니다.", - "Hub namespace store ": "Hub 네임 스페이스 저장소", - "A single NamespaceStore that defines the read and write target of the namespace bucket.": "단일 네임 스페이스 저장소는 네임 스페이스 버킷의 읽기 및 쓰기 대상을 정의합니다", - "NamespaceStore": "네임 스페이스 저장소", - "Cache data settings": "캐시 데이터 설정", - "The data will be temporarily copied on a backing store in order to later access it much more quickly.": "나중에 더 빠르게 액세스할 수 있도록 임시로 백업 저장소에 데이터를 복사합니다.", - "Backing store": "백업 저장소", - "a local backing store is recommended for better performance": "성능을 개선하려면 로컬 백업 저장소를 사용하는 것이 좋습니다", - "Time to live": "수명", - "Time to live is the time that an object is stored in a caching system before it is deleted or refreshed. Default: 0, Max: 24 hrs": "수명은 객체가 삭제되거나 새로 고쳐지기 전에 캐싱 시스템에 저장되는 시간을 나타냅니다. 기본값: 0, 최대: 24 시간", - "Read NamespaceStores": "네임 스페이스 저장소 읽기", - "Select a list of NamespaceStores that defines the read targets of the namespace bucket.": "네임 스페이스 저장소 목록을 선택하고 네임 스페이스 버킷의 읽기 대상을 정의합니다.", - "Create NamespaceStore": "네임 스페이스 저장소 만들기", - "{{nns, number}} namespace store_one": "네임스페이스 저장소 {{nns, number}}개", - "{{nns, number}} namespace store_other": "네임스페이스 저장소 {{nns, number}}개", - " selected": " 선택", - "Write NamespaceStore": "네임 스페이스 저장소 쓰기", - "Select a single NamespaceStore that defines the write targets of the namespace bucket.": "네임 스페이스 버킷의 쓰기 대상을 정의하는 단일 네임 스페이스 저장소를 선택합니다.", - "Read and Write NamespaceStore ": "네임 스페이스 저장소 읽기 및 쓰기 ", - "Select one NamespaceStore which defines the read and write targets of the namespace bucket.": "네임 스페이스 버킷의 읽기 및 쓰기 대상을 정의하는 하나의 네임스페이스 저장소를 선택합니다.", - "What is a Placement Policy?": "배치 정책이란 무엇입니까?", - "Data placement capabilities are built as a multi-layer structure here are the layers bottom-up:": "데이터 배치 기능은 다중 계층 구조로 구축되며 계층은 상향식으로 전개됩니다.", - "Spread Tier - list of BackingStores aggregates the storage of multiple stores.": "분산 계층 - 여러 저장소 저장 위치를 집약하는 백업 저장소 목록입니다.", - "Mirroring Tier - list of spread-layers async-mirroring to all mirrors with locality optimization (will allocate on the closest region to the source endpoint). 
Mirroring requires at least two BackingStores.": "미러링 계층-로컬 최적화 (소스 엔드포인트에 가장 가까운 지역에 할당됨)를 사용하여 모든 미러에 비동기 미러링하는 분산 계층 목록으로 미러링에는 최소 두 개의 백업 저장소가 필요합니다.", - "The number of replicas can be configured via the NooBaa management console.": "NooBaa 관리 콘솔을 통해 복제 수를 구성할 수 있습니다.", - "Tier 1 - Policy Type": "계층 1-정책 유형", - "Spread": "분산", - "Spreading the data across the chosen resources. By default a replica of one copy is used and does not include failure tolerance in case of resource failure.": "선택한 리소스에 데이터를 분산합니다. 기본적으로 하나의 복사본의 복제가 사용되며 리소스 장애 발생시 내결함성이 포함되지 않습니다.", - "Mirror": "미러", - "Full duplication of the data in each chosen resource. By default a replica of one copy per location is used. Includes failure tolerance in case of resource failure.": "선택한 각 리소스 데이터의 전체 복제. 기본적으로 각 위치에 하나의 복사본 복제가 사용됩니다. 리소스 장애 발생시 내결함성이 포함됩니다.", - "Add Tier": "계층 추가", - "Tier 2 - Policy type": "계층 2-정책 유형", - "Remove Tier": "계층 삭제", - "Spreading the data across the chosen resources does not include failure tolerance in case of resource failure.": "선택한 리소스 전체의 데이터를 분산하는 데 리소스 장애 발생시 내결함성이 포함되지 않습니다.", - "Full duplication of the data in each chosen resource includes failure tolerance in cause of resource failure.": "선택한 각 리소스 데이터의 전체 복제에는 리소스 장애로 인한 결함 허용이 포함됩니다.", - "Namespace Policy: ": "네임 스페이스 정책: ", - "Read and write NamespaceStore : ": "네임 스페이스 저장소 읽기 및 쓰기 :", - "Hub namespace store: ": "Hub 네임 스페이스 저장소: ", - "Cache backing store: ": "캐시 백업 저장소: ", - "Time to live: ": "수명 : ", - "Resources ": "리소스 ", - "Selected read namespace stores: ": "선택한 읽기 네임 스페이스 저장소: ", - "Selected write namespace store: ": "선택한 쓰기 네임 스페이스 저장소: ", - "Placement policy details ": "배치 정책 세부 정보 ", - "Tier 1: ": "계층 1: ", - "Selected BackingStores": "선택한 백업 저장소", - "Tier 2: ": "계층 2: ", - "Review BucketClass": "버킷 클래스 검토", - "BucketClass type: ": "버킷 클래스 유형: ", - "BucketClass name: ": "버킷 클래스 이름: ", - "Description: ": "설명: ", - "Provider {{provider}}": "공급자 {{provider}}", - "Create new BackingStore ": "새 백업 저장소 만들기 ", - "An error has occured while fetching backing stores": "백업 저장소를 가져 오는 중에 오류가 발생했습니다", - "Select a backing store": "백업 저장소 선택", - "Storage targets that are used to store chunks of data on Multicloud Object Gateway buckets.": "Multicloud Object Gateway 버킷에 데이터 블록을 저장하는 데 사용되는 스토리지 대상입니다.", - "A BackingStore represents a storage target to be used as the underlying storage layer in Multicloud Object Gateway buckets.": "백업 저장소는 Multicloud Object Gateway 버킷의 데이터에 대한 기본 스토리지로 사용할 스토리지 대상을 나타냅니다.", - "Multiple types of BackingStores are supported: AWS S3 S3 Compatible Google Cloud Storage Azure Blob PVC.": "여러 유형의 백업 저장소가 지원됩니다. AWS S3 S3와 호환되는 Google Cloud Storage Azure Blob PVC.", - "BackingStore Name": "백업 저장소 이름", - "A unique name for the BackingStore within the project": "프로젝트의 백업 저장소의 고유 이름", - "Name can contain a max of 43 characters": "이름은 최대 43자를 포함할 수 있습니다.", - "Provider": "공급자", - "Create BackingStore": "백업 저장소 만들기", - "This is an Advanced subscription feature. It requires Advanced Edition subscription. Please contact the account team for more information.": "이는 고급 서브스크립션 기능입니다. Advanced Edition 서브스크립션이 필요합니다. 
자세한 내용은 계정 팀에 문의하십시오.", - "Advanced Subscription": "고급 서브스크립션", - "Storage platform": "스토리지 플랫폼", - "Select a storage platform you wish to connect": "연결할 스토리지 플랫폼을 선택하십시오.", - "Select external system from list": "목록에서 외부 시스템 선택", - "Backing storage type": "백업 저장소 유형", - "Use an existing StorageClass": "기존 스토리지 클래스 사용", - "OpenShift Data Foundation will use an existing StorageClass available on your hosting platform.": "OpenShift Data Foundation은 호스팅 플랫폼에서 사용할 수 있는 기존 스토리지 클래스를 사용합니다.", - "Create a new StorageClass using local storage devices": "로컬 스토리지 장치를 사용하여 새 스토리지 클래스 생성", - "OpenShift Data Foundation will use a StorageClass provided by the Local Storage Operator (LSO) on top of your attached drives. This option is available on any platform with devices attached to nodes.": "OpenShift Data Foundation은 연결된 드라이브 위에 LSO(Local Storage Operator)가 제공하는 스토리지 클래스를 사용합니다. 이 옵션은 노드에 연결된 장치가 있는 모든 플랫폼에서 사용할 수 있습니다.", - "Connect an external storage platform": "외부 스토리지 플랫폼 연결", - "OpenShift Data Foundation will create a dedicated StorageClass.": "OpenShift Data Foundation은 전용 스토리지 클래스를 생성합니다.", - "Deploys MultiCloud Object Gateway without block and file services.": "블록 및 파일 서비스 없이 MultiCloud Object Gateway를 배포합니다.", - "Deploys OpenShift Data Foundation with block, shared fileSystem and object services.": "블록, 공유 파일 시스템 및 개체 서비스와 함께 OpenShift Data Foundation을 배포합니다.", - "Deployment type": "배포 유형", - "Taint nodes": "테인트 노드", - "Selected nodes will be dedicated to OpenShift Data Foundation use only": "선택한 노드는 OpenShift Data Foundation 전용으로 사용됩니다.", - "Select capacity": "용량 선택", - "Requested capacity": "요청된 용량", - "Select nodes": "노드 선택", - "Select at least 3 nodes preferably in 3 different zones. It is recommended to start with at least 14 CPUs and 34 GiB per node.": "가급적 3 개의 다른 영역에서 3 개 이상의 노드를 선택합니다. 
노드 당 최소 14 개의 CPU와 34GiB로 시작하는 것이 좋습니다.", - "PersistentVolumes are being provisioned on the selected nodes.": "선택한 노드에서 영구 볼륨이 프로비저닝되고 있습니다.", - "Error while loading PersistentVolumes.": "영구 볼륨을 로드하는 동안 오류가 발생했습니다.", - "Selected capacity": "선택한 용량", - "Available raw capacity": "사용 가능한 원시 용량", - "The available capacity is based on all attached disks associated with the selected StorageClass <2>{{storageClassName}}": "사용 가능한 용량은 선택된 스토리지 클래스 <2>{{storageClassName}}와 연결된 모든 연결된 디스크를 기반으로 합니다.", - "Selected nodes": "선택된 노드", - "Role": "역할", - "CPU": "CPU", - "Memory": "메모리", - "Zone": "영역", - "Selected nodes table": "선택된 노드 테이블", - "To support high availability when two data centers can be used, enable arbiter to get a valid quorum between the two data centers.": "두 개의 데이터 센터를 사용할 수 있는 경우 고가용성을 지원하려면 Arbiter가 두 데이터 센터간에 유효한 쿼럼을 취득할 수 있도록합니다.", - "Arbiter minimum requirements": "Arbiter의 최소 요구 사항", - "Stretch Cluster": "클러스터 확장", - "Enable arbiter": "Arbiter 활성화", - "Arbiter zone": "Arbiter 영역", - "An arbiter node will be automatically selected from this zone": "이 영역에서 arbiter 노드가 자동으로 선택됩니다.", - "Select an arbiter zone": "Arbiter 영역 선택", - "Arbiter zone selection": "Arbiter 영역 선택", - "Connection details": "연결 세부 정보", - "Disks on all nodes": "모든 노드의 디스크", - "{{nodes, number}} node_one": "{{nodes, number}} 노드", - "{{nodes, number}} node_other": "{{nodes, number}} 노드", - "Please enter a positive Integer": "양의 정수를 입력하십시오", - "LocalVolumeSet name": "로컬 볼륨 세트 이름", - "A LocalVolumeSet will be created to allow you to filter a set of disks, group them and create a dedicated StorageClass to consume storage from them.": "로컬 볼륨 세트가 생성되어 디스크 세트를 필터링하고 그룹화한 후 전용 스토리지 클래스를 생성하여 디스크에서 스토리지를 사용할 수 있습니다.", - "StorageClass name": "스토리지 클래스 이름", - "Filter disks by": "디스크 필터링 기준", - "Uses the available disks that match the selected filters on all nodes.": "모든 노드에서 선택한 필터와 일치하는 사용 가능한 디스크가 사용됩니다.", - "Disks on selected nodes": "선택된 노드의 디스크", - "Uses the available disks that match the selected filters only on selected nodes.": "선택한 노드에서만 선택한 필터와 일치하는 사용 가능한 디스크가 사용됩니다.", - "Disk type": "디스크 유형", - "Advanced": "고급 옵션", - "Volume mode": "볼륨 모드", - "Device type": "장치 유형", - "Select disk types": "디스크 유형 선택", - "Disk size": "디스크 크기", - "Minimum": "최소 크기", - "Please enter a value less than or equal to max disk size": "최대 디스크 크기보다 작거나 같은 값을 입력하십시오.", - "Maximum": "최대 크기", - "Please enter a value greater than or equal to min disk size": "최소 디스크 크기보다 크거나 같은 값을 입력하십시오.", - "Units": "단위", - "Maximum disks limit": "최대 디스크 제한", - "Disks limit will set the maximum number of PVs to create on a node. If the field is empty we will create PVs for all available disks on the matching nodes.": "디스크 제한은 노드에 생성할 최대 PV 수를 설정합니다. 이 필드가 비어 있으면 일치하는 노드에서 사용 가능한 모든 디스크에 대한 PV가 생성됩니다.", - "All": "모두", - "Local Storage Operator not installed": "로컬 스토리지 Operator가 설치되어 있지 않음", - "Before we can create a StorageSystem, the Local Storage Operator needs to be installed. When installation is finished come back to OpenShift Data Foundation to create a StorageSystem.<1><0>Install": "스토리지 시스템을 생성하려면 먼저 로컬 스토리지 Operator를 설치해야합니다. 설치가 완료되면 OpenShift Data Foundation로 돌아와 스토리지 시스템을 생성합니다.<1><0>설치", - "Checking Local Storage Operator installation": "로컬 스토리지 Operator 설치 확인", - "Discovering disks on all hosts. This may take a few minutes.": "모든 호스트에서 디스크 검색. 이 작업은 몇 분 정도 걸릴 수 있습니다.", - "Minimum Node Requirement": "최소 노드 요구 사항", - "A minimum of 3 nodes are required for the initial deployment. Only {{nodes}} node match to the selected filters. 
Please adjust the filters to include more nodes.": "초기 배포에는 최소 3 개의 노드가 필요합니다. {{nodes}} 노드만 선택한 필터와 일치합니다. 노드를 추가하려면 필터를 조정하십시오.", - "After the LocalVolumeSet is created you won't be able to edit it.": "로컬 볼륨 세트가 생성된 후에는 편집할 수 없습니다.", - "Note:": "참고:", - "Create LocalVolumeSet": "로컬 볼륨 세트 만들기", - "Yes": "예", - "Are you sure you want to continue?": "계속 진행하시겠습니까?", - "Node": "노드", - "Model": "모델", - "Capacity": "용량", - "Selected Disks": "선택한 디스크", - "Disk List": "디스크 목록", - "{{nodes, number}} Node_one": "{{nodes, number}} 노드", - "{{nodes, number}} Node_other": "노드 {{nodes, number}} 개", - "{{disks, number}} Disk_one": "{{disks, number}} 디스크", - "{{disks, number}} Disk_other": "디스크 {{disks, number}} 개", - "Selected versus Available Capacity": "선택한 용량 vs. 사용 가능한 용량", - "Out of {{capacity}}": "/ {{capacity}}", - "{{displayName}} connection details": "{{displayName}} 연결 세부 정보", - "Not connected": "연결되지 않음", - "Backing storage": "백업 저장소", - "Deployment type: {{deployment}}": "배포 유형: {{deployment}}", - "Backing storage type: {{name}}": "백업 저장소 유형: {{name}}", - "External storage platform: {{storagePlatform}}": "외부 스토리지 메타데이터: {{storagePlatform}}", - "Capacity and nodes": "용량 및 노드", - "Cluster capacity: {{capacity}}": "클러스터 용량: {{capacity}}", - "Selected nodes: {{nodeCount, number}} node_one": "선택된 노드: {{nodeCount, number}} 노드", - "Selected nodes: {{nodeCount, number}} node_other": "선택된 노드: {{nodeCount, number}} 노드", - "CPU and memory: {{cpu, number}} CPU and {{memory}} memory": "CPU 및 메모리:{{cpu, number}} CPU 및 {{memory}} 메모리", - "Zone: {{zoneCount, number}} zone_one": "영역: {{zoneCount, number}} 영역", - "Zone: {{zoneCount, number}} zone_other": "영역: {{zoneCount, number}} 영역", - "Arbiter zone: {{zone}}": "Arbiter 영역: {{zone}}", - "Taint nodes: {{ocsTaintsStatus}}": "테인트 노드: {{ocsTaintsStatus}}", - "Security": "보안", - "Encryption: Enabled": "암호화: 사용", - "External key management service: {{kmsStatus}}": "외부 키 관리 서비스: {{kmsStatus}}", - "Security and network": "보안 및 네트워크", - "Encryption: {{encryptionStatus}}": "암호화: {{encryptionStatus}}", - "Network: {{networkType}}": "네트워크: {{networkType}}", - "Encryption level": "암호화 수준", - "The StorageCluster encryption level can be set to include all components under the cluster (including StorageClass and PVs) or to include only StorageClass encryption. PV encryption can use an auth token that will be used with the KMS configuration to allow multi-tenancy.": "스토리지 클러스터 암호화 수준은 클러스터 아래의 모든 구성 요소 (스토리지 클래스 및 PV 포함)를 포함하거나 스토리지 클래스 암호화만 포함하도록 설정할 수 있습니다. PV 암호화는 KMS 구성과 함께 사용되는 인증 토큰을 사용하여 멀티 테넌시를 허용할 수 있습니다.", - "Cluster-wide encryption": "클러스터 전체 암호화", - "Encryption for the entire cluster (block and file)": "클러스터 전체 (블록 및 파일)의 암호화", - "StorageClass encryption": "스토리지 클래스 암호화", - "An encryption key will be generated for each persistent volume (block) created using an encryption enabled StorageClass.": "암호화가 활성화된 스토리지 클래스를 사용하여 생성된 각 영구 볼륨 (블록 전용)에 대한 암호화 키가 생성됩니다.", - "Connection settings": "연결 설정", - "Connect to an external key management service": "외부 키 관리 서비스에 연결", - "Data encryption for block and file storage. MultiCloud Object Gateway is always encrypted.": "블록 및 파일 저장을 위한 데이터 암호화. 
MultiCloud Object Gateway는 항상 암호화됩니다.", - "MultiCloud Object Gateway is always encrypted.": "MultiCloud Object Gateway는 항상 암호화됩니다.", - "Enable data encryption for block and file storage": "블록 및 파일 스토리지에 대한 데이터 암호화 활성화", - "Enable encryption": "암호 활성화", - "Encryption": "암호화", - "An error has occurred: {{error}}": "오류가 발생했습니다: {{error}}", - "IP address": "IP 주소", - "Rest API IP address of IBM FlashSystem.": "IBM FlashSystem의 REST API IP 주소입니다.", - "The endpoint is not a valid IP address": "엔드포인트가 유효한 IP 주소가 아닙니다.", - "Username": "사용자 이름", - "Password": "암호", - "Hide password": "암호 숨기기", - "Reveal password": "암호 표시", - "The uploaded file is not a valid JSON file": "업로드된 파일은 유효한 JSON 파일이 아닙니다.", - "External storage system metadata": "외부 스토리지 시스템 메타데이터", - "Download <1>{{SCRIPT_NAME}} script and run on the RHCS cluster, then upload the results (JSON) in the External storage system metadata field.": "<1>{{SCRIPT_NAME}} 스크립트를 다운로드하고 RHCS 클러스터에서 실행한 다음 외부 스토리지 시스템 메타 데이터 필드에 결과 (JSON)를 업로드합니다.", - "Download script": "스크립트 다운로드", - "Browse": "검색", - "Clear": "지우기", - "Upload helper script": "도우미 스크립트 업로드", - "An error has occurred": "오류가 발생했습니다", - "Create StorageSystem": "스토리지 시스템 만들기", - "Create a StorageSystem to represent your OpenShift Data Foundation system and all its required storage and computing resources.": "OpenShift Data Foundation 시스템과 필요한 모든 스토리지 및 컴퓨팅 리소스를 표시하는 스토리지 시스템을 만듭니다.", - "{{nodeCount, number}} node_one": "{{nodeCount, number}} 노드", - "{{nodeCount, number}} node_other": "{{nodeCount, number}} 노드", - "selected ({{cpu}} CPU and {{memory}} on ": "선택됨 ({{cpu}} CPU 및 {{memory}}", - "{{zoneCount, number}} zone_one": "{{zoneCount, number}} 영역", - "{{zoneCount, number}} zone_other": "{{zoneCount, number}} 영역", - "Search by node name...": "노드 이름으로 검색 ...", - "Search by node label...": "노드 라벨로 검색 ...", - "Not found": "찾을 수 없음", - "Compression eligibility": "압축 가능", - "Compression eligibility indicates the percentage of incoming data that is compressible": "압축 가능 용량은 압축 가능한 수신 데이터의 백분율을 나타냅니다.", - "Compression savings": "압축 저장", - "Compression savings indicates the total savings gained from compression for this pool, including replicas": "압축 저장 용량은 복제본을 포함하여 이 풀에 대해 압축을 통해 얻은 총 저장 용량을 나타냅니다.", - "Compression ratio": "압축률", - "Compression ratio indicates the achieved compression on eligible data for this pool": "압축률은 이 풀의 적합한 데이터에 대해 달성된 압축률을 나타냅니다.", - "Compression status": "압축 상태", - "Storage efficiency": "스토리지 효율성", - "Details": "세부 정보", - "Replicas": "복제", - "Inventory": "인벤토리", - "Not available": "사용할 수 없음", - "Image states info": "이미지 상태 정보", - "What does each state mean?": "각 상태는 무엇을 의미합니까?", - "<0>Starting replay: Initiating image (PV) replication process.": "<0>재생 시작: 이미지(PV) 복제 프로세스를 시작합니다.", - "<0>Replaying: Image (PV) replication is ongoing or idle between clusters.": "<0>재생 중: 이미지(PV) 복제가 클러스터 간에 진행 중이거나 유휴 상태입니다.", - "<0>Stopping replay: Image (PV) replication process is shutting down.": "<0>재생 중지 중: 이미지(PV) 복제 프로세스가 종료되고 있습니다.", - "<0>Stopped: Image (PV) replication process has shut down.": "<0>중지됨: 이미지(PV) 복제 프로세스가 종료되었습니다.", - "<0>Error: Image (PV) replication process stopped due to an error.": "<0>오류: 오류로 인해 이미지(PV) 복제 프로세스가 중지되었습니다.", - "<0>Unknown: Unable to determine image (PV) state due to an error. Check your network connection and remote cluster mirroring daemon.": "<0>알 수 없음: 오류로 인해 이미지(PV) 상태를 확인할 수 없습니다. 
네트워크 연결 및 원격 클러스터 미러링 데몬을 확인하십시오.", - "image states info": "이미지 상태 정보", - "Image States": "이미지 상태", - "Mirroring": "미러링", - "Mirroring status": "미러링 상태", - "Overall image health": "전체 이미지 상태", - "Show image states": "이미지 상태 표시", - "Last checked": "마지막 확인", - "Raw Capacity shows the total physical capacity from all storage media within the storage subsystem": "원시 용량은 스토리지 하위 시스템에 있는 모든 스토리지 미디어의 총 물리적 용량을 보여줍니다.", - "Start replay": "재생 시작", - "Stop reply": "재생 중지", - "Replaying": "재생 중", - "Stopped": "중지됨", - "Error": "오류", - "Syncing": "동기화 중", - "Unknown": "알 수 없음", - "Status": "상태", - "Performance": "성능", - "IOPS": "IOPS", - "Throughput": "처리량", - "Not enough usage data": "사용 데이터가 충분하지 않습니다.", - "used": "사용됨", - "available": "사용 가능", - "Other": "기타", - "All other capacity usage that are not a part of the top 5 consumers.": "상위 5 개 소비자에 속하지 않는 기타 모든 용량 사용량.", - "Available": "사용 가능", - "Breakdown Chart": "분류 차트", - "Warning": "경고", - "Raw capacity": "원시 용량", - "Used": "사용됨", - "Available versus Used Capacity": "사용 가능한 용량 vs 사용된 용량", - "Used of {{capacity}}": "{{capacity}} 사용됨", - "Not Available": "사용할 수 없음", - "Rebuilding data resiliency": "데이터 복구 기능 다시 빌드", - "{{formattedProgress, number}}%": "{{formattedProgress, number}}%", - "Activity": "활동", - "Estimating {{formattedEta}} to completion": "완료까지 {{formattedEta}} 예상", - "Object_one": "개체", - "Object_other": "개체", - "Buckets": "버킷", - "Buckets card represents the number of S3 buckets managed on Multicloud Object Gateway and the number of ObjectBucketClaims and the ObjectBuckets managed on both Multicloud Object Gateway and RGW (if deployed).": "버킷 카드는 멀티 클라우드 개체 게이트웨이에서에서 관리되는 S3 버킷 수와 개체 버킷 클레임 및 클라우드 개체 게이트웨이 및 RGW (배포 된 경우)에서 관리되는 개체 버킷 수입니다.", - "NooBaa Bucket": "NooBaa 버킷", - "Break by": "분류 대상", - "Total": "합계", - "Projects": "프로젝트", - "BucketClasses": "버킷 클래스", - "Service type": "서비스 유형", - "Cluster-wide": "클러스터 전체", - "Any NON Object bucket claims that were created via an S3 client or via the NooBaa UI system.": "S3 클라이언트 또는 NooBaa UI 시스템을 통해 생성된 모든 개체 버킷 요청 이외의 클레임.", - "Capacity breakdown": "용량 분석", - "This card shows used capacity for different resources. The available capacity is based on cloud services therefore it cannot be shown.": "이 카드는 다양한 리소스에 사용된 용량을 보여줍니다. 사용 가능한 용량은 클라우드 서비스를 기반으로 하므로 표시할 수 없습니다.", - "Type: {{serviceType}}": "유형: {{serviceType}}", - "Service Type Dropdown": "서비스 유형 드롭 다운", - "Service Type Dropdown Toggle": "서비스 유형 드롭 다운 토글", - "By: {{serviceType}}": "{{serviceType}} 별", - "Break By Dropdown": "드롭 다운으로 분류", - "Providers": "제공자", - "Accounts": "계정", - "Metric": "지표", - "I/O Operations": "I/O 작업", - "Logical Used Capacity": "논리적 사용 용량", - "Physical vs. 
Logical used capacity": "물리적 및 논리적 사용 용량", - "Egress": "Egress", - "Latency": "지연 시간", - "Bandwidth": "대역폭", - "Service Type": "서비스 유형", - "Type: {{selectedService}}": "유형: {{selectedService}}", - "{{selectedMetric}} by {{selectedBreakdown}}": "{{selectedMetric}} ({{selectedBreakdown}} 별)", - "thousands": "수천", - "millions": "수백만", - "billions": "수십억", - "Total Reads {{totalRead}}": "총 읽기 {{totalRead}}", - "Total Writes {{totalWrite}}": "총 쓰기 {{totalWrite}}", - "Total Logical Used Capacity {{logicalCapacity}}": "총 논리적 사용 용량 {{logicalCapacity}}", - "Total Physical Used Capacity {{physicalcapacity}}": "총 물리적 사용 용량 {{physicalcapacity}}", - "Shows an overview of the data consumption per provider or account collected from the day of the entity creation.": "엔터티 생성 일부터 수집 된 공급자 또는 계정 별 데이터 소비에 대한 개요를 표시합니다.", - "(in {{suffixLabel}})": "({{suffixLabel}})", - "Data Consumption Graph": "데이터 소비 그래프", - "GET {{GETLatestValue}}": "GET {{GETLatestValue}}", - "PUT {{PUTLatestValue}}": "PUT {{PUTLatestValue}}", - "OpenShift Data Foundation": "OpenShift Data Foundation", - "OpenShift Container Storage": "Openshift Container Storage", - "Service name": "서비스 이름", - "System name": "시스템 이름", - "Multicloud Object Gateway": "멀티 클라우드 개체 게이트웨이", - "RADOS Object Gateway": "RADOS 개체 게이트웨이", - "Version": "버전", - "Resource Providers": "리소스 공급자", - "A list of all Multicloud Object Gateway resources that are currently in use. Those resources are used to store data according to the buckets' policies and can be a cloud-based resource or a bare metal resource.": "현재 사용중인 모든 Multicloud Object Gateway 리소스 목록입니다. 이러한 리소스는 버킷 정책에 따라 데이터를 저장하는 데 사용되며 클라우드 기반 리소스 또는 베어 메탈 리소스입니다.", - "Object Service": "개체 서비스", - "Data Resiliency": "데이터 복구 가능성", - "Object Service Status": "개체 서비스 상태", - "The object service includes 2 services.": "개체 서비스에는 2 개의 서비스가 포함되어 있습니다.", - "The data resiliency includes 2 services": "데이터 복구 기능에는 2 개의 서비스가 포함되어 있습니다.", - "Services": "서비스", - "Object Gateway (RGW)": "개체 게이트웨이 (RGW)", - "All resources are unhealthy": "모든 리소스가 비정상입니다.", - "Object Bucket has an issue": "개체 버킷에 문제가 있습니다.", - "Many buckets have issues": "많은 버킷에 문제가 있습니다.", - "Some buckets have issues": "일부 버킷에 문제가 있습니다.", - "{{capacityRatio, number}}:1": "{{capacityRatio, number}}:1", - "OpenShift Data Foundation can be configured to use compression. The efficiency rate reflects the actual compression ratio when using such a configuration.": "OpenShift Data Foundation은 압축을 사용하도록 구성할 수 있습니다. 이러한 구성을 사용할 때 효율성 비율은 실제 압축 비율을 반영합니다.", - "Savings": "저장", - "Savings shows the uncompressed and non-deduped data that would have been stored without those techniques.": "저장은 이러한 기술이 사용되지 않는 경우 저장되어 있는 압축되지 않은 데이터와 중복되지 않은 데이터를 표시합니다.", - "Storage Efficiency": "스토리지 효율성", - "OpenShift Container Storage Overview": "OpenShift Container Storage 개요", - "Block and File": "블록 및 파일", - "Object_0": "개체", - "BlockPools": "블록 풀", - "Storage Classes": "스토리지 클래스", - "Pods": "Pod", - "{{metricType}}": "{{metricType}}", - "Break by dropdown": "드롭 다운으로 분류", - "Service Name": "서비스 이름", - "Cluster Name": "클러스터 이름", - "Mode": "모드", - "Storage Cluster": "스토리지 클러스터", - "Utilization": "사용", - "Used Capacity": "사용된 용량", - "Expanding StorageCluster": "스토리지 클러스터 확장", - "Upgrading OpenShift Data Foundation's Operator": "OpenShift Data Foundation Operator 업그레이드", - "Used Capacity Breakdown": "사용된 용량 분석", - "This card shows the used capacity for different Kubernetes resources. 
The figures shown represent the Usable storage, meaning that data replication is not taken into consideration.": "이 카드는 다양한 Kubernetes 리소스에 사용된 용량을 보여줍니다. 표시된 컨텐츠는 사용 가능한 스토리지를 나타내며 데이터 복제는 고려되지 않습니다.", - "Cluster name": "클러스터 이름", - "Internal": "내부", - "Raw capacity is the absolute total disk space available to the array subsystem.": "원시 용량은 어레이 하위 시스템에서 사용할 수있는 절대적인 총 디스크 공간입니다.", - "Troubleshoot": "문제 해결", - "Active health checks": "활성 상태 점검", - "Progressing": "진행 중", - "The Compression Ratio represents the compressible data effectiveness metric inclusive of all compression-enabled pools.": "압축 비율은 압축이 활성화된 모든 풀에 대한 압축 가능한 데이터 효율성 메트릭을 나타냅니다.", - "The Savings metric represents the actual disk capacity saved inclusive of all compression-enabled pools and associated replicas.": "저장 메트릭은 모든 압축이 활성화된 풀 및 관련 복제본을 포함하여 저장된 실제 디스크 용량을 나타냅니다.", - "Performance metrics over time showing IOPS, Latency and more. Each metric is a link to a detailed view of this metric.": "IOPS, 지연 시간 등을 나타내는 시간 경과에 따른 성능 메트릭입니다. 각 메트릭은 이 메트릭의 상세보기에 대한 링크입니다.", - "Recovery": "복구", - "Disk State": "디스크 상태", - "OpenShift Data Foundation status": "OpenShift Data Foundation 상태", - "Filesystem": "파일 시스템", - "Disks List": "디스크 목록", - "Start Disk Replacement": "디스크 교체 시작", - "<0>{{diskName}} can be replaced with a disk of same type.": "<0>{{diskName}}은/는 동일한 유형의 디스크로 교체할 수 있습니다.", - "Troubleshoot disk <1>{{diskName}}": "디스크 <1>{{diskName}} 문제 해결", - "here": "여기", - "Online": "온라인", - "Offline": "오프라인", - "NotResponding": "응답 없음", - "PreparingToReplace": "교체 준비 중", - "ReplacementFailed": "교체 실패", - "ReplacementReady": "교체 준비", - "Connection name": "연결 이름", - "This is a required field": "필수 필드입니다.", - "A unique name for the key management service within the project.": "프로젝트 내 키 관리 서비스의 고유 이름입니다.", - "Service instance ID": "서비스 인스턴스 ID", - "Service API key": "서비스 API 키", - "Customer root key": "고객 루트 키", - "IBM Base URL": "IBM 기본 URL", - "IBM Token URL": "IBM 토큰 URL", - "Connect to a Key Management Service": "키 관리 서비스에 연결", - "Key management service provider": "키 관리 서비스 공급자", - "kms-provider-name": "kms-provider-name", - "Token": "토큰", - "Create a secret with the token for every namespace using encrypted PVCs.": "암호화된 PVC를 사용하여 모든 네임스페이스에 대한 토큰으로 시크릿을 만듭니다.", - "Hide token": "토큰 숨기기", - "Reveal token": "토큰 공개", - "Authentication method": "인증 방법", - "authentication-method": "인증-방법", - "Please enter a URL": "URL을 입력하십시오.", - "Please enter a valid port": "유효한 포트를 입력하십시오", - "Address": "주소", - "Port": "포트", - "Advanced settings": "고급 설정", - "Raw Capacity": "원시 용량", - "x {{ replica, number }} replicas =": "x {{ replica, number }} 복제 =", - "No StorageClass selected": "스토리지 클래스가 선택되어 있지 않습니다", - "The Arbiter stretch cluster requires a minimum of 4 nodes (2 different zones, 2 nodes per zone). Please choose a different StorageClass or create a new LocalVolumeSet that matches the minimum node requirement.": "Arbiter 확장 클러스터에는 최소 4 개의 노드가 필요합니다 (2 개의 다른 영역, 영역 당 2 개의 노드). 다른 스토리지 클래스를 선택하거나 최소 노드 요구 사항과 일치하는 새 로컬 볼륨 세트를 만드십시오.", - "The StorageCluster requires a minimum of 3 nodes. Please choose a different StorageClass or create a new LocalVolumeSet that matches the minimum node requirement.": "스토리지 클러스터에는 최소 3 개의 노드가 필요합니다. 
다른 스토리지 클래스를 선택하거나 최소 노드 요구 사항과 일치하는 새 로컬 볼륨 세트를 만드십시오.", - "Adding capacity for <1>{{name}}, may increase your expenses.": "<1>{{name}}의 용량을 추가하면 비용이 증가할 수 있습니다.", - "StorageClass": "스토리지 클래스", - "Currently Used:": "현재 사용:", - "Add": "추가", - "Key Management Service Advanced Settings": "키 관리 서비스 고급 설정", - "Vault enterprise namespaces are isolated environments that functionally exist as Vaults within a Vault. They have separate login paths and support creating and managing data isolated to their namespace.": "Vault 엔터프라이즈 네임 스페이스는 Vault에서 Vault로 기능적으로 존재하는 분리된 환경입니다. 별도의 로그인 경로가 있으며 네임 스페이스에 분리된 데이터 생성 및 관리를 지원합니다.", - "Maximum file size exceeded. File limit is 4MB.": "최대 파일 크기를 초과했습니다. 파일 제한은 4MB입니다.", - "A PEM-encoded CA certificate file used to verify the Vault server's SSL certificate.": "Vault 서버의 SSL 인증서를 확인하는 데 사용되는 PEM 인코딩 CA 인증서 파일입니다.", - "A PEM-encoded client certificate. This certificate is used for TLS communication with the Vault server.": "PEM으로 인코딩 된 클라이언트 인증서. 이 인증서는 Vault 서버와의 TLS 통신에 사용됩니다.", - "An unencrypted, PEM-encoded private key which corresponds to the matching client certificate provided with VAULT_CLIENT_CERT.": "VAULT_CLIENT_CERT와 함께 제공되는 일치하는 클라이언트 인증서에 해당하는 암호화되지 않은 PEM 인코딩 개인 키입니다.", - "The name to use as the SNI host when OpenShift Data Foundation connecting via TLS to the Vault server": "OpenShift Data Foundation이 TLS를 통해 Vault 서버에 연결할 때 SNI 호스트로 사용하는 이름", - "Backend Path": "백엔드 경로", - "path/": "경로/", - "Authentication Path": "인증 경로", - "Authentication Namespace": "인증 네임스페이스", - "TLS Server Name": "TLS 서버 이름", - "Vault Enterprise Namespace": "Vault Enterprise 네임 스페이스", - "The name must be accurate and must match the service namespace": "이름은 정확해야 하며 서비스 네임 스페이스와 일치해야합니다.", - "CA Certificate": "CA 인증서", - "Upload a .PEM file here...": "여기에 .PEM 파일 업로드합니다...", - "Client Certificate": "클라이언트 인증서", - "Client Private Key": "클라이언트 개인 키", - "Attach OBC to a Deployment": "배포에 OBC 연결", - "Deployment Name": "배포 이름", - "Attach": "연결", - "<0><0>{{poolName}} cannot be deleted. When a pool is bounded to PVC it cannot be deleted. Please detach all the resources from StorageClass(es):": "<0><0>{{poolName}}은/는 삭제할 수 없습니다. 풀이 PVC에 바인드되면 삭제할 수 없습니다. 스토리지 클래스에서 모든 리소스를 분리하십시오:", - "<0>Deleting <1>{{poolName}} will remove all the saved data of this pool. Are you sure want to delete?": "<0>삭제하면 <1>{{poolName}}은/는 이 풀에 저장된 모든 데이터를 삭제합니다. 
삭제하시겠습니까?", - "BlockPool Delete Modal": "블록 풀 삭제 모달", - "Try Again": "다시 시도", - "Finish": "종료", - "Go To Pvc List": "PVC 목록으로 이동", - "BlockPool Update Form": "블록 풀 업데이트 양식", - "replacement disallowed: disk {{diskName}} is {{replacingDiskStatus}}": "교체 불가: 디스크{{diskName}}은/는 {{replacingDiskStatus}}입니다.", - "replacement disallowed: disk {{diskName}} is {{replacementStatus}}": "교체 불가: 디스크{{diskName}}은/는 {{replacementStatus}}입니다.", - "Disk Replacement": "디스크 교체", - "This action will start preparing the disk for replacement.": "이 작업은 디스크 교체 준비를 시작합니다.", - "Data rebalancing is in progress": "데이터 재조정이 진행 중입니다.", - "See data resiliency status": "데이터 복구 기능의 상태 표시", - "Are you sure you want to replace <1>{{diskName}}?": "<1>{{diskName}}을/를 교체하시겠습니까?", - "Replace": "교체", - "Create NamespaceStore ": "네임 스페이스 저장소 만들기 ", - "Represents an underlying storage to be used as read or write target for the data in the namespace buckets.": "네임 스페이스 버킷의 데이터에 대한 읽기 또는 쓰기 대상으로 사용할 기본 스토리지를 나타냅니다.", - "Provider {{provider}} | Region: {{region}}": "공급자 {{provider}} | 지역: {{region}}", - "Create new NamespaceStore ": "새 네임 스페이스 저장소 만들기 ", - "An error has occurred while fetching namespace stores": "네임 스페이스 저장소를 가져 오는 동안 오류가 발생했습니다.", - "Select a namespace store": "네임 스페이스 저장소 선택", - "Namespace store name": "네임 스페이스 저장소 이름", - "A unique name for the namespace store within the project": "프로젝트의 네임스페이스 저장소의 고유 이름", - "Persistent volume claim": "영구 볼륨 클레임", - "Folder": "폴더", - "If the name you write exists, we will be using the existing folder if not we will create a new folder ": "작성하신 이름이 존재하면 기존 폴더를 사용하고 없으면 새 폴더를 생성합니다.", - "Namespace Store Table": "네임 스페이스 저장소 테이블", - "Service account keys are needed for Google Cloud Storage authentication. The keys can be found in the service accounts page in the GCP console.": "Google Cloud Storage 인증에는 서비스 계정 키가 필요합니다. 
키는 GCP 콘솔의 서비스 계정 페이지에서 확인할 수 있습니다.", - "Learn more": "더 알아보기", - "Where can I find Google Cloud credentials?": "Google Cloud 인증 정보의 확인 방법", - "Upload a .json file with the service account keys provided by Google Cloud Storage.": "Google Cloud Storage에서 제공하는 서비스 계정 키를 사용하여 .json 파일을 업로드합니다.", - "Secret Key": "시크릿 키", - "Upload JSON": "JSON 업로드", - "Uploaded File Name": "업로드한 파일 이름", - "Upload File": "파일 업로드", - "Switch to Secret": "시크릿으로 전환", - "Select Secret": "시크릿 선택", - "Switch to upload JSON": "JSON 업로드로 전환", - "Cluster Metadata": "클러스터 메타 데이터", - "Target Bucket": "대상 버킷", - "Number of Volumes": "볼륨 수", - "Volume Size": "볼륨 크기", - "Target blob container": "대상 Blob 컨테이너", - "Target bucket": "대상 버킷", - "Account name": "계정 이름", - "Access key": "액세스 키", - "Account key": "계정 키", - "Secret key": "시크릿 키", - "Region Dropdown": "리전 드롭 다운", - "Endpoint": "엔드포인트", - "Endpoint Address": "엔드포인트 주소", - "Secret": "시크릿", - "Switch to Credentials": "인증 정보로 전환", - "Access Key Field": "액세스 키 필드", - "Secret Key Field": "시크릿 키 필드", - "ObjectBucketClaim Name": "개체 버킷 클레임 이름", - "my-object-bucket": "my-object-bucket", - "If not provided a generic name will be generated.": "지정되지 않은 경우 일반 이름이 생성됩니다.", - "Defines the object-store service and the bucket provisioner.": "개체 저장소 서비스 및 버킷 프로비저닝 도구를 정의합니다.", - "BucketClass": "버킷 클래스", - "Select BucketClass": "버킷 클래스 선택", - "Create ObjectBucketClaim": "개체 버킷 클레임 만들기", - "Edit YAML": "YAML 편집", - "Attach to Deployment": "배포에 연결", - "Disabled because the ObjectBucketClaim is being deleted.": "ObjectBucketClaim이 삭제되어 있으므로 사용할 수 없습니다.", - "Object Bucket Claim Details": "개체 버킷 클레임 세부 정보", - "Object Bucket": "개체 버킷", - "Namespace": "네임 스페이스", - "OBCTableHeader": "OBCTableHeader", - "Object Bucket Claims": "개체 버킷 클레임", - "Object Bucket Claim Data": "개체 버킷 클레임 데이터", - "Hide Values": "값 숨기기", - "Reveal Values": "값 표시", - "Data": "데이터", - "Create Object Bucket": "개체 버킷 만들기", - "Object Bucket Name": "개체 버킷 이름", - "ob-name-help": "ob-name-help", - "The corresponding ObjectBucketClaim must be deleted first.": "해당 개체 버킷 클레임을 먼저 삭제해야 합니다.", - "Object Bucket Details": "개체 버킷 세부 정보", - "Object Bucket Claim": "개체 버킷 클레임", - "OBTableHeader": "OBTableHeader", - "Object Buckets": "개체 버킷", - "Uses the available disks that match the selected filters on all nodes selected in the previous step.": "이전 단계에서 선택한 모든 노드에서 선택한 필터와 일치하는 사용 가능한 디스크가 사용됩니다.", - "A LocalVolumeSet allows you to filter a set of disks, group them and create a dedicated StorageClass to consume storage from them.": "로컬 볼륨 세트를 사용하면 디스크 세트를 필터링하고 그룹화한 후 전용 스토리지 클래스를 생성하여 디스크에서 스토리지를 사용할 수 있습니다.", - "OpenShift Container Storage's StorageCluster requires a minimum of 3 nodes for the initial deployment. Only {{nodes}} node match to the selected filters. Please adjust the filters to include more nodes.": "OpenShift Container Storage의 스토리지 클러스터에는 초기 배포를 위해 최소 3 개의 노드가 필요합니다. {{nodes}} 노드만 선택한 필터와 일치합니다. 
노드를 추가하려면 필터를 조정하십시오.", - "After the LocalVolumeSet and StorageClass are created you won't be able to go back to this step.": "로컬 볼륨 세트와 스토리지 클래스가 생성된 후에는이 단계로 돌아갈 수 없습니다.", - "Create StorageClass": "스토리지 클래스 만들기", - "Selected Capacity": "선택한 용량", - "Selected Nodes": "선택된 노드", - "Review StorageCluster": "스토리지 클러스터 검토", - "Storage and nodes": "스토리지 및 노드", - "Arbiter zone:": "Arbiter 영역:", - "None": "없음", - "selected based on the created StorageClass:": "생성된 스토리지 클래스에 따라 선택:", - "Total CPU and memory of {{cpu, number}} CPU and {{memory}}": "{{cpu, number}} CPU 및 {{memory}}의 총 CPU 및 메모리", - "Configure": "설정", - "Enable Encryption": "암호 활성화", - "Connect to external key management service: {{name}}": "외부 키 관리 서비스 {{name}}에 연결", - "Encryption Level: {{level}}": "암호화 수준: {{level}}", - "Using {{networkLabel}}": "{{networkLabel}}의 사용", - "Discover disks": "디스크 검색", - "Review and create": "검토 및 생성", - "Info Alert": "정보 알림", - "Internal - Attached devices": "내부-연결된 장치", - "Can be used on any platform where there are attached devices to the nodes, using the Local Storage Operator. The infrastructure StorageClass is provided by Local Storage Operator, on top of the attached drives.": "로컬 스토리지 Operator를 사용하여 노드에 연결된 장치가있는 모든 플랫폼에서 사용할 수 있습니다. 인프라 스토리지 클래스는 연결된 드라이브 위에 로컬 스토리지 Operator가 제공합니다.", - "Before we can create a StorageCluster, the Local Storage operator needs to be installed. When installation is finished come back to OpenShift Container Storage to create a StorageCluster.<1><0>Install": "스토리지 클러스터를 생성하려면 먼저 로컬 스토리지 Operator를 설치해야합니다. 설치가 완료되면 OpenShift Container Storage로 돌아와 스토리지 클러스터를 생성합니다.<1><0>설치", - "Node Table": "노드 테이블", - "StorageCluster exists": "스토리지 클러스터가 있음", - "Back to operator page": "operator 페이지로 돌아 가기", - "Go to cluster page": "클러스터 페이지로 이동", - "<0>A StorageCluster <1>{{clusterName}} already exists.<3>You cannot create another StorageCluster.": "<0>스토리지 클러스터 <1>{{clusterName}}가 이미 있습니다. 
<3>다른 스토리지 클러스터를 생성할 수 없습니다.", - "Connect to external cluster": "외부 클러스터에 연결", - "Download <1>{{SCRIPT_NAME}} script and run on the RHCS cluster, then upload the results (JSON) in the External cluster metadata field.": "<1>{{SCRIPT_NAME}} 스크립트를 다운로드하고 RHCS 클러스터에서 실행한 다음 외부 클러스터 메타 데이터 필드에 결과 (JSON)를 업로드합니다.", - "Download Script": "스크립트 다운로드", - "A bucket will be created to provide the OpenShift Data Foundation's Service.": "OpenShift Data Foundation의 서비스를 제공하기 위해 버킷이 생성됩니다.", - "Bucket created for OpenShift Container Storage's Service": "OpenShift Container Storage 서비스 용으로 생성된 버킷", - "Create External StorageCluster": "외부 스토리지 클러스터 생성", - "External cluster metadata": "외부 클러스터 메타 데이터", - "Upload JSON File": "JSON 파일 업로드", - "Upload Credentials file": "인증 정보 파일 업로드", - "JSON data": "JSON 데이터", - "Create Button": "만들기 버튼", - "Create StorageCluster": "스토리지 클러스터 만들기", - "OpenShift Container Storage runs as a cloud-native service for optimal integration with applications in need of storage and handles the scenes such as provisioning and management.": "OpenShift Container Storage가 필요한 애플리케이션과의 통합 최적화를위한 클라우드 네이티브 서비스로 실행되며 프로비저닝 및 관리 등을 처리합니다.", - "Select mode:": "모드 선택 :", - "If not labeled, the selected nodes are labeled <1>{{label}} to make them target hosts for OpenShift Data Foundation's components.": "레이블이 지정되지 않은 경우 선택한 노드에 <1>{{label}} 레이블이 지정되어 OpenShift Data Foundation 구성 요소의 대상 호스트가 됩니다.", - "Mark nodes as dedicated": "노드를 전용으로 표시", - "This will taint the nodes with the<1>key: node.ocs.openshift.io/storage, <4>value: true, and <7>effect: NoSchedule": "그러면 <1>key: node.ocs.openshift.io/storage, <4>value: true, 및 <7>effect: NoSchedule로 노드가 손상됩니다.", - "Selected nodes will be dedicated to OpenShift Container Storage use only": "선택한 노드는 OpenShift Container Storage 전용으로 사용됩니다.", - "OpenShift Container Storage deployment in two data centers, with an arbiter node to settle quorum decisions.": "OpenShift Container Storage는 2개의 데이터 센터에 배포되며, 쿼럼 결정을 위한 중재자 노드가 제공됩니다.", - "To support high availability when two data centers can be used, enable arbiter to get the valid quorum between two data centers.": "두 개의 데이터 센터를 사용할 수 있는 경우 고가용성을 지원하려면 Arbiter가 두 데이터 센터간에 유효한 쿼럼을 취득할 수 있도록합니다.", - "Select arbiter zone": "Arbiter 영역 선택", - "Network": "네트워크", - "The default SDN networking uses a single network for all data operations such read/write and also for control plane, such as data replication. Multus allows a network separation between the data operations and the control plane operations.": "기본 SDN 네트워킹은 읽기/쓰기와 같은 모든 데이터 작업과 데이터 복제와 같은 컨트롤 플레인에 단일 네트워크를 사용합니다. Multus는 데이터 작업과 컨트롤 플레인 작업 간의 네트워크 분리를 허용합니다.", - "Default (SDN)": "기본값 (SDN)", - "Custom (Multus)": "사용자 지정 (Multus)", - "Public Network Interface": "공용 네트워크 인터페이스", - "Select a network": "네트워크 선택", - "Cluster Network Interface": "클러스터 네트워크 인터페이스", - "Requested Cluster Capacity:": "요청된 클러스터 용량 :", - "StorageClass:": "스토리지 클래스:", - "Select Capacity": "용량 선택", - "Requested Capacity": "요청된 용량", - "Select Nodes": "노드 선택", - "create internal mode StorageCluster wizard": "내부 모드 스토리지 클러스터 생성 마법사", - "Can be used on any platform, except bare metal. It means that OpenShift Container Storage uses an infrastructure StorageClass, provided by the hosting platform. For example, gp2 on AWS, thin on VMWare, etc.": "베어 메탈을 제외한 모든 플랫폼에서 사용할 수 있습니다. 이는 OpenShift Container Storage가 호스팅 플랫폼에서 제공하는 인프라 스토리지 클래스를 사용함을 의미합니다. 
(예: AWS의 gp2, VMWare의 thin 등)", - "{{title}} steps": "{{title}} 단계", - "{{title}} content": "{{title}} 컨텐츠", - "{{availableCapacity}} / {{replica}} replicas": "{{availableCapacity}} /{{replica}} 복제", - "Available capacity:": "사용 가능한 용량 :", - "Filesystem name": "파일 시스템 이름", - "Enter filesystem name": "파일 시스템 이름 입력", - "CephFS filesystem name into which the volume shall be created": "볼륨을 생성할 CephFS 파일 시스템 이름", - "no compression": "압축 없음", - "with compression": "압축", - "Replica {{poolSize}} {{compressionText}}": "복제 {{poolSize}} {{compressionText}}", - "Create New Pool": "새 풀 생성", - "Storage Pool": "스토리지 풀", - "Select a Pool": "풀 선택", - "Storage pool into which volume data shall be stored": "볼륨 데이터를 저장할 스토리지 풀", - "Error retrieving Parameters": "매개 변수 검색 오류", - "my-storage-pool": "my-storage-pool", - "An encryption key will be generated for each PersistentVolume created using this StorageClass.": "이 스토리지 클래스를 사용하여 생성된 각 영구 볼륨에 대해 암호화 키가 생성됩니다.", - "Key service": "키 관리 서비스", - "Select an existing connection": "기존 연결 선택", - "KMS service {{value}} already exist": "KMS 서비스{{value}}은/는 이미 존재합니다", - "Choose existing KMS connection": "기존 KMS 연결 선택", - "Create new KMS connection": "새 KMS 연결 만들기", - "PV expansion operation is not supported for encrypted PVs.": "암호화된 PV에는 PV 확장 작업이 지원되지 않습니다.", - "Enable Thick Provisioning": "씩 프로비저닝 활성화", - "By enabling thick-provisioning, volumes will allocate the requested capacity upon volume creation. Volume creation will be slower when thick-provisioning is enabled.": "씩 프로비저닝을 활성화하면 볼륨이 볼륨 생성시 요청된 용량을 할당합니다. 씩 프로비저닝이 활성화되면 볼륨 생성 속도가 느려집니다.", - "{{resource}} details": "{{resource}} 세부 정보", - "Kind": "유형", - "Labels": "라벨", - "Last updated": "마지막 업데이트", - "Storage Systems": "스토리지 시스템", - "Used capacity": "사용된 용량", - "Storage status represents the health status of {{operatorName}}'s StorageCluster.": "스토리지 상태는 {{operatorName}}의 스토리지 클러스터의 상태를 나타냅니다.", - "Health": "상태", - "Standard": "표준", - "Data will be consumed by a Multi-cloud object gateway, deduped, compressed, and encrypted. The encrypted chunks would be saved on the selected BackingStores. Best used when the applications would always use the OpenShift Data Foundation endpoints to access the data.": "데이터는 다중 클라우드개체 게이트웨이에서 사용되고 중복 제거, 압축 및 암호화됩니다. 암호화된 청크는 선택한 백업 저장소에 저장됩니다. 애플리케이션이 항상 OpenShift Data Foundation 엔드 포인트를 사용하여 데이터에 액세스하는 경우에 가장 적합합니다.", - "Data is stored on the NamespaceStores without performing de-duplication, compression, or encryption. BucketClasses of namespace type allow connecting to existing data and serving from them. These are best used for existing data or when other applications (and cloud-native services) need to access the data from outside OpenShift Data Foundation.": "데이터는 중복 제거, 압축 또는 암호화를 수행하지 않고 네임 스페이스 저장소에 저장됩니다. 네임 스페이스 유형의 버킷 클래스를 사용하면 기존 데이터에 연결하고 기존 데이터에서 서비스를 제공할 수 있습니다. 
이러한 기능은 기존 데이터에 사용하거나 다른 애플리케이션 (및 클라우드 기반 서비스)이 OpenShift Data Foundation 외부에서 데이터에 액세스해야하는 경우에 가장 적합합니다.", - "Single NamespaceStore": "단일 네임 스페이스 저장소", - "The namespace bucket will read and write its data to a selected namespace store": "네임스페이스 버킷은 선택한 네임스페이스 저장소에서 데이터를 읽고 씁니다.", - "Multi NamespaceStores": "다중 네임 스페이스 저장소", - "The namespace bucket will serve reads from several selected backing stores, creating a virtual namespace on top of them and will write to one of those as its chosen write target": "네임 스페이스 버킷은 선택한 여러 백업 저장소에서 읽기 서비스를 제공하고 그 위에 가상 네임 스페이스를 만들고 선택한 쓰기 대상으로 해당 저장소 중 하나에 씁니다.", - "Cache NamespaceStore": "캐시 네임 스페이스 저장소", - "The caching bucket will serve data from a large raw data out of a local caching tiering.": "캐싱 버킷은 로컬 캐싱 계층에서 대규모 원시 데이터의 데이터를 제공합니다.", - "Create storage class": "스토리지 클래스 만들기", - "Create local volume set": "로컬 볼륨 세트 만들기", - "Logical used capacity per account": "계정에 사용되는 논리 사용 용량", - "Egress Per Provider": "공급자 별 송신", - "I/O Operations count": "I/O 작업 수", - "The StorageClass used by OpenShift Data Foundation to write its data and metadata.": "OpenShift Data Foundation에서 데이터 및 메타데이터를 작성하는 데 사용하는 스토리지 클래스입니다.", - "Infrastructure StorageClass created by Local Storage Operator and used by OpenShift Container Storage to write its data and metadata.": "로컬 스토리지 Operator가 생성하고 OpenShift Container Storage에서 데이터 및 메타 데이터를 작성하는 데 사용하는 인프라 스토리지 클래스입니다.", - "The amount of capacity that would be dynamically allocated on the selected StorageClass.": "선택한 스토리지 클래스에 동적으로 할당되는 용량입니다.", - "If you wish to use the Arbiter stretch cluster, a minimum of 4 nodes (2 different zones, 2 nodes per zone) and 1 additional zone with 1 node is required. All nodes must be pre-labeled with zones in order to be validated on cluster creation.": "Arbiter 확장 클러스터를 사용하려면 최소 4 개의 노드 (2 개의 다른 영역, 영역 당 2 개의 노드)와 1 개의 노드가 있는 1 개의 추가 영역이 필요합니다. 클러스터 생성시 유효성을 확인하려면 모든 노드에 영역 레이블을 미리 설정해야 합니다", - "Selected nodes are based on the StorageClass <1>{{scName}} and with a recommended requirement of 14 CPU and 34 GiB RAM per node.": "선택한 노드는 선택한 스토리지 클래스를 기반으로 하며 <1>{{scName}} 권장 요구 사항은 노드 당 14 개의 CPU 및 34GiB RAM입니다.", - "Selected nodes are based on the StorageClass <1>{{scName}} and fulfill the stretch cluster requirements with a recommended requirement of 14 CPU and 34 GiB RAM per node.": "선택한 노드는 선택한 스토리지 클래스를 기반으로 하며 <1>{{scName}} 권장 요구 사항은 노드 당 14 개의 CPU 및 34GiB RAM으로 확장 클러스터 요구 사항을 충족합니다.", - "Loading...": "로드 중 ...", - "Pool {{name}} creation in progress": "풀{{name}} 생성 중", - "Pool {{name}} was successfully created": "풀{{name}}이/가 성공적으로 생성되었습니다", - "An error occurred. Pool {{name}} was not created": "오류가 발생했습니다. 풀 {{name}}이/가 생성되지 않았습니다", - "Pool {{name}} creation timed out. Please check if odf operator and rook operator are running": "풀{{name}} 생성 시간이 초과되었습니다. odf operator와 rook operator가 실행 중인지 확인하십시오", - "The creation of a StorageCluster is still in progress or has failed. Try again after the StorageCuster is ready to use.": "스토리지 클러스터 생성이 아직 진행 중이거나 실패했습니다. 
스토리지 클러스터를 사용할 준비가 되면 다시 시도하십시오.", - "Pool management tasks are not supported for default pool and OpenShift Container Storage's external mode.": "기본 풀 및 OpenShift Container Storage의 외부 모드에 대해서는 풀 관리 작업이 지원되지 않습니다.", - "Pool {{name}} was created with errors.": "풀 {{name}}이/가 생성되었지만 오류가 발생했습니다.", - "Delete": "삭제", - "StorageClasses": "스토리지 클래스", - "hr": "시간", - "min": "분", - "A minimal cluster deployment will be performed.": "최소 클러스터 배포가 수행됩니다.", - "The selected nodes do not match OpenShift Data Foundation's StorageCluster requirement of an aggregated 30 CPUs and 72 GiB of RAM. If the selection cannot be modified a minimal cluster will be deployed.": "선택한 노드가 집계된 30 개 CPU 및 72GiB RAM의 OpenShift Data Foundation의 스토리지 클러스터 요구 사항과 일치하지 않습니다. 선택 사항을 수정할 수 없는 경우 최소 클러스터가 배포됩니다.", - "Back to nodes selection": "노드 선택으로 돌아 가기", - "Select a StorageClass to continue": "스토리지 클래스를 선택하여 계속 진행합니다.", - "This is a required field. The StorageClass will be used to request storage from the underlying infrastructure to create the backing PersistentVolumes that will be used to provide the OpenShift Data Foundation service.": "이는 필수 필드입니다. 스토리지 클래스는 OpenShift Data Foundation 서비스를 제공하는 데 사용되는 백업 영구 볼륨을 생성하기 위해 기본 인프라에서 스토리지를 요청하는 데 사용됩니다.", - "Create new StorageClass": "새 스토리지 클래스 생성", - "This is a required field. The StorageClass will be used to request storage from the underlying infrastructure to create the backing persistent volumes that will be used to provide the OpenShift Data Foundation service.": "이는 필수 필드입니다. 스토리지 클래스는 OpenShift Data Foundation 서비스를 제공하는 데 사용되는 백업 영구 볼륨을 생성하기 위해 기본 인프라에서 스토리지를 요청하는 데 사용됩니다.", - "All required fields are not set": "모든 필수 필드가 설정되지 않았습니다.", - "In order to create the StorageCluster you must set the StorageClass, select at least 3 nodes (preferably in 3 different zones) and meet the minimum or recommended requirement": "스토리지 클러스터를 생성하려면 스토리지 클래스를 설정하고 최소 3 개의 노드 (바람직하게는 3 개의 다른 영역에서)를 선택하고 최소 또는 권장 요구 사항을 충족해야 합니다.", - "The StorageCluster requires a minimum of 3 nodes for the initial deployment. Please choose a different StorageClass or go to create a new LocalVolumeSet that matches the minimum node requirement.": "스토리지 클러스터에는 초기 배포를 위해 최소 3 개의 노드가 필요합니다. 다른 스토리지 클래스를 선택하거나 최소 노드 요구 사항과 일치하는 새 로컬 볼륨 세트를 생성하십시오.", - "Create new volume set instance": "새 볼륨 세트 인스턴스 만들기", - "Select at least 1 encryption level or disable encryption.": "하나 이상의 암호화 수준을 선택하거나 암호화를 비활성화합니다.", - "Fill out the details in order to connect to key management system": "키 관리 시스템에 연결하려면 세부 정보를 입력합니다.", - "This is a required field.": "이 필드는 필수 항목입니다.", - "Both public and cluster network attachment definition cannot be empty": "공용 네트워크 및 클러스터 네트워크 연결 정의는 비워 둘 수 없습니다.", - "A public or cluster network attachment definition must be selected to use Multus.": "Multus를 사용하려면 공용 네트워크 또는 클러스터 네트워크 연결 정의를 선택해야 합니다.", - "The number of selected zones is less than the minimum requirement of 3. If not modified a host-based failure domain deployment will be enforced.": "선택한 영역의 수가 최소 요구 사항 인 3보다 적습니다. 
수정하지 않으면 호스트 기반 장애 도메인 배포가 적용됩니다.", - "When the nodes in the selected StorageClass are spread across fewer than 3 availability zones, the StorageCluster will be deployed with the host based failure domain.": "선택한 스토리지 클래스의 노드가 3 개 미만의 가용성 영역에 분산된 경우 스토리지 클러스터는 호스트 기반 장애 도메인과 함께 배포됩니다.", - "Cluster-Wide and StorageClass": "클러스터 전체 및 스토리지 클래스", - "Cluster-Wide": "클러스터 전체", - "Select at least 2 Backing Store resources": "백업 저장소 리소스를 2 개 이상 선택", - "Select at least 1 Backing Store resource": "백업 저장소 리소스를 1 개 이상 선택", - "x {{replica}} replicas = {{osdSize, number}} TiB": "x {{replica}} 복제 = {{osdSize, number}} TiB", - "SmallScale": "SmallScale", - "0.5 TiB": "0.5 TiB", - "2 TiB": "2 TiB", - "LargeScale": "LargeScale", - "4 TiB": "4 TiB", - "{{osdSize, number}} TiB": "{{osdSize, number}} TiB", - "Help": "도움말" -} \ No newline at end of file diff --git a/frontend/packages/ceph-storage-plugin/locales/ko/console-shared.json b/frontend/packages/ceph-storage-plugin/locales/ko/console-shared.json deleted file mode 100644 index ef1c1f503059..000000000000 --- a/frontend/packages/ceph-storage-plugin/locales/ko/console-shared.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "View {{title}} metrics in query browser": "쿼리 브라우저에서 {{title}} 메트릭보기", - "Not available": "사용할 수 없음", - "{{humanAvailable}} available of {{humanLimit}} total limit": "{{humanAvailable}} 사용 가능 (총 한도 {{humanLimit}})", - "{{humanAvailable}} available of {{humanMax}}": "{{humanAvailable}} 사용 가능 (총 {{humanMax}})", - "{{humanAvailable}} available": "{{humanAvailable}} 사용 가능" -} \ No newline at end of file diff --git a/frontend/packages/ceph-storage-plugin/locales/zh/ceph-storage-plugin.json b/frontend/packages/ceph-storage-plugin/locales/zh/ceph-storage-plugin.json deleted file mode 100644 index 14745da8d8f0..000000000000 --- a/frontend/packages/ceph-storage-plugin/locales/zh/ceph-storage-plugin.json +++ /dev/null @@ -1,757 +0,0 @@ -{ - "Add Capacity": "增加容量", - "Edit BlockPool": "编辑块池", - "Edit Bucket Class Resources": "编辑存储桶类资源", - "ObjectBucketClaim": "ObjectBucketClaim", - "Use existing claim": "使用现有声明", - "Select claim": "选择声明", - "Create new claim": "创建新声明", - "Create": "创建", - "Cancel": "取消", - "Overview": "概述", - "StorageSystems": "存储系统", - "StorageSystem details": "存储系统详情", - "Enabled": "已启用", - "Disabled": "禁用", - "Last synced": "最后同步", - "Default pool cannot be deleted": "不能删除默认池", - "BlockPool List": "块池列表", - "Delete BlockPool": "删除块池", - "{{replica}} Replication": "{{replica}}复制", - "Pool name": "池名称", - "my-block-pool": "my-block-pool", - "pool-name-help": "pool-name-help", - "Data protection policy": "数据保护政策", - "Select replication": "选择复制", - "Volume type": "卷类型", - "Select volume type": "选择卷类型", - "Compression": "压缩", - "Enable compression": "启用压缩", - "Enabling compression may result in little or no space savings for encrypted or random data. Also, enabling compression may have an impact on I/O performance.": "对于加密数据和随机数据,启用压缩可能不会节省太多空间。另外,启用压缩可能会影响到 I/O 性能。", - "OpenShift Data Foundation's StorageCluster is not available. Try again after the StorageCluster is ready to use.": "OpenShift Data Foundation 的存储集群不可用。在存储集群准备就绪后请再次尝试。", - "Create BlockPool": "创建块池", - "Close": "关闭", - "Pool creation is not supported for OpenShift Data Foundation's external RHCS StorageSystem.": "OpenShift Data Foundation 的外部 RHCS 存储系统不支持池创建。", - "A BlockPool is a logical entity providing elastic capacity to applications and workloads. 
Pools provide a means of supporting policies for access data resilience and storage efficiency.": "块池是一个逻辑实体,为应用程序和工作负载提供弹性容量。池提供了一种支持政策以访问数据恢复和存储效率的方法。", - "BlockPool Creation Form": "块池创建表单", - "Name": "名称", - "Bucket Name": "存储桶名称", - "Type": "类型", - "Region": "区域", - "BackingStore Table": "后端存储表", - "Each BackingStore can be used for one tier at a time. Selecting a BackingStore in one tier will remove the resource from the second tier option and vice versa.": "每个后端存储在同一时间可用于一个层。在一个层中选择一个后端存储会从第二个层选项中删除资源,反之亦然。", - "Bucket created for OpenShift Data Foundation's Service": "为 OpenShift Data Foundation 的服务创建的存储桶", - "Tier 1 - BackingStores": "层 1 - 后端存储", - "Create BackingStore ": "创建后端存储 ", - "Tier-1-Table": "层-1-表", - "{{bs, number}} BackingStore_one": "{{bs, number}} 个后端存储", - "{{bs, number}} BackingStore_other": "{{bs, number}} 个后端存储", - "selected": "已选择", - "Tier 2 - BackingStores": "层 2 - 后端存储", - "Tier-2-Table": "层-2-表", - "General": "常规设置", - "Placement Policy": "放置策略", - "Resources": "资源", - "Review": "复查", - "Create BucketClass": "创建存储桶类", - "Create new BucketClass": "创建新存储桶类", - "BucketClass is a CRD representing a class for buckets that defines tiering policies and data placements for an OBC.": "存储桶类是一个 CRD,它代表存储桶的类,用于定义 OBC 的层策略和数据放置。", - "Next": "下一个", - "Back": "前一个", - "Edit BucketClass Resource": "编辑存储桶类资源", - "{{storeType}} represents a storage target to be used as the underlying storage for the data in Multicloud Object Gateway buckets.": "{{storeType}} 代表了一个存储对象,用作 Multicloud Object Gateway 存储桶中数据的底层存储。", - "Cancel ": "取消 ", - "Save": "保存", - "What is a BackingStore?": "什么是后端存储?", - "BackingStore represents a storage target to be used as the underlying storage for the data in Multicloud Object Gateway buckets.": "后端存储代表了一个存储对象,用作 Multicloud Object Gateway 存储桶中数据的底层存储。", - "Multiple types of BackingStores are supported: asws-s3 s3-compatible google-cloud-storage azure-blob obc PVC.": "支持多种后端存储类型: asws-s3 s3-compatiblegoogle-cloud-storage azure-blob obc PVC。", - "Learn More": "了解更多", - "What is a BucketClass?": "什么是存储桶类?", - "A set of policies which would apply to all buckets (OBCs) created with the specific bucket class. These policies include placement, namespace and caching": "一组应用到所有使用特定存储桶类创建的存储桶(OBC)策略。这些策略包括放置、命名空间和缓存", - "BucketClass type": "存储桶类类型", - "3-63 chars": "3-63 个字符", - "Starts and ends with lowercase number or letter": "以小写数字或字母开始和结尾", - "Only lowercase letters, numbers, non-consecutive periods or hyphens": "只支持小写字母、数字、非连续的句点或者连字符", - "Avoid using the form of an IP address": "避免使用 IP 地址格式", - "Globally unique name": "全局唯一名称", - "BucketClass name": "存储桶类名称", - "A unique name for the bucket class within the project.": "项目中的存储桶类的唯一名称。", - "my-multi-cloud-mirror": "my-multi-cloud-mirror", - "BucketClass Name": "存储桶类名称", - "Description (Optional)": "描述(可选)", - "Description of bucket class": "存储桶类描述", - "What is a Namespace Policy?": "什么是命名空间策略?", - "Namespace policy can be set to one single read and write source, multi read sources or cached policy.": "命名空间策略可为单个读写源、多读取源或缓存策略设置。", - "Namespace Policy Type": "命名空间策略类型", - "What is Caching?": "什么是缓存?", - "Caching is a policy that creates local copies of the data. It saves the copies locally to improve performance for frequently accessed data. Each cached copy has a TTL and is verified against the hub. 
Each non-read operation (upload, overwrite, delete) is performed on the hub": "缓存是一个用于创建数据的本地副本的策略。它会在本地保存副本以提高经常访问的数据的性能。每个缓存的副本都有一个 TTL,并针对 hub 进行验证。所有非读操作(上传、覆盖、删除)都会在 hub 中执行", - "Hub namespace store ": "hub 命名空间存储。", - "A single NamespaceStore that defines the read and write target of the namespace bucket.": "一个单一的命名空间存储,它定义了命名空间存储桶的读写目标。", - "NamespaceStore": "命名空间存储", - "Cache data settings": "缓存数据设置", - "The data will be temporarily copied on a backing store in order to later access it much more quickly.": "将数据临时复制到后备存储中,以便以后可以更快地访问这些数据。", - "Backing store": "后端存储", - "a local backing store is recommended for better performance": "为提高性能,建议使用本地后端存储", - "Time to live": "存活时间", - "Time to live is the time that an object is stored in a caching system before it is deleted or refreshed. Default: 0, Max: 24 hrs": "存活时间代表一个对象在删除或刷新前,存储在缓存系统中的时间。默认:0,最大:24 hrs", - "Read NamespaceStores": "读命名空间存储", - "Select a list of NamespaceStores that defines the read targets of the namespace bucket.": "选择命名空间存储列表,定义命名空间存储桶的读取目标。", - "Create NamespaceStore": "创建命名空间存储", - "{{nns, number}} namespace store_one": "{{nns, number}} 个命名空间存储", - "{{nns, number}} namespace store_other": "{{nns, number}} 个命名空间存储", - " selected": " 已选择", - "Write NamespaceStore": "写命名空间存储", - "Select a single NamespaceStore that defines the write targets of the namespace bucket.": "选择一个单一命名空间存储,定义命名空间存储桶的写入目标。", - "Read and Write NamespaceStore ": "读和写命名空间存储", - "Select one NamespaceStore which defines the read and write targets of the namespace bucket.": "选择一个命名空间存储,定义命名空间存储桶的读写目标。", - "What is a Placement Policy?": "放置政策是什么?", - "Data placement capabilities are built as a multi-layer structure here are the layers bottom-up:": "数据放置功能作为多层结构构建,这里从底到顶的层:", - "Spread Tier - list of BackingStores aggregates the storage of multiple stores.": "扩散层(Spread Tier) - 后备存储列表,聚合多个存储的存储。", - "Mirroring Tier - list of spread-layers async-mirroring to all mirrors with locality optimization (will allocate on the closest region to the source endpoint). Mirroring requires at least two BackingStores.": "镜像层(Mirroring Tier)- 在所有带有本地优化(将在最接近的区域中分配给源端点)的分布层器 async-mirroring 列表,镜像至少需要两个后备存储。", - "The number of replicas can be configured via the NooBaa management console.": "可以通过 NooBa 管理控制台配置的副本数量。", - "Tier 1 - Policy Type": "层 1 - 策略类型", - "Spread": "扩散(Spread)", - "Spreading the data across the chosen resources. By default a replica of one copy is used and does not include failure tolerance in case of resource failure.": "在所选资源间扩散数据。默认情况使用一个副本,在资源失败时不具有容错功能。", - "Mirror": "镜像(Mirror)", - "Full duplication of the data in each chosen resource. By default a replica of one copy per location is used. 
Includes failure tolerance in case of resource failure.": "每个所选资源的数据的完全复制。默认为每个位置的每个复制使用一个副本,包括资源失败时的容错功能。", - "Add Tier": "添加层", - "Tier 2 - Policy type": "层 2 - 策略类型", - "Remove Tier": "删除层", - "Spreading the data across the chosen resources does not include failure tolerance in case of resource failure.": "在所选资源间扩散数据不包含资源失败时的容错功能。", - "Full duplication of the data in each chosen resource includes failure tolerance in cause of resource failure.": "每个选定资源中数据的全部复制包括资源失败时的容错功能。", - "Namespace Policy: ": "命名空间策略: ", - "Read and write NamespaceStore : ": "读和写命名空间存储:", - "Hub namespace store: ": "hub 命名空间存储:", - "Cache backing store: ": "创建后端存储:", - "Time to live: ": "存活时间: ", - "Resources ": "资源 ", - "Selected read namespace stores: ": "选择的读命名空间存储: ", - "Selected write namespace store: ": "选择的写命名空间存储: ", - "Placement policy details ": "放置策略详情。", - "Tier 1: ": "层 1:", - "Selected BackingStores": "选择的后端存储", - "Tier 2: ": "层 2:", - "Review BucketClass": "复查存储桶类", - "BucketClass type: ": "存储桶类类型:", - "BucketClass name: ": "存储桶类名称:", - "Description: ": "描述 :", - "Provider {{provider}}": "供应商 {{provider}}", - "Create new BackingStore ": "创建新的后端存储 ", - "An error has occured while fetching backing stores": "获取后备存储时出错", - "Select a backing store": "选择后端存储", - "Storage targets that are used to store chunks of data on Multicloud Object Gateway buckets.": "用于在 Multicloud Object Gateway 存储桶中存储数据的存储目标。", - "A BackingStore represents a storage target to be used as the underlying storage layer in Multicloud Object Gateway buckets.": "一个后端存储代表了一个存储目标,用作 Multicloud Object Gateway 存储桶中数据的底层存储层。", - "Multiple types of BackingStores are supported: AWS S3 S3 Compatible Google Cloud Storage Azure Blob PVC.": "支持多种后备存储类型:AWS S3 S3 Compatible Google Cloud Storage Azure Blob PVC。", - "BackingStore Name": "后端存储名称", - "A unique name for the BackingStore within the project": "项目中的后端存储的唯一名称", - "Name can contain a max of 43 characters": "名称可包含最多 43 个字符", - "Provider": "供应商", - "Create BackingStore": "创建后端存储", - "This is an Advanced subscription feature. It requires Advanced Edition subscription. Please contact the account team for more information.": "这是一个高级订阅功能。它需要高级订阅。请联络帐户团队以获得更多信息。", - "Advanced Subscription": "高级订阅", - "Storage platform": "存储平台", - "Select a storage platform you wish to connect": "选择您要连接的存储平台", - "Select external system from list": "从列表中选择外部系统", - "Backing storage type": "后端存储类型", - "Use an existing StorageClass": "使用一个现有的存储类", - "OpenShift Data Foundation will use an existing StorageClass available on your hosting platform.": "OpenShift Data Foundation 将使用由您的托管平台提供的现有存储类。", - "Create a new StorageClass using local storage devices": "使用本地设备创建新存储类", - "OpenShift Data Foundation will use a StorageClass provided by the Local Storage Operator (LSO) on top of your attached drives. 
This option is available on any platform with devices attached to nodes.": "OpenShift Data Foundation 将在您附加的驱动器之上使用 Local Storage Operator (LSO) 提供的存储类。这个选项可在附加到节点的任意平台上使用。", - "Connect an external storage platform": "连接一个外部存储平台", - "OpenShift Data Foundation will create a dedicated StorageClass.": "OpenShift Data Foundation 将创建一个专用的存储类。", - "Deploys MultiCloud Object Gateway without block and file services.": "部署没有块和文件服务的多云对象网关。", - "Deploys OpenShift Data Foundation with block, shared fileSystem and object services.": "部署带有块、共享文件系统和对象服务的 OpenShift Data Foundation。", - "Deployment type": "部署类型", - "Taint nodes": "污点节点", - "Selected nodes will be dedicated to OpenShift Data Foundation use only": "所选节点仅供 OpenShift Data Foundation 使用", - "Select capacity": "选择容量", - "Requested capacity": "要求的容量", - "Select nodes": "选择节点", - "Select at least 3 nodes preferably in 3 different zones. It is recommended to start with at least 14 CPUs and 34 GiB per node.": "最少选择 3 个节点(最好在 3 个不同的区)。建议您从每个节点最少 14 个 CPU 和 34 GiB 开始。", - "PersistentVolumes are being provisioned on the selected nodes.": "持久性卷在所选节点上被置备。", - "Error while loading PersistentVolumes.": "加载持久性卷出错。", - "Selected capacity": "选择的容量", - "Available raw capacity": "可用原始容量", - "The available capacity is based on all attached disks associated with the selected StorageClass <2>{{storageClassName}}": "可用容量基于与所选存储类 <2>{{storageClassName}} 关联的所有附加磁盘", - "Selected nodes": "选中的节点", - "Role": "角色", - "CPU": "CPU", - "Memory": "内存", - "Zone": "区", - "Selected nodes table": "选中的节点表", - "To support high availability when two data centers can be used, enable arbiter to get a valid quorum between the two data centers.": "在两个数据中心可用时支持高可用性,启用仲裁以在两个数据中心之间获得有效的仲裁。", - "Arbiter minimum requirements": "Arbiter 最低要求", - "Stretch Cluster": "扩展集群", - "Enable arbiter": "启用 arbiter", - "Arbiter zone": "仲裁区", - "An arbiter node will be automatically selected from this zone": "将从此区中自动选择一个仲裁节点", - "Select an arbiter zone": "选择 arbiter 区", - "Arbiter zone selection": "Arbiter 区选择", - "Connection details": "连接详情", - "Disks on all nodes": "所有节点上的磁盘", - "{{nodes, number}} node_one": "{{nodes, number}} 个节点", - "{{nodes, number}} node_other": "{{nodes, number}} 个节点", - "Please enter a positive Integer": "请输入一个正的整数", - "LocalVolumeSet name": "本地卷集名称", - "A LocalVolumeSet will be created to allow you to filter a set of disks, group them and create a dedicated StorageClass to consume storage from them.": "本地卷集将被创建以允许您过滤一组磁盘并对其进行分组,并创建一个专用存储类来使用它们的存储。", - "StorageClass name": "存储类名称", - "Filter disks by": "过滤磁盘", - "Uses the available disks that match the selected filters on all nodes.": "使用在所有节点上与所选过滤器相匹配的可用磁盘。", - "Disks on selected nodes": "在所选节点上的磁盘", - "Uses the available disks that match the selected filters only on selected nodes.": "使用在所选节点上与所选过滤器相匹配的可用磁盘。", - "Disk type": "磁盘类型", - "Advanced": "高级", - "Volume mode": "卷模式", - "Device type": "设备类型", - "Select disk types": "选择磁盘类型", - "Disk size": "磁盘大小", - "Minimum": "最小", - "Please enter a value less than or equal to max disk size": "请输入小于或等于最大磁盘大小的值", - "Maximum": "最大", - "Please enter a value greater than or equal to min disk size": "请输入大于或等于最小磁盘大小的值", - "Units": "单元", - "Maximum disks limit": "最大磁盘限制", - "Disks limit will set the maximum number of PVs to create on a node. 
If the field is empty we will create PVs for all available disks on the matching nodes.": "磁盘限制将设置节点上创建的最大 PV 数量。如果字段为空,将为匹配节点上的所有可用磁盘创建 PV。", - "All": "所有", - "Local Storage Operator not installed": "没有安装 Local Storage Operator", - "Before we can create a StorageSystem, the Local Storage Operator needs to be installed. When installation is finished come back to OpenShift Data Foundation to create a StorageSystem.<1><0>Install": "在创建存储系统前,需要安装 Local Storage Operator。当完成安装后,返回到 OpenShift Data Foundation 来创建一个存储系统。<1><0>安装", - "Checking Local Storage Operator installation": "检查 Local Storage Operator 的安装", - "Discovering disks on all hosts. This may take a few minutes.": "发现所有主机上的磁盘。这可能需要几分钟时间。", - "Minimum Node Requirement": "最低节点要求", - "A minimum of 3 nodes are required for the initial deployment. Only {{nodes}} node match to the selected filters. Please adjust the filters to include more nodes.": "初始的部署最少需要 3 个节点进行部署。只有 {{nodes}} 节点与所选过滤器匹配。请调整过滤器使其包含更多节点。", - "After the LocalVolumeSet is created you won't be able to edit it.": "在创建了本地卷集后,您将无法对它进行编辑。", - "Note:": "备注:", - "Create LocalVolumeSet": "创建本地卷集", - "Yes": "是", - "Are you sure you want to continue?": "您确定要继续吗?", - "Node": "节点", - "Model": "型号", - "Capacity": "容量", - "Selected Disks": "所选磁盘", - "Disk List": "任务列表", - "{{nodes, number}} Node_one": "{{nodes, number}} 个节点", - "{{nodes, number}} Node_other": "{{nodes, number}} 个节点", - "{{disks, number}} Disk_one": "{{disks, number}} 个磁盘", - "{{disks, number}} Disk_other": "{{disks, number}} 个磁盘", - "Selected versus Available Capacity": "选择的容量与可用容量的比较", - "Out of {{capacity}}": "超出 {{capacity}}", - "{{displayName}} connection details": "{{displayName}} 连接详情", - "Not connected": "未连接", - "Backing storage": "后端存储", - "Deployment type: {{deployment}}": "部署类型:{{deployment}}", - "Backing storage type: {{name}}": "后端存储类型:{{name}}", - "External storage platform: {{storagePlatform}}": "外部存储平台:{{storagePlatform}}", - "Capacity and nodes": "容量和节点", - "Cluster capacity: {{capacity}}": "集群容量:{{capacity}}", - "Selected nodes: {{nodeCount, number}} node_one": "选中的节点:{{nodeCount, number}} 个节点", - "Selected nodes: {{nodeCount, number}} node_other": "选中的节点:{{nodeCount, number}} 个节点", - "CPU and memory: {{cpu, number}} CPU and {{memory}} memory": "CPU 和内存:{{cpu, number}} 个 CPU 和 {{memory}} 内存", - "Zone: {{zoneCount, number}} zone_one": "时区:{{zoneCount, number}} 个时区", - "Zone: {{zoneCount, number}} zone_other": "时区:{{zoneCount, number}} 个时区", - "Arbiter zone: {{zone}}": "仲裁区:{{zone}}", - "Taint nodes: {{ocsTaintsStatus}}": "污点节点:{{ocsTaintsStatus}}", - "Security": "安全性", - "Encryption: Enabled": "加密:启用", - "External key management service: {{kmsStatus}}": "外部密钥管理服务: {{kmsStatus}}", - "Security and network": "安全和网络", - "Encryption: {{encryptionStatus}}": "加密:{{encryptionStatus}}", - "Network: {{networkType}}": "网络:{{networkType}}", - "Encryption level": "加密级别", - "The StorageCluster encryption level can be set to include all components under the cluster (including StorageClass and PVs) or to include only StorageClass encryption. 
PV encryption can use an auth token that will be used with the KMS configuration to allow multi-tenancy.": "存储集群加密级别可设置为包含集群中的所有组件(包括存储类和 PV),或仅包含存储类加密。PV 加密可以使用与 KMS 配置搭配使用的身份验证令牌来允许多租户。", - "Cluster-wide encryption": "集群范围的加密", - "Encryption for the entire cluster (block and file)": "整个集群的加密(块和文件)", - "StorageClass encryption": "存储类加密", - "An encryption key will be generated for each persistent volume (block) created using an encryption enabled StorageClass.": "将创建一个加密密钥,用于每个使用一个启用了加密的存储类创建的持久性卷(块)。", - "Connection settings": "连接设置", - "Connect to an external key management service": "连接到外部密钥管理服务", - "Data encryption for block and file storage. MultiCloud Object Gateway is always encrypted.": "块和文件存储的数据加密。MultiCloud Object Gateway 总会被加密。", - "MultiCloud Object Gateway is always encrypted.": "多云对象网关始终被加密。", - "Enable data encryption for block and file storage": "为块和文件存储启用数据加密", - "Enable encryption": "启用加密", - "Encryption": "Encryption", - "An error has occurred: {{error}}": "出错:{{error}}", - "IP address": "IP 地址", - "Rest API IP address of IBM FlashSystem.": "IBM FlashSystem 的 REST API IP 地址。", - "The endpoint is not a valid IP address": "端点不是一个有效的 IP 地址", - "Username": "用户名", - "Password": "密码", - "Hide password": "隐藏密码", - "Reveal password": "显示密码", - "The uploaded file is not a valid JSON file": "上传的文件不是有效的 JSON 文件", - "External storage system metadata": "外部存储系统元数据", - "Download <1>{{SCRIPT_NAME}} script and run on the RHCS cluster, then upload the results (JSON) in the External storage system metadata field.": "下载 <1>{{SCRIPT_NAME}} 脚本并在 RHCS 集群上运行,然后在外部存储系统元数据字段上传结果(JSON)。", - "Download script": "下载脚本", - "Browse": "浏览", - "Clear": "清理", - "Upload helper script": "上传帮助程序脚本", - "An error has occurred": "出错", - "Create StorageSystem": "创建存储系统", - "Create a StorageSystem to represent your OpenShift Data Foundation system and all its required storage and computing resources.": "创建存储系统以代表您的 OpenShift Data Foundation 系统及其所有必要的存储和计算资源。", - "{{nodeCount, number}} node_one": "{{nodeCount, number}} 个节点", - "{{nodeCount, number}} node_other": "{{nodeCount, number}} 个节点", - "selected ({{cpu}} CPU and {{memory}} on ": "选择的({{cpu}} CPU 和 {{memory}} 于 ", - "{{zoneCount, number}} zone_one": "{{zoneCount, number}} 个时区", - "{{zoneCount, number}} zone_other": "{{zoneCount, number}} 个时区", - "Search by node name...": "按节点名称搜索......", - "Search by node label...": "按节点标签搜索......", - "Not found": "没有找到", - "Compression eligibility": "压缩资格", - "Compression eligibility indicates the percentage of incoming data that is compressible": "压缩资格表示传入数据中可压缩数据所占的百分比", - "Compression savings": "压缩节省", - "Compression savings indicates the total savings gained from compression for this pool, including replicas": "压缩节省表示此池压缩实现的总节省,包括副本", - "Compression ratio": "压缩比例", - "Compression ratio indicates the achieved compression on eligible data for this pool": "压缩率表示对这个池的有压缩资格的数据的压缩", - "Compression status": "压缩状态", - "Storage efficiency": "存储效率", - "Details": "详情", - "Replicas": "副本", - "Inventory": "库存", - "Not available": "不可用", - "Image states info": "镜像状态信息", - "What does each state mean?": "每种状态的含义是什么?", - "<0>Starting replay: Initiating image (PV) replication process.": "<0>Starting replay: 启动镜像(PV)复制过程。", - "<0>Replaying: Image (PV) replication is ongoing or idle between clusters.": "<0>Replaying: 镜像(PV)复制在集群间进行或闲置。", - "<0>Stopping replay: Image (PV) replication process is shutting down.": "<0>Stopping replay: 镜像(PV)复制过程正在停止。", - "<0>Stopped: Image (PV) replication process has shut down.": "<0>Stopped: 镜像(PV)已停止。", - 
"<0>Error: Image (PV) replication process stopped due to an error.": "<0>Error: 镜像(PV)复制过程因为错误而停止。", - "<0>Unknown: Unable to determine image (PV) state due to an error. Check your network connection and remote cluster mirroring daemon.": "<0>Unknown: 因为错误无法决定镜像(PV)的状态。检查您的网络连接和远程集群镜像守护进程。", - "image states info": "镜像状态信息", - "Image States": "镜像状态", - "Mirroring": "镜像", - "Mirroring status": "镜像状态", - "Overall image health": "镜像的整体健康状况", - "Show image states": "显示镜像状态", - "Last checked": "最后检查", - "Raw Capacity shows the total physical capacity from all storage media within the storage subsystem": "原始容量显示存储子系统内所有存储介质的物理容量总量", - "Start replay": "开始重播", - "Stop reply": "停止回复", - "Replaying": "正在重播", - "Stopped": "已停止", - "Error": "错误", - "Syncing": "同步", - "Unknown": "未知", - "Status": "状态", - "Performance": "性能", - "IOPS": "IOPS", - "Throughput": "吞吐量", - "Not enough usage data": "没有足够的使用数据", - "used": "使用了", - "available": "可用", - "Other": "其他", - "All other capacity usage that are not a part of the top 5 consumers.": "不是顶级 5 个用户的一部分的所有其他容量用量。", - "Available": "可用", - "Breakdown Chart": "分解图", - "Warning": "警告", - "Raw capacity": "原始容量", - "Used": "使用了", - "Available versus Used Capacity": "可用和已使用的容量对比", - "Used of {{capacity}}": "使用 {{capacity}}", - "Not Available": "不可用", - "Rebuilding data resiliency": "重建数据弹性", - "{{formattedProgress, number}}%": "{{formattedProgress, number}}%", - "Activity": "活跃", - "Estimating {{formattedEta}} to completion": "估算 {{formattedEta}} 到完成", - "Object_one": "对象", - "Object_other": "对象", - "Buckets": "存储桶", - "Buckets card represents the number of S3 buckets managed on Multicloud Object Gateway and the number of ObjectBucketClaims and the ObjectBuckets managed on both Multicloud Object Gateway and RGW (if deployed).": "存储桶卡包括了在多云对象网关中管理的 S3 存储桶的数量、对象存储桶声明的数量,以及在多云对象网关和 RGW(如果部署)上管理的对象存储桶的数量。", - "NooBaa Bucket": "NooBaa 存储桶", - "Break by": "由分解", - "Total": "总计", - "Projects": "项目", - "BucketClasses": "存储桶类", - "Service type": "服务类型", - "Cluster-wide": "集群范围的", - "Any NON Object bucket claims that were created via an S3 client or via the NooBaa UI system.": "通过 S3 客户端或通过 NooBaa UI 系统创建的任何 NON 对象存储桶声明。", - "Capacity breakdown": "容量分解", - "This card shows used capacity for different resources. The available capacity is based on cloud services therefore it cannot be shown.": "这个卡显示不同资源的使用容量。可用容量基于云服务,因此无法显示。", - "Type: {{serviceType}}": "类型:{{serviceType}}", - "Service Type Dropdown": "服务类型下拉菜单", - "Service Type Dropdown Toggle": "服务类型下拉菜单切换", - "By: {{serviceType}}": "按:{{serviceType}}", - "Break By Dropdown": "按下拉菜单分解", - "Providers": "供应商", - "Accounts": "帐户", - "Metric": "指标", - "I/O Operations": "I/O 操作", - "Logical Used Capacity": "使用的逻辑容量", - "Physical vs. 
Logical used capacity": "物理使用容量与逻辑使用容量比较", - "Egress": "Egress", - "Latency": "延迟", - "Bandwidth": "带宽", - "Service Type": "服务类型", - "Type: {{selectedService}}": "类型:{{selectedService}}", - "{{selectedMetric}} by {{selectedBreakdown}}": "通过 {{selectedBreakdown}} 的 {{selectedMetric}}", - "thousands": "数千", - "millions": "数百万", - "billions": "数十亿", - "Total Reads {{totalRead}}": "总读操作 {{totalRead}}", - "Total Writes {{totalWrite}}": "总写操作 {{totalWrite}}", - "Total Logical Used Capacity {{logicalCapacity}}": "逻辑使用的总容量 {{logicalCapacity}}", - "Total Physical Used Capacity {{physicalcapacity}}": "物理使用的总容量 {{physicalcapacity}}", - "Shows an overview of the data consumption per provider or account collected from the day of the entity creation.": "显示在创建实体时收集的每个供应商或帐户的数据消耗概述。", - "(in {{suffixLabel}})": "(在 {{suffixLabel}})", - "Data Consumption Graph": "数据消耗图", - "GET {{GETLatestValue}}": "GET {{GETLatestValue}}", - "PUT {{PUTLatestValue}}": "PUT {{PUTLatestValue}}", - "OpenShift Data Foundation": "OpenShift Data Foundation", - "OpenShift Container Storage": "OpenShift Container Storage", - "Service name": "服务名称", - "System name": "系统名称", - "Multicloud Object Gateway": "多云对象网关(MCG)", - "RADOS Object Gateway": "RADOS 对象网关", - "Version": "版本", - "Resource Providers": "资源供应商", - "A list of all Multicloud Object Gateway resources that are currently in use. Those resources are used to store data according to the buckets' policies and can be a cloud-based resource or a bare metal resource.": "当前正在使用的所有 Multicloud Object Gateway 资源列表。这些资源用于根据存储桶策略存储数据,并可以是基于云的资源或裸机资源。", - "Object Service": "对象服务", - "Data Resiliency": "数据弹性", - "Object Service Status": "对象服务状态", - "The object service includes 2 services.": "对象服务包括 2 个服务。", - "The data resiliency includes 2 services": "数据弹性包括 2 个服务", - "Services": "服务", - "Object Gateway (RGW)": "对象网关(RGW)", - "All resources are unhealthy": "所有资源都不健康", - "Object Bucket has an issue": "对象存储桶有问题", - "Many buckets have issues": "很多存储桶有问题", - "Some buckets have issues": "有些存储桶有问题", - "{{capacityRatio, number}}:1": "{{capacityRatio, number}}:1", - "OpenShift Data Foundation can be configured to use compression. The efficiency rate reflects the actual compression ratio when using such a configuration.": "OpenShift Data Foundation 可以配置为使用压缩。在使用这种配置时,效率比率反映了实际的压缩比例。", - "Savings": "节省", - "Savings shows the uncompressed and non-deduped data that would have been stored without those techniques.": "节省显示在没有使用这些技术的情况下,未压缩的数据。", - "Storage Efficiency": "存储效率", - "OpenShift Container Storage Overview": "OpenShift Container Storage 概述", - "Block and File": "块和文件", - "Object_0": "对象", - "BlockPools": "块池", - "Storage Classes": "存储类", - "Pods": "Pod", - "{{metricType}}": "{{metricType}}", - "Break by dropdown": "按下拉菜单分解", - "Service Name": "服务名称", - "Cluster Name": "集群名称", - "Mode": "模式", - "Storage Cluster": "存储集群", - "Utilization": "使用率", - "Used Capacity": "使用的容量", - "Expanding StorageCluster": "扩展存储集群", - "Upgrading OpenShift Data Foundation's Operator": "升级 OpenShift Data Foundation 的 Operator", - "Used Capacity Breakdown": "使用容量分解", - "This card shows the used capacity for different Kubernetes resources. 
The figures shown represent the Usable storage, meaning that data replication is not taken into consideration.": "此卡显示了不同 Kubernetes 资源的使用容量。显示中的内容代表了可用的存储,这意味着数据复制不会被考虑。", - "Cluster name": "集群名称", - "Internal": "内部", - "Raw capacity is the absolute total disk space available to the array subsystem.": "原始容量是阵列子系统可用的绝对磁盘空间。", - "Troubleshoot": "故障排除", - "Active health checks": "主动健康状态检查", - "Progressing": "进行中", - "The Compression Ratio represents the compressible data effectiveness metric inclusive of all compression-enabled pools.": "Compression Ratio 代表所有启用了压缩的池的压缩数据机制指标。", - "The Savings metric represents the actual disk capacity saved inclusive of all compression-enabled pools and associated replicas.": "Savings 指标代表保存的实际磁盘容量,其中包括所有启用了压缩的池和相关副本。", - "Performance metrics over time showing IOPS, Latency and more. Each metric is a link to a detailed view of this metric.": "性能指标数据显示 IOPS、Latency 等信息。每个指标数据都是一个指向这个指标的详细视图的链接。", - "Recovery": "恢复", - "Disk State": "磁盘状态", - "OpenShift Data Foundation status": "OpenShift Data Foundation 状态", - "Filesystem": "文件系统", - "Disks List": "磁盘列表", - "Start Disk Replacement": "启动磁盘替换", - "<0>{{diskName}} can be replaced with a disk of same type.": "<0>{{diskName}} 可使用同类磁盘替换。", - "Troubleshoot disk <1>{{diskName}}": "故障排除磁盘 <1>{{diskName}}", - "here": "此处", - "Online": "在线", - "Offline": "离线", - "NotResponding": "NotResponding", - "PreparingToReplace": "PreparingToReplace", - "ReplacementFailed": "ReplacementFailed", - "ReplacementReady": "ReplacementReady", - "Connection name": "连接名称", - "This is a required field": "这是必填字段", - "A unique name for the key management service within the project.": "项目中密钥管理服务的唯一名称。", - "Service instance ID": "服务实例 ID", - "Service API key": "服务 API 密钥", - "Customer root key": "客户 root 密钥", - "IBM Base URL": "IBM 基本 URL", - "IBM Token URL": "IBM 令牌 URL", - "Connect to a Key Management Service": "连接到一个密钥管理服务", - "Key management service provider": "密钥管理服务供应商", - "kms-provider-name": "kms-provider-name", - "Token": "令牌", - "Create a secret with the token for every namespace using encrypted PVCs.": "为使用加密 PVC 的每个命名空间创建一个带有令牌的 secret。", - "Hide token": "隐藏令牌", - "Reveal token": "显示令牌", - "Authentication method": "身份验证方法", - "authentication-method": "身份验证方法", - "Please enter a URL": "请输入一个 URL", - "Please enter a valid port": "请输入一个有效端口", - "Address": "地址", - "Port": "端口", - "Advanced settings": "高级设置", - "Raw Capacity": "原始容量", - "x {{ replica, number }} replicas =": "x {{ replica, number }} 副本 =", - "No StorageClass selected": "没有选择存储类", - "The Arbiter stretch cluster requires a minimum of 4 nodes (2 different zones, 2 nodes per zone). Please choose a different StorageClass or create a new LocalVolumeSet that matches the minimum node requirement.": "Arbiter 扩展集群至少需要 4 个节点(2 个不同区,每个区 2 个节点)。请选择不同的 StorageClass 或创建一个符合最低节点要求的新 LocalVolumeSet。", - "The StorageCluster requires a minimum of 3 nodes. Please choose a different StorageClass or create a new LocalVolumeSet that matches the minimum node requirement.": "存储集群要求最少 3 个节点。请选择不同的存储集群或创建一个符合最低节点要求的新的本地卷集。", - "Adding capacity for <1>{{name}}, may increase your expenses.": "为 <1>{{name}} 增加容量,可能会增加您的费用。", - "StorageClass": "存储类", - "Currently Used:": "目前使用的:", - "Add": "添加", - "Key Management Service Advanced Settings": "密钥管理服务高级设置", - "Vault enterprise namespaces are isolated environments that functionally exist as Vaults within a Vault. 
They have separate login paths and support creating and managing data isolated to their namespace.": "Vault 企业命名空间是作为 Vaults 存在于 Vault 里的隔离的环境。它们有单独的登录路径,并支持创建和管理隔离在其命名空间内的数据。", - "Maximum file size exceeded. File limit is 4MB.": "超过了最大文件大小。文件限制为 4MB。", - "A PEM-encoded CA certificate file used to verify the Vault server's SSL certificate.": "用于验证 Vault 服务器 SSL 证书的 PEM 编码的 CA 证书文件。", - "A PEM-encoded client certificate. This certificate is used for TLS communication with the Vault server.": "PEM 编码的客户端证书。此证书用于与 Vault 服务器的 TLS 通信。", - "An unencrypted, PEM-encoded private key which corresponds to the matching client certificate provided with VAULT_CLIENT_CERT.": "与 VAULT_CLIENT_CERT 提供的匹配客户端证书对应的未加密 PEM 编码私钥。", - "The name to use as the SNI host when OpenShift Data Foundation connecting via TLS to the Vault server": "当 OpenShift Data Foundation 通过 TLS 连接到 Vault 服务器时用作 SNI 主机的名称", - "Backend Path": "后端路径", - "path/": "path/", - "Authentication Path": "身份验证路径", - "Authentication Namespace": "身份验证命名空间", - "TLS Server Name": "TLS 服务器名称", - "Vault Enterprise Namespace": "Vault 企业命名空间", - "The name must be accurate and must match the service namespace": "名称必须准确且必须与服务命名空间匹配", - "CA Certificate": "CA 证书", - "Upload a .PEM file here...": "在这里上传一个 .PEM 文件...", - "Client Certificate": "客户端证书", - "Client Private Key": "客户端私钥", - "Attach OBC to a Deployment": "把 OBC 附加到部署中", - "Deployment Name": "部署名称", - "Attach": "附加", - "<0><0>{{poolName}} cannot be deleted. When a pool is bounded to PVC it cannot be deleted. Please detach all the resources from StorageClass(es):": "<0><0>{{poolName}} 无法删除。当池绑定到 PVC 时无法删除它。请从存储类中分离所有资源: ", - "<0>Deleting <1>{{poolName}} will remove all the saved data of this pool. Are you sure want to delete?": "<0>删除 <1>{{poolName}} 将删除此池的所有保存数据。您确定要删除吗?", - "BlockPool Delete Modal": "块池删除模态框", - "Try Again": "重试", - "Finish": "完成", - "Go To Pvc List": "进入 Pvc 列表", - "BlockPool Update Form": "块池更新表单", - "replacement disallowed: disk {{diskName}} is {{replacingDiskStatus}}": "不允许替换:磁盘 {{diskName}} 为 {{replacingDiskStatus}}", - "replacement disallowed: disk {{diskName}} is {{replacementStatus}}": "不允许替换:磁盘 {{diskName}} 为 {{replacementStatus}}", - "Disk Replacement": "替换磁盘", - "This action will start preparing the disk for replacement.": "这个操作将开始准备替换磁盘。", - "Data rebalancing is in progress": "数据重新平衡正在进行中", - "See data resiliency status": "请参阅数据弹性状态", - "Are you sure you want to replace <1>{{diskName}}?": "您确定要替换 <1>{{diskName}}?", - "Replace": "替换", - "Create NamespaceStore ": "创建命名空间存储 ", - "Represents an underlying storage to be used as read or write target for the data in the namespace buckets.": "代表一个底层存储,作为命名空间存储桶中的数据读取或写入目标。", - "Provider {{provider}} | Region: {{region}}": "供应商 {{provider}} | 区域: {{region}}", - "Create new NamespaceStore ": "创建新的命名空间存储", - "An error has occurred while fetching namespace stores": "获取命名空间存储时出现错误", - "Select a namespace store": "选择一个命名空间存储", - "Namespace store name": "命名空间存储名称", - "A unique name for the namespace store within the project": "项目中的命名空间存储的唯一名称", - "Persistent volume claim": "持久性卷声明", - "Folder": "目录", - "If the name you write exists, we will be using the existing folder if not we will create a new folder ": "如果存在您写入的名称,我们将使用现有文件夹,否则将创建一个新文件夹 ", - "Namespace Store Table": "命名空间存储表", - "Service account keys are needed for Google Cloud Storage authentication. 
The keys can be found in the service accounts page in the GCP console.": "Google Cloud Storage 身份验证需要服务帐户密钥。这些密钥可以在 GCP 控制台的服务帐户页面中找到。", - "Learn more": "了解更多", - "Where can I find Google Cloud credentials?": "在哪里可以找到 Google Cloud 凭证?", - "Upload a .json file with the service account keys provided by Google Cloud Storage.": "上传一个包括由 Google Cloud Storage 提供的服务帐户密钥的 .json 文件。", - "Secret Key": "Secret 密钥", - "Upload JSON": "上传 JSON", - "Uploaded File Name": "上传的文件名", - "Upload File": "上传文件", - "Switch to Secret": "切换到 Secret", - "Select Secret": "选择 Secret", - "Switch to upload JSON": "切换到上传 JSON", - "Cluster Metadata": "集群元数据", - "Target Bucket": "目标存储桶", - "Number of Volumes": "卷数量", - "Volume Size": "卷大小", - "Target blob container": "目标 blob 容器", - "Target bucket": "目标存储桶", - "Account name": "帐户名称", - "Access key": "访问密钥", - "Account key": "帐户密钥", - "Secret key": "Secret 密钥", - "Region Dropdown": "区域下拉菜单", - "Endpoint": "端点", - "Endpoint Address": "端点地址", - "Secret": "Secret", - "Switch to Credentials": "切换到凭证", - "Access Key Field": "访问密钥字段", - "Secret Key Field": "Secret 密钥字段", - "ObjectBucketClaim Name": "对象存储桶声明名称", - "my-object-bucket": "my-object-bucket", - "If not provided a generic name will be generated.": "如果没有提供,则会生成一个通用名称。", - "Defines the object-store service and the bucket provisioner.": "定义对象存储服务和存储桶置备程序。", - "BucketClass": "存储桶类", - "Select BucketClass": "选择存储桶类", - "Create ObjectBucketClaim": "创建对象存储桶声明", - "Edit YAML": "编辑 YAML", - "Attach to Deployment": "附加到部署", - "Disabled because the ObjectBucketClaim is being deleted.": "禁用,因为 ObjectBucketClaim 正在被删除。", - "Object Bucket Claim Details": "对象存储桶声明详情", - "Object Bucket": "对象存储桶", - "Namespace": "命名空间", - "OBCTableHeader": "OBC 表头", - "Object Bucket Claims": "对象存储桶声明", - "Object Bucket Claim Data": "对象存储桶声明数据", - "Hide Values": "隐藏值", - "Reveal Values": "显示值", - "Data": "数据", - "Create Object Bucket": "创建对象存储桶", - "Object Bucket Name": "对象存储桶名称", - "ob-name-help": "ob-name-help", - "The corresponding ObjectBucketClaim must be deleted first.": "需要首先删除对应的对象存储桶声明。", - "Object Bucket Details": "对象存储桶详情", - "Object Bucket Claim": "对象存储桶声明", - "OBTableHeader": "OB 表头", - "Object Buckets": "对象存储桶", - "Uses the available disks that match the selected filters on all nodes selected in the previous step.": "使用在上一步中选择的所有节点上与所选过滤器匹配的可用磁盘。", - "A LocalVolumeSet allows you to filter a set of disks, group them and create a dedicated StorageClass to consume storage from them.": "本地卷集允许您过滤一组磁盘,对其进行分组,并创建一个专用存储类来使用它们的存储。", - "OpenShift Container Storage's StorageCluster requires a minimum of 3 nodes for the initial deployment. Only {{nodes}} node match to the selected filters. 
Please adjust the filters to include more nodes.": "OpenShift Container Storage 的存储集群最少需要 3 个节点进行初始部署。只有 {{nodes}} 个节点与所选过滤器匹配。请调整过滤器使其包含更多节点。", - "After the LocalVolumeSet and StorageClass are created you won't be able to go back to this step.": "在创建了本地卷集和存储类后,您将无法返回到这一步。", - "Create StorageClass": "创建存储类", - "Selected Capacity": "选择的容量", - "Selected Nodes": "选中的节点", - "Review StorageCluster": "查看存储集群", - "Storage and nodes": "存储和节点", - "Arbiter zone:": "Arbiter 区:", - "None": "无", - "selected based on the created StorageClass:": "根据创建的存储类选择:", - "Total CPU and memory of {{cpu, number}} CPU and {{memory}}": "CPU 和内存总量:{{cpu, number}} 个 CPU 和 {{memory}}", - "Configure": "配置", - "Enable Encryption": "启用加密", - "Connect to external key management service: {{name}}": "连接到外部密钥管理服务: {{name}}", - "Encryption Level: {{level}}": "加密级别:{{level}}", - "Using {{networkLabel}}": "使用 {{networkLabel}}", - "Discover disks": "发现磁盘", - "Review and create": "查看并创建", - "Info Alert": "信息警报", - "Internal - Attached devices": "内部 - 附加的设备", - "Can be used on any platform where there are attached devices to the nodes, using the Local Storage Operator. The infrastructure StorageClass is provided by Local Storage Operator, on top of the attached drives.": "可以通过 Local Storage Operator 在设备附加到节点的任何平台中使用。基础架构存储类由 Local Storage Operator 在附加的驱动器上提供。", - "Before we can create a StorageCluster, the Local Storage operator needs to be installed. When installation is finished come back to OpenShift Container Storage to create a StorageCluster.<1><0>Install": "在创建存储集群前,需要安装 Local Storage operator。当完成安装后,请返回 OpenShift Container Storage 来创建一个存储集群。<1><0>安装", - "Node Table": "节点表", - "StorageCluster exists": "存在存储集群", - "Back to operator page": "返回 operator 页", - "Go to cluster page": "进入集群页", - "<0>A StorageCluster <1>{{clusterName}} already exists.<3>You cannot create another StorageCluster.": "<0>存储集群 <1>{{clusterName}} 已存在。<3>无法创建另一个存储集群。", - "Connect to external cluster": "连接到外部集群", - "Download <1>{{SCRIPT_NAME}} script and run on the RHCS cluster, then upload the results (JSON) in the External cluster metadata field.": "下载 <1>{{SCRIPT_NAME}} 脚本并在 RHCS 集群上运行,然后在外部集群元数据字段上传结果(JSON)。", - "Download Script": "下载脚本", - "A bucket will be created to provide the OpenShift Data Foundation's Service.": "将创建一个存储桶来提供 OpenShift Data Foundation 的服务。", - "Bucket created for OpenShift Container Storage's Service": "为 OpenShift Container Storage 的服务创建的存储桶", - "Create External StorageCluster": "创建外部存储集群", - "External cluster metadata": "外部集群元数据", - "Upload JSON File": "上传 JSON 文件", - "Upload Credentials file": "上传凭证文件", - "JSON data": "JSON 数据", - "Create Button": "创建按钮", - "Create StorageCluster": "创建存储集群", - "OpenShift Container Storage runs as a cloud-native service for optimal integration with applications in need of storage and handles the scenes such as provisioning and management.": "OpenShift Container Storage 作为一个云原生服务运行,可优化与需要存储的应用程序的集成,并处理置备和管理等场景。", - "Select mode:": "选择模式:", - "If not labeled, the selected nodes are labeled <1>{{label}} to make them target hosts for OpenShift Data Foundation's components.": "如果没有标记,所选节点会标记为 <1>{{label}},使其成为 OpenShift Data Foundation 组件的目标主机。", - "Mark nodes as dedicated": "将节点标记为专用", - "This will taint the nodes with the<1>key: node.ocs.openshift.io/storage, <4>value: true, and <7>effect: NoSchedule": "这将使用<1>key: node.ocs.openshift.io/storage、<4>value: true 和 <7>effect: NoSchedule 来对节点进行污点设置", - "Selected nodes will be dedicated to OpenShift Container Storage use only": "所选节点仅供 OpenShift Container
Storage 使用", - "OpenShift Container Storage deployment in two data centers, with an arbiter node to settle quorum decisions.": "OpenShift Container Storage 部署在两个数据中心中,并具有一个仲裁节点来处理仲裁决策。", - "To support high availability when two data centers can be used, enable arbiter to get the valid quorum between two data centers.": "要在可以使用两个数据中心时支持高可用性,请启用 arbiter 以在两个数据中心之间获得有效的仲裁。", - "Select arbiter zone": "选择 arbiter 区", - "Network": "网络", - "The default SDN networking uses a single network for all data operations such read/write and also for control plane, such as data replication. Multus allows a network separation between the data operations and the control plane operations.": "默认 SDN 网络将单个网络用于所有数据操作(如读/写)以及控制平面操作(如数据复制)。Multus 允许在数据操作和控制平面操作间进行网络分离。", - "Default (SDN)": "默认 (SDN)", - "Custom (Multus)": "自定义 (Multus)", - "Public Network Interface": "公共网络接口", - "Select a network": "选择一个网络", - "Cluster Network Interface": "集群网络接口", - "Requested Cluster Capacity:": "请求的集群容量:", - "StorageClass:": "存储类:", - "Select Capacity": "选择容量", - "Requested Capacity": "要求的容量", - "Select Nodes": "选择节点", - "create internal mode StorageCluster wizard": "创建内部模式存储集群向导", - "Can be used on any platform, except bare metal. It means that OpenShift Container Storage uses an infrastructure StorageClass, provided by the hosting platform. For example, gp2 on AWS, thin on VMWare, etc.": "可在除裸机外的任何平台中使用。它表示 OpenShift Container Storage 使用由主机平台提供的基础架构存储类。例如,AWS 上的 gp2、VMWare 上的 thin 等。", - "{{title}} steps": "{{title}} 步骤", - "{{title}} content": "{{title}} 内容", - "{{availableCapacity}} / {{replica}} replicas": "{{availableCapacity}} / {{replica}} 副本", - "Available capacity:": "可用容量:", - "Filesystem name": "文件系统名称", - "Enter filesystem name": "输入文件系统名称", - "CephFS filesystem name into which the volume shall be created": "卷将被创建到的 CephFS 文件系统名称", - "no compression": "没有压缩", - "with compression": "有压缩", - "Replica {{poolSize}} {{compressionText}}": "副本 {{poolSize}} {{compressionText}}", - "Create New Pool": "创建新池", - "Storage Pool": "存储池", - "Select a Pool": "选择一个池", - "Storage pool into which volume data shall be stored": "保存卷数据的存储池", - "Error retrieving Parameters": "获取参数时出错", - "my-storage-pool": "my-storage-pool", - "An encryption key will be generated for each PersistentVolume created using this StorageClass.": "将为每个使用这个存储类创建的持久性卷生成一个加密密钥。", - "Key service": "密钥服务", - "Select an existing connection": "选择一个现有的连接", - "KMS service {{value}} already exist": "KMS 服务 {{value}} 已存在", - "Choose existing KMS connection": "选择现有的 KMS 连接", - "Create new KMS connection": "创建新的 KMS 连接", - "PV expansion operation is not supported for encrypted PVs.": "加密的 PV 不支持 PV 扩展操作。", - "Enable Thick Provisioning": "启用 Thick Provisioning", - "By enabling thick-provisioning, volumes will allocate the requested capacity upon volume creation. Volume creation will be slower when thick-provisioning is enabled.": "通过启用 thick-provisioning,卷将在创建时分配所请求的容量。当启用 thick-provisioning 时,创建卷的速度会减慢。", - "{{resource}} details": "{{resource}} 详情", - "Kind": "种类(Kind)", - "Labels": "标签", - "Last updated": "最后更新", - "Storage Systems": "存储系统", - "Used capacity": "使用的容量", - "Storage status represents the health status of {{operatorName}}'s StorageCluster.": "存储状态代表 {{operatorName}} 的存储集群的健康状态。", - "Health": "健康", - "Standard": "Standard", - "Data will be consumed by a Multi-cloud object gateway, deduped, compressed, and encrypted. The encrypted chunks would be saved on the selected BackingStores. 
Best used when the applications would always use the OpenShift Data Foundation endpoints to access the data.": "数据将由多云对象网关使用,并进行去重、压缩和加密。加密的块将保存在所选后备存储中。最适合在应用程序始终使用 OpenShift Data Foundation 端点访问数据时使用。", - "Data is stored on the NamespaceStores without performing de-duplication, compression, or encryption. BucketClasses of namespace type allow connecting to existing data and serving from them. These are best used for existing data or when other applications (and cloud-native services) need to access the data from outside OpenShift Data Foundation.": "数据存储在命名空间存储中,不执行去重、压缩或加密。命名空间类型的存储桶类允许连接到现有数据并从中提供服务。最适用于现有数据,或其他应用程序(及云原生服务)需要从 OpenShift Data Foundation 外部访问数据的情况。", - "Single NamespaceStore": "单一命名空间存储", - "The namespace bucket will read and write its data to a selected namespace store": "命名空间存储桶将读取和写入其数据到所选命名空间存储", - "Multi NamespaceStores": "多命名空间存储", - "The namespace bucket will serve reads from several selected backing stores, creating a virtual namespace on top of them and will write to one of those as its chosen write target": "命名空间存储桶将从几个所选后备存储提供读取服务,在其之上创建虚拟命名空间,并写入其中之一作为所选的写入目标", - "Cache NamespaceStore": "缓存命名空间存储", - "The caching bucket will serve data from a large raw data out of a local caching tiering.": "缓存存储桶将通过本地缓存层为大型原始数据提供服务。", - "Create storage class": "创建存储类", - "Create local volume set": "创建本地卷集", - "Logical used capacity per account": "每个帐户的逻辑使用容量", - "Egress Per Provider": "每个供应商的 Egress", - "I/O Operations count": "I/O 操作数", - "The StorageClass used by OpenShift Data Foundation to write its data and metadata.": "OpenShift Data Foundation 用于写入其数据和元数据的存储类。", - "Infrastructure StorageClass created by Local Storage Operator and used by OpenShift Container Storage to write its data and metadata.": "由 Local Storage Operator 创建的基础架构存储类,OpenShift Container Storage 使用它来写入其数据和元数据。", - "The amount of capacity that would be dynamically allocated on the selected StorageClass.": "在所选存储类中动态分配的容量。", - "If you wish to use the Arbiter stretch cluster, a minimum of 4 nodes (2 different zones, 2 nodes per zone) and 1 additional zone with 1 node is required. All nodes must be pre-labeled with zones in order to be validated on cluster creation.": "如果您希望使用 Arbiter 扩展集群,则需要至少有 4 个节点(2 个不同的区、每个区有 2 个节点),以及一个额外的带有一个节点的区。所有节点都必须预先标记区以便在创建集群时进行验证。", - "Selected nodes are based on the StorageClass <1>{{scName}} and with a recommended requirement of 14 CPU and 34 GiB RAM per node.": "所选节点基于存储类 <1>{{scName}},推荐每个节点需要 14 个 CPU 和 34 GiB RAM。", - "Selected nodes are based on the StorageClass <1>{{scName}} and fulfill the stretch cluster requirements with a recommended requirement of 14 CPU and 34 GiB RAM per node.": "所选节点基于存储类 <1>{{scName}},满足扩展集群的要求,推荐每个节点需要 14 个 CPU 和 34 GiB RAM。", - "Loading...": "正在载入...", - "Pool {{name}} creation in progress": "池 {{name}} 创建正在进行中", - "Pool {{name}} was successfully created": "池 {{name}} 成功创建", - "An error occurred. Pool {{name}} was not created": "出现错误,池 {{name}} 未创建", - "Pool {{name}} creation timed out. Please check if odf operator and rook operator are running": "池 {{name}} 创建超时。请检查 odf operator 和 rook operator 是否在运行", - "The creation of a StorageCluster is still in progress or has failed. 
Try again after the StorageCuster is ready to use.": "创建存储集群仍在进行中,或者已经失败。请在存储集群就绪后再试。", - "Pool management tasks are not supported for default pool and OpenShift Container Storage's external mode.": "默认池和 OpenShift Container Storage 的外部模式不支持池管理任务。", - "Pool {{name}} was created with errors.": "池 {{name}} 已创建但有错误。", - "Delete": "删除", - "StorageClasses": "存储类", - "hr": "小时", - "min": "分钟", - "A minimal cluster deployment will be performed.": "将会执行最小的集群部署。", - "The selected nodes do not match OpenShift Data Foundation's StorageCluster requirement of an aggregated 30 CPUs and 72 GiB of RAM. If the selection cannot be modified a minimal cluster will be deployed.": "所选节点不满足 OpenShift Data Foundation 存储集群总计 30 个 CPU 和 72 GiB 内存的要求。如果无法修改选择,将部署最小集群。", - "Back to nodes selection": "返回节点选择", - "Select a StorageClass to continue": "选择一个存储类以继续", - "This is a required field. The StorageClass will be used to request storage from the underlying infrastructure to create the backing PersistentVolumes that will be used to provide the OpenShift Data Foundation service.": "这是必需的字段。存储类将用于从底层基础架构请求存储,以创建用于提供 OpenShift Data Foundation 服务的后端持久性卷。", - "Create new StorageClass": "创建新存储类", - "This is a required field. The StorageClass will be used to request storage from the underlying infrastructure to create the backing persistent volumes that will be used to provide the OpenShift Data Foundation service.": "这是必需的字段。存储类将用于从底层基础架构请求存储,以创建用于提供 OpenShift Data Foundation 服务的后端持久性卷。", - "All required fields are not set": "所有必填字段没有设置", - "In order to create the StorageCluster you must set the StorageClass, select at least 3 nodes (preferably in 3 different zones) and meet the minimum or recommended requirement": "要创建存储集群,您必须设置存储类,选择至少 3 个节点(最好是在 3 个不同的区),并满足最低或推荐要求", - "The StorageCluster requires a minimum of 3 nodes for the initial deployment. Please choose a different StorageClass or go to create a new LocalVolumeSet that matches the minimum node requirement.": "存储集群的初始部署最少需要 3 个节点。请选择不同的存储类,或创建一个符合最低节点要求的本地卷集。", - "Create new volume set instance": "创建新卷集实例", - "Select at least 1 encryption level or disable encryption.": "至少选择 1 个加密级别或禁用加密。", - "Fill out the details in order to connect to key management system": "填写详情以便连接到密钥管理系统", - "This is a required field.": "这是一个必需的字段。", - "Both public and cluster network attachment definition cannot be empty": "公共和集群网络附加定义不能为空", - "A public or cluster network attachment definition must be selected to use Multus.": "必须选择一个公共或集群网络附加定义来使用 Multus。", - "The number of selected zones is less than the minimum requirement of 3. 
If not modified a host-based failure domain deployment will be enforced.": "所选区的数量小于最低要求的 3 个。如果没有修改,将强制实施基于主机的故障域部署。", - "When the nodes in the selected StorageClass are spread across fewer than 3 availability zones, the StorageCluster will be deployed with the host based failure domain.": "当所选存储类中的节点分散于少于 3 个可用区时,存储集群将使用基于主机的故障域进行部署。", - "Cluster-Wide and StorageClass": "集群范围和存储类", - "Cluster-Wide": "集群范围", - "Select at least 2 Backing Store resources": "选择至少 2 个后备存储资源", - "Select at least 1 Backing Store resource": "选择至少 1 个后备存储资源", - "x {{replica}} replicas = {{osdSize, number}} TiB": "x {{replica}} 副本 = {{osdSize, number}} TiB", - "SmallScale": "SmallScale", - "0.5 TiB": "0.5 TiB", - "2 TiB": "2 TiB", - "LargeScale": "LargeScale", - "4 TiB": "4 TiB", - "{{osdSize, number}} TiB": "{{osdSize, number}} TiB", - "Help": "帮助" -} \ No newline at end of file diff --git a/frontend/packages/ceph-storage-plugin/locales/zh/console-shared.json b/frontend/packages/ceph-storage-plugin/locales/zh/console-shared.json deleted file mode 100644 index 956acb4e11a6..000000000000 --- a/frontend/packages/ceph-storage-plugin/locales/zh/console-shared.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "View {{title}} metrics in query browser": "在查询浏览器中查看 {{title}} 指标", - "Not available": "不可用", - "{{humanAvailable}} available of {{humanLimit}} total limit": "{{humanAvailable}} 可用(总限制 {{humanLimit}})", - "{{humanAvailable}} available of {{humanMax}}": "{{humanAvailable}} 可用(总共 {{humanMax}})", - "{{humanAvailable}} available": "{{humanAvailable}} 可用" -} \ No newline at end of file diff --git a/frontend/packages/ceph-storage-plugin/package.json b/frontend/packages/ceph-storage-plugin/package.json deleted file mode 100644 index af9c0d966e8d..000000000000 --- a/frontend/packages/ceph-storage-plugin/package.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "name": "@console/ceph-storage-plugin", - "version": "0.0.0-fixed", - "description": "Ceph Storage - Persistent storage for Kubernetes", - "private": true, - "main": "src/index.ts", - "dependencies": { - "@console/plugin-sdk": "0.0.0-fixed", - "@console/shared": "0.0.0-fixed", - "@console/internal": "0.0.0-fixed", - "@console/operator-lifecycle-manager": "0.0.0-fixed", - "@console/local-storage-operator-plugin": "0.0.0-fixed" - }, - "consolePlugin": { - "entry": "src/plugin.ts", - "integrationTestSuites": { - "ceph-storage-install": [ - "integration-tests/**/1-install/installFlow.scenario.ts" - ], - "ceph-storage-tests": [ - "integration-tests/**/2-tests/*.scenario.ts" - ], - "ceph-storage": [ - "integration-tests/**/*.scenario.ts" - ] - }, - "exposedModules": { - "alert": "src/utils/alert-action-path.tsx", - "storageProvider": "src/components/attach-obc/attach-obc-deployment.tsx", - "createStorageSystem": "src/components/create-storage-system/create-storage-system.tsx", - "blockPoolListPage": "./src/components/block-pool/block-pool-list-page.tsx", - "blockPoolCreatePage": "./src/components/block-pool/create-block-pool.tsx", - "blockPoolDetailsPage": "./src/components/block-pool/block-pool-details-page.tsx", - "resourceDetailsPage": "./src/components/odf-resources/resource-details-page.tsx", - "resourceListPage": "./src/components/odf-resources/resource-list-page.tsx", - "bsCreate": "./src/components/create-backingstore-page/create-bs-page.tsx", - "bcCreate": "./src/components/bucket-class/create-bc.tsx", - "nssCreate": "./src/components/namespace-store/create-namespace-store.tsx", - "odfSystemDashboard": "./src/components/dashboards/odf-system-dashboard.tsx", - "actions": 
"src/actions", - "storageProvisioners": "src/utils/odf-provisioners", - "storageProvisionerComponents": "src/components/ocs-storage-class-form/ocs-storage-class-form", - "thickProvisioner": "src/components/ocs-storage-class-form/ocs-thick-provisioner" - } - } -} diff --git a/frontend/packages/ceph-storage-plugin/src/__mocks__/breakdown-data.ts b/frontend/packages/ceph-storage-plugin/src/__mocks__/breakdown-data.ts deleted file mode 100644 index df43f0b25c13..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/__mocks__/breakdown-data.ts +++ /dev/null @@ -1,48 +0,0 @@ -import { humanizeBinaryBytes } from '@console/internal/components/utils'; - -export const breakdownData = { - top5: [ - { - x: 1, - y: 10 * 1000, // 10 MiB - label: 'First Data', - metric: { namespace: 'default' }, - }, - { - x: 2, - y: 20 * 1000, // 20 MiB - label: 'First Data', - metric: { namespace: 'default' }, - }, - { - x: 3, - y: 30 * 1000, // 30 MiB - label: 'First Data', - metric: { namespace: 'default' }, - }, - { - x: 4, - y: 40 * 1000, // 40 MiB - label: 'First Data', - metric: { namespace: 'default' }, - }, - { - x: 5, - y: 50 * 1000, // 50 MiB - label: 'First Data', - metric: { namespace: 'default' }, - }, - ], - capacityAvailable: '10000000', - metricTotal: '10000000', - capacityUsed: '150000', - humanize: humanizeBinaryBytes, - fakeModel: { - abbr: 'fk', - kind: 'fake', - label: 'Fake', - labelPlural: 'Fakes', - plural: 'fakes', - apiVersion: 'v1', - }, -}; diff --git a/frontend/packages/ceph-storage-plugin/src/__mocks__/independent-mode-dashboard-data.ts b/frontend/packages/ceph-storage-plugin/src/__mocks__/independent-mode-dashboard-data.ts deleted file mode 100644 index 56583f311b4d..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/__mocks__/independent-mode-dashboard-data.ts +++ /dev/null @@ -1,70 +0,0 @@ -export const dashboardData = { - watchURL: { - url: 'foo', - }, - stopWatchURL: 'foobar', - watchPrometheus: { - query: 'fooQuery', - }, - stopWatchPrometheusQuery: { - query: 'fooQuery2', - }, - watchAlerts: '', - stopWatchAlerts: '', - urlResults: 'foo', - prometheusResults: { - getIn: () => {}, - }, - notificationAlerts: 'foo', - watchK8sResource: 'foo', - stopWatchK8sResource: 'foo', - detailResources: { - ocs: { - loaded: true, - loadError: false, - data: [ - { - metadata: { - name: 'foo', - }, - }, - ], - }, - subscription: { - loaded: true, - loadError: false, - data: [ - { - spec: { - name: 'ocs-operator', - }, - status: { - installedCSV: 'fooVersion', - }, - }, - ], - }, - }, - statusCardData: { - data: { - data: ['foo', 'bar'], - loaded: true, - loadError: '', - }, - }, - infra: { - metadata: { - name: 'cluster', - }, - status: { - platform: 'AWS', - }, - }, - expectedDropDownItems: { - Pods: 'Pods', - Projects: 'Projects', - 'Storage Classes': 'Storage Classes', - }, - expectedHeaderLink: - 'topk(20, (sum(kubelet_volume_stats_used_bytes * on (namespace,persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)|(ceph.rook.io/block)"})) by (namespace)))', -}; diff --git a/frontend/packages/ceph-storage-plugin/src/__tests__/breakdown-body.spec.tsx b/frontend/packages/ceph-storage-plugin/src/__tests__/breakdown-body.spec.tsx deleted file mode 100644 index 52ba7986563f..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/__tests__/breakdown-body.spec.tsx +++ /dev/null @@ -1,83 +0,0 @@ -import * as React from 
'react'; -import { ShallowWrapper, shallow } from 'enzyme'; -import { - BreakdownBodyProps, - BreakdownCardBody, -} from '../components/dashboards/common/capacity-breakdown/breakdown-body'; -import { getStackChartStats } from '../components/dashboards/common/capacity-breakdown/utils'; -import { breakdownData } from '../__mocks__/breakdown-data'; -import { BreakdownChart } from '../components/dashboards/common/capacity-breakdown/breakdown-chart'; -import { TotalCapacityBody } from '../components/dashboards/common/capacity-breakdown/breakdown-capacity'; -import { BreakdownChartLoading } from '../components/dashboards/common/capacity-breakdown/breakdown-loading'; - -const top5MetricsStats = getStackChartStats(breakdownData.top5, breakdownData.humanize); - -describe('', () => { - let wrapper: ShallowWrapper; - beforeEach(() => { - wrapper = shallow( - , - ); - }); - - it('Renders Breakdown Chart', () => { - const breakdownChart = wrapper.find(BreakdownChart); - expect(breakdownChart.exists()).toBe(true); - expect(breakdownChart.props().data.length).toBe(7); - // Last is popped if capacityAvailable is available(7 - 1) - expect(breakdownChart.props().legends.length).toBe(6); - expect(breakdownChart.props().ocsVersion).toBeFalsy(); - }); - - it('Shows used and available capacity', () => { - expect(wrapper.find(TotalCapacityBody).exists()).toBe(true); - expect(wrapper.find('.capacity-breakdown-card__available-body').exists()).toBe(true); - }); - - it('Hides available capacity text, legend, stack', () => { - wrapper.setProps({ capacityAvailable: null }); - expect(wrapper.find(TotalCapacityBody).exists()).toBe(true); - expect(wrapper.find('.capacity-breakdown-card__available-body').exists()).toBe(false); - const breakdownChart = wrapper.find(BreakdownChart); - expect(breakdownChart.exists()).toBe(true); - expect(breakdownChart.props().data.length).toBe(6); - expect(breakdownChart.props().legends.length).toBe(6); - expect(breakdownChart.props().ocsVersion).toBeFalsy(); - }); - - it('Hides others capacity text, legend, stack', () => { - wrapper.setProps({ - top5MetricsStats: getStackChartStats(breakdownData.top5.slice(0, 4), breakdownData.humanize), - capacityAvailable: null, - }); - const breakdownChart = wrapper.find(BreakdownChart); - expect(breakdownChart.exists()).toBe(true); - expect(breakdownChart.props().data.length).toBe(4); - expect(breakdownChart.props().legends.length).toBe(4); - expect(breakdownChart.props().ocsVersion).toBeFalsy(); - }); - - it('Shows usage data warning', () => { - wrapper.setProps({ capacityUsed: '0' }); - expect(wrapper.text()).toBe('Not enough usage data'); - }); - - it('Shows loading state', () => { - wrapper.setProps({ isLoading: true }); - expect(wrapper.find(BreakdownChartLoading).exists()).toBe(true); - }); - - it('Shows not available', () => { - wrapper.setProps({ capacityUsed: null, top5MetricsStats: [], hasLoadError: true }); - expect(wrapper.text()).toBe('Not available'); - }); -}); diff --git a/frontend/packages/ceph-storage-plugin/src/__tests__/breakdown-chart.spec.tsx b/frontend/packages/ceph-storage-plugin/src/__tests__/breakdown-chart.spec.tsx deleted file mode 100644 index 3c3c3b21bc11..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/__tests__/breakdown-chart.spec.tsx +++ /dev/null @@ -1,64 +0,0 @@ -import * as React from 'react'; -import { ShallowWrapper, shallow } from 'enzyme'; -import { Link } from 'react-router-dom'; -import { Chart, ChartBar } from '@patternfly/react-charts'; -import { Tooltip } from '@patternfly/react-core'; -import 
{ - addAvailable, - getStackChartStats, - getLegends, -} from '../components/dashboards/common/capacity-breakdown/utils'; -import { breakdownData } from '../__mocks__/breakdown-data'; -import { - BreakdownChart, - BreakdownChartProps, - LinkableLegend, - LinkableLegendProps, -} from '../components/dashboards/common/capacity-breakdown/breakdown-chart'; - -const stackData = getStackChartStats(breakdownData.top5, breakdownData.humanize); - -const chartData = addAvailable( - stackData, - breakdownData.capacityAvailable, - breakdownData.metricTotal, - breakdownData.humanize, - (key) => key, -); - -const legends = getLegends(chartData); - -describe('', () => { - let wrapper: ShallowWrapper; - - beforeEach(() => { - wrapper = shallow( - , - ); - }); - - it('Renders ', () => { - const chart = wrapper.find(Chart); - expect(chart.exists()).toBe(true); - expect(chart.find(ChartBar).length).toBe(chartData.length); - }); -}); - -describe('', () => { - let wrapper: ShallowWrapper; - - beforeEach(() => { - wrapper = shallow(); - }); - - it('Renders Link', () => { - expect(wrapper.find(Link).exists()).toBe(true); - }); - - it('Returns tooltip', () => { - wrapper.setProps({ - datum: { name: 'Other', labelId: 'Other', link: '#', labels: { fill: '#000' } }, - }); - expect(wrapper.find(Tooltip).exists()).toBe(true); - }); -}); diff --git a/frontend/packages/ceph-storage-plugin/src/__tests__/flexible-scaling.spec.ts b/frontend/packages/ceph-storage-plugin/src/__tests__/flexible-scaling.spec.ts deleted file mode 100644 index 7a5cf0d1bf78..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/__tests__/flexible-scaling.spec.ts +++ /dev/null @@ -1,63 +0,0 @@ -import { isFlexibleScaling } from '../utils/install'; - -describe('isFlexibleScaling', () => { - describe('for 0 AZ', () => { - it('return truthy with 3 nodes', () => { - expect(isFlexibleScaling(3, 0)).toBe(true); - }); - it('return truthy with more than 3 nodes', () => { - expect(isFlexibleScaling(4, 0)).toBe(true); - }); - it('return falsy with less than 3 nodes', () => { - expect(isFlexibleScaling(2, 0)).toBe(false); - }); - it('return falsy with 0 nodes', () => { - expect(isFlexibleScaling(0, 0)).toBe(false); - }); - }); - - describe('for 1 AZ', () => { - it('return truthy with 3 nodes', () => { - expect(isFlexibleScaling(3, 1)).toBe(true); - }); - it('return truthy with more than 3 nodes', () => { - expect(isFlexibleScaling(4, 1)).toBe(true); - }); - it('return falsy with less than 3 nodes', () => { - expect(isFlexibleScaling(2, 1)).toBe(false); - }); - it('return falsy with 0 nodes', () => { - expect(isFlexibleScaling(0, 1)).toBe(false); - }); - }); - - describe('for 2 AZ', () => { - it('return truthy with 3 nodes', () => { - expect(isFlexibleScaling(3, 2)).toBe(true); - }); - it('return truthy with more than 3 nodes', () => { - expect(isFlexibleScaling(4, 2)).toBe(true); - }); - it('return falsy with less than 3 nodes', () => { - expect(isFlexibleScaling(2, 2)).toBe(false); - }); - it('return falsy with 0 nodes', () => { - expect(isFlexibleScaling(0, 2)).toBe(false); - }); - }); - - describe('for 3 AZ', () => { - it('returns falsy with 3 nodes', () => { - expect(isFlexibleScaling(3, 3)).toBe(false); - }); - it('return falsy with more than 3 nodes', () => { - expect(isFlexibleScaling(4, 3)).toBe(false); - }); - it('return falsy with less than 3 nodes', () => { - expect(isFlexibleScaling(2, 3)).toBe(false); - }); - it('return falsy with 0 nodes', () => { - expect(isFlexibleScaling(0, 3)).toBe(false); - }); - }); -}); diff --git 
a/frontend/packages/ceph-storage-plugin/src/__tests__/independent-dashboard-breakdown-card.spec.tsx b/frontend/packages/ceph-storage-plugin/src/__tests__/independent-dashboard-breakdown-card.spec.tsx deleted file mode 100644 index f33315ea02af..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/__tests__/independent-dashboard-breakdown-card.spec.tsx +++ /dev/null @@ -1,69 +0,0 @@ -import * as React from 'react'; -import { ShallowWrapper, shallow } from 'enzyme'; -import { CardBody, CardHeader, CardTitle, Select } from '@patternfly/react-core'; -import { DashboardItemProps } from '@console/internal/components/dashboard/with-dashboard-resources'; -import { PROJECTS, STORAGE_CLASSES, PODS } from '../constants'; -import { BreakdownCard } from '../components/dashboards/persistent-external/breakdown-card'; -import { dashboardData } from '../__mocks__/independent-mode-dashboard-data'; -import { BreakdownCardBody } from '../components/dashboards/common/capacity-breakdown/breakdown-body'; -import { getSelectOptions } from '../components/dashboards/common/capacity-breakdown/breakdown-dropdown'; - -describe('BreakdownCard', () => { - let wrapper: ShallowWrapper; - const dropdownOptions = [ - { - name: PROJECTS, - id: PROJECTS, - }, - { - name: STORAGE_CLASSES, - id: STORAGE_CLASSES, - }, - { - name: PODS, - id: PODS, - }, - ]; - - const breakdownSelectItems = getSelectOptions(dropdownOptions); - beforeEach(() => { - wrapper = shallow( - , - ).dive(); - }); - - it('Should render Card Header', () => { - expect(wrapper.find(CardHeader).exists()).toBe(true); - }); - - it('Should render Card Title', () => { - expect(wrapper.find(CardTitle).exists()).toBe(true); - }); - - it('Should render Dropdown', () => { - expect(wrapper.find(Select).exists()).toBe(true); - expect(wrapper.find(Select).props().children).toEqual(breakdownSelectItems); - }); - - it('Should render Card body', () => { - expect(wrapper.find(CardBody).exists()).toBe(true); - }); - - it('Should render Breakdown Card body', () => { - expect(wrapper.find(BreakdownCardBody).exists()).toBe(true); - expect(wrapper.find(BreakdownCardBody).props().isLoading).toBe(true); - expect(wrapper.find(BreakdownCardBody).props().hasLoadError).toBe(false); - }); -}); diff --git a/frontend/packages/ceph-storage-plugin/src/__tests__/independent-dashboard-details-card.spec.tsx b/frontend/packages/ceph-storage-plugin/src/__tests__/independent-dashboard-details-card.spec.tsx deleted file mode 100644 index 0999f7920aa4..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/__tests__/independent-dashboard-details-card.spec.tsx +++ /dev/null @@ -1,43 +0,0 @@ -import * as React from 'react'; -import { ShallowWrapper, shallow } from 'enzyme'; - -import { DashboardItemProps } from '@console/internal/components/dashboard/with-dashboard-resources'; -import { CardHeader, CardTitle } from '@patternfly/react-core'; -import { DetailsCard } from '../components/dashboards/persistent-external/details-card'; -import { dashboardData } from '../__mocks__/independent-mode-dashboard-data'; - -xdescribe('DetailsCard', () => { - let wrapper: ShallowWrapper; - - beforeEach(() => { - wrapper = shallow( - , - ).dive(); - }); - - it('Should render Card Header', () => { - expect(wrapper.find(CardHeader).exists()).toBe(true); - }); - - it('Should render Card Title', () => { - expect(wrapper.find(CardTitle).exists()).toBe(true); - }); - - it('Should render details properly', () => { - expect(wrapper.find('[data-test-id="cluster-name"]').text()).toEqual('foo'); - 
expect(wrapper.find('[data-test-id="cluster-subscription"]').text()).toEqual('fooVersion'); - }); -}); diff --git a/frontend/packages/ceph-storage-plugin/src/actions/actions.ts b/frontend/packages/ceph-storage-plugin/src/actions/actions.ts deleted file mode 100644 index 747b93100b81..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/actions/actions.ts +++ /dev/null @@ -1,54 +0,0 @@ -import i18next from 'i18next'; -import { K8sResourceKind } from '@console/internal/module/k8s'; -import { Action } from '@console/dynamic-plugin-sdk'; -import { - addCapacityModal, - addSSCapacityModal, -} from '../components/modals/add-capacity-modal/add-capacity-modal'; -import { updateBlockPoolModal } from '../components/modals/block-pool-modal/update-block-pool-modal'; -import editBucketClassModal from '../components/bucket-class/modals/edit-backingstore-modal'; -import { StoragePoolKind, StorageSystemKind } from '../types'; - -export const CephStorageCSVActions = { - AddCapacity: (resource: K8sResourceKind): Action => { - return { - id: 'add-capacity', - label: i18next.t('ceph-storage-plugin~Add Capacity'), - insertBefore: 'edit-csv', - cta: () => { - addCapacityModal({ ocsConfig: resource }); - }, - }; - }, - EditBlackPool: (resource: StoragePoolKind): Action => { - return { - id: 'edit-block-pool', - label: i18next.t('ceph-storage-plugin~Edit BlockPool'), - insertBefore: 'edit-csv', - cta: () => { - updateBlockPoolModal({ blockPoolConfig: resource }); - }, - }; - }, - EditBucketClassResources: (resource): Action => { - return { - id: 'edit-bucket-class', - label: i18next.t('ceph-storage-plugin~Edit Bucket Class Resources'), - insertBefore: 'edit-csv', - cta: () => { - editBucketClassModal({ bucketClass: resource, modalClassName: 'nb-modal' }); - }, - }; - }, -}; - -export const AddCapacityStorageSystem = (resource: StorageSystemKind): Action => { - return { - id: 'add-capacity-storage-system', - label: i18next.t('ceph-storage-plugin~Add Capacity'), - insertBefore: 'edit-csv', - cta: () => { - addSSCapacityModal({ storageSystem: resource }); - }, - }; -}; diff --git a/frontend/packages/ceph-storage-plugin/src/actions/csv-actions.ts b/frontend/packages/ceph-storage-plugin/src/actions/csv-actions.ts deleted file mode 100644 index 1d4cce065e54..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/actions/csv-actions.ts +++ /dev/null @@ -1,38 +0,0 @@ -import { useMemo } from 'react'; -import { useK8sModel } from '@console/shared/src/hooks/useK8sModel'; -import { referenceFor, referenceForModel } from '@console/internal/module/k8s'; -import { useFlag } from '@console/shared'; -import { AddCapacityStorageSystem, CephStorageCSVActions } from './actions'; -import { - CephBlockPoolModel, - NooBaaBucketClassModel, - OCSServiceModel, - StorageSystemModel, -} from '../models'; -import { FEATURES } from '../features'; - -export const useCsvActions = ({ resource }) => { - const [k8sModel, inFlight] = useK8sModel(referenceFor(resource)); - const disableAddCapacity = useFlag(FEATURES.CSV_ACTIONS); - - const actions = useMemo( - () => [ - ...(referenceForModel(k8sModel) === referenceForModel(OCSServiceModel) - ? [CephStorageCSVActions.AddCapacity(resource)] - : []), - ...(referenceForModel(k8sModel) === referenceForModel(CephBlockPoolModel) - ? [CephStorageCSVActions.EditBlackPool(resource)] - : []), - ...(referenceForModel(k8sModel) === referenceForModel(NooBaaBucketClassModel) - ? 
[CephStorageCSVActions.EditBucketClassResources(resource)] - : []), - ...(referenceForModel(k8sModel) === referenceForModel(StorageSystemModel) && - !disableAddCapacity - ? [AddCapacityStorageSystem(resource)] - : []), - ], - [k8sModel, resource, disableAddCapacity], - ); - - return useMemo(() => [actions, !inFlight, undefined], [actions, inFlight]); -}; diff --git a/frontend/packages/ceph-storage-plugin/src/actions/index.ts b/frontend/packages/ceph-storage-plugin/src/actions/index.ts deleted file mode 100644 index d4507f8112a0..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/actions/index.ts +++ /dev/null @@ -1 +0,0 @@ -export * from './csv-actions'; diff --git a/frontend/packages/ceph-storage-plugin/src/components/attach-obc/attach-obc-deployment.scss b/frontend/packages/ceph-storage-plugin/src/components/attach-obc/attach-obc-deployment.scss deleted file mode 100644 index e10d3ac77a54..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/components/attach-obc/attach-obc-deployment.scss +++ /dev/null @@ -1,4 +0,0 @@ -.ceph-attach-obc__subgroup { - margin-left: var(--pf-global--spacer--md); - margin-top: var(--pf-global--spacer--md); -} diff --git a/frontend/packages/ceph-storage-plugin/src/components/attach-obc/attach-obc-deployment.tsx b/frontend/packages/ceph-storage-plugin/src/components/attach-obc/attach-obc-deployment.tsx deleted file mode 100644 index f7e77ce91f08..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/components/attach-obc/attach-obc-deployment.tsx +++ /dev/null @@ -1,141 +0,0 @@ -import * as React from 'react'; -import { useTranslation } from 'react-i18next'; -import { match as Match } from 'react-router'; -import { History } from 'history'; -import { - ListDropdown, - resourceObjPath, - ButtonBar, - LoadingBox, -} from '@console/internal/components/utils'; -import { - K8sKind, - referenceForModel, - k8sCreate, - k8sPatch, - DeploymentKind, - referenceFor, -} from '@console/internal/module/k8s/'; -import { Form, FormGroup, Radio, ActionGroup, Button } from '@patternfly/react-core'; -import { useK8sGet } from '@console/internal/components/utils/k8s-get-hook'; -import { getName } from '@console/shared'; -import { NooBaaObjectBucketClaimModel } from '../../models'; -import { CreateOBCForm } from '../object-bucket-claim-page/create-obc'; -import { commonReducer, defaultState } from '../object-bucket-page/state'; -import { getAttachOBCPatch } from '../../utils'; -import './attach-obc-deployment.scss'; - -const AttachStorage: React.FC = (props) => { - const { t } = useTranslation(); - const [state, dispatch] = React.useReducer(commonReducer, defaultState); - const [createOBC, setCreateOBC] = React.useState(false); - const [selectedOBC, setSelectedOBC] = React.useState(''); - const { kindObj, namespace, resourceName, history } = props; - - const [deployment, loaded, loadError] = useK8sGet( - kindObj, - resourceName, - namespace, - ); - - const onSubmit = async (e: { preventDefault: () => void }) => { - e.preventDefault(); - try { - let obc = selectedOBC; - if (createOBC) { - dispatch({ type: 'setProgress' }); - const obj = await k8sCreate(NooBaaObjectBucketClaimModel, state.payload); - obc = getName(obj); - } - const patch = getAttachOBCPatch(obc, deployment); - const patchedObj = await k8sPatch(kindObj, deployment, patch); - dispatch({ type: 'unsetProgress' }); - history.push(`${resourceObjPath(patchedObj, referenceFor(patchedObj))}/environment`); - } catch (err) { - dispatch({ type: 'unsetProgress' }); - dispatch({ type: 'setError', message: 
err.message }); - } - }; - - const onRadioToggle = () => setCreateOBC((val) => !val); - - return ( -
- - - {!createOBC && ( -
- setSelectedOBC(item)} - /> -
- )} -
- - - {createOBC && ( -
- -
- )} -
- - - - - - -
- ); -}; - -const AttachStorageWrapper: React.FC = (props) => { - const { - kindObj, - kindsInFlight, - match: { params }, - } = props; - return !kindObj && kindsInFlight ? ( - - ) : ( - - ); -}; - -type AttachStorageWrapperProps = { - kindObj: K8sKind; - kindsInFlight: any; - match?: Match<{ ns: string; name: string }>; - history: History; -}; - -type AttachStorageProps = AttachStorageWrapperProps & { - namespace: string; - resourceName: string; -}; - -export default AttachStorageWrapper; diff --git a/frontend/packages/ceph-storage-plugin/src/components/block-pool/block-pool-details-page.tsx b/frontend/packages/ceph-storage-plugin/src/components/block-pool/block-pool-details-page.tsx deleted file mode 100644 index e53bc4a5e2be..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/components/block-pool/block-pool-details-page.tsx +++ /dev/null @@ -1,89 +0,0 @@ -import * as React from 'react'; -import { useTranslation } from 'react-i18next'; -import { RouteComponentProps } from 'react-router'; -import { useLocation } from 'react-router-dom'; - -import { referenceForModel } from '@console/internal/module/k8s/k8s'; -import { DetailsPage } from '@console/internal/components/factory'; -import { navFactory, ResourceIcon } from '@console/internal/components/utils'; -import { useK8sWatchResource } from '@console/internal/components/utils/k8s-watch-hook'; - -import { menuActionCreator } from './block-pool-menu-action'; -import { CephClusterKind } from '../../types'; -import { cephClusterResource } from '../../resources'; -import { BlockPoolDashboard } from '../dashboards/block-pool/block-pool-dashboard'; -import { CEPH_STORAGE_NAMESPACE } from '../../constants'; -import { CephBlockPoolModel } from '../../models'; - -const BlockPoolIcon: React.FC = ({ name, kind }) => { - return ( - - - {name} - - ); -}; - -const BlockPoolDetailsPage: React.FC = (props) => { - const { poolName } = props.match.params; - const { editYaml } = navFactory; - const { t } = useTranslation(); - const location = useLocation(); - const kind = referenceForModel(CephBlockPoolModel); - - const [cephClusters] = useK8sWatchResource(cephClusterResource); - - // Overview page and YAML page - const pagesFor = React.useCallback( - () => [ - { - href: '', - // t('ceph-storage-plugin~Overview') - nameKey: 'ceph-storage-plugin~Overview', - component: BlockPoolDashboard, - }, - editYaml(), - ], - [editYaml], - ); - - const breadcrumbs = () => [ - { - name: t('ceph-storage-plugin~StorageSystems'), - path: '/odf/systems', - }, - { - name: t('ceph-storage-plugin~StorageSystem details'), - path: location.pathname.split(`/${poolName}`)[0], - }, - { - name: poolName, - path: '', - }, - ]; - - return ( - } - customData={{ tFunction: t, cephCluster: cephClusters?.[0] }} - /> - ); -}; - -type BlockPoolIconProps = { - name: string; - kind: string; -}; - -type BlockPoolDetailsPagePros = RouteComponentProps<{ poolName: string }>; - -export default BlockPoolDetailsPage; diff --git a/frontend/packages/ceph-storage-plugin/src/components/block-pool/block-pool-list-page.tsx b/frontend/packages/ceph-storage-plugin/src/components/block-pool/block-pool-list-page.tsx deleted file mode 100644 index 523e4739b824..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/components/block-pool/block-pool-list-page.tsx +++ /dev/null @@ -1,334 +0,0 @@ -import * as React from 'react'; -import { TFunction } from 'i18next'; -import { useTranslation } from 'react-i18next'; -import { useLocation, Link } from 'react-router-dom'; - -import { 
useDeepCompareMemoize, Status } from '@console/shared'; -import { StatusIconAndText } from '@console/dynamic-plugin-sdk'; -import { sortable, wrappable } from '@patternfly/react-table'; -import { Tooltip } from '@patternfly/react-core'; -import { referenceForModel, referenceFor } from '@console/internal/module/k8s'; -import { ListPage, Table, RowFunctionArgs, TableData } from '@console/internal/components/factory'; -import { - ResourceIcon, - ResourceKebab, - Kebab, - humanizeBinaryBytes, -} from '@console/internal/components/utils'; -import { usePrometheusPoll } from '@console/internal/components/graphs/prometheus-poll-hook'; -import { PrometheusEndpoint } from '@console/internal/components/graphs/helpers'; -import { StorageClassModel } from '@console/internal/models'; -import { useK8sWatchResource } from '@console/internal/components/utils/k8s-watch-hook'; - -import { menuActions, disableMenuAction } from './block-pool-menu-action'; -import { healthStateMapping } from '../dashboards/block-pool/states'; -import { CephBlockPoolModel } from '../../models'; -import { StoragePoolKind, OcsStorageClassKind, CephClusterKind } from '../../types'; -import { CEPH_STORAGE_NAMESPACE } from '../../constants'; -import { - BlockPoolColumnInfo, - getScNamesUsingPool, - getPerPoolMetrics, - isDefaultPool, -} from '../../utils/block-pool'; -import { twelveHoursdateTimeNoYear } from '../../utils/common'; -import { PopoverHelper } from '../../utils/popover-helper'; -import { getPoolQuery, StorageDashboardQuery } from '../../queries/ceph-queries'; -import { COMPRESSION_ON } from '../../constants/storage-pool-const'; -import { cephClusterResource, scResource } from '../../resources'; - -const getHeader = (t: TFunction) => () => { - const blockPoolColumnInfo = BlockPoolColumnInfo(t); - - return [ - { - title: blockPoolColumnInfo.name.title, - id: blockPoolColumnInfo.name.id, - sortField: 'metadata.name', - transforms: [sortable], - props: { className: blockPoolColumnInfo.name.classes }, - }, - { - title: blockPoolColumnInfo.status.title, - id: blockPoolColumnInfo.status.id, - props: { className: blockPoolColumnInfo.status.classes }, - }, - { - title: blockPoolColumnInfo.storageclasses.title, - id: blockPoolColumnInfo.storageclasses.id, - props: { className: blockPoolColumnInfo.storageclasses.classes }, - }, - { - title: blockPoolColumnInfo.replicas.title, - id: blockPoolColumnInfo.replicas.id, - props: { className: blockPoolColumnInfo.replicas.classes }, - }, - { - title: blockPoolColumnInfo.usedcapacity.title, - id: blockPoolColumnInfo.usedcapacity.id, - transforms: [wrappable], - props: { className: blockPoolColumnInfo.usedcapacity.classes }, - }, - { - title: blockPoolColumnInfo.mirroringstatus.title, - id: blockPoolColumnInfo.mirroringstatus.id, - transforms: [wrappable], - props: { className: blockPoolColumnInfo.mirroringstatus.classes }, - }, - { - title: blockPoolColumnInfo.overallImagehealth.title, - id: blockPoolColumnInfo.overallImagehealth.id, - transforms: [wrappable], - props: { className: blockPoolColumnInfo.overallImagehealth.classes }, - }, - { - title: blockPoolColumnInfo.compressionstatus.title, - id: blockPoolColumnInfo.compressionstatus.id, - transforms: [wrappable], - props: { className: blockPoolColumnInfo.compressionstatus.classes }, - }, - { - title: blockPoolColumnInfo.compressionsavings.title, - id: blockPoolColumnInfo.compressionsavings.id, - transforms: [wrappable], - props: { className: blockPoolColumnInfo.compressionsavings.classes }, - }, - { - title: '', - props: { className: 
Kebab.columnClass }, - }, - ]; -}; - -const BlockPoolTableRow: React.FC> = ({ obj, customData }) => { - const { t } = useTranslation(); - const blockPoolColumnInfo = BlockPoolColumnInfo(t); - const props: BlockPoolListRowProps = customData; - const { name } = obj.metadata; - const replica = obj.spec?.replicated?.size; - const mirroringStatus: boolean = obj.spec?.mirroring?.enabled; - const mirroringImageHealth: string = mirroringStatus - ? obj.status?.mirroringStatus?.summary?.image_health - : '-'; - const lastChecked: string = obj.status?.mirroringStatus?.lastChecked; - const formatedDateTime = lastChecked - ? twelveHoursdateTimeNoYear.format(new Date(lastChecked)) - : '-'; - const compressionStatus: boolean = obj.spec?.compressionMode === COMPRESSION_ON; - const phase = obj?.status?.phase; - - // Hooks - const poolScNames: string[] = React.useMemo( - () => getScNamesUsingPool(props?.storageClasses, name), - [name, props], - ); - - // Details page link - const to = `${props.listPagePath}/${name}`; - - // Metrics - // {poolRawCapacity: {"pool-1" : size_bytes, "pool-2" : size_bytes, ...}} - const rawCapacity: string = props.poolRawCapacity[name] - ? humanizeBinaryBytes(props.poolRawCapacity[name]).string - : '-'; - const compressionSavings: string = props.poolCompressionSavings[name] - ? humanizeBinaryBytes(props.poolCompressionSavings[name]).string - : '-'; - - return ( - <> - - - - {name} - - - - - - - - - - {replica} - - - {rawCapacity} - - - {mirroringStatus ? t('ceph-storage-plugin~Enabled') : t('ceph-storage-plugin~Disabled')} - - - - - - - - {compressionStatus ? t('ceph-storage-plugin~Enabled') : t('ceph-storage-plugin~Disabled')} - - - {compressionStatus ? compressionSavings : '-'} - - - {isDefaultPool(obj) ? ( - - - - ) : ( - - )} - - - ); -}; - -const BlockPoolList: React.FC = (props) => { - const { t } = useTranslation(); - - // Hooks - const [cephClusters] = useK8sWatchResource(cephClusterResource); - const [scResources] = useK8sWatchResource(scResource); - const memoizedSC: OcsStorageClassKind[] = useDeepCompareMemoize(scResources, true); - const poolNames: string[] = props.data.map((pool) => pool.metadata?.name); - const memoizedPoolNames = useDeepCompareMemoize(poolNames, true); - - // Metrics - const [poolRawCapacityMetrics, rawCapLoadError, rawCapLoading] = usePrometheusPoll({ - endpoint: PrometheusEndpoint.QUERY, - query: getPoolQuery(memoizedPoolNames, StorageDashboardQuery.POOL_RAW_CAPACITY_USED), - namespace: CEPH_STORAGE_NAMESPACE, - }); - - // compression queries - const [compressionSavings, compressionLoadError, compressionLoading] = usePrometheusPoll({ - endpoint: PrometheusEndpoint.QUERY, - query: getPoolQuery(poolNames, StorageDashboardQuery.POOL_COMPRESSION_SAVINGS), - namespace: CEPH_STORAGE_NAMESPACE, - }); - - const customData = React.useMemo(() => { - const poolRawCapacity: PoolMetrics = getPerPoolMetrics( - poolRawCapacityMetrics, - rawCapLoadError, - rawCapLoading, - ); - const poolCompressionSavings: PoolMetrics = getPerPoolMetrics( - compressionSavings, - compressionLoadError, - compressionLoading, - ); - return { - storageClasses: memoizedSC ?? 
[], - cephCluster: cephClusters?.[0], - poolRawCapacity, - poolCompressionSavings, - listPagePath: props.customData, - }; - }, [ - cephClusters, - compressionLoadError, - compressionLoading, - compressionSavings, - memoizedSC, - poolRawCapacityMetrics, - rawCapLoadError, - rawCapLoading, - props.customData, - ]); - - return ( - - ); -}; - -export const BlockPoolListPage: React.FC = (props) => { - const location = useLocation(); - const listPagePath: string = location.pathname; - const createProps = { - to: `${listPagePath}/create/~new`, - }; - return ( - - ); -}; - -export type PoolMetrics = { - [poolName: string]: string; -}; - -type BlockPoolListRowProps = { - cephCluster: CephClusterKind; - storageClasses: OcsStorageClassKind[]; - poolRawCapacity: PoolMetrics; - poolCompressionSavings: PoolMetrics; - listPagePath: string; -}; - -type BlockPoolListProps = { - data: StoragePoolKind[]; - customData: string; -}; - -type BlockListPoolPageProps = { - namespace: string; -}; diff --git a/frontend/packages/ceph-storage-plugin/src/components/block-pool/block-pool-menu-action.ts b/frontend/packages/ceph-storage-plugin/src/components/block-pool/block-pool-menu-action.ts deleted file mode 100644 index f24f420464d2..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/components/block-pool/block-pool-menu-action.ts +++ /dev/null @@ -1,46 +0,0 @@ -import { TFunction } from 'i18next'; - -import { asAccessReview } from '@console/internal/components/utils'; -import { K8sKind } from '@console/internal/module/k8s'; - -import { StoragePoolKind, CephClusterKind } from '../../types'; -import { updateBlockPoolModal } from '../modals/block-pool-modal/update-block-pool-modal'; -import { deleteBlockPoolModal } from '../modals/block-pool-modal/delete-block-pool-modal'; -import { CEPH_EXTERNAL_CR_NAME } from '../../constants'; -import { isDefaultPool } from '../../utils/block-pool'; - -export const disableMenuAction = (blockPoolConfig: StoragePoolKind, cephCluster: CephClusterKind) => - blockPoolConfig?.metadata?.deletionTimestamp || - cephCluster?.metadata?.name === CEPH_EXTERNAL_CR_NAME || - isDefaultPool(blockPoolConfig); - -const editBlockPool = (kindObj: K8sKind, blockPoolConfig: StoragePoolKind, _, customData) => { - const t: TFunction = customData?.tFunction; - return { - labelKey: t('ceph-storage-plugin~Edit BlockPool'), - callback: () => updateBlockPoolModal({ kindObj, blockPoolConfig }), - accessReview: asAccessReview(kindObj, blockPoolConfig, 'patch'), - }; -}; - -const deleteBlockPool = (kindObj: K8sKind, blockPoolConfig: StoragePoolKind, _, customData) => { - const t: TFunction = customData?.tFunction; - return { - labelKey: t('ceph-storage-plugin~Delete BlockPool'), - callback: () => deleteBlockPoolModal({ kindObj, blockPoolConfig }), - accessReview: asAccessReview(kindObj, blockPoolConfig, 'patch'), - }; -}; - -export const menuActions = [editBlockPool, deleteBlockPool]; - -export const menuActionCreator = ( - kindObj: K8sKind, - blockPoolConfig: StoragePoolKind, - resource?: any, - customData?: any, -) => { - if (!disableMenuAction(blockPoolConfig, customData?.cephCluster)) - return menuActions.map((action) => action(kindObj, blockPoolConfig, resource, customData)); - return []; -}; diff --git a/frontend/packages/ceph-storage-plugin/src/components/block-pool/body.scss b/frontend/packages/ceph-storage-plugin/src/components/block-pool/body.scss deleted file mode 100644 index 8360eea114ea..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/components/block-pool/body.scss +++ /dev/null 
@@ -1,16 +0,0 @@ -.ceph-block-pool-body__input { - margin: var(--pf-global--spacer--lg) 0 !important; -} - -.ceph-block-pool__switch { - --pf-c-switch--FontSize: var(--pf-global--FontSize--xs) !important; - --pf-c-switch__input--focus__toggle--OutlineWidth: 0 !important; // To disable the switch focus border -} - -.ceph-block-pool__error-icon { - color: var(--pf-global--danger-color--100) !important; -} - -.ceph-block-pool__check-icon { - color: var(--pf-global--success-color--100) !important; -} diff --git a/frontend/packages/ceph-storage-plugin/src/components/block-pool/body.tsx b/frontend/packages/ceph-storage-plugin/src/components/block-pool/body.tsx deleted file mode 100644 index 20e94913b77b..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/components/block-pool/body.tsx +++ /dev/null @@ -1,286 +0,0 @@ -import * as React from 'react'; -import { useTranslation } from 'react-i18next'; -import * as _ from 'lodash'; - -import { - Alert, - Dropdown, - DropdownToggle, - DropdownItem, - EmptyState, - EmptyStateIcon, - EmptyStateBody, -} from '@patternfly/react-core'; -import { CaretDownIcon } from '@patternfly/react-icons'; -import { ListKind } from '@console/internal/module/k8s'; -import { useK8sGet } from '@console/internal/components/utils/k8s-get-hook'; -import { useFlag } from '@console/shared/src/hooks/flag'; - -import { CephClusterKind, StorageClusterKind } from '../../types'; -import { OCSServiceModel } from '../../models'; -import { CEPH_STORAGE_NAMESPACE, OCS_DEVICE_REPLICA } from '../../constants/index'; -import { checkArbiterCluster } from '../../utils/common'; -import { - PROGRESS_STATUS, - ProgressStatusProps, - BlockPoolState, - BlockPoolAction, - BlockPoolActionType, - getErrorMessage, -} from '../../utils/block-pool'; -import { POOL_STATE, POOL_PROGRESS } from '../../constants/storage-pool-const'; - -import { FEATURES } from '../../features'; - -import './body.scss'; - -export const BlockPoolStatus: React.FC = ({ status, name, error = '' }) => { - const { t } = useTranslation(); - const statusObj: ProgressStatusProps = PROGRESS_STATUS(t, name).find( - (state) => state.name === status, - ); - - return ( - <> - - - - {error ? getErrorMessage(error) : statusObj.desc} - - - - ); -}; - -export type BlockPoolStatusProps = { - status: string; - name?: string; - error?: string; -}; - -export const BlockPoolBody = (props: BlockPoolBodyPros) => { - const { cephCluster, state, dispatch, showPoolStatus, isUpdate } = props; - const { t } = useTranslation(); - - const isPoolManagementSupported = useFlag(FEATURES.OCS_POOL_MANAGEMENT); - const [storageCluster, storageClusterLoaded, storageClusterLoadError] = useK8sGet< - ListKind - >(OCSServiceModel, null, CEPH_STORAGE_NAMESPACE); - - const [isReplicaOpen, setReplicaOpen] = React.useState(false); - const [isVolumeTypeOpen, setVolumeTypeOpen] = React.useState(false); - const [availableDeviceClasses, setAvailableDeviceClasses] = React.useState([]); - - // Failure Domain - React.useEffect(() => { - if (storageClusterLoaded && !storageClusterLoadError) - dispatch({ - type: BlockPoolActionType.SET_FAILURE_DOMAIN, - payload: storageCluster?.items[0]?.status?.failureDomain || '', - }); - }, [storageCluster, storageClusterLoaded, storageClusterLoadError, dispatch]); - - // Volume Type - const deviceClasses = React.useMemo(() => cephCluster?.status?.storage?.deviceClasses ?? 
[], [ - cephCluster, - ]); - - const setVolumeType = React.useCallback( - (volumeType: string) => - dispatch({ type: BlockPoolActionType.SET_POOL_VOLUME_TYPE, payload: volumeType }), - [dispatch], - ); - - React.useEffect(() => { - if (deviceClasses.length && isPoolManagementSupported) { - if (state.volumeType === '') { - // Set default value - const ssdDeviceClass = - deviceClasses.find((deviceClass) => deviceClass.name === 'ssd') || {}; - Object.keys(ssdDeviceClass).length - ? setVolumeType('ssd') - : setVolumeType(deviceClasses[0].name); - } - - // Volume Type dropdown - setAvailableDeviceClasses( - deviceClasses.map((device) => { - return ( - setVolumeType(e.currentTarget.id)} - > - {device?.name.toUpperCase()} - - ); - }), - ); - } - }, [deviceClasses, dispatch, isPoolManagementSupported, setVolumeType, state.volumeType]); - - // Check storage cluster is in ready state - const isClusterReady: boolean = cephCluster?.status?.phase === POOL_STATE.READY; - - // Check storage cluster is arbiter - React.useEffect(() => { - const isArbiterCluster: boolean = checkArbiterCluster(storageCluster?.items[0]); - dispatch({ type: BlockPoolActionType.SET_POOL_ARBITER, payload: isArbiterCluster }); - if (isArbiterCluster) { - dispatch({ type: BlockPoolActionType.SET_POOL_REPLICA_SIZE, payload: '4' }); - } - }, [ - storageCluster, - storageClusterLoaded, - storageClusterLoadError, - state.isArbiterCluster, - dispatch, - ]); - - const replicaList: string[] = _.keys(OCS_DEVICE_REPLICA).filter( - (replica: string) => - (state.isArbiterCluster && replica === '4') || (!state.isArbiterCluster && replica !== '4'), - ); - - const replicaDropdownItems = replicaList.map((replica) => ( - - dispatch({ type: BlockPoolActionType.SET_POOL_REPLICA_SIZE, payload: e.currentTarget.id }) - } - > - {t('ceph-storage-plugin~{{replica}} Replication', { replica: OCS_DEVICE_REPLICA[replica] })} - - )); - - return ( - <> - {isClusterReady || !showPoolStatus ? ( - <> -
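The device-class defaulting deleted in this hunk picks 'ssd' when the CephCluster reports such a class and falls back to the first reported class otherwise. A minimal sketch of that selection, assuming a simplified DeviceClass shape:

// Sketch of the default volume-type logic: prefer the 'ssd' device class,
// otherwise fall back to the first class the cluster reports.
type DeviceClass = { name: string };

const pickDefaultVolumeType = (deviceClasses: DeviceClass[]): string | undefined =>
  deviceClasses.length
    ? (deviceClasses.find((dc) => dc.name === 'ssd') ?? deviceClasses[0]).name
    : undefined;

// pickDefaultVolumeType([{ name: 'hdd' }, { name: 'ssd' }]) returns 'ssd'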
- - - dispatch({ - type: BlockPoolActionType.SET_POOL_NAME, - payload: e.currentTarget.value, - }) - } - value={state.poolName} - placeholder={t('ceph-storage-plugin~my-block-pool')} - aria-describedby={t('ceph-storage-plugin~pool-name-help')} - id="pool-name" - name="newPoolName" - data-test="new-pool-name-textbox" - disabled={isUpdate} - required - /> -
-
- - setReplicaOpen(!isReplicaOpen)} - toggleIndicator={CaretDownIcon} - isDisabled={state.isArbiterCluster} - > - {state.replicaSize - ? t('ceph-storage-plugin~{{replica}} Replication', { - replica: OCS_DEVICE_REPLICA[state.replicaSize], - }) - : t('ceph-storage-plugin~Select replication')} - - } - isOpen={isReplicaOpen} - dropdownItems={replicaDropdownItems} - onSelect={() => setReplicaOpen(false)} - id="pool-replica-size" - /> -
- {isPoolManagementSupported && ( -
- - setVolumeTypeOpen(!isVolumeTypeOpen)} - toggleIndicator={CaretDownIcon} - isDisabled={isUpdate} - > - {state.volumeType.toUpperCase() || t('ceph-storage-plugin~Select volume type')} - - } - isOpen={isVolumeTypeOpen} - dropdownItems={availableDeviceClasses} - onSelect={() => setVolumeTypeOpen(false)} - id="pool-volume-type" - /> -
- )} -
- -
- -
-
- {state.isCompressed && ( - - )} - - ) : ( - - )} - - ); -}; - -export type BlockPoolBodyPros = { - cephCluster?: CephClusterKind; - state: BlockPoolState; - showPoolStatus: boolean; - dispatch: React.Dispatch; - isUpdate?: boolean; -}; diff --git a/frontend/packages/ceph-storage-plugin/src/components/block-pool/create-block-pool.scss b/frontend/packages/ceph-storage-plugin/src/components/block-pool/create-block-pool.scss deleted file mode 100644 index 4be2ff8606e3..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/components/block-pool/create-block-pool.scss +++ /dev/null @@ -1,5 +0,0 @@ -.ceph-create-block-pool__form { - max-width: 80%; - margin-left: var(--pf-global--spacer--lg); - margin-right: var(--pf-global--spacer--lg); -} diff --git a/frontend/packages/ceph-storage-plugin/src/components/block-pool/create-block-pool.tsx b/frontend/packages/ceph-storage-plugin/src/components/block-pool/create-block-pool.tsx deleted file mode 100644 index 4cbfb81503a5..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/components/block-pool/create-block-pool.tsx +++ /dev/null @@ -1,137 +0,0 @@ -import * as React from 'react'; -import { match as RouteMatch } from 'react-router'; -import { useTranslation } from 'react-i18next'; - -import { useK8sWatchResource } from '@console/internal/components/utils/k8s-watch-hook'; -import { useDeepCompareMemoize } from '@console/shared'; -import { StatusBox } from '@console/internal/components/utils/status-box'; -import { history } from '@console/internal/components/utils'; -import { Button } from '@patternfly/react-core'; -import { k8sCreate } from '@console/internal/module/k8s'; -import { Modal } from '@console/shared/src/components/modal'; - -import { BlockPoolBody } from './body'; -import { BlockPoolFooter } from './footer'; -import { CephClusterKind, StoragePoolKind } from '../../types'; -import { cephClusterResource } from '../../resources'; -import { CEPH_EXTERNAL_CR_NAME } from '../../constants'; -import { CephBlockPoolModel } from '../../models'; -import { - blockPoolReducer, - blockPoolInitialState, - BlockPoolActionType, - getPoolKindObj, - getErrorMessage, -} from '../../utils/block-pool'; -import { POOL_STATE } from '../../constants/storage-pool-const'; - -import './create-block-pool.scss'; - -const CreateBlockPool: React.FC = ({ match }) => { - const { params, url } = match; - const { t } = useTranslation(); - - const [state, dispatch] = React.useReducer(blockPoolReducer, blockPoolInitialState); - const [cephClusters, isLoaded, loadError] = useK8sWatchResource( - cephClusterResource, - ); - - const cephCluster: CephClusterKind = useDeepCompareMemoize(cephClusters[0], true); - - // OCS create pool page url ends with ~new, ODF create pool page ends with /create/~new - const blockPoolPageUrl = params?.appName - ? 
url.replace('/~new', '') - : url.replace('/create/~new', ''); - - const onClose = () => { - history.goBack(); - }; - - // Create new pool - const createPool = () => { - if (cephCluster?.status?.phase === POOL_STATE.READY) { - const poolObj: StoragePoolKind = getPoolKindObj(state); - - dispatch({ type: BlockPoolActionType.SET_INPROGRESS, payload: true }); - k8sCreate(CephBlockPoolModel, poolObj) - .then(() => history.push(`${blockPoolPageUrl}/${state.poolName}`)) - .finally(() => dispatch({ type: BlockPoolActionType.SET_INPROGRESS, payload: false })) - .catch((err) => - dispatch({ - type: BlockPoolActionType.SET_ERROR_MESSAGE, - payload: getErrorMessage(err.message) || 'Could not create BlockPool.', - }), - ); - } else - dispatch({ - type: BlockPoolActionType.SET_ERROR_MESSAGE, - payload: t( - "ceph-storage-plugin~OpenShift Data Foundation's StorageCluster is not available. Try again after the StorageCluster is ready to use.", - ), - }); - }; - - if (cephCluster?.metadata.name === CEPH_EXTERNAL_CR_NAME) { - return ( - - {t('ceph-storage-plugin~Close')} - , - ]} - > - - {t( - "ceph-storage-plugin~Pool creation is not supported for OpenShift Data Foundation's external RHCS StorageSystem.", - )} - - - ); - } - - return ( - <> -
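The createPool handler above drives the whole create flow through the reducer: set an in-progress flag, create the CR, navigate on success, and record a readable error on failure. A hedged sketch of the same flow, with k8sCreateStub standing in for the console's k8sCreate:

// Sketch of createPool's promise flow (dispatch mirrors the reducer actions
// used in this file; k8sCreateStub is a stand-in, not the console API).
const k8sCreateStub = (pool: { name: string }) => Promise.resolve(pool);

function createPoolSketch(
  name: string,
  dispatch: (action: { type: string; payload: unknown }) => void,
  navigate: (path: string) => void,
) {
  dispatch({ type: 'SET_INPROGRESS', payload: true });
  k8sCreateStub({ name })
    .then((created) => navigate(`/pools/${created.name}`))
    .finally(() => dispatch({ type: 'SET_INPROGRESS', payload: false }))
    .catch((err: Error) =>
      dispatch({ type: 'SET_ERROR_MESSAGE', payload: err.message || 'Could not create BlockPool.' }),
    );
}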
-

- {t('ceph-storage-plugin~Create BlockPool')} -

-

- {t(
-   'ceph-storage-plugin~A BlockPool is a logical entity providing elastic capacity to applications and workloads. Pools provide a means of supporting policies for access, data resilience, and storage efficiency.',
- )}
-

-
-
- {isLoaded && !loadError ? ( - <> - - - - ) : ( - - )} -
- - ); -}; - -type CreateBlockPoolProps = { - match: RouteMatch<{ appName: string; systemName: string }>; -}; - -export default CreateBlockPool; diff --git a/frontend/packages/ceph-storage-plugin/src/components/block-pool/footer.tsx b/frontend/packages/ceph-storage-plugin/src/components/block-pool/footer.tsx deleted file mode 100644 index f726f5c067c2..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/components/block-pool/footer.tsx +++ /dev/null @@ -1,48 +0,0 @@ -import * as React from 'react'; -import { useTranslation } from 'react-i18next'; - -import { useFlag } from '@console/shared/src/hooks/flag'; -import { ButtonBar } from '@console/internal/components/utils'; -import { ActionGroup, Button } from '@patternfly/react-core'; - -import { FEATURES } from '../../features'; -import { checkRequiredValues, BlockPoolState } from '../../utils/block-pool'; - -import './create-block-pool.scss'; - -export const BlockPoolFooter = (props: BlockPoolFooterProps) => { - const { state, cancel, onConfirm } = props; - const { t } = useTranslation(); - - const isPoolManagementSupported = useFlag(FEATURES.OCS_POOL_MANAGEMENT); - - return ( - - - - - - - ); -}; - -type BlockPoolFooterProps = { - state: BlockPoolState; - cancel: () => void; - onConfirm: () => void; -}; diff --git a/frontend/packages/ceph-storage-plugin/src/components/bucket-class/_backingstore-table.scss b/frontend/packages/ceph-storage-plugin/src/components/bucket-class/_backingstore-table.scss deleted file mode 100644 index b402973b1cd3..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/components/bucket-class/_backingstore-table.scss +++ /dev/null @@ -1,30 +0,0 @@ -.nb-bs-table { - .pf-c-toolbar { - --pf-c-toolbar__content--PaddingLeft: 0; - } - - .pf-c-table { - border: var(--pf-global--BorderWidth--sm) solid var(--pf-global--BorderColor--100); - - &__check { - width: 8.333% !important; - - @media screen and (max-width: 768px) { - width: 16.66% !important; - } - } - } - - .co-m-pane__body { - padding: 0; - } -} - -.nb-bc-step-page-form { - .pf-c-alert.pf-m-inline.pf-m-info.co-alert { - margin-bottom: 0; - } - &--margin { - margin-bottom: var(--pf-global--spacer--lg); - } -} diff --git a/frontend/packages/ceph-storage-plugin/src/components/bucket-class/backingstore-table.tsx b/frontend/packages/ceph-storage-plugin/src/components/bucket-class/backingstore-table.tsx deleted file mode 100644 index 4a46ae38f69f..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/components/bucket-class/backingstore-table.tsx +++ /dev/null @@ -1,304 +0,0 @@ -import * as React from 'react'; -import { useTranslation } from 'react-i18next'; -import * as classNames from 'classnames'; -import * as _ from 'lodash'; -import { compose } from 'redux'; -import { Title, Flex, FlexItem, Button, FormGroup, Form, Alert } from '@patternfly/react-core'; -import { PlusCircleIcon } from '@patternfly/react-icons'; -import { IRow, sortable } from '@patternfly/react-table'; -import { - getNamespace, - getName, - useSelectList, - getUID, - useDeepCompareMemoize, -} from '@console/shared'; -import { Table, ListPage } from '@console/internal/components/factory'; -import { getFilteredRows } from '@console/internal/components/factory/table-data-hook'; -import { Filter } from '@console/internal/components/factory/table'; -import { ResourceLink } from '@console/internal/components/utils'; -import { referenceForModel } from '@console/internal/module/k8s'; -import { - getBucketName, - getRegion, - getBackingStoreType, - getBSLabel, -} from 
'../../utils/noobaa-utils'; -import CreateBackingStoreFormModal from '../create-backingstore-page/create-bs-modal'; -import { NooBaaBackingStoreModel } from '../../models'; -import { BackingStoreKind, PlacementPolicy } from '../../types'; -import './_backingstore-table.scss'; - -const tableColumnClasses = [ - '', - classNames('pf-m-hidden', 'pf-m-visible-on-sm'), - classNames('pf-m-hidden', 'pf-m-visible-on-sm'), - classNames('pf-m-hidden', 'pf-m-visible-on-sm'), -]; - -const getRows: GetRows = (rowProps, selectedItems) => { - const { - componentProps: { data }, - } = rowProps; - - const rows = data.map((bs) => { - const cells: IRow['cells'] = [ - { - title: ( - - ), - }, - { - title: getBucketName(bs) || '-', - }, - { - title: getBackingStoreType(bs) || '-', - }, - { - title: getRegion(bs) || '-', - }, - ]; - return { - cells, - selected: selectedItems?.has(bs.metadata.uid), - props: { - id: getUID(bs), - }, - }; - }); - return rows; -}; - -const BackingStoreTable: React.FC = (props) => { - const { t } = useTranslation(); - - const { - customData: { onRowsSelected, preSelected }, - data, - filters, - } = props; - const visibleRows = getFilteredRows(filters, null, data); - const visibleUIDs = React.useMemo(() => new Set(visibleRows?.map(getUID)), [visibleRows]); - const { onSelect, selectedRows, updateSelectedRows } = useSelectList( - data, - visibleUIDs, - onRowsSelected, - ); - const memoizedData = useDeepCompareMemoize(data, true); - const memoizedPreSelected = useDeepCompareMemoize(preSelected, true); - React.useEffect(() => { - if (!_.isEmpty(memoizedPreSelected) && selectedRows.size === 0 && !_.isEmpty(memoizedData)) { - const preSelectedRows = memoizedData.filter((item) => - memoizedPreSelected.includes(getUID(item)), - ); - updateSelectedRows(preSelectedRows); - } - }, [memoizedData, memoizedPreSelected, selectedRows.size, updateSelectedRows]); - - const getColumns = () => [ - { - title: t('ceph-storage-plugin~Name'), - sortField: 'metadata.name', - transforms: [sortable], - props: { className: tableColumnClasses[0] }, - }, - { - title: t('ceph-storage-plugin~Bucket Name'), - props: { className: tableColumnClasses[1] }, - }, - { - title: t('ceph-storage-plugin~Type'), - props: { className: tableColumnClasses[2] }, - }, - { - title: t('ceph-storage-plugin~Region'), - props: { className: tableColumnClasses[3] }, - }, - ]; - - return ( -
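The effect deleted above seeds the table selection from pre-selected UIDs once data arrives, and only while nothing is selected yet, so later user choices are never overwritten. A simplified sketch (plain shapes, not the console's useSelectList hook):

// Sketch of the pre-selection seeding: returns the rows to select, or an empty
// list when seeding should not run.
type Row = { uid: string };

const seedSelection = (rows: Row[], preSelected: string[], selectedCount: number): Row[] =>
  selectedCount === 0 && rows.length > 0 && preSelected.length > 0
    ? rows.filter((row) => preSelected.includes(row.uid))
    : [];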
getRows(rowProps, selectedRows)} - aria-label={t('ceph-storage-plugin~BackingStore Table')} - /> - ); -}; - -const BackingStoreList: React.FC = ({ - unselectableItems = [], - onSelectBackingStore, - preSelected = [], - name, -}) => { - const flatten = compose( - (data: BackingStoreKind[]) => - _.filter(data, (item) => !unselectableItems.includes(item?.metadata?.uid)), - (resources) => resources?.[referenceForModel(NooBaaBackingStoreModel)]?.data ?? {}, - ); - - return ( -
- -
- ); -}; - -const BackingStoreSelection: React.FC = (props) => { - const { - namespace, - tier1Policy, - tier2Policy, - setSelectedTierA, - setSelectedTierB, - hideCreateBackingStore = false, - } = props; - - const { t } = useTranslation(); - - const openModal = () => CreateBackingStoreFormModal({ namespace }); - const selectedTierA = props.selectedTierA.map(getUID); - const selectedTierB = props.selectedTierB.map(getUID); - - return ( - <> -
- {!!tier2Policy && ( - - )} - - <Flex justifyContent={{ default: 'justifyContentSpaceBetween' }}> - <Title headingLevel="h3" size="xl"> - {t('ceph-storage-plugin~Tier 1 - BackingStores')}{' '} - {tier1Policy ? `(${tier1Policy})` : ''} - - {!hideCreateBackingStore && ( - - - - )} - - - - - - -

- {t('ceph-storage-plugin~{{bs, number}} BackingStore', { - bs: selectedTierA.length, - count: selectedTierA.length, - })}{' '} - {t('ceph-storage-plugin~selected')} -

- - {!!tier2Policy && ( -
- - {t('ceph-storage-plugin~Tier 2 - BackingStores')}{' '} - {tier2Policy ? `(${tier2Policy})` : ''} - - - - -

- {t('ceph-storage-plugin~{{bs, number}} BackingStore', { - bs: selectedTierB.length, - count: selectedTierB.length, - })}{' '} - {t('ceph-storage-plugin~selected')} -

- - )} - - ); -}; - -export default BackingStoreSelection; - -type BackingStoreTableProps = { - data?: BackingStoreKind[]; - customData?: { - onRowsSelected?: (arg: BackingStoreKind[]) => void; - preSelected?: string[]; - }; - filters?: Filter[]; - preSelected?: string[]; -}; - -type BackingStoreListProps = { - unselectableItems?: string[]; - onSelectBackingStore?: (arg: BackingStoreKind[]) => void; - preSelected?: string[]; - name?: string; -}; - -type BackingStoreSelectionProps = { - namespace: string; - selectedTierA: BackingStoreKind[]; - setSelectedTierA: (arg: BackingStoreKind[]) => void; - selectedTierB: BackingStoreKind[]; - setSelectedTierB: (arg: BackingStoreKind[]) => void; - tier1Policy: PlacementPolicy; - tier2Policy: PlacementPolicy; - hideCreateBackingStore?: boolean; -}; - -type GetRows = ( - rowProps: { componentProps: { data: BackingStoreKind[] } }, - selectedItems: Set, -) => { cells: IRow['cells']; selected: boolean; props: { id: string } }[]; diff --git a/frontend/packages/ceph-storage-plugin/src/components/bucket-class/create-bc.scss b/frontend/packages/ceph-storage-plugin/src/components/bucket-class/create-bc.scss deleted file mode 100644 index db89d938f8f6..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/components/bucket-class/create-bc.scss +++ /dev/null @@ -1,93 +0,0 @@ -/* Step pages */ -.nb-create-bc-step-page { - display: block; - width: 100%; -} - -.nb-create-bc-step-page-form__radio { - padding-bottom: var(--pf-global--spacer--md); -} -.nb-bc-bs-table__data { - overflow: hidden; - text-overflow: ellipsis; - white-space: nowrap; -} - -.nb-create-bc-step-page-form__dropdown { - display: block; - min-height: 12rem; -} - -.nb-create-bc-list--indent { - margin-left: var(--pf-global--spacer--lg); - width: fit-content; -} -.nb-create-bc-step-page-form { - display: block; - width: 80%; - margin-right: auto; - margin-bottom: var(--pf-global--spacer--lg); -} - -.nb-create-bc-step-page-form__element { - margin-top: var(--pf-global--spacer--md); - margin-bottom: var(--pf-global--spacer--md); - cursor: default; -} - -.nb-create-bc-step-page-form__element--light-text { - margin-top: var(--pf-global--spacer--sm); - margin-bottom: var(--pf-global--spacer--sm); - color: var(--pf-global--Color--400); -} - -.nb-bc-step-page-form__element--short { - width: 30%; - display: flex; - margin-top: var(--pf-global--spacer--md); -} - -.nb-bc-bs-page__table--short { - width: 90%; - max-height: 50vh; - overflow-y: auto; -} - -/* Review page */ -.nb-create-bc-step-page-review__item { - margin-bottom: var(--pf-global--spacer--lg); - margin-top: var(--pf-global--spacer--lg); -} - -.nb-create-bc-step-page-review__item-header { - color: var(--pf-global--Color--400); -} - -.nb-create-bc-step-page-review__item-tier1 { - margin-bottom: var(--pf-global--spacer--lg); -} - -.nb-create-bc-step-page__info { - margin-bottom: var(--pf-global--spacer--lg); -} - -.nb-create-bc-step-page--danger { - margin-bottom: var(--pf-global--spacer--lg); -} - -.nb-bc-step-page-form__title { - margin-bottom: var(--pf-global--spacer--md); -} - -.nb-create-bc-header { - padding: var(--pf-global--spacer--lg) 0 var(--pf-global--spacer--md) var(--pf-global--spacer--md); -} - -.nb-create-bc-wizard { - overflow-y: auto; -} - -.nb-bc-create-review__selected-stores { - padding-top: var(--pf-global--spacer--sm); - padding-bottom: var(--pf-global--spacer--sm); -} diff --git a/frontend/packages/ceph-storage-plugin/src/components/bucket-class/create-bc.tsx 
b/frontend/packages/ceph-storage-plugin/src/components/bucket-class/create-bc.tsx deleted file mode 100644 index d8f6d429d8c3..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/components/bucket-class/create-bc.tsx +++ /dev/null @@ -1,322 +0,0 @@ -import * as React from 'react'; -import { useTranslation } from 'react-i18next'; -import { RouteComponentProps } from 'react-router'; -import { Title, Wizard, WizardStep } from '@patternfly/react-core'; -import { - apiVersionForModel, - k8sCreate, - k8sGet, - referenceForModel, -} from '@console/internal/module/k8s'; -import { history } from '@console/internal/components/utils/router'; -import { ClusterServiceVersionModel } from '@console/operator-lifecycle-manager'; -import { getName } from '@console/shared'; -import { useFlag } from '@console/shared/src/hooks/flag'; -import GeneralPage from './wizard-pages/general-page'; -import PlacementPolicyPage from './wizard-pages/placement-policy-page'; -import BackingStorePage from './wizard-pages/backingstore-page'; -import ReviewPage from './wizard-pages/review-page'; -import { initialState, reducer, State } from './state'; -import './create-bc.scss'; -import { NamespacePolicyPage } from './wizard-pages/namespace-policy-page'; -import { SingleNamespaceStorePage } from './wizard-pages/namespace-store-pages/single-namespace-store'; -import { CacheNamespaceStorePage } from './wizard-pages/namespace-store-pages/cache-namespace-store'; -import { MultiNamespaceStorePage } from './wizard-pages/namespace-store-pages/multi-namespace-store'; -import { BucketClassType, NamespacePolicyType } from '../../constants/bucket-class'; -import { validateBucketClassName, validateDuration } from '../../utils/bucket-class'; -import { NooBaaBucketClassModel } from '../../models'; -import { PlacementPolicy } from '../../types'; -import { ODF_MODEL_FLAG, CEPH_STORAGE_NAMESPACE } from '../../constants'; - -enum CreateStepsBC { - GENERAL = 'GENERAL', - PLACEMENT = 'PLACEMENT', - RESOURCES = 'RESOURCES', - REVIEW = 'REVIEW', -} - -const CreateBucketClass: React.FC = ({ match }) => { - const { t } = useTranslation(); - const [state, dispatch] = React.useReducer(reducer, initialState); - const { ns = CEPH_STORAGE_NAMESPACE, appName } = match.params; - const [clusterServiceVersion, setClusterServiceVersion] = React.useState(null); - const isODF = useFlag(ODF_MODEL_FLAG); - - React.useEffect(() => { - k8sGet(ClusterServiceVersionModel, appName, ns) - .then((clusterServiceVersionObj) => { - setClusterServiceVersion(clusterServiceVersionObj); - }) - .catch(() => setClusterServiceVersion(null)); - }, [appName, ns]); - - const getNamespaceStorePage = () => { - switch (state.namespacePolicyType) { - case NamespacePolicyType.SINGLE: - return ; - case NamespacePolicyType.CACHE: - return ; - case NamespacePolicyType.MULTI: - return ; - default: - return null; - } - }; - - const getPayload = (currentState: State) => { - const metadata = { - apiVersion: apiVersionForModel(NooBaaBucketClassModel), - kind: NooBaaBucketClassModel.kind, - metadata: { - name: currentState.bucketClassName, - namespace: ns, - }, - }; - let payload = null; - if (currentState.bucketClassType === BucketClassType.STANDARD) { - payload = { - ...metadata, - spec: { - placementPolicy: { - tiers: [ - { - placement: currentState.tier1Policy, - backingStores: currentState.tier1BackingStore.map(getName), - }, - ], - }, - }, - }; - if (currentState.tier2Policy) { - payload.spec.placementPolicy.tiers.push({ - placement: currentState.tier2Policy, - backingStores: 
currentState.tier2BackingStore.map(getName), - }); - } - } else { - switch (currentState.namespacePolicyType) { - case NamespacePolicyType.SINGLE: - payload = { - ...metadata, - spec: { - namespacePolicy: { - type: currentState.namespacePolicyType, - single: { - resource: getName(currentState.readNamespaceStore[0]), - }, - }, - }, - }; - break; - case NamespacePolicyType.MULTI: - payload = { - ...metadata, - spec: { - namespacePolicy: { - type: state.namespacePolicyType, - multi: { - writeResource: getName(state.writeNamespaceStore[0]), - readResources: state.readNamespaceStore.map(getName), - }, - }, - }, - }; - break; - case NamespacePolicyType.CACHE: - payload = { - ...metadata, - spec: { - namespacePolicy: { - type: currentState.namespacePolicyType, - cache: { - caching: { - ttl: currentState.timeToLive, - }, - hubResource: getName(currentState.hubNamespaceStore), - }, - }, - placementPolicy: { - tiers: [ - { - backingStores: [getName(currentState.cacheBackingStore)], - }, - ], - }, - }, - }; - break; - default: - return null; - } - } - return payload; - }; - const finalStep = () => { - dispatch({ type: 'setIsLoading', value: true }); - const payload = getPayload(state); - const promiseObj = k8sCreate(NooBaaBucketClassModel, payload); - promiseObj - .then((obj) => { - const resourcePath = `${referenceForModel(NooBaaBucketClassModel)}/${getName(obj)}`; - dispatch({ type: 'setIsLoading', value: false }); - isODF - ? history.push(`/odf/resource/${resourcePath}`) - : history.push( - `/k8s/ns/${ns}/clusterserviceversions/${getName( - clusterServiceVersion, - )}/${resourcePath}`, - ); - }) - .catch((err) => { - dispatch({ type: 'setIsLoading', value: false }); - dispatch({ type: 'setError', value: err.message }); - }); - }; - - const backingStoreNextConditions = () => { - if (state.tier1BackingStore.length === 0) return false; - if (state.tier1Policy === PlacementPolicy.Mirror && state.tier1BackingStore.length < 2) - return false; - if (state.tier2Policy === PlacementPolicy.Mirror && state.tier2BackingStore.length < 2) - return false; - if (!!state.tier2Policy && state.tier2BackingStore.length === 0) return false; - return true; - }; - - const namespaceStoreNextConditions = () => { - if (state.namespacePolicyType === NamespacePolicyType.SINGLE) { - return state.readNamespaceStore.length === 1 && state.writeNamespaceStore.length === 1; - } - if (state.namespacePolicyType === NamespacePolicyType.CACHE) { - return ( - !!state.hubNamespaceStore && !!state.cacheBackingStore && validateDuration(state.timeToLive) - ); - } - if (state.namespacePolicyType === NamespacePolicyType.MULTI) { - return state.readNamespaceStore.length >= 1 && state.writeNamespaceStore.length === 1; - } - return false; - }; - - const creationConditionsSatisfied = () => { - return ( - (state.bucketClassType === BucketClassType.STANDARD - ? 
backingStoreNextConditions() - : namespaceStoreNextConditions()) && !!state.bucketClassName - ); - }; - - const [currentStep, setCurrentStep] = React.useState(1); - const [stepsReached, setStepsReached] = React.useState(1); - - const StepPositionMap = Object.entries(CreateStepsBC).reduce((acc, cur, index) => { - acc[cur[0]] = index + 1; - return acc; - }, {}); - - const canJumpToHelper = (that) => { - const currentId = StepPositionMap[that.id]; - if (currentId === currentStep && !that.enableNext) { - setStepsReached(currentId); - } - return stepsReached >= currentId; - }; - - const steps: WizardStep[] = [ - { - id: CreateStepsBC.GENERAL, - name: t('ceph-storage-plugin~General'), - component: , - enableNext: validateBucketClassName(state.bucketClassName.trim()), - get canJumpTo() { - return canJumpToHelper(this); - }, - }, - { - id: CreateStepsBC.PLACEMENT, - name: t('ceph-storage-plugin~Placement Policy'), - component: - state.bucketClassType === BucketClassType.STANDARD ? ( - - ) : ( - - ), - enableNext: - state.bucketClassType === BucketClassType.STANDARD - ? !!state.tier1Policy - : !!state.namespacePolicyType, - get canJumpTo() { - return canJumpToHelper(this); - }, - }, - { - id: CreateStepsBC.RESOURCES, - name: t('ceph-storage-plugin~Resources'), - component: - state.bucketClassType === BucketClassType.STANDARD ? ( - - ) : ( - getNamespaceStorePage() - ), - enableNext: - state.bucketClassType === BucketClassType.STANDARD - ? backingStoreNextConditions() - : namespaceStoreNextConditions(), - get canJumpTo() { - return canJumpToHelper(this); - }, - }, - { - id: CreateStepsBC.REVIEW, - name: t('ceph-storage-plugin~Review'), - component: , - nextButtonText: t('ceph-storage-plugin~Create BucketClass'), - enableNext: creationConditionsSatisfied(), - get canJumpTo() { - return canJumpToHelper(this); - }, - }, - ]; - - return ( - <> -
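The step gating above hinges on StepPositionMap and the canJumpTo getters: a step is reachable only if the user has already been there, and a current step whose condition fails caps the reachable bound at its own position. A condensed sketch of that check:

// Sketch of the wizard gating (positions are 1-based, matching StepPositionMap).
const stepIds = ['GENERAL', 'PLACEMENT', 'RESOURCES', 'REVIEW'] as const;

const stepPosition: Record<string, number> = Object.fromEntries(
  stepIds.map((id, index) => [id, index + 1]),
);

function canJumpTo(id: string, currentStep: number, stepsReached: number, enableNext: boolean) {
  const position = stepPosition[id];
  // A failing current step becomes the new upper bound for this check.
  const reached = position === currentStep && !enableNext ? position : stepsReached;
  return reached >= position;
}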
- - {t('ceph-storage-plugin~Create new BucketClass')} - -

- {t(
-   'ceph-storage-plugin~BucketClass is a CRD representing a class for buckets that defines tiering policies and data placement for an OBC.',
- )}
-

-
-
- history.goBack()} - onNext={({ id }) => { - setCurrentStep(currentStep + 1); - const idIndexPlusOne = StepPositionMap[id]; - const newStepHigherBound = - stepsReached < idIndexPlusOne ? idIndexPlusOne : stepsReached; - setStepsReached(newStepHigherBound); - }} - onBack={() => { - setCurrentStep(currentStep - 1); - }} - onGoToStep={(newStep) => { - setCurrentStep(StepPositionMap[newStep.id]); - }} - /> -
- - ); -}; - -type CreateBCProps = RouteComponentProps<{ ns?: string; appName?: string }>; - -export default CreateBucketClass; diff --git a/frontend/packages/ceph-storage-plugin/src/components/bucket-class/modals/_bs-modal.scss b/frontend/packages/ceph-storage-plugin/src/components/bucket-class/modals/_bs-modal.scss deleted file mode 100644 index 764c533181e4..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/components/bucket-class/modals/_bs-modal.scss +++ /dev/null @@ -1,36 +0,0 @@ -.nb-bc-modal { - max-height: 70vh; - overflow-y: auto; - - .modal-body { - background-color: var(--pf-global--BackgroundColor--100); - } - - @media screen and (max-width: 426px) { - max-height: 73vh; - } - - .co-m-nav-title { - padding: 0; - margin: 0; - } - - &__text{ - padding-bottom: var(--pf-global--spacer--md); - } -} - -.nb-create-bc-step-page-form__dropdown { - min-height: 12rem; -} - -.nb-edit-modal__save-btn { - margin-left: var(--pf-global--spacer--md); -} - -.nb-modal { - width: 53%; - @media screen and (max-width: 426px) { - width: unset; - } -} diff --git a/frontend/packages/ceph-storage-plugin/src/components/bucket-class/modals/edit-backingstore-modal.tsx b/frontend/packages/ceph-storage-plugin/src/components/bucket-class/modals/edit-backingstore-modal.tsx deleted file mode 100644 index 0976f88921cf..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/components/bucket-class/modals/edit-backingstore-modal.tsx +++ /dev/null @@ -1,291 +0,0 @@ -import * as React from 'react'; -import { useTranslation } from 'react-i18next'; -import { TFunction } from 'i18next'; -import { ActionGroup, Button } from '@patternfly/react-core'; -import { k8sUpdate, K8sKind } from '@console/internal/module/k8s'; -import { - ModalTitle, - ModalBody, - ModalFooter, - ModalComponentProps, - createModalLauncher, - CreateModalLauncherProps, -} from '@console/internal/components/factory'; -import { withHandlePromise, HandlePromiseProps } from '@console/internal/components/utils'; -import { useK8sGet } from '@console/internal/components/utils/k8s-get-hook'; -import { getName, useFlag } from '@console/shared'; -import { - NooBaaBucketClassModel, - NooBaaBackingStoreModel, - NooBaaNamespaceStoreModel, -} from '../../../models'; -import { getBackingStoreNames, getBackingStorePolicy } from '../../../utils/noobaa-utils'; -import BackingStoreSelection from '../backingstore-table'; -import { - BackingStoreKind, - K8sListResponse, - BucketClassKind, - PlacementPolicy, - NamespaceStoreKind, -} from '../../../types'; -import './_bs-modal.scss'; -import { initialState, reducer } from '../state'; -import { NamespacePolicyType } from '../../../constants/bucket-class'; -import { SingleNamespaceStorePage } from '../wizard-pages/namespace-store-pages/single-namespace-store'; -import { CacheNamespaceStorePage } from '../wizard-pages/namespace-store-pages/cache-namespace-store'; -import { MultiNamespaceStorePage } from '../wizard-pages/namespace-store-pages/multi-namespace-store'; -import { validateDuration } from '../../../utils/bucket-class'; -import { FEATURES } from '../../../features'; - -const BucketClassEditModal = withHandlePromise< - HandlePromiseProps & BucketClassEditModalProps & ModalComponentProps & CreateModalLauncherProps ->((props) => { - const { t } = useTranslation(); - const { bucketClass, inProgress, errorMessage, handlePromise, close, cancel } = props; - const isNamespaceStoreSupported = useFlag(FEATURES.OCS_NAMESPACE_STORE); - const isNamespaceType = isNamespaceStoreSupported && 
!!bucketClass.spec?.namespacePolicy; - const [state, dispatch] = React.useReducer(reducer, initialState); - const [data, loaded, loadError] = useK8sGet( - NooBaaBackingStoreModel, - null, - bucketClass.metadata.namespace, - ); - - const [nsData, nsLoaded, nsLoadErr] = useK8sGet( - NooBaaNamespaceStoreModel, - null, - bucketClass.metadata.namespace, - ); - - const policyA = getBackingStorePolicy(bucketClass, 0); - const policyB = getBackingStorePolicy(bucketClass, 1); - - const getNamespaceStorePage = () => { - switch (state.namespacePolicyType) { - case NamespacePolicyType.SINGLE: - return ( - - ); - case NamespacePolicyType.CACHE: - return ( - - ); - case NamespacePolicyType.MULTI: - return ( - - ); - default: - return null; - } - }; - - // Resolve to BackingStore Objects from Name - React.useEffect(() => { - if (!isNamespaceType && loaded && !loadError) { - const bsNamesTier1 = getBackingStoreNames(bucketClass, 0); - const bsNamesTier2 = getBackingStoreNames(bucketClass, 1); - const bsTier1 = (data as K8sListResponse).items?.filter((item) => - bsNamesTier1.includes(getName(item)), - ); - const bsTier2 = (data as K8sListResponse).items?.filter((item) => - bsNamesTier2.includes(getName(item)), - ); - dispatch({ type: 'setBackingStoreTier1', value: bsTier1 }); - dispatch({ type: 'setBackingStoreTier2', value: bsTier2 }); - } - }, [data, loaded, loadError, bucketClass, isNamespaceType]); - - React.useEffect(() => { - if (isNamespaceType && nsLoaded && !nsLoadErr) { - dispatch({ type: 'setNamespacePolicyType', value: bucketClass.spec?.namespacePolicy.type }); - if (bucketClass.spec?.namespacePolicy.type === NamespacePolicyType.SINGLE) { - const singleNS = (nsData as K8sListResponse).items.find( - (item) => getName(item) === bucketClass.spec.namespacePolicy.single.resource, - ); - dispatch({ type: 'setWriteNamespaceStore', value: [singleNS] }); - dispatch({ type: 'setReadNamespaceStore', value: [singleNS] }); - } - if (bucketClass.spec?.namespacePolicy.type === NamespacePolicyType.MULTI) { - const writeNS = (nsData as K8sListResponse).items.find( - (item) => getName(item) === bucketClass.spec.namespacePolicy.multi.writeResource, - ); - const readNS = (nsData as K8sListResponse).items.filter((item) => - bucketClass.spec.namespacePolicy.multi.readResources.includes(getName(item)), - ); - dispatch({ type: 'setWriteNamespaceStore', value: [writeNS] }); - dispatch({ type: 'setReadNamespaceStore', value: readNS }); - } - if (bucketClass.spec?.namespacePolicy.type === NamespacePolicyType.CACHE) { - const hubNS = (nsData as K8sListResponse).items.find( - (item) => getName(item) === bucketClass.spec.namespacePolicy.cache.hubResource, - ); - const cacheBS = (data as K8sListResponse).items.find((item) => - bucketClass.spec.placementPolicy.tiers[0].backingStores.includes(getName(item)), - ); - dispatch({ type: 'setHubNamespaceStore', value: hubNS }); - dispatch({ type: 'setCacheBackingStore', value: cacheBS }); - } - } - }, [bucketClass, nsData, nsLoaded, nsLoadErr, isNamespaceType, data]); - - const isEnabled = (() => { - const satifiesPolicyA = (() => { - if (policyA === PlacementPolicy.Spread) { - return state.tier1BackingStore?.length >= 1; - } - if (policyA === PlacementPolicy.Mirror) { - return state.tier1BackingStore?.length >= 2; - } - return false; - })(); - const satifiesPolicyB = policyB - ? policyB === PlacementPolicy.Spread - ? 
state.tier2BackingStore?.length >= 1 - : state.tier2BackingStore?.length >= 2 - : true; - return satifiesPolicyA && satifiesPolicyB; - })(); - - const isEnabledNS = () => { - if (state.namespacePolicyType === NamespacePolicyType.SINGLE) { - return state.readNamespaceStore.length === 1 && state.writeNamespaceStore.length === 1; - } - if (state.namespacePolicyType === NamespacePolicyType.MULTI) { - return state.readNamespaceStore.length >= 1 && state.writeNamespaceStore.length === 1; - } - if (state.namespacePolicyType === NamespacePolicyType.CACHE) { - return ( - !!state.hubNamespaceStore && !!state.cacheBackingStore && validateDuration(state.timeToLive) - ); - } - return false; - }; - - const onSubmit = () => { - if (!isNamespaceType) { - bucketClass.spec.placementPolicy.tiers[0].backingStores = state.tier1BackingStore.map( - getName, - ); - if (policyB?.length) { - bucketClass.spec.placementPolicy.tiers[1].backingStores = state.tier2BackingStore.map( - getName, - ); - } - } else { - switch (state.namespacePolicyType) { - case NamespacePolicyType.SINGLE: - bucketClass.spec.namespacePolicy.single.resource = getName(state.readNamespaceStore[0]); - break; - case NamespacePolicyType.MULTI: - bucketClass.spec.namespacePolicy.multi.writeResource = getName( - state.writeNamespaceStore[0], - ); - bucketClass.spec.namespacePolicy.multi.readResources = state.readNamespaceStore.map( - getName, - ); - break; - case NamespacePolicyType.CACHE: - bucketClass.spec.namespacePolicy.cache.hubResource = getName(state.hubNamespaceStore); - bucketClass.spec.namespacePolicy.cache.caching.ttl = state.timeToLive; - bucketClass.spec.placementPolicy.tiers[0].backingStores = [ - getName(state.cacheBackingStore), - ]; - break; - default: - } - } - - handlePromise( - k8sUpdate( - NooBaaBucketClassModel, - bucketClass, - bucketClass.metadata.namespace, - bucketClass.metadata.name, - ), - close, - ); - }; - return ( - <> - {t('ceph-storage-plugin~Edit BucketClass Resource')} -
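The isEnabled check above encodes the placement rules: Spread tiers need at least one BackingStore, Mirror tiers need at least two, and an unconfigured second tier passes. A small sketch of the rule:

// Sketch of the per-tier validation used by the edit modal above.
type Policy = 'Spread' | 'Mirror' | null;

const tierSatisfied = (policy: Policy, storeCount: number): boolean => {
  if (policy === 'Spread') return storeCount >= 1;
  if (policy === 'Mirror') return storeCount >= 2;
  return true; // no tier configured
};

// tierSatisfied('Mirror', 1) === false; tierSatisfied(null, 0) === true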
- -

- {t( - 'ceph-storage-plugin~{{storeType}} represents a storage target to be used as the underlying storage for the data in Multicloud Object Gateway buckets.', - { - storeType: isNamespaceType - ? t('ceph-storage-plugin~NamespaceStore') - : t('ceph-storage-plugin~BackingStore'), - }, - )} -

- {!isNamespaceType ? ( - - dispatch({ type: 'setBackingStoreTier1', value: selectedA }) - } - setSelectedTierB={(selectedB) => - dispatch({ type: 'setBackingStoreTier2', value: selectedB }) - } - hideCreateBackingStore - /> - ) : ( - getNamespaceStorePage() - )} -
-
- - - - - - - - ); -}); - -export default createModalLauncher(BucketClassEditModal); - -type BucketClassEditModalProps = { - bucketClass: BucketClassKind; -}; - -export const editBucketClass = (t: TFunction) => (_kind: K8sKind, resource: BucketClassKind) => ({ - labelKey: t('ceph-storage-plugin~Edit Bucket Class Resources'), - callback: () => createModalLauncher(BucketClassEditModal)({ bucketClass: resource as any }), -}); diff --git a/frontend/packages/ceph-storage-plugin/src/components/bucket-class/review-utils.tsx b/frontend/packages/ceph-storage-plugin/src/components/bucket-class/review-utils.tsx deleted file mode 100644 index bd923039e445..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/components/bucket-class/review-utils.tsx +++ /dev/null @@ -1,22 +0,0 @@ -import * as React from 'react'; -import { TextContent, Text, TextVariants, CardBody, Card } from '@patternfly/react-core'; -import { BackingStoreKind, NamespaceStoreKind } from '../../types'; - -export const StoreCard: React.FC = ({ resources }) => - !!resources.length && ( - - - - {resources.map((res) => ( - - {res.metadata.name} - - ))} - - - - ); - -type StoreCardProp = { - resources: (NamespaceStoreKind | BackingStoreKind)[]; -}; diff --git a/frontend/packages/ceph-storage-plugin/src/components/bucket-class/state.ts b/frontend/packages/ceph-storage-plugin/src/components/bucket-class/state.ts deleted file mode 100644 index 2d578b7ab29f..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/components/bucket-class/state.ts +++ /dev/null @@ -1,102 +0,0 @@ -import { BucketClassType, NamespacePolicyType, TimeUnits } from '../../constants/bucket-class'; -import { BackingStoreKind, NamespaceStoreKind, PlacementPolicy } from '../../types'; - -export const initialState = { - namespace: 'openshift-storage', - bucketClassName: '', - bucketClassType: BucketClassType.STANDARD, - namespacePolicyType: NamespacePolicyType.SINGLE, - readNamespaceStore: [], - writeNamespaceStore: [], - hubNamespaceStore: null, - cacheBackingStore: null, - timeToLive: 0, - timeUnit: TimeUnits.HOUR, - description: '', - tier1Policy: PlacementPolicy.Spread, - tier2Policy: null, - tier1BackingStore: [], - tier2BackingStore: [], - isLoading: false, - error: '', -}; - -export type State = { - namespace: string; - bucketClassName: string; - description: string; - bucketClassType: BucketClassType; - namespacePolicyType: NamespacePolicyType; - readNamespaceStore: NamespaceStoreKind[]; - writeNamespaceStore: NamespaceStoreKind[]; - hubNamespaceStore: NamespaceStoreKind; - cacheBackingStore: BackingStoreKind; - timeToLive: number; - timeUnit: TimeUnits; - tier1Policy: PlacementPolicy; - tier2Policy: PlacementPolicy; - tier1BackingStore: BackingStoreKind[]; - tier2BackingStore: BackingStoreKind[]; - isLoading: boolean; - error: string; -}; - -export type Action = - | { type: 'setNamespace'; name: string } - | { type: 'setBucketClassName'; name: string } - | { type: 'setBucketClassType'; value: BucketClassType } - | { type: 'setNamespacePolicyType'; value: NamespacePolicyType } - | { type: 'setReadNamespaceStore'; value: NamespaceStoreKind[] } - | { type: 'setWriteNamespaceStore'; value: NamespaceStoreKind[] } - | { type: 'setHubNamespaceStore'; value: NamespaceStoreKind } - | { type: 'setCacheBackingStore'; value: BackingStoreKind } - | { type: 'setTimeToLive'; value: number } - | { type: 'setTimeUnit'; value: TimeUnits } - | { type: 'setDescription'; value: string } - | { type: 'setPlacementPolicyTier1'; value: PlacementPolicy } - | { type: 
'setPlacementPolicyTier2'; value: PlacementPolicy } - | { type: 'setBackingStoreTier1'; value: BackingStoreKind[] } - | { type: 'setBackingStoreTier2'; value: BackingStoreKind[] } - | { type: 'setIsLoading'; value: boolean } - | { type: 'setError'; value: string }; - -export const reducer = (state: State, action: Action) => { - switch (action.type) { - case 'setNamespace': - return Object.assign({}, state, { namespace: action.name }); - case 'setBucketClassName': - return Object.assign({}, state, { bucketClassName: action.name }); - case 'setBucketClassType': - return Object.assign({}, state, { bucketClassType: action.value }); - case 'setNamespacePolicyType': - return Object.assign({}, state, { namespacePolicyType: action.value }); - case 'setReadNamespaceStore': - return Object.assign({}, state, { readNamespaceStore: action.value }); - case 'setWriteNamespaceStore': - return Object.assign({}, state, { writeNamespaceStore: action.value }); - case 'setHubNamespaceStore': - return Object.assign({}, state, { hubNamespaceStore: action.value }); - case 'setCacheBackingStore': - return Object.assign({}, state, { cacheBackingStore: action.value }); - case 'setTimeToLive': - return Object.assign({}, state, { timeToLive: action.value }); - case 'setTimeUnit': - return Object.assign({}, state, { timeUnit: action.value }); - case 'setDescription': - return Object.assign({}, state, { description: action.value }); - case 'setPlacementPolicyTier1': - return Object.assign({}, state, { tier1Policy: action.value }); - case 'setPlacementPolicyTier2': - return Object.assign({}, state, { tier2Policy: action.value }); - case 'setBackingStoreTier1': - return Object.assign({}, state, { tier1BackingStore: action.value }); - case 'setBackingStoreTier2': - return Object.assign({}, state, { tier2BackingStore: action.value }); - case 'setIsLoading': - return Object.assign({}, state, { isLoading: action.value }); - case 'setError': - return Object.assign({}, state, { error: action.value }); - default: - return initialState; - } -}; diff --git a/frontend/packages/ceph-storage-plugin/src/components/bucket-class/wizard-pages/backingstore-page.tsx b/frontend/packages/ceph-storage-plugin/src/components/bucket-class/wizard-pages/backingstore-page.tsx deleted file mode 100644 index 76e9f65c0e88..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/components/bucket-class/wizard-pages/backingstore-page.tsx +++ /dev/null @@ -1,64 +0,0 @@ -import * as React from 'react'; -import { useTranslation } from 'react-i18next'; -import { Alert, AlertActionCloseButton } from '@patternfly/react-core'; -import { FirehoseResult, ExternalLink } from '@console/internal/components/utils'; -import { K8sResourceKind } from '@console/internal/module/k8s'; -import { Action, State } from '../state'; -import BackingStoreSelection from '../backingstore-table'; - -const BackingStorePage: React.FC = React.memo( - ({ dispatcher, state, namespace }) => { - // CR data - // CR data clones to maintain order and selection state for table rows - const { tier2Policy, tier1Policy, tier1BackingStore, tier2BackingStore } = state; - const [showHelp, setShowHelp] = React.useState(true); - const { t } = useTranslation(); - - return ( -
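The reducer removed with state.ts copies state per action; note that its default branch resets to initialState instead of returning the current state, an unusual choice for React reducers. A trimmed-down sketch of the same pattern:

// Sketch of the state.ts reducer pattern with two representative actions.
type MiniState = { bucketClassName: string; isLoading: boolean };
type MiniAction =
  | { type: 'setBucketClassName'; name: string }
  | { type: 'setIsLoading'; value: boolean };

const miniInitialState: MiniState = { bucketClassName: '', isLoading: false };

const miniReducer = (state: MiniState, action: MiniAction): MiniState => {
  switch (action.type) {
    case 'setBucketClassName':
      return { ...state, bucketClassName: action.name };
    case 'setIsLoading':
      return { ...state, isLoading: action.value };
    default:
      // Mirrors the original: unknown actions reset to the initial state.
      return miniInitialState;
  }
};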
- {showHelp && ( - setShowHelp(false)} />} - > -

- {t( - 'ceph-storage-plugin~BackingStore represents a storage target to be used as the underlying storage for the data in Multicloud Object Gateway buckets.', - )} -

-

- {t(
-   'ceph-storage-plugin~Multiple types of BackingStores are supported: aws-s3, s3-compatible, google-cloud-storage, azure-blob, obc, PVC.',
- )}
-

- -
- )} - dispatcher({ type: 'setBackingStoreTier1', value: [...bs] })} - setSelectedTierB={(bs) => dispatcher({ type: 'setBackingStoreTier2', value: [...bs] })} - /> -
- ); - }, -); - -export default BackingStorePage; - -type BackingStorePageProps = { - backingStores?: FirehoseResult; - dispatcher: React.Dispatch; - state: State; - namespace: string; -}; diff --git a/frontend/packages/ceph-storage-plugin/src/components/bucket-class/wizard-pages/general-page.tsx b/frontend/packages/ceph-storage-plugin/src/components/bucket-class/wizard-pages/general-page.tsx deleted file mode 100644 index e54c4a2d77fb..000000000000 --- a/frontend/packages/ceph-storage-plugin/src/components/bucket-class/wizard-pages/general-page.tsx +++ /dev/null @@ -1,142 +0,0 @@ -import * as React from 'react'; -import { useTranslation } from 'react-i18next'; -import { - Alert, - AlertActionCloseButton, - Form, - FormGroup, - TextArea, - TextInput, - Radio, - ValidatedOptions, -} from '@patternfly/react-core'; -import { ExternalLink } from '@console/internal/components/utils'; -import { FieldLevelHelp } from '@console/internal/components/utils/field-level-help'; -import '../create-bc.scss'; -import { useFlag } from '@console/shared'; -import { Action, State } from '../state'; -import { bucketClassTypeRadios } from '../../../constants/bucket-class'; -import { validateBucketClassName } from '../../../utils/bucket-class'; -import { FEATURES } from '../../../features'; - -const GeneralPage: React.FC = ({ dispatch, state }) => { - const { t } = useTranslation(); - - const [showHelp, setShowHelp] = React.useState(true); - - const [validated, setValidated] = React.useState(ValidatedOptions.default); - - const isNamespaceStoreSupported = useFlag(FEATURES.OCS_NAMESPACE_STORE); - const onChange = (value: string) => { - dispatch({ type: 'setBucketClassName', name: value }); - if (validateBucketClassName(value)) { - setValidated(ValidatedOptions.success); - } else { - setValidated(ValidatedOptions.error); - } - }; - - return ( -
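GeneralPage's onChange above pairs the dispatched name update with a recomputed validation flag that drives the TextInput's visual state. A compact sketch of that wiring:

// Sketch of the controlled-validation pattern: dispatch the value, then flip the
// field's validated state based on the validator result.
type Validated = 'default' | 'success' | 'error';

function onNameChange(
  value: string,
  isValid: (name: string) => boolean,
  dispatch: (action: { type: 'setBucketClassName'; name: string }) => void,
  setValidated: (v: Validated) => void,
) {
  dispatch({ type: 'setBucketClassName', name: value });
  setValidated(isValid(value) ? 'success' : 'error');
}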
- {showHelp && ( - setShowHelp(false)} />} - > -

- {t(
-   'ceph-storage-plugin~A set of policies that apply to all buckets (OBCs) created with the specific bucket class. These policies include placement, namespace, and caching.',
- )}
-

- -
- )} -
- {isNamespaceStoreSupported && ( - - {bucketClassTypeRadios(t).map((radio) => { - const checked = radio.value === state.bucketClassType; - return ( - { - dispatch({ type: 'setBucketClassType', value: radio.value }); - }} - checked={checked} - className="nb-create-bc-step-page-form__radio" - name="bucketclasstype" - /> - ); - })} - - )} - -
-          <FieldLevelHelp>
-            <ul>
-              <li>{t('ceph-storage-plugin~3-63 chars')}</li>
-              <li>{t('ceph-storage-plugin~Starts and ends with lowercase number or letter')}</li>
-              <li>
-                {t(
-                  'ceph-storage-plugin~Only lowercase letters, numbers, non-consecutive periods or hyphens',
-                )}
-              </li>
-              <li>{t('ceph-storage-plugin~Avoid using the form of an IP address')}</li>
-              <li>{t('ceph-storage-plugin~Globally unique name')}</li>
-            </ul>
-          </FieldLevelHelp>
- - } - className="nb-create-bc-step-page-form__element" - fieldId="bucketclassname-input" - label={t('ceph-storage-plugin~BucketClass name')} - helperText={t( - 'ceph-storage-plugin~A unique name for the bucket class within the project.', - )} - > - -
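For reference, the naming rules listed above can be encoded roughly as follows; this is a sketch only, the real validateBucketClassName lives in utils/bucket-class and may differ in detail:

// Sketch: 3-63 chars, lowercase alphanumerics with hyphens and non-consecutive
// periods, alphanumeric at both ends, and nothing shaped like an IPv4 address.
const bucketClassNameValid = (name: string): boolean =>
  name.length >= 3 &&
  name.length <= 63 &&
  /^[a-z0-9]([a-z0-9-]|\.(?!\.))*[a-z0-9]$/.test(name) &&
  !/^(\d{1,3}\.){3}\d{1,3}$/.test(name); // avoid IP-address-like names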
- -