Added ceph osd tree related test cases for OCS expand
gnehapk committed Dec 17, 2019
1 parent 031407f commit 5ba2e44
Showing 3 changed files with 112 additions and 2 deletions.
@@ -17,19 +17,29 @@ import {
import {
CLUSTER_STATUS,
EXPAND_WAIT,
HOST,
KIND,
NS,
OSD,
POD_NAME_PATTERNS,
SECOND,
ZONE,
} from '../../utils/consts';
import {
createOSDTreeMap,
getIds,
getNewOSDIds,
getOSDPreparePodsCnt,
getPodName,
getPodPhase,
getPodRestartCount,
isPodPresent,
nodeType,
osdTreeType,
testPodIsRunning,
testPodIsSucceeded,
verifyNodeOSDMapping,
verifyZoneOSDMapping,
} from '../../utils/helpers';

const storageCluster = JSON.parse(execSync(`kubectl get -o json -n ${NS} ${KIND}`).toString());
@@ -44,6 +54,10 @@ const expansionObjects = {
updatedClusterJSON: {},
previousPods: { items: [] as PodKind[] },
updatedPods: { items: [] as PodKind[] },
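// Pre/post-expansion ceph osd tree state, used to verify where new OSDs land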
osdtree: { nodes: [] as nodeType[] },
formattedOSDTree: {},
previousOSDIds: [] as number[],
newOSDIds: [] as number[],
};

describe('Check availability of ocs cluster', () => {
@@ -92,6 +106,11 @@ if (clusterStatus && cephHealth) {
await verifyFields();
await click(confirmButton);

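// Capture the ceph osd tree as JSON before the expansion takes effect, so the OSD ids that exist now can be told apart from the ones the expansion adds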
expansionObjects.osdtree = JSON.parse(
execSync(
`oc -n openshift-storage rsh $(oc -n openshift-storage get pod | grep ceph-operator| awk '{print$1}') ceph --conf=/var/lib/rook/openshift-storage/openshift-storage.config osd tree --format=json`,
).toString(),
);
const statusCol = storageClusterRow(uid).$('td:nth-child(4)');

// Need to wait, as the cluster state fluctuates for some time; waiting 2 seconds for it to settle
@@ -115,6 +134,15 @@ if (clusterStatus && cephHealth) {
expansionObjects.updatedPods = JSON.parse(
execSync(`kubectl get pod -o json -n ${NS}`).toString(),
);

// Save the OSD ids from the pre-expansion snapshot, then re-capture the osd
// tree now that the expansion has settled: diffing the fresh tree against the
// saved ids yields exactly the OSDs created by the expansion
expansionObjects.previousOSDIds = getIds(expansionObjects.osdtree.nodes, OSD);
expansionObjects.osdtree = JSON.parse(
execSync(
`oc -n openshift-storage rsh $(oc -n openshift-storage get pod | grep ceph-operator| awk '{print$1}') ceph --conf=/var/lib/rook/openshift-storage/openshift-storage.config osd tree --format=json`,
).toString(),
);
expansionObjects.formattedOSDTree = createOSDTreeMap(
expansionObjects.osdtree.nodes,
) as osdTreeType;
expansionObjects.newOSDIds = getNewOSDIds(
expansionObjects.osdtree.nodes,
expansionObjects.previousOSDIds,
);
}, EXPAND_WAIT);

it('Newly added capacity should take effect at the storage level', () => {
@@ -125,6 +153,7 @@ if (clusterStatus && cephHealth) {
it('New osd pods corresponding to the additional capacity should be in running state', () => {
const newOSDPods = [] as PodKind[];
const newOSDPreparePods = [] as PodKind[];

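// Pods present after expansion but absent before are the newly created OSD and OSD-prepare pods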
expansionObjects.updatedPods.items.forEach((pod) => {
const podName = getPodName(pod);
if (!isPodPresent(expansionObjects.previousPods, podName)) {
@@ -176,5 +205,19 @@
const cephHealthAfter = cephValueAfter.items[0];
expect(cephHealthAfter.status.ceph.health).not.toBe(CLUSTER_STATUS.HEALTH_ERROR);
});

it('New OSDs are added correctly to the availability zones/failure domains', () => {
const zones = getIds(expansionObjects.osdtree.nodes, ZONE);
expect(
verifyZoneOSDMapping(zones, expansionObjects.newOSDIds, expansionObjects.formattedOSDTree),
).toEqual(true);
});

it('New OSDs are added correctly to the right nodes', () => {
const nodes = getIds(expansionObjects.osdtree.nodes, HOST);
expect(
verifyNodeOSDMapping(nodes, expansionObjects.newOSDIds, expansionObjects.formattedOSDTree),
).toEqual(true);
});
});
}
@@ -1,4 +1,4 @@
-export const OCS_OP = 'Openshift Container Storage Operator';
+export const OCS_OP = 'OpenShift Container Storage';
export const NS = 'openshift-storage';

export const SECOND = 1000;
@@ -39,3 +39,6 @@ export const EXPAND_WAIT = 15 * MINUTE;
export const STORAGE_CLUSTER_TAB_CNT = 15;
export const CAPACITY_UNIT = 'TiB';
export const CAPACITY_VALUE = '2';
export const HOST = 'host';
export const ZONE = 'zone';
export const OSD = 'osd';
@@ -2,7 +2,7 @@ import { execSync } from 'child_process';
import * as crudView from '@console/internal-integration-tests/views/crud.view';
import { ExpectedConditions as until, browser, $ } from 'protractor';
import * as _ from 'lodash';
-import { POD_NAME_PATTERNS, SECOND } from './consts';
+import { OSD, POD_NAME_PATTERNS, SECOND } from './consts';

export const checkIfClusterIsReady = async () => {
let stillLoading = true;
@@ -100,3 +100,67 @@ export const isPodPresent = (pods, podName) => {
export const getOSDPreparePodsCnt = (pods) =>
pods.items.filter((pod) => getPodName(pod).includes(POD_NAME_PATTERNS.ROOK_CEPH_OSD_PREPARE))
.length;

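// Collect the ids of all osd-tree nodes of the given type ('zone', 'host', or 'osd')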
export const getIds = (nodes: nodeType[], type: string): number[] =>
nodes.filter((node) => node.type === type).map((node) => node.id);

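// OSD ids present in the current tree but missing from the pre-expansion list are newly added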
export const getNewOSDIds = (nodes: nodeType[], osds: number[]): number[] =>
nodes.filter((node) => node.type === OSD && osds.indexOf(node.id) === -1).map((node) => node.id);

// Build a dictionary keyed by node id for O(1) access
export const createOSDTreeMap = (nodes: nodeType[]): osdTreeType => {
const tree = {};
nodes.forEach((node) => {
tree[node.id] = node;
});
return tree;
};

export const verifyZoneOSDMapping = (
zones: number[],
osds: number[],
osdtree: osdTreeType,
): boolean => {
// Each zone is expected to gain one new OSD as the last child of its host;
// remove every new OSD that is accounted for and expect none to be left over
let filteredOsds = [...osds];
zones.forEach((zone) => {
const hostId = osdtree[zone].children[0];
const len = osdtree[hostId].children.length;
filteredOsds = filteredOsds.filter((osd) => osd !== osdtree[hostId].children[len - 1]);
});

return filteredOsds.length === 0;
};

export const verifyNodeOSDMapping = (
nodes: number[],
osds: number[],
osdtree: osdTreeType,
): boolean => {
// Same accumulation as verifyZoneOSDMapping, keyed directly on host nodes
let filteredOsds = [...osds];
nodes.forEach((node) => {
const len = osdtree[node].children.length;
filteredOsds = filteredOsds.filter((osd) => osd !== osdtree[node].children[len - 1]);
});

return filteredOsds.length === 0;
};

export type nodeType = {
id: number;
name: string;
type: string;
type_id: number;
children: number[];
pool_weights?: {};
device_class?: string;
crush_weight?: number;
depth?: number;
exists?: number;
status?: string;
reweight?: number;
primary_affinity?: number;
};

export type osdTreeType = {
[key: string]: nodeType;
};
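
For reference, here is a minimal sketch of how these helpers compose, using an invented osd tree (the ids, names, and the './helpers' import path below are illustrative assumptions, not output from a real cluster):

import { createOSDTreeMap, getIds, getNewOSDIds, nodeType } from './helpers';

// Hypothetical `ceph osd tree --format=json` nodes: one zone containing one
// host, which holds two OSDs; osd.1 is the one added by the expansion
const nodes: nodeType[] = [
  { id: -3, name: 'zone-a', type: 'zone', type_id: 9, children: [-2] },
  { id: -2, name: 'host-0', type: 'host', type_id: 1, children: [0, 1] },
  { id: 0, name: 'osd.0', type: 'osd', type_id: 0, children: [] },
  { id: 1, name: 'osd.1', type: 'osd', type_id: 0, children: [] },
];

const tree = createOSDTreeMap(nodes); // id -> node map for O(1) lookups
const zones = getIds(nodes, 'zone'); // [-3]
const previousOSDIds = [0]; // ids captured before the expansion
const newOSDIds = getNewOSDIds(nodes, previousOSDIds); // [1]

With the verification helpers above, verifyZoneOSDMapping(zones, newOSDIds, tree) returns true here, since the new osd.1 is the last child of the zone's only host.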
