From 719aceda7bb299816066f5cb46e48c6a6a9734e6 Mon Sep 17 00:00:00 2001 From: Grace Date: Tue, 7 Jun 2022 12:48:44 -0700 Subject: [PATCH] @W-11179348@: improving runtime of windows-unit-tests --- .circleci/config.yml | 109 ++++++++++++++++-- package.json | 4 + .../salesforce/graph/ops/MethodUtilTest.java | 39 +++++-- 3 files changed, 134 insertions(+), 18 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 1481cbf0f..63e4b3ec9 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -80,6 +80,9 @@ commands: - gradle/collect_test_results: reports_path: pmd-cataloger/build/reports/ test_results_path: pmd-cataloger/build/test-results/ + - gradle/collect_test_results: + reports_path: sfge/build/reports/ + test_results_path: sfge/build/test-results/ - store_test_results: path: test-results - store_artifacts: # upload nyc test coverage as artifact. @@ -218,12 +221,21 @@ jobs: # Purpose: Runs the unit tests in a Windows environment. windows-unit-tests: + # `parallelism` indicates how many simultaneous executors should be run, allowing us to split + # long-running tasks across multiple executors. + parallelism: 4 # larger values didn't seem to affect performance greatly executor: name: win/default # executor type - size: "medium" + size: "large" shell: bash.exe parameters: node-version: *node_version_param + # Specify a subset of unit tests to be run, instead of the whole suite. + # This allows us to work around the suboptimal performance of the Windows executor by running + # multiple executors in parallel where different unit tests are run in each. + test-type: + type: string + default: all working_directory: C:\repo steps: - attach_workspace: at: . @@ -259,12 +271,90 @@ jobs: - run: mkdir test-results # Unit tests - - run: - name: test - # Necessary to explicitly use bash, otherwise gradlew's status code won't be received and the job will hang. 
- shell: bash.exe - command: yarn test --reporter mocha-junit-reporter --reporter-option mochaFile=test-results/mocha/test-results.xml - when: always + - when: + condition: + equal: [ all, << parameters.test-type >> ] + steps: + - run: + name: test + # Necessary to explicitly use bash, otherwise gradlew's status code won't be received and the job will hang. + shell: bash.exe + command: yarn test --reporter mocha-junit-reporter --reporter-option mochaFile=test-results/mocha/test-results.xml + when: always + + - when: + condition: + equal: [ sfge, << parameters.test-type >> ] + steps: + - run: + name: test-sfge + # Necessary to explicitly use bash, otherwise gradlew's status code won't be received and the job will hang. + shell: bash.exe + # Identify all the test files and allocate them between parallelized executors by timing data. + # Then turn the array of tests into something that gradle can accept, and run the tests. + command: | + TESTGLOB=$(circleci tests glob "sfge/src/test/**/*Test.java" | circleci tests split --split-by=timings) + echo $TESTGLOB + TESTARRAY=($TESTGLOB) + TESTARG="" + for element in "${TESTARRAY[@]}" + do + TESTARG="$TESTARG --tests `basename $element .java`" + done + echo $TESTARG + yarn test-sfge $TESTARG + when: always + + - when: + condition: + equal: [ cli-messaging, << parameters.test-type >> ] + steps: + - run: + name: test-cli-messaging + # Necessary to explicitly use bash, otherwise gradlew's status code won't be received and the job will hang. + shell: bash.exe + # This unit test suite is fast, so we have the first parallel executor run the tests, and all others exit early. + command: | + if [[ $CIRCLE_NODE_INDEX -gt 0 ]] + then + exit 0 + fi + yarn test-cli-messaging + when: always + + - when: + condition: + equal: [ pmd-cataloger, << parameters.test-type >> ] + steps: + - run: + name: test-pmd-cataloger + # Necessary to explicitly use bash, otherwise gradlew's status code won't be received and the job will hang. 
+ shell: bash.exe + # This unit test suite is fast, so we have the first parallel executor run the tests, and all others exit early. + command: | + if [[ $CIRCLE_NODE_INDEX -gt 0 ]] + then + exit 0 + fi + yarn test-pmd-cataloger + when: always + + - when: + condition: + equal: [ ts, << parameters.test-type >> ] + steps: + - run: + name: test-ts + # Explicitly using bash, for simplicity of required shell script. + shell: bash.exe + # This unit test suite is relatively fast, so we have the first parallel executor run the tests, and all others exit early. + command: | + if [[ $CIRCLE_NODE_INDEX -gt 0 ]] + then + exit 0 + fi + yarn test-ts --reporter mocha-junit-reporter --reporter-option mochaFile=test-results/mocha/test-results.xml + when: always # Linting - run: @@ -510,6 +600,11 @@ workflows: <<: *testing_filters requires: - setup + matrix: + parameters: + # The values of the parameters will be appended to the jobs they create. + # So we'll get "windows-unit-tests-pmd-cataloger", "windows-unit-tests-ts", etc. 
+ test-type: [pmd-cataloger, cli-messaging, ts, sfge] - linux-tarball-test: filters: <<: *testing_filters diff --git a/package.json b/package.json index 756d0a15f..c29919e27 100644 --- a/package.json +++ b/package.json @@ -133,6 +133,10 @@ "postpack": "rm -f oclif.manifest.json", "lint": "eslint ./src --ext .ts", "test": "./gradlew test jacocoTestCoverageVerification && nyc mocha --timeout 10000 --retries 5 \"./test/**/*.test.ts\"", + "test-cli-messaging": "./gradlew cli-messaging:test", + "test-pmd-cataloger": "./gradlew pmd-cataloger:test", + "test-sfge": "./gradlew sfge:test", + "test-ts": "nyc mocha --timeout 10000 --retries 5 \"./test/**/*.test.ts\"", "coverage": "nyc report --reporter text", "version": "oclif-dev readme && git add README.md" } diff --git a/sfge/src/test/java/com/salesforce/graph/ops/MethodUtilTest.java b/sfge/src/test/java/com/salesforce/graph/ops/MethodUtilTest.java index a139421a8..6790847d9 100644 --- a/sfge/src/test/java/com/salesforce/graph/ops/MethodUtilTest.java +++ b/sfge/src/test/java/com/salesforce/graph/ops/MethodUtilTest.java @@ -175,12 +175,21 @@ public void getTargetMethods_targetMultipleMethods() { List methodVertices = MethodUtil.getTargetedMethods(g, targets); MatcherAssert.assertThat(methodVertices, hasSize(equalTo(2))); - MethodVertex firstVertex = methodVertices.get(0); - assertEquals(METHOD_WITHOUT_OVERLOADS_1, firstVertex.getName()); - - MethodVertex secondVertex = methodVertices.get(1); - assertEquals(METHOD_WITHOUT_OVERLOADS_2, secondVertex.getName()); + boolean method1Found = false; + boolean method2Found = false; + for (MethodVertex methodVertex : methodVertices) { + String name = methodVertex.getName(); + if (METHOD_WITHOUT_OVERLOADS_1.equals(name)) { + method1Found = true; + } else if (METHOD_WITHOUT_OVERLOADS_2.equals(name)) { + method2Found = true; + } else { + fail("Unexpected method name " + name); + } + } + assertTrue(method1Found); + assertTrue(method2Found); String messages = 
CliMessager.getInstance().getAllMessages(); assertEquals("[]", messages); } @@ -227,13 +236,21 @@ public void getTargetMethods_targetNameDupedMethods() { List methodVertices = MethodUtil.getTargetedMethods(g, targets); MatcherAssert.assertThat(methodVertices, hasSize(equalTo(2))); - MethodVertex firstVertex = methodVertices.get(0); - assertEquals(METHOD_WITH_EXTERNAL_NAME_DUPLICATION, firstVertex.getName()); - assertEquals(18, firstVertex.getBeginLine()); - MethodVertex secondVertex = methodVertices.get(1); - assertEquals(METHOD_WITH_EXTERNAL_NAME_DUPLICATION, secondVertex.getName()); - assertEquals(22, secondVertex.getBeginLine()); + boolean line18Found = false; + boolean line22Found = false; + for (MethodVertex methodVertex : methodVertices) { + assertEquals(METHOD_WITH_EXTERNAL_NAME_DUPLICATION, methodVertex.getName()); + if (methodVertex.getBeginLine() == 18) { + line18Found = true; + } else if (methodVertex.getBeginLine() == 22) { + line22Found = true; + } else { + fail("Unexpected line number " + methodVertex.getBeginLine()); + } + } + assertTrue(line18Found); + assertTrue(line22Found); String messages = CliMessager.getInstance().getAllMessages(); MatcherAssert.assertThat(