diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 144a8b71fca39..6a5db93053e3b 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -26,4 +26,5 @@ BWC_VERSION: - "2.10.1" - "2.11.0" - "2.11.1" + - "2.11.2" - "2.12.0" diff --git a/.gitattributes b/.gitattributes index 65f909981595f..b74462afb27bd 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,4 +1,5 @@ * text eol=lf +*.jar binary *.bat binary *.zip binary *.exe binary diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 0defcaf727771..8c4f4d59ea1fc 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -711,6 +711,14 @@ updates: labels: - "dependabot" - "dependencies" + - directory: /modules/crypto/ + open-pull-requests-limit: 1 + package-ecosystem: gradle + schedule: + interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/ open-pull-requests-limit: 1 package-ecosystem: gradle diff --git a/.github/workflows/assemble.yml b/.github/workflows/assemble.yml index 87cecdf38c072..382105364c048 100644 --- a/.github/workflows/assemble.yml +++ b/.github/workflows/assemble.yml @@ -12,7 +12,7 @@ jobs: steps: - uses: actions/checkout@v4 - name: Set up JDK ${{ matrix.java }} - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: java-version: ${{ matrix.java }} distribution: temurin diff --git a/.github/workflows/lucene-snapshots.yml b/.github/workflows/lucene-snapshots.yml index ec5893ea546a9..05ca93e7be2aa 100644 --- a/.github/workflows/lucene-snapshots.yml +++ b/.github/workflows/lucene-snapshots.yml @@ -7,7 +7,8 @@ on: # Inputs the workflow accepts. inputs: ref: - description: + description: 'Lucene ref in github.com/apache/lucene' + type: string required: false default: 'main' @@ -21,33 +22,52 @@ jobs: contents: read steps: - - uses: actions/checkout@v4 - - name: Set up JDK 17 - uses: actions/setup-java@v3 - with: - java-version: '17' - distribution: 'adopt' - - - name: Checkout Lucene + - name: Checkout Lucene ref:${{ github.event.inputs.ref }} uses: actions/checkout@v4 with: repository: 'apache/lucene' - path: lucene ref: ${{ github.event.inputs.ref }} - - name: Set hash - working-directory: ./lucene + - name: Get Java Min Version and Lucene Revision from Lucene Repository run: | - echo "REVISION=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT - id: version + java_version=`cat build.gradle | grep minJavaVersion | head -1 | grep -Eo '_[0-9]+$' | tr -d '_'` + echo "JAVA_VERSION=$java_version" >> $GITHUB_ENV + echo "REVISION=$(git rev-parse --short HEAD)" >> $GITHUB_ENV + + - name: Setup JDK ${{ env.JAVA_VERSION }} + uses: actions/setup-java@v4 + with: + java-version: ${{ env.JAVA_VERSION }} + distribution: 'temurin' - name: Initialize gradle settings - working-directory: ./lucene run: ./gradlew localSettings - name: Publish Lucene to local maven repo. 
- working-directory: ./lucene - run: ./gradlew publishJarsPublicationToMavenLocal -Pversion.suffix=snapshot-${{ steps.version.outputs.REVISION }} + run: ./gradlew publishJarsPublicationToMavenLocal -Pversion.suffix=snapshot-${{ env.REVISION }} + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.LUCENE_SNAPSHOTS_SECRET_ROLE }} + aws-region: us-east-1 + + - name: Get S3 Bucket + id: get_s3_bucket + run: | + lucene_snapshots_bucket=`aws secretsmanager get-secret-value --secret-id jenkins-artifact-bucket-name --query SecretString --output text` + echo "::add-mask::$lucene_snapshots_bucket" + echo "LUCENE_SNAPSHOTS_BUCKET=$lucene_snapshots_bucket" >> $GITHUB_OUTPUT + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.LUCENE_SNAPSHOTS_S3_ROLE }} + aws-region: us-east-1 + + - name: Copy files to S3 with the aws CLI (New) + run: | + aws s3 cp ~/.m2/repository/org/apache/lucene/ s3://${{ steps.get_s3_bucket.outputs.LUCENE_SNAPSHOTS_BUCKET }}/snapshots/lucene/org/apache/lucene/ --recursive --no-progress - name: Configure AWS credentials uses: aws-actions/configure-aws-credentials@v4 @@ -55,6 +75,7 @@ jobs: role-to-assume: ${{ secrets.LUCENE_SNAPSHOTS_ROLE }} aws-region: us-west-2 - - name: Copy files to S3 with the aws CLI. + # We will remove this step once all the old Lucene snapshot links have been updated to the new location + - name: Copy files to S3 with the aws CLI (Old) run: | aws s3 cp ~/.m2/repository/org/apache/lucene/ s3://${{ secrets.LUCENE_SNAPSHOTS_BUCKET }}/snapshots/lucene/org/apache/lucene/ --recursive --no-progress diff --git a/.github/workflows/precommit.yml b/.github/workflows/precommit.yml index cd75eb47946a4..800aacec98516 100644 --- a/.github/workflows/precommit.yml +++ b/.github/workflows/precommit.yml @@ -12,7 +12,7 @@ jobs: steps: - uses: actions/checkout@v4 - name: Set up JDK ${{ matrix.java }} - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: java-version: ${{ matrix.java }} distribution: temurin diff --git a/.github/workflows/publish-maven-snapshots.yml b/.github/workflows/publish-maven-snapshots.yml index 93bbfb8bbeab8..1b2db22c7c20b 100644 --- a/.github/workflows/publish-maven-snapshots.yml +++ b/.github/workflows/publish-maven-snapshots.yml @@ -20,7 +20,7 @@ jobs: steps: - uses: actions/checkout@v4 - name: Set up JDK 17 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: adopt java-version: 17 diff --git a/CHANGELOG.md b/CHANGELOG.md index eff7656cc9f1c..444ca7c0d1ae8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,17 +12,10 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Introduce new dynamic cluster setting to control slice computation for concurrent segment search ([#9107](https://github.com/opensearch-project/OpenSearch/pull/9107)) - Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679), [#10664](https://github.com/opensearch-project/OpenSearch/pull/10664)) - Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) -- [Admission control] Add enhancements to FS stats to include read/write time, queue size and IO time ([#10541](https://github.com/opensearch-project/OpenSearch/pull/10541)) -- [Admission control] Add Resource usage collector service and resource usage tracker
([#9890](https://github.com/opensearch-project/OpenSearch/pull/9890)) -- [Remote cluster state] Change file names for remote cluster state ([#10557](https://github.com/opensearch-project/OpenSearch/pull/10557)) -- [Remote cluster state] Upload global metadata in cluster state to remote store([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404)) -- [Remote cluster state] Download functionality of global metadata from remote store ([#10535](https://github.com/opensearch-project/OpenSearch/pull/10535)) -- [Remote cluster state] Restore global metadata from remote store when local state is lost after quorum loss ([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404)) - [AdmissionControl] Added changes for AdmissionControl Interceptor and AdmissionControlService for RateLimiting ([#9286](https://github.com/opensearch-project/OpenSearch/pull/9286)) - GHA to verify checklist items completion in PR descriptions ([#10800](https://github.com/opensearch-project/OpenSearch/pull/10800)) - Allow to pass the list settings through environment variables (like [], ["a", "b", "c"], ...) ([#10625](https://github.com/opensearch-project/OpenSearch/pull/10625)) -- [Remote cluster state] Restore cluster state version during remote state auto restore ([#10853](https://github.com/opensearch-project/OpenSearch/pull/10853)) -- Add back half_float BKD based sort query optimization ([#11024](https://github.com/opensearch-project/OpenSearch/pull/11024)) +- [Admission Control] Integrate CPU AC with ResourceUsageCollector and add CPU AC stats to nodes/stats ([#10887](https://github.com/opensearch-project/OpenSearch/pull/10887)) ### Dependencies - Bump `log4j-core` from 2.18.0 to 2.19.0 @@ -55,6 +48,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bumps jetty version to 9.4.52.v20230823 to fix GMS-2023-1857 ([#9822](https://github.com/opensearch-project/OpenSearch/pull/9822)) - Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0 ([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147)) - Bump OpenTelemetry from 1.30.1 to 1.31.0 ([#10617](https://github.com/opensearch-project/OpenSearch/pull/10617)) +- Bump OpenTelemetry from 1.31.0 to 1.32.0 and OpenTelemetry Semconv from 1.21.0-alpha to 1.23.1-alpha ([#11305](https://github.com/opensearch-project/OpenSearch/pull/11305)) ### Changed - [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948)) @@ -97,28 +91,43 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added +- [Admission control] Add Resource usage collector service and resource usage tracker ([#9890](https://github.com/opensearch-project/OpenSearch/pull/9890)) +- [Admission control] Add enhancements to FS stats to include read/write time, queue size and IO time ([#10541](https://github.com/opensearch-project/OpenSearch/pull/10541)) +- [Remote cluster state] Change file names for remote cluster state ([#10557](https://github.com/opensearch-project/OpenSearch/pull/10557)) +- [Search Pipelines] Add request-scoped state shared between processors (and three new processors) ([#9405](https://github.com/opensearch-project/OpenSearch/pull/9405)) - Per request phase latency ([#10351](https://github.com/opensearch-project/OpenSearch/issues/10351)) -- Request level coordinator slow logs ([#10650](https://github.com/opensearch-project/OpenSearch/pull/10650)) - [Remote Store] Add repository 
stats for remote store([#10567](https://github.com/opensearch-project/OpenSearch/pull/10567)) -- Add search query categorizer ([#10255](https://github.com/opensearch-project/OpenSearch/pull/10255)) -- Introduce ConcurrentQueryProfiler to profile query using concurrent segment search path and support concurrency during rewrite and create weight ([10352](https://github.com/opensearch-project/OpenSearch/pull/10352)) -- Update the indexRandom function to create more segments for concurrent search tests ([10247](https://github.com/opensearch-project/OpenSearch/pull/10247)) +- [Remote cluster state] Upload global metadata in cluster state to remote store ([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404)) +- [Remote cluster state] Download functionality of global metadata from remote store ([#10535](https://github.com/opensearch-project/OpenSearch/pull/10535)) +- [Remote cluster state] Restore global metadata from remote store when local state is lost after quorum loss ([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404)) - [Remote cluster state] Make index and global metadata upload timeout dynamic cluster settings ([#10814](https://github.com/opensearch-project/OpenSearch/pull/10814)) +- Add search query categorizer ([#10255](https://github.com/opensearch-project/OpenSearch/pull/10255)) - Add cluster state stats ([#10670](https://github.com/opensearch-project/OpenSearch/pull/10670)) -- Adding slf4j license header to LoggerMessageFormat.java ([#11069](https://github.com/opensearch-project/OpenSearch/pull/11069)) +- [Remote cluster state] Restore cluster state version during remote state auto restore ([#10853](https://github.com/opensearch-project/OpenSearch/pull/10853)) +- Update the indexRandom function to create more segments for concurrent search tests ([#10247](https://github.com/opensearch-project/OpenSearch/pull/10247)) +- Add support for query profiler with concurrent aggregation ([#9248](https://github.com/opensearch-project/OpenSearch/pull/9248)) +- Introduce ConcurrentQueryProfiler to profile query using concurrent segment search path and support concurrency during rewrite and create weight ([#10352](https://github.com/opensearch-project/OpenSearch/pull/10352)) +- Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679)) +- Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) - [Streaming Indexing] Introduce new experimental server HTTP transport based on Netty 4 and Project Reactor (Reactor Netty) ([#9672](https://github.com/opensearch-project/OpenSearch/pull/9672)) +- Add back half_float BKD based sort query optimization ([#11024](https://github.com/opensearch-project/OpenSearch/pull/11024)) +- Request level coordinator slow logs ([#10650](https://github.com/opensearch-project/OpenSearch/pull/10650)) - Add template snippets support for field and target_field in KV ingest processor ([#10040](https://github.com/opensearch-project/OpenSearch/pull/10040)) - Allowing pipeline processors to access index mapping info by passing ingest service ref as part of the processor factory parameters ([#10307](https://github.com/opensearch-project/OpenSearch/pull/10307)) +- Make number of segment metadata files in remote segment store configurable ([#11329](https://github.com/opensearch-project/OpenSearch/pull/11329)) +- Allow changing
number of replicas of searchable snapshot index ([#11317](https://github.com/opensearch-project/OpenSearch/pull/11317)) +- Adding slf4j license header to LoggerMessageFormat.java ([#11069](https://github.com/opensearch-project/OpenSearch/pull/11069)) ### Dependencies -- Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) - Bump Lucene from 9.7.0 to 9.8.0 ([10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) +- Bump `commons-io:commons-io` from 2.13.0 to 2.15.1 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294), [#11001](https://github.com/opensearch-project/OpenSearch/pull/11001), [#11002](https://github.com/opensearch-project/OpenSearch/pull/11002), [#11446](https://github.com/opensearch-project/OpenSearch/pull/11446)) +- Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) - Bump `com.netflix.nebula.ospackage-base` from 11.4.0 to 11.5.0 ([#10295](https://github.com/opensearch-project/OpenSearch/pull/10295)) -- Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297)) - Bump `org.apache.zookeeper:zookeeper` from 3.9.0 to 3.9.1 ([#10506](https://github.com/opensearch-project/OpenSearch/pull/10506)) - Bump `de.thetaphi:forbiddenapis` from 3.5.1 to 3.6 ([#10508](https://github.com/opensearch-project/OpenSearch/pull/10508)) -- Bump `commons-io:commons-io` from 2.13.0 to 2.14.0 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294)) - Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#10639](https://github.com/opensearch-project/OpenSearch/pull/10639)) +- Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297)) - Bump `com.google.http-client:google-http-client` from 1.43.2 to 1.43.3 ([#10635](https://github.com/opensearch-project/OpenSearch/pull/10635)) - Bump `com.squareup.okio:okio` from 3.5.0 to 3.6.0 ([#10637](https://github.com/opensearch-project/OpenSearch/pull/10637)) - Bump `org.apache.logging.log4j:log4j-core` from 2.20.0 to 2.22.0 ([#10858](https://github.com/opensearch-project/OpenSearch/pull/10858), [#11000](https://github.com/opensearch-project/OpenSearch/pull/11000), [#11270](https://github.com/opensearch-project/OpenSearch/pull/11270)) @@ -127,6 +136,15 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `actions/github-script` from 6 to 7 ([#11271](https://github.com/opensearch-project/OpenSearch/pull/11271)) - Bump `jackson` and `jackson_databind` from 2.15.2 to 2.16.0 ([#11273](https://github.com/opensearch-project/OpenSearch/pull/11273)) - Bump `netty` from 4.1.100.Final to 4.1.101.Final ([#11294](https://github.com/opensearch-project/OpenSearch/pull/11294)) +- Bump `com.avast.gradle:gradle-docker-compose-plugin` from 0.16.12 to 0.17.5 ([#10163](https://github.com/opensearch-project/OpenSearch/pull/10163)) +- Bump `com.squareup.okhttp3:okhttp` from 4.11.0 to 4.12.0 ([#10861](https://github.com/opensearch-project/OpenSearch/pull/10861)) +- Bump `org.apache.commons:commons-text` from 1.10.0 to 1.11.0 ([#11344](https://github.com/opensearch-project/OpenSearch/pull/11344)) +- Bump `reactor-netty-core` from 1.1.12 
to 1.1.13 ([#11350](https://github.com/opensearch-project/OpenSearch/pull/11350)) +- Bump `com.gradle.enterprise` from 3.14.1 to 3.15.1 ([#11339](https://github.com/opensearch-project/OpenSearch/pull/11339)) +- Bump `actions/setup-java` from 3 to 4 ([#11447](https://github.com/opensearch-project/OpenSearch/pull/11447)) +- Bump `commons-net:commons-net` from 3.9.0 to 3.10.0 ([#11450](https://github.com/opensearch-project/OpenSearch/pull/11450)) +- Bump `org.apache.maven:maven-model` from 3.9.4 to 3.9.6 ([#11445](https://github.com/opensearch-project/OpenSearch/pull/11445)) +- Bump `org.apache.xmlbeans:xmlbeans` from 5.1.1 to 5.2.0 ([#11448](https://github.com/opensearch-project/OpenSearch/pull/11448)) ### Changed - Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) @@ -140,8 +158,12 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Disallow removing some metadata fields by remove ingest processor ([#10895](https://github.com/opensearch-project/OpenSearch/pull/10895)) - Refactor common parts from the Rounding class into a separate 'round' package ([#11023](https://github.com/opensearch-project/OpenSearch/issues/11023)) - Performance improvement for MultiTerm Queries on Keyword fields ([#7057](https://github.com/opensearch-project/OpenSearch/issues/7057)) +- Performance improvement for date histogram aggregations without sub-aggregations ([#11083](https://github.com/opensearch-project/OpenSearch/pull/11083)) - Disable concurrent aggs for Diversified Sampler and Sampler aggs ([#11087](https://github.com/opensearch-project/OpenSearch/issues/11087)) - Made leader/follower check timeout setting dynamic ([#10528](https://github.com/opensearch-project/OpenSearch/pull/10528)) +- Improve boolean parsing performance ([#11308](https://github.com/opensearch-project/OpenSearch/pull/11308)) +- Interpret byte array as primitive using VarHandles ([#11362](https://github.com/opensearch-project/OpenSearch/pull/11362)) +- Change error message when per shard document limit is breached ([#11312](https://github.com/opensearch-project/OpenSearch/pull/11312)) ### Deprecated @@ -159,6 +181,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix per request latency last phase not tracked ([#10934](https://github.com/opensearch-project/OpenSearch/pull/10934)) - Fix for stuck update action in a bulk with `retry_on_conflict` property ([#11152](https://github.com/opensearch-project/OpenSearch/issues/11152)) - Fix the issue with DefaultSpanScope restoring wrong span in the TracerContextStorage upon detach ([#11316](https://github.com/opensearch-project/OpenSearch/issues/11316)) +- Remove shadowJar from `lang-painless` module publication ([#11369](https://github.com/opensearch-project/OpenSearch/issues/11369)) +- Fix remote shards balancer and remove unused variables ([#11167](https://github.com/opensearch-project/OpenSearch/pull/11167)) +- Fix bug where replication lag grows post primary relocation ([#11238](https://github.com/opensearch-project/OpenSearch/pull/11238)) ### Security diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle index 02aa9319cc583..6b4634c7e791c 100644 --- a/benchmarks/build.gradle +++ b/benchmarks/build.gradle @@ -40,7 +40,7 @@ application { } base { - archivesBaseName = 'opensearch-benchmarks' + archivesName = 'opensearch-benchmarks' } test.enabled = false diff --git a/build.gradle b/build.gradle index 9d62e942a4431..b1cd1d532bfeb 100644 --- 
a/build.gradle +++ b/build.gradle @@ -375,7 +375,7 @@ allprojects { } else { // Link to non-shadowed dependant projects project.javadoc.dependsOn "${upstreamProject.path}:javadoc" - String externalLinkName = upstreamProject.base.archivesBaseName + String externalLinkName = upstreamProject.base.archivesName String artifactPath = dep.group.replaceAll('\\.', '/') + '/' + externalLinkName.replaceAll('\\.', '/') + '/' + dep.version String projectRelativePath = project.relativePath(upstreamProject.buildDir) project.javadoc.options.linksOffline artifactsHost + "/javadoc/" + artifactPath, "${projectRelativePath}/docs/javadoc/" diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 6d3e0f018657e..b8db8504d5b85 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -109,15 +109,15 @@ dependencies { api 'com.netflix.nebula:nebula-publishing-plugin:20.3.0' api 'com.netflix.nebula:gradle-info-plugin:12.1.6' api 'org.apache.rat:apache-rat:0.15' - api 'commons-io:commons-io:2.13.0' + api 'commons-io:commons-io:2.15.1' api "net.java.dev.jna:jna:5.13.0" api 'com.github.johnrengelman:shadow:8.1.1' api 'org.jdom:jdom2:2.0.6.1' api "org.jetbrains.kotlin:kotlin-stdlib-jdk8:${props.getProperty('kotlin')}" api 'de.thetaphi:forbiddenapis:3.6' - api 'com.avast.gradle:gradle-docker-compose-plugin:0.16.12' + api 'com.avast.gradle:gradle-docker-compose-plugin:0.17.5' api "org.yaml:snakeyaml:${props.getProperty('snakeyaml')}" - api 'org.apache.maven:maven-model:3.9.4' + api 'org.apache.maven:maven-model:3.9.6' api 'com.networknt:json-schema-validator:1.0.86' api 'org.jruby.jcodings:jcodings:1.0.58' api 'org.jruby.joni:joni:2.2.1' diff --git a/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy index 556763333d279..13f5f8724c6f2 100644 --- a/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy @@ -89,7 +89,7 @@ class PluginBuildPlugin implements Plugin { String name = extension1.name BasePluginExtension base = project.getExtensions().findByType(BasePluginExtension.class) - base.archivesBaseName = name + base.archivesName = name project.description = extension1.description if (extension1.name == null) { @@ -155,7 +155,7 @@ class PluginBuildPlugin implements Plugin { // Only configure publishing if applied externally if (extension.hasClientJar) { project.pluginManager.apply('com.netflix.nebula.maven-base-publish') - // Only change Jar tasks, we don't want a -client zip so we can't change archivesBaseName + // Only change Jar tasks, we don't want a -client zip so we can't change archivesName project.tasks.withType(Jar) { archiveBaseName = archiveBaseName.get() + "-client" } @@ -163,7 +163,7 @@ class PluginBuildPlugin implements Plugin { project.publishing.publications.nebula(MavenPublication).artifactId(extension.name + "-client") final BasePluginExtension base = project.getExtensions().findByType(BasePluginExtension.class) project.tasks.withType(GenerateMavenPom.class).configureEach { GenerateMavenPom generatePOMTask -> - generatePOMTask.destination = "${project.buildDir}/distributions/${base.archivesBaseName}-client-${project.versions.opensearch}.pom" + generatePOMTask.destination = "${project.buildDir}/distributions/${base.archivesName}-client-${project.versions.opensearch}.pom" } } else { if (project.plugins.hasPlugin(MavenPublishPlugin)) { diff --git 
a/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java index 97e923c366598..7ec21bba18c64 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java @@ -77,7 +77,7 @@ public void apply(Project project) { } private static String getArchivesBaseName(Project project) { - return project.getExtensions().getByType(BasePluginExtension.class).getArchivesBaseName(); + return project.getExtensions().getByType(BasePluginExtension.class).getArchivesName().get(); } /**Configuration generation of maven poms. */ diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java index 3aba941875115..c9e18426966f9 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java @@ -170,6 +170,7 @@ public void execute(Task task) { .findFirst(); composeExtension.getExecutable().set(dockerCompose.isPresent() ? dockerCompose.get() : "/usr/bin/docker"); + composeExtension.getUseDockerComposeV2().set(false); tasks.named("composeUp").configure(t -> { // Avoid running docker-compose tasks in parallel in CI due to some issues on certain Linux distributions diff --git a/buildSrc/version.properties b/buildSrc/version.properties index c11e0aa25fa48..74d655cfb1045 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -30,7 +30,7 @@ netty = 4.1.101.Final joda = 2.12.2 # project reactor -reactor_netty = 1.1.12 +reactor_netty = 1.1.13 reactor = 3.5.11 # client dependencies @@ -70,5 +70,5 @@ jzlib = 1.1.3 resteasy = 6.2.4.Final # opentelemetry dependencies -opentelemetry = 1.31.0 -opentelemetrysemconv = 1.21.0-alpha +opentelemetry = 1.32.0 +opentelemetrysemconv = 1.23.1-alpha diff --git a/client/benchmark/build.gradle b/client/benchmark/build.gradle index 6fd5262f0ab4f..c1af5fa92e35c 100644 --- a/client/benchmark/build.gradle +++ b/client/benchmark/build.gradle @@ -33,7 +33,7 @@ apply plugin: 'application' base { group = 'org.opensearch.client' - archivesBaseName = 'client-benchmarks' + archivesName = 'client-benchmarks' } // Not published so no need to assemble diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle index 770cb3f78ca47..fdc93d8037ce6 100644 --- a/client/rest-high-level/build.gradle +++ b/client/rest-high-level/build.gradle @@ -39,7 +39,7 @@ apply plugin: 'opensearch.rest-resources' base { group = 'org.opensearch.client' - archivesBaseName = 'opensearch-rest-high-level-client' + archivesName = 'opensearch-rest-high-level-client' } restResources { diff --git a/client/rest/build.gradle b/client/rest/build.gradle index 2c437c909fb03..ff3c322c5ccf7 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -40,7 +40,7 @@ java { base { group = 'org.opensearch.client' - archivesBaseName = 'opensearch-rest-client' + archivesName = 'opensearch-rest-client' } dependencies { diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle index f645b2dbbc933..4b50a996d1f9f 100644 --- a/client/sniffer/build.gradle +++ b/client/sniffer/build.gradle @@ -37,7 +37,7 @@ java { base { group = 'org.opensearch.client' - archivesBaseName = 'opensearch-rest-client-sniffer' + archivesName = 'opensearch-rest-client-sniffer' } dependencies { diff --git 
a/distribution/archives/darwin-arm64-tar/build.gradle b/distribution/archives/darwin-arm64-tar/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/darwin-arm64-tar/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/darwin-tar/build.gradle b/distribution/archives/darwin-tar/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/darwin-tar/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/freebsd-tar/build.gradle b/distribution/archives/freebsd-tar/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/freebsd-tar/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/integ-test-zip/build.gradle b/distribution/archives/integ-test-zip/build.gradle index 9418223b0a44d..ffaea5e8ca771 100644 --- a/distribution/archives/integ-test-zip/build.gradle +++ b/distribution/archives/integ-test-zip/build.gradle @@ -38,7 +38,7 @@ apply plugin: 'com.netflix.nebula.maven-publish' base { group = "org.opensearch.distribution.integ-test-zip" - archivesBaseName = "opensearch" + archivesName = "opensearch" } integTest { diff --git a/distribution/archives/jre-linux-tar/build.gradle b/distribution/archives/jre-linux-tar/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/jre-linux-tar/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. 
+ +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/linux-arm64-tar/build.gradle b/distribution/archives/linux-arm64-tar/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/linux-arm64-tar/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/linux-ppc64le-tar/build.gradle b/distribution/archives/linux-ppc64le-tar/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/linux-ppc64le-tar/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/linux-s390x-tar/build.gradle b/distribution/archives/linux-s390x-tar/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/linux-s390x-tar/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/linux-tar/build.gradle b/distribution/archives/linux-tar/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/linux-tar/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. 
+ +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/no-jdk-darwin-arm64-tar/build.gradle b/distribution/archives/no-jdk-darwin-arm64-tar/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/no-jdk-darwin-arm64-tar/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/no-jdk-darwin-tar/build.gradle b/distribution/archives/no-jdk-darwin-tar/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/no-jdk-darwin-tar/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/no-jdk-freebsd-tar/build.gradle b/distribution/archives/no-jdk-freebsd-tar/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/no-jdk-freebsd-tar/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/no-jdk-linux-arm64-tar/build.gradle b/distribution/archives/no-jdk-linux-arm64-tar/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/no-jdk-linux-arm64-tar/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. 
+ +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/no-jdk-linux-ppc64le-tar/build.gradle b/distribution/archives/no-jdk-linux-ppc64le-tar/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/no-jdk-linux-ppc64le-tar/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/no-jdk-linux-tar/build.gradle b/distribution/archives/no-jdk-linux-tar/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/no-jdk-linux-tar/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/no-jdk-windows-zip/build.gradle b/distribution/archives/no-jdk-windows-zip/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/no-jdk-windows-zip/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/windows-zip/build.gradle b/distribution/archives/windows-zip/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/windows-zip/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. 
+ +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/docker/docker-arm64-build-context/build.gradle b/distribution/docker/docker-arm64-build-context/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/docker/docker-arm64-build-context/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/docker/docker-arm64-export/build.gradle b/distribution/docker/docker-arm64-export/build.gradle index 3506c4e39c234..62f3dc68b0c8e 100644 --- a/distribution/docker/docker-arm64-export/build.gradle +++ b/distribution/docker/docker-arm64-export/build.gradle @@ -11,3 +11,5 @@ // This file is intentionally blank. All configuration of the // export is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/docker/docker-export/build.gradle b/distribution/docker/docker-export/build.gradle index 3506c4e39c234..62f3dc68b0c8e 100644 --- a/distribution/docker/docker-export/build.gradle +++ b/distribution/docker/docker-export/build.gradle @@ -11,3 +11,5 @@ // This file is intentionally blank. All configuration of the // export is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/docker/docker-ppc64le-export/build.gradle b/distribution/docker/docker-ppc64le-export/build.gradle index 820a0cdf69dfc..ae7def32c4d6c 100644 --- a/distribution/docker/docker-ppc64le-export/build.gradle +++ b/distribution/docker/docker-ppc64le-export/build.gradle @@ -10,3 +10,5 @@ // This file is intentionally blank. All configuration of the // export is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/docker/docker-s390x-export/build.gradle b/distribution/docker/docker-s390x-export/build.gradle index 3506c4e39c234..62f3dc68b0c8e 100644 --- a/distribution/docker/docker-s390x-export/build.gradle +++ b/distribution/docker/docker-s390x-export/build.gradle @@ -11,3 +11,5 @@ // This file is intentionally blank. All configuration of the // export is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/packages/arm64-deb/build.gradle b/distribution/packages/arm64-deb/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/packages/arm64-deb/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/packages/arm64-no-jdk-deb/build.gradle b/distribution/packages/arm64-no-jdk-deb/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/packages/arm64-no-jdk-deb/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/packages/arm64-no-jdk-rpm/build.gradle b/distribution/packages/arm64-no-jdk-rpm/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/packages/arm64-no-jdk-rpm/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/packages/arm64-rpm/build.gradle b/distribution/packages/arm64-rpm/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/packages/arm64-rpm/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/packages/deb/build.gradle b/distribution/packages/deb/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/packages/deb/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. 
+ +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/packages/no-jdk-arm64-deb/build.gradle b/distribution/packages/no-jdk-arm64-deb/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/packages/no-jdk-arm64-deb/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/packages/no-jdk-arm64-rpm/build.gradle b/distribution/packages/no-jdk-arm64-rpm/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/packages/no-jdk-arm64-rpm/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/packages/no-jdk-deb/build.gradle b/distribution/packages/no-jdk-deb/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/packages/no-jdk-deb/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/packages/no-jdk-rpm/build.gradle b/distribution/packages/no-jdk-rpm/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/packages/no-jdk-rpm/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. 
+ +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/packages/rpm/build.gradle b/distribution/packages/rpm/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/packages/rpm/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/tools/launchers/build.gradle b/distribution/tools/launchers/build.gradle index e75267f7c4a74..aee205a24dea3 100644 --- a/distribution/tools/launchers/build.gradle +++ b/distribution/tools/launchers/build.gradle @@ -39,7 +39,7 @@ dependencies { } base { - archivesBaseName = 'opensearch-launchers' + archivesName = 'opensearch-launchers' } tasks.withType(CheckForbiddenApis).configureEach { diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index b61a00aba04bc..f40fb1c4b0a9f 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -31,7 +31,7 @@ apply plugin: 'opensearch.build' base { - archivesBaseName = 'opensearch-plugin-cli' + archivesName = 'opensearch-plugin-cli' } dependencies { diff --git a/distribution/tools/upgrade-cli/build.gradle b/distribution/tools/upgrade-cli/build.gradle index 99824463f14f8..92c043132c021 100644 --- a/distribution/tools/upgrade-cli/build.gradle +++ b/distribution/tools/upgrade-cli/build.gradle @@ -10,7 +10,7 @@ apply plugin: 'opensearch.build' base { - archivesBaseName = 'opensearch-upgrade-cli' + archivesName = 'opensearch-upgrade-cli' } dependencies { diff --git a/docs/build.gradle b/docs/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/docs/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. 
+ +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index 7f93135c49b76..d64cd4917707c 100644 Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index adfb521550eb9..f1d76d80bbfa3 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -11,7 +11,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-8.4-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.5-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=f2b9ed0faf8472cbe469255ae6c86eddb77076c75191741b4a462f33128dd419 +distributionSha256Sum=c16d517b50dd28b3f5838f0e844b7520b8f1eb610f2f29de7e4e04a1b7c9c79b diff --git a/libs/common/build.gradle b/libs/common/build.gradle index 973fe30d09842..4f89b81636420 100644 --- a/libs/common/build.gradle +++ b/libs/common/build.gradle @@ -14,7 +14,7 @@ import org.opensearch.gradle.info.BuildParams apply plugin: 'opensearch.publish' base { - archivesBaseName = 'opensearch-common' + archivesName = 'opensearch-common' } dependencies { diff --git a/libs/common/src/main/java/org/opensearch/common/Booleans.java b/libs/common/src/main/java/org/opensearch/common/Booleans.java index 2ca061820b2eb..ab7ad37e92612 100644 --- a/libs/common/src/main/java/org/opensearch/common/Booleans.java +++ b/libs/common/src/main/java/org/opensearch/common/Booleans.java @@ -45,30 +45,72 @@ private Booleans() { /** * Parses a char[] representation of a boolean value to boolean. * - * @return true iff the sequence of chars is "true", false iff the sequence of chars is "false" or the - * provided default value iff either text is null or length == 0. + * @return true iff the sequence of chars is "true", false iff the sequence of + * chars is "false" or the provided default value iff either text is null or length == 0. * @throws IllegalArgumentException if the string cannot be parsed to boolean. */ public static boolean parseBoolean(char[] text, int offset, int length, boolean defaultValue) { - if (text == null || length == 0) { + if (text == null) { return defaultValue; - } else { - return parseBoolean(new String(text, offset, length)); } + + switch (length) { + case 0: + return defaultValue; + case 1: + case 2: + case 3: + default: + break; + case 4: + if (text[offset] == 't' && text[offset + 1] == 'r' && text[offset + 2] == 'u' && text[offset + 3] == 'e') { + return true; + } + break; + case 5: + if (text[offset] == 'f' + && text[offset + 1] == 'a' + && text[offset + 2] == 'l' + && text[offset + 3] == 's' + && text[offset + 4] == 'e') { + return false; + } + break; + } + + throw new IllegalArgumentException( + "Failed to parse value [" + new String(text, offset, length) + "] as only [true] or [false] are allowed." + ); } /** - * returns true iff the sequence of chars is one of "true","false". + * Returns true iff the sequence of chars is one of "true", "false". 
* * @param text sequence to check * @param offset offset to start * @param length length to check */ public static boolean isBoolean(char[] text, int offset, int length) { - if (text == null || length == 0) { + if (text == null) { return false; } - return isBoolean(new String(text, offset, length)); + + switch (length) { + case 0: + case 1: + case 2: + case 3: + default: + return false; + case 4: + return text[offset] == 't' && text[offset + 1] == 'r' && text[offset + 2] == 'u' && text[offset + 3] == 'e'; + case 5: + return text[offset] == 'f' + && text[offset + 1] == 'a' + && text[offset + 2] == 'l' + && text[offset + 3] == 's' + && text[offset + 4] == 'e'; + } } public static boolean isBoolean(String value) { @@ -91,63 +133,45 @@ public static boolean parseBoolean(String value) { throw new IllegalArgumentException("Failed to parse value [" + value + "] as only [true] or [false] are allowed."); } - private static boolean hasText(CharSequence str) { - if (str == null || str.length() == 0) { - return false; - } - int strLen = str.length(); - for (int i = 0; i < strLen; i++) { - if (!Character.isWhitespace(str.charAt(i))) { - return true; - } - } - return false; - } - /** + * Parses a string representation of a boolean value to boolean. + * Note the subtle difference between this and {@link #parseBoolean(char[], int, int, boolean)}; this returns the + * default value even when the value is non-zero length containing all whitespaces (possibly overlooked, but + * preserving this behavior for compatibility reasons). Use {@link #parseBooleanStrict(String, boolean)} instead. * * @param value text to parse. - * @param defaultValue The default value to return if the provided value is null. + * @param defaultValue The default value to return if the provided value is null or blank. * @return see {@link #parseBoolean(String)} */ + @Deprecated public static boolean parseBoolean(String value, boolean defaultValue) { - if (hasText(value)) { - return parseBoolean(value); - } - return defaultValue; - } - - public static Boolean parseBoolean(String value, Boolean defaultValue) { - if (hasText(value)) { - return parseBoolean(value); + if (value == null || value.isBlank()) { + return defaultValue; } - return defaultValue; + return parseBoolean(value); } - /** - * Returns {@code false} if text is in "false", "0", "off", "no"; else, {@code true}. - * - * @deprecated Only kept to provide automatic upgrades for pre 6.0 indices. Use {@link #parseBoolean(String, Boolean)} instead. - */ @Deprecated - public static Boolean parseBooleanLenient(String value, Boolean defaultValue) { - if (value == null) { // only for the null case we do that here! + public static Boolean parseBoolean(String value, Boolean defaultValue) { + if (value == null || value.isBlank()) { return defaultValue; } - return parseBooleanLenient(value, false); + return parseBoolean(value); } /** - * Returns {@code false} if text is in "false", "0", "off", "no"; else, {@code true}. + * Parses a string representation of a boolean value to boolean. + * Analogous to {@link #parseBoolean(char[], int, int, boolean)}. * - * @deprecated Only kept to provide automatic upgrades for pre 6.0 indices. Use {@link #parseBoolean(String, boolean)} instead. + * @return true iff the sequence of chars is "true", false iff the sequence of + * chars is "false", or the provided default value iff either text is null or length == 0. + * @throws IllegalArgumentException if the string cannot be parsed to boolean. 
*/ - @Deprecated - public static boolean parseBooleanLenient(String value, boolean defaultValue) { - if (value == null) { + public static boolean parseBooleanStrict(String value, boolean defaultValue) { + if (value == null || value.length() == 0) { return defaultValue; } - return !(value.equals("false") || value.equals("0") || value.equals("off") || value.equals("no")); + return parseBoolean(value); } /** @@ -163,71 +187,4 @@ public static boolean isFalse(String value) { public static boolean isTrue(String value) { return "true".equals(value); } - - /** - * Returns {@code false} if text is in "false", "0", "off", "no"; else, {@code true}. - * - * @deprecated Only kept to provide automatic upgrades for pre 6.0 indices. Use {@link #parseBoolean(char[], int, int, boolean)} instead - */ - @Deprecated - public static boolean parseBooleanLenient(char[] text, int offset, int length, boolean defaultValue) { - if (text == null || length == 0) { - return defaultValue; - } - if (length == 1) { - return text[offset] != '0'; - } - if (length == 2) { - return !(text[offset] == 'n' && text[offset + 1] == 'o'); - } - if (length == 3) { - return !(text[offset] == 'o' && text[offset + 1] == 'f' && text[offset + 2] == 'f'); - } - if (length == 5) { - return !(text[offset] == 'f' - && text[offset + 1] == 'a' - && text[offset + 2] == 'l' - && text[offset + 3] == 's' - && text[offset + 4] == 'e'); - } - return true; - } - - /** - * returns true if the a sequence of chars is one of "true","false","on","off","yes","no","0","1" - * - * @param text sequence to check - * @param offset offset to start - * @param length length to check - * - * @deprecated Only kept to provide automatic upgrades for pre 6.0 indices. Use {@link #isBoolean(char[], int, int)} instead. - */ - @Deprecated - public static boolean isBooleanLenient(char[] text, int offset, int length) { - if (text == null || length == 0) { - return false; - } - if (length == 1) { - return text[offset] == '0' || text[offset] == '1'; - } - if (length == 2) { - return (text[offset] == 'n' && text[offset + 1] == 'o') || (text[offset] == 'o' && text[offset + 1] == 'n'); - } - if (length == 3) { - return (text[offset] == 'o' && text[offset + 1] == 'f' && text[offset + 2] == 'f') - || (text[offset] == 'y' && text[offset + 1] == 'e' && text[offset + 2] == 's'); - } - if (length == 4) { - return (text[offset] == 't' && text[offset + 1] == 'r' && text[offset + 2] == 'u' && text[offset + 3] == 'e'); - } - if (length == 5) { - return (text[offset] == 'f' - && text[offset + 1] == 'a' - && text[offset + 2] == 'l' - && text[offset + 3] == 's' - && text[offset + 4] == 'e'); - } - return false; - } - } diff --git a/server/src/test/java/org/opensearch/common/BooleansTests.java b/libs/common/src/test/java/org/opensearch/common/BooleansTests.java similarity index 58% rename from server/src/test/java/org/opensearch/common/BooleansTests.java rename to libs/common/src/test/java/org/opensearch/common/BooleansTests.java index 7e4a0ad8e456b..578ec742d126d 100644 --- a/server/src/test/java/org/opensearch/common/BooleansTests.java +++ b/libs/common/src/test/java/org/opensearch/common/BooleansTests.java @@ -34,10 +34,7 @@ import org.opensearch.test.OpenSearchTestCase; -import java.util.Locale; - import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.nullValue; public class BooleansTests extends OpenSearchTestCase { private static final String[] NON_BOOLEANS = new String[] { @@ -81,8 +78,23 @@ public void testParseBooleanWithFallback() { 
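+ // The deprecated string overloads treat blank input like null: the default value is + // returned for whitespace-only strings instead of throwing IllegalArgumentException.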
assertFalse(Booleans.parseBoolean(null, Boolean.FALSE)); assertTrue(Booleans.parseBoolean(null, Boolean.TRUE)); + assertFalse(Booleans.parseBoolean("", false)); + assertTrue(Booleans.parseBoolean("", true)); + assertNull(Booleans.parseBoolean("", null)); + assertFalse(Booleans.parseBoolean("", Boolean.FALSE)); + assertTrue(Booleans.parseBoolean("", Boolean.TRUE)); + + assertFalse(Booleans.parseBoolean(" \t\n", false)); + assertTrue(Booleans.parseBoolean(" \t\n", true)); + assertNull(Booleans.parseBoolean(" \t\n", null)); + assertFalse(Booleans.parseBoolean(" \t\n", Boolean.FALSE)); + assertTrue(Booleans.parseBoolean(" \t\n", Boolean.TRUE)); + assertTrue(Booleans.parseBoolean("true", randomFrom(Boolean.TRUE, Boolean.FALSE, null))); assertFalse(Booleans.parseBoolean("false", randomFrom(Boolean.TRUE, Boolean.FALSE, null))); + + assertTrue(Booleans.parseBoolean(new char[0], 0, 0, true)); + assertFalse(Booleans.parseBoolean(new char[0], 0, 0, false)); } public void testParseNonBooleanWithFallback() { @@ -109,56 +121,12 @@ public void testParseNonBoolean() { } } - public void testIsBooleanLenient() { - String[] booleans = new String[] { "true", "false", "on", "off", "yes", "no", "0", "1" }; - String[] notBooleans = new String[] { "11", "00", "sdfsdfsf", "F", "T" }; - assertThat(Booleans.isBooleanLenient(null, 0, 1), is(false)); - - for (String b : booleans) { - String t = "prefix" + b + "suffix"; - assertTrue( - "failed to recognize [" + b + "] as boolean", - Booleans.isBooleanLenient(t.toCharArray(), "prefix".length(), b.length()) - ); - } - - for (String nb : notBooleans) { - String t = "prefix" + nb + "suffix"; - assertFalse("recognized [" + nb + "] as boolean", Booleans.isBooleanLenient(t.toCharArray(), "prefix".length(), nb.length())); - } - } - - public void testParseBooleanLenient() { - assertThat(Booleans.parseBooleanLenient(randomFrom("true", "on", "yes", "1"), randomBoolean()), is(true)); - assertThat(Booleans.parseBooleanLenient(randomFrom("false", "off", "no", "0"), randomBoolean()), is(false)); - assertThat(Booleans.parseBooleanLenient(randomFrom("true", "on", "yes").toUpperCase(Locale.ROOT), randomBoolean()), is(true)); - assertThat(Booleans.parseBooleanLenient(null, false), is(false)); - assertThat(Booleans.parseBooleanLenient(null, true), is(true)); - - assertThat( - Booleans.parseBooleanLenient(randomFrom("true", "on", "yes", "1"), randomFrom(Boolean.TRUE, Boolean.FALSE, null)), - is(true) - ); - assertThat( - Booleans.parseBooleanLenient(randomFrom("false", "off", "no", "0"), randomFrom(Boolean.TRUE, Boolean.FALSE, null)), - is(false) - ); - assertThat( - Booleans.parseBooleanLenient( - randomFrom("true", "on", "yes").toUpperCase(Locale.ROOT), - randomFrom(Boolean.TRUE, Boolean.FALSE, null) - ), - is(true) - ); - assertThat(Booleans.parseBooleanLenient(null, Boolean.FALSE), is(false)); - assertThat(Booleans.parseBooleanLenient(null, Boolean.TRUE), is(true)); - assertThat(Booleans.parseBooleanLenient(null, null), nullValue()); - - char[] chars = randomFrom("true", "on", "yes", "1").toCharArray(); - assertThat(Booleans.parseBooleanLenient(chars, 0, chars.length, randomBoolean()), is(true)); - chars = randomFrom("false", "off", "no", "0").toCharArray(); - assertThat(Booleans.parseBooleanLenient(chars, 0, chars.length, randomBoolean()), is(false)); - chars = randomFrom("true", "on", "yes").toUpperCase(Locale.ROOT).toCharArray(); - assertThat(Booleans.parseBooleanLenient(chars, 0, chars.length, randomBoolean()), is(true)); + public void testParseBooleanStrict() { + 
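// The strict variant only falls back to the default for null or empty input; any other + // unparsable value, including whitespace-only strings, throws IllegalArgumentException. +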
assertTrue(Booleans.parseBooleanStrict("true", false)); + assertFalse(Booleans.parseBooleanStrict("false", true)); + assertTrue(Booleans.parseBooleanStrict(null, true)); + assertFalse(Booleans.parseBooleanStrict("", false)); + expectThrows(IllegalArgumentException.class, () -> Booleans.parseBooleanStrict("foobar", false)); + expectThrows(IllegalArgumentException.class, () -> Booleans.parseBooleanStrict(" \t\n", false)); } } diff --git a/libs/core/build.gradle b/libs/core/build.gradle index 46b6f4471731f..4850b5aea5c85 100644 --- a/libs/core/build.gradle +++ b/libs/core/build.gradle @@ -33,7 +33,7 @@ import org.opensearch.gradle.info.BuildParams apply plugin: 'opensearch.publish' base { - archivesBaseName = 'opensearch-core' + archivesName = 'opensearch-core' } // we want to keep the JDKs in our IDEs set to JDK 8 until minimum JDK is bumped to 11 so we do not include this source set in our IDEs diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java index 8d9ee73a02c1d..d94be3f25b53d 100644 --- a/libs/core/src/main/java/org/opensearch/Version.java +++ b/libs/core/src/main/java/org/opensearch/Version.java @@ -97,6 +97,7 @@ public class Version implements Comparable<Version>, ToXContentFragment { public static final Version V_2_10_1 = new Version(2100199, org.apache.lucene.util.Version.LUCENE_9_7_0); public static final Version V_2_11_0 = new Version(2110099, org.apache.lucene.util.Version.LUCENE_9_7_0); public static final Version V_2_11_1 = new Version(2110199, org.apache.lucene.util.Version.LUCENE_9_7_0); + public static final Version V_2_11_2 = new Version(2110299, org.apache.lucene.util.Version.LUCENE_9_7_0); public static final Version V_2_12_0 = new Version(2120099, org.apache.lucene.util.Version.LUCENE_9_8_0); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_8_0); public static final Version CURRENT = V_3_0_0; diff --git a/libs/core/src/main/java/org/opensearch/core/common/bytes/AbstractBytesReference.java b/libs/core/src/main/java/org/opensearch/core/common/bytes/AbstractBytesReference.java index 8c1efcd00c24e..a2bf7e499dee8 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/bytes/AbstractBytesReference.java +++ b/libs/core/src/main/java/org/opensearch/core/common/bytes/AbstractBytesReference.java @@ -53,11 +53,6 @@ public abstract class AbstractBytesReference implements BytesReference { private Integer hash = null; private static final int MAX_UTF16_LENGTH = Integer.MAX_VALUE >> 1; - @Override - public int getInt(int index) { - return (get(index) & 0xFF) << 24 | (get(index + 1) & 0xFF) << 16 | (get(index + 2) & 0xFF) << 8 | get(index + 3) & 0xFF; - } - @Override public int indexOf(byte marker, int from) { final int to = length(); diff --git a/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesArray.java b/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesArray.java index ae04ddcc19eee..d7a8414935143 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesArray.java +++ b/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesArray.java @@ -32,6 +32,7 @@ package org.opensearch.core.common.bytes; +import org.apache.lucene.util.BitUtil; import org.apache.lucene.util.BytesRef; import org.opensearch.core.common.io.stream.StreamInput; @@ -83,6 +84,11 @@ public byte get(int index) { return bytes[offset + index]; } + @Override + public int getInt(int index) { + return (int) BitUtil.VH_BE_INT.get(bytes, offset + index); + } +
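// Note: BitUtil.VH_BE_INT (used in getInt above) is a big-endian VarHandle view over byte[], + // of the kind produced by java.lang.invoke.MethodHandles.byteArrayViewVarHandle, so the four + // bytes at [offset + index] are read with a single bounds-checked load rather than four + // separate byte reads stitched together with shifts. +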
@Override public int length() { return length; } diff --git a/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesReference.java b/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesReference.java index 9d24d3653397b..8cb65c9feb1ca 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesReference.java +++ b/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesReference.java @@ -153,9 +153,11 @@ static BytesReference fromByteArray(ByteArray byteArray, int length) { byte get(int index); /** - * Returns the integer read from the 4 bytes (BE) starting at the given index. + * Returns the integer read from the 4 bytes (big endian) starting at the given index. */ - int getInt(int index); + default int getInt(int index) { + return ((get(index) & 0xFF) << 24) | ((get(index + 1) & 0xFF) << 16) | ((get(index + 2) & 0xFF) << 8) | (get(index + 3) & 0xFF); + } /** * Finds the index of the first occurrence of the given marker within the given bounds. diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/BytesStreamInput.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/BytesStreamInput.java index 30c84708728ef..cad43f817faaf 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/BytesStreamInput.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/BytesStreamInput.java @@ -8,6 +8,7 @@ package org.opensearch.core.common.io.stream; +import org.apache.lucene.util.BitUtil; import org.apache.lucene.util.BytesRef; import java.io.EOFException; @@ -121,4 +122,33 @@ public int read() throws IOException { return bytes[pos++] & 0xFF; } + @Override + public short readShort() throws IOException { + if (available() < Short.BYTES) { + throw new EOFException(); + } + short value = (short) BitUtil.VH_BE_SHORT.get(bytes, pos); + pos += Short.BYTES; + return value; + } + + @Override + public int readInt() throws IOException { + if (available() < Integer.BYTES) { + throw new EOFException(); + } + int value = (int) BitUtil.VH_BE_INT.get(bytes, pos); + pos += Integer.BYTES; + return value; + } + + @Override + public long readLong() throws IOException { + if (available() < Long.BYTES) { + throw new EOFException(); + } + long value = (long) BitUtil.VH_BE_LONG.get(bytes, pos); + pos += Long.BYTES; + return value; + } } diff --git a/libs/core/src/main/java/org/opensearch/core/util/BytesRefUtils.java b/libs/core/src/main/java/org/opensearch/core/util/BytesRefUtils.java index 30c9f182fcae6..2aad068534b9d 100644 --- a/libs/core/src/main/java/org/opensearch/core/util/BytesRefUtils.java +++ b/libs/core/src/main/java/org/opensearch/core/util/BytesRefUtils.java @@ -32,6 +32,7 @@ package org.opensearch.core.util; +import org.apache.lucene.util.BitUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefArray; import org.apache.lucene.util.BytesRefBuilder; @@ -103,12 +104,12 @@ public static int sortAndDedup(final BytesRefArray bytes, final int[] indices) { return uniqueCount; } + /** + * Decodes a long value written as bytes in big endian order.
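+ * For example, the eight bytes {@code 00 00 00 00 00 00 00 2a} decode to {@code 42L}.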
+ * @param bytes in big endian order + * @return long value + */ public static long bytesToLong(BytesRef bytes) { - int high = (bytes.bytes[bytes.offset + 0] << 24) | ((bytes.bytes[bytes.offset + 1] & 0xff) << 16) | ((bytes.bytes[bytes.offset + 2] - & 0xff) << 8) | (bytes.bytes[bytes.offset + 3] & 0xff); - int low = (bytes.bytes[bytes.offset + 4] << 24) | ((bytes.bytes[bytes.offset + 5] & 0xff) << 16) | ((bytes.bytes[bytes.offset + 6] - & 0xff) << 8) | (bytes.bytes[bytes.offset + 7] & 0xff); - return (((long) high) << 32) | (low & 0x0ffffffffL); + return (long) BitUtil.VH_BE_LONG.get(bytes.bytes, bytes.offset); } - } diff --git a/libs/core/src/test/java/org/opensearch/core/util/BytesRefUtilsTests.java b/libs/core/src/test/java/org/opensearch/core/util/BytesRefUtilsTests.java index 421263b883f2a..214f9292ae3a5 100644 --- a/libs/core/src/test/java/org/opensearch/core/util/BytesRefUtilsTests.java +++ b/libs/core/src/test/java/org/opensearch/core/util/BytesRefUtilsTests.java @@ -12,7 +12,6 @@ import org.apache.lucene.util.BytesRefArray; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.Counter; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.test.OpenSearchTestCase; import java.nio.ByteBuffer; @@ -90,8 +89,12 @@ public void testSortByteRefArray() { } public void testBytesToLong() { - final long value = randomLong(); - final BytesReference buffer = BytesReference.fromByteBuffer(ByteBuffer.allocate(8).putLong(value).flip()); - assertThat(BytesRefUtils.bytesToLong(buffer.toBytesRef()), equalTo(value)); + long value = randomLong(); + int paddingStart = randomIntBetween(0, 10); + int paddingEnd = randomIntBetween(0, 10); + byte[] bytes = new byte[paddingStart + Long.BYTES + paddingEnd]; + ByteBuffer.wrap(bytes).putLong(paddingStart, value); + BytesRef bytesRef = new BytesRef(bytes, paddingStart, Long.BYTES); + assertThat(BytesRefUtils.bytesToLong(bytesRef), equalTo(value)); } } diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index d7af8621c478a..fb51a0bb7f157 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -33,7 +33,6 @@ import com.github.jengelman.gradle.plugins.shadow.ShadowBasePlugin apply plugin: 'opensearch.validate-rest-spec' apply plugin: 'opensearch.yaml-rest-test' -apply plugin: 'com.github.johnrengelman.shadow' opensearchplugin { description 'An easy, safe and fast scripting language for OpenSearch' @@ -62,30 +61,6 @@ dependencies { api project('spi') } -test { - doFirst { - test.classpath -= project.files(project.tasks.named('shadowJar')) - test.classpath -= project.configurations.getByName(ShadowBasePlugin.CONFIGURATION_NAME) - test.classpath += project.extensions.getByType(SourceSetContainer).getByName(SourceSet.MAIN_SOURCE_SET_NAME).runtimeClasspath - } -} - -shadowJar { - archiveClassifier.set('') - relocate 'org.objectweb', 'org.opensearch.repackage.org.objectweb' - dependencies { - include(dependency("org.ow2.asm:asm:${versions.asm}")) - include(dependency("org.ow2.asm:asm-util:${versions.asm}")) - include(dependency("org.ow2.asm:asm-tree:${versions.asm}")) - include(dependency("org.ow2.asm:asm-commons:${versions.asm}")) - include(dependency("org.ow2.asm:asm-analysis:${versions.asm}")) - } -} - -tasks.validateNebulaPom.dependsOn tasks.generatePomFileForShadowPublication -tasks.validateShadowPom.dependsOn tasks.generatePomFileForNebulaPublication -tasks.withType(AbstractPublishToMaven)*.dependsOn "generatePomFileForShadowPublication", 
"generatePomFileForNebulaPublication" - tasks.named("dependencyLicenses").configure { mapping from: /asm-.*/, to: 'asm' } diff --git a/modules/lang-painless/spi/build.gradle b/modules/lang-painless/spi/build.gradle index 59a77870b4987..32556f907fdc0 100644 --- a/modules/lang-painless/spi/build.gradle +++ b/modules/lang-painless/spi/build.gradle @@ -33,7 +33,7 @@ apply plugin: 'opensearch.publish' base { group = 'org.opensearch.plugin' - archivesBaseName = 'opensearch-scripting-painless-spi' + archivesName = 'opensearch-scripting-painless-spi' } dependencies { diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/BasicMap.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/BasicMap.java new file mode 100644 index 0000000000000..6ddc22420416b --- /dev/null +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/BasicMap.java @@ -0,0 +1,126 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline.common; + +import java.util.Collection; +import java.util.Map; +import java.util.Set; +import java.util.function.BiConsumer; +import java.util.function.BiFunction; +import java.util.function.Function; + +/** + * Helper for map abstractions passed to scripting processors. Throws {@link UnsupportedOperationException} for almost + * all methods. Subclasses just need to implement get and put. + */ +abstract class BasicMap implements Map { + + /** + * No-args constructor. + */ + protected BasicMap() {} + + private static final String UNSUPPORTED_OP_ERR = " Method not supported in Search pipeline script"; + + @Override + public boolean isEmpty() { + throw new UnsupportedOperationException("isEmpty" + UNSUPPORTED_OP_ERR); + } + + public int size() { + throw new UnsupportedOperationException("size" + UNSUPPORTED_OP_ERR); + } + + public boolean containsKey(Object key) { + return get(key) != null; + } + + public boolean containsValue(Object value) { + throw new UnsupportedOperationException("containsValue" + UNSUPPORTED_OP_ERR); + } + + public Object remove(Object key) { + throw new UnsupportedOperationException("remove" + UNSUPPORTED_OP_ERR); + } + + public void putAll(Map m) { + throw new UnsupportedOperationException("putAll" + UNSUPPORTED_OP_ERR); + } + + public void clear() { + throw new UnsupportedOperationException("clear" + UNSUPPORTED_OP_ERR); + } + + public Set keySet() { + throw new UnsupportedOperationException("keySet" + UNSUPPORTED_OP_ERR); + } + + public Collection values() { + throw new UnsupportedOperationException("values" + UNSUPPORTED_OP_ERR); + } + + public Set> entrySet() { + throw new UnsupportedOperationException("entrySet" + UNSUPPORTED_OP_ERR); + } + + @Override + public Object getOrDefault(Object key, Object defaultValue) { + throw new UnsupportedOperationException("getOrDefault" + UNSUPPORTED_OP_ERR); + } + + @Override + public void forEach(BiConsumer action) { + throw new UnsupportedOperationException("forEach" + UNSUPPORTED_OP_ERR); + } + + @Override + public void replaceAll(BiFunction function) { + throw new UnsupportedOperationException("replaceAll" + UNSUPPORTED_OP_ERR); + } + + @Override + public Object putIfAbsent(String key, Object value) { + throw new UnsupportedOperationException("putIfAbsent" + UNSUPPORTED_OP_ERR); + } + + @Override + public boolean remove(Object 
key, Object value) { + throw new UnsupportedOperationException("remove" + UNSUPPORTED_OP_ERR); + } + + @Override + public boolean replace(String key, Object oldValue, Object newValue) { + throw new UnsupportedOperationException("replace" + UNSUPPORTED_OP_ERR); + } + + @Override + public Object replace(String key, Object value) { + throw new UnsupportedOperationException("replace" + UNSUPPORTED_OP_ERR); + } + + @Override + public Object computeIfAbsent(String key, Function<? super String, ? extends Object> mappingFunction) { + throw new UnsupportedOperationException("computeIfAbsent" + UNSUPPORTED_OP_ERR); + } + + @Override + public Object computeIfPresent(String key, BiFunction<? super String, ? super Object, ? extends Object> remappingFunction) { + throw new UnsupportedOperationException("computeIfPresent" + UNSUPPORTED_OP_ERR); + } + + @Override + public Object compute(String key, BiFunction<? super String, ? super Object, ? extends Object> remappingFunction) { + throw new UnsupportedOperationException("compute" + UNSUPPORTED_OP_ERR); + } + + @Override + public Object merge(String key, Object value, BiFunction<? super Object, ? super Object, ? extends Object> remappingFunction) { + throw new UnsupportedOperationException("merge" + UNSUPPORTED_OP_ERR); + } +} diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/CollapseResponseProcessor.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/CollapseResponseProcessor.java new file mode 100644 index 0000000000000..3e6c4fef6a559 --- /dev/null +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/CollapseResponseProcessor.java @@ -0,0 +1,122 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline.common; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.document.DocumentField; +import org.opensearch.ingest.ConfigurationUtils; +import org.opensearch.search.SearchHit; +import org.opensearch.search.SearchHits; +import org.opensearch.search.pipeline.AbstractProcessor; +import org.opensearch.search.pipeline.Processor; +import org.opensearch.search.pipeline.SearchResponseProcessor; +import org.opensearch.search.pipeline.common.helpers.SearchResponseUtil; + +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * A simple implementation of field collapsing on search responses. Note that this is not going to work as well as + * field collapsing at the shard level, as implemented with the "collapse" parameter in a search request. Mostly + * just using this to demo the oversample / truncate_hits processors. + */ +public class CollapseResponseProcessor extends AbstractProcessor implements SearchResponseProcessor { + /** + * Key to reference this processor type from a search pipeline.
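+ * For example, a pipeline definition might include {@code "response_processors": [{"collapse": {"field": "category"}}]}, + * where the field name is illustrative.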
+ */ + public static final String TYPE = "collapse"; + static final String COLLAPSE_FIELD = "field"; + private final String collapseField; + + private CollapseResponseProcessor(String tag, String description, boolean ignoreFailure, String collapseField) { + super(tag, description, ignoreFailure); + this.collapseField = Objects.requireNonNull(collapseField); + } + + @Override + public String getType() { + return TYPE; + } + + @Override + public SearchResponse processResponse(SearchRequest request, SearchResponse response) { + + if (response.getHits() != null) { + if (response.getHits().getCollapseField() != null) { + throw new IllegalStateException( + "Cannot collapse on " + collapseField + ". Results already collapsed on " + response.getHits().getCollapseField() + ); + } + Map collapsedHits = new LinkedHashMap<>(); + List collapseValues = new ArrayList<>(); + for (SearchHit hit : response.getHits()) { + Object fieldValue = null; + DocumentField docField = hit.getFields().get(collapseField); + if (docField != null) { + if (docField.getValues().size() > 1) { + throw new IllegalStateException( + "Failed to collapse " + hit.getId() + ": doc has multiple values for field " + collapseField + ); + } + fieldValue = docField.getValues().get(0); + } else if (hit.getSourceAsMap() != null) { + fieldValue = hit.getSourceAsMap().get(collapseField); + } + String fieldValueString; + if (fieldValue == null) { + fieldValueString = "__missing__"; + } else { + fieldValueString = fieldValue.toString(); + } + + // Results are already sorted by sort criterion. Only keep the first hit for each field. + if (collapsedHits.containsKey(fieldValueString) == false) { + collapsedHits.put(fieldValueString, hit); + collapseValues.add(fieldValue); + } + } + SearchHit[] newHits = new SearchHit[collapsedHits.size()]; + int i = 0; + for (SearchHit collapsedHit : collapsedHits.values()) { + newHits[i++] = collapsedHit; + } + SearchHits searchHits = new SearchHits( + newHits, + response.getHits().getTotalHits(), + response.getHits().getMaxScore(), + response.getHits().getSortFields(), + collapseField, + collapseValues.toArray() + ); + return SearchResponseUtil.replaceHits(searchHits, response); + } + return response; + } + + static class Factory implements Processor.Factory { + + @Override + public CollapseResponseProcessor create( + Map> processorFactories, + String tag, + String description, + boolean ignoreFailure, + Map config, + PipelineContext pipelineContext + ) { + String collapseField = ConfigurationUtils.readStringProperty(TYPE, tag, config, COLLAPSE_FIELD); + return new CollapseResponseProcessor(tag, description, ignoreFailure, collapseField); + } + } + +} diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/OversampleRequestProcessor.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/OversampleRequestProcessor.java new file mode 100644 index 0000000000000..182cf6ba79504 --- /dev/null +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/OversampleRequestProcessor.java @@ -0,0 +1,83 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.pipeline.common; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.ingest.ConfigurationUtils; +import org.opensearch.search.SearchService; +import org.opensearch.search.pipeline.AbstractProcessor; +import org.opensearch.search.pipeline.PipelineProcessingContext; +import org.opensearch.search.pipeline.Processor; +import org.opensearch.search.pipeline.SearchRequestProcessor; +import org.opensearch.search.pipeline.StatefulSearchRequestProcessor; +import org.opensearch.search.pipeline.common.helpers.ContextUtils; + +import java.util.Map; + +import static org.opensearch.search.pipeline.common.helpers.ContextUtils.applyContextPrefix; + +/** + * Multiplies the "size" parameter on the {@link SearchRequest} by the given scaling factor, storing the original value + * in the request context as "original_size". + */ +public class OversampleRequestProcessor extends AbstractProcessor implements StatefulSearchRequestProcessor { + + /** + * Key to reference this processor type from a search pipeline. + */ + public static final String TYPE = "oversample"; + static final String SAMPLE_FACTOR = "sample_factor"; + static final String ORIGINAL_SIZE = "original_size"; + private final double sampleFactor; + private final String contextPrefix; + + private OversampleRequestProcessor(String tag, String description, boolean ignoreFailure, double sampleFactor, String contextPrefix) { + super(tag, description, ignoreFailure); + this.sampleFactor = sampleFactor; + this.contextPrefix = contextPrefix; + } + + @Override + public SearchRequest processRequest(SearchRequest request, PipelineProcessingContext requestContext) { + if (request.source() != null) { + int originalSize = request.source().size(); + if (originalSize == -1) { + originalSize = SearchService.DEFAULT_SIZE; + } + requestContext.setAttribute(applyContextPrefix(contextPrefix, ORIGINAL_SIZE), originalSize); + int newSize = (int) Math.ceil(originalSize * sampleFactor); + request.source().size(newSize); + } + return request; + } + + @Override + public String getType() { + return TYPE; + } + + static class Factory implements Processor.Factory<SearchRequestProcessor> { + @Override + public OversampleRequestProcessor create( + Map<String, Processor.Factory<SearchRequestProcessor>> processorFactories, + String tag, + String description, + boolean ignoreFailure, + Map<String, Object> config, + PipelineContext pipelineContext + ) { + double sampleFactor = ConfigurationUtils.readDoubleProperty(TYPE, tag, config, SAMPLE_FACTOR); + if (sampleFactor < 1.0) { + throw ConfigurationUtils.newConfigurationException(TYPE, tag, SAMPLE_FACTOR, "Value must be >= 1.0"); + } + String contextPrefix = ConfigurationUtils.readOptionalStringProperty(TYPE, tag, config, ContextUtils.CONTEXT_PREFIX_PARAMETER); + return new OversampleRequestProcessor(tag, description, ignoreFailure, sampleFactor, contextPrefix); + } + } +} diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/ScriptRequestProcessor.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/ScriptRequestProcessor.java index 90f71fd1754e4..a4052d0892ee6 100644 --- a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/ScriptRequestProcessor.java +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/ScriptRequestProcessor.java @@ -23,9 +23,10 @@ import org.opensearch.script.ScriptType; import org.opensearch.script.SearchScript; import org.opensearch.search.pipeline.AbstractProcessor; +import
org.opensearch.search.pipeline.PipelineProcessingContext; import org.opensearch.search.pipeline.Processor; import org.opensearch.search.pipeline.SearchRequestProcessor; -import org.opensearch.search.pipeline.common.helpers.SearchRequestMap; +import org.opensearch.search.pipeline.StatefulSearchRequestProcessor; import java.io.InputStream; import java.util.HashMap; @@ -38,7 +39,7 @@ * Processor that evaluates a script with a search request in its context * and then returns the modified search request. */ -public final class ScriptRequestProcessor extends AbstractProcessor implements SearchRequestProcessor { +public final class ScriptRequestProcessor extends AbstractProcessor implements StatefulSearchRequestProcessor { /** * Key to reference this processor type from a search pipeline. */ @@ -72,15 +73,8 @@ public final class ScriptRequestProcessor extends AbstractProcessor implements S this.scriptService = scriptService; } - /** - * Executes the script with the search request in context. - * - * @param request The search request passed into the script context. - * @return The modified search request. - * @throws Exception if an error occurs while processing the request. - */ @Override - public SearchRequest processRequest(SearchRequest request) throws Exception { + public SearchRequest processRequest(SearchRequest request, PipelineProcessingContext requestContext) throws Exception { // assert request is not null and source is not null if (request == null || request.source() == null) { throw new IllegalArgumentException("search request must not be null"); @@ -93,10 +87,33 @@ public SearchRequest processRequest(SearchRequest request) throws Exception { searchScript = precompiledSearchScript; } // execute the script with the search request in context - searchScript.execute(Map.of("_source", new SearchRequestMap(request))); + searchScript.execute(Map.of("_source", new SearchRequestMap(request), "request_context", new RequestContextMap(requestContext))); return request; } + private static class RequestContextMap extends BasicMap { + private final PipelineProcessingContext pipelinedRequestContext; + + private RequestContextMap(PipelineProcessingContext pipelinedRequestContext) { + this.pipelinedRequestContext = pipelinedRequestContext; + } + + @Override + public Object get(Object key) { + if (key instanceof String) { + return pipelinedRequestContext.getAttribute(key.toString()); + } + return null; + } + + @Override + public Object put(String key, Object value) { + Object originalValue = get(key); + pipelinedRequestContext.setAttribute(key, value); + return originalValue; + } + } + /** * Returns the type of the processor. 
* diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java index 49681b80fdead..5378a6721efb2 100644 --- a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java @@ -38,12 +38,21 @@ public Map<String, Processor.Factory<SearchRequestProcessor>> getRequestProcesso FilterQueryRequestProcessor.TYPE, new FilterQueryRequestProcessor.Factory(parameters.namedXContentRegistry), ScriptRequestProcessor.TYPE, - new ScriptRequestProcessor.Factory(parameters.scriptService) + new ScriptRequestProcessor.Factory(parameters.scriptService), + OversampleRequestProcessor.TYPE, + new OversampleRequestProcessor.Factory() ); } @Override public Map<String, Processor.Factory<SearchResponseProcessor>> getResponseProcessors(Parameters parameters) { - return Map.of(RenameFieldResponseProcessor.TYPE, new RenameFieldResponseProcessor.Factory()); + return Map.of( + RenameFieldResponseProcessor.TYPE, + new RenameFieldResponseProcessor.Factory(), + TruncateHitsResponseProcessor.TYPE, + new TruncateHitsResponseProcessor.Factory(), + CollapseResponseProcessor.TYPE, + new CollapseResponseProcessor.Factory() + ); } } diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchRequestMap.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchRequestMap.java new file mode 100644 index 0000000000000..c6430b96dcbed --- /dev/null +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchRequestMap.java @@ -0,0 +1,140 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline.common; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.search.builder.SearchSourceBuilder; + +import java.util.Map; + +/** + * A custom implementation of {@link Map} that provides access to the properties of a {@link SearchRequest}'s + * {@link SearchSourceBuilder}. The class allows retrieving and modifying specific properties of the search request. + */ +class SearchRequestMap extends BasicMap implements Map<String, Object> { + + private final SearchSourceBuilder source; + + /** + * Constructs a new instance of the {@link SearchRequestMap} with the provided {@link SearchRequest}. + * + * @param searchRequest The SearchRequest containing the SearchSourceBuilder to be accessed. + */ + public SearchRequestMap(SearchRequest searchRequest) { + source = searchRequest.source(); + } + + /** + * Checks if the SearchSourceBuilder is empty. + * + * @return {@code true} if the SearchSourceBuilder is empty, {@code false} otherwise. + */ + @Override + public boolean isEmpty() { + return source == null; + } + + /** + * Retrieves the value associated with the specified property from the SearchSourceBuilder. + * + * @param key The SearchSourceBuilder property whose value is to be retrieved. + * @return The value associated with the specified property or null if the property has not been initialized. + * @throws IllegalArgumentException if the property name is not a String. + * @throws SearchRequestMapProcessingException if the property is not supported.
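+ * For example, {@code get("size")} returns the value of {@code source.size()}.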
+ */ + @Override + public Object get(Object key) { + if (!(key instanceof String)) { + throw new IllegalArgumentException("key must be a String"); + } + // This is the explicit implementation of fetch value from source + switch ((String) key) { + case "from": + return source.from(); + case "size": + return source.size(); + case "explain": + return source.explain(); + case "version": + return source.version(); + case "seq_no_primary_term": + return source.seqNoAndPrimaryTerm(); + case "track_scores": + return source.trackScores(); + case "track_total_hits": + return source.trackTotalHitsUpTo(); + case "min_score": + return source.minScore(); + case "terminate_after": + return source.terminateAfter(); + case "profile": + return source.profile(); + default: + throw new SearchRequestMapProcessingException("Unsupported key: " + key); + } + } + + /** + * Sets the value for the specified property in the SearchSourceBuilder. + * + * @param key The property whose value is to be set. + * @param value The value to be set for the specified property. + * @return The original value associated with the property, or null if none existed. + * @throws IllegalArgumentException if the property is not a String. + * @throws SearchRequestMapProcessingException if the property is not supported or an error occurs during the setting. + */ + @Override + public Object put(String key, Object value) { + Object originalValue = get(key); + try { + switch (key) { + case "from": + source.from((Integer) value); + break; + case "size": + source.size((Integer) value); + break; + case "explain": + source.explain((Boolean) value); + break; + case "version": + source.version((Boolean) value); + break; + case "seq_no_primary_term": + source.seqNoAndPrimaryTerm((Boolean) value); + break; + case "track_scores": + source.trackScores((Boolean) value); + break; + case "track_total_hits": + source.trackTotalHitsUpTo((Integer) value); + break; + case "min_score": + source.minScore((Float) value); + break; + case "terminate_after": + source.terminateAfter((Integer) value); + break; + case "profile": + source.profile((Boolean) value); + break; + case "stats": // Not modifying stats, sorts, docvalue_fields, etc. as they require more complex handling + case "sort": + case "timeout": + case "docvalue_fields": + case "indices_boost": + default: + throw new SearchRequestMapProcessingException("Unsupported SearchRequest source property: " + key); + } + } catch (Exception e) { + throw new SearchRequestMapProcessingException("Error while setting value for SearchRequest source property: " + key, e); + } + return originalValue; + } +} diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/SearchRequestMapProcessingException.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchRequestMapProcessingException.java similarity index 76% rename from modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/SearchRequestMapProcessingException.java rename to modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchRequestMapProcessingException.java index cb1e45a20b624..2f00d0f82c2f1 100644 --- a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/SearchRequestMapProcessingException.java +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchRequestMapProcessingException.java @@ -6,7 +6,7 @@ * compatible open source license. 
*/ -package org.opensearch.search.pipeline.common.helpers; +package org.opensearch.search.pipeline.common; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchWrapperException; @@ -14,12 +14,12 @@ /** * An exception that indicates an error occurred while processing a {@link SearchRequestMap}. */ -public class SearchRequestMapProcessingException extends OpenSearchException implements OpenSearchWrapperException { +class SearchRequestMapProcessingException extends OpenSearchException implements OpenSearchWrapperException { /** * Constructs a new SearchRequestMapProcessingException with the specified message. * - * @param msg The error message. + * @param msg The error message. * @param args Arguments to substitute in the error message. */ public SearchRequestMapProcessingException(String msg, Object... args) { @@ -29,9 +29,9 @@ public SearchRequestMapProcessingException(String msg, Object... args) { /** * Constructs a new SearchRequestMapProcessingException with the specified message and cause. * - * @param msg The error message. + * @param msg The error message. * @param cause The cause of the exception. - * @param args Arguments to substitute in the error message. + * @param args Arguments to substitute in the error message. */ public SearchRequestMapProcessingException(String msg, Throwable cause, Object... args) { super(msg, cause, args); diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/TruncateHitsResponseProcessor.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/TruncateHitsResponseProcessor.java new file mode 100644 index 0000000000000..e3413bf41720f --- /dev/null +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/TruncateHitsResponseProcessor.java @@ -0,0 +1,96 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline.common; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.ingest.ConfigurationUtils; +import org.opensearch.search.SearchHit; +import org.opensearch.search.pipeline.AbstractProcessor; +import org.opensearch.search.pipeline.PipelineProcessingContext; +import org.opensearch.search.pipeline.Processor; +import org.opensearch.search.pipeline.SearchResponseProcessor; +import org.opensearch.search.pipeline.StatefulSearchResponseProcessor; +import org.opensearch.search.pipeline.common.helpers.ContextUtils; +import org.opensearch.search.pipeline.common.helpers.SearchResponseUtil; + +import java.util.Map; + +import static org.opensearch.search.pipeline.common.helpers.ContextUtils.applyContextPrefix; + +/** + * Truncates the returned search hits from the {@link SearchResponse}. If no target size is specified in the pipeline, then + * we try using the "original_size" value from the request context, which may have been set by {@link OversampleRequestProcessor}. + */ +public class TruncateHitsResponseProcessor extends AbstractProcessor implements StatefulSearchResponseProcessor { + /** + * Key to reference this processor type from a search pipeline. 
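+ * For example, {@code "response_processors": [{"truncate_hits": {"target_size": 10}}]} keeps at most the first 10 hits.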
+ */ + public static final String TYPE = "truncate_hits"; + static final String TARGET_SIZE = "target_size"; + private final int targetSize; + private final String contextPrefix; + + @Override + public String getType() { + return TYPE; + } + + private TruncateHitsResponseProcessor(String tag, String description, boolean ignoreFailure, int targetSize, String contextPrefix) { + super(tag, description, ignoreFailure); + this.targetSize = targetSize; + this.contextPrefix = contextPrefix; + } + + @Override + public SearchResponse processResponse(SearchRequest request, SearchResponse response, PipelineProcessingContext requestContext) { + int size; + if (targetSize < 0) { // No value specified in processor config. Use context value instead. + String key = applyContextPrefix(contextPrefix, OversampleRequestProcessor.ORIGINAL_SIZE); + Object o = requestContext.getAttribute(key); + if (o == null) { + throw new IllegalStateException("Must specify " + TARGET_SIZE + " unless an earlier processor set " + key); + } + size = (int) o; + } else { + size = targetSize; + } + if (response.getHits() != null && response.getHits().getHits().length > size) { + SearchHit[] newHits = new SearchHit[size]; + System.arraycopy(response.getHits().getHits(), 0, newHits, 0, size); + return SearchResponseUtil.replaceHits(newHits, response); + } + return response; + } + + static class Factory implements Processor.Factory { + @Override + public TruncateHitsResponseProcessor create( + Map> processorFactories, + String tag, + String description, + boolean ignoreFailure, + Map config, + PipelineContext pipelineContext + ) { + Integer targetSize = ConfigurationUtils.readIntProperty(TYPE, tag, config, TARGET_SIZE, null); + if (targetSize == null) { + // Use -1 as an "unset" marker to avoid repeated unboxing of an Integer. + targetSize = -1; + } else { + // Explicitly set values must be >= 0. + if (targetSize < 0) { + throw ConfigurationUtils.newConfigurationException(TYPE, tag, TARGET_SIZE, "Value must be >= 0"); + } + } + String contextPrefix = ConfigurationUtils.readOptionalStringProperty(TYPE, tag, config, ContextUtils.CONTEXT_PREFIX_PARAMETER); + return new TruncateHitsResponseProcessor(tag, description, ignoreFailure, targetSize, contextPrefix); + } + } +} diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/ContextUtils.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/ContextUtils.java new file mode 100644 index 0000000000000..9697da85dbecf --- /dev/null +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/ContextUtils.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline.common.helpers; + +/** + * Helpers for working with request-scoped context. + */ +public final class ContextUtils { + private ContextUtils() {} + + /** + * Parameter that can be passed to a stateful processor to avoid collisions between contextual variables by + * prefixing them with distinct qualifiers. + */ + public static final String CONTEXT_PREFIX_PARAMETER = "context_prefix"; + + /** + * Replaces a "global" variable name with one scoped to a given context prefix (unless prefix is null or empty). 
+ * @param contextPrefix the prefix qualifier for the variable + * @param variableName the generic "global" form of the context variable + * @return the variableName prefixed with contextPrefix followed by ".", or just variableName if contextPrefix is null or empty + */ + public static String applyContextPrefix(String contextPrefix, String variableName) { + String contextVariable; + if (contextPrefix != null && contextPrefix.isEmpty() == false) { + contextVariable = contextPrefix + "." + variableName; + } else { + contextVariable = variableName; + } + return contextVariable; + } +} diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/SearchRequestMap.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/SearchRequestMap.java deleted file mode 100644 index 7af3ac66be146..0000000000000 --- a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/SearchRequestMap.java +++ /dev/null @@ -1,395 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.search.pipeline.common.helpers; - -import org.opensearch.action.search.SearchRequest; -import org.opensearch.search.builder.SearchSourceBuilder; - -import java.util.Collection; -import java.util.Map; -import java.util.Set; -import java.util.function.BiConsumer; -import java.util.function.BiFunction; -import java.util.function.Function; - -/** - * A custom implementation of {@link Map} that provides access to the properties of a {@link SearchRequest}'s - * {@link SearchSourceBuilder}. The class allows retrieving and modifying specific properties of the search request. - */ -public class SearchRequestMap implements Map { - private static final String UNSUPPORTED_OP_ERR = " Method not supported in Search pipeline script"; - - private final SearchSourceBuilder source; - - /** - * Constructs a new instance of the {@link SearchRequestMap} with the provided {@link SearchRequest}. - * - * @param searchRequest The SearchRequest containing the SearchSourceBuilder to be accessed. - */ - public SearchRequestMap(SearchRequest searchRequest) { - source = searchRequest.source(); - } - - /** - * Retrieves the number of properties in the SearchSourceBuilder. - * - * @return The number of properties in the SearchSourceBuilder. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public int size() { - throw new UnsupportedOperationException("size" + UNSUPPORTED_OP_ERR); - } - - /** - * Checks if the SearchSourceBuilder is empty. - * - * @return {@code true} if the SearchSourceBuilder is empty, {@code false} otherwise. - */ - @Override - public boolean isEmpty() { - return source == null; - } - - /** - * Checks if the SearchSourceBuilder contains the specified property. - * - * @param key The property to check for. - * @return {@code true} if the SearchSourceBuilder contains the specified property, {@code false} otherwise. - */ - @Override - public boolean containsKey(Object key) { - return get(key) != null; - } - - /** - * Checks if the SearchSourceBuilder contains the specified value. - * - * @param value The value to check for. - * @return {@code true} if the SearchSourceBuilder contains the specified value, {@code false} otherwise. 
- * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public boolean containsValue(Object value) { - throw new UnsupportedOperationException("containsValue" + UNSUPPORTED_OP_ERR); - } - - /** - * Retrieves the value associated with the specified property from the SearchSourceBuilder. - * - * @param key The SearchSourceBuilder property whose value is to be retrieved. - * @return The value associated with the specified property or null if the property has not been initialized. - * @throws IllegalArgumentException if the property name is not a String. - * @throws SearchRequestMapProcessingException if the property is not supported. - */ - @Override - public Object get(Object key) { - if (!(key instanceof String)) { - throw new IllegalArgumentException("key must be a String"); - } - // This is the explicit implementation of fetch value from source - switch ((String) key) { - case "from": - return source.from(); - case "size": - return source.size(); - case "explain": - return source.explain(); - case "version": - return source.version(); - case "seq_no_primary_term": - return source.seqNoAndPrimaryTerm(); - case "track_scores": - return source.trackScores(); - case "track_total_hits": - return source.trackTotalHitsUpTo(); - case "min_score": - return source.minScore(); - case "terminate_after": - return source.terminateAfter(); - case "profile": - return source.profile(); - default: - throw new SearchRequestMapProcessingException("Unsupported key: " + key); - } - } - - /** - * Sets the value for the specified property in the SearchSourceBuilder. - * - * @param key The property whose value is to be set. - * @param value The value to be set for the specified property. - * @return The original value associated with the property, or null if none existed. - * @throws IllegalArgumentException if the property is not a String. - * @throws SearchRequestMapProcessingException if the property is not supported or an error occurs during the setting. - */ - @Override - public Object put(String key, Object value) { - Object originalValue = get(key); - try { - switch (key) { - case "from": - source.from((Integer) value); - break; - case "size": - source.size((Integer) value); - break; - case "explain": - source.explain((Boolean) value); - break; - case "version": - source.version((Boolean) value); - break; - case "seq_no_primary_term": - source.seqNoAndPrimaryTerm((Boolean) value); - break; - case "track_scores": - source.trackScores((Boolean) value); - break; - case "track_total_hits": - source.trackTotalHitsUpTo((Integer) value); - break; - case "min_score": - source.minScore((Float) value); - break; - case "terminate_after": - source.terminateAfter((Integer) value); - break; - case "profile": - source.profile((Boolean) value); - break; - case "stats": // Not modifying stats, sorts, docvalue_fields, etc. as they require more complex handling - case "sort": - case "timeout": - case "docvalue_fields": - case "indices_boost": - default: - throw new SearchRequestMapProcessingException("Unsupported SearchRequest source property: " + key); - } - } catch (Exception e) { - throw new SearchRequestMapProcessingException("Error while setting value for SearchRequest source property: " + key, e); - } - return originalValue; - } - - /** - * Removes the specified property from the SearchSourceBuilder. - * - * @param key The name of the property that will be removed. 
- * @return The value associated with the property before it was removed, or null if the property was not found. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Object remove(Object key) { - throw new UnsupportedOperationException("remove" + UNSUPPORTED_OP_ERR); - } - - /** - * Sets all the properties from the specified map to the SearchSourceBuilder. - * - * @param m The map containing the properties to be set. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public void putAll(Map m) { - throw new UnsupportedOperationException("putAll" + UNSUPPORTED_OP_ERR); - } - - /** - * Removes all properties from the SearchSourceBuilder. - * - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public void clear() { - throw new UnsupportedOperationException("clear" + UNSUPPORTED_OP_ERR); - } - - /** - * Returns a set view of the property names in the SearchSourceBuilder. - * - * @return A set view of the property names in the SearchSourceBuilder. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Set keySet() { - throw new UnsupportedOperationException("keySet" + UNSUPPORTED_OP_ERR); - } - - /** - * Returns a collection view of the property values in the SearchSourceBuilder. - * - * @return A collection view of the property values in the SearchSourceBuilder. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Collection values() { - throw new UnsupportedOperationException("values" + UNSUPPORTED_OP_ERR); - } - - /** - * Returns a set view of the properties in the SearchSourceBuilder. - * - * @return A set view of the properties in the SearchSourceBuilder. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Set> entrySet() { - throw new UnsupportedOperationException("entrySet" + UNSUPPORTED_OP_ERR); - } - - /** - * Returns the value to which the specified property has, or the defaultValue if the property is not present in the - * SearchSourceBuilder. - * - * @param key The property whose associated value is to be returned. - * @param defaultValue The default value to be returned if the property is not present. - * @return The value to which the specified property has, or the defaultValue if the property is not present. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Object getOrDefault(Object key, Object defaultValue) { - throw new UnsupportedOperationException("getOrDefault" + UNSUPPORTED_OP_ERR); - } - - /** - * Performs the given action for each property in the SearchSourceBuilder until all properties have been processed or the - * action throws an exception - * - * @param action The action to be performed for each property. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public void forEach(BiConsumer action) { - throw new UnsupportedOperationException("forEach" + UNSUPPORTED_OP_ERR); - } - - /** - * Replaces each property's value with the result of invoking the given function on that property until all properties have - * been processed or the function throws an exception. - * - * @param function The function to apply to each property. - * @throws UnsupportedOperationException always, as the method is not supported. 
- */ - @Override - public void replaceAll(BiFunction function) { - throw new UnsupportedOperationException("replaceAll" + UNSUPPORTED_OP_ERR); - } - - /** - * If the specified property is not already associated with a value, associates it with the given value and returns null, - * else returns the current value. - * - * @param key The property whose value is to be set if absent. - * @param value The value to be associated with the specified property. - * @return The current value associated with the property, or null if the property is not present. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Object putIfAbsent(String key, Object value) { - throw new UnsupportedOperationException("putIfAbsent" + UNSUPPORTED_OP_ERR); - } - - /** - * Removes the property only if it has the given value. - * - * @param key The property to be removed. - * @param value The value expected to be associated with the property. - * @return {@code true} if the entry was removed, {@code false} otherwise. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public boolean remove(Object key, Object value) { - throw new UnsupportedOperationException("remove" + UNSUPPORTED_OP_ERR); - } - - /** - * Replaces the specified property only if it has the given value. - * - * @param key The property to be replaced. - * @param oldValue The value expected to be associated with the property. - * @param newValue The value to be associated with the property. - * @return {@code true} if the property was replaced, {@code false} otherwise. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public boolean replace(String key, Object oldValue, Object newValue) { - throw new UnsupportedOperationException("replace" + UNSUPPORTED_OP_ERR); - } - - /** - * Replaces the specified property only if it has the given value. - * - * @param key The property to be replaced. - * @param value The value to be associated with the property. - * @return The previous value associated with the property, or null if the property was not found. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Object replace(String key, Object value) { - throw new UnsupportedOperationException("replace" + UNSUPPORTED_OP_ERR); - } - - /** - * The computed value associated with the property, or null if the property is not present. - * - * @param key The property whose value is to be computed if absent. - * @param mappingFunction The function to compute a value based on the property. - * @return The computed value associated with the property, or null if the property is not present. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Object computeIfAbsent(String key, Function mappingFunction) { - throw new UnsupportedOperationException("computeIfAbsent" + UNSUPPORTED_OP_ERR); - } - - /** - * If the value for the specified property is present, attempts to compute a new mapping given the property and its current - * mapped value. - * - * @param key The property for which the mapping is to be computed. - * @param remappingFunction The function to compute a new mapping. - * @return The new value associated with the property, or null if the property is not present. - * @throws UnsupportedOperationException always, as the method is not supported. 
- */ - @Override - public Object computeIfPresent(String key, BiFunction remappingFunction) { - throw new UnsupportedOperationException("computeIfPresent" + UNSUPPORTED_OP_ERR); - } - - /** - * If the value for the specified property is present, attempts to compute a new mapping given the property and its current - * mapped value, or removes the property if the computed value is null. - * - * @param key The property for which the mapping is to be computed. - * @param remappingFunction The function to compute a new mapping. - * @return The new value associated with the property, or null if the property is not present. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Object compute(String key, BiFunction remappingFunction) { - throw new UnsupportedOperationException("compute" + UNSUPPORTED_OP_ERR); - } - - /** - * If the specified property is not already associated with a value or is associated with null, associates it with the - * given non-null value. Otherwise, replaces the associated value with the results of applying the given - * remapping function to the current and new values. - * - * @param key The property for which the mapping is to be merged. - * @param value The non-null value to be merged with the existing value. - * @param remappingFunction The function to merge the existing and new values. - * @return The new value associated with the property, or null if the property is not present. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Object merge(String key, Object value, BiFunction remappingFunction) { - throw new UnsupportedOperationException("merge" + UNSUPPORTED_OP_ERR); - } -} diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/SearchResponseUtil.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/SearchResponseUtil.java new file mode 100644 index 0000000000000..0710548c6429f --- /dev/null +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/SearchResponseUtil.java @@ -0,0 +1,93 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline.common.helpers; + +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.search.SearchResponseSections; +import org.opensearch.search.SearchHit; +import org.opensearch.search.SearchHits; +import org.opensearch.search.aggregations.InternalAggregations; +import org.opensearch.search.internal.InternalSearchResponse; +import org.opensearch.search.profile.SearchProfileShardResults; + +/** + * Helper methods for manipulating {@link SearchResponse}. + */ +public final class SearchResponseUtil { + private SearchResponseUtil() { + + } + + /** + * Construct a new {@link SearchResponse} based on an existing one, replacing just the {@link SearchHits}. 
+ * @param newHits new {@link SearchHits} + * @param response the existing search response + * @return a new search response where the {@link SearchHits} has been replaced + */ + public static SearchResponse replaceHits(SearchHits newHits, SearchResponse response) { + SearchResponseSections searchResponseSections; + if (response.getAggregations() == null || response.getAggregations() instanceof InternalAggregations) { + // We either have no aggregations, or we have Writeable InternalAggregations. + // Either way, we can produce a Writeable InternalSearchResponse. + searchResponseSections = new InternalSearchResponse( + newHits, + (InternalAggregations) response.getAggregations(), + response.getSuggest(), + new SearchProfileShardResults(response.getProfileResults()), + response.isTimedOut(), + response.isTerminatedEarly(), + response.getNumReducePhases() + ); + } else { + // We have non-Writeable Aggregations, so the whole SearchResponseSections is non-Writeable. + searchResponseSections = new SearchResponseSections( + newHits, + response.getAggregations(), + response.getSuggest(), + response.isTimedOut(), + response.isTerminatedEarly(), + new SearchProfileShardResults(response.getProfileResults()), + response.getNumReducePhases() + ); + } + + return new SearchResponse( + searchResponseSections, + response.getScrollId(), + response.getTotalShards(), + response.getSuccessfulShards(), + response.getSkippedShards(), + response.getTook().millis(), + response.getShardFailures(), + response.getClusters(), + response.pointInTimeId() + ); + } + + /** + * Convenience method when only replacing the {@link SearchHit} array within the {@link SearchHits} in a {@link SearchResponse}. + * @param newHits the new array of {@link SearchHit} elements. + * @param response the search response to update + * @return a {@link SearchResponse} where the underlying array of {@link SearchHit} within the {@link SearchHits} has been replaced. + */ + public static SearchResponse replaceHits(SearchHit[] newHits, SearchResponse response) { + if (response.getHits() == null) { + throw new IllegalStateException("Response must have hits"); + } + SearchHits searchHits = new SearchHits( + newHits, + response.getHits().getTotalHits(), + response.getHits().getMaxScore(), + response.getHits().getSortFields(), + response.getHits().getCollapseField(), + response.getHits().getCollapseValues() + ); + return replaceHits(searchHits, response); + } +} diff --git a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/CollapseResponseProcessorTests.java b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/CollapseResponseProcessorTests.java new file mode 100644 index 0000000000000..cda011f24fea1 --- /dev/null +++ b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/CollapseResponseProcessorTests.java @@ -0,0 +1,86 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
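A typical consumer of the helper above filters the hit array and swaps it back in, leaving totals, aggregations, and profile results untouched. A minimal sketch under that assumption (keepFirst is illustrative, not part of the module; requires java.util.Arrays):

    // Keep only the first maxHits hits; everything else on the response is preserved by replaceHits.
    static SearchResponse keepFirst(SearchResponse response, int maxHits) {
        SearchHit[] hits = response.getHits().getHits();
        if (hits.length <= maxHits) {
            return response;
        }
        return SearchResponseUtil.replaceHits(Arrays.copyOf(hits, maxHits), response);
    }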
+ */ + +package org.opensearch.search.pipeline.common; + +import org.apache.lucene.search.TotalHits; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.document.DocumentField; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.search.SearchHit; +import org.opensearch.search.SearchHits; +import org.opensearch.search.internal.InternalSearchResponse; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class CollapseResponseProcessorTests extends OpenSearchTestCase { + public void testWithDocumentFields() { + testProcessor(true); + } + + public void testWithSourceField() { + testProcessor(false); + } + + private void testProcessor(boolean includeDocField) { + Map config = new HashMap<>(Map.of(CollapseResponseProcessor.COLLAPSE_FIELD, "groupid")); + CollapseResponseProcessor processor = new CollapseResponseProcessor.Factory().create( + Collections.emptyMap(), + null, + null, + false, + config, + null + ); + int numHits = randomIntBetween(1, 100); + SearchResponse inputResponse = generateResponse(numHits, includeDocField); + + SearchResponse processedResponse = processor.processResponse(new SearchRequest(), inputResponse); + if (numHits % 2 == 0) { + assertEquals(numHits / 2, processedResponse.getHits().getHits().length); + } else { + assertEquals(numHits / 2 + 1, processedResponse.getHits().getHits().length); + } + for (SearchHit collapsedHit : processedResponse.getHits()) { + assertEquals(0, collapsedHit.docId() % 2); + } + assertEquals("groupid", processedResponse.getHits().getCollapseField()); + assertEquals(processedResponse.getHits().getHits().length, processedResponse.getHits().getCollapseValues().length); + for (int i = 0; i < processedResponse.getHits().getHits().length; i++) { + assertEquals(i, processedResponse.getHits().getCollapseValues()[i]); + } + } + + private static SearchResponse generateResponse(int numHits, boolean includeDocField) { + SearchHit[] hitsArray = new SearchHit[numHits]; + for (int i = 0; i < numHits; i++) { + Map docFields; + int groupValue = i / 2; + if (includeDocField) { + docFields = Map.of("groupid", new DocumentField("groupid", List.of(groupValue))); + } else { + docFields = Collections.emptyMap(); + } + SearchHit hit = new SearchHit(i, Integer.toString(i), docFields, Collections.emptyMap()); + hit.sourceRef(new BytesArray("{\"groupid\": " + groupValue + "}")); + hitsArray[i] = hit; + } + SearchHits searchHits = new SearchHits( + hitsArray, + new TotalHits(Math.max(numHits, 1000), TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO), + 1.0f + ); + InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, null, null, null, false, false, 0); + return new SearchResponse(internalSearchResponse, null, 1, 1, 0, 10, null, null); + } +} diff --git a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/OversampleRequestProcessorTests.java b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/OversampleRequestProcessorTests.java new file mode 100644 index 0000000000000..96e99dff9cc03 --- /dev/null +++ b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/OversampleRequestProcessorTests.java @@ -0,0 +1,62 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be 
licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline.common; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.search.pipeline.PipelineProcessingContext; +import org.opensearch.search.pipeline.common.helpers.ContextUtils; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +public class OversampleRequestProcessorTests extends OpenSearchTestCase { + + public void testEmptySource() { + OversampleRequestProcessor.Factory factory = new OversampleRequestProcessor.Factory(); + Map config = new HashMap<>(Map.of(OversampleRequestProcessor.SAMPLE_FACTOR, 3.0)); + OversampleRequestProcessor processor = factory.create(Collections.emptyMap(), null, null, false, config, null); + + SearchRequest request = new SearchRequest(); + PipelineProcessingContext context = new PipelineProcessingContext(); + SearchRequest transformedRequest = processor.processRequest(request, context); + assertEquals(request, transformedRequest); + assertNull(context.getAttribute("original_size")); + } + + public void testBasicBehavior() { + OversampleRequestProcessor.Factory factory = new OversampleRequestProcessor.Factory(); + Map config = new HashMap<>(Map.of(OversampleRequestProcessor.SAMPLE_FACTOR, 3.0)); + OversampleRequestProcessor processor = factory.create(Collections.emptyMap(), null, null, false, config, null); + + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().size(10); + SearchRequest request = new SearchRequest().source(sourceBuilder); + PipelineProcessingContext context = new PipelineProcessingContext(); + SearchRequest transformedRequest = processor.processRequest(request, context); + assertEquals(30, transformedRequest.source().size()); + assertEquals(10, context.getAttribute("original_size")); + } + + public void testContextPrefix() { + OversampleRequestProcessor.Factory factory = new OversampleRequestProcessor.Factory(); + Map config = new HashMap<>( + Map.of(OversampleRequestProcessor.SAMPLE_FACTOR, 3.0, ContextUtils.CONTEXT_PREFIX_PARAMETER, "foo") + ); + OversampleRequestProcessor processor = factory.create(Collections.emptyMap(), null, null, false, config, null); + + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().size(10); + SearchRequest request = new SearchRequest().source(sourceBuilder); + PipelineProcessingContext context = new PipelineProcessingContext(); + SearchRequest transformedRequest = processor.processRequest(request, context); + assertEquals(30, transformedRequest.source().size()); + assertEquals(10, context.getAttribute("foo.original_size")); + } +} diff --git a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/ScriptRequestProcessorTests.java b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/ScriptRequestProcessorTests.java index fde9757312e30..b372b220b71ac 100644 --- a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/ScriptRequestProcessorTests.java +++ b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/ScriptRequestProcessorTests.java @@ -18,7 +18,7 @@ import org.opensearch.script.ScriptType; import org.opensearch.script.SearchScript; import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.search.pipeline.common.helpers.SearchRequestMap; +import 
org.opensearch.search.pipeline.PipelineProcessingContext; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; @@ -27,8 +27,6 @@ import java.util.Map; import java.util.concurrent.TimeUnit; -import static org.hamcrest.core.Is.is; - public class ScriptRequestProcessorTests extends OpenSearchTestCase { private ScriptService scriptService; @@ -87,7 +85,7 @@ public void testScriptingWithoutPrecompiledScriptFactory() throws Exception { searchRequest.source(createSearchSourceBuilder()); assertNotNull(searchRequest); - processor.processRequest(searchRequest); + processor.processRequest(searchRequest, new PipelineProcessingContext()); assertSearchRequest(searchRequest); } @@ -104,7 +102,7 @@ public void testScriptingWithPrecompiledIngestScript() throws Exception { searchRequest.source(createSearchSourceBuilder()); assertNotNull(searchRequest); - processor.processRequest(searchRequest); + processor.processRequest(searchRequest, new PipelineProcessingContext()); assertSearchRequest(searchRequest); } @@ -124,15 +122,15 @@ private SearchSourceBuilder createSearchSourceBuilder() { } private void assertSearchRequest(SearchRequest searchRequest) { - assertThat(searchRequest.source().from(), is(20)); - assertThat(searchRequest.source().size(), is(30)); - assertThat(searchRequest.source().explain(), is(false)); - assertThat(searchRequest.source().version(), is(false)); - assertThat(searchRequest.source().seqNoAndPrimaryTerm(), is(false)); - assertThat(searchRequest.source().trackScores(), is(false)); - assertThat(searchRequest.source().trackTotalHitsUpTo(), is(4)); - assertThat(searchRequest.source().minScore(), is(2.0f)); - assertThat(searchRequest.source().timeout(), is(new TimeValue(60, TimeUnit.SECONDS))); - assertThat(searchRequest.source().terminateAfter(), is(6)); + assertEquals(20, searchRequest.source().from()); + assertEquals(30, searchRequest.source().size()); + assertFalse(searchRequest.source().explain()); + assertFalse(searchRequest.source().version()); + assertFalse(searchRequest.source().seqNoAndPrimaryTerm()); + assertFalse(searchRequest.source().trackScores()); + assertEquals(4, searchRequest.source().trackTotalHitsUpTo().intValue()); + assertEquals(2.0f, searchRequest.source().minScore(), 0.0001); + assertEquals(new TimeValue(60, TimeUnit.SECONDS), searchRequest.source().timeout()); + assertEquals(6, searchRequest.source().terminateAfter()); } } diff --git a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/helpers/SearchRequestMapTests.java b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchRequestMapTests.java similarity index 99% rename from modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/helpers/SearchRequestMapTests.java rename to modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchRequestMapTests.java index 5572f28335e1c..c982ada7b5ea5 100644 --- a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/helpers/SearchRequestMapTests.java +++ b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchRequestMapTests.java @@ -5,7 +5,7 @@ * this file be licensed under the Apache-2.0 license or a * compatible open source license. 
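The two-argument processRequest signature above is the visible half of a larger change: request and response processors now share a PipelineProcessingContext for the lifetime of one search. A rough sketch of the handoff the oversample/truncate tests below exercise, using only the accessors those tests call (attribute key as in the tests):

    PipelineProcessingContext ctx = new PipelineProcessingContext();
    SearchRequest request = new SearchRequest().source(new SearchSourceBuilder().size(10));
    // Request phase: remember the caller's size before inflating it.
    ctx.setAttribute("original_size", request.source().size());
    request.source().size(30);
    // Response phase: a later processor reads the attribute back to trim the hits.
    int originalSize = (int) ctx.getAttribute("original_size"); // 10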
*/ -package org.opensearch.search.pipeline.common.helpers; +package org.opensearch.search.pipeline.common; import org.opensearch.action.search.SearchRequest; import org.opensearch.search.builder.SearchSourceBuilder; diff --git a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/TruncateHitsResponseProcessorTests.java b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/TruncateHitsResponseProcessorTests.java new file mode 100644 index 0000000000000..7615225c7f77e --- /dev/null +++ b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/TruncateHitsResponseProcessorTests.java @@ -0,0 +1,91 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline.common; + +import org.apache.lucene.search.TotalHits; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.search.SearchHit; +import org.opensearch.search.SearchHits; +import org.opensearch.search.internal.InternalSearchResponse; +import org.opensearch.search.pipeline.PipelineProcessingContext; +import org.opensearch.search.pipeline.common.helpers.ContextUtils; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +public class TruncateHitsResponseProcessorTests extends OpenSearchTestCase { + + public void testBasicBehavior() { + int targetSize = randomInt(50); + TruncateHitsResponseProcessor.Factory factory = new TruncateHitsResponseProcessor.Factory(); + Map config = new HashMap<>(Map.of(TruncateHitsResponseProcessor.TARGET_SIZE, targetSize)); + TruncateHitsResponseProcessor processor = factory.create(Collections.emptyMap(), null, null, false, config, null); + + int numHits = randomInt(100); + SearchResponse response = constructResponse(numHits); + SearchResponse transformedResponse = processor.processResponse(new SearchRequest(), response, new PipelineProcessingContext()); + assertEquals(Math.min(targetSize, numHits), transformedResponse.getHits().getHits().length); + } + + public void testTargetSizePassedViaContext() { + TruncateHitsResponseProcessor.Factory factory = new TruncateHitsResponseProcessor.Factory(); + TruncateHitsResponseProcessor processor = factory.create(Collections.emptyMap(), null, null, false, Collections.emptyMap(), null); + + int targetSize = randomInt(50); + int numHits = randomInt(100); + SearchResponse response = constructResponse(numHits); + PipelineProcessingContext requestContext = new PipelineProcessingContext(); + requestContext.setAttribute("original_size", targetSize); + SearchResponse transformedResponse = processor.processResponse(new SearchRequest(), response, requestContext); + assertEquals(Math.min(targetSize, numHits), transformedResponse.getHits().getHits().length); + } + + public void testTargetSizePassedViaContextWithPrefix() { + TruncateHitsResponseProcessor.Factory factory = new TruncateHitsResponseProcessor.Factory(); + Map config = new HashMap<>(Map.of(ContextUtils.CONTEXT_PREFIX_PARAMETER, "foo")); + TruncateHitsResponseProcessor processor = factory.create(Collections.emptyMap(), null, null, false, config, null); + + int targetSize = randomInt(50); + int numHits = randomInt(100); + SearchResponse response = constructResponse(numHits); + PipelineProcessingContext 
requestContext = new PipelineProcessingContext(); + requestContext.setAttribute("foo.original_size", targetSize); + SearchResponse transformedResponse = processor.processResponse(new SearchRequest(), response, requestContext); + assertEquals(Math.min(targetSize, numHits), transformedResponse.getHits().getHits().length); + } + + public void testTargetSizeMissing() { + TruncateHitsResponseProcessor.Factory factory = new TruncateHitsResponseProcessor.Factory(); + TruncateHitsResponseProcessor processor = factory.create(Collections.emptyMap(), null, null, false, Collections.emptyMap(), null); + + int numHits = randomInt(100); + SearchResponse response = constructResponse(numHits); + assertThrows( + IllegalStateException.class, + () -> processor.processResponse(new SearchRequest(), response, new PipelineProcessingContext()) + ); + } + + private static SearchResponse constructResponse(int numHits) { + SearchHit[] hitsArray = new SearchHit[numHits]; + for (int i = 0; i < numHits; i++) { + hitsArray[i] = new SearchHit(i, Integer.toString(i), Collections.emptyMap(), Collections.emptyMap()); + } + SearchHits searchHits = new SearchHits( + hitsArray, + new TotalHits(Math.max(numHits, 1000), TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO), + 1.0f + ); + InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, null, null, null, false, false, 0); + return new SearchResponse(internalSearchResponse, null, 1, 1, 0, 10, null, null); + } +} diff --git a/modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/60_oversample_truncate.yml b/modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/60_oversample_truncate.yml new file mode 100644 index 0000000000000..1f9e95084322d --- /dev/null +++ b/modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/60_oversample_truncate.yml @@ -0,0 +1,105 @@ +--- +teardown: + - do: + search_pipeline.delete: + id: "my_pipeline" + ignore: 404 + +--- +"Test state propagating from oversample to truncate_hits processor": + - do: + search_pipeline.put: + id: "my_pipeline" + body: > + { + "description": "_description", + "request_processors": [ + { + "oversample" : { + "sample_factor" : 2 + } + } + ], + "response_processors": [ + { + "collapse" : { + "field" : "group_id" + } + }, + { + "truncate_hits" : {} + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + body: { + "group_id": "a", + "popularity" : 1 + } + - do: + index: + index: test + id: 2 + body: { + "group_id": "a", + "popularity" : 2 + } + - do: + index: + index: test + id: 3 + body: { + "group_id": "b", + "popularity" : 3 + } + - do: + index: + index: test + id: 4 + body: { + "group_id": "b", + "popularity" : 4 + } + - do: + indices.refresh: + index: test + + - do: + search: + body: { + "query" : { + "function_score" : { + "field_value_factor" : { + "field" : "popularity" + } + } + }, + "size" : 2 + } + - match: { hits.total.value: 4 } + - length: { hits.hits: 2 } + - match: { hits.hits.0._id: "4" } + - match: { hits.hits.1._id: "3" } + + - do: + search: + search_pipeline: my_pipeline + body: { + "query" : { + "function_score" : { + "field_value_factor" : { + "field" : "popularity" + } + } + }, + "size" : 2 + } + - match: { hits.total.value: 4 } + - length: { hits.hits: 2 } + - match: { hits.hits.0._id: "4" } + - match: { hits.hits.1._id: "2" } diff --git 
a/modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/70_script_truncate.yml b/modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/70_script_truncate.yml new file mode 100644 index 0000000000000..9c9f6747e9bdc --- /dev/null +++ b/modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/70_script_truncate.yml @@ -0,0 +1,70 @@ +--- +teardown: + - do: + search_pipeline.delete: + id: "my_pipeline" + ignore: 404 + +--- +"Test state propagating from script request to truncate_hits processor": + - do: + search_pipeline.put: + id: "my_pipeline" + body: > + { + "description": "_description", + "request_processors": [ + { + "script" : { + "source" : "ctx.request_context['foo.original_size'] = 2" + } + } + ], + "response_processors": [ + { + "truncate_hits" : { + "context_prefix" : "foo" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + body: {} + - do: + index: + index: test + id: 2 + body: {} + - do: + index: + index: test + id: 3 + body: {} + - do: + index: + index: test + id: 4 + body: {} + - do: + indices.refresh: + index: test + + - do: + search: + body: { + } + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + + - do: + search: + search_pipeline: my_pipeline + body: { + } + - match: { hits.total.value: 4 } + - length: { hits.hits: 2 } diff --git a/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsMasterKeyProvider.java b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsMasterKeyProvider.java index 9003e8bebd5ff..19e000628a9c8 100644 --- a/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsMasterKeyProvider.java +++ b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsMasterKeyProvider.java @@ -59,7 +59,10 @@ public DataKeyPair generateDataPair() { @Override public byte[] decryptKey(byte[] encryptedKey) { try (AmazonKmsClientReference clientReference = clientReferenceSupplier.get()) { - DecryptRequest decryptRequest = DecryptRequest.builder().ciphertextBlob(SdkBytes.fromByteArray(encryptedKey)).build(); + DecryptRequest decryptRequest = DecryptRequest.builder() + .ciphertextBlob(SdkBytes.fromByteArray(encryptedKey)) + .encryptionContext(encryptionContext) + .build(); DecryptResponse decryptResponse = SocketAccess.doPrivileged(() -> clientReference.get().decrypt(decryptRequest)); return decryptResponse.plaintext().asByteArray(); } diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle index 1fed446016647..16f2d2c5f23c6 100644 --- a/plugins/discovery-azure-classic/build.gradle +++ b/plugins/discovery-azure-classic/build.gradle @@ -53,7 +53,7 @@ dependencies { api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" api "commons-codec:commons-codec:${versions.commonscodec}" api "commons-lang:commons-lang:2.6" - api "commons-io:commons-io:2.13.0" + api "commons-io:commons-io:2.15.0" api 'javax.mail:mail:1.4.7' api 'javax.inject:javax.inject:1' api "com.sun.jersey:jersey-client:${versions.jersey}" diff --git a/plugins/discovery-azure-classic/licenses/commons-io-2.13.0.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-io-2.13.0.jar.sha1 deleted file mode 100644 index c165136eb5822..0000000000000 --- a/plugins/discovery-azure-classic/licenses/commons-io-2.13.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8bb2bc9b4df17e2411533a0708a69f983bf5e83b \ No newline at end of file diff --git 
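Stepping back to the KmsMasterKeyProvider fix above: for symmetric keys, AWS KMS fails a Decrypt call unless it is given the same encryption context that was supplied at encryption time, which is why the request now echoes the provider's context. A minimal sketch against the same SDK v2 builder (the map contents here are illustrative only):

    Map<String, String> encryptionContext = Map.of("purpose", "repository-encryption"); // illustrative entries
    DecryptRequest decryptRequest = DecryptRequest.builder()
        .ciphertextBlob(SdkBytes.fromByteArray(encryptedKey))
        .encryptionContext(encryptionContext) // must match the context used on Encrypt
        .build();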
a/plugins/discovery-azure-classic/licenses/commons-io-2.15.0.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-io-2.15.0.jar.sha1 new file mode 100644 index 0000000000000..73709383fd130 --- /dev/null +++ b/plugins/discovery-azure-classic/licenses/commons-io-2.15.0.jar.sha1 @@ -0,0 +1 @@ +5c3c2db10f6f797430a7f9c696b4d1273768c924 \ No newline at end of file diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 8945c09fca28b..57a2493053956 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -57,7 +57,7 @@ dependencies { runtimeOnly "com.google.guava:guava:${versions.guava}" // Other dependencies api 'org.tukaani:xz:1.9' - api 'commons-io:commons-io:2.14.0' + api 'commons-io:commons-io:2.15.0' api "org.slf4j:slf4j-api:${versions.slf4j}" // character set detection @@ -79,7 +79,7 @@ dependencies { api "org.apache.poi:poi:${versions.poi}" api "org.apache.poi:poi-ooxml-lite:${versions.poi}" api "commons-codec:commons-codec:${versions.commonscodec}" - api 'org.apache.xmlbeans:xmlbeans:5.1.1' + api 'org.apache.xmlbeans:xmlbeans:5.2.0' api 'org.apache.commons:commons-collections4:4.4' // MS Office api "org.apache.poi:poi-scratchpad:${versions.poi}" diff --git a/plugins/ingest-attachment/licenses/commons-io-2.14.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-io-2.14.0.jar.sha1 deleted file mode 100644 index 33c5cfe53e01d..0000000000000 --- a/plugins/ingest-attachment/licenses/commons-io-2.14.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a4c6e1f6c196339473cd2e1b037f0eb97c62755b \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-io-2.15.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-io-2.15.0.jar.sha1 new file mode 100644 index 0000000000000..73709383fd130 --- /dev/null +++ b/plugins/ingest-attachment/licenses/commons-io-2.15.0.jar.sha1 @@ -0,0 +1 @@ +5c3c2db10f6f797430a7f9c696b4d1273768c924 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.1.1.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.1.1.jar.sha1 deleted file mode 100644 index 4d1d2ad0807e7..0000000000000 --- a/plugins/ingest-attachment/licenses/xmlbeans-5.1.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -48a369df0eccb509d46203104e4df9cb00f0f68b \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.2.0.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.2.0.jar.sha1 new file mode 100644 index 0000000000000..f34274d593697 --- /dev/null +++ b/plugins/ingest-attachment/licenses/xmlbeans-5.2.0.jar.sha1 @@ -0,0 +1 @@ +6198ac997b3f234f2b5393fa415f78fac2e06510 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.12.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.12.jar.sha1 deleted file mode 100644 index 352d69396d0c9..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-core-1.1.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -378dc5a375e6440099e837b22cf4b01341cbe4ea \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.13.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.13.jar.sha1 new file mode 100644 index 0000000000000..5eaf96739ed72 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-core-1.1.13.jar.sha1 @@ -0,0 +1 @@ +faea23e582978a34f6a932b81e86206ec2314990 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.12.jar.sha1 
b/plugins/repository-azure/licenses/reactor-netty-http-1.1.12.jar.sha1 deleted file mode 100644 index 1bcb0e0c52950..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-http-1.1.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e839fadb8f45d8a7a2783466faedd03373366c23 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.13.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.13.jar.sha1 new file mode 100644 index 0000000000000..091125169c696 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-http-1.1.13.jar.sha1 @@ -0,0 +1 @@ +c5af7bc746050d080891a5446cca2c96a0c51d03 \ No newline at end of file diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java index 262304029a0d3..d691cad9c9d03 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java @@ -374,7 +374,7 @@ private static IrsaCredentials buildFromEnvironment(IrsaCredentials defaults) { return new IrsaCredentials(webIdentityTokenFile, roleArn, roleSessionName); } - private synchronized void releaseCachedClients() { + public synchronized void releaseCachedClients() { // the clients will shutdown when they will not be used anymore for (final AmazonAsyncS3Reference clientReference : clientsCache.values()) { clientReference.decRef(); diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java index c1180aab0e0c7..3a55fcb0bdbcd 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java @@ -91,6 +91,7 @@ import org.opensearch.repositories.s3.async.UploadRequest; import org.opensearch.repositories.s3.utils.HttpRangeUtils; +import java.io.BufferedInputStream; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; @@ -188,10 +189,38 @@ public void asyncBlobUpload(WriteContext writeContext, ActionListener comp writeContext.getWritePriority(), writeContext.getUploadFinalizer(), writeContext.doRemoteDataIntegrityCheck(), - writeContext.getExpectedChecksum() + writeContext.getExpectedChecksum(), + blobStore.isUploadRetryEnabled() ); try { - long partSize = blobStore.getAsyncTransferManager().calculateOptimalPartSize(writeContext.getFileSize()); + if (uploadRequest.getContentLength() > ByteSizeUnit.GB.toBytes(10) && blobStore.isRedirectLargeUploads()) { + StreamContext streamContext = SocketAccess.doPrivileged( + () -> writeContext.getStreamProvider(uploadRequest.getContentLength()) + ); + InputStreamContainer inputStream = streamContext.provideStream(0); + try { + executeMultipartUpload( + blobStore, + uploadRequest.getKey(), + inputStream.getInputStream(), + uploadRequest.getContentLength() + ); + completionListener.onResponse(null); + } catch (Exception ex) { + logger.error( + () -> new ParameterizedMessage( + "Failed to upload large file {} of size {} ", + uploadRequest.getKey(), + uploadRequest.getContentLength() + ), + ex + ); + completionListener.onFailure(ex); + } + return; + } + long partSize = blobStore.getAsyncTransferManager() + .calculateOptimalPartSize(writeContext.getFileSize(), 
writeContext.getWritePriority(), blobStore.isUploadRetryEnabled()); StreamContext streamContext = SocketAccess.doPrivileged(() -> writeContext.getStreamProvider(partSize)); try (AmazonAsyncS3Reference amazonS3Reference = SocketAccess.doPrivileged(blobStore::asyncClientReference)) { @@ -537,8 +566,14 @@ void executeSingleUpload(final S3BlobStore blobStore, final String blobName, fin PutObjectRequest putObjectRequest = putObjectRequestBuilder.build(); try (AmazonS3Reference clientReference = blobStore.clientReference()) { + final InputStream requestInputStream; + if (blobStore.isUploadRetryEnabled()) { + requestInputStream = new BufferedInputStream(input, (int) (blobSize + 1)); + } else { + requestInputStream = input; + } SocketAccess.doPrivilegedVoid( - () -> clientReference.get().putObject(putObjectRequest, RequestBody.fromInputStream(input, blobSize)) + () -> clientReference.get().putObject(putObjectRequest, RequestBody.fromInputStream(requestInputStream, blobSize)) ); } catch (final SdkException e) { throw new IOException("Unable to upload object [" + blobName + "] using a single upload", e); @@ -578,6 +613,13 @@ void executeMultipartUpload(final S3BlobStore blobStore, final String blobName, createMultipartUploadRequestBuilder.serverSideEncryption(ServerSideEncryption.AES256); } + final InputStream requestInputStream; + if (blobStore.isUploadRetryEnabled()) { + requestInputStream = new BufferedInputStream(input, (int) (partSize + 1)); + } else { + requestInputStream = input; + } + CreateMultipartUploadRequest createMultipartUploadRequest = createMultipartUploadRequestBuilder.build(); try (AmazonS3Reference clientReference = blobStore.clientReference()) { uploadId.set( @@ -601,10 +643,9 @@ void executeMultipartUpload(final S3BlobStore blobStore, final String blobName, .build(); bytesCount += uploadPartRequest.contentLength(); - final UploadPartResponse uploadResponse = SocketAccess.doPrivileged( () -> clientReference.get() - .uploadPart(uploadPartRequest, RequestBody.fromInputStream(input, uploadPartRequest.contentLength())) + .uploadPart(uploadPartRequest, RequestBody.fromInputStream(requestInputStream, uploadPartRequest.contentLength())) ); parts.add(CompletedPart.builder().partNumber(uploadPartRequest.partNumber()).eTag(uploadResponse.eTag()).build()); } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java index e8e043357e126..fc70fbb0db00e 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java @@ -56,8 +56,10 @@ import static org.opensearch.repositories.s3.S3Repository.BUFFER_SIZE_SETTING; import static org.opensearch.repositories.s3.S3Repository.BULK_DELETE_SIZE; import static org.opensearch.repositories.s3.S3Repository.CANNED_ACL_SETTING; +import static org.opensearch.repositories.s3.S3Repository.REDIRECT_LARGE_S3_UPLOAD; import static org.opensearch.repositories.s3.S3Repository.SERVER_SIDE_ENCRYPTION_SETTING; import static org.opensearch.repositories.s3.S3Repository.STORAGE_CLASS_SETTING; +import static org.opensearch.repositories.s3.S3Repository.UPLOAD_RETRY_ENABLED; class S3BlobStore implements BlobStore { @@ -71,6 +73,10 @@ class S3BlobStore implements BlobStore { private volatile ByteSizeValue bufferSize; + private volatile boolean redirectLargeUploads; + + private volatile boolean uploadRetryEnabled; + private volatile 
boolean serverSideEncryption; private volatile ObjectCannedACL cannedACL; @@ -119,6 +125,9 @@ class S3BlobStore implements BlobStore { this.normalExecutorBuilder = normalExecutorBuilder; this.priorityExecutorBuilder = priorityExecutorBuilder; this.urgentExecutorBuilder = urgentExecutorBuilder; + // Settings to initialize blobstore with. + this.redirectLargeUploads = REDIRECT_LARGE_S3_UPLOAD.get(repositoryMetadata.settings()); + this.uploadRetryEnabled = UPLOAD_RETRY_ENABLED.get(repositoryMetadata.settings()); } @Override @@ -130,6 +139,8 @@ public void reload(RepositoryMetadata repositoryMetadata) { this.cannedACL = initCannedACL(CANNED_ACL_SETTING.get(repositoryMetadata.settings())); this.storageClass = initStorageClass(STORAGE_CLASS_SETTING.get(repositoryMetadata.settings())); this.bulkDeletesSize = BULK_DELETE_SIZE.get(repositoryMetadata.settings()); + this.redirectLargeUploads = REDIRECT_LARGE_S3_UPLOAD.get(repositoryMetadata.settings()); + this.uploadRetryEnabled = UPLOAD_RETRY_ENABLED.get(repositoryMetadata.settings()); } @Override @@ -149,6 +160,14 @@ int getMaxRetries() { return service.settings(repositoryMetadata).maxRetries; } + public boolean isRedirectLargeUploads() { + return redirectLargeUploads; + } + + public boolean isUploadRetryEnabled() { + return uploadRetryEnabled; + } + public String bucket() { return bucket; } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java index 2392c66329e06..4fda0ee95a3ec 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java @@ -177,7 +177,7 @@ final class S3ClientSettings { static final Setting.AffixSetting REQUEST_TIMEOUT_SETTING = Setting.affixKeySetting( PREFIX, "request_timeout", - key -> Setting.timeSetting(key, TimeValue.timeValueMinutes(2), Property.NodeScope) + key -> Setting.timeSetting(key, TimeValue.timeValueMinutes(5), Property.NodeScope) ); /** The connection timeout for connecting to s3. */ @@ -198,14 +198,14 @@ final class S3ClientSettings { static final Setting.AffixSetting MAX_CONNECTIONS_SETTING = Setting.affixKeySetting( PREFIX, "max_connections", - key -> Setting.intSetting(key, 100, Property.NodeScope) + key -> Setting.intSetting(key, 500, Property.NodeScope) ); /** Connection acquisition timeout for new connections to S3. */ static final Setting.AffixSetting CONNECTION_ACQUISITION_TIMEOUT = Setting.affixKeySetting( PREFIX, "connection_acquisition_timeout", - key -> Setting.timeSetting(key, TimeValue.timeValueMinutes(2), Property.NodeScope) + key -> Setting.timeSetting(key, TimeValue.timeValueMinutes(15), Property.NodeScope) ); /** The maximum pending connections to S3. */ diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java index 728a99b1220a6..f7772a57c9afd 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java @@ -147,6 +147,20 @@ class S3Repository extends MeteredBlobStoreRepository { */ static final ByteSizeValue MAX_FILE_SIZE_USING_MULTIPART = new ByteSizeValue(5, ByteSizeUnit.TB); + /** + * Whether large uploads need to be redirected to slow sync s3 client. 
+     */
+    static final Setting<Boolean> REDIRECT_LARGE_S3_UPLOAD = Setting.boolSetting(
+        "redirect_large_s3_upload",
+        true,
+        Setting.Property.NodeScope
+    );
+
+    /**
+     * Whether retries on uploads are enabled. This setting wraps the input stream with a buffered stream to enable retries.
+     */
+    static final Setting<Boolean> UPLOAD_RETRY_ENABLED = Setting.boolSetting("s3_upload_retry_enabled", true, Setting.Property.NodeScope);
+
     /**
      * Minimum threshold below which the chunk is uploaded using a single request. Beyond this threshold,
      * the S3 repository will use the AWS Multipart Upload API to split the chunk into several parts, each of buffer_size length, and
@@ -391,7 +405,9 @@ public void reload(RepositoryMetadata newRepositoryMetadata) {

         // Reload configs for S3RepositoryPlugin
         service.settings(metadata);
+        service.releaseCachedClients();
         s3AsyncService.settings(metadata);
+        s3AsyncService.releaseCachedClients();

         // Reload configs for S3BlobStore
         BlobStore blobStore = getBlobStore();
diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java
index 9ed232464d080..e7d2a4d024e60 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java
@@ -99,23 +99,32 @@ public S3RepositoryPlugin(final Settings settings, final Path configPath) {
     @Override
     public List<ExecutorBuilder<?>> getExecutorBuilders(Settings settings) {
         List<ExecutorBuilder<?>> executorBuilders = new ArrayList<>();
-        int halfProcMaxAt5 = halfAllocatedProcessorsMaxFive(allocatedProcessors(settings));
+        int halfProc = halfNumberOfProcessors(allocatedProcessors(settings));
         executorBuilders.add(
             new FixedExecutorBuilder(settings, URGENT_FUTURE_COMPLETION, urgentPoolCount(settings), 10_000, URGENT_FUTURE_COMPLETION)
         );
-        executorBuilders.add(new ScalingExecutorBuilder(URGENT_STREAM_READER, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5)));
+        executorBuilders.add(new ScalingExecutorBuilder(URGENT_STREAM_READER, 1, halfProc, TimeValue.timeValueMinutes(5)));
         executorBuilders.add(
-            new FixedExecutorBuilder(settings, PRIORITY_FUTURE_COMPLETION, priorityPoolCount(settings), 10_000, PRIORITY_FUTURE_COMPLETION)
+            new ScalingExecutorBuilder(PRIORITY_FUTURE_COMPLETION, 1, allocatedProcessors(settings), TimeValue.timeValueMinutes(5))
         );
-        executorBuilders.add(new ScalingExecutorBuilder(PRIORITY_STREAM_READER, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5)));
+        executorBuilders.add(new ScalingExecutorBuilder(PRIORITY_STREAM_READER, 1, halfProc, TimeValue.timeValueMinutes(5)));

-        executorBuilders.add(new FixedExecutorBuilder(settings, FUTURE_COMPLETION, normalPoolCount(settings), 10_000, FUTURE_COMPLETION));
-        executorBuilders.add(new ScalingExecutorBuilder(STREAM_READER, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5)));
+        executorBuilders.add(
+            new ScalingExecutorBuilder(FUTURE_COMPLETION, 1, allocatedProcessors(settings), TimeValue.timeValueMinutes(5))
+        );
+        executorBuilders.add(
+            new ScalingExecutorBuilder(
+                STREAM_READER,
+                allocatedProcessors(settings),
+                4 * allocatedProcessors(settings),
+                TimeValue.timeValueMinutes(5)
+            )
+        );
         return executorBuilders;
     }

-    static int halfAllocatedProcessorsMaxFive(final int allocatedProcessors) {
-        return boundedBy((allocatedProcessors + 1) / 2, 1, 5);
+    static int halfNumberOfProcessors(int numberOfProcessors) {
+        return (numberOfProcessors + 1) / 2;
     }

     S3RepositoryPlugin(final Settings
settings, final Path configPath, final S3Service service, final S3AsyncService s3AsyncService) { @@ -252,7 +261,9 @@ public List> getSettings() { S3ClientSettings.IDENTITY_TOKEN_FILE_SETTING, S3ClientSettings.ROLE_SESSION_NAME_SETTING, S3Repository.PARALLEL_MULTIPART_UPLOAD_MINIMUM_PART_SIZE_SETTING, - S3Repository.PARALLEL_MULTIPART_UPLOAD_ENABLED_SETTING + S3Repository.PARALLEL_MULTIPART_UPLOAD_ENABLED_SETTING, + S3Repository.REDIRECT_LARGE_S3_UPLOAD, + S3Repository.UPLOAD_RETRY_ENABLED ); } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java index b1b3e19eac275..24387fb98a425 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java @@ -438,7 +438,7 @@ private static IrsaCredentials buildFromEnviroment(IrsaCredentials defaults) { return new IrsaCredentials(webIdentityTokenFile, roleArn, roleSessionName); } - private synchronized void releaseCachedClients() { + public synchronized void releaseCachedClients() { // the clients will shutdown when they will not be used anymore for (final AmazonS3Reference clientReference : clientsCache.values()) { clientReference.decRef(); diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java index 0c63bfdb1ff97..8d2772d42ebca 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java @@ -12,6 +12,8 @@ import software.amazon.awssdk.metrics.MetricPublisher; import software.amazon.awssdk.metrics.MetricRecord; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.opensearch.common.blobstore.BlobStore; import java.time.Duration; @@ -21,6 +23,7 @@ public class StatsMetricPublisher { + private static final Logger LOGGER = LogManager.getLogger(StatsMetricPublisher.class); private final Stats stats = new Stats(); private final Map extendedStats = new HashMap<>() { @@ -35,6 +38,7 @@ public class StatsMetricPublisher { public MetricPublisher listObjectsMetricPublisher = new MetricPublisher() { @Override public void publish(MetricCollection metricCollection) { + LOGGER.debug(() -> "List objects request metrics: " + metricCollection); for (MetricRecord metricRecord : metricCollection) { switch (metricRecord.metric().name()) { case "ApiCallDuration": @@ -64,6 +68,7 @@ public void close() {} public MetricPublisher deleteObjectsMetricPublisher = new MetricPublisher() { @Override public void publish(MetricCollection metricCollection) { + LOGGER.debug(() -> "Delete objects request metrics: " + metricCollection); for (MetricRecord metricRecord : metricCollection) { switch (metricRecord.metric().name()) { case "ApiCallDuration": @@ -93,6 +98,7 @@ public void close() {} public MetricPublisher getObjectMetricPublisher = new MetricPublisher() { @Override public void publish(MetricCollection metricCollection) { + LOGGER.debug(() -> "Get object request metrics: " + metricCollection); for (MetricRecord metricRecord : metricCollection) { switch (metricRecord.metric().name()) { case "ApiCallDuration": @@ -122,6 +128,7 @@ public void close() {} public MetricPublisher putObjectMetricPublisher = new 
MetricPublisher() {
        @Override
        public void publish(MetricCollection metricCollection) {
+            LOGGER.debug(() -> "Put object request metrics: " + metricCollection);
            for (MetricRecord<?> metricRecord : metricCollection) {
                switch (metricRecord.metric().name()) {
                    case "ApiCallDuration":
@@ -151,6 +158,7 @@ public void close() {}
     public MetricPublisher multipartUploadMetricCollector = new MetricPublisher() {
         @Override
         public void publish(MetricCollection metricCollection) {
+            LOGGER.debug(() -> "Multi-part request metrics: " + metricCollection);
            for (MetricRecord<?> metricRecord : metricCollection) {
                switch (metricRecord.metric().name()) {
                    case "ApiCallDuration":
diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java
index 933ee6dc29513..b4c4ed0ecaa75 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java
@@ -23,8 +23,8 @@
 import org.opensearch.common.StreamContext;
 import org.opensearch.common.blobstore.stream.write.WritePriority;
 import org.opensearch.common.io.InputStreamContainer;
-import org.opensearch.core.common.unit.ByteSizeUnit;
 import org.opensearch.repositories.s3.SocketAccess;
+import org.opensearch.repositories.s3.StatsMetricPublisher;
 import org.opensearch.repositories.s3.io.CheckedContainer;

 import java.io.BufferedInputStream;
@@ -54,6 +54,7 @@ public class AsyncPartsHandler {
      * @param uploadId Upload Id against which multi-part is being performed
      * @param completedParts Reference of completed parts
      * @param inputStreamContainers Checksum containers
+     * @param statsMetricPublisher SDK metric publisher
      * @return list of completable futures
      * @throws IOException thrown in case of an IO error
      */
@@ -66,7 +67,9 @@ public static List<CompletableFuture<CompletedPart>> uploadParts(
         StreamContext streamContext,
         String uploadId,
         AtomicReferenceArray<CompletedPart> completedParts,
-        AtomicReferenceArray<CheckedContainer> inputStreamContainers
+        AtomicReferenceArray<CheckedContainer> inputStreamContainers,
+        StatsMetricPublisher statsMetricPublisher,
+        boolean uploadRetryEnabled
     ) throws IOException {
         List<CompletableFuture<CompletedPart>> futures = new ArrayList<>();
         for (int partIdx = 0; partIdx < streamContext.getNumberOfParts(); partIdx++) {
@@ -77,6 +80,7 @@
                 .partNumber(partIdx + 1)
                 .key(uploadRequest.getKey())
                 .uploadId(uploadId)
+                .overrideConfiguration(o -> o.addMetricPublisher(statsMetricPublisher.multipartUploadMetricCollector))
                 .contentLength(inputStreamContainer.getContentLength());
             if (uploadRequest.doRemoteDataIntegrityCheck()) {
                 uploadPartRequestBuilder.checksumAlgorithm(ChecksumAlgorithm.CRC32);
@@ -91,7 +95,8 @@
                 futures,
                 uploadPartRequestBuilder.build(),
                 inputStreamContainer,
-                uploadRequest
+                uploadRequest,
+                uploadRetryEnabled
             );
         }
@@ -128,6 +133,18 @@ public static void cleanUpParts(S3AsyncClient s3AsyncClient, UploadRequest uploa
         }));
     }

+    public static InputStream maybeRetryInputStream(
+        InputStream inputStream,
+        WritePriority writePriority,
+        boolean uploadRetryEnabled,
+        long contentLength
+    ) {
+        if (uploadRetryEnabled && (writePriority == WritePriority.HIGH || writePriority == WritePriority.URGENT)) {
+            return new BufferedInputStream(inputStream, (int) (contentLength + 1));
+        }
+        return inputStream;
+    }
+
     private static void uploadPart(
         S3AsyncClient s3AsyncClient,
         ExecutorService executorService,
@@ -138,7 +155,8
@@ private static void uploadPart( List> futures, UploadPartRequest uploadPartRequest, InputStreamContainer inputStreamContainer, - UploadRequest uploadRequest + UploadRequest uploadRequest, + boolean uploadRetryEnabled ) { Integer partNumber = uploadPartRequest.partNumber(); @@ -150,9 +168,13 @@ private static void uploadPart( } else { streamReadExecutor = executorService; } - // Buffered stream is needed to allow mark and reset ops during IO errors so that only buffered - // data can be retried instead of retrying whole file by the application. - InputStream inputStream = new BufferedInputStream(inputStreamContainer.getInputStream(), (int) (ByteSizeUnit.MB.toBytes(1) + 1)); + + InputStream inputStream = maybeRetryInputStream( + inputStreamContainer.getInputStream(), + uploadRequest.getWritePriority(), + uploadRetryEnabled, + uploadPartRequest.contentLength() + ); CompletableFuture uploadPartResponseFuture = SocketAccess.doPrivileged( () -> s3AsyncClient.uploadPart( uploadPartRequest, diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java index 4f1ab9764702e..2259780c95276 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java @@ -34,11 +34,11 @@ import org.opensearch.common.io.InputStreamContainer; import org.opensearch.common.util.ByteUtils; import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.repositories.s3.SocketAccess; import org.opensearch.repositories.s3.StatsMetricPublisher; import org.opensearch.repositories.s3.io.CheckedContainer; -import java.io.BufferedInputStream; import java.io.IOException; import java.io.InputStream; import java.util.Arrays; @@ -146,7 +146,14 @@ private void uploadInParts( handleException(returnFuture, () -> "Failed to initiate multipart upload", throwable); } else { log.debug(() -> "Initiated new multipart upload, uploadId: " + createMultipartUploadResponse.uploadId()); - doUploadInParts(s3AsyncClient, uploadRequest, streamContext, returnFuture, createMultipartUploadResponse.uploadId()); + doUploadInParts( + s3AsyncClient, + uploadRequest, + streamContext, + returnFuture, + createMultipartUploadResponse.uploadId(), + statsMetricPublisher + ); } }); } @@ -156,7 +163,8 @@ private void doUploadInParts( UploadRequest uploadRequest, StreamContext streamContext, CompletableFuture returnFuture, - String uploadId + String uploadId, + StatsMetricPublisher statsMetricPublisher ) { // The list of completed parts must be sorted @@ -174,7 +182,9 @@ private void doUploadInParts( streamContext, uploadId, completedParts, - inputStreamContainers + inputStreamContainers, + statsMetricPublisher, + uploadRequest.isUploadRetryEnabled() ); } catch (Exception ex) { try { @@ -198,7 +208,7 @@ private void doUploadInParts( } return null; }) - .thenCompose(ignore -> completeMultipartUpload(s3AsyncClient, uploadRequest, uploadId, completedParts)) + .thenCompose(ignore -> completeMultipartUpload(s3AsyncClient, uploadRequest, uploadId, completedParts, statsMetricPublisher)) .handle(handleExceptionOrResponse(s3AsyncClient, uploadRequest, returnFuture, uploadId)) .exceptionally(throwable -> { handleException(returnFuture, () -> "Unexpected exception occurred", throwable); @@ -245,7 +255,8 @@ 
@@ -245,7 +255,8 @@ private CompletableFuture<CompleteMultipartUploadResponse> completeMultipartUplo S3AsyncClient s3AsyncClient, UploadRequest uploadRequest, String uploadId, - AtomicReferenceArray<CompletedPart> completedParts + AtomicReferenceArray<CompletedPart> completedParts, + StatsMetricPublisher statsMetricPublisher ) { log.debug(() -> new ParameterizedMessage("Sending completeMultipartUploadRequest, uploadId: {}", uploadId)); @@ -254,6 +265,7 @@ private CompletableFuture<CompleteMultipartUploadResponse> completeMultipartUplo .bucket(uploadRequest.getBucket()) .key(uploadRequest.getKey()) .uploadId(uploadId) + .overrideConfiguration(o -> o.addMetricPublisher(statsMetricPublisher.multipartUploadMetricCollector)) .multipartUpload(CompletedMultipartUpload.builder().parts(parts).build()) .build(); @@ -291,10 +303,13 @@ private static void handleException(CompletableFuture<Void> returnFuture, Suppli /** * Calculates the optimal part size of each part request if the upload operation is carried out as multipart upload. */ - public long calculateOptimalPartSize(long contentLengthOfSource) { + public long calculateOptimalPartSize(long contentLengthOfSource, WritePriority writePriority, boolean uploadRetryEnabled) { if (contentLengthOfSource < ByteSizeUnit.MB.toBytes(5)) { return contentLengthOfSource; } + if (uploadRetryEnabled && (writePriority == WritePriority.HIGH || writePriority == WritePriority.URGENT)) { + return new ByteSizeValue(5, ByteSizeUnit.MB).getBytes(); + } double optimalPartSize = contentLengthOfSource / (double) MAX_UPLOAD_PARTS; optimalPartSize = Math.ceil(optimalPartSize); return (long) Math.max(optimalPartSize, minimumPartSize); @@ -324,9 +339,13 @@ private void uploadInOneChunk( } else { streamReadExecutor = executorService; } - // Buffered stream is needed to allow mark and reset ops during IO errors so that only buffered - // data can be retried instead of retrying whole file by the application.
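
Worked through with concrete numbers, the reworked calculateOptimalPartSize above behaves as follows; MAX_UPLOAD_PARTS = 10,000 and the 5 MB minimum are assumptions based on S3's documented multipart limits, not values quoted from this diff:

```java
public class PartSizeSketch {
    static final int MAX_UPLOAD_PARTS = 10_000;        // assumed: S3's multipart limit
    static final long FIVE_MB = 5L * 1024 * 1024;      // assumed minimumPartSize

    // Mirrors the shape of the reworked calculateOptimalPartSize above.
    static long optimalPartSize(long contentLength, boolean highPriorityRetry) {
        if (contentLength < FIVE_MB) {
            return contentLength;                      // small blob: single chunk
        }
        if (highPriorityRetry) {
            return FIVE_MB;                            // cap the in-memory retry buffer
        }
        long size = (long) Math.ceil(contentLength / (double) MAX_UPLOAD_PARTS);
        return Math.max(size, FIVE_MB);
    }

    public static void main(String[] args) {
        long oneTiB = 1L << 40;
        System.out.println(optimalPartSize(oneTiB, false)); // 109951163 (~105 MB parts)
        System.out.println(optimalPartSize(oneTiB, true));  // 5242880 (5 MB parts)
    }
}
```

Without the new branch, a retry-enabled HIGH or URGENT upload of a terabyte-scale blob would buffer one ~105 MB part per in-flight request; with it, each buffer stays at 5 MB.
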
- InputStream inputStream = new BufferedInputStream(inputStreamContainer.getInputStream(), (int) (ByteSizeUnit.MB.toBytes(1) + 1)); + + InputStream inputStream = AsyncPartsHandler.maybeRetryInputStream( + inputStreamContainer.getInputStream(), + uploadRequest.getWritePriority(), + uploadRequest.isUploadRetryEnabled(), + uploadRequest.getContentLength() + ); CompletableFuture<PutObjectResponse> putObjectFuture = SocketAccess.doPrivileged( () -> s3AsyncClient.putObject( putObjectRequestBuilder.build(), diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/UploadRequest.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/UploadRequest.java index 3804c8417eb9f..a5304dc4a97d6 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/UploadRequest.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/UploadRequest.java @@ -25,6 +25,8 @@ public class UploadRequest { private final boolean doRemoteDataIntegrityCheck; private final Long expectedChecksum; + private boolean uploadRetryEnabled; + /** * Construct a new UploadRequest object * @@ -43,7 +45,8 @@ public UploadRequest( WritePriority writePriority, CheckedConsumer<Boolean, IOException> uploadFinalizer, boolean doRemoteDataIntegrityCheck, - Long expectedChecksum + Long expectedChecksum, + boolean uploadRetryEnabled ) { this.bucket = bucket; this.key = key; @@ -52,6 +55,7 @@ public UploadRequest( this.uploadFinalizer = uploadFinalizer; this.doRemoteDataIntegrityCheck = doRemoteDataIntegrityCheck; this.expectedChecksum = expectedChecksum; + this.uploadRetryEnabled = uploadRetryEnabled; } public String getBucket() { @@ -81,4 +85,8 @@ public boolean doRemoteDataIntegrityCheck() { public Long getExpectedChecksum() { return expectedChecksum; } + + public boolean isUploadRetryEnabled() { + return uploadRetryEnabled; + } } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java index 7c67519f2f3b0..8c7e196d7c812 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java @@ -9,7 +9,10 @@ package org.opensearch.repositories.s3; import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.exception.SdkException; +import software.amazon.awssdk.core.sync.RequestBody; import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.AbortMultipartUploadResponse; import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; @@ -18,8 +21,10 @@ import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse; import software.amazon.awssdk.services.s3.model.DeleteObjectRequest; import software.amazon.awssdk.services.s3.model.DeleteObjectResponse; +import software.amazon.awssdk.services.s3.model.ObjectCannedACL; import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.PutObjectResponse; +import software.amazon.awssdk.services.s3.model.StorageClass; import software.amazon.awssdk.services.s3.model.UploadPartRequest; import
software.amazon.awssdk.services.s3.model.UploadPartResponse; @@ -37,6 +42,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.repositories.s3.async.AsyncExecutorContainer; import org.opensearch.repositories.s3.async.AsyncTransferEventLoopGroup; @@ -61,15 +67,21 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.IntStream; +import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import static org.opensearch.repositories.s3.S3Repository.BULK_DELETE_SIZE; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class S3BlobContainerMockClientTests extends OpenSearchTestCase implements ConfigPathSupport { @@ -516,7 +528,7 @@ public InputStreamContainer apply(Integer partNo, Long size, Long position) thro } }, partSize, calculateLastPartSize(blobSize, partSize), calculateNumberOfParts(blobSize, partSize)); } - }, blobSize, false, WritePriority.HIGH, uploadSuccess -> { + }, blobSize, false, WritePriority.NORMAL, uploadSuccess -> { assertTrue(uploadSuccess); if (throwExceptionOnFinalizeUpload) { throw new RuntimeException(); @@ -546,4 +558,115 @@ private long calculateLastPartSize(long totalSize, long partSize) { private int calculateNumberOfParts(long contentLength, long partSize) { return (int) ((contentLength % partSize) == 0 ? 
contentLength / partSize : (contentLength / partSize) + 1); } + + public void testFailureWhenLargeFileRedirected() throws IOException, ExecutionException, InterruptedException { + testLargeFilesRedirectedToSlowSyncClient(true); + } + + public void testLargeFileRedirected() throws IOException, ExecutionException, InterruptedException { + testLargeFilesRedirectedToSlowSyncClient(false); + } + + private void testLargeFilesRedirectedToSlowSyncClient(boolean expectException) throws IOException, InterruptedException { + final ByteSizeValue partSize = new ByteSizeValue(1024, ByteSizeUnit.MB); + + int numberOfParts = 20; + final long lastPartSize = new ByteSizeValue(20, ByteSizeUnit.MB).getBytes(); + final long blobSize = ((numberOfParts - 1) * partSize.getBytes()) + lastPartSize; + CountDownLatch countDownLatch = new CountDownLatch(1); + AtomicReference exceptionRef = new AtomicReference<>(); + ActionListener completionListener = ActionListener.wrap(resp -> { countDownLatch.countDown(); }, ex -> { + exceptionRef.set(ex); + countDownLatch.countDown(); + }); + + final String bucketName = randomAlphaOfLengthBetween(1, 10); + + final BlobPath blobPath = new BlobPath(); + if (randomBoolean()) { + IntStream.of(randomIntBetween(1, 5)).forEach(value -> blobPath.add("path_" + value)); + } + + final long bufferSize = ByteSizeUnit.MB.toBytes(randomIntBetween(5, 1024)); + + final S3BlobStore blobStore = mock(S3BlobStore.class); + when(blobStore.bucket()).thenReturn(bucketName); + when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + when(blobStore.bufferSizeInBytes()).thenReturn(bufferSize); + + final boolean serverSideEncryption = randomBoolean(); + when(blobStore.serverSideEncryption()).thenReturn(serverSideEncryption); + + final StorageClass storageClass = randomFrom(StorageClass.values()); + when(blobStore.getStorageClass()).thenReturn(storageClass); + when(blobStore.isRedirectLargeUploads()).thenReturn(true); + + final ObjectCannedACL cannedAccessControlList = randomBoolean() ? 
randomFrom(ObjectCannedACL.values()) : null; + if (cannedAccessControlList != null) { + when(blobStore.getCannedACL()).thenReturn(cannedAccessControlList); + } + + final S3Client client = mock(S3Client.class); + final AmazonS3Reference clientReference = Mockito.spy(new AmazonS3Reference(client)); + doNothing().when(clientReference).close(); + when(blobStore.clientReference()).thenReturn(clientReference); + final CreateMultipartUploadResponse createMultipartUploadResponse = CreateMultipartUploadResponse.builder() + .uploadId(randomAlphaOfLength(10)) + .build(); + when(client.createMultipartUpload(any(CreateMultipartUploadRequest.class))).thenReturn(createMultipartUploadResponse); + if (expectException) { + when(client.uploadPart(any(UploadPartRequest.class), any(RequestBody.class))).thenThrow( + SdkException.create("Expected upload part request to fail", new RuntimeException()) + ); + } else { + when(client.uploadPart(any(UploadPartRequest.class), any(RequestBody.class))).thenReturn(UploadPartResponse.builder().build()); + } + + // Fail the completion request + when(client.completeMultipartUpload(any(CompleteMultipartUploadRequest.class))).thenReturn( + CompleteMultipartUploadResponse.builder().build() + ); + when(client.abortMultipartUpload(any(AbortMultipartUploadRequest.class))).thenReturn( + AbortMultipartUploadResponse.builder().build() + ); + + List<InputStream> openInputStreams = new ArrayList<>(); + final S3BlobContainer s3BlobContainer = Mockito.spy(new S3BlobContainer(blobPath, blobStore)); + s3BlobContainer.asyncBlobUpload(new WriteContext("write_large_blob", new StreamContextSupplier() { + @Override + public StreamContext supplyStreamContext(long partSize) { + return new StreamContext(new CheckedTriFunction<Integer, Long, Long, InputStreamContainer>() { + @Override + public InputStreamContainer apply(Integer partNo, Long size, Long position) throws IOException { + InputStream inputStream = new OffsetRangeIndexInputStream(new ZeroIndexInput("desc", blobSize), size, position); + openInputStreams.add(inputStream); + return new InputStreamContainer(inputStream, size, position); + } + }, partSize, calculateLastPartSize(blobSize, partSize), calculateNumberOfParts(blobSize, partSize)); + } + }, blobSize, false, WritePriority.HIGH, uploadSuccess -> { assertTrue(uploadSuccess); }, false, null), completionListener); + + assertTrue(countDownLatch.await(5000, TimeUnit.SECONDS)); + if (expectException) { + assertNotNull(exceptionRef.get()); + } else { + assertNull(exceptionRef.get()); + } + verify(s3BlobContainer, times(1)).executeMultipartUpload(any(S3BlobStore.class), anyString(), any(InputStream.class), anyLong()); + + if (expectException) { + verify(client, times(1)).abortMultipartUpload(any(AbortMultipartUploadRequest.class)); + } else { + verify(client, times(0)).abortMultipartUpload(any(AbortMultipartUploadRequest.class)); + } + + openInputStreams.forEach(inputStream -> { + try { + inputStream.close(); + } catch (IOException ex) { + logger.error("Error closing input stream"); + } + }); + } + }
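
The two tests above push a roughly 19 GiB blob through asyncBlobUpload with isRedirectLargeUploads() stubbed to true and verify that the upload is handed to the blocking S3Client exactly once (executeMultipartUpload), with abortMultipartUpload invoked only on failure. A sketch of the redirect decision being exercised; the threshold constant here is illustrative, since the real cutoff comes from repository settings rather than a hard-coded value:

```java
public class RedirectDecisionSketch {
    // Assumed threshold for illustration only.
    static final long LARGE_BLOB_THRESHOLD_BYTES = 5L * 1024 * 1024 * 1024; // 5 GiB

    static boolean redirectToSyncClient(long blobSize, boolean redirectLargeUploads) {
        // Large blobs skip the async/event-loop path and go to the blocking
        // S3Client's multipart upload, which the mock test verifies via a
        // single executeMultipartUpload(...) invocation.
        return redirectLargeUploads && blobSize > LARGE_BLOB_THRESHOLD_BYTES;
    }

    public static void main(String[] args) {
        // The test's blob: 19 parts of 1024 MB plus a 20 MB last part.
        long blobSize = 19L * 1024 * 1024 * 1024 + 20L * 1024 * 1024;
        System.out.println(redirectToSyncClient(blobSize, true));  // true -> sync multipart
        System.out.println(redirectToSyncClient(blobSize, false)); // false -> async path
    }
}
```
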
diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java index 61c9c998b1dec..f27c8387b6e45 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java @@ -70,10 +70,10 @@ public void testThereIsADefaultClientByDefault() { assertThat(defaultSettings.protocol, is(Protocol.HTTPS)); assertThat(defaultSettings.proxySettings, is(ProxySettings.NO_PROXY_SETTINGS)); assertThat(defaultSettings.readTimeoutMillis, is(50 * 1000)); - assertThat(defaultSettings.requestTimeoutMillis, is(120 * 1000)); + assertThat(defaultSettings.requestTimeoutMillis, is(5 * 60 * 1000)); assertThat(defaultSettings.connectionTimeoutMillis, is(10 * 1000)); assertThat(defaultSettings.connectionTTLMillis, is(5 * 1000)); - assertThat(defaultSettings.maxConnections, is(100)); + assertThat(defaultSettings.maxConnections, is(500)); assertThat(defaultSettings.maxRetries, is(3)); assertThat(defaultSettings.throttleRetries, is(true)); } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java index 2437547a80a6f..b753b847df869 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java @@ -82,7 +82,7 @@ public void testOneChunkUpload() { s3AsyncClient, new UploadRequest("bucket", "key", ByteSizeUnit.MB.toBytes(1), WritePriority.HIGH, uploadSuccess -> { // do nothing - }, false, null), + }, false, null, true), new StreamContext((partIdx, partSize, position) -> { streamRef.set(new ZeroInputStream(partSize)); return new InputStreamContainer(streamRef.get(), partSize, position); @@ -127,7 +127,7 @@ public void testOneChunkUploadCorruption() { s3AsyncClient, new UploadRequest("bucket", "key", ByteSizeUnit.MB.toBytes(1), WritePriority.HIGH, uploadSuccess -> { // do nothing - }, false, null), + }, false, null, true), new StreamContext( (partIdx, partSize, position) -> new InputStreamContainer(new ZeroInputStream(partSize), partSize, position), ByteSizeUnit.MB.toBytes(1), @@ -180,7 +180,7 @@ public void testMultipartUpload() { s3AsyncClient, new UploadRequest("bucket", "key", ByteSizeUnit.MB.toBytes(5), WritePriority.HIGH, uploadSuccess -> { // do nothing - }, true, 3376132981L), + }, true, 3376132981L, true), new StreamContext((partIdx, partSize, position) -> { InputStream stream = new ZeroInputStream(partSize); streams.add(stream); @@ -240,7 +240,7 @@ public void testMultipartUploadCorruption() { s3AsyncClient, new UploadRequest("bucket", "key", ByteSizeUnit.MB.toBytes(5), WritePriority.HIGH, uploadSuccess -> { // do nothing - }, true, 0L), + }, true, 0L, true), new StreamContext( (partIdx, partSize, position) -> new InputStreamContainer(new ZeroInputStream(partSize), partSize, position), ByteSizeUnit.MB.toBytes(1), diff --git a/plugins/telemetry-otel/build.gradle b/plugins/telemetry-otel/build.gradle index f5c367cb7643b..9be83e30c3183 100644 --- a/plugins/telemetry-otel/build.gradle +++ b/plugins/telemetry-otel/build.gradle @@ -78,6 +78,7 @@ thirdPartyAudit { 'org.conscrypt.ConscryptHostnameVerifier', 'org.openjsse.javax.net.ssl.SSLParameters', 'org.openjsse.javax.net.ssl.SSLSocket', + 'io.opentelemetry.api.events.EventBuilder', 'io.opentelemetry.api.events.EventEmitter', 'io.opentelemetry.api.events.EventEmitterBuilder', 'io.opentelemetry.api.events.EventEmitterProvider', @@ -86,7 +87,8 @@ thirdPartyAudit { 'io.opentelemetry.sdk.autoconfigure.spi.metrics.ConfigurableMetricExporterProvider', 'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider', 'kotlin.io.path.PathsKt', -
'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider' + 'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider', + 'io.opentelemetry.sdk.autoconfigure.spi.internal.AutoConfigureListener' ) } diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.31.0.jar.sha1 deleted file mode 100644 index eae141a8d1a23..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-1.31.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bb24a44d73484c681c236aed84fe6c28d17f30e2 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.32.0.jar.sha1 new file mode 100644 index 0000000000000..2c038aad4b934 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-1.32.0.jar.sha1 @@ -0,0 +1 @@ +a5c081d8f877225732efe13908f350029c811709 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.31.0.jar.sha1 deleted file mode 100644 index 6e42973adc581..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-context-1.31.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b8004737f7a970124e36ac71fde8eb88423e8cee \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.32.0.jar.sha1 new file mode 100644 index 0000000000000..3243f524432eb --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-context-1.32.0.jar.sha1 @@ -0,0 +1 @@ +c5f8bb68084ea5709a27e935907b1bb49d0bd049 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.31.0.jar.sha1 deleted file mode 100644 index b119468e7f88b..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.31.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b7b4baf5f9af72d5eb8a231dfb114ae31c57150d \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.32.0.jar.sha1 new file mode 100644 index 0000000000000..1d7da47286ae0 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.32.0.jar.sha1 @@ -0,0 +1 @@ +3643061da474061ffa7f2036a58a7a0d40212276 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.31.0.jar.sha1 deleted file mode 100644 index 8f653922d6418..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.31.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -260e5363dad83a0ae65c16ad6a3dd2914e0db201 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.32.0.jar.sha1 new file mode 100644 index 0000000000000..3fab0e47adcbe --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.32.0.jar.sha1 @@ -0,0 +1 @@ +ab56c7223112fac13a66e3f667c5fc666f4a3707 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.31.0.jar.sha1 
b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.31.0.jar.sha1 deleted file mode 100644 index 103da4720de96..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.31.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b6454464425dfd81519070caeca3824558a2f1ae \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.32.0.jar.sha1 new file mode 100644 index 0000000000000..f93cf7a63bfad --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.32.0.jar.sha1 @@ -0,0 +1 @@ +5752d171cd08ac84f9273258a315bc5f97e1187e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.31.0.jar.sha1 deleted file mode 100644 index 3db07532ceea9..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.31.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d8c22b6851bbc3dbf5d2387b9bde158ed5416ba4 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.32.0.jar.sha1 new file mode 100644 index 0000000000000..2fc33b62aee54 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.32.0.jar.sha1 @@ -0,0 +1 @@ +6b41cd66a385d513b58b6617f20b701435b64abd \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.31.0.jar.sha1 deleted file mode 100644 index 10d9b7cdfe3e3..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.31.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dd209381d58cfe81a989e29c9ca26d97c8dabd7a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.32.0.jar.sha1 new file mode 100644 index 0000000000000..99f758b047aa2 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.32.0.jar.sha1 @@ -0,0 +1 @@ +9346006cead763247a786b5cabf3e1ae3c88eadb \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.31.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.31.0-alpha.jar.sha1 deleted file mode 100644 index 162890965a6eb..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.31.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c9f5c063309d92b6dd28bff0667f54b63afd36f \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.32.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.32.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..705a342a684c4 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.32.0-alpha.jar.sha1 @@ -0,0 +1 @@ +fab56e187e3fb3c70c18223184d53a76500114ab \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.31.0.jar.sha1 deleted file mode 100644 index d6ce31a31cc6f..0000000000000 --- 
a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.31.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2b2093be08a09ac536292bf6cecf8129cc7fb191 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.32.0.jar.sha1 new file mode 100644 index 0000000000000..31818695cc774 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.32.0.jar.sha1 @@ -0,0 +1 @@ +504de8cc7dc68e84c8c7c2757522d934e9c50d35 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.31.0.jar.sha1 deleted file mode 100644 index 8a6a9705d836d..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.31.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f492528288236e097e12fc1c45963dd82c70d33c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.32.0.jar.sha1 new file mode 100644 index 0000000000000..3cf3080a98bd9 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.32.0.jar.sha1 @@ -0,0 +1 @@ +454c7a6afab864de9f0c166246f28f16aaa824c1 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.31.0.jar.sha1 deleted file mode 100644 index 37d79f5c573f7..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.31.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a63a203d3dc6f8875f8c26b9e3b522dc9a3f6280 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.32.0.jar.sha1 new file mode 100644 index 0000000000000..41b0dca07556e --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.32.0.jar.sha1 @@ -0,0 +1 @@ +b054760243906af0a327a8f5bd99adc2826ccd88 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.31.0.jar.sha1 deleted file mode 100644 index 80179e4808f50..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.31.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -47cc23762fae728d68e4fda1dfb71986ae0b8b3e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.32.0.jar.sha1 new file mode 100644 index 0000000000000..2f71fd5cc780a --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.32.0.jar.sha1 @@ -0,0 +1 @@ +bff24f085193e105d4e23e3db27bf81ccb3d830e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.31.0.jar.sha1 deleted file mode 100644 index fd917a58ba77c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.31.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a3941197cfb8ae9eb9e482073480c0c3918b746c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.32.0.jar.sha1 new file mode 100644 index 0000000000000..f0060b8a0f78f --- /dev/null +++ 
b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.32.0.jar.sha1 @@ -0,0 +1 @@ +d80ad3210fa890a856a1d04379d134ab44a09501 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.21.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.21.0-alpha.jar.sha1 deleted file mode 100644 index 77b12c99464f6..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.21.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -207660e74d1e155272e9559fd4d27854b92fc6ac \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.23.1-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.23.1-alpha.jar.sha1 new file mode 100644 index 0000000000000..e730c83af905e --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.23.1-alpha.jar.sha1 @@ -0,0 +1 @@ +218e361772670212a46be5940010222d68e66f2a \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.12.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.12.jar.sha1 deleted file mode 100644 index 352d69396d0c9..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -378dc5a375e6440099e837b22cf4b01341cbe4ea \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.13.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.13.jar.sha1 new file mode 100644 index 0000000000000..5eaf96739ed72 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.13.jar.sha1 @@ -0,0 +1 @@ +faea23e582978a34f6a932b81e86206ec2314990 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.12.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.12.jar.sha1 deleted file mode 100644 index 1bcb0e0c52950..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e839fadb8f45d8a7a2783466faedd03373366c23 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.13.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.13.jar.sha1 new file mode 100644 index 0000000000000..091125169c696 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.13.jar.sha1 @@ -0,0 +1 @@ +c5af7bc746050d080891a5446cca2c96a0c51d03 \ No newline at end of file diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml index 3b16cdb13a22f..e7da9a0bc454c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml @@ -598,7 +598,6 @@ setup: - match: { aggregations.histo.buckets.0.doc_count: 2 } - match: { profile.shards.0.aggregations.0.type: DateHistogramAggregator } - match: { profile.shards.0.aggregations.0.description: histo } - - match: { profile.shards.0.aggregations.0.breakdown.collect_count: 4 } - match: { profile.shards.0.aggregations.0.debug.total_buckets: 3 } --- diff --git a/server/build.gradle b/server/build.gradle index 17b8ff0469ad8..30e6c732c3e2d 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -47,7 +47,7 @@ 
publishing { } base { - archivesBaseName = 'opensearch' + archivesName = 'opensearch' } sourceSets { diff --git a/server/cli/build.gradle b/server/cli/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/server/cli/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalIT.java index 54824b67b7abc..25fa7ae7eb8eb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalIT.java @@ -35,23 +35,27 @@ import org.opensearch.action.admin.indices.get.GetIndexRequest; import org.opensearch.action.admin.indices.get.GetIndexResponse; import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.opensearch.action.admin.indices.template.put.PutIndexTemplateRequest; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.index.Index; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.indices.IndicesService; +import org.opensearch.snapshots.AbstractSnapshotIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.Before; +import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.concurrent.ExecutionException; import static org.opensearch.indices.IndicesService.CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING; -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) -public class ClusterIndexRefreshIntervalIT extends OpenSearchIntegTestCase { +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 2) +public class ClusterIndexRefreshIntervalIT extends AbstractSnapshotIntegTestCase { public static final String INDEX_NAME = "test-index"; @@ -69,9 +73,39 @@ public void setUp() throws Exception { internalCluster().startClusterManagerOnlyNode(); } + static void putIndexTemplate(String refreshInterval) { + PutIndexTemplateRequest request = new PutIndexTemplateRequest("my-template"); // <1> + request.patterns(Arrays.asList("pattern-1", "log-*")); // <2> + + request.settings( + Settings.builder() // <1> + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 1) + .put("index.refresh_interval", refreshInterval) + ); + assertTrue(client().admin().indices().putTemplate(request).actionGet().isAcknowledged()); + } + + public void testIndexTemplateCreationSucceedsWhenNoMinimumRefreshInterval() throws ExecutionException, InterruptedException { + String clusterManagerName = internalCluster().getClusterManagerName(); + 
List dataNodes = new ArrayList<>(internalCluster().getDataNodeNames()); + putIndexTemplate("2s"); + + // Test index creation using template with valid refresh interval + String indexName = "log-myindex-1"; + createIndex(indexName); + ensureYellowAndNoInitializingShards(indexName); + ensureGreen(indexName); + GetIndexResponse getIndexResponse = client(clusterManagerName).admin().indices().getIndex(new GetIndexRequest()).get(); + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, randomFrom(dataNodes)); + String uuid = getIndexResponse.getSettings().get(indexName).get(IndexMetadata.SETTING_INDEX_UUID); + IndexService indexService = indicesService.indexService(new Index(indexName, uuid)); + assertEquals(TimeValue.timeValueSeconds(2), indexService.getRefreshTaskInterval()); + } + public void testDefaultRefreshIntervalWithUpdateClusterAndIndexSettings() throws Exception { String clusterManagerName = internalCluster().getClusterManagerName(); - List dataNodes = internalCluster().startDataOnlyNodes(2); + List dataNodes = new ArrayList<>(internalCluster().getDataNodeNames()); createIndex(INDEX_NAME); ensureYellowAndNoInitializingShards(INDEX_NAME); ensureGreen(INDEX_NAME); @@ -90,7 +124,7 @@ public void testDefaultRefreshIntervalWithUpdateClusterAndIndexSettings() throws .get(); assertEquals(refreshInterval, indexService.getRefreshTaskInterval()); - // Update of cluster.minimum.index.refresh_interval setting to value less than refreshInterval above will fail + // Update of cluster.minimum.index.refresh_interval setting to value more than default refreshInterval above will fail TimeValue invalidMinimumRefreshInterval = TimeValue.timeValueMillis(refreshInterval.millis() + randomIntBetween(1, 1000)); IllegalArgumentException exceptionDuringMinUpdate = assertThrows( IllegalArgumentException.class, @@ -205,7 +239,7 @@ public void testRefreshIntervalDisabled() throws ExecutionException, Interrupted .getAsTime(IndicesService.CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.MINUS_ONE); boolean createIndexSuccess = clusterMinimumRefreshInterval.equals(TimeValue.MINUS_ONE); String clusterManagerName = internalCluster().getClusterManagerName(); - List dataNodes = internalCluster().startDataOnlyNodes(2); + List dataNodes = new ArrayList<>(internalCluster().getDataNodeNames()); Settings settings = Settings.builder() .put(indexSettings()) .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), IndexSettings.MINIMUM_REFRESH_INTERVAL) @@ -236,7 +270,7 @@ protected TimeValue getMinRefreshIntervalForRefreshDisabled() { public void testInvalidRefreshInterval() { String invalidRefreshInterval = "-10s"; - internalCluster().startDataOnlyNodes(2); + List dataNodes = new ArrayList<>(internalCluster().getDataNodeNames()); Settings settings = Settings.builder() .put(indexSettings()) .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), invalidRefreshInterval) @@ -251,7 +285,7 @@ public void testInvalidRefreshInterval() { } public void testCreateIndexWithExplicitNullRefreshInterval() throws ExecutionException, InterruptedException { - List dataNodes = internalCluster().startDataOnlyNodes(2); + List dataNodes = new ArrayList<>(internalCluster().getDataNodeNames()); Settings indexSettings = Settings.builder() .put(indexSettings()) .putNull(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey()) @@ -278,7 +312,7 @@ public void testCreateIndexWithExplicitNullRefreshInterval() throws ExecutionExc * the index setting. 
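
The template test above and the validation failures asserted in the node-settings subclass further down hinge on one check: a requested index.refresh_interval may not undercut cluster.minimum.index.refresh_interval. A sketch of that guard, with the message format copied from the assertions in this diff (where exactly the check lives in the server is not shown here):

```java
import java.util.Locale;

public class RefreshIntervalGuardSketch {
    // Sketch of the minimum-refresh-interval check these tests exercise.
    static void validateRefreshInterval(String requestedText, long requestedMillis, String minimumText, long minimumMillis) {
        if (requestedMillis < minimumMillis) {
            throw new IllegalArgumentException(
                String.format(
                    Locale.ROOT,
                    "invalid index.refresh_interval [%s]: cannot be smaller than cluster.minimum.index.refresh_interval [%s]",
                    requestedText,
                    minimumText
                )
            );
        }
    }

    public static void main(String[] args) {
        validateRefreshInterval("2s", 2000, "1s", 1000); // a "2s" template is accepted
        validateRefreshInterval("0s", 0, "1s", 1000);    // putIndexTemplate("0s") -> throws
    }
}
```

The same guard also runs when a snapshot restore brings back templates or indices, which is why the restore tests below first fail with the minimum raised to 4s and then pass once it is lowered to 1s.
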
The underlying index should continue to use the same refresh interval as earlier. */ public void testClusterMinimumChangeOnIndexWithCustomRefreshInterval() throws ExecutionException, InterruptedException { - List dataNodes = internalCluster().startDataOnlyNodes(2); + List dataNodes = new ArrayList<>(internalCluster().getDataNodeNames()); TimeValue customRefreshInterval = TimeValue.timeValueSeconds(getDefaultRefreshInterval().getSeconds() + randomIntBetween(1, 5)); Settings indexSettings = Settings.builder() .put(indexSettings()) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalWithNodeSettingsIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalWithNodeSettingsIT.java index 5fc7bfcbcd442..2817d0e6a5951 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalWithNodeSettingsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalWithNodeSettingsIT.java @@ -8,9 +8,27 @@ package org.opensearch.cluster.metadata; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.opensearch.action.admin.indices.get.GetIndexRequest; +import org.opensearch.action.admin.indices.get.GetIndexResponse; +import org.opensearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.indices.IndicesService; +import org.opensearch.snapshots.SnapshotInfo; +import org.opensearch.snapshots.SnapshotState; + +import java.util.Locale; +import java.util.concurrent.ExecutionException; + +import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING; +import static org.opensearch.index.IndexSettings.INDEX_REFRESH_INTERVAL_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertIndexTemplateExists; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertIndexTemplateMissing; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; public class ClusterIndexRefreshIntervalWithNodeSettingsIT extends ClusterIndexRefreshIntervalIT { @@ -26,6 +44,166 @@ protected Settings nodeSettings(int nodeOrdinal) { .build(); } + public void testIndexTemplateCreationFailsWithLessThanMinimumRefreshInterval() throws ExecutionException, InterruptedException { + Throwable throwable = assertThrows(IllegalArgumentException.class, () -> putIndexTemplate("0s")); + assertEquals( + throwable.getMessage(), + String.format( + Locale.ROOT, + "invalid index.refresh_interval [%s]: cannot be smaller than cluster.minimum.index.refresh_interval [%s]", + "0s", + getMinRefreshIntervalForRefreshDisabled() + ) + ); + } + + public void testIndexTemplateSnapshotRestoreWithLessThanMinimumRefreshInterval() throws ExecutionException, InterruptedException { + putIndexTemplate("2s"); + createRepository("test-repo", "fs"); + + final SnapshotInfo snapshotInfo = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap") + .setIndices() + .setWaitForCompletion(true) + .execute() + .get() + .getSnapshotInfo(); + assertThat(snapshotInfo.state(), is(SnapshotState.SUCCESS)); + + 
assertThat(snapshotInfo.totalShards(), equalTo(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(0)); + + assertThat(client().admin().indices().prepareDeleteTemplate("my-template").get().isAcknowledged(), equalTo(true)); + + GetIndexTemplatesResponse getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); + assertIndexTemplateMissing(getIndexTemplatesResponse, "my-template"); + + String clusterManagerName = internalCluster().getClusterManagerName(); + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder() + .put(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "5s") + .put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "4s") + ) + .get(); + + logger.info("--> try restore cluster state -- should fail"); + Throwable throwable = assertThrows( + IllegalArgumentException.class, + () -> clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .setRestoreGlobalState(true) + .execute() + .actionGet() + ); + assertEquals( + throwable.getMessage(), + "invalid index.refresh_interval [2s]: cannot be smaller than cluster.minimum.index.refresh_interval [4s]" + ); + + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder() + .put(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "5s") + .put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1s") + ) + .get(); + + logger.info("--> restore cluster state"); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .setRestoreGlobalState(true) + .execute() + .actionGet(); + assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0)); + + getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); + assertIndexTemplateExists(getIndexTemplatesResponse, "my-template"); + + } + + public void testIndexSnapshotRestoreWithLessThanMinimumRefreshInterval() throws ExecutionException, InterruptedException { + createIndex( + "my-index", + Settings.builder() + .put(indexSettings()) + .put(INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(INDEX_REFRESH_INTERVAL_SETTING.getKey(), "2s") + .build() + ); + + createRepository("test-repo", "fs"); + + final SnapshotInfo snapshotInfo = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap") + .setIndices() + .setWaitForCompletion(true) + .execute() + .get() + .getSnapshotInfo(); + assertThat(snapshotInfo.state(), is(SnapshotState.SUCCESS)); + + assertThat(snapshotInfo.totalShards(), equalTo(1)); + assertThat(snapshotInfo.successfulShards(), equalTo(1)); + + GetIndexResponse getIndexResponse = client().admin().indices().getIndex(new GetIndexRequest().indices("my-index")).get(); + assertEquals(1, getIndexResponse.indices().length); + assertEquals("2s", getIndexResponse.getSetting("my-index", INDEX_REFRESH_INTERVAL_SETTING.getKey())); + + assertThat(client().admin().indices().prepareDelete("my-index").get().isAcknowledged(), equalTo(true)); + + getIndexResponse = client().admin().indices().getIndex(new GetIndexRequest()).get(); + assertEquals(getIndexResponse.indices().length, 0); + + String clusterManagerName = internalCluster().getClusterManagerName(); + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder() + .put(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "5s") + 
.put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "4s") + ) + .get(); + + logger.info("--> try restore cluster state -- should fail"); + Throwable throwable = assertThrows( + IllegalArgumentException.class, + () -> clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .setRestoreGlobalState(true) + .execute() + .actionGet() + ); + assertEquals( + throwable.getMessage(), + "invalid index.refresh_interval [2s]: cannot be smaller than cluster.minimum.index.refresh_interval [4s]" + ); + + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder() + .put(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "5s") + .put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1s") + ) + .get(); + + logger.info("--> try restore cluster state -- should pass"); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .setRestoreGlobalState(true) + .execute() + .actionGet(); + assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(1)); + + getIndexResponse = client().admin().indices().getIndex(new GetIndexRequest().indices("my-index")).get(); + assertEquals(getIndexResponse.indices().length, 1); + } + @Override protected TimeValue getMinRefreshIntervalForRefreshDisabled() { return TimeValue.timeValueSeconds(1); diff --git a/server/src/internalClusterTest/java/org/opensearch/index/engine/MaxDocsLimitIT.java b/server/src/internalClusterTest/java/org/opensearch/index/engine/MaxDocsLimitIT.java index 385d33c359559..8321630d34229 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/engine/MaxDocsLimitIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/engine/MaxDocsLimitIT.java @@ -36,6 +36,8 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexSettings; @@ -65,6 +67,7 @@ public class MaxDocsLimitIT extends OpenSearchIntegTestCase { private static final AtomicInteger maxDocs = new AtomicInteger(); + private static final ShardId shardId = new ShardId(new Index("test", "_na_"), 0); public static class TestEnginePlugin extends Plugin implements EnginePlugin { @Override @@ -122,7 +125,10 @@ public void testMaxDocsLimit() throws Exception { IllegalArgumentException.class, () -> client().prepareDelete("test", "any-id").get() ); - assertThat(deleteError.getMessage(), containsString("Number of documents in the index can't exceed [" + maxDocs.get() + "]")); + assertThat( + deleteError.getMessage(), + containsString("Number of documents in shard " + shardId + " exceeds the limit of [" + maxDocs.get() + "] documents per shard") + ); client().admin().indices().prepareRefresh("test").get(); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(new MatchAllQueryBuilder()) @@ -208,7 +214,16 @@ static IndexingResult indexDocs(int numRequests, int numThreads) throws Exceptio assertThat(resp.status(), equalTo(RestStatus.CREATED)); } catch (IllegalArgumentException e) { numFailure.incrementAndGet(); - assertThat(e.getMessage(), containsString("Number of documents in the index can't exceed [" + maxDocs.get() + "]")); + 
assertThat( + e.getMessage(), + containsString( + "Number of documents in shard " + + shardId + + " exceeds the limit of [" + + maxDocs.get() + + "] documents per shard" + ) + ); } } }); diff --git a/server/src/internalClusterTest/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlMultiNodeIT.java b/server/src/internalClusterTest/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlMultiNodeIT.java new file mode 100644 index 0000000000000..0af3d31f9e846 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlMultiNodeIT.java @@ -0,0 +1,292 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.action.admin.indices.stats.ShardStats; +import org.opensearch.action.bulk.BulkRequest; +import org.opensearch.action.bulk.BulkResponse; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.action.search.SearchPhaseExecutionException; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.UUIDs; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.node.resource.tracker.ResourceTrackerSettings; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings; +import org.opensearch.ratelimitting.admissioncontrol.stats.AdmissionControllerStats; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.After; +import org.junit.Before; + +import java.util.Arrays; +import java.util.Collections; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Stream; + +import static org.opensearch.ratelimitting.admissioncontrol.AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE; +import static org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT; +import static org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 2, numClientNodes = 1) +public class AdmissionControlMultiNodeIT extends OpenSearchIntegTestCase { + + public static final Settings settings = 
Settings.builder() + .put(ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), TimeValue.timeValueMillis(500)) + .put(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), TimeValue.timeValueMillis(500)) + .put(ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.ENFORCED) + .put(SEARCH_CPU_USAGE_LIMIT.getKey(), 0) + .put(INDEXING_CPU_USAGE_LIMIT.getKey(), 0) + .build(); + + private static final Logger LOGGER = LogManager.getLogger(AdmissionControlMultiNodeIT.class); + + public static final String INDEX_NAME = "test_index"; + + @Before + public void init() { + assertAcked( + prepareCreate( + INDEX_NAME, + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + ) + ); + ensureGreen(INDEX_NAME); + } + + @After + public void cleanup() { + client().admin().indices().prepareDelete(INDEX_NAME).get(); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(settings).build(); + } + + public void testAdmissionControlRejectionOnEnforced() { + Tuple primaryReplicaNodeNames = getPrimaryReplicaNodeNames(INDEX_NAME); + String primaryName = primaryReplicaNodeNames.v1(); + String replicaName = primaryReplicaNodeNames.v2(); + String coordinatingOnlyNode = getCoordinatingOnlyNode(); + AdmissionControlService admissionControlServicePrimary = internalCluster().getInstance(AdmissionControlService.class, primaryName); + AdmissionControlService admissionControlServiceReplica = internalCluster().getInstance(AdmissionControlService.class, replicaName); + final BulkRequest bulkRequest = new BulkRequest(); + for (int i = 0; i < 3; ++i) { + IndexRequest request = new IndexRequest(INDEX_NAME).id(UUIDs.base64UUID()) + .source(Collections.singletonMap("key", randomAlphaOfLength(50))); + bulkRequest.add(request); + } + BulkResponse res = client(coordinatingOnlyNode).bulk(bulkRequest).actionGet(); + assertEquals(429, res.getItems()[0].getFailure().getStatus().getStatus()); + AdmissionControllerStats admissionControlPrimaryStats = admissionControlServicePrimary.stats() + .getAdmissionControllerStatsList() + .get(0); + assertEquals(admissionControlPrimaryStats.rejectionCount.get(AdmissionControlActionType.INDEXING.getType()).longValue(), 1); + Arrays.stream(res.getItems()).forEach(bulkItemResponse -> { + assertTrue(bulkItemResponse.getFailureMessage().contains("OpenSearchRejectedExecutionException")); + }); + SearchResponse searchResponse; + try { + searchResponse = client(coordinatingOnlyNode).prepareSearch(INDEX_NAME).get(); + } catch (Exception exception) { + assertTrue(((SearchPhaseExecutionException) exception).getDetailedMessage().contains("OpenSearchRejectedExecutionException")); + } + AdmissionControllerStats primaryStats = admissionControlServicePrimary.stats().getAdmissionControllerStatsList().get(0); + assertEquals(primaryStats.rejectionCount.get(AdmissionControlActionType.SEARCH.getType()).longValue(), 1); + } + + public void testAdmissionControlEnforcedOnNonACEnabledActions() throws ExecutionException, InterruptedException { + String coordinatingOnlyNode = getCoordinatingOnlyNode(); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + + updateSettingsRequest.transientSettings( + Settings.builder() + .put( + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + ); + 
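
These settings pin both CPU usage limits to 0 under ENFORCED transport-layer mode, so any measurable CPU activity trips admission control: the bulk and search requests in the tests are rejected with 429s while rejection counters tick up. A simplified sketch of the admit/monitor/enforce decision (the real controller's bookkeeping and thresholds are more involved than this):

```java
public class AdmissionControlSketch {
    enum Mode { DISABLED, MONITOR, ENFORCED }

    // Simplified decision: MONITOR only counts rejections, ENFORCED counts and rejects.
    static boolean admit(Mode mode, double cpuUsagePercent, double limitPercent, long[] rejectionCounter) {
        if (mode == Mode.DISABLED || cpuUsagePercent <= limitPercent) {
            return true;
        }
        rejectionCounter[0]++;                 // both MONITOR and ENFORCED record it
        return mode != Mode.ENFORCED;          // only ENFORCED actually rejects (HTTP 429)
    }

    public static void main(String[] args) {
        long[] rejections = {0};
        // The IT sets the limits to 0, so any non-zero CPU usage trips the check.
        System.out.println(admit(Mode.ENFORCED, 12.5, 0, rejections)); // false -> 429
        System.out.println(admit(Mode.MONITOR, 12.5, 0, rejections));  // true, but counted
        System.out.println(rejections[0]);                             // 2
    }
}
```

This mirrors what the three tests assert: ENFORCED yields 429s plus counter increments, MONITOR yields successful requests plus counter increments, and DISABLED yields successes with zero counters.
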
+ assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet());
+ NodesStatsRequest nodesStatsRequest = new NodesStatsRequest();
+ nodesStatsRequest.clear()
+ .indices(true)
+ .addMetrics(
+ NodesStatsRequest.Metric.JVM.metricName(),
+ NodesStatsRequest.Metric.OS.metricName(),
+ NodesStatsRequest.Metric.FS.metricName(),
+ NodesStatsRequest.Metric.PROCESS.metricName(),
+ NodesStatsRequest.Metric.ADMISSION_CONTROL.metricName()
+ );
+ NodesStatsResponse nodesStatsResponse = client(coordinatingOnlyNode).admin().cluster().nodesStats(nodesStatsRequest).actionGet();
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().health(new ClusterHealthRequest()).actionGet();
+ assertEquals(200, clusterHealthResponse.status().getStatus());
+ assertFalse(nodesStatsResponse.hasFailures());
+ }
+
+ public void testAdmissionControlRejectionOnMonitor() {
+ Tuple<String, String> primaryReplicaNodeNames = getPrimaryReplicaNodeNames(INDEX_NAME);
+ String primaryName = primaryReplicaNodeNames.v1();
+ String replicaName = primaryReplicaNodeNames.v2();
+ String coordinatingOnlyNode = getCoordinatingOnlyNode();
+
+ AdmissionControlService admissionControlServicePrimary = internalCluster().getInstance(AdmissionControlService.class, primaryName);
+ AdmissionControlService admissionControlServiceReplica = internalCluster().getInstance(AdmissionControlService.class, replicaName);
+
+ ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest();
+
+ updateSettingsRequest.transientSettings(
+ Settings.builder()
+ .put(
+ CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(),
+ AdmissionControlMode.MONITOR.getMode()
+ )
+ );
+ assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet());
+
+ final BulkRequest bulkRequest = new BulkRequest();
+ for (int i = 0; i < 3; ++i) {
+ IndexRequest request = new IndexRequest(INDEX_NAME).id(UUIDs.base64UUID())
+ .source(Collections.singletonMap("key", randomAlphaOfLength(50)));
+ bulkRequest.add(request);
+ }
+ BulkResponse res = client(coordinatingOnlyNode).bulk(bulkRequest).actionGet();
+ assertFalse(res.hasFailures());
+ AdmissionControllerStats admissionControlPrimaryStats = admissionControlServicePrimary.stats()
+ .getAdmissionControllerStatsList()
+ .get(0);
+ AdmissionControllerStats admissionControlReplicaStats = admissionControlServiceReplica.stats()
+ .getAdmissionControllerStatsList()
+ .get(0);
+ long primaryRejectionCount = admissionControlPrimaryStats.rejectionCount.getOrDefault(
+ AdmissionControlActionType.INDEXING.getType(),
+ new AtomicLong(0).longValue()
+ );
+ long replicaRejectionCount = admissionControlReplicaStats.rejectionCount.getOrDefault(
+ AdmissionControlActionType.INDEXING.getType(),
+ new AtomicLong(0).longValue()
+ );
+ assertEquals(1, primaryRejectionCount);
+ assertEquals(0, replicaRejectionCount);
+ client(coordinatingOnlyNode).prepareSearch(INDEX_NAME).get();
+ admissionControlPrimaryStats = admissionControlServicePrimary.stats().getAdmissionControllerStatsList().get(0);
+ admissionControlReplicaStats = admissionControlServiceReplica.stats().getAdmissionControllerStatsList().get(0);
+ primaryRejectionCount = admissionControlPrimaryStats.getRejectionCount()
+ .getOrDefault(AdmissionControlActionType.SEARCH.getType(), new AtomicLong(0).longValue());
+ replicaRejectionCount = admissionControlReplicaStats.getRejectionCount()
+ .getOrDefault(AdmissionControlActionType.SEARCH.getType(), new AtomicLong(0).longValue());
+ assertTrue(primaryRejectionCount == 1 || replicaRejectionCount == 1);
+ assertFalse(primaryRejectionCount == 1 && replicaRejectionCount == 1);
+ }
+
+ public void testAdmissionControlRejectionOnDisabled() {
+ Tuple<String, String> primaryReplicaNodeNames = getPrimaryReplicaNodeNames(INDEX_NAME);
+ String primaryName = primaryReplicaNodeNames.v1();
+ String replicaName = primaryReplicaNodeNames.v2();
+ String coordinatingOnlyNode = getCoordinatingOnlyNode();
+
+ AdmissionControlService admissionControlServicePrimary = internalCluster().getInstance(AdmissionControlService.class, primaryName);
+ AdmissionControlService admissionControlServiceReplica = internalCluster().getInstance(AdmissionControlService.class, replicaName);
+
+ ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest();
+
+ updateSettingsRequest.transientSettings(
+ Settings.builder()
+ .put(
+ CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(),
+ AdmissionControlMode.DISABLED.getMode()
+ )
+ );
+ assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet());
+
+ final BulkRequest bulkRequest = new BulkRequest();
+ for (int i = 0; i < 3; ++i) {
+ IndexRequest request = new IndexRequest(INDEX_NAME).id(UUIDs.base64UUID())
+ .source(Collections.singletonMap("key", randomAlphaOfLength(50)));
+ bulkRequest.add(request);
+ }
+ BulkResponse res = client(coordinatingOnlyNode).bulk(bulkRequest).actionGet();
+ assertFalse(res.hasFailures());
+ AdmissionControllerStats admissionControlPrimaryStats = admissionControlServicePrimary.stats()
+ .getAdmissionControllerStatsList()
+ .get(0);
+ AdmissionControllerStats admissionControlReplicaStats = admissionControlServiceReplica.stats()
+ .getAdmissionControllerStatsList()
+ .get(0);
+ long primaryRejectionCount = admissionControlPrimaryStats.rejectionCount.getOrDefault(
+ AdmissionControlActionType.INDEXING.getType(),
+ new AtomicLong(0).longValue()
+ );
+ long replicaRejectionCount = admissionControlReplicaStats.rejectionCount.getOrDefault(
+ AdmissionControlActionType.INDEXING.getType(),
+ new AtomicLong(0).longValue()
+ );
+ assertEquals(0, primaryRejectionCount);
+ assertEquals(0, replicaRejectionCount);
+ client(coordinatingOnlyNode).prepareSearch(INDEX_NAME).get();
+ admissionControlPrimaryStats = admissionControlServicePrimary.stats().getAdmissionControllerStatsList().get(0);
+ admissionControlReplicaStats = admissionControlServiceReplica.stats().getAdmissionControllerStatsList().get(0);
+ primaryRejectionCount = admissionControlPrimaryStats.getRejectionCount()
+ .getOrDefault(AdmissionControlActionType.SEARCH.getType(), new AtomicLong(0).longValue());
+ replicaRejectionCount = admissionControlReplicaStats.getRejectionCount()
+ .getOrDefault(AdmissionControlActionType.SEARCH.getType(), new AtomicLong(0).longValue());
+ assertEquals(0, primaryRejectionCount);
+ assertEquals(0, replicaRejectionCount);
+ }
+
+ private Tuple<String, String> getPrimaryReplicaNodeNames(String indexName) {
+ IndicesStatsResponse response = client().admin().indices().prepareStats(indexName).get();
+ String primaryId = Stream.of(response.getShards())
+ .map(ShardStats::getShardRouting)
+ .filter(ShardRouting::primary)
+ .findAny()
+ .get()
+ .currentNodeId();
+ String replicaId = Stream.of(response.getShards())
+ .map(ShardStats::getShardRouting)
+ .filter(sr -> sr.primary() == false)
+ .findAny()
+ .get()
+ .currentNodeId();
+ DiscoveryNodes nodes = client().admin().cluster().prepareState().get().getState().nodes();
+ String primaryName = nodes.get(primaryId).getName();
+ String replicaName = nodes.get(replicaId).getName();
+ return new Tuple<>(primaryName, replicaName);
+ }
+
+ private String getCoordinatingOnlyNode() {
+ return client().admin()
+ .cluster()
+ .prepareState()
+ .get()
+ .getState()
+ .nodes()
+ .getCoordinatingOnlyNodes()
+ .values()
+ .iterator()
+ .next()
+ .getName();
+ }
+}
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java
index 99c5d7fb2bae7..d29dacb001434 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java
@@ -8,9 +8,7 @@ package org.opensearch.remotestore;
-import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest;
import org.opensearch.action.index.IndexResponse;
-import org.opensearch.action.support.PlainActionFuture;
import org.opensearch.common.settings.Settings;
import org.opensearch.plugins.Plugin;
import org.opensearch.test.transport.MockTransportService;
@@ -20,7 +18,6 @@ import java.util.Map;
import java.util.concurrent.TimeUnit;
-import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
public class BaseRemoteStoreRestoreIT extends RemoteStoreBaseIntegTestCase {
@@ -49,18 +46,6 @@ protected void restore(String... indices) {
restore(randomBoolean(), indices);
}
- protected void restore(boolean restoreAllShards, String...
indices) { - if (restoreAllShards) { - assertAcked(client().admin().indices().prepareClose(indices)); - } - client().admin() - .cluster() - .restoreRemoteStore( - new RestoreRemoteStoreRequest().indices(indices).restoreAllShards(restoreAllShards), - PlainActionFuture.newFuture() - ); - } - protected void verifyRestoredData(Map indexStats, String indexName, boolean indexMoreData) throws Exception { ensureYellowAndNoInitializingShards(indexName); ensureGreen(indexName); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index 8b4981a15433a..8c15ebd0505d9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -8,12 +8,16 @@ package org.opensearch.remotestore; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; +import org.opensearch.action.admin.indices.get.GetIndexRequest; +import org.opensearch.action.admin.indices.get.GetIndexResponse; import org.opensearch.action.bulk.BulkItemResponse; import org.opensearch.action.bulk.BulkRequest; import org.opensearch.action.bulk.BulkResponse; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.WriteRequest; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.RepositoriesMetadata; @@ -23,9 +27,13 @@ import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.index.Index; import org.opensearch.index.IndexModule; +import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.blobstore.BlobStoreRepository; @@ -43,6 +51,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; @@ -51,6 +60,7 @@ import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; public class RemoteStoreBaseIntegTestCase extends OpenSearchIntegTestCase { protected static final String REPOSITORY_NAME = "test-remote-store-repo"; @@ -380,4 +390,25 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) { return filesExisting.get(); } + + protected IndexShard getIndexShard(String dataNode, String indexName) throws ExecutionException, InterruptedException { + String clusterManagerName = internalCluster().getClusterManagerName(); + 
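// Resolve the index UUID via the cluster manager, then look up the shard through the data node's IndicesService.
+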
IndicesService indicesService = internalCluster().getInstance(IndicesService.class, dataNode); + GetIndexResponse getIndexResponse = client(clusterManagerName).admin().indices().getIndex(new GetIndexRequest()).get(); + String uuid = getIndexResponse.getSettings().get(indexName).get(IndexMetadata.SETTING_INDEX_UUID); + IndexService indexService = indicesService.indexService(new Index(indexName, uuid)); + return indexService.getShard(0); + } + + protected void restore(boolean restoreAllShards, String... indices) { + if (restoreAllShards) { + assertAcked(client().admin().indices().prepareClose(indices)); + } + client().admin() + .cluster() + .restoreRemoteStore( + new RestoreRemoteStoreRequest().indices(indices).restoreAllShards(restoreAllShards), + PlainActionFuture.newFuture() + ); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index b3b4f8e10fd31..e1997fea3433a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -8,44 +8,54 @@ package org.opensearch.remotestore; +import org.opensearch.OpenSearchException; +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; -import org.opensearch.action.admin.indices.get.GetIndexRequest; -import org.opensearch.action.admin.indices.get.GetIndexResponse; +import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.admin.indices.recovery.RecoveryResponse; import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.search.SearchPhaseExecutionException; +import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.RecoverySource; +import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; +import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.BufferedAsyncIOProcessor; -import org.opensearch.core.index.Index; -import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.IndexShardClosedException; import org.opensearch.index.translog.Translog.Durability; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.plugins.Plugin; +import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; +import org.opensearch.transport.TransportService; import org.hamcrest.MatcherAssert; +import java.io.IOException; +import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Optional; +import 
java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
+import java.util.stream.Stream;
import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS;
import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS;
-import static org.opensearch.index.shard.RemoteStoreRefreshListener.LAST_N_METADATA_FILES_TO_KEEP;
import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
@@ -167,7 +177,7 @@ public void testRemoteTranslogCleanup() throws Exception {
}
public void testStaleCommitDeletionWithInvokeFlush() throws Exception {
- internalCluster().startNode();
+ String dataNode = internalCluster().startNode();
createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1));
int numberOfIterations = randomIntBetween(5, 15);
indexData(numberOfIterations, true, INDEX_NAME);
@@ -177,17 +187,20 @@ public void testStaleCommitDeletionWithInvokeFlush() throws Exception {
.get()
.getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID);
Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata");
+
+ IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME);
+ int lastNMetadataFilesToKeep = indexShard.getRecoverySettings().getMinRemoteSegmentMetadataFiles();
// Delete is async.
assertBusy(() -> {
int actualFileCount = getFileCount(indexPath);
- if (numberOfIterations <= LAST_N_METADATA_FILES_TO_KEEP) {
+ if (numberOfIterations <= lastNMetadataFilesToKeep) {
MatcherAssert.assertThat(actualFileCount, is(oneOf(numberOfIterations - 1, numberOfIterations, numberOfIterations + 1)));
} else {
// As delete is async its possible that the file gets created before the deletion or after
// deletion.
MatcherAssert.assertThat(
actualFileCount,
- is(oneOf(LAST_N_METADATA_FILES_TO_KEEP - 1, LAST_N_METADATA_FILES_TO_KEEP, LAST_N_METADATA_FILES_TO_KEEP + 1))
+ is(oneOf(lastNMetadataFilesToKeep - 1, lastNMetadataFilesToKeep, lastNMetadataFilesToKeep + 1))
);
}
}, 30, TimeUnit.SECONDS);
@@ -209,6 +222,44 @@ public void testStaleCommitDeletionWithoutInvokeFlush() throws Exception {
MatcherAssert.assertThat(actualFileCount, is(oneOf(numberOfIterations - 1, numberOfIterations, numberOfIterations + 1)));
}
+ public void testStaleCommitDeletionWithMinSegmentFiles_3() throws Exception {
+ Settings.Builder settings = Settings.builder()
+ .put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "3");
+ internalCluster().startNode(settings);
+
+ createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000L, -1));
+ int numberOfIterations = randomIntBetween(5, 15);
+ indexData(numberOfIterations, true, INDEX_NAME);
+ String indexUUID = client().admin()
+ .indices()
+ .prepareGetSettings(INDEX_NAME)
+ .get()
+ .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID);
+ Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata");
+ int actualFileCount = getFileCount(indexPath);
+ // With the retention count set to 3, we expect exactly 4 metadata files: the 3 retained stale files plus the
+ // one written by the latest refresh.
+ MatcherAssert.assertThat(actualFileCount, is(4));
+ }
+
+ public void testStaleCommitDeletionWithMinSegmentFiles_Disabled() throws Exception {
+ Settings.Builder settings = Settings.builder()
+ .put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "-1");
+ internalCluster().startNode(settings);
+
+ createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000L, -1));
+ int numberOfIterations = randomIntBetween(12, 18);
+ indexData(numberOfIterations, true, INDEX_NAME);
+ String indexUUID = client().admin()
+ .indices()
+ .prepareGetSettings(INDEX_NAME)
+ .get()
+ .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID);
+ Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata");
+ int actualFileCount = getFileCount(indexPath);
+ // With retention disabled (-1), nothing is cleaned up; we expect one metadata file per refresh plus one more
+ // because index creation also triggers a refresh.
+ MatcherAssert.assertThat(actualFileCount, is(numberOfIterations + 1));
+ }
+
/**
* Tests that when the index setting is not passed during index creation, the buffer interval picked up is the cluster
* default.
@@ -222,7 +273,7 @@ public void testDefaultBufferInterval() throws ExecutionException, InterruptedEx
ensureGreen(INDEX_NAME);
assertClusterRemoteBufferInterval(IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, dataNode);
- IndexShard indexShard = getIndexShard(dataNode);
+ IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME);
assertTrue(indexShard.getTranslogSyncProcessor() instanceof BufferedAsyncIOProcessor);
assertBufferInterval(IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, indexShard);
@@ -255,7 +306,7 @@ public void testOverriddenBufferInterval() throws ExecutionException, Interrupte
ensureYellowAndNoInitializingShards(INDEX_NAME);
ensureGreen(INDEX_NAME);
- IndexShard indexShard = getIndexShard(dataNode);
+ IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME);
assertTrue(indexShard.getTranslogSyncProcessor() instanceof BufferedAsyncIOProcessor);
assertBufferInterval(bufferInterval, indexShard);
@@ -371,7 +422,7 @@ private void testRestrictSettingFalse(boolean setRestrictFalse, Durability durab
.put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), durability)
.build();
createIndex(INDEX_NAME, indexSettings);
- IndexShard indexShard = getIndexShard(dataNode);
+ IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME);
assertEquals(durability, indexShard.indexSettings().getTranslogDurability());
durability = randomFrom(Durability.values());
@@ -404,7 +455,7 @@ public void testAsyncDurabilityThrowsExceptionWhenRestrictSettingTrue() throws E
// Case 2 - Test update index fails
createIndex(INDEX_NAME);
- IndexShard indexShard = getIndexShard(dataNode);
+ IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME);
assertEquals(Durability.REQUEST, indexShard.indexSettings().getTranslogDurability());
exception = assertThrows(
IllegalArgumentException.class,
@@ -416,15 +467,6 @@ public void testAsyncDurabilityThrowsExceptionWhenRestrictSettingTrue() throws E
assertEquals(expectedExceptionMsg, exception.getMessage());
}
- private IndexShard getIndexShard(String dataNode) throws ExecutionException, InterruptedException {
- String clusterManagerName = internalCluster().getClusterManagerName();
- IndicesService indicesService = internalCluster().getInstance(IndicesService.class, dataNode);
- GetIndexResponse getIndexResponse = client(clusterManagerName).admin().indices().getIndex(new GetIndexRequest()).get();
- String uuid =
getIndexResponse.getSettings().get(INDEX_NAME).get(IndexMetadata.SETTING_INDEX_UUID); - IndexService indexService = indicesService.indexService(new Index(INDEX_NAME, uuid)); - return indexService.getShard(0); - } - private void assertClusterRemoteBufferInterval(TimeValue expectedBufferInterval, String dataNode) { IndicesService indicesService = internalCluster().getInstance(IndicesService.class, dataNode); assertEquals(expectedBufferInterval, indicesService.getClusterRemoteTranslogBufferInterval()); @@ -516,7 +558,7 @@ public void testNoSearchIdleForAnyReplicaCount() throws ExecutionException, Inte createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); ensureGreen(INDEX_NAME); - IndexShard indexShard = getIndexShard(primaryShardNode); + IndexShard indexShard = getIndexShard(primaryShardNode, INDEX_NAME); assertFalse(indexShard.isSearchIdleSupported()); String replicaShardNode = internalCluster().startDataOnlyNodes(1).get(0); @@ -529,7 +571,235 @@ public void testNoSearchIdleForAnyReplicaCount() throws ExecutionException, Inte ensureGreen(INDEX_NAME); assertFalse(indexShard.isSearchIdleSupported()); - indexShard = getIndexShard(replicaShardNode); + indexShard = getIndexShard(replicaShardNode, INDEX_NAME); assertFalse(indexShard.isSearchIdleSupported()); } + + public void testFallbackToNodeToNodeSegmentCopy() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + List dataNodes = internalCluster().startDataOnlyNodes(2); + + // 1. Create index with 0 replica + createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 10000L, -1)); + ensureGreen(INDEX_NAME); + + // 2. Index docs + indexBulk(INDEX_NAME, 50); + flushAndRefresh(INDEX_NAME); + + // 3. Delete data from remote segment store + String indexUUID = client().admin() + .indices() + .prepareGetSettings(INDEX_NAME) + .get() + .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); + Path segmentDataPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/data"); + + try (Stream files = Files.list(segmentDataPath)) { + files.forEach(p -> { + try { + Files.delete(p); + } catch (IOException e) { + // Ignore + } + }); + } + + // 4. Start recovery by changing number of replicas to 1 + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) + ); + + // 5. Ensure green and verify number of docs + ensureGreen(INDEX_NAME); + assertBusy(() -> { + assertHitCount(client(dataNodes.get(0)).prepareSearch(INDEX_NAME).setSize(0).get(), 50); + assertHitCount(client(dataNodes.get(1)).prepareSearch(INDEX_NAME).setSize(0).get(), 50); + }); + } + + public void testNoMultipleWriterDuringPrimaryRelocation() throws ExecutionException, InterruptedException { + // In this test, we trigger a force flush on existing primary while the primary mode on new primary has been + // activated. There was a bug in primary relocation of remote store enabled indexes where the new primary + // starts uploading translog and segments even before the cluster manager has started this shard. With this test, + // we check that we do not overwrite any file on remote store. Here we will also increase the replica count to + // check that there are no duplicate metadata files for translog or upload. 
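+ // Flow: a MockTransportService hook on the old primary counts down a latch when HANDOFF_PRIMARY_CONTEXT is
+ // sent, a background thread then force-flushes the old primary while the relocation completes, and the
+ // replica count is raised afterwards with the cluster expected to go green.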
+
+ internalCluster().startClusterManagerOnlyNode();
+ String oldPrimary = internalCluster().startDataOnlyNodes(1).get(0);
+ createIndex(INDEX_NAME, remoteStoreIndexSettings(0));
+ ensureGreen(INDEX_NAME);
+ indexBulk(INDEX_NAME, randomIntBetween(5, 10));
+ String newPrimary = internalCluster().startDataOnlyNodes(1).get(0);
+ ensureStableCluster(3);
+
+ IndexShard oldPrimaryIndexShard = getIndexShard(oldPrimary, INDEX_NAME);
+ CountDownLatch flushLatch = new CountDownLatch(1);
+
+ MockTransportService mockTargetTransportService = ((MockTransportService) internalCluster().getInstance(
+ TransportService.class,
+ oldPrimary
+ ));
+ mockTargetTransportService.addSendBehavior((connection, requestId, action, request, options) -> {
+ if (PeerRecoveryTargetService.Actions.HANDOFF_PRIMARY_CONTEXT.equals(action)) {
+ flushLatch.countDown();
+ }
+ connection.sendRequest(requestId, action, request, options);
+ });
+
+ logger.info("--> relocate the shard");
+ client().admin()
+ .cluster()
+ .prepareReroute()
+ .add(new MoveAllocationCommand(INDEX_NAME, 0, oldPrimary, newPrimary))
+ .execute()
+ .actionGet();
+
+ CountDownLatch flushDone = new CountDownLatch(1);
+ Thread flushThread = new Thread(() -> {
+ try {
+ flushLatch.await(2, TimeUnit.SECONDS);
+ oldPrimaryIndexShard.flush(new FlushRequest().waitIfOngoing(true).force(true));
+ } catch (IndexShardClosedException e) {
+ // this is fine
+ } catch (InterruptedException e) {
+ throw new AssertionError(e);
+ } finally {
+ flushDone.countDown();
+ }
+ });
+ flushThread.start();
+ flushDone.await(5, TimeUnit.SECONDS);
+ flushThread.join();
+
+ ClusterHealthResponse clusterHealthResponse = client().admin()
+ .cluster()
+ .prepareHealth()
+ .setWaitForStatus(ClusterHealthStatus.GREEN)
+ .setWaitForEvents(Priority.LANGUID)
+ .setWaitForNoRelocatingShards(true)
+ .setTimeout(TimeValue.timeValueSeconds(5))
+ .execute()
+ .actionGet();
+ assertFalse(clusterHealthResponse.isTimedOut());
+
+ client().admin()
+ .indices()
+ .updateSettings(
+ new UpdateSettingsRequest(INDEX_NAME).settings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1))
+ )
+ .get();
+
+ clusterHealthResponse = client().admin()
+ .cluster()
+ .prepareHealth()
+ .setWaitForStatus(ClusterHealthStatus.GREEN)
+ .setWaitForEvents(Priority.LANGUID)
+ .setWaitForNoRelocatingShards(true)
+ .setTimeout(TimeValue.timeValueSeconds(5))
+ .execute()
+ .actionGet();
+ assertFalse(clusterHealthResponse.isTimedOut());
+ }
+
+ public void testResumeUploadAfterFailedPrimaryRelocation() throws ExecutionException, InterruptedException, IOException {
+ // In this test, we fail the handoff during the primary relocation. This will undo the drainRefreshes and
+ // drainSync performed as part of relocation handoff (before performing the handoff transport action).
+ // We validate the same here by failing the peer recovery and ensuring we can index afterward as well.
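+ // Flow: the HANDOFF_PRIMARY_CONTEXT hook below throws, so the relocation fails and the old primary must
+ // remain the started, writable primary; we then stop it and restore from the remote store to confirm the
+ // resumed uploads captured all indexed documents.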
+ + internalCluster().startClusterManagerOnlyNode(); + String oldPrimary = internalCluster().startDataOnlyNodes(1).get(0); + createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); + ensureGreen(INDEX_NAME); + int docs = randomIntBetween(5, 10); + indexBulk(INDEX_NAME, docs); + flushAndRefresh(INDEX_NAME); + assertHitCount(client(oldPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), docs); + String newPrimary = internalCluster().startDataOnlyNodes(1).get(0); + ensureStableCluster(3); + + IndexShard oldPrimaryIndexShard = getIndexShard(oldPrimary, INDEX_NAME); + CountDownLatch handOffLatch = new CountDownLatch(1); + + MockTransportService mockTargetTransportService = ((MockTransportService) internalCluster().getInstance( + TransportService.class, + oldPrimary + )); + mockTargetTransportService.addSendBehavior((connection, requestId, action, request, options) -> { + if (PeerRecoveryTargetService.Actions.HANDOFF_PRIMARY_CONTEXT.equals(action)) { + handOffLatch.countDown(); + throw new OpenSearchException("failing recovery for test purposes"); + } + connection.sendRequest(requestId, action, request, options); + }); + + logger.info("--> relocate the shard"); + client().admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(INDEX_NAME, 0, oldPrimary, newPrimary)) + .execute() + .actionGet(); + + handOffLatch.await(30, TimeUnit.SECONDS); + + assertTrue(oldPrimaryIndexShard.isStartedPrimary()); + assertEquals(oldPrimary, primaryNodeName(INDEX_NAME)); + assertHitCount(client(oldPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), docs); + + SearchPhaseExecutionException ex = assertThrows( + SearchPhaseExecutionException.class, + () -> client(newPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get() + ); + assertEquals("all shards failed", ex.getMessage()); + + int moreDocs = randomIntBetween(5, 10); + indexBulk(INDEX_NAME, moreDocs); + flushAndRefresh(INDEX_NAME); + int uncommittedOps = randomIntBetween(5, 10); + indexBulk(INDEX_NAME, uncommittedOps); + assertHitCount(client(oldPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), docs + moreDocs); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(INDEX_NAME))); + + restore(true, INDEX_NAME); + ensureGreen(INDEX_NAME); + assertHitCount( + client(newPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), + docs + moreDocs + uncommittedOps + ); + + String newNode = internalCluster().startDataOnlyNodes(1).get(0); + ensureStableCluster(3); + client().admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(INDEX_NAME, 0, newPrimary, newNode)) + .execute() + .actionGet(); + + ClusterHealthResponse clusterHealthResponse = client().admin() + .cluster() + .prepareHealth() + .setWaitForStatus(ClusterHealthStatus.GREEN) + .setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + .setTimeout(TimeValue.timeValueSeconds(10)) + .execute() + .actionGet(); + assertFalse(clusterHealthResponse.isTimedOut()); + + ex = assertThrows( + SearchPhaseExecutionException.class, + () -> client(newPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get() + ); + assertEquals("all shards failed", ex.getMessage()); + assertHitCount( + client(newNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), + docs + moreDocs + uncommittedOps + ); + } } diff --git 
a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java index b7b3f1d14f422..d5cdc22a15478 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java @@ -8,10 +8,16 @@ package org.opensearch.remotestore; +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.stats.ClusterStatsResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; +import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.index.Index; import org.opensearch.index.IndexService; +import org.opensearch.index.ReplicationStats; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.SegmentReplicationState; @@ -20,10 +26,12 @@ import org.opensearch.indices.replication.common.ReplicationCollection; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.disruption.SlowClusterStateProcessing; import java.nio.file.Path; import java.util.Optional; import java.util.Set; +import java.util.concurrent.TimeUnit; /** * This class runs tests with remote store + segRep while blocking file downloads @@ -111,6 +119,75 @@ public void testCancelReplicationWhileFetchingMetadata() throws Exception { cleanupRepo(); } + public void testUpdateVisibleCheckpointWithLaggingClusterStateUpdates_primaryRelocation() throws Exception { + Path location = randomRepoPath().toAbsolutePath(); + Settings nodeSettings = Settings.builder().put(buildRemoteStoreNodeAttributes(location, 0d, "metadata", Long.MAX_VALUE)).build(); + internalCluster().startClusterManagerOnlyNode(nodeSettings); + internalCluster().startDataOnlyNodes(2, nodeSettings); + final Settings indexSettings = Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build(); + createIndex(INDEX_NAME, indexSettings); + ensureGreen(INDEX_NAME); + final Set dataNodeNames = internalCluster().getDataNodeNames(); + final String replicaNode = getNode(dataNodeNames, false); + final String oldPrimary = getNode(dataNodeNames, true); + + // index a doc. 
+ client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", randomInt()).get();
+ refresh(INDEX_NAME);
+
+ logger.info("--> start another node");
+ final String newPrimary = internalCluster().startDataOnlyNode(nodeSettings);
+ ClusterHealthResponse clusterHealthResponse = client().admin()
+ .cluster()
+ .prepareHealth()
+ .setWaitForEvents(Priority.LANGUID)
+ .setWaitForNodes("4")
+ .get();
+ assertFalse(clusterHealthResponse.isTimedOut());
+
+ SlowClusterStateProcessing disruption = new SlowClusterStateProcessing(replicaNode, random(), 0, 0, 1000, 2000);
+ internalCluster().setDisruptionScheme(disruption);
+ disruption.startDisrupting();
+
+ // relocate the primary
+ logger.info("--> relocate the shard");
+ client().admin()
+ .cluster()
+ .prepareReroute()
+ .add(new MoveAllocationCommand(INDEX_NAME, 0, oldPrimary, newPrimary))
+ .execute()
+ .actionGet();
+ clusterHealthResponse = client().admin()
+ .cluster()
+ .prepareHealth()
+ .setWaitForEvents(Priority.LANGUID)
+ .setWaitForNoRelocatingShards(true)
+ .setTimeout(new TimeValue(5, TimeUnit.MINUTES))
+ .execute()
+ .actionGet();
+ assertFalse(clusterHealthResponse.isTimedOut());
+
+ IndexShard newPrimaryShard = getIndexShard(newPrimary, INDEX_NAME);
+ IndexShard replica = getIndexShard(replicaNode, INDEX_NAME);
+ assertBusy(() -> {
+ assertEquals(
+ newPrimaryShard.getLatestReplicationCheckpoint().getSegmentInfosVersion(),
+ replica.getLatestReplicationCheckpoint().getSegmentInfosVersion()
+ );
+ });
+
+ assertBusy(() -> {
+ ClusterStatsResponse clusterStatsResponse = client().admin().cluster().prepareClusterStats().get();
+ ReplicationStats replicationStats = clusterStatsResponse.getIndicesStats().getSegments().getReplicationStats();
+ assertEquals(0L, replicationStats.maxBytesBehind);
+ assertEquals(0L, replicationStats.maxReplicationLag);
+ assertEquals(0L, replicationStats.totalBytesBehind);
+ });
+ disruption.stopDisrupting();
+ disableRepoConsistencyCheck("Remote Store Creates System Repository");
+ cleanupRepo();
+ }
+
private String getNode(Set<String> dataNodeNames, boolean primary) {
assertEquals(2, dataNodeNames.size());
for (String name : dataNodeNames) {
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java
index 8b375841c2913..6d0b074c3a660 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java
@@ -501,10 +501,6 @@ public void testExplain() throws Exception {
}
public void testSimpleNestedSorting() throws Exception {
- assumeFalse(
- "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/11187",
- internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING)
- );
assertAcked(
prepareCreate("test").setSettings(Settings.builder().put(indexSettings()).put("index.refresh_interval", -1)) .setMapping( @@ -740,10 +732,6 @@ public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception { } public void testNestedSortWithMultiLevelFiltering() throws Exception { - assumeFalse( - "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/11187", - internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) - ); assertAcked( prepareCreate("test").setMapping( "{\n" @@ -986,10 +974,6 @@ public void testNestedSortWithMultiLevelFiltering() throws Exception { // https://github.com/elastic/elasticsearch/issues/31554 public void testLeakingSortValues() throws Exception { - assumeFalse( - "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/11187", - internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) - ); assertAcked( prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 1)) .setMapping( @@ -1079,10 +1063,6 @@ public void testLeakingSortValues() throws Exception { } public void testSortNestedWithNestedFilter() throws Exception { - assumeFalse( - "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/11187", - internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) - ); assertAcked( prepareCreate("test").setMapping( XContentFactory.jsonBuilder() @@ -1481,10 +1461,6 @@ public void testSortNestedWithNestedFilter() throws Exception { // Issue #9305 public void testNestedSortingWithNestedFilterAsFilter() throws Exception { - assumeFalse( - "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/11187", - internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) - ); assertAcked( prepareCreate("test").setMapping( jsonBuilder().startObject() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java index c95b179c917e5..81e948640ee94 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java @@ -1863,10 +1863,6 @@ public void testSortMetaField() throws Exception { * Test case for issue 6150: https://github.com/elastic/elasticsearch/issues/6150 */ public void testNestedSort() throws IOException, InterruptedException, ExecutionException { - assumeFalse( - "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/11258", - internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) - ); assertAcked( prepareCreate("test").setMapping( XContentFactory.jsonBuilder() diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java index 4478a3432e519..9a92ddc81852a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java @@ -458,7 +458,9 @@ private void testUpdateIndexSettingsOnlyNotAllowedSettings(String index) { private void 
testUpdateIndexSettingsOnlyAllowedSettings(String index) { final UpdateSettingsRequestBuilder builder = client().admin().indices().prepareUpdateSettings(index); - builder.setSettings(Map.of("index.max_result_window", 1000, "index.search.slowlog.threshold.query.warn", "10s")); + builder.setSettings( + Map.of("index.max_result_window", 1000, "index.search.slowlog.threshold.query.warn", "10s", "index.number_of_replicas", 0) + ); AcknowledgedResponse settingsResponse = builder.execute().actionGet(); assertThat(settingsResponse, notNullValue()); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java index 874713b51d627..8293a5bb27612 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java @@ -58,6 +58,7 @@ import org.opensearch.monitor.process.ProcessStats; import org.opensearch.node.AdaptiveSelectionStats; import org.opensearch.node.NodesResourceUsageStats; +import org.opensearch.ratelimitting.admissioncontrol.stats.AdmissionControlStats; import org.opensearch.repositories.RepositoriesStats; import org.opensearch.script.ScriptCacheStats; import org.opensearch.script.ScriptStats; @@ -154,6 +155,9 @@ public class NodeStats extends BaseNodeResponse implements ToXContentFragment { @Nullable private RepositoriesStats repositoriesStats; + @Nullable + private AdmissionControlStats admissionControlStats; + public NodeStats(StreamInput in) throws IOException { super(in); timestamp = in.readVLong(); @@ -225,6 +229,12 @@ public NodeStats(StreamInput in) throws IOException { } else { repositoriesStats = null; } + // TODO: change to V_2_12_0 on main after backport to 2.x + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + admissionControlStats = in.readOptionalWriteable(AdmissionControlStats::new); + } else { + admissionControlStats = null; + } } public NodeStats( @@ -254,7 +264,8 @@ public NodeStats( @Nullable TaskCancellationStats taskCancellationStats, @Nullable SearchPipelineStats searchPipelineStats, @Nullable SegmentReplicationRejectionStats segmentReplicationRejectionStats, - @Nullable RepositoriesStats repositoriesStats + @Nullable RepositoriesStats repositoriesStats, + @Nullable AdmissionControlStats admissionControlStats ) { super(node); this.timestamp = timestamp; @@ -283,6 +294,7 @@ public NodeStats( this.searchPipelineStats = searchPipelineStats; this.segmentReplicationRejectionStats = segmentReplicationRejectionStats; this.repositoriesStats = repositoriesStats; + this.admissionControlStats = admissionControlStats; } public long getTimestamp() { @@ -435,6 +447,11 @@ public RepositoriesStats getRepositoriesStats() { return repositoriesStats; } + @Nullable + public AdmissionControlStats getAdmissionControlStats() { + return admissionControlStats; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -487,6 +504,10 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_12_0)) { out.writeOptionalWriteable(repositoriesStats); } + // TODO: change to V_2_12_0 on main after backport to 2.x + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeOptionalWriteable(admissionControlStats); + } } @Override @@ -587,6 +608,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (getRepositoriesStats() != null) { 
getRepositoriesStats().toXContent(builder, params); } + if (getAdmissionControlStats() != null) { + getAdmissionControlStats().toXContent(builder, params); + } return builder; } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java index 22a667e7e8f6f..1af56f10b95ee 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java @@ -218,7 +218,8 @@ public enum Metric { SEARCH_PIPELINE("search_pipeline"), RESOURCE_USAGE_STATS("resource_usage_stats"), SEGMENT_REPLICATION_BACKPRESSURE("segment_replication_backpressure"), - REPOSITORIES("repositories"); + REPOSITORIES("repositories"), + ADMISSION_CONTROL("admission_control"); private String metricName; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 99cf42cfdc4d0..1df73d3b4394d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -127,7 +127,8 @@ protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) { NodesStatsRequest.Metric.SEARCH_PIPELINE.containedIn(metrics), NodesStatsRequest.Metric.RESOURCE_USAGE_STATS.containedIn(metrics), NodesStatsRequest.Metric.SEGMENT_REPLICATION_BACKPRESSURE.containedIn(metrics), - NodesStatsRequest.Metric.REPOSITORIES.containedIn(metrics) + NodesStatsRequest.Metric.REPOSITORIES.containedIn(metrics), + NodesStatsRequest.Metric.ADMISSION_CONTROL.containedIn(metrics) ); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 5efec8b876435..9c5dcc9e9de3f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -171,6 +171,7 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq false, false, false, + false, false ); List shardsStats = new ArrayList<>(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java index 1c57dc27df8c6..14c985f1d3427 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java @@ -55,6 +55,7 @@ import org.opensearch.transport.TransportService; import java.io.IOException; +import java.util.Arrays; import java.util.Set; import java.util.stream.Stream; @@ -77,7 +78,8 @@ public class TransportUpdateSettingsAction extends TransportClusterManagerNodeAc "index.max_script_fields", "index.max_terms_count", "index.max_regex_length", - "index.highlight.max_analyzed_offset" + "index.highlight.max_analyzed_offset", + "index.number_of_replicas" ); private final static String[] 
ALLOWLIST_REMOTE_SNAPSHOT_SETTINGS_PREFIXES = { "index.search.slowlog" }; @@ -145,10 +147,10 @@ protected ClusterBlockException checkBlock(UpdateSettingsRequest request, Cluste } } + final String[] requestIndexNames = Arrays.stream(requestIndices).map(Index::getName).toArray(String[]::new); return allowSearchableSnapshotSettingsUpdate ? null - : state.blocks() - .indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndexNames(state, request)); + : state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, requestIndexNames); } @Override diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java index 268a6ed6f85b8..b8517f53ff294 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java @@ -98,6 +98,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; import org.opensearch.node.NodeClosedException; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.tasks.Task; import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; @@ -180,7 +181,8 @@ public TransportShardBulkAction( false, indexingPressureService, systemIndices, - tracer + tracer, + AdmissionControlActionType.INDEXING ); this.updateHelper = updateHelper; this.mappingUpdatedAction = mappingUpdatedAction; diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java b/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java index 056cb474eaf32..19ce0beb3c493 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java @@ -20,17 +20,17 @@ * @opensearch.internal */ @InternalApi -interface SearchRequestOperationsListener { +abstract class SearchRequestOperationsListener { - void onPhaseStart(SearchPhaseContext context); + abstract void onPhaseStart(SearchPhaseContext context); - void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext); + abstract void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext); - void onPhaseFailure(SearchPhaseContext context); + abstract void onPhaseFailure(SearchPhaseContext context); - default void onRequestStart(SearchRequestContext searchRequestContext) {} + void onRequestStart(SearchRequestContext searchRequestContext) {} - default void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} + void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} /** * Holder of Composite Listeners @@ -38,17 +38,17 @@ default void onRequestEnd(SearchPhaseContext context, SearchRequestContext searc * @opensearch.internal */ - final class CompositeListener implements SearchRequestOperationsListener { + static final class CompositeListener extends SearchRequestOperationsListener { private final List listeners; private final Logger logger; - public CompositeListener(List listeners, Logger logger) { + CompositeListener(List listeners, Logger logger) { this.listeners = listeners; this.logger = logger; } @Override - public void onPhaseStart(SearchPhaseContext context) { + void 
onPhaseStart(SearchPhaseContext context) { for (SearchRequestOperationsListener listener : listeners) { try { listener.onPhaseStart(context); @@ -59,7 +59,7 @@ public void onPhaseStart(SearchPhaseContext context) { } @Override - public void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { for (SearchRequestOperationsListener listener : listeners) { try { listener.onPhaseEnd(context, searchRequestContext); @@ -70,7 +70,7 @@ public void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRe } @Override - public void onPhaseFailure(SearchPhaseContext context) { + void onPhaseFailure(SearchPhaseContext context) { for (SearchRequestOperationsListener listener : listeners) { try { listener.onPhaseFailure(context); @@ -81,7 +81,7 @@ public void onPhaseFailure(SearchPhaseContext context) { } @Override - public void onRequestStart(SearchRequestContext searchRequestContext) { + void onRequestStart(SearchRequestContext searchRequestContext) { for (SearchRequestOperationsListener listener : listeners) { try { listener.onRequestStart(searchRequestContext); diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java b/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java index 6a0d60ffd3984..a55cfd463a78f 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java @@ -57,7 +57,7 @@ * * @opensearch.internal */ -public final class SearchRequestSlowLog implements SearchRequestOperationsListener { +public final class SearchRequestSlowLog extends SearchRequestOperationsListener { private static final Charset UTF_8 = StandardCharsets.UTF_8; private long warnThreshold; @@ -134,19 +134,19 @@ public SearchRequestSlowLog(ClusterService clusterService) { } @Override - public void onPhaseStart(SearchPhaseContext context) {} + void onPhaseStart(SearchPhaseContext context) {} @Override - public void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} + void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} @Override - public void onPhaseFailure(SearchPhaseContext context) {} + void onPhaseFailure(SearchPhaseContext context) {} @Override - public void onRequestStart(SearchRequestContext searchRequestContext) {} + void onRequestStart(SearchRequestContext searchRequestContext) {} @Override - public void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { long tookInNanos = System.nanoTime() - searchRequestContext.getAbsoluteStartNanos(); if (warnThreshold >= 0 && tookInNanos > warnThreshold && level.isLevelEnabledFor(SlowLogLevel.WARN)) { diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java index 2813c41e043ee..262750849eaa9 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java @@ -23,7 +23,7 @@ * @opensearch.api */ @PublicApi(since = "2.11.0") -public final class SearchRequestStats implements SearchRequestOperationsListener { +public final class SearchRequestStats extends SearchRequestOperationsListener { Map 
phaseStatsMap = new EnumMap<>(SearchPhaseName.class); @Inject @@ -46,12 +46,12 @@ public long getPhaseMetric(SearchPhaseName searchPhaseName) { } @Override - public void onPhaseStart(SearchPhaseContext context) { + void onPhaseStart(SearchPhaseContext context) { phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()).current.inc(); } @Override - public void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { StatsHolder phaseStats = phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()); phaseStats.current.dec(); phaseStats.total.inc(); @@ -59,7 +59,7 @@ public void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRe } @Override - public void onPhaseFailure(SearchPhaseContext context) { + void onPhaseFailure(SearchPhaseContext context) { phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()).current.dec(); } diff --git a/server/src/main/java/org/opensearch/action/search/SearchTransportService.java b/server/src/main/java/org/opensearch/action/search/SearchTransportService.java index a723937afd2ed..64c738f633f2e 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/opensearch/action/search/SearchTransportService.java @@ -45,6 +45,7 @@ import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.transport.TransportResponse; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.search.SearchPhaseResult; import org.opensearch.search.SearchService; import org.opensearch.search.dfs.DfsSearchResult; @@ -542,6 +543,9 @@ public static void registerRequestHandler(TransportService transportService, Sea transportService.registerRequestHandler( DFS_ACTION_NAME, ThreadPool.Names.SAME, + false, + true, + AdmissionControlActionType.SEARCH, ShardSearchRequest::new, (request, channel, task) -> searchService.executeDfsPhase( request, @@ -556,6 +560,9 @@ public static void registerRequestHandler(TransportService transportService, Sea transportService.registerRequestHandler( QUERY_ACTION_NAME, ThreadPool.Names.SAME, + false, + true, + AdmissionControlActionType.SEARCH, ShardSearchRequest::new, (request, channel, task) -> { searchService.executeQueryPhase( @@ -575,6 +582,9 @@ public static void registerRequestHandler(TransportService transportService, Sea transportService.registerRequestHandler( QUERY_ID_ACTION_NAME, ThreadPool.Names.SAME, + false, + true, + AdmissionControlActionType.SEARCH, QuerySearchRequest::new, (request, channel, task) -> { searchService.executeQueryPhase( @@ -633,6 +643,7 @@ public static void registerRequestHandler(TransportService transportService, Sea ThreadPool.Names.SAME, true, true, + AdmissionControlActionType.SEARCH, ShardFetchSearchRequest::new, (request, channel, task) -> { searchService.executeFetchPhase( diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index 62886f7e9d981..05f4308df74fa 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -300,7 +300,7 @@ private Map resolveIndexBoosts(SearchRequest searchRequest, Clust * * @opensearch.internal */ - static final class 
SearchTimeProvider implements SearchRequestOperationsListener { + static final class SearchTimeProvider extends SearchRequestOperationsListener { private final long absoluteStartMillis; private final long relativeStartNanos; @@ -352,10 +352,10 @@ SearchResponse.PhaseTook getPhaseTook() { Map phaseStatsMap = new EnumMap<>(SearchPhaseName.class); @Override - public void onPhaseStart(SearchPhaseContext context) {} + void onPhaseStart(SearchPhaseContext context) {} @Override - public void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { phaseStatsMap.put( context.getCurrentPhase().getSearchPhaseName(), TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - context.getCurrentPhase().getStartTimeInNanos()) @@ -363,7 +363,7 @@ public void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRe } @Override - public void onPhaseFailure(SearchPhaseContext context) {} + void onPhaseFailure(SearchPhaseContext context) {} public Long getPhaseTookTime(SearchPhaseName searchPhaseName) { return phaseStatsMap.get(searchPhaseName); diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java index ddebdc5530e70..95f998e2d89c2 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java @@ -82,6 +82,7 @@ import org.opensearch.indices.IndexClosedException; import org.opensearch.indices.IndicesService; import org.opensearch.node.NodeClosedException; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.ConnectTransportException; @@ -201,6 +202,40 @@ protected TransportReplicationAction( String executor, boolean syncGlobalCheckpointAfterOperation, boolean forceExecutionOnPrimary + ) { + this( + settings, + actionName, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + actionFilters, + requestReader, + replicaRequestReader, + executor, + syncGlobalCheckpointAfterOperation, + forceExecutionOnPrimary, + null + ); + } + + protected TransportReplicationAction( + Settings settings, + String actionName, + TransportService transportService, + ClusterService clusterService, + IndicesService indicesService, + ThreadPool threadPool, + ShardStateAction shardStateAction, + ActionFilters actionFilters, + Writeable.Reader requestReader, + Writeable.Reader replicaRequestReader, + String executor, + boolean syncGlobalCheckpointAfterOperation, + boolean forceExecutionOnPrimary, + AdmissionControlActionType admissionControlActionType ) { super(actionName, actionFilters, transportService.getTaskManager()); this.threadPool = threadPool; @@ -219,14 +254,8 @@ protected TransportReplicationAction( transportService.registerRequestHandler(actionName, ThreadPool.Names.SAME, requestReader, this::handleOperationRequest); - transportService.registerRequestHandler( - transportPrimaryAction, - executor, - forceExecutionOnPrimary, - true, - in -> new ConcreteShardRequest<>(requestReader, in), - this::handlePrimaryRequest - ); + // This method will register Primary Request Handler Based on AdmissionControlActionType + 
registerPrimaryRequestHandler(requestReader, admissionControlActionType); // we must never reject because of thread pool capacity on replicas transportService.registerRequestHandler( @@ -247,6 +276,38 @@ protected TransportReplicationAction( clusterSettings.addSettingsUpdateConsumer(REPLICATION_RETRY_TIMEOUT, (v) -> retryTimeout = v); } + /** + * This method registers the primary request handler based on admissionControlActionType; the AdmissionControlHandler + * will be invoked for the registered action + * @param requestReader instance of the request reader + * @param admissionControlActionType type of AdmissionControlActionType + */ + private void registerPrimaryRequestHandler( + Writeable.Reader<Request> requestReader, + AdmissionControlActionType admissionControlActionType + ) { + if (admissionControlActionType != null) { + transportService.registerRequestHandler( + transportPrimaryAction, + executor, + forceExecutionOnPrimary, + true, + admissionControlActionType, + in -> new ConcreteShardRequest<>(requestReader, in), + this::handlePrimaryRequest + ); + } else { + transportService.registerRequestHandler( + transportPrimaryAction, + executor, + forceExecutionOnPrimary, + true, + in -> new ConcreteShardRequest<>(requestReader, in), + this::handlePrimaryRequest + ); + } + } + @Override protected void doExecute(Task task, Request request, ActionListener<Response> listener) { assert request.shardId() != null : "request shardId must be set"; diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java index 9ebfa8cfd0df8..27f9e6dee83de 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java +++ b/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java @@ -59,6 +59,7 @@ import org.opensearch.index.translog.Translog.Location; import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.telemetry.tracing.Span; import org.opensearch.telemetry.tracing.SpanBuilder; import org.opensearch.telemetry.tracing.SpanScope; @@ -104,7 +105,8 @@ protected TransportWriteAction( boolean forceExecutionOnPrimary, IndexingPressureService indexingPressureService, SystemIndices systemIndices, - Tracer tracer + Tracer tracer, + AdmissionControlActionType admissionControlActionType ) { // We pass ThreadPool.Names.SAME to the super class as we control the dispatching to the // ThreadPool.Names.WRITE/ThreadPool.Names.SYSTEM_WRITE thread pools in this class.
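A minimal caller-side sketch of the new constructor overload wired in above. Everything named Example* is hypothetical, and AdmissionControlActionType.INDEXING is assumed to exist alongside the SEARCH constant used elsewhere in this change; passing null (or calling the shorter overload) keeps the old registration path:

// Hypothetical subclass showing how a write action opts into admission control.
public class ExampleWriteAction extends TransportWriteAction<ExampleRequest, ExampleRequest, ExampleResponse> {
    public ExampleWriteAction(Settings settings, TransportService transportService, ClusterService clusterService,
                              IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction,
                              ActionFilters actionFilters, IndexingPressureService indexingPressureService,
                              SystemIndices systemIndices, Tracer tracer) {
        super(settings, "indices:data/write/example", transportService, clusterService, indicesService, threadPool,
            shardStateAction, actionFilters, ExampleRequest::new, ExampleRequest::new,
            shard -> ThreadPool.Names.WRITE, false, indexingPressureService, systemIndices, tracer,
            AdmissionControlActionType.INDEXING); // new trailing argument; assumed constant, SEARCH is the one shown in this diff
    }
}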
@@ -121,7 +123,8 @@ protected TransportWriteAction( replicaRequest, ThreadPool.Names.SAME, true, - forceExecutionOnPrimary + forceExecutionOnPrimary, + admissionControlActionType ); this.executorFunction = executorFunction; this.indexingPressureService = indexingPressureService; @@ -129,6 +132,43 @@ protected TransportWriteAction( this.tracer = tracer; } + protected TransportWriteAction( + Settings settings, + String actionName, + TransportService transportService, + ClusterService clusterService, + IndicesService indicesService, + ThreadPool threadPool, + ShardStateAction shardStateAction, + ActionFilters actionFilters, + Writeable.Reader request, + Writeable.Reader replicaRequest, + Function executorFunction, + boolean forceExecutionOnPrimary, + IndexingPressureService indexingPressureService, + SystemIndices systemIndices, + Tracer tracer + ) { + this( + settings, + actionName, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + actionFilters, + request, + replicaRequest, + executorFunction, + forceExecutionOnPrimary, + indexingPressureService, + systemIndices, + tracer, + null + ); + } + protected String executor(IndexShard shard) { return executorFunction.apply(shard); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 8d76a39712ee3..2704dccd46076 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -1496,7 +1496,7 @@ public static void validateTranslogRetentionSettings(Settings indexSettings) { * @param requestSettings settings passed in during index create/update request * @param clusterSettings cluster setting */ - static void validateRefreshIntervalSettings(Settings requestSettings, ClusterSettings clusterSettings) { + public static void validateRefreshIntervalSettings(Settings requestSettings, ClusterSettings clusterSettings) { if (IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.exists(requestSettings) == false) { return; } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java index 1093ac09777e7..5b03d3f7b19ce 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java @@ -93,6 +93,7 @@ import java.util.stream.Collectors; import static org.opensearch.cluster.metadata.MetadataCreateDataStreamService.validateTimestampFieldMapping; +import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateRefreshIntervalSettings; import static org.opensearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.NO_LONGER_ASSIGNED; /** @@ -1529,6 +1530,9 @@ private void validate(String name, @Nullable Settings settings, List ind Optional.empty() ); validationErrors.addAll(indexSettingsValidation); + + // validate index refresh interval settings + validateRefreshIntervalSettings(settings, clusterService.getClusterSettings()); } if (indexPatterns.stream().anyMatch(Regex::isMatchAllPattern)) { diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java 
b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java index 75448520a499c..45f64a5b29b04 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java @@ -65,7 +65,6 @@ public class LocalShardsBalancer extends ShardsBalancer { private final float threshold; private final Metadata metadata; - private final float avgShardsPerNode; private final float avgPrimaryShardsPerNode; private final BalancedShardsAllocator.NodeSorter sorter; @@ -85,7 +84,6 @@ public LocalShardsBalancer( this.threshold = threshold; this.routingNodes = allocation.routingNodes(); this.metadata = allocation.metadata(); - avgShardsPerNode = ((float) metadata.getTotalNumberOfShards()) / routingNodes.size(); avgPrimaryShardsPerNode = (float) (StreamSupport.stream(metadata.spliterator(), false) .mapToInt(IndexMetadata::getNumberOfShards) .sum()) / routingNodes.size(); @@ -663,7 +661,6 @@ MoveDecision decideMove(final ShardRouting shardRouting) { RoutingNode targetNode = null; final List nodeExplanationMap = explain ? new ArrayList<>() : null; int weightRanking = 0; - int targetNodeProcessed = 0; for (BalancedShardsAllocator.ModelNode currentNode : sorter.modelNodes) { if (currentNode != sourceNode) { RoutingNode target = currentNode.getRoutingNode(); @@ -677,7 +674,6 @@ MoveDecision decideMove(final ShardRouting shardRouting) { continue; } } - targetNodeProcessed++; // don't use canRebalance as we want hard filtering rules to apply. See #17698 Decision allocationDecision = allocation.deciders().canAllocate(shardRouting, target, allocation); if (explain) { diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java index cbbcd61fc5295..8a14ce3f1a288 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java @@ -406,7 +406,7 @@ private void tryAllocateUnassignedShard(Queue nodeQueue, ShardRouti allocation.metadata(), allocation.routingTable() ); - ShardRouting initShard = routingNodes.initializeShard(shard, node.nodeId(), null, shardSize, allocation.changes()); + routingNodes.initializeShard(shard, node.nodeId(), null, shardSize, allocation.changes()); nodeQueue.offer(node); allocated = true; break; @@ -444,7 +444,6 @@ private void tryAllocateUnassignedShard(Queue nodeQueue, ShardRouti // Break out if all nodes in the queue have been checked for this shard if (nodeQueue.stream().allMatch(rn -> nodesCheckedForShard.contains(rn.nodeId()))) { - throttled = true; break; } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AllocationDeciders.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AllocationDeciders.java index 22c3156fb3537..1263efd19ac46 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AllocationDeciders.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AllocationDeciders.java @@ -257,7 +257,7 @@ public Decision canAllocateAnyShardToNode(RoutingNode node, RoutingAllocation al Decision.Multi ret = new Decision.Multi(); for (AllocationDecider decider : allocations) { Decision decision = 
decider.canAllocateAnyShardToNode(node, allocation); - if (decision.type().canPremptivelyReturn()) { + if (decision.type().canPreemptivelyReturn()) { if (logger.isTraceEnabled()) { logger.trace("Shard can not be allocated on node [{}] due to [{}]", node.nodeId(), decider.getClass().getSimpleName()); } @@ -279,7 +279,7 @@ public Decision canMoveAway(ShardRouting shardRouting, RoutingAllocation allocat for (AllocationDecider decider : allocations) { Decision decision = decider.canMoveAway(shardRouting, allocation); // short track if a NO is returned. - if (decision.type().canPremptivelyReturn()) { + if (decision.type().canPreemptivelyReturn()) { if (logger.isTraceEnabled()) { logger.trace("Shard [{}] can not be moved away due to [{}]", shardRouting, decider.getClass().getSimpleName()); } @@ -301,7 +301,7 @@ public Decision canMoveAnyShard(RoutingAllocation allocation) { for (AllocationDecider decider : allocations) { Decision decision = decider.canMoveAnyShard(allocation); // short track if a NO is returned. - if (decision.type().canPremptivelyReturn()) { + if (decision.type().canPreemptivelyReturn()) { if (allocation.debugDecision() == false) { return decision; } else { diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/Decision.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/Decision.java index ac5a18c3fcb21..938c457606c79 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/Decision.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/Decision.java @@ -144,7 +144,7 @@ public boolean higherThan(Type other) { return false; } - public boolean canPremptivelyReturn() { + public boolean canPreemptivelyReturn() { return this == THROTTLE || this == NO; } diff --git a/server/src/main/java/org/opensearch/common/Rounding.java b/server/src/main/java/org/opensearch/common/Rounding.java index 5a740b8527704..6f5f1e4328758 100644 --- a/server/src/main/java/org/opensearch/common/Rounding.java +++ b/server/src/main/java/org/opensearch/common/Rounding.java @@ -54,6 +54,7 @@ import java.time.OffsetDateTime; import java.time.ZoneId; import java.time.ZoneOffset; +import java.time.format.TextStyle; import java.time.temporal.ChronoField; import java.time.temporal.ChronoUnit; import java.time.temporal.IsoFields; @@ -64,6 +65,7 @@ import java.util.List; import java.util.Locale; import java.util.Objects; +import java.util.OptionalLong; import java.util.concurrent.TimeUnit; /** @@ -179,7 +181,7 @@ long roundFloor(long utcMillis) { return DateUtils.roundFloor(utcMillis, ratio); } - long extraLocalOffsetLookup() { + public long extraLocalOffsetLookup() { return ratio; } }; @@ -1368,4 +1370,40 @@ public static Rounding read(StreamInput in) throws IOException { throw new OpenSearchException("unknown rounding id [" + id + "]"); } } + + /** + * Extracts the interval value from the {@link Rounding} instance + * @param rounding {@link Rounding} instance + * @return the interval value from the {@link Rounding} instance or {@code OptionalLong.empty()} + * if the interval is not available + */ + public static OptionalLong getInterval(Rounding rounding) { + long interval = 0; + + if (rounding instanceof TimeUnitRounding) { + interval = (((TimeUnitRounding) rounding).unit).extraLocalOffsetLookup(); + if (!isUTCTimeZone(((TimeUnitRounding) rounding).timeZone)) { + // Fast filter aggregation cannot be used if it needs time zone rounding + return OptionalLong.empty(); + } + } else if (rounding instanceof 
TimeIntervalRounding) { + interval = ((TimeIntervalRounding) rounding).interval; + if (!isUTCTimeZone(((TimeIntervalRounding) rounding).timeZone)) { + // Fast filter aggregation cannot be used if it needs time zone rounding + return OptionalLong.empty(); + } + } else { + return OptionalLong.empty(); + } + + return OptionalLong.of(interval); + } + + /** + * Helper function for checking if the time zone requested for date histogram + * aggregation is utc or not + */ + private static boolean isUTCTimeZone(final ZoneId zoneId) { + return "Z".equals(zoneId.getDisplayName(TextStyle.FULL, Locale.ENGLISH)); + } } diff --git a/server/src/main/java/org/opensearch/common/lucene/store/ByteArrayIndexInput.java b/server/src/main/java/org/opensearch/common/lucene/store/ByteArrayIndexInput.java index 6eb613daf5133..bb273b14c42e2 100644 --- a/server/src/main/java/org/opensearch/common/lucene/store/ByteArrayIndexInput.java +++ b/server/src/main/java/org/opensearch/common/lucene/store/ByteArrayIndexInput.java @@ -33,6 +33,7 @@ import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.RandomAccessInput; +import org.apache.lucene.util.BitUtil; import java.io.EOFException; import java.io.IOException; @@ -121,47 +122,25 @@ public void readBytes(final byte[] b, final int offset, int len) throws IOExcept @Override public byte readByte(long pos) throws IOException { validatePos(pos, Byte.BYTES); - return internalReadByte(pos); + return bytes[offset + (int) pos]; } @Override public short readShort(long pos) throws IOException { validatePos(pos, Short.BYTES); - return internalReadShort(pos); + return (short) BitUtil.VH_LE_SHORT.get(bytes, offset + (int) pos); } @Override public int readInt(long pos) throws IOException { validatePos(pos, Integer.BYTES); - return internalReadInt(pos); + return (int) BitUtil.VH_LE_INT.get(bytes, offset + (int) pos); } @Override public long readLong(long pos) throws IOException { validatePos(pos, Long.BYTES); - return internalReadLong(pos); - } - - private byte internalReadByte(long pos) { - return bytes[offset + (int) pos]; - } - - private short internalReadShort(long pos) { - final byte p1 = internalReadByte(pos); - final byte p2 = internalReadByte(pos + 1); - return (short) (((p2 & 0xFF) << 8) | (p1 & 0xFF)); - } - - private int internalReadInt(long pos) { - final short p1 = internalReadShort(pos); - final short p2 = internalReadShort(pos + Short.BYTES); - return ((p2 & 0xFFFF) << 16) | (p1 & 0xFFFF); - } - - public long internalReadLong(long pos) { - final int p1 = internalReadInt(pos); - final int p2 = internalReadInt(pos + Integer.BYTES); - return (((long) p2) << 32) | (p1 & 0xFFFFFFFFL); + return (long) BitUtil.VH_LE_LONG.get(bytes, offset + (int) pos); } private void validatePos(long pos, int len) throws EOFException { diff --git a/server/src/main/java/org/opensearch/common/network/NetworkModule.java b/server/src/main/java/org/opensearch/common/network/NetworkModule.java index 821d48fccf48c..2edf3967c61b0 100644 --- a/server/src/main/java/org/opensearch/common/network/NetworkModule.java +++ b/server/src/main/java/org/opensearch/common/network/NetworkModule.java @@ -55,6 +55,7 @@ import org.opensearch.http.HttpServerTransport; import org.opensearch.index.shard.PrimaryReplicaSyncer.ResyncTask; import org.opensearch.plugins.NetworkPlugin; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.tasks.RawTaskStatus; import org.opensearch.tasks.Task; import org.opensearch.telemetry.tracing.Tracer; @@ -153,9 +154,6 @@ 
public NetworkModule( List<TransportInterceptor> transportInterceptors ) { this.settings = settings; - if (transportInterceptors != null) { - transportInterceptors.forEach(this::registerTransportInterceptor); - } for (NetworkPlugin plugin : plugins) { Map<String, Supplier<HttpServerTransport>> httpTransportFactory = plugin.getHttpTransports( settings, @@ -192,6 +190,10 @@ public NetworkModule( registerTransportInterceptor(interceptor); } } + // Added last because interceptors are triggered in last-to-first order of the list + if (transportInterceptors != null) { + transportInterceptors.forEach(this::registerTransportInterceptor); + } } /** Adds a transport implementation that can be selected by setting {@link #TRANSPORT_TYPE_KEY}. */ @@ -299,6 +301,30 @@ public TransportRequestHandler interceptHandler( return actualHandler; } + /** + * Intercept the transport action and perform admission control if applicable + * @param action The action the request handler is associated with + * @param executor The executor the request handling will be executed on + * @param forceExecution Force execution on the executor queue and never reject it + * @param actualHandler The handler itself that implements the request handling + * @param admissionControlActionType Admission control based on resource usage limits of the provided action type + * @return the actual TransportRequestHandler after intercepting all previous handlers + * @param <T> the type of the transport request + */ + @Override + public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler( + String action, + String executor, + boolean forceExecution, + TransportRequestHandler<T> actualHandler, + AdmissionControlActionType admissionControlActionType + ) { + for (TransportInterceptor interceptor : this.transportInterceptors) { + actualHandler = interceptor.interceptHandler(action, executor, forceExecution, actualHandler, admissionControlActionType); + } + return actualHandler; + } + @Override public AsyncSender interceptSender(AsyncSender sender) { for (TransportInterceptor interceptor : this.transportInterceptors) { diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index a817e63328110..ab0ea89f4734d 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -136,7 +136,7 @@ import org.opensearch.persistent.decider.EnableAssignmentDecider; import org.opensearch.plugins.PluginsService; import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlSettings; -import org.opensearch.ratelimitting.admissioncontrol.settings.CPUBasedAdmissionControllerSettings; +import org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings; import org.opensearch.repositories.fs.FsRepository; import org.opensearch.rest.BaseRestHandler; import org.opensearch.script.ScriptService; @@ -292,6 +292,7 @@ public void apply(Settings value, Settings current, Settings previous) { RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_FILE_CHUNKS_SETTING, RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_OPERATIONS_SETTING, RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_REMOTE_STORE_STREAMS_SETTING, + RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING, @@ -698,9 +699,9 @@ public void apply(Settings value, Settings current, Settings previous) { IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING, AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE, - CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, - CPUBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT, - CPUBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, + CpuBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT, + CpuBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT ) ) ); diff --git a/server/src/main/java/org/opensearch/index/engine/Engine.java b/server/src/main/java/org/opensearch/index/engine/Engine.java index dbc8a1aead65c..c945d082c9a35 100644 --- a/server/src/main/java/org/opensearch/index/engine/Engine.java +++ b/server/src/main/java/org/opensearch/index/engine/Engine.java @@ -1321,6 +1321,7 @@ public void failEngine(String reason, @Nullable Exception failure) { // clean all unreferenced files on best effort basis created during failed merge and reset the // shard state back to last Lucene Commit. if (shouldCleanupUnreferencedFiles() && isMergeFailureDueToIOException(failure, reason)) { + logger.info("Cleaning up unreferenced files as merge failed due to: {}", reason); cleanUpUnreferencedFiles(); } diff --git a/server/src/main/java/org/opensearch/index/engine/EngineConfig.java b/server/src/main/java/org/opensearch/index/engine/EngineConfig.java index af65e993fcf26..bf3e10d684c94 100644 --- a/server/src/main/java/org/opensearch/index/engine/EngineConfig.java +++ b/server/src/main/java/org/opensearch/index/engine/EngineConfig.java @@ -108,7 +108,7 @@ public final class EngineConfig { private final LongSupplier globalCheckpointSupplier; private final Supplier retentionLeasesSupplier; private final boolean isReadOnlyReplica; - private final BooleanSupplier primaryModeSupplier; + private final BooleanSupplier startedPrimarySupplier; private final Comparator leafSorter; /** @@ -287,7 +287,7 @@ private EngineConfig(Builder builder) { this.primaryTermSupplier = builder.primaryTermSupplier; this.tombstoneDocSupplier = builder.tombstoneDocSupplier; this.isReadOnlyReplica = builder.isReadOnlyReplica; - this.primaryModeSupplier = builder.primaryModeSupplier; + this.startedPrimarySupplier = builder.startedPrimarySupplier; this.translogFactory = builder.translogFactory; this.leafSorter = builder.leafSorter; } @@ -495,11 +495,11 @@ public boolean isReadOnlyReplica() { } /** - * Returns the underlying primaryModeSupplier. + * Returns the underlying startedPrimarySupplier. * @return the primary mode supplier. 
*/ - public BooleanSupplier getPrimaryModeSupplier() { - return primaryModeSupplier; + public BooleanSupplier getStartedPrimarySupplier() { + return startedPrimarySupplier; } /** @@ -577,7 +577,7 @@ public static class Builder { private TombstoneDocSupplier tombstoneDocSupplier; private TranslogDeletionPolicyFactory translogDeletionPolicyFactory; private boolean isReadOnlyReplica; - private BooleanSupplier primaryModeSupplier; + private BooleanSupplier startedPrimarySupplier; private TranslogFactory translogFactory = new InternalTranslogFactory(); Comparator leafSorter; @@ -701,8 +701,8 @@ public Builder readOnlyReplica(boolean isReadOnlyReplica) { return this; } - public Builder primaryModeSupplier(BooleanSupplier primaryModeSupplier) { - this.primaryModeSupplier = primaryModeSupplier; + public Builder startedPrimarySupplier(BooleanSupplier startedPrimarySupplier) { + this.startedPrimarySupplier = startedPrimarySupplier; return this; } diff --git a/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java b/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java index 38eea92b6c757..77e2f1c55201d 100644 --- a/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java +++ b/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java @@ -152,7 +152,7 @@ public EngineConfig newEngineConfig( LongSupplier primaryTermSupplier, EngineConfig.TombstoneDocSupplier tombstoneDocSupplier, boolean isReadOnlyReplica, - BooleanSupplier primaryModeSupplier, + BooleanSupplier startedPrimarySupplier, TranslogFactory translogFactory, Comparator leafSorter ) { @@ -185,7 +185,7 @@ public EngineConfig newEngineConfig( .primaryTermSupplier(primaryTermSupplier) .tombstoneDocSupplier(tombstoneDocSupplier) .readOnlyReplica(isReadOnlyReplica) - .primaryModeSupplier(primaryModeSupplier) + .startedPrimarySupplier(startedPrimarySupplier) .translogFactory(translogFactory) .leafSorter(leafSorter) .build(); diff --git a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java index 8e1627af274c5..e204656d3f106 100644 --- a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java @@ -292,7 +292,7 @@ public void onFailure(String reason, Exception ex) { new CompositeTranslogEventListener(Arrays.asList(internalTranslogEventListener, translogEventListener), shardId), this::ensureOpen, engineConfig.getTranslogFactory(), - engineConfig.getPrimaryModeSupplier() + engineConfig.getStartedPrimarySupplier() ); this.translogManager = translogManagerRef; this.softDeletesPolicy = newSoftDeletesPolicy(); @@ -1412,7 +1412,9 @@ private Exception tryAcquireInFlightDocs(Operation operation, int addingDocs) { final long totalDocs = indexWriter.getPendingNumDocs() + inFlightDocCount.addAndGet(addingDocs); if (totalDocs > maxDocs) { releaseInFlightDocs(addingDocs); - return new IllegalArgumentException("Number of documents in the index can't exceed [" + maxDocs + "]"); + return new IllegalArgumentException( + "Number of documents in shard " + shardId + " exceeds the limit of [" + maxDocs + "] documents per shard" + ); } else { return null; } diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java index 3b55c4e648f1c..ed8dba2f8902d 100644 --- 
a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java @@ -125,7 +125,7 @@ public void onAfterTranslogSync() { }, this, engineConfig.getTranslogFactory(), - engineConfig.getPrimaryModeSupplier() + engineConfig.getStartedPrimarySupplier() ); this.translogManager = translogManagerRef; success = true; diff --git a/server/src/main/java/org/opensearch/index/engine/NoOpEngine.java b/server/src/main/java/org/opensearch/index/engine/NoOpEngine.java index ca31d5518df47..9071b0e7a1eb3 100644 --- a/server/src/main/java/org/opensearch/index/engine/NoOpEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NoOpEngine.java @@ -203,7 +203,7 @@ public void trimUnreferencedTranslogFiles() throws TranslogException { engineConfig.getGlobalCheckpointSupplier(), engineConfig.getPrimaryTermSupplier(), seqNo -> {}, - engineConfig.getPrimaryModeSupplier() + engineConfig.getStartedPrimarySupplier() ) ) { translog.trimUnreferencedReaders(); diff --git a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java index 80cef214a08cd..7ff3145055df8 100644 --- a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java @@ -278,7 +278,7 @@ private static TranslogStats translogStats(final EngineConfig config, final Segm config.getGlobalCheckpointSupplier(), config.getPrimaryTermSupplier(), seqNo -> {}, - config.getPrimaryModeSupplier() + config.getStartedPrimarySupplier() ) ) { return translog.stats(); diff --git a/server/src/main/java/org/opensearch/index/mapper/BooleanFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/BooleanFieldMapper.java index 22eeed205e2c8..3c7925809415a 100644 --- a/server/src/main/java/org/opensearch/index/mapper/BooleanFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/BooleanFieldMapper.java @@ -193,7 +193,7 @@ protected Boolean parseSourceValue(Object value) { return (Boolean) value; } else { String textValue = value.toString(); - return Booleans.parseBoolean(textValue.toCharArray(), 0, textValue.length(), false); + return Booleans.parseBooleanStrict(textValue, false); } } }; diff --git a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java index 3b832628695fe..d98e6ea6af83d 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java @@ -409,6 +409,16 @@ public long parse(String value) { return resolution.convert(DateFormatters.from(dateTimeFormatter().parse(value), dateTimeFormatter().locale()).toInstant()); } + public long convertNanosToMillis(long nanoSecondsSinceEpoch) { + if (resolution.numericType.equals(NumericType.DATE_NANOSECONDS)) return DateUtils.toMilliSeconds(nanoSecondsSinceEpoch); + return nanoSecondsSinceEpoch; + } + + public long convertRoundedMillisToNanos(long milliSecondsSinceEpoch) { + if (resolution.numericType.equals(NumericType.DATE_NANOSECONDS)) return DateUtils.toNanoSeconds(milliSecondsSinceEpoch); + return milliSecondsSinceEpoch; + } + @Override public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { DateFormatter defaultFormatter = dateTimeFormatter(); diff --git 
a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index 3113428ec60ef..7b9c1d3aa548f 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -1322,8 +1322,10 @@ private SegmentReplicationShardStats buildShardStats(final String allocationId, allocationId, cps.checkpointTimers.size(), bytesBehind, - cps.checkpointTimers.values().stream().mapToLong(SegmentReplicationLagTimer::time).max().orElse(0), - cps.checkpointTimers.values().stream().mapToLong(SegmentReplicationLagTimer::totalElapsedTime).max().orElse(0), + bytesBehind > 0L ? cps.checkpointTimers.values().stream().mapToLong(SegmentReplicationLagTimer::time).max().orElse(0) : 0, + bytesBehind > 0L + ? cps.checkpointTimers.values().stream().mapToLong(SegmentReplicationLagTimer::totalElapsedTime).max().orElse(0) + : 0, cps.lastCompletedReplicationLag ); } diff --git a/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java index 85d744e58265f..675d60ec2b63d 100644 --- a/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java @@ -21,7 +21,7 @@ * * @opensearch.internal */ -public class CheckpointRefreshListener extends CloseableRetryableRefreshListener { +public class CheckpointRefreshListener extends ReleasableRetryableRefreshListener { protected static Logger logger = LogManager.getLogger(CheckpointRefreshListener.class); diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index f1873ac659400..cbb246219546b 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -345,6 +345,7 @@ Runnable getGlobalCheckpointSyncer() { private final List<ReferenceManager.RefreshListener> internalRefreshListener = new ArrayList<>(); private final RemoteStoreFileDownloader fileDownloader; + private final RecoverySettings recoverySettings; public IndexShard( final ShardRouting shardRouting, @@ -469,6 +470,7 @@ public boolean shouldCache(Query query) { ? false : mapperService.documentMapper().mappers().containsTimeStampField(); this.remoteStoreStatsTrackerFactory = remoteStoreStatsTrackerFactory; + this.recoverySettings = recoverySettings; this.fileDownloader = new RemoteStoreFileDownloader(shardRouting.shardId(), threadPool, recoverySettings); } @@ -567,6 +569,10 @@ public String getNodeId() { return translogConfig.getNodeId(); } + public RecoverySettings getRecoverySettings() { + return recoverySettings; + } + public RemoteStoreFileDownloader getFileDownloader() { return fileDownloader; } @@ -845,6 +851,9 @@ public void relocated( final Runnable performSegRep ) throws IllegalIndexShardStateException, IllegalStateException, InterruptedException { assert shardRouting.primary() : "only primaries can be marked as relocated: " + shardRouting; + // The below list of releasables ensures that if the relocation does not happen, we undo the close activity that + // acquired all the permits. This ensures that remote store uploads can still be done by the existing primary shard.
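The relocation code that follows collects one Releasable per drained component so a failed hand-off can re-open them. A condensed sketch of that pattern, where performHandoff() is a hypothetical stand-in for the blockOperations body below:

List<Releasable> undoOnFailure = new ArrayList<>(2);
try {
    undoOnFailure.add(remoteStoreRefreshListener.drainRefreshes()); // pause segment uploads
    undoOnFailure.add(translogManager.drainSync());                 // pause translog uploads
    performHandoff();                                               // hypothetical stand-in
} catch (Exception ex) {
    Releasables.close(undoOnFailure); // relocation failed: let the old primary resume uploads
    throw ex;
}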
+ List<Releasable> releasablesOnHandoffFailures = new ArrayList<>(2); try (Releasable forceRefreshes = refreshListeners.forceRefreshes()) { indexShardOperationPermits.blockOperations(30, TimeUnit.MINUTES, () -> { forceRefreshes.close(); @@ -857,11 +866,15 @@ public void relocated( maybeSync(); } - // Ensures all in-flight remote store operations drain, before we perform the handoff. - internalRefreshListener.stream() .filter(refreshListener -> refreshListener instanceof Closeable) .map(refreshListener -> (Closeable) refreshListener) .close(); + // Ensures all in-flight remote store refreshes drain before we run performSegRep. + for (ReferenceManager.RefreshListener refreshListener : internalRefreshListener) { + if (refreshListener instanceof ReleasableRetryableRefreshListener) { + releasablesOnHandoffFailures.add(((ReleasableRetryableRefreshListener) refreshListener).drainRefreshes()); + } + } + + // Ensures all in-flight remote store translog uploads drain before we run performSegRep. + releasablesOnHandoffFailures.add(getEngine().translogManager().drainSync()); // no shard operation permits are being held here, move state from started to relocated assert indexShardOperationPermits.getActiveOperationsCount() == OPERATIONS_BLOCKED @@ -896,6 +909,13 @@ public void relocated( // Fail primary relocation source and target shards. failShard("timed out waiting for relocation hand-off to complete", null); throw new IndexShardClosedException(shardId(), "timed out waiting for relocation hand-off to complete"); + } catch (Exception ex) { + assert replicationTracker.isPrimaryMode(); + // If the primary mode is still true after the end of the handoff attempt, it means that the relocation + // failed. The existing primary will continue to be the primary, so we need to allow the segments and translog + // upload to resume. + Releasables.close(releasablesOnHandoffFailures); + throw ex; + } } @@ -1744,8 +1764,8 @@ public final boolean shouldProcessCheckpoint(ReplicationCheckpoint requestCheckp if (isSegmentReplicationAllowed() == false) { return false; } - ReplicationCheckpoint localCheckpoint = getLatestReplicationCheckpoint(); - if (localCheckpoint.isAheadOf(requestCheckpoint)) { + final ReplicationCheckpoint localCheckpoint = getLatestReplicationCheckpoint(); + if (requestCheckpoint.isAheadOf(localCheckpoint) == false) { logger.trace( () -> new ParameterizedMessage( "Ignoring new replication checkpoint - Shard is already on checkpoint {} that is ahead of {}", @@ -1755,12 +1775,6 @@ public final boolean shouldProcessCheckpoint(ReplicationCheckpoint requestCheckp ); return false; } - if (localCheckpoint.equals(requestCheckpoint)) { - logger.trace( - () -> new ParameterizedMessage("Ignoring new replication checkpoint - Shard is already on checkpoint {}", requestCheckpoint) - ); - return false; - } return true; } @@ -3865,10 +3879,10 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro circuitBreakerService, globalCheckpointSupplier, replicationTracker::getRetentionLeases, - () -> getOperationPrimaryTerm(), + this::getOperationPrimaryTerm, tombstoneDocSupplier(), isReadOnlyReplica, - replicationTracker::isPrimaryMode, + this::isStartedPrimary, translogFactorySupplier.apply(indexSettings, shardRouting), isTimeSeriesDescSortOptimizationEnabled() ?
DataStream.TIMESERIES_LEAF_SORTER : null // DESC @timestamp default order for // timeseries @@ -3883,6 +3897,15 @@ public boolean isRemoteTranslogEnabled() { return indexSettings() != null && indexSettings().isRemoteTranslogStoreEnabled(); } + /** + * This checks if we are in state to upload to remote store. Until the cluster-manager informs the shard through + * cluster state, the shard will not be in STARTED state. This method is used to prevent pre-emptive segment or + * translog uploads. + */ + public boolean isStartedPrimary() { + return getReplicationTracker().isPrimaryMode() && state() == IndexShardState.STARTED; + } + /** * @return true if segment reverse search optimization is enabled for time series based workload. */ diff --git a/server/src/main/java/org/opensearch/index/shard/CloseableRetryableRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/ReleasableRetryableRefreshListener.java similarity index 80% rename from server/src/main/java/org/opensearch/index/shard/CloseableRetryableRefreshListener.java rename to server/src/main/java/org/opensearch/index/shard/ReleasableRetryableRefreshListener.java index 3ee74e5267718..757275932c5f1 100644 --- a/server/src/main/java/org/opensearch/index/shard/CloseableRetryableRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/ReleasableRetryableRefreshListener.java @@ -10,10 +10,11 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.search.ReferenceManager; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.unit.TimeValue; import org.opensearch.threadpool.ThreadPool; -import java.io.Closeable; import java.io.IOException; import java.util.Objects; import java.util.concurrent.Semaphore; @@ -22,11 +23,11 @@ import java.util.concurrent.atomic.AtomicBoolean; /** - * RefreshListener that runs afterRefresh method if and only if there is a permit available. Once the listener - * is closed, all the permits are acquired and there are no available permits to afterRefresh. This abstract class provides + * RefreshListener that runs afterRefresh method if and only if there is a permit available. Once the {@code drainRefreshes()} + * is called, all the permits are acquired and there are no available permits to afterRefresh. This abstract class provides * necessary abstract methods to schedule retry. */ -public abstract class CloseableRetryableRefreshListener implements ReferenceManager.RefreshListener, Closeable { +public abstract class ReleasableRetryableRefreshListener implements ReferenceManager.RefreshListener { /** * Total permits = 1 ensures that there is only single instance of runAfterRefreshWithPermit that is running at a time. 
@@ -34,6 +35,8 @@ public abstract class CloseableRetryableRefreshListener implements ReferenceMana */ private static final int TOTAL_PERMITS = 1; + private static final TimeValue DRAIN_TIMEOUT = TimeValue.timeValueMinutes(10); + private final AtomicBoolean closed = new AtomicBoolean(false); private final Semaphore semaphore = new Semaphore(TOTAL_PERMITS); @@ -45,11 +48,11 @@ public abstract class CloseableRetryableRefreshListener implements ReferenceMana */ private final AtomicBoolean retryScheduled = new AtomicBoolean(false); - public CloseableRetryableRefreshListener() { + public ReleasableRetryableRefreshListener() { this.threadPool = null; } - public CloseableRetryableRefreshListener(ThreadPool threadPool) { + public ReleasableRetryableRefreshListener(ThreadPool threadPool) { assert Objects.nonNull(threadPool); this.threadPool = threadPool; } @@ -184,23 +187,38 @@ private void scheduleRetry(boolean afterRefreshSuccessful, boolean didRefresh) { */ protected abstract boolean performAfterRefreshWithPermit(boolean didRefresh); - @Override - public final void close() throws IOException { + public final Releasable drainRefreshes() { try { - if (semaphore.tryAcquire(TOTAL_PERMITS, 10, TimeUnit.MINUTES)) { + TimeValue timeout = getDrainTimeout(); + if (semaphore.tryAcquire(TOTAL_PERMITS, timeout.seconds(), TimeUnit.SECONDS)) { boolean result = closed.compareAndSet(false, true); assert result && semaphore.availablePermits() == 0; getLogger().info("All permits are acquired and refresh listener is closed"); + return Releasables.releaseOnce(() -> { + semaphore.release(TOTAL_PERMITS); + boolean wasClosed = closed.getAndSet(false); + assert semaphore.availablePermits() == TOTAL_PERMITS : "Available permits is " + semaphore.availablePermits(); + assert wasClosed : "RefreshListener is not closed before reopening it"; + getLogger().info("All permits are released and refresh listener is open"); + }); } else { - throw new TimeoutException("timeout while closing gated refresh listener"); + throw new TimeoutException("Timeout while acquiring all permits"); } } catch (InterruptedException | TimeoutException e) { - throw new RuntimeException("Failed to close the closeable retryable listener", e); + throw new RuntimeException("Failed to acquire all permits", e); } } protected abstract Logger getLogger(); + // Made available for unit testing purpose only + /** + * Returns the timeout which is used while draining refreshes. + */ + TimeValue getDrainTimeout() { + return DRAIN_TIMEOUT; + } + // Visible for testing /** * Returns if the retry is scheduled or not. 
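The drain mechanics above reduce to a small idiom: acquire every permit under a timeout, then return a one-shot Releasable that restores them. A toy, self-contained version for illustration (the class name and timeout handling here are illustrative, not this listener's API):

import org.opensearch.common.lease.Releasable;
import org.opensearch.common.lease.Releasables;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

final class DrainIdiom {
    private static final int TOTAL_PERMITS = 1;
    private final Semaphore semaphore = new Semaphore(TOTAL_PERMITS);

    Releasable drain(long timeoutSeconds) {
        try {
            if (semaphore.tryAcquire(TOTAL_PERMITS, timeoutSeconds, TimeUnit.SECONDS) == false) {
                throw new TimeoutException("Timeout while acquiring all permits");
            }
            // all permits held: afterRefresh work is blocked until the Releasable is closed
            return Releasables.releaseOnce(() -> semaphore.release(TOTAL_PERMITS));
        } catch (InterruptedException | TimeoutException e) {
            throw new RuntimeException("Failed to acquire all permits", e);
        }
    }
}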
@@ -210,4 +228,14 @@ public final void close() throws IOException { boolean getRetryScheduledStatus() { return retryScheduled.get(); } + + // Visible for testing + int availablePermits() { + return semaphore.availablePermits(); + } + + // Visible for testing + boolean isClosed() { + return closed.get(); + } } diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index dd40327298874..d96a7e7c95ecf 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -54,7 +54,7 @@ * * @opensearch.internal */ -public final class RemoteStoreRefreshListener extends CloseableRetryableRefreshListener { +public final class RemoteStoreRefreshListener extends ReleasableRetryableRefreshListener { private final Logger logger; @@ -79,8 +79,6 @@ public final class RemoteStoreRefreshListener extends CloseableRetryableRefreshL ); public static final Set<String> EXCLUDE_FILES = Set.of("write.lock"); - // Visible for testing - public static final int LAST_N_METADATA_FILES_TO_KEEP = 10; private final IndexShard indexShard; private final Directory storeDirectory; @@ -205,7 +203,7 @@ private boolean syncSegments() { // is considered as a first refresh post commit. A cleanup of stale commit files is triggered. // This is done to avoid deletes after each refresh. if (isRefreshAfterCommit()) { - remoteDirectory.deleteStaleSegmentsAsync(LAST_N_METADATA_FILES_TO_KEEP); + remoteDirectory.deleteStaleSegmentsAsync(indexShard.getRecoverySettings().getMinRemoteSegmentMetadataFiles()); } try (GatedCloseable<SegmentInfos> segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { @@ -486,11 +484,10 @@ private void initializeRemoteDirectoryOnTermUpdate() throws IOException { /** * This checks for readiness of the index shard and primary mode. This has been separated from shouldSync since we use the * returned value of this method for scheduling retries in syncSegments method. - * @return true iff primaryMode is true and index shard is not in closed state. + * @return true iff the shard is a started primary, or it is undergoing local or snapshot recovery. */ private boolean isReadyForUpload() { - boolean isReady = (indexShard.getReplicationTracker().isPrimaryMode() && indexShard.state() != IndexShardState.CLOSED) - || isLocalOrSnapshotRecovery(); + boolean isReady = indexShard.isStartedPrimary() || isLocalOrSnapshotRecovery(); if (isReady == false) { StringBuilder sb = new StringBuilder("Skipped syncing segments with"); @@ -503,11 +500,11 @@ private boolean isReadyForUpload() { if (indexShard.getEngineOrNull() != null) { sb.append(" engineType=").append(indexShard.getEngine().getClass().getSimpleName()); } - if (isLocalOrSnapshotRecovery() == false) { + if (indexShard.recoveryState() != null) { sb.append(" recoverySourceType=").append(indexShard.recoveryState().getRecoverySource().getType()); sb.append(" primary=").append(indexShard.shardRouting.primary()); } - logger.trace(sb.toString()); + logger.info(sb.toString()); } return isReady; } @@ -516,8 +513,8 @@ private boolean isLocalOrSnapshotRecovery() { // In this case when the primary mode is false, we need to upload segments to Remote Store // This is required in case of snapshots/shrink/split/clone where we need to durably persist // all segments to remote before completing the recovery to ensure durability.
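The retention count consumed above is backed by the dynamic cluster setting registered in ClusterSettings earlier in this change; per the guard added to RemoteSegmentStoreDirectory below, -1 disables stale segment deletion. A hedged sketch using the standard Settings builder (the key is taken from the log message in the next hunk):

Settings retentionUpdate = Settings.builder()
    .put("cluster.remote_store.index.segment_metadata.retention.max_count", 5) // -1 to disable deletion
    .build();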
- return (indexShard.state() == IndexShardState.RECOVERING && indexShard.shardRouting.primary()) + && indexShard.recoveryState() != null && (indexShard.recoveryState().getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS || indexShard.recoveryState().getRecoverySource().getType() == RecoverySource.Type.SNAPSHOT); } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index c7d0d5713925f..7b57fabdf1486 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -727,6 +727,12 @@ public Map getSegmentsUploadedToRemoteStore() { * @throws IOException in case of I/O error while reading from / writing to remote segment store */ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException { + if (lastNMetadataFilesToKeep == -1) { + logger.info( + "Stale segment deletion is disabled if cluster.remote_store.index.segment_metadata.retention.max_count is set to -1" + ); + return; + } List sortedMetadataFileList = remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( MetadataFilenameUtils.METADATA_PREFIX, Integer.MAX_VALUE diff --git a/server/src/main/java/org/opensearch/index/translog/BufferedChecksumStreamInput.java b/server/src/main/java/org/opensearch/index/translog/BufferedChecksumStreamInput.java index 8c9ccc3b487df..f75f27b7bcb91 100644 --- a/server/src/main/java/org/opensearch/index/translog/BufferedChecksumStreamInput.java +++ b/server/src/main/java/org/opensearch/index/translog/BufferedChecksumStreamInput.java @@ -33,6 +33,7 @@ package org.opensearch.index.translog; import org.apache.lucene.store.BufferedChecksum; +import org.apache.lucene.util.BitUtil; import org.opensearch.core.common.io.stream.FilterStreamInput; import org.opensearch.core.common.io.stream.StreamInput; @@ -92,22 +93,21 @@ public void readBytes(byte[] b, int offset, int len) throws IOException { public short readShort() throws IOException { final byte[] buf = buffer.get(); readBytes(buf, 0, 2); - return (short) (((buf[0] & 0xFF) << 8) | (buf[1] & 0xFF)); + return (short) BitUtil.VH_BE_SHORT.get(buf, 0); } @Override public int readInt() throws IOException { final byte[] buf = buffer.get(); readBytes(buf, 0, 4); - return ((buf[0] & 0xFF) << 24) | ((buf[1] & 0xFF) << 16) | ((buf[2] & 0xFF) << 8) | (buf[3] & 0xFF); + return (int) BitUtil.VH_BE_INT.get(buf, 0); } @Override public long readLong() throws IOException { final byte[] buf = buffer.get(); readBytes(buf, 0, 8); - return (((long) (((buf[0] & 0xFF) << 24) | ((buf[1] & 0xFF) << 16) | ((buf[2] & 0xFF) << 8) | (buf[3] & 0xFF))) << 32) | ((((buf[4] - & 0xFF) << 24) | ((buf[5] & 0xFF) << 16) | ((buf[6] & 0xFF) << 8) | (buf[7] & 0xFF)) & 0xFFFFFFFFL); + return (long) BitUtil.VH_BE_LONG.get(buf, 0); } @Override diff --git a/server/src/main/java/org/opensearch/index/translog/InternalTranslogFactory.java b/server/src/main/java/org/opensearch/index/translog/InternalTranslogFactory.java index d7be1250c0b5b..415d7dc4d1a9d 100644 --- a/server/src/main/java/org/opensearch/index/translog/InternalTranslogFactory.java +++ b/server/src/main/java/org/opensearch/index/translog/InternalTranslogFactory.java @@ -28,7 +28,7 @@ public Translog newTranslog( LongSupplier globalCheckpointSupplier, LongSupplier primaryTermSupplier, LongConsumer persistedSequenceNumberConsumer, - BooleanSupplier primaryModeSupplier + 
BooleanSupplier startedPrimarySupplier ) throws IOException { return new LocalTranslog( diff --git a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java index 4d0fc13d433c6..a22c538286a88 100644 --- a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.logging.Loggers; import org.opensearch.common.util.concurrent.ReleasableLock; import org.opensearch.common.util.io.IOUtils; @@ -58,7 +59,7 @@ public InternalTranslogManager( TranslogEventListener translogEventListener, LifecycleAware engineLifeCycleAware, TranslogFactory translogFactory, - BooleanSupplier primaryModeSupplier + BooleanSupplier startedPrimarySupplier ) throws IOException { this.shardId = shardId; this.readLock = readLock; @@ -71,7 +72,7 @@ public InternalTranslogManager( if (tracker != null) { tracker.markSeqNoAsPersisted(seqNo); } - }, translogUUID, translogFactory, primaryModeSupplier); + }, translogUUID, translogFactory, startedPrimarySupplier); assert translog.getGeneration() != null; this.translog = translog; assert pendingTranslogRecovery.get() == false : "translog recovery can't be pending before we set it"; @@ -301,10 +302,16 @@ public void setMinSeqNoToKeep(long seqNo) { translog.setMinSeqNoToKeep(seqNo); } + @Override public void onDelete() { translog.onDelete(); } + @Override + public Releasable drainSync() { + return translog.drainSync(); + } + @Override public Translog.TranslogGeneration getTranslogGeneration() { return translog.getGeneration(); @@ -362,7 +369,7 @@ protected Translog openTranslog( LongConsumer persistedSequenceNumberConsumer, String translogUUID, TranslogFactory translogFactory, - BooleanSupplier primaryModeSupplier + BooleanSupplier startedPrimarySupplier ) throws IOException { return translogFactory.newTranslog( translogConfig, @@ -371,7 +378,7 @@ protected Translog openTranslog( globalCheckpointSupplier, primaryTermSupplier, persistedSequenceNumberConsumer, - primaryModeSupplier + startedPrimarySupplier ); } diff --git a/server/src/main/java/org/opensearch/index/translog/LocalTranslog.java b/server/src/main/java/org/opensearch/index/translog/LocalTranslog.java index 22dba3973cfc1..7664631e0ed07 100644 --- a/server/src/main/java/org/opensearch/index/translog/LocalTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/LocalTranslog.java @@ -8,6 +8,7 @@ package org.opensearch.index.translog; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.util.concurrent.ReleasableLock; import org.opensearch.common.util.io.IOUtils; @@ -140,6 +141,11 @@ public TranslogStats stats() { } } + @Override + Releasable drainSync() { + return () -> {}; // noop + } + @Override public void close() throws IOException { assert Translog.calledFromOutsideOrViaTragedyClose() diff --git a/server/src/main/java/org/opensearch/index/translog/NoOpTranslogManager.java b/server/src/main/java/org/opensearch/index/translog/NoOpTranslogManager.java index 3e6a8e69edfbb..b4aa7865570a6 100644 --- a/server/src/main/java/org/opensearch/index/translog/NoOpTranslogManager.java +++ 
b/server/src/main/java/org/opensearch/index/translog/NoOpTranslogManager.java @@ -8,6 +8,7 @@ package org.opensearch.index.translog; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.util.concurrent.ReleasableLock; import org.opensearch.core.index.shard.ShardId; @@ -121,8 +122,14 @@ public Translog.Snapshot newChangesSnapshot(long fromSeqNo, long toSeqNo, boolea throw new UnsupportedOperationException("Translog snapshot unsupported with no-op translogs"); } + @Override public void onDelete() {} + @Override + public Releasable drainSync() { + return () -> {}; + } + @Override public Translog.TranslogGeneration getTranslogGeneration() { return null; diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java b/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java index 1e2cb388e690e..e100ffaabf13d 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java @@ -59,7 +59,7 @@ public Translog newTranslog( LongSupplier globalCheckpointSupplier, LongSupplier primaryTermSupplier, LongConsumer persistedSequenceNumberConsumer, - BooleanSupplier primaryModeSupplier + BooleanSupplier startedPrimarySupplier ) throws IOException { assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; @@ -73,7 +73,7 @@ public Translog newTranslog( persistedSequenceNumberConsumer, blobStoreRepository, threadPool, - primaryModeSupplier, + startedPrimarySupplier, remoteTranslogTransferTracker ); } diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index 8fb420e8fa1da..7b969a37e4aa6 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -9,8 +9,10 @@ package org.opensearch.index.translog; import org.apache.logging.log4j.Logger; +import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.SetOnce; import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.logging.Loggers; import org.opensearch.common.util.concurrent.ReleasableLock; import org.opensearch.common.util.io.IOUtils; @@ -38,6 +40,9 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BooleanSupplier; import java.util.function.LongConsumer; import java.util.function.LongSupplier; @@ -53,10 +58,9 @@ public class RemoteFsTranslog extends Translog { private final Logger logger; - private final BlobStoreRepository blobStoreRepository; private final TranslogTransferManager translogTransferManager; private final FileTransferTracker fileTransferTracker; - private final BooleanSupplier primaryModeSupplier; + private final BooleanSupplier startedPrimarySupplier; private final RemoteTranslogTransferTracker remoteTranslogTransferTracker; private volatile long maxRemoteTranslogGenerationUploaded; @@ -75,6 +79,11 @@ public class RemoteFsTranslog extends Translog { // Semaphore used to allow only single remote generation to happen at a time private final Semaphore 
remoteGenerationDeletionPermits = new Semaphore(REMOTE_DELETION_PERMITS); + // These permits exist to allow any in-flight background-triggered upload to complete. + private static final int SYNC_PERMIT = 1; + private final Semaphore syncPermit = new Semaphore(SYNC_PERMIT); + private final AtomicBoolean pauseSync = new AtomicBoolean(false); + public RemoteFsTranslog( TranslogConfig config, String translogUUID, @@ -84,13 +93,12 @@ public RemoteFsTranslog( LongConsumer persistedSequenceNumberConsumer, BlobStoreRepository blobStoreRepository, ThreadPool threadPool, - BooleanSupplier primaryModeSupplier, + BooleanSupplier startedPrimarySupplier, RemoteTranslogTransferTracker remoteTranslogTransferTracker ) throws IOException { super(config, translogUUID, deletionPolicy, globalCheckpointSupplier, primaryTermSupplier, persistedSequenceNumberConsumer); logger = Loggers.getLogger(getClass(), shardId); - this.blobStoreRepository = blobStoreRepository; - this.primaryModeSupplier = primaryModeSupplier; + this.startedPrimarySupplier = startedPrimarySupplier; this.remoteTranslogTransferTracker = remoteTranslogTransferTracker; fileTransferTracker = new FileTransferTracker(shardId, remoteTranslogTransferTracker); this.translogTransferManager = buildTranslogTransferManager( @@ -267,6 +275,16 @@ public void rollGeneration() throws IOException { } private boolean prepareAndUpload(Long primaryTerm, Long generation) throws IOException { + // During primary relocation, both the old and the new primary have engines created with RemoteFsTranslog, and both + // report ReplicationTracker.primaryMode() as true until the `internal:index/shard/replication/segments_sync` + // action re-downloads the segments and translog on the new primary. We ensure two things here: + // 1. Using startedPrimarySupplier, we prevent the new primary from doing pre-emptive syncs. + // 2. Using syncPermits, we prevent syncs at the desired time during primary relocation. + if (startedPrimarySupplier.getAsBoolean() == false || syncPermit.tryAcquire(SYNC_PERMIT) == false) { + logger.debug("skipped uploading translog for {} {} syncPermits={}", primaryTerm, generation, syncPermit.availablePermits()); + // NO-OP + return false; + } long maxSeqNo = -1; try (Releasable ignored = writeLock.acquire()) { if (generation == null || generation == current.getGeneration()) { @@ -316,16 +334,6 @@ private boolean prepareAndUpload(Long primaryTerm, Long generation) throws IOExc } private boolean upload(long primaryTerm, long generation, long maxSeqNo) throws IOException { - // During primary relocation (primary-primary peer recovery), both the old and the new primary have engine - // created with the RemoteFsTranslog. Both primaries are equipped to upload the translogs. The primary mode check - // below ensures that the real primary only is uploading. Before the primary mode is set as true for the new - // primary, the engine is reset to InternalEngine which also initialises the RemoteFsTranslog which in turns - // downloads all the translogs from remote store and does a flush before the relocation finishes.
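A condensed sketch of the gate prepareAndUpload() now applies; uploadTranslog() is a hypothetical stand-in for the transfer call, and in the real code the permit is released in the finally block added to upload() below:

if (startedPrimarySupplier.getAsBoolean() == false || syncPermit.tryAcquire(SYNC_PERMIT) == false) {
    return false; // skip: not a started primary, or syncs are drained for relocation
}
try {
    uploadTranslog(); // hypothetical stand-in
    return true;
} finally {
    syncPermit.release(SYNC_PERMIT);
}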
- if (primaryModeSupplier.getAsBoolean() == false) {
- logger.debug("skipped uploading translog for {} {}", primaryTerm, generation);
- // NO-OP
- return true;
- }
 logger.trace("uploading translog for {} {}", primaryTerm, generation);
 try (
 TranslogCheckpointTransferSnapshot transferSnapshotProvider = new TranslogCheckpointTransferSnapshot.Builder(
@@ -341,6 +349,8 @@ private boolean upload(long primaryTerm, long generation, long maxSeqNo) throws
 transferSnapshotProvider,
 new RemoteFsTranslogTransferListener(generation, primaryTerm, maxSeqNo)
 );
+ } finally {
+ syncPermit.release(SYNC_PERMIT);
 }
 }
@@ -423,11 +433,39 @@ protected void setMinSeqNoToKeep(long seqNo) {
 this.minSeqNoToKeep = seqNo;
 }
+ @Override
+ protected Releasable drainSync() {
+ try {
+ if (syncPermit.tryAcquire(SYNC_PERMIT, 1, TimeUnit.MINUTES)) {
+ boolean result = pauseSync.compareAndSet(false, true);
+ assert result && syncPermit.availablePermits() == 0;
+ logger.info("All inflight remote translog syncs finished and further syncs paused");
+ return Releasables.releaseOnce(() -> {
+ syncPermit.release(SYNC_PERMIT);
+ boolean wasSyncPaused = pauseSync.getAndSet(false);
+ assert syncPermit.availablePermits() == SYNC_PERMIT : "Available permits is " + syncPermit.availablePermits();
+ assert wasSyncPaused : "RemoteFsTranslog sync was not paused before re-enabling it";
+ logger.info("Resumed remote translog sync on relocation failure");
+ });
+ } else {
+ throw new TimeoutException("Timeout while acquiring all permits");
+ }
+ } catch (TimeoutException | InterruptedException e) {
+ throw new RuntimeException("Failed to acquire all permits", e);
+ }
+ }
+
 @Override
 public void trimUnreferencedReaders() throws IOException {
 // clean up local translog files and updates readers
 super.trimUnreferencedReaders();
+ // This ensures that after the permits are acquired during primary relocation, there are no further modifications to the
+ // remote store.
+ if (startedPrimarySupplier.getAsBoolean() == false || pauseSync.get()) {
+ return;
+ }
+
 // Since remote generation deletion is async, this ensures that only one generation deletion happens at a time.
 // Remote generations involves 2 async operations - 1) Delete translog generation files 2) Delete metadata files
 // We try to acquire 2 permits and if we can not, we return from here itself.
@@ -505,11 +543,7 @@ public static void cleanup(Repository repository, ShardId shardId, ThreadPool th
 }
 protected void onDelete() {
- if (primaryModeSupplier.getAsBoolean() == false) {
- logger.trace("skipped delete translog");
- // NO-OP
- return;
- }
+ ClusterService.assertClusterOrClusterManagerStateThread();
 // clean up all remote translog files
 translogTransferManager.delete();
 }
@@ -570,4 +604,9 @@ public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) thro
 public long getMinUnreferencedSeqNoInSegments(long minUnrefCheckpointInLastCommit) {
 return minSeqNoToKeep;
 }
+
+ // Visible for testing
+ int availablePermits() {
+ return syncPermit.availablePermits();
+ }
 }
diff --git a/server/src/main/java/org/opensearch/index/translog/Translog.java b/server/src/main/java/org/opensearch/index/translog/Translog.java
index 8b4662238ed25..9f877e87415dd 100644
--- a/server/src/main/java/org/opensearch/index/translog/Translog.java
+++ b/server/src/main/java/org/opensearch/index/translog/Translog.java
@@ -1817,6 +1817,11 @@ protected void setMinSeqNoToKeep(long seqNo) {}
 protected void onDelete() {}
+ /**
+ * Drains ongoing syncs to the underlying store.
It returns a releasable which can be closed to resume syncs.
+ */
+ abstract Releasable drainSync();
+
 /**
 * deletes all files associated with a reader. package-private to be able to simulate node failures at this point
 */
diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogFactory.java b/server/src/main/java/org/opensearch/index/translog/TranslogFactory.java
index a139b10f563b2..4300435093b5d 100644
--- a/server/src/main/java/org/opensearch/index/translog/TranslogFactory.java
+++ b/server/src/main/java/org/opensearch/index/translog/TranslogFactory.java
@@ -32,6 +32,6 @@ Translog newTranslog(
 final LongSupplier globalCheckpointSupplier,
 final LongSupplier primaryTermSupplier,
 final LongConsumer persistedSequenceNumberConsumer,
- final BooleanSupplier primaryModeSupplier
+ final BooleanSupplier startedPrimarySupplier
 ) throws IOException;
 }
diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogManager.java b/server/src/main/java/org/opensearch/index/translog/TranslogManager.java
index 4279e0289c1dc..e1a0b7d1c1293 100644
--- a/server/src/main/java/org/opensearch/index/translog/TranslogManager.java
+++ b/server/src/main/java/org/opensearch/index/translog/TranslogManager.java
@@ -9,6 +9,7 @@
 package org.opensearch.index.translog;
 import org.opensearch.common.annotation.PublicApi;
+import org.opensearch.common.lease.Releasable;
 import java.io.IOException;
 import java.util.stream.Stream;
@@ -135,5 +136,10 @@ public interface TranslogManager {
 */
 void onDelete();
+ /**
+ * Drains ongoing syncs to the underlying store. It returns a releasable which can be closed to resume syncs.
+ */
+ Releasable drainSync();
+
 Translog.TranslogGeneration getTranslogGeneration();
 }
diff --git a/server/src/main/java/org/opensearch/index/translog/WriteOnlyTranslogManager.java b/server/src/main/java/org/opensearch/index/translog/WriteOnlyTranslogManager.java
index adeeb213b2913..a7a524ad78e95 100644
--- a/server/src/main/java/org/opensearch/index/translog/WriteOnlyTranslogManager.java
+++ b/server/src/main/java/org/opensearch/index/translog/WriteOnlyTranslogManager.java
@@ -38,7 +38,7 @@ public WriteOnlyTranslogManager(
 TranslogEventListener translogEventListener,
 LifecycleAware engineLifecycleAware,
 TranslogFactory translogFactory,
- BooleanSupplier primaryModeSupplier
+ BooleanSupplier startedPrimarySupplier
 ) throws IOException {
 super(
 translogConfig,
@@ -52,7 +52,7 @@ public WriteOnlyTranslogManager(
 translogEventListener,
 engineLifecycleAware,
 translogFactory,
- primaryModeSupplier
+ startedPrimarySupplier
 );
 }
diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java
index 6bfcb8d9fa3d0..4232d32987e86 100644
--- a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java
+++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java
@@ -247,7 +247,18 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi
 indexShard.prepareForIndexRecovery();
 final boolean hasRemoteSegmentStore = indexShard.indexSettings().isRemoteStoreEnabled();
 if (hasRemoteSegmentStore) {
- indexShard.syncSegmentsFromRemoteSegmentStore(false, recoveryTarget::setLastAccessTime);
+ // ToDo: This is a temporary mitigation to not fail the peer recovery flow in case there is
+ // an exception while downloading segments from remote store. For remote backed indexes, we
For remote backed indexes, we + // plan to revamp this flow so that node-node segment copy will not happen. + // GitHub Issue to track the revamp: https://github.com/opensearch-project/OpenSearch/issues/11331 + try { + indexShard.syncSegmentsFromRemoteSegmentStore(false, recoveryTarget::setLastAccessTime); + } catch (Exception e) { + logger.error( + "Exception while downloading segment files from remote store, will continue with peer to peer segment copy", + e + ); + } } final boolean hasRemoteTranslog = recoveryTarget.state().getPrimary() == false && indexShard.isRemoteTranslogEnabled(); final boolean hasNoTranslog = indexShard.indexSettings().isRemoteSnapshot(); diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java index d91bfc19ee833..5351ae7fe08dd 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java @@ -157,6 +157,25 @@ public class RecoverySettings { Property.NodeScope ); + /** + * Controls minimum number of metadata files to keep in remote segment store. + * {@code value < 1} will disable deletion of stale segment metadata files. + */ + public static final Setting CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING = Setting.intSetting( + "cluster.remote_store.index.segment_metadata.retention.max_count", + 10, + -1, + v -> { + if (v == 0) { + throw new IllegalArgumentException( + "Value 0 is not allowed for this setting as it would delete all the data from remote segment store" + ); + } + }, + Property.NodeScope, + Property.Dynamic + ); + // choose 512KB-16B to ensure that the resulting byte[] is not a humongous allocation in G1. 
public static final ByteSizeValue DEFAULT_CHUNK_SIZE = new ByteSizeValue(512 * 1024 - 16, ByteSizeUnit.BYTES); @@ -171,6 +190,7 @@ public class RecoverySettings { private volatile TimeValue internalActionTimeout; private volatile TimeValue internalActionRetryTimeout; private volatile TimeValue internalActionLongTimeout; + private volatile int minRemoteSegmentMetadataFiles; private volatile ByteSizeValue chunkSize = DEFAULT_CHUNK_SIZE; @@ -212,6 +232,11 @@ public RecoverySettings(Settings settings, ClusterSettings clusterSettings) { this::setInternalActionLongTimeout ); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, this::setActivityTimeout); + minRemoteSegmentMetadataFiles = CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer( + CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, + this::setMinRemoteSegmentMetadataFiles + ); } public RateLimiter rateLimiter() { @@ -307,4 +332,12 @@ public int getMaxConcurrentRemoteStoreStreams() { private void setMaxConcurrentRemoteStoreStreams(int maxConcurrentRemoteStoreStreams) { this.maxConcurrentRemoteStoreStreams = maxConcurrentRemoteStoreStreams; } + + private void setMinRemoteSegmentMetadataFiles(int minRemoteSegmentMetadataFiles) { + this.minRemoteSegmentMetadataFiles = minRemoteSegmentMetadataFiles; + } + + public int getMinRemoteSegmentMetadataFiles() { + return this.minRemoteSegmentMetadataFiles; + } } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index cb738d74000bc..d6db154a4e0e3 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -15,10 +15,13 @@ import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchCorruptionException; import org.opensearch.action.support.ChannelActionListener; +import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterStateListener; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.util.concurrent.AbstractRunnable; @@ -26,6 +29,7 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.transport.TransportResponse; +import org.opensearch.index.IndexService; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; @@ -61,7 +65,7 @@ * * @opensearch.internal */ -public class SegmentReplicationTargetService implements IndexEventListener { +public class SegmentReplicationTargetService extends AbstractLifecycleComponent implements ClusterStateListener, IndexEventListener { private static final Logger logger = LogManager.getLogger(SegmentReplicationTargetService.class); @@ -144,6 +148,53 @@ public SegmentReplicationTargetService( ); } + @Override + protected void doStart() { + if (DiscoveryNode.isDataNode(clusterService.getSettings())) { + 
clusterService.addListener(this); + } + } + + @Override + protected void doStop() { + if (DiscoveryNode.isDataNode(clusterService.getSettings())) { + clusterService.removeListener(this); + } + } + + @Override + protected void doClose() throws IOException { + + } + + @Override + public void clusterChanged(ClusterChangedEvent event) { + if (event.routingTableChanged()) { + for (IndexService indexService : indicesService) { + if (indexService.getIndexSettings().isSegRepEnabled() && event.indexRoutingTableChanged(indexService.index().getName())) { + for (IndexShard shard : indexService) { + if (shard.routingEntry().primary() == false) { + // for this shard look up its primary routing, if it has completed a relocation trigger replication + final String previousNode = event.previousState() + .routingTable() + .shardRoutingTable(shard.shardId()) + .primaryShard() + .currentNodeId(); + final String currentNode = event.state() + .routingTable() + .shardRoutingTable(shard.shardId()) + .primaryShard() + .currentNodeId(); + if (previousNode.equals(currentNode) == false) { + processLatestReceivedCheckpoint(shard, Thread.currentThread()); + } + } + } + } + } + } + } + /** * Cancel any replications on this node for a replica that is about to be closed. */ @@ -395,7 +446,7 @@ private DiscoveryNode getPrimaryNode(ShardRouting primaryShard) { // visible to tests protected boolean processLatestReceivedCheckpoint(IndexShard replicaShard, Thread thread) { final ReplicationCheckpoint latestPublishedCheckpoint = latestReceivedCheckpoint.get(replicaShard.shardId()); - if (latestPublishedCheckpoint != null && latestPublishedCheckpoint.isAheadOf(replicaShard.getLatestReplicationCheckpoint())) { + if (latestPublishedCheckpoint != null) { logger.trace( () -> new ParameterizedMessage( "Processing latest received checkpoint for shard {} {}", diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 5e4fbb6d86172..4cbf8dc191a9d 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -898,12 +898,24 @@ protected Node( final RestController restController = actionModule.getRestController(); - final AdmissionControlService admissionControlService = new AdmissionControlService( + final NodeResourceUsageTracker nodeResourceUsageTracker = new NodeResourceUsageTracker( + threadPool, settings, - clusterService.getClusterSettings(), + clusterService.getClusterSettings() + ); + final ResourceUsageCollectorService resourceUsageCollectorService = new ResourceUsageCollectorService( + nodeResourceUsageTracker, + clusterService, threadPool ); + final AdmissionControlService admissionControlService = new AdmissionControlService( + settings, + clusterService, + threadPool, + resourceUsageCollectorService + ); + AdmissionControlTransportInterceptor admissionControlTransportInterceptor = new AdmissionControlTransportInterceptor( admissionControlService ); @@ -1105,16 +1117,6 @@ protected Node( transportService.getTaskManager(), taskCancellationMonitoringSettings ); - final NodeResourceUsageTracker nodeResourceUsageTracker = new NodeResourceUsageTracker( - threadPool, - settings, - clusterService.getClusterSettings() - ); - final ResourceUsageCollectorService resourceUsageCollectorService = new ResourceUsageCollectorService( - nodeResourceUsageTracker, - clusterService, - threadPool - ); this.nodeService = new NodeService( settings, threadPool, @@ -1139,7 +1141,8 @@ protected Node( 
taskCancellationMonitoringService, resourceUsageCollectorService, segmentReplicationStatsTracker, - repositoryService + repositoryService, + admissionControlService ); final SearchService searchService = newSearchService( @@ -1411,6 +1414,7 @@ public Node start() throws NodeValidationException { assert transportService.getLocalNode().equals(localNodeFactory.getNode()) : "transportService has a different local node than the factory provided"; injector.getInstance(PeerRecoverySourceService.class).start(); + injector.getInstance(SegmentReplicationTargetService.class).start(); injector.getInstance(SegmentReplicationSourceService.class).start(); final RemoteClusterStateService remoteClusterStateService = injector.getInstance(RemoteClusterStateService.class); @@ -1599,6 +1603,7 @@ public synchronized void close() throws IOException { toClose.add(injector.getInstance(IndicesStore.class)); toClose.add(injector.getInstance(PeerRecoverySourceService.class)); toClose.add(injector.getInstance(SegmentReplicationSourceService.class)); + toClose.add(injector.getInstance(SegmentReplicationTargetService.class)); toClose.add(() -> stopWatch.stop().start("cluster")); toClose.add(injector.getInstance(ClusterService.class)); toClose.add(() -> stopWatch.stop().start("node_connections_service")); diff --git a/server/src/main/java/org/opensearch/node/NodeService.java b/server/src/main/java/org/opensearch/node/NodeService.java index 49dde0b81cac7..15cc8f3d20bb3 100644 --- a/server/src/main/java/org/opensearch/node/NodeService.java +++ b/server/src/main/java/org/opensearch/node/NodeService.java @@ -54,6 +54,7 @@ import org.opensearch.ingest.IngestService; import org.opensearch.monitor.MonitorService; import org.opensearch.plugins.PluginsService; +import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlService; import org.opensearch.repositories.RepositoriesService; import org.opensearch.script.ScriptService; import org.opensearch.search.aggregations.support.AggregationUsageService; @@ -96,7 +97,7 @@ public class NodeService implements Closeable { private final FileCache fileCache; private final TaskCancellationMonitoringService taskCancellationMonitoringService; private final RepositoriesService repositoriesService; - + private final AdmissionControlService admissionControlService; private final SegmentReplicationStatsTracker segmentReplicationStatsTracker; NodeService( @@ -123,7 +124,8 @@ public class NodeService implements Closeable { TaskCancellationMonitoringService taskCancellationMonitoringService, ResourceUsageCollectorService resourceUsageCollectorService, SegmentReplicationStatsTracker segmentReplicationStatsTracker, - RepositoriesService repositoriesService + RepositoriesService repositoriesService, + AdmissionControlService admissionControlService ) { this.settings = settings; this.threadPool = threadPool; @@ -148,6 +150,7 @@ public class NodeService implements Closeable { this.taskCancellationMonitoringService = taskCancellationMonitoringService; this.resourceUsageCollectorService = resourceUsageCollectorService; this.repositoriesService = repositoriesService; + this.admissionControlService = admissionControlService; clusterService.addStateApplier(ingestService); clusterService.addStateApplier(searchPipelineService); this.segmentReplicationStatsTracker = segmentReplicationStatsTracker; @@ -232,7 +235,8 @@ public NodeStats stats( boolean searchPipelineStats, boolean resourceUsageStats, boolean segmentReplicationTrackerStats, - boolean repositoriesStats + boolean repositoriesStats, + boolean 
admissionControl ) { // for indices stats we want to include previous allocated shards stats as well (it will // only be applied to the sensible ones to use, like refresh/merge/flush/indexing stats) @@ -263,7 +267,8 @@ public NodeStats stats( taskCancellation ? this.taskCancellationMonitoringService.stats() : null, searchPipelineStats ? this.searchPipelineService.stats() : null, segmentReplicationTrackerStats ? this.segmentReplicationStatsTracker.getTotalRejectionStats() : null, - repositoriesStats ? this.repositoriesService.getRepositoriesStats() : null + repositoriesStats ? this.repositoriesService.getRepositoriesStats() : null, + admissionControl ? this.admissionControlService.stats() : null ); } diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlService.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlService.java index 2cc409b0e4465..adca6992833bd 100644 --- a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlService.java +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlService.java @@ -10,10 +10,14 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; +import org.opensearch.node.ResourceUsageCollectorService; import org.opensearch.ratelimitting.admissioncontrol.controllers.AdmissionController; -import org.opensearch.ratelimitting.admissioncontrol.controllers.CPUBasedAdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.controllers.CpuBasedAdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; +import org.opensearch.ratelimitting.admissioncontrol.stats.AdmissionControlStats; +import org.opensearch.ratelimitting.admissioncontrol.stats.AdmissionControllerStats; import org.opensearch.threadpool.ThreadPool; import java.util.ArrayList; @@ -21,7 +25,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -import static org.opensearch.ratelimitting.admissioncontrol.settings.CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER; +import static org.opensearch.ratelimitting.admissioncontrol.controllers.CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER; /** * Admission control Service that bootstraps and manages all the Admission Controllers in OpenSearch. 
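For orientation, a rough usage sketch of the reworked service API shown in the next hunks; the service instance and transport action name are illustrative, with the real wiring done in Node:

import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException;
import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlService;
import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType;

class AdmissionControlUsageSketch {
    // Each intercepted transport request is now applied together with its action type,
    // so rejections can be tracked per type (indexing vs. search).
    static void applyForSearch(AdmissionControlService service, String transportActionName) {
        try {
            service.applyTransportAdmissionControl(transportActionName, AdmissionControlActionType.SEARCH);
        } catch (OpenSearchRejectedExecutionException e) {
            // In ENFORCED mode a breached CPU limit surfaces as a rejection before the handler runs.
        }
    }
}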
@@ -29,23 +33,31 @@ public class AdmissionControlService {
 private final ThreadPool threadPool;
 public final AdmissionControlSettings admissionControlSettings;
- private final ConcurrentMap<String, AdmissionController> ADMISSION_CONTROLLERS;
+ private final ConcurrentMap<String, AdmissionController> admissionControllers;
 private static final Logger logger = LogManager.getLogger(AdmissionControlService.class);
- private final ClusterSettings clusterSettings;
+ private final ClusterService clusterService;
 private final Settings settings;
+ private final ResourceUsageCollectorService resourceUsageCollectorService;
 /**
 *
 * @param settings Immutable settings instance
- * @param clusterSettings ClusterSettings Instance
+ * @param clusterService ClusterService Instance
 * @param threadPool ThreadPool Instance
+ * @param resourceUsageCollectorService Instance used to get node resource usage stats
 */
- public AdmissionControlService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) {
+ public AdmissionControlService(
+ Settings settings,
+ ClusterService clusterService,
+ ThreadPool threadPool,
+ ResourceUsageCollectorService resourceUsageCollectorService
+ ) {
 this.threadPool = threadPool;
- this.admissionControlSettings = new AdmissionControlSettings(clusterSettings, settings);
- this.ADMISSION_CONTROLLERS = new ConcurrentHashMap<>();
- this.clusterSettings = clusterSettings;
+ this.admissionControlSettings = new AdmissionControlSettings(clusterService.getClusterSettings(), settings);
+ this.admissionControllers = new ConcurrentHashMap<>();
+ this.clusterService = clusterService;
 this.settings = settings;
+ this.resourceUsageCollectorService = resourceUsageCollectorService;
 this.initialise();
 }
@@ -58,10 +70,14 @@ private void initialise() {
 }
 /**
- * Handler to trigger registered admissionController
+ * Apply the registered admission controllers for the given transport action
+ * @param action Transport action name
+ * @param admissionControlActionType type of the transport action (indexing or search)
 */
- public void applyTransportAdmissionControl(String action) {
- this.ADMISSION_CONTROLLERS.forEach((name, admissionController) -> { admissionController.apply(action); });
+ public void applyTransportAdmissionControl(String action, AdmissionControlActionType admissionControlActionType) {
+ this.admissionControllers.forEach(
+ (name, admissionController) -> { admissionController.apply(action, admissionControlActionType); }
+ );
 }
 /**
@@ -70,7 +86,7 @@ public void applyTransportAdmissionControl(String action) {
 */
 public void registerAdmissionController(String admissionControllerName) {
 AdmissionController admissionController = this.controllerFactory(admissionControllerName);
- this.ADMISSION_CONTROLLERS.put(admissionControllerName, admissionController);
+ this.admissionControllers.put(admissionControllerName, admissionController);
 }
 /**
@@ -79,7 +95,12 @@ public void registerAdmissionController(String admissionControllerName) {
 */
 private AdmissionController controllerFactory(String admissionControllerName) {
 switch (admissionControllerName) {
 case CPU_BASED_ADMISSION_CONTROLLER:
- return new CPUBasedAdmissionController(admissionControllerName, this.settings, this.clusterSettings);
+ return new CpuBasedAdmissionController(
+ admissionControllerName,
+ this.resourceUsageCollectorService,
+ this.clusterService,
+ this.settings
+ );
 default:
 throw new IllegalArgumentException("Not Supported AdmissionController : " + admissionControllerName);
 }
@@ -90,7 +111,7 @@ private AdmissionController controllerFactory(String admissionControllerName) {
 * @return list of the registered admissionControllers
 */
 public List<AdmissionController>
getAdmissionControllers() {
- return new ArrayList<>(this.ADMISSION_CONTROLLERS.values());
+ return new ArrayList<>(this.admissionControllers.values());
 }
 /**
@@ -99,6 +120,21 @@ public List<AdmissionController> getAdmissionControllers() {
 * @return instance of the AdmissionController Instance
 */
 public AdmissionController getAdmissionController(String controllerName) {
- return this.ADMISSION_CONTROLLERS.getOrDefault(controllerName, null);
+ return this.admissionControllers.getOrDefault(controllerName, null);
+ }
+
+ /**
+ * Return admission control stats
+ */
+ public AdmissionControlStats stats() {
+ List<AdmissionControllerStats> statsList = new ArrayList<>();
+ if (this.admissionControllers.size() > 0) {
+ this.admissionControllers.forEach((controllerName, admissionController) -> {
+ AdmissionControllerStats admissionControllerStats = new AdmissionControllerStats(admissionController);
+ statsList.add(admissionControllerStats);
+ });
+ return new AdmissionControlStats(statsList);
+ }
+ return null;
 }
 }
diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/AdmissionController.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/AdmissionController.java
index 00564a9967f31..2246ce34dd399 100644
--- a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/AdmissionController.java
+++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/AdmissionController.java
@@ -8,8 +8,14 @@
 package org.opensearch.ratelimitting.admissioncontrol.controllers;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.util.concurrent.ConcurrentCollections;
+import org.opensearch.node.ResourceUsageCollectorService;
+import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType;
 import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode;
+import java.util.HashMap;
+import java.util.Map;
 import java.util.concurrent.atomic.AtomicLong;
 /**
@@ -19,17 +25,25 @@
 */
 public abstract class AdmissionController {
- private final AtomicLong rejectionCount;
 private final String admissionControllerName;
+ final ResourceUsageCollectorService resourceUsageCollectorService;
+ public final Map<String, AtomicLong> rejectionCountMap;
+ public final ClusterService clusterService;
 /**
- *
- * @param rejectionCount initialised rejectionCount value for AdmissionController
- * @param admissionControllerName name of the admissionController
+ * @param admissionControllerName name of the admissionController
+ * @param resourceUsageCollectorService instance used to get resource usage stats of the node
+ * @param clusterService instance of the clusterService
 */
- public AdmissionController(AtomicLong rejectionCount, String admissionControllerName) {
- this.rejectionCount = rejectionCount;
+ public AdmissionController(
+ String admissionControllerName,
+ ResourceUsageCollectorService resourceUsageCollectorService,
+ ClusterService clusterService
+ ) {
 this.admissionControllerName = admissionControllerName;
+ this.resourceUsageCollectorService = resourceUsageCollectorService;
+ this.clusterService = clusterService;
+ this.rejectionCountMap = ConcurrentCollections.newConcurrentMap();
 }
 /**
@@ -41,10 +55,17 @@ public boolean isEnabledForTransportLayer(AdmissionControlMode admissionControlM
 }
 /**
- * Increment the tracking-objects and apply the admission control if threshold is breached.
- * Mostly applicable while applying admission controller
+ *
+ * @return true if the admission controller is in ENFORCED mode, else false
+ */
+ public Boolean isAdmissionControllerEnforced(AdmissionControlMode admissionControlMode) {
+ return admissionControlMode == AdmissionControlMode.ENFORCED;
+ }
+
+ /**
+ * Apply admission control based on the resource usage for an action
 */
- public abstract void apply(String action);
+ public abstract void apply(String action, AdmissionControlActionType admissionControlActionType);
 /**
 * @return name of the admission-controller
@@ -54,17 +75,31 @@ public String getName() {
 }
 /**
- * Adds the rejection count for the controller. Primarily used when copying controller states.
- * @param count To add the value of the tracking resource object as the provided count
+ * Add the given count to the rejection count metric tracked for the action type by the admission controller
 */
- public void addRejectionCount(long count) {
- this.rejectionCount.addAndGet(count);
+ public void addRejectionCount(String admissionControlActionType, long count) {
+ // computeIfAbsent keeps counter creation atomic on the concurrent map, avoiding the
+ // containsKey/put race when two threads record the first rejection for an action type.
+ this.rejectionCountMap.computeIfAbsent(admissionControlActionType, k -> new AtomicLong(0)).getAndAdd(count);
 }
 /**
 * @return current value of the rejection count metric tracked by the admission-controller.
 */
- public long getRejectionCount() {
- return this.rejectionCount.get();
+ public long getRejectionCount(String admissionControlActionType) {
+ AtomicLong count = this.rejectionCountMap.get(admissionControlActionType);
+ return count == null ? 0 : count.get();
+ }
+
+ /**
+ * Get rejection stats of the admission controller
+ */
+ public Map<String, Long> getRejectionStats() {
+ Map<String, Long> rejectionStats = new HashMap<>();
+ rejectionCountMap.forEach((actionType, count) -> rejectionStats.put(actionType, count.get()));
+ return rejectionStats;
 }
 }
diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CPUBasedAdmissionController.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CPUBasedAdmissionController.java
deleted file mode 100644
index 3a8956b2cce87..0000000000000
--- a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CPUBasedAdmissionController.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.ratelimitting.admissioncontrol.controllers;
-
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.opensearch.common.settings.ClusterSettings;
-import org.opensearch.common.settings.Settings;
-import org.opensearch.ratelimitting.admissioncontrol.settings.CPUBasedAdmissionControllerSettings;
-
-import java.util.concurrent.atomic.AtomicLong;
-
-/**
- * Class for CPU Based Admission Controller in OpenSearch, which aims to provide CPU utilisation admission control.
- * It provides methods to apply admission control if configured limit has been reached - */ -public class CPUBasedAdmissionController extends AdmissionController { - private static final Logger LOGGER = LogManager.getLogger(CPUBasedAdmissionController.class); - public CPUBasedAdmissionControllerSettings settings; - - /** - * - * @param admissionControllerName State of the admission controller - */ - public CPUBasedAdmissionController(String admissionControllerName, Settings settings, ClusterSettings clusterSettings) { - super(new AtomicLong(0), admissionControllerName); - this.settings = new CPUBasedAdmissionControllerSettings(clusterSettings, settings); - } - - /** - * This function will take of applying admission controller based on CPU usage - * @param action is the transport action - */ - @Override - public void apply(String action) { - // TODO Will extend this logic further currently just incrementing rejectionCount - if (this.isEnabledForTransportLayer(this.settings.getTransportLayerAdmissionControllerMode())) { - this.applyForTransportLayer(action); - } - } - - private void applyForTransportLayer(String actionName) { - // currently incrementing counts to evaluate the controller triggering as expected and using in testing so limiting to 10 - // TODO will update rejection logic further in next PR's - if (this.getRejectionCount() < 10) { - this.addRejectionCount(1); - } - } -} diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CpuBasedAdmissionController.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CpuBasedAdmissionController.java new file mode 100644 index 0000000000000..5c180346c05e1 --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CpuBasedAdmissionController.java @@ -0,0 +1,125 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.controllers; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.node.NodeResourceUsageStats; +import org.opensearch.node.ResourceUsageCollectorService; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; +import org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings; + +import java.util.Locale; +import java.util.Optional; + +/** + * Class for CPU Based Admission Controller in OpenSearch, which aims to provide CPU utilisation admission control. 
+ * It provides methods to apply admission control if configured limit has been reached + */ +public class CpuBasedAdmissionController extends AdmissionController { + public static final String CPU_BASED_ADMISSION_CONTROLLER = "global_cpu_usage"; + private static final Logger LOGGER = LogManager.getLogger(CpuBasedAdmissionController.class); + public CpuBasedAdmissionControllerSettings settings; + + /** + * @param admissionControllerName Name of the admission controller + * @param resourceUsageCollectorService Instance used to get node resource usage stats + * @param clusterService ClusterService Instance + * @param settings Immutable settings instance + */ + public CpuBasedAdmissionController( + String admissionControllerName, + ResourceUsageCollectorService resourceUsageCollectorService, + ClusterService clusterService, + Settings settings + ) { + super(admissionControllerName, resourceUsageCollectorService, clusterService); + this.settings = new CpuBasedAdmissionControllerSettings(clusterService.getClusterSettings(), settings); + } + + /** + * Apply admission control based on process CPU usage + * @param action is the transport action + */ + @Override + public void apply(String action, AdmissionControlActionType admissionControlActionType) { + if (this.isEnabledForTransportLayer(this.settings.getTransportLayerAdmissionControllerMode())) { + this.applyForTransportLayer(action, admissionControlActionType); + } + } + + /** + * Apply transport layer admission control if configured limit has been reached + */ + private void applyForTransportLayer(String actionName, AdmissionControlActionType admissionControlActionType) { + if (isLimitsBreached(actionName, admissionControlActionType)) { + this.addRejectionCount(admissionControlActionType.getType(), 1); + if (this.isAdmissionControllerEnforced(this.settings.getTransportLayerAdmissionControllerMode())) { + throw new OpenSearchRejectedExecutionException( + String.format( + Locale.ROOT, + "CPU usage admission controller rejected the request for action [%s] as CPU limit reached", + admissionControlActionType.name() + ) + ); + } + } + } + + /** + * Check if the configured resource usage limits are breached for the action + */ + private boolean isLimitsBreached(String actionName, AdmissionControlActionType admissionControlActionType) { + // check if cluster state is ready + if (clusterService.state() != null && clusterService.state().nodes() != null) { + long maxCpuLimit = this.getCpuRejectionThreshold(admissionControlActionType); + Optional nodePerformanceStatistics = this.resourceUsageCollectorService.getNodeStatistics( + this.clusterService.state().nodes().getLocalNodeId() + ); + if (nodePerformanceStatistics.isPresent()) { + double cpuUsage = nodePerformanceStatistics.get().getCpuUtilizationPercent(); + if (cpuUsage >= maxCpuLimit) { + LOGGER.warn( + "CpuBasedAdmissionController limit reached as the current CPU " + + "usage [{}] exceeds the allowed limit [{}] for transport action [{}] in admissionControlMode [{}]", + cpuUsage, + maxCpuLimit, + actionName, + this.settings.getTransportLayerAdmissionControllerMode() + ); + return true; + } + } + } + return false; + } + + /** + * Get CPU rejection threshold based on action type + */ + private long getCpuRejectionThreshold(AdmissionControlActionType admissionControlActionType) { + switch (admissionControlActionType) { + case SEARCH: + return this.settings.getSearchCPULimit(); + case INDEXING: + return this.settings.getIndexingCPULimit(); + default: + throw new IllegalArgumentException( + 
String.format(
+ Locale.ROOT,
+ "Admission control not supported for AdmissionControlActionType: %s",
+ admissionControlActionType.getType()
+ )
+ );
+ }
+ }
+}
diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/TransportActionType.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlActionType.java
similarity index 85%
rename from server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/TransportActionType.java
rename to server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlActionType.java
index f2fdca0cfe49b..8cf6e973ceb64 100644
--- a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/TransportActionType.java
+++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlActionType.java
@@ -13,13 +13,13 @@
 /**
 * Enums that defines the type of the transport requests
 */
-public enum TransportActionType {
+public enum AdmissionControlActionType {
 INDEXING("indexing"),
 SEARCH("search");
 private final String type;
- TransportActionType(String uriType) {
+ AdmissionControlActionType(String uriType) {
 this.type = uriType;
 }
@@ -31,7 +31,7 @@ public String getType() {
 return type;
 }
- public static TransportActionType fromName(String name) {
+ public static AdmissionControlActionType fromName(String name) {
 name = name.toLowerCase(Locale.ROOT);
 switch (name) {
 case "indexing":
diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettings.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/CpuBasedAdmissionControllerSettings.java
similarity index 82%
rename from server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettings.java
rename to server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/CpuBasedAdmissionControllerSettings.java
index 141e9b68db145..1bddd1446a4c4 100644
--- a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettings.java
+++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/CpuBasedAdmissionControllerSettings.java
@@ -14,29 +14,22 @@
 import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlSettings;
 import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode;
-import java.util.Arrays;
-import java.util.List;
-
 /**
 * Settings related to cpu based admission controller.
 * @opensearch.internal
 */
-public class CPUBasedAdmissionControllerSettings {
- public static final String CPU_BASED_ADMISSION_CONTROLLER = "global_cpu_usage";
+public class CpuBasedAdmissionControllerSettings {
 /**
- * Default parameters for the CPUBasedAdmissionControllerSettings
+ * Default parameters for the CpuBasedAdmissionControllerSettings
 */
 public static class Defaults {
- public static final long CPU_USAGE = 95;
- public static List<String> TRANSPORT_LAYER_DEFAULT_URI_TYPE = Arrays.asList("indexing", "search");
+ public static final long CPU_USAGE_LIMIT = 95;
 }
 private AdmissionControlMode transportLayerMode;
 private Long searchCPULimit;
 private Long indexingCPULimit;
-
- private final List<String> transportActionsList;
 /**
 * Feature level setting to operate in shadow-mode or in enforced-mode. If enforced field is set
 * rejection will be performed, otherwise only rejection metrics will be populated.
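As a quick illustration of the renamed settings class that follows, node-level overrides for the two dynamic limits might look like this (threshold values are arbitrary examples; both default to 95):

import org.opensearch.common.settings.Settings;

class CpuAdmissionControlSettingsSketch {
    // Both limits are dynamic, so they can also be updated at runtime via the
    // cluster settings API instead of being fixed in the node configuration.
    static Settings exampleOverrides() {
        return Settings.builder()
            .put("admission_control.search.cpu_usage.limit", 90L)
            .put("admission_control.indexing.cpu_usage.limit", 85L)
            .build();
    }
}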
@@ -54,7 +47,7 @@ public static class Defaults {
 */
 public static final Setting<Long> SEARCH_CPU_USAGE_LIMIT = Setting.longSetting(
 "admission_control.search.cpu_usage.limit",
- Defaults.CPU_USAGE,
+ Defaults.CPU_USAGE_LIMIT,
 Setting.Property.Dynamic,
 Setting.Property.NodeScope
 );
@@ -64,18 +57,17 @@ public static class Defaults {
 */
 public static final Setting<Long> INDEXING_CPU_USAGE_LIMIT = Setting.longSetting(
 "admission_control.indexing.cpu_usage.limit",
- Defaults.CPU_USAGE,
+ Defaults.CPU_USAGE_LIMIT,
 Setting.Property.Dynamic,
 Setting.Property.NodeScope
 );
 // currently limited to one setting will add further more settings in follow-up PR's
- public CPUBasedAdmissionControllerSettings(ClusterSettings clusterSettings, Settings settings) {
+ public CpuBasedAdmissionControllerSettings(ClusterSettings clusterSettings, Settings settings) {
 this.transportLayerMode = CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.get(settings);
 clusterSettings.addSettingsUpdateConsumer(CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, this::setTransportLayerMode);
 this.searchCPULimit = SEARCH_CPU_USAGE_LIMIT.get(settings);
 this.indexingCPULimit = INDEXING_CPU_USAGE_LIMIT.get(settings);
- this.transportActionsList = Defaults.TRANSPORT_LAYER_DEFAULT_URI_TYPE;
 clusterSettings.addSettingsUpdateConsumer(INDEXING_CPU_USAGE_LIMIT, this::setIndexingCPULimit);
 clusterSettings.addSettingsUpdateConsumer(SEARCH_CPU_USAGE_LIMIT, this::setSearchCPULimit);
 }
@@ -103,8 +95,4 @@ public void setIndexingCPULimit(Long indexingCPULimit) {
 public void setSearchCPULimit(Long searchCPULimit) {
 this.searchCPULimit = searchCPULimit;
 }
-
- public List<String> getTransportActionsList() {
- return transportActionsList;
- }
 }
diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControlStats.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControlStats.java
new file mode 100644
index 0000000000000..39909c571c63e
--- /dev/null
+++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControlStats.java
@@ -0,0 +1,67 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.ratelimitting.admissioncontrol.stats;
+
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.common.io.stream.Writeable;
+import org.opensearch.core.xcontent.ToXContentFragment;
+import org.opensearch.core.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Class for admission control stats used as part of node stats
+ * @opensearch.internal
+ */
+public class AdmissionControlStats implements ToXContentFragment, Writeable {
+
+ private final List<AdmissionControllerStats> admissionControllerStatsList;
+
+ /**
+ *
+ * @param admissionControllerStatsList list of admissionControllerStats
+ */
+ public AdmissionControlStats(List<AdmissionControllerStats> admissionControllerStatsList) {
+ this.admissionControllerStatsList = admissionControllerStatsList;
+ }
+
+ /**
+ *
+ * @param in the stream to read from
+ * @throws IOException if an I/O error occurs
+ */
+ public AdmissionControlStats(StreamInput in) throws IOException {
+ this.admissionControllerStatsList = in.readList(AdmissionControllerStats::new);
+ }
+
+ /**
+ * Write this into the {@linkplain StreamOutput}.
+ *
+ * @param out the output stream to write entity content to
+ */
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeList(this.admissionControllerStatsList);
+ }
+
+ public List<AdmissionControllerStats> getAdmissionControllerStatsList() {
+ return admissionControllerStatsList;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject("admission_control");
+ for (AdmissionControllerStats admissionControllerStats : this.admissionControllerStatsList) {
+ builder.field(admissionControllerStats.getAdmissionControllerName(), admissionControllerStats);
+ }
+ return builder.endObject();
+ }
+}
diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControllerStats.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControllerStats.java
new file mode 100644
index 0000000000000..3895cac3eaa07
--- /dev/null
+++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControllerStats.java
@@ -0,0 +1,74 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.ratelimitting.admissioncontrol.stats;
+
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.common.io.stream.Writeable;
+import org.opensearch.core.xcontent.ToXContentFragment;
+import org.opensearch.core.xcontent.XContentBuilder;
+import org.opensearch.ratelimitting.admissioncontrol.controllers.AdmissionController;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Class for admission controller (such as CPU) stats which includes rejection count for each action type
+ * @opensearch.internal
+ */
+public class AdmissionControllerStats implements Writeable, ToXContentFragment {
+ public Map<String, Long> rejectionCount;
+ public String admissionControllerName;
+
+ public AdmissionControllerStats(AdmissionController admissionController) {
+ this.rejectionCount = admissionController.getRejectionStats();
+ this.admissionControllerName = admissionController.getName();
+ }
+
+ public AdmissionControllerStats(StreamInput in) throws IOException {
+ this.rejectionCount = in.readMap(StreamInput::readString, StreamInput::readLong);
+ this.admissionControllerName = in.readString();
+ }
+
+ public String getAdmissionControllerName() {
+ return admissionControllerName;
+ }
+
+ public Map<String, Long> getRejectionCount() {
+ return rejectionCount;
+ }
+
+ /**
+ * Writes this instance into a {@link StreamOutput}
+ * @param out the {@link StreamOutput} to write to
+ * @throws IOException if an error occurs while writing to the StreamOutput
+ */
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeMap(this.rejectionCount, StreamOutput::writeString, StreamOutput::writeLong);
+ out.writeString(this.admissionControllerName);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ builder.startObject("transport");
+ {
+ builder.startObject("rejection_count");
+ {
+ for (Map.Entry<String, Long> rejectionCountEntry : this.rejectionCount.entrySet()) {
+ builder.field(rejectionCountEntry.getKey(), rejectionCountEntry.getValue());
+ }
+ }
+ builder.endObject();
+ }
+ builder.endObject();
+ return builder.endObject();
+ }
+}
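Putting the two stats classes together, a rough sketch (hypothetical helper, using the XContent plumbing as I understand it) of rendering them to JSON, with the approximate output shape in the comment:

import org.opensearch.common.xcontent.XContentFactory;
import org.opensearch.core.common.bytes.BytesReference;
import org.opensearch.core.xcontent.ToXContent;
import org.opensearch.core.xcontent.XContentBuilder;
import org.opensearch.ratelimitting.admissioncontrol.stats.AdmissionControlStats;

class AdmissionControlStatsShapeSketch {
    // With the CPU controller registered, the fragment rendered into node stats looks roughly like:
    // {"admission_control":{"global_cpu_usage":{"transport":{"rejection_count":{"search":3,"indexing":1}}}}}
    // (controller name from CPU_BASED_ADMISSION_CONTROLLER; counter values illustrative)
    static String toJson(AdmissionControlStats stats) throws Exception {
        XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
        stats.toXContent(builder, ToXContent.EMPTY_PARAMS);
        builder.endObject();
        return BytesReference.bytes(builder).utf8ToString();
    }
}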
diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/stats/package-info.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/stats/package-info.java new file mode 100644 index 0000000000000..7c96dcd569d64 --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/stats/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains stats related classes for the admissionController Feature + */ +package org.opensearch.ratelimitting.admissioncontrol.stats; diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportHandler.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportHandler.java index 7d0f5fbc17a51..1e8f309234f90 100644 --- a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportHandler.java +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportHandler.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.Logger; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlService; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.tasks.Task; import org.opensearch.transport.TransportChannel; import org.opensearch.transport.TransportRequest; @@ -28,18 +29,21 @@ public class AdmissionControlTransportHandler implem protected final Logger log = LogManager.getLogger(this.getClass()); AdmissionControlService admissionControlService; boolean forceExecution; + AdmissionControlActionType admissionControlActionType; public AdmissionControlTransportHandler( String action, TransportRequestHandler actualHandler, AdmissionControlService admissionControlService, - boolean forceExecution + boolean forceExecution, + AdmissionControlActionType admissionControlActionType ) { super(); this.action = action; this.actualHandler = actualHandler; this.admissionControlService = admissionControlService; this.forceExecution = forceExecution; + this.admissionControlActionType = admissionControlActionType; } /** @@ -50,15 +54,16 @@ public AdmissionControlTransportHandler( */ @Override public void messageReceived(T request, TransportChannel channel, Task task) throws Exception { - // intercept all the transport requests here and apply admission control - try { - // TODO Need to evaluate if we need to apply admission control or not if force Execution is true will update in next PR. 
- this.admissionControlService.applyTransportAdmissionControl(this.action); - } catch (final OpenSearchRejectedExecutionException openSearchRejectedExecutionException) { - log.warn(openSearchRejectedExecutionException.getMessage()); - channel.sendResponse(openSearchRejectedExecutionException); - } catch (final Exception e) { - throw e; + // skip admission control if force execution is true + if (!this.forceExecution) { + // intercept the transport requests here and apply admission control + try { + this.admissionControlService.applyTransportAdmissionControl(this.action, this.admissionControlActionType); + } catch (final OpenSearchRejectedExecutionException openSearchRejectedExecutionException) { + log.warn(openSearchRejectedExecutionException.getMessage()); + channel.sendResponse(openSearchRejectedExecutionException); + return; + } } actualHandler.messageReceived(request, channel, task); } diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportInterceptor.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportInterceptor.java index 01cfcbd780006..ae1520bca769d 100644 --- a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportInterceptor.java +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportInterceptor.java @@ -9,12 +9,13 @@ package org.opensearch.ratelimitting.admissioncontrol.transport; import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlService; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.transport.TransportInterceptor; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestHandler; /** - * This class allows throttling to intercept requests on both the sender and the receiver side. + * This class allows throttling by intercepting requests on both the sender and the receiver side. 
*/ public class AdmissionControlTransportInterceptor implements TransportInterceptor { @@ -33,8 +34,15 @@ public TransportRequestHandler interceptHandler( String action, String executor, boolean forceExecution, - TransportRequestHandler actualHandler + TransportRequestHandler actualHandler, + AdmissionControlActionType admissionControlActionType ) { - return new AdmissionControlTransportHandler<>(action, actualHandler, this.admissionControlService, forceExecution); + return new AdmissionControlTransportHandler<>( + action, + actualHandler, + this.admissionControlService, + forceExecution, + admissionControlActionType + ); } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java index b4f1e78f77aaf..a71c15d551927 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java @@ -34,6 +34,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Weight; import org.apache.lucene.util.CollectionUtil; import org.opensearch.common.Rounding; import org.opensearch.common.Rounding.Prepared; @@ -41,6 +42,7 @@ import org.opensearch.common.util.IntArray; import org.opensearch.common.util.LongArray; import org.opensearch.core.common.util.ByteArray; +import org.opensearch.index.mapper.DateFieldMapper; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; @@ -125,9 +127,13 @@ static AutoDateHistogramAggregator build( * {@link MergingBucketsDeferringCollector#mergeBuckets(long[])}. 
*/
 private MergingBucketsDeferringCollector deferringCollector;
+ private final Weight[] filters;
+ private final DateFieldMapper.DateFieldType fieldType;
 protected final RoundingInfo[] roundingInfos;
 protected final int targetBuckets;
+ protected int roundingIdx;
+ protected Rounding.Prepared preparedRounding;
 private AutoDateHistogramAggregator(
 String name,
@@ -148,8 +154,51 @@ private AutoDateHistogramAggregator(
 this.formatter = valuesSourceConfig.format();
 this.roundingInfos = roundingInfos;
 this.roundingPreparer = roundingPreparer;
+ this.preparedRounding = prepareRounding(0);
+
+ FilterRewriteHelper.FilterContext filterContext = FilterRewriteHelper.buildFastFilterContext(
+ parent(),
+ subAggregators.length,
+ context,
+ b -> getMinimumRounding(b[0], b[1]),
+ // Passing prepared rounding as supplier to ensure the correct prepared
+ // rounding is set as it is done during getMinimumRounding
+ () -> preparedRounding,
+ valuesSourceConfig,
+ fc -> FilterRewriteHelper.getAggregationBounds(context, fc.field())
+ );
+ if (filterContext != null) {
+ fieldType = filterContext.fieldType;
+ filters = filterContext.filters;
+ } else {
+ fieldType = null;
+ filters = null;
+ }
 }
+ private Rounding getMinimumRounding(final long low, final long high) {
+ // (max - min) / targetBuckets = bestDuration
+ // find the right innerInterval this bestDuration belongs to
+ // since we cannot exceed targetBuckets, bestDuration should go up,
+ // so the right innerInterval should be an upper bound
+ long bestDuration = (high - low) / targetBuckets;
+ while (roundingIdx < roundingInfos.length - 1) {
+ final RoundingInfo curRoundingInfo = roundingInfos[roundingIdx];
+ final int temp = curRoundingInfo.innerIntervals[curRoundingInfo.innerIntervals.length - 1];
+ // If the interval duration is covered by the maximum inner interval,
+ // we can start with this outer interval for creating the buckets
+ if (bestDuration <= temp * curRoundingInfo.roughEstimateDurationMillis) {
+ break;
+ }
+ roundingIdx++;
+ }
+
+ preparedRounding = prepareRounding(roundingIdx);
+ return roundingInfos[roundingIdx].rounding;
+ }
+
+ protected abstract LongKeyedBucketOrds getBucketOrds();
+
 @Override
 public final ScoreMode scoreMode() {
 if (valuesSource != null && valuesSource.needsScores()) {
@@ -176,7 +225,32 @@ public final LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBuc
 if (valuesSource == null) {
 return LeafBucketCollector.NO_OP_COLLECTOR;
 }
- return getLeafCollector(valuesSource.longValues(ctx), sub);
+
+ final SortedNumericDocValues values = valuesSource.longValues(ctx);
+ final LeafBucketCollector iteratingCollector = getLeafCollector(values, sub);
+
+ // Needs to be declared as a final array for use within the
+ // LeafBucketCollectorBase subclass below
+ final boolean[] useOpt = new boolean[1];
+ useOpt[0] = filters != null;
+
+ return new LeafBucketCollectorBase(sub, values) {
+ @Override
+ public void collect(int doc, long owningBucketOrd) throws IOException {
+ // Try fast filter aggregation if the filters have been created
+ // Skip if tried before and gave incorrect/incomplete results
+ if (useOpt[0]) {
+ useOpt[0] = FilterRewriteHelper.tryFastFilterAggregation(ctx, filters, fieldType, (key, count) -> {
+ incrementBucketDocCount(
+ FilterRewriteHelper.getBucketOrd(getBucketOrds().add(owningBucketOrd, preparedRounding.round(key))),
+ count
+ );
+ });
+ }
+
+ iteratingCollector.collect(doc, owningBucketOrd);
+ }
+ };
 }
 protected final InternalAggregation[] buildAggregations(
@@ -247,8 +321,6 @@
protected final void merge(long[] mergeMap, long newNumBuckets) { * @opensearch.internal */ private static class FromSingle extends AutoDateHistogramAggregator { - private int roundingIdx; - private Rounding.Prepared preparedRounding; /** * Map from value to bucket ordinals. *

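For intuition, here is a minimal, self-contained sketch of the selection arithmetic in getMinimumRounding above. The duration and inner-interval values are simplified stand-ins for the data carried by RoundingInfo, chosen only to make the arithmetic concrete; they are not the values OpenSearch ships.

public class RoundingSelectionSketch {
    public static void main(String[] args) {
        // Rough duration of one unit of each candidate rounding, in millis (second, minute, hour).
        long[] roughEstimateDurationMillis = { 1_000L, 60_000L, 3_600_000L };
        // Inner intervals allowed for each rounding; the last entry is the widest.
        int[][] innerIntervals = { { 1, 5, 10, 30 }, { 1, 5, 10, 30 }, { 1, 3, 12 } };
        int targetBuckets = 10;

        long low = 0L, high = 7_200_000L; // a two-hour range
        long bestDuration = (high - low) / targetBuckets; // 720_000 ms per bucket

        int roundingIdx = 0;
        while (roundingIdx < roughEstimateDurationMillis.length - 1) {
            int maxInner = innerIntervals[roundingIdx][innerIntervals[roundingIdx].length - 1];
            // Stop at the first rounding whose widest inner interval covers bestDuration,
            // so the bucket count cannot exceed targetBuckets.
            if (bestDuration <= maxInner * roughEstimateDurationMillis[roundingIdx]) {
                break;
            }
            roundingIdx++;
        }
        // 720_000 <= 30 * 60_000, so the "minute" rounding (index 1) is selected.
        System.out.println("selected rounding index: " + roundingIdx);
    }
}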
@@ -286,10 +358,14 @@ private static class FromSingle extends AutoDateHistogramAggregator { metadata ); - preparedRounding = prepareRounding(0); bucketOrds = new LongKeyedBucketOrds.FromSingle(context.bigArrays()); } + @Override + protected LongKeyedBucketOrds getBucketOrds() { + return bucketOrds; + } + @Override protected LeafBucketCollector getLeafCollector(SortedNumericDocValues values, LeafBucketCollector sub) throws IOException { return new LeafBucketCollectorBase(sub, values) { @@ -507,6 +583,11 @@ private static class FromMany extends AutoDateHistogramAggregator { liveBucketCountUnderestimate = context.bigArrays().newIntArray(1, true); } + @Override + protected LongKeyedBucketOrds getBucketOrds() { + return bucketOrds; + } + @Override protected LeafBucketCollector getLeafCollector(SortedNumericDocValues values, LeafBucketCollector sub) throws IOException { return new LeafBucketCollectorBase(sub, values) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index f602eea7a9b12..8437e1dce9fe0 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -34,10 +34,12 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Weight; import org.apache.lucene.util.CollectionUtil; import org.opensearch.common.Nullable; import org.opensearch.common.Rounding; import org.opensearch.common.lease.Releasables; +import org.opensearch.index.mapper.DateFieldMapper; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; @@ -48,6 +50,7 @@ import org.opensearch.search.aggregations.LeafBucketCollectorBase; import org.opensearch.search.aggregations.bucket.BucketsAggregator; import org.opensearch.search.aggregations.bucket.terms.LongKeyedBucketOrds; +import org.opensearch.search.aggregations.support.FieldContext; import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.aggregations.support.ValuesSourceConfig; import org.opensearch.search.internal.SearchContext; @@ -66,7 +69,6 @@ * @opensearch.internal */ class DateHistogramAggregator extends BucketsAggregator implements SizedBucketAggregator { - private final ValuesSource.Numeric valuesSource; private final DocValueFormat formatter; private final Rounding rounding; @@ -76,12 +78,12 @@ class DateHistogramAggregator extends BucketsAggregator implements SizedBucketAg private final Rounding.Prepared preparedRounding; private final BucketOrder order; private final boolean keyed; - private final long minDocCount; private final LongBounds extendedBounds; private final LongBounds hardBounds; - + private final Weight[] filters; private final LongKeyedBucketOrds bucketOrds; + private final DateFieldMapper.DateFieldType fieldType; DateHistogramAggregator( String name, @@ -99,7 +101,6 @@ class DateHistogramAggregator extends BucketsAggregator implements SizedBucketAg CardinalityUpperBound cardinality, Map metadata ) throws IOException { - super(name, factories, aggregationContext, parent, CardinalityUpperBound.MANY, metadata); this.rounding = rounding; this.preparedRounding = 
preparedRounding; @@ -114,6 +115,35 @@ class DateHistogramAggregator extends BucketsAggregator implements SizedBucketAg this.formatter = valuesSourceConfig.format(); bucketOrds = LongKeyedBucketOrds.build(context.bigArrays(), cardinality); + + FilterRewriteHelper.FilterContext filterContext = FilterRewriteHelper.buildFastFilterContext( + parent, + subAggregators.length, + context, + x -> rounding, + () -> preparedRounding, + valuesSourceConfig, + this::computeBounds + ); + if (filterContext != null) { + fieldType = filterContext.fieldType; + filters = filterContext.filters; + } else { + filters = null; + fieldType = null; + } + } + + private long[] computeBounds(final FieldContext fieldContext) throws IOException { + final long[] bounds = FilterRewriteHelper.getAggregationBounds(context, fieldContext.field()); + if (bounds != null) { + // Update min/max limit if user specified any hard bounds + if (hardBounds != null) { + bounds[0] = Math.max(bounds[0], hardBounds.getMin()); + bounds[1] = Math.min(bounds[1], hardBounds.getMax() - 1); // hard bounds max is exclusive + } + } + return bounds; } @Override @@ -129,10 +159,27 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCol if (valuesSource == null) { return LeafBucketCollector.NO_OP_COLLECTOR; } + + // Need to be declared as final and array for usage within the + // LeafBucketCollectorBase subclass below + final boolean[] useOpt = new boolean[1]; + useOpt[0] = filters != null; + SortedNumericDocValues values = valuesSource.longValues(ctx); return new LeafBucketCollectorBase(sub, values) { @Override public void collect(int doc, long owningBucketOrd) throws IOException { + // Try fast filter aggregation if the filters have been created + // Skip if tried before and gave incorrect/incomplete results + if (useOpt[0]) { + useOpt[0] = FilterRewriteHelper.tryFastFilterAggregation(ctx, filters, fieldType, (key, count) -> { + incrementBucketDocCount( + FilterRewriteHelper.getBucketOrd(bucketOrds.add(owningBucketOrd, preparedRounding.round(key))), + count + ); + }); + } + if (values.advanceExact(doc)) { int valuesCount = values.docValueCount(); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/FilterRewriteHelper.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/FilterRewriteHelper.java new file mode 100644 index 0000000000000..29cecd5b382cd --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/FilterRewriteHelper.java @@ -0,0 +1,259 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
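The collect-time fast path above works because Lucene's Weight#count can often answer a point range query per segment from index metadata without visiting documents; a count of -1 means the segment cannot be counted cheaply (because of deleted documents, for instance), and the aggregator then falls back to ordinary per-document collection. Below is a self-contained sketch of that counting pattern against an in-memory index; the field name and the two bucket ranges are illustrative only.

import org.apache.lucene.document.Document;
import org.apache.lucene.document.LongPoint;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Weight;
import org.apache.lucene.store.ByteBuffersDirectory;

public class FastFilterCountSketch {
    public static void main(String[] args) throws Exception {
        try (ByteBuffersDirectory dir = new ByteBuffersDirectory()) {
            try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig())) {
                for (long ts = 0; ts < 100; ts++) {
                    Document doc = new Document();
                    doc.add(new LongPoint("@timestamp", ts));
                    writer.addDocument(doc);
                }
            }
            try (DirectoryReader reader = DirectoryReader.open(dir)) {
                IndexSearcher searcher = new IndexSearcher(reader);
                // Two "buckets", expressed as range filters: [0, 50) and [50, 100).
                Query[] buckets = {
                    LongPoint.newRangeQuery("@timestamp", 0, 49),
                    LongPoint.newRangeQuery("@timestamp", 50, 99) };
                for (Query bucket : buckets) {
                    Weight weight = searcher.createWeight(searcher.rewrite(bucket), ScoreMode.COMPLETE_NO_SCORES, 1f);
                    long total = 0;
                    for (LeafReaderContext leaf : reader.leaves()) {
                        int count = weight.count(leaf); // -1 would mean "cannot count cheaply"
                        if (count == -1) {
                            total = -1;
                            break;
                        }
                        total += count;
                    }
                    System.out.println(bucket + " -> " + total + " docs");
                }
            }
        }
    }
}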
+ */
+
+package org.opensearch.search.aggregations.bucket.histogram;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.PointValues;
+import org.apache.lucene.search.CollectionTerminatedException;
+import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.IndexOrDocValuesQuery;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.PointRangeQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreMode;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.util.NumericUtils;
+import org.opensearch.common.CheckedFunction;
+import org.opensearch.common.Rounding;
+import org.opensearch.common.lucene.search.function.FunctionScoreQuery;
+import org.opensearch.index.mapper.DateFieldMapper;
+import org.opensearch.index.query.DateRangeIncludingNowQuery;
+import org.opensearch.search.aggregations.support.FieldContext;
+import org.opensearch.search.aggregations.support.ValuesSourceConfig;
+import org.opensearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.OptionalLong;
+import java.util.function.BiConsumer;
+import java.util.function.Function;
+import java.util.function.Supplier;
+
+/**
+ * Helper functions to rewrite and optimize aggregations using
+ * range filter queries
+ *
+ * @opensearch.internal
+ */
+public class FilterRewriteHelper {
+
+    static class FilterContext {
+        final DateFieldMapper.DateFieldType fieldType;
+        final Weight[] filters;
+
+        public FilterContext(DateFieldMapper.DateFieldType fieldType, Weight[] filters) {
+            this.fieldType = fieldType;
+            this.filters = filters;
+        }
+    }
+
+    private static final int MAX_NUM_FILTER_BUCKETS = 1024;
+    private static final Map<Class<?>, Function<Query, Query>> queryWrappers;
+
+    // Initialize the wrappers map for unwrapping the query
+    static {
+        queryWrappers = new HashMap<>();
+        queryWrappers.put(ConstantScoreQuery.class, q -> ((ConstantScoreQuery) q).getQuery());
+        queryWrappers.put(FunctionScoreQuery.class, q -> ((FunctionScoreQuery) q).getSubQuery());
+        queryWrappers.put(DateRangeIncludingNowQuery.class, q -> ((DateRangeIncludingNowQuery) q).getQuery());
+        queryWrappers.put(IndexOrDocValuesQuery.class, q -> ((IndexOrDocValuesQuery) q).getIndexQuery());
+    }
+
+    /**
+     * Recursively unwraps query into the concrete form
+     * for applying the optimization
+     */
+    private static Query unwrapIntoConcreteQuery(Query query) {
+        while (queryWrappers.containsKey(query.getClass())) {
+            query = queryWrappers.get(query.getClass()).apply(query);
+        }
+
+        return query;
+    }
+
+    /**
+     * Finds the min and max bounds for segments within the passed search context
+     */
+    private static long[] getIndexBoundsFromLeaves(final SearchContext context, final String fieldName) throws IOException {
+        final List<LeafReaderContext> leaves = context.searcher().getIndexReader().leaves();
+        long min = Long.MAX_VALUE, max = Long.MIN_VALUE;
+        // Since the query does not specify bounds for aggregation, we can
+        // build the global min/max from local min/max within each segment
+        for (LeafReaderContext leaf : leaves) {
+            final PointValues values = leaf.reader().getPointValues(fieldName);
+            if (values != null) {
+                min = Math.min(min, NumericUtils.sortableBytesToLong(values.getMinPackedValue(), 0));
+                max = Math.max(max, NumericUtils.sortableBytesToLong(values.getMaxPackedValue(), 0));
+            }
+        }
+
+        if (min == Long.MAX_VALUE || max == Long.MIN_VALUE) return null;
+
+        return new long[] {
min, max };
+    }
+
+    static long[] getAggregationBounds(final SearchContext context, final String fieldName) throws IOException {
+        final Query cq = unwrapIntoConcreteQuery(context.query());
+        final long[] indexBounds = getIndexBoundsFromLeaves(context, fieldName);
+        if (cq instanceof PointRangeQuery) {
+            final PointRangeQuery prq = (PointRangeQuery) cq;
+            // Ensure that the query and aggregation are on the same field
+            if (prq.getField().equals(fieldName)) {
+                return new long[] {
+                    // Minimum bound for aggregation is the max between query and global
+                    Math.max(NumericUtils.sortableBytesToLong(prq.getLowerPoint(), 0), indexBounds[0]),
+                    // Maximum bound for aggregation is the min between query and global
+                    Math.min(NumericUtils.sortableBytesToLong(prq.getUpperPoint(), 0), indexBounds[1]) };
+            }
+        } else if (cq instanceof MatchAllDocsQuery) {
+            return indexBounds;
+        }
+
+        return null;
+    }
+
+    /**
+     * Creates the range query filters for aggregations using the interval, min/max
+     * bounds and the rounding values
+     */
+    private static Weight[] createFilterForAggregations(
+        final SearchContext context,
+        final Rounding rounding,
+        final Rounding.Prepared preparedRounding,
+        final String field,
+        final DateFieldMapper.DateFieldType fieldType,
+        final long low,
+        final long high
+    ) throws IOException {
+        final OptionalLong intervalOpt = Rounding.getInterval(rounding);
+        if (intervalOpt.isEmpty()) {
+            return null;
+        }
+
+        final long interval = intervalOpt.getAsLong();
+        // Calculate the number of buckets using range and interval
+        long roundedLow = preparedRounding.round(fieldType.convertNanosToMillis(low));
+        long prevRounded = roundedLow;
+        int bucketCount = 0;
+        while (roundedLow <= fieldType.convertNanosToMillis(high)) {
+            bucketCount++;
+            // The rounding below is needed because adding the raw interval may
+            // produce a non-rounded value for units like calendar month
+            roundedLow = preparedRounding.round(roundedLow + interval);
+            if (prevRounded == roundedLow) break;
+            prevRounded = roundedLow;
+        }
+
+        Weight[] filters = null;
+        if (bucketCount > 0 && bucketCount <= MAX_NUM_FILTER_BUCKETS) {
+            int i = 0;
+            filters = new Weight[bucketCount];
+            roundedLow = preparedRounding.round(fieldType.convertNanosToMillis(low));
+            while (i < bucketCount) {
+                // Calculate the lower bucket bound
+                final byte[] lower = new byte[8];
+                NumericUtils.longToSortableBytes(i == 0 ? low : fieldType.convertRoundedMillisToNanos(roundedLow), lower, 0);
+                // Calculate the upper bucket bound
+                final byte[] upper = new byte[8];
+                roundedLow = preparedRounding.round(roundedLow + interval);
+                // Subtract 1 from the rounded upper bound, as roundedLow itself
+                // is included in the next bucket
+                NumericUtils.longToSortableBytes(
+                    i + 1 == bucketCount ?
high : fieldType.convertRoundedMillisToNanos(roundedLow) - 1, + upper, + 0 + ); + filters[i++] = context.searcher().createWeight(new PointRangeQuery(field, lower, upper, 1) { + @Override + protected String toString(int dimension, byte[] value) { + return null; + } + }, ScoreMode.COMPLETE_NO_SCORES, 1); + } + } + + return filters; + } + + static FilterContext buildFastFilterContext( + final Object parent, + final int subAggLength, + SearchContext context, + Function roundingFunction, + Supplier preparedRoundingSupplier, + ValuesSourceConfig valuesSourceConfig, + CheckedFunction computeBounds + ) throws IOException { + // Create the filters for fast aggregation only if the query is instance + // of point range query and there aren't any parent/sub aggregations + if (parent == null && subAggLength == 0 && valuesSourceConfig.missing() == null && valuesSourceConfig.script() == null) { + final FieldContext fieldContext = valuesSourceConfig.fieldContext(); + if (fieldContext != null) { + final String fieldName = fieldContext.field(); + final long[] bounds = computeBounds.apply(fieldContext); + if (bounds != null) { + assert fieldContext.fieldType() instanceof DateFieldMapper.DateFieldType; + final DateFieldMapper.DateFieldType fieldType = (DateFieldMapper.DateFieldType) fieldContext.fieldType(); + final Rounding rounding = roundingFunction.apply(bounds); + final Weight[] filters = FilterRewriteHelper.createFilterForAggregations( + context, + rounding, + preparedRoundingSupplier.get(), + fieldName, + fieldType, + bounds[0], + bounds[1] + ); + return new FilterContext(fieldType, filters); + } + } + } + return null; + } + + static long getBucketOrd(long bucketOrd) { + if (bucketOrd < 0) { // already seen + bucketOrd = -1 - bucketOrd; + } + + return bucketOrd; + } + + static boolean tryFastFilterAggregation( + final LeafReaderContext ctx, + final Weight[] filters, + final DateFieldMapper.DateFieldType fieldType, + final BiConsumer incrementDocCount + ) throws IOException { + final int[] counts = new int[filters.length]; + int i; + for (i = 0; i < filters.length; i++) { + counts[i] = filters[i].count(ctx); + if (counts[i] == -1) { + // Cannot use the optimization if any of the counts + // is -1 indicating the segment might have deleted documents + return false; + } + } + + for (i = 0; i < filters.length; i++) { + if (counts[i] > 0) { + incrementDocCount.accept( + fieldType.convertNanosToMillis( + NumericUtils.sortableBytesToLong(((PointRangeQuery) filters[i].getQuery()).getLowerPoint(), 0) + ), + counts[i] + ); + } + } + throw new CollectionTerminatedException(); + } +} diff --git a/server/src/main/java/org/opensearch/search/pipeline/Pipeline.java b/server/src/main/java/org/opensearch/search/pipeline/Pipeline.java index 8bab961423f91..c88dfb2060393 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/Pipeline.java +++ b/server/src/main/java/org/opensearch/search/pipeline/Pipeline.java @@ -119,7 +119,8 @@ protected void afterResponseProcessor(Processor processor, long timeInNanos) {} protected void onResponseProcessorFailed(Processor processor) {} - void transformRequest(SearchRequest request, ActionListener requestListener) throws SearchPipelineProcessingException { + void transformRequest(SearchRequest request, ActionListener requestListener, PipelineProcessingContext requestContext) + throws SearchPipelineProcessingException { if (searchRequestProcessors.isEmpty()) { requestListener.onResponse(request); return; @@ -137,7 +138,7 @@ void transformRequest(SearchRequest request, 
ActionListener reque return; } - ActionListener finalListener = getTerminalSearchRequestActionListener(requestListener); + ActionListener finalListener = getTerminalSearchRequestActionListener(requestListener, requestContext); // Chain listeners back-to-front ActionListener currentListener = finalListener; @@ -147,7 +148,7 @@ void transformRequest(SearchRequest request, ActionListener reque currentListener = ActionListener.wrap(r -> { long start = relativeTimeSupplier.getAsLong(); beforeRequestProcessor(processor); - processor.processRequestAsync(r, ActionListener.wrap(rr -> { + processor.processRequestAsync(r, requestContext, ActionListener.wrap(rr -> { long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - start); afterRequestProcessor(processor, took); nextListener.onResponse(rr); @@ -176,13 +177,16 @@ void transformRequest(SearchRequest request, ActionListener reque currentListener.onResponse(request); } - private ActionListener getTerminalSearchRequestActionListener(ActionListener requestListener) { + private ActionListener getTerminalSearchRequestActionListener( + ActionListener requestListener, + PipelineProcessingContext requestContext + ) { final long pipelineStart = relativeTimeSupplier.getAsLong(); return ActionListener.wrap(r -> { long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - pipelineStart); afterTransformRequest(took); - requestListener.onResponse(new PipelinedRequest(this, r)); + requestListener.onResponse(new PipelinedRequest(this, r, requestContext)); }, e -> { long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - pipelineStart); afterTransformRequest(took); @@ -191,7 +195,11 @@ private ActionListener getTerminalSearchRequestActionListener(Act }); } - ActionListener transformResponseListener(SearchRequest request, ActionListener responseListener) { + ActionListener transformResponseListener( + SearchRequest request, + ActionListener responseListener, + PipelineProcessingContext requestContext + ) { if (searchResponseProcessors.isEmpty()) { // No response transformation necessary return responseListener; @@ -219,7 +227,7 @@ ActionListener transformResponseListener(SearchRequest request, responseListener = ActionListener.wrap(r -> { beforeResponseProcessor(processor); final long start = relativeTimeSupplier.getAsLong(); - processor.processResponseAsync(request, r, ActionListener.wrap(rr -> { + processor.processResponseAsync(request, r, requestContext, ActionListener.wrap(rr -> { long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - start); afterResponseProcessor(processor, took); currentFinalListener.onResponse(rr); @@ -257,14 +265,15 @@ void runSearchPhaseResultsTransformer( SearchPhaseResults searchPhaseResult, SearchPhaseContext context, String currentPhase, - String nextPhase + String nextPhase, + PipelineProcessingContext requestContext ) throws SearchPipelineProcessingException { try { for (SearchPhaseResultsProcessor searchPhaseResultsProcessor : searchPhaseResultsProcessors) { if (currentPhase.equals(searchPhaseResultsProcessor.getBeforePhase().getName()) && nextPhase.equals(searchPhaseResultsProcessor.getAfterPhase().getName())) { try { - searchPhaseResultsProcessor.process(searchPhaseResult, context); + searchPhaseResultsProcessor.process(searchPhaseResult, context, requestContext); } catch (Exception e) { if (searchPhaseResultsProcessor.isIgnoreFailure()) { logger.warn( diff --git a/server/src/main/java/org/opensearch/search/pipeline/PipelineProcessingContext.java 
b/server/src/main/java/org/opensearch/search/pipeline/PipelineProcessingContext.java
new file mode 100644
index 0000000000000..a1f2b8b99d958
--- /dev/null
+++ b/server/src/main/java/org/opensearch/search/pipeline/PipelineProcessingContext.java
@@ -0,0 +1,38 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.pipeline;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * A holder for state that is passed through each processor in the pipeline.
+ */
+public class PipelineProcessingContext {
+    private final Map<String, Object> attributes = new HashMap<>();
+
+    /**
+     * Set a generic attribute in the state for this request. Overwrites any existing value.
+     *
+     * @param name the name of the attribute to set
+     * @param value the value to set on the attribute
+     */
+    public void setAttribute(String name, Object value) {
+        attributes.put(name, value);
+    }
+
+    /**
+     * Retrieves a generic attribute value from the state for this request.
+     * @param name the name of the attribute
+     * @return the value of the attribute if previously set (and null otherwise)
+     */
+    public Object getAttribute(String name) {
+        return attributes.get(name);
+    }
+}
diff --git a/server/src/main/java/org/opensearch/search/pipeline/PipelinedRequest.java b/server/src/main/java/org/opensearch/search/pipeline/PipelinedRequest.java
index 77dfc6bcd4fc5..d550fbb768133 100644
--- a/server/src/main/java/org/opensearch/search/pipeline/PipelinedRequest.java
+++ b/server/src/main/java/org/opensearch/search/pipeline/PipelinedRequest.java
@@ -22,18 +22,20 @@
  */
 public final class PipelinedRequest extends SearchRequest {
     private final Pipeline pipeline;
+    private final PipelineProcessingContext requestContext;

-    PipelinedRequest(Pipeline pipeline, SearchRequest transformedRequest) {
+    PipelinedRequest(Pipeline pipeline, SearchRequest transformedRequest, PipelineProcessingContext requestContext) {
         super(transformedRequest);
         this.pipeline = pipeline;
+        this.requestContext = requestContext;
     }

     public void transformRequest(ActionListener<SearchRequest> requestListener) {
-        pipeline.transformRequest(this, requestListener);
+        pipeline.transformRequest(this, requestListener, requestContext);
     }

     public ActionListener<SearchResponse> transformResponseListener(ActionListener<SearchResponse> responseListener) {
-        return pipeline.transformResponseListener(this, responseListener);
+        return pipeline.transformResponseListener(this, responseListener, requestContext);
     }

     public void transformSearchPhaseResults(
@@ -42,7 +44,7 @@ public void transformSearchPhaseResults(
         final String currentPhase,
         final String nextPhase
     ) {
-        pipeline.runSearchPhaseResultsTransformer(searchPhaseResult, searchPhaseContext, currentPhase, nextPhase);
+        pipeline.runSearchPhaseResultsTransformer(searchPhaseResult, searchPhaseContext, currentPhase, nextPhase, requestContext);
     }

     // Visible for testing
diff --git a/server/src/main/java/org/opensearch/search/pipeline/Processor.java b/server/src/main/java/org/opensearch/search/pipeline/Processor.java
index 0120d68ceb5aa..a06383fbe9cef 100644
--- a/server/src/main/java/org/opensearch/search/pipeline/Processor.java
+++ b/server/src/main/java/org/opensearch/search/pipeline/Processor.java
@@ -21,13 +21,6 @@
  * @opensearch.internal
  */
 public interface Processor {
-    /**
-     * Processor configuration key to let the factory know the context for pipeline creation.
-     *

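As a point of reference for the pipeline changes in this area, here is a minimal sketch of the new request-scoped context on its own. The attribute name is illustrative; names are free-form strings chosen by each processor, not API constants.

import org.opensearch.search.pipeline.PipelineProcessingContext;

public class PipelineContextSketch {
    public static void main(String[] args) {
        PipelineProcessingContext ctx = new PipelineProcessingContext();
        ctx.setAttribute("original-query", "user search text"); // overwrites any prior value
        System.out.println(ctx.getAttribute("original-query")); // -> user search text
        System.out.println(ctx.getAttribute("never-set"));      // -> null
    }
}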
- * See {@link PipelineSource}. - */ - String PIPELINE_SOURCE = "pipeline_source"; - /** * Gets the type of processor */ diff --git a/server/src/main/java/org/opensearch/search/pipeline/SearchPhaseResultsProcessor.java b/server/src/main/java/org/opensearch/search/pipeline/SearchPhaseResultsProcessor.java index 772dc8758bace..a64266cfb2a2b 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/SearchPhaseResultsProcessor.java +++ b/server/src/main/java/org/opensearch/search/pipeline/SearchPhaseResultsProcessor.java @@ -32,6 +32,22 @@ void process( final SearchPhaseContext searchPhaseContext ); + /** + * Processes the {@link SearchPhaseResults} obtained from a SearchPhase which will be returned to next + * SearchPhase. Receives the {@link PipelineProcessingContext} passed to other processors. + * @param searchPhaseResult {@link SearchPhaseResults} + * @param searchPhaseContext {@link SearchContext} + * @param requestContext {@link PipelineProcessingContext} + * @param {@link SearchPhaseResult} + */ + default void process( + final SearchPhaseResults searchPhaseResult, + final SearchPhaseContext searchPhaseContext, + final PipelineProcessingContext requestContext + ) { + process(searchPhaseResult, searchPhaseContext); + } + /** * The phase which should have run before, this processor can start executing. * @return {@link SearchPhaseName} diff --git a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java index 580fe1b7c4216..2175b5d135394 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java +++ b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java @@ -408,7 +408,8 @@ public PipelinedRequest resolvePipeline(SearchRequest searchRequest) { pipeline = pipelineHolder.pipeline; } } - return new PipelinedRequest(pipeline, searchRequest); + PipelineProcessingContext requestContext = new PipelineProcessingContext(); + return new PipelinedRequest(pipeline, searchRequest, requestContext); } Map> getRequestProcessorFactories() { diff --git a/server/src/main/java/org/opensearch/search/pipeline/SearchRequestProcessor.java b/server/src/main/java/org/opensearch/search/pipeline/SearchRequestProcessor.java index 427c9e4ab694c..30adc9b0afbe8 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/SearchRequestProcessor.java +++ b/server/src/main/java/org/opensearch/search/pipeline/SearchRequestProcessor.java @@ -15,18 +15,27 @@ * Interface for a search pipeline processor that modifies a search request. */ public interface SearchRequestProcessor extends Processor { - /** - * Transform a {@link SearchRequest}. Executed on the coordinator node before any {@link org.opensearch.action.search.SearchPhase} - * executes. - *

+ * Process a SearchRequest without receiving request-scoped state. * Implement this method if the processor makes no asynchronous calls. - * @param request the executed {@link SearchRequest} - * @return a new {@link SearchRequest} (or the input {@link SearchRequest} if no changes) - * @throws Exception if an error occurs during processing + * @param request the search request (which may have been modified by an earlier processor) + * @return the modified search request + * @throws Exception implementation-specific processing exception */ SearchRequest processRequest(SearchRequest request) throws Exception; + /** + * Process a SearchRequest, with request-scoped state shared across processors in the pipeline + * Implement this method if the processor makes no asynchronous calls. + * @param request the search request (which may have been modified by an earlier processor) + * @param requestContext request-scoped state shared across processors in the pipeline + * @return the modified search request + * @throws Exception implementation-specific processing exception + */ + default SearchRequest processRequest(SearchRequest request, PipelineProcessingContext requestContext) throws Exception { + return processRequest(request); + } + /** * Transform a {@link SearchRequest}. Executed on the coordinator node before any {@link org.opensearch.action.search.SearchPhase} * executes. @@ -35,9 +44,13 @@ public interface SearchRequestProcessor extends Processor { * @param request the executed {@link SearchRequest} * @param requestListener callback to be invoked on successful processing or on failure */ - default void processRequestAsync(SearchRequest request, ActionListener requestListener) { + default void processRequestAsync( + SearchRequest request, + PipelineProcessingContext requestContext, + ActionListener requestListener + ) { try { - requestListener.onResponse(processRequest(request)); + requestListener.onResponse(processRequest(request, requestContext)); } catch (Exception e) { requestListener.onFailure(e); } diff --git a/server/src/main/java/org/opensearch/search/pipeline/SearchResponseProcessor.java b/server/src/main/java/org/opensearch/search/pipeline/SearchResponseProcessor.java index 21136ce208fee..98591ab9d0def 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/SearchResponseProcessor.java +++ b/server/src/main/java/org/opensearch/search/pipeline/SearchResponseProcessor.java @@ -21,24 +21,47 @@ public interface SearchResponseProcessor extends Processor { * Transform a {@link SearchResponse}, possibly based on the executed {@link SearchRequest}. *

* Implement this method if the processor makes no asynchronous calls. - * @param request the executed {@link SearchRequest} + * + * @param request the executed {@link SearchRequest} * @param response the current {@link SearchResponse}, possibly modified by earlier processors * @return a modified {@link SearchResponse} (or the input {@link SearchResponse} if no changes) * @throws Exception if an error occurs during processing */ SearchResponse processResponse(SearchRequest request, SearchResponse response) throws Exception; + /** + * Process a SearchResponse, with request-scoped state shared across processors in the pipeline + *

+ * Implement this method if the processor makes no asynchronous calls. + * + * @param request the (maybe transformed) search request + * @param response the search response (which may have been modified by an earlier processor) + * @param requestContext request-scoped state shared across processors in the pipeline + * @return the modified search response + * @throws Exception implementation-specific processing exception + */ + default SearchResponse processResponse(SearchRequest request, SearchResponse response, PipelineProcessingContext requestContext) + throws Exception { + return processResponse(request, response); + } + /** * Transform a {@link SearchResponse}, possibly based on the executed {@link SearchRequest}. *

* Expert method: Implement this if the processor needs to make asynchronous calls. Otherwise, implement processResponse. - * @param request the executed {@link SearchRequest} - * @param response the current {@link SearchResponse}, possibly modified by earlier processors + * + * @param request the executed {@link SearchRequest} + * @param response the current {@link SearchResponse}, possibly modified by earlier processors * @param responseListener callback to be invoked on successful processing or on failure */ - default void processResponseAsync(SearchRequest request, SearchResponse response, ActionListener responseListener) { + default void processResponseAsync( + SearchRequest request, + SearchResponse response, + PipelineProcessingContext requestContext, + ActionListener responseListener + ) { try { - responseListener.onResponse(processResponse(request, response)); + responseListener.onResponse(processResponse(request, response, requestContext)); } catch (Exception e) { responseListener.onFailure(e); } diff --git a/server/src/main/java/org/opensearch/search/pipeline/StatefulSearchRequestProcessor.java b/server/src/main/java/org/opensearch/search/pipeline/StatefulSearchRequestProcessor.java new file mode 100644 index 0000000000000..67e1c1147cb87 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/pipeline/StatefulSearchRequestProcessor.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline; + +import org.opensearch.action.search.SearchRequest; + +/** + * A specialization of {@link SearchRequestProcessor} that makes use of the request-scoped processor state. + * Implementors must implement the processRequest method that accepts request-scoped processor state. + */ +public interface StatefulSearchRequestProcessor extends SearchRequestProcessor { + @Override + default SearchRequest processRequest(SearchRequest request) { + throw new UnsupportedOperationException(); + } + + @Override + SearchRequest processRequest(SearchRequest request, PipelineProcessingContext requestContext) throws Exception; +} diff --git a/server/src/main/java/org/opensearch/search/pipeline/StatefulSearchResponseProcessor.java b/server/src/main/java/org/opensearch/search/pipeline/StatefulSearchResponseProcessor.java new file mode 100644 index 0000000000000..f0842d24e1b56 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/pipeline/StatefulSearchResponseProcessor.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; + +/** + * A specialization of {@link SearchResponseProcessor} that makes use of the request-scoped processor state. + * Implementors must implement the processResponse method that accepts request-scoped processor state. 
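To show how the two stateful interfaces are meant to be paired, a sketch follows: a request processor stashes a value in the request-scoped context, and a response processor in the same pipeline reads it back. The class names and attribute key are hypothetical, and the classes are left abstract because Processor's metadata methods (getType and friends) are omitted for brevity.

import org.opensearch.action.search.SearchRequest;
import org.opensearch.action.search.SearchResponse;
import org.opensearch.search.pipeline.PipelineProcessingContext;
import org.opensearch.search.pipeline.StatefulSearchRequestProcessor;
import org.opensearch.search.pipeline.StatefulSearchResponseProcessor;

// Stashes the originally requested size before later processors rewrite the request.
abstract class StashSizeRequestProcessor implements StatefulSearchRequestProcessor {
    @Override
    public SearchRequest processRequest(SearchRequest request, PipelineProcessingContext ctx) {
        ctx.setAttribute("original_size", request.source() == null ? null : request.source().size());
        return request;
    }
}

// Reads the stashed value back while post-processing the response.
abstract class UseSizeResponseProcessor implements StatefulSearchResponseProcessor {
    @Override
    public SearchResponse processResponse(SearchRequest request, SearchResponse response, PipelineProcessingContext ctx) {
        Object originalSize = ctx.getAttribute("original_size"); // null if never set
        // ... use originalSize when transforming the response ...
        return response;
    }
}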
+ */
+public interface StatefulSearchResponseProcessor extends SearchResponseProcessor {
+    @Override
+    default SearchResponse processResponse(SearchRequest request, SearchResponse response) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    SearchResponse processResponse(SearchRequest request, SearchResponse response, PipelineProcessingContext requestContext)
+        throws Exception;
+}
diff --git a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java
index e1d41227a22f7..99169b42c05f0 100644
--- a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java
+++ b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java
@@ -185,20 +185,22 @@ Map<String, Map<String, Long>> buildSliceLevelBreakdown() {
                 }
                 final Map<String, Long> currentSliceLeafBreakdownMap = contexts.get(sliceLeaf).toBreakdownMap();
                 // get the count for current leaf timing type
+                final long sliceLeafTimingTypeCount = currentSliceLeafBreakdownMap.get(timingTypeCountKey);
                 currentSliceBreakdown.compute(
                     timingTypeCountKey,
-                    (key, value) -> (value == null)
-                        ? currentSliceLeafBreakdownMap.get(timingTypeCountKey)
-                        : value + currentSliceLeafBreakdownMap.get(timingTypeCountKey)
+                    (key, value) -> (value == null) ? sliceLeafTimingTypeCount : value + sliceLeafTimingTypeCount
                 );
-                // compute the sliceEndTime for timingType using max of endTime across slice leaves
-                final long sliceLeafTimingTypeEndTime = currentSliceLeafBreakdownMap.get(timingTypeStartKey)
-                    + currentSliceLeafBreakdownMap.get(timingType.toString());
-                currentSliceBreakdown.compute(
-                    timingTypeSliceEndTimeKey,
-                    (key, value) -> (value == null) ? sliceLeafTimingTypeEndTime : Math.max(value, sliceLeafTimingTypeEndTime)
-                );
+                if (sliceLeafTimingTypeCount == 0L) {
+                    // In a slice with multiple leaves, it is possible that one of the leaves has 0 invocations for a
+                    // specific breakdown type. We should skip the slice start/end time computation for any leaf with 0
+                    // invocations on a timing type, as 0 does not represent an actual timing.
+                    // For example, a slice may have 0 invocations for a breakdown type in its leading leaves. As another
+                    // example, consider a slice with three leaves: leaf A with a score count of 5, leaf B with a score
+                    // count of 0, and leaf C with a score count of 4. In this situation, we compute the timing type slice
+                    // start/end time based only on leaf A and leaf C, because leaf B has a start time of zero.
+                    continue;
+                }

                 // compute the sliceStartTime for timingType using min of startTime across slice leaves
                 final long sliceLeafTimingTypeStartTime = currentSliceLeafBreakdownMap.get(timingTypeStartKey);
@@ -206,6 +208,22 @@ Map<String, Map<String, Long>> buildSliceLevelBreakdown() {
                     timingTypeSliceStartTimeKey,
                     (key, value) -> (value == null) ? sliceLeafTimingTypeStartTime : Math.min(value, sliceLeafTimingTypeStartTime)
                 );
+
+                // compute the sliceEndTime for timingType using max of endTime across slice leaves
+                final long sliceLeafTimingTypeEndTime = sliceLeafTimingTypeStartTime + currentSliceLeafBreakdownMap.get(
+                    timingType.toString()
+                );
+                currentSliceBreakdown.compute(
+                    timingTypeSliceEndTimeKey,
+                    (key, value) -> (value == null) ? sliceLeafTimingTypeEndTime : Math.max(value, sliceLeafTimingTypeEndTime)
+                );
+            }
+            // Only when we've checked all leaves in a slice and still find no invocations should we set the slice
+            // start/end time to the default 0L.
This is because buildQueryBreakdownMap expects timingTypeSliceStartTimeKey and
+            // timingTypeSliceEndTimeKey in the slice level breakdowns.
+            if (currentSliceBreakdown.get(timingTypeCountKey) != null && currentSliceBreakdown.get(timingTypeCountKey) == 0L) {
+                currentSliceBreakdown.put(timingTypeSliceStartTimeKey, 0L);
+                currentSliceBreakdown.put(timingTypeSliceEndTimeKey, 0L);
             }
             // compute sliceMaxEndTime as max of sliceEndTime across all timing types
             sliceMaxEndTime = Math.max(sliceMaxEndTime, currentSliceBreakdown.getOrDefault(timingTypeSliceEndTimeKey, Long.MIN_VALUE));
diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java
index 955ec71f30360..9d2c7eb882fa1 100644
--- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java
+++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java
@@ -429,6 +429,10 @@ public ClusterState execute(ClusterState currentState) {
                     createIndexService.validateIndexName(renamedIndexName, currentState);
                     createIndexService.validateDotIndex(renamedIndexName, isHidden);
                     createIndexService.validateIndexSettings(renamedIndexName, snapshotIndexMetadata.getSettings(), false);
+                    MetadataCreateIndexService.validateRefreshIntervalSettings(
+                        snapshotIndexMetadata.getSettings(),
+                        clusterSettings
+                    );
                     IndexMetadata.Builder indexMdBuilder = IndexMetadata.builder(snapshotIndexMetadata)
                         .state(IndexMetadata.State.OPEN)
                         .index(renamedIndexName);
@@ -578,6 +582,7 @@ public ClusterState execute(ClusterState currentState) {
                 if (metadata.templates() != null) {
                     // TODO: Should all existing templates be deleted first?
                     for (final IndexTemplateMetadata cursor : metadata.templates().values()) {
+                        MetadataCreateIndexService.validateRefreshIntervalSettings(cursor.settings(), clusterSettings);
                         mdBuilder.put(cursor);
                     }
                 }
diff --git a/server/src/main/java/org/opensearch/transport/TransportInterceptor.java b/server/src/main/java/org/opensearch/transport/TransportInterceptor.java
index 9ee2db6d39893..e8efbeb7de3f9 100644
--- a/server/src/main/java/org/opensearch/transport/TransportInterceptor.java
+++ b/server/src/main/java/org/opensearch/transport/TransportInterceptor.java
@@ -35,6 +35,7 @@
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.core.common.io.stream.Writeable.Reader;
 import org.opensearch.core.transport.TransportResponse;
+import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType;

 /**
  * This interface allows plugins to intercept requests on both the sender and the receiver side.
@@ -57,6 +58,19 @@ default TransportRequestHandler interceptHandler
         return actualHandler;
     }

+    /**
+     * This is called for handlers that need admission control support
+     */
+    default <T extends TransportRequest> TransportRequestHandler<T> interceptHandler(
+        String action,
+        String executor,
+        boolean forceExecution,
+        TransportRequestHandler<T> actualHandler,
+        AdmissionControlActionType admissionControlActionType
+    ) {
+        return interceptHandler(action, executor, forceExecution, actualHandler);
+    }
+
     /**
      * This is called up-front providing the actual low level {@link AsyncSender} that performs the low level send request.
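For plugin or module authors, a hypothetical usage sketch of the admission-control-aware registration added to TransportService below. The action name and ExampleRequest type are invented for illustration; only the parameter order of the new overload is taken from the change itself.

// Assumes an ExampleRequest extends TransportRequest and has a StreamInput constructor.
transportService.registerRequestHandler(
    "internal/example/write",               // action
    ThreadPool.Names.WRITE,                 // executor
    false,                                  // forceExecution
    true,                                   // canTripCircuitBreaker
    AdmissionControlActionType.INDEXING,    // apply indexing-side resource limits
    ExampleRequest::new,                    // Writeable.Reader for the request
    (request, channel, task) -> channel.sendResponse(TransportResponse.Empty.INSTANCE)
);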
* The returned sender is used to send all requests that come in via diff --git a/server/src/main/java/org/opensearch/transport/TransportService.java b/server/src/main/java/org/opensearch/transport/TransportService.java index 5aeed72f306db..a1697b1898eeb 100644 --- a/server/src/main/java/org/opensearch/transport/TransportService.java +++ b/server/src/main/java/org/opensearch/transport/TransportService.java @@ -64,6 +64,7 @@ import org.opensearch.core.service.ReportingService; import org.opensearch.core.transport.TransportResponse; import org.opensearch.node.NodeClosedException; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskManager; import org.opensearch.telemetry.tracing.Span; @@ -1200,6 +1201,40 @@ public void registerRequestHandler( transport.registerRequestHandler(reg); } + /** + * Registers a new request handler with admission control support + * + * @param action The action the request handler is associated with + * @param executor The executor the request handling will be executed on + * @param forceExecution Force execution on the executor queue and never reject it + * @param canTripCircuitBreaker Check the request size and raise an exception in case the limit is breached. + * @param admissionControlActionType Admission control based on resource usage limits of provided action type + * @param requestReader The request class that will be used to construct new instances for streaming + * @param handler The handler itself that implements the request handling + */ + public void registerRequestHandler( + String action, + String executor, + boolean forceExecution, + boolean canTripCircuitBreaker, + AdmissionControlActionType admissionControlActionType, + Writeable.Reader requestReader, + TransportRequestHandler handler + ) { + validateActionName(action); + handler = interceptor.interceptHandler(action, executor, forceExecution, handler, admissionControlActionType); + RequestHandlerRegistry reg = new RequestHandlerRegistry<>( + action, + requestReader, + taskManager, + handler, + executor, + forceExecution, + canTripCircuitBreaker + ); + transport.registerRequestHandler(reg); + } + /** * called by the {@link Transport} implementation when an incoming request arrives but before * any parsing of it has happened (with the exception of the requestId and action) diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java index 80f4ebf5d737a..b8ab5c935fa34 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -66,6 +66,11 @@ import org.opensearch.node.NodeResourceUsageStats; import org.opensearch.node.NodesResourceUsageStats; import org.opensearch.node.ResponseCollectorService; +import org.opensearch.ratelimitting.admissioncontrol.controllers.AdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.controllers.CpuBasedAdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; +import org.opensearch.ratelimitting.admissioncontrol.stats.AdmissionControlStats; +import org.opensearch.ratelimitting.admissioncontrol.stats.AdmissionControllerStats; import org.opensearch.script.ScriptCacheStats; import org.opensearch.script.ScriptStats; import 
org.opensearch.test.OpenSearchTestCase; @@ -540,15 +545,44 @@ public void testSerialization() throws IOException { assertEquals(replicationStats.getTotalBytesBehind(), deserializedReplicationStats.getTotalBytesBehind()); assertEquals(replicationStats.getMaxReplicationLag(), deserializedReplicationStats.getMaxReplicationLag()); } + AdmissionControlStats admissionControlStats = nodeStats.getAdmissionControlStats(); + AdmissionControlStats deserializedAdmissionControlStats = deserializedNodeStats.getAdmissionControlStats(); + if (admissionControlStats == null) { + assertNull(deserializedAdmissionControlStats); + } else { + assertEquals( + admissionControlStats.getAdmissionControllerStatsList().size(), + deserializedAdmissionControlStats.getAdmissionControllerStatsList().size() + ); + AdmissionControllerStats admissionControllerStats = admissionControlStats.getAdmissionControllerStatsList().get(0); + AdmissionControllerStats deserializedAdmissionControllerStats = deserializedAdmissionControlStats + .getAdmissionControllerStatsList() + .get(0); + assertEquals( + admissionControllerStats.getAdmissionControllerName(), + deserializedAdmissionControllerStats.getAdmissionControllerName() + ); + assertEquals(1, (long) admissionControllerStats.getRejectionCount().get(AdmissionControlActionType.SEARCH.getType())); + assertEquals( + admissionControllerStats.getRejectionCount().get(AdmissionControlActionType.SEARCH.getType()), + deserializedAdmissionControllerStats.getRejectionCount().get(AdmissionControlActionType.SEARCH.getType()) + ); + + assertEquals(2, (long) admissionControllerStats.getRejectionCount().get(AdmissionControlActionType.INDEXING.getType())); + assertEquals( + admissionControllerStats.getRejectionCount().get(AdmissionControlActionType.INDEXING.getType()), + deserializedAdmissionControllerStats.getRejectionCount().get(AdmissionControlActionType.INDEXING.getType()) + ); + } } } } - public static NodeStats createNodeStats() { + public static NodeStats createNodeStats() throws IOException { return createNodeStats(false); } - public static NodeStats createNodeStats(boolean remoteStoreStats) { + public static NodeStats createNodeStats(boolean remoteStoreStats) throws IOException { DiscoveryNode node = new DiscoveryNode( "test_node", buildNewFakeTransportAddress(), @@ -862,6 +896,26 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) { clusterManagerThrottlingStats = new ClusterManagerThrottlingStats(); clusterManagerThrottlingStats.onThrottle("test-task", randomInt()); } + + AdmissionControlStats admissionControlStats = null; + if (frequently()) { + AdmissionController admissionController = new AdmissionController( + CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER, + null, + null + ) { + @Override + public void apply(String action, AdmissionControlActionType admissionControlActionType) { + return; + } + }; + admissionController.addRejectionCount(AdmissionControlActionType.SEARCH.getType(), 1); + admissionController.addRejectionCount(AdmissionControlActionType.INDEXING.getType(), 2); + AdmissionControllerStats stats = new AdmissionControllerStats(admissionController); + List statsList = new ArrayList(); + statsList.add(stats); + admissionControlStats = new AdmissionControlStats(statsList); + } ScriptCacheStats scriptCacheStats = scriptStats != null ? 
scriptStats.toScriptCacheStats() : null; WeightedRoutingStats weightedRoutingStats = null; @@ -899,7 +953,8 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) { null, null, segmentReplicationRejectionStats, - null + null, + admissionControlStats ); } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java index a0c45f95ef7c0..40a30342b86b9 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java @@ -41,6 +41,7 @@ import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.test.OpenSearchTestCase; +import java.io.IOException; import java.util.Arrays; import java.util.Collections; import java.util.Iterator; @@ -87,7 +88,13 @@ public void testNetworkTypesToXContent() throws Exception { } public void testIngestStats() throws Exception { - NodeStats nodeStats = randomValueOtherThanMany(n -> n.getIngestStats() == null, NodeStatsTests::createNodeStats); + NodeStats nodeStats = randomValueOtherThanMany(n -> n.getIngestStats() == null, () -> { + try { + return NodeStatsTests.createNodeStats(); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); SortedMap processorStats = new TreeMap<>(); nodeStats.getIngestStats().getProcessorStats().values().forEach(stats -> { diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerSupport.java b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerSupport.java new file mode 100644 index 0000000000000..58a4c4a4e555d --- /dev/null +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerSupport.java @@ -0,0 +1,22 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +/** + * Helper interface to access package protected {@link SearchRequestOperationsListener} from test cases. 
+ */ +public interface SearchRequestOperationsListenerSupport { + default void onPhaseStart(SearchRequestOperationsListener listener, SearchPhaseContext context) { + listener.onPhaseStart(context); + } + + default void onPhaseEnd(SearchRequestOperationsListener listener, SearchPhaseContext context) { + listener.onPhaseEnd(context, new SearchRequestContext()); + } +} diff --git a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java index f037b75dc16a3..ff47ec3015697 100644 --- a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java +++ b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java @@ -193,6 +193,7 @@ public void testFillDiskUsage() { null, null, null, + null, null ), new NodeStats( @@ -222,6 +223,7 @@ public void testFillDiskUsage() { null, null, null, + null, null ), new NodeStats( @@ -251,6 +253,7 @@ public void testFillDiskUsage() { null, null, null, + null, null ) ); @@ -311,6 +314,7 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, null ), new NodeStats( @@ -340,6 +344,7 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, null ), new NodeStats( @@ -369,6 +374,7 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, null ) ); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsAllocateUnassignedTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsAllocateUnassignedTests.java index ed178ed7e1526..0be1a1f36118d 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsAllocateUnassignedTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsAllocateUnassignedTests.java @@ -13,11 +13,16 @@ import org.opensearch.cluster.routing.RoutingNodes; import org.opensearch.cluster.routing.RoutingPool; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.routing.allocation.allocator.RemoteShardsBalancer; import java.util.HashMap; import java.util.Map; +import static org.opensearch.cluster.routing.UnassignedInfo.AllocationStatus.DECIDERS_NO; +import static org.opensearch.cluster.routing.UnassignedInfo.AllocationStatus.DECIDERS_THROTTLED; +import static org.opensearch.cluster.routing.UnassignedInfo.AllocationStatus.NO_ATTEMPT; + public class RemoteShardsAllocateUnassignedTests extends RemoteShardsBalancerBaseTestCase { /** @@ -89,6 +94,38 @@ public void testPrimaryAllocation() { } } + /** + * Test remote unassigned shard allocation when deciders make NO or THROTTLED decision. 
+ */ + public void testNoRemoteAllocation() { + final int localOnlyNodes = 10; + final int remoteCapableNodes = 5; + final int localIndices = 2; + final int remoteIndices = 1; + final ClusterState oldState = createInitialCluster(localOnlyNodes, remoteCapableNodes, localIndices, remoteIndices); + final boolean throttle = randomBoolean(); + final AllocationService service = this.createRejectRemoteAllocationService(throttle); + final ClusterState newState = allocateShardsAndBalance(oldState, service); + final RoutingNodes routingNodes = newState.getRoutingNodes(); + final RoutingAllocation allocation = getRoutingAllocation(newState, routingNodes); + + assertEquals(totalShards(remoteIndices), routingNodes.unassigned().size()); + + for (ShardRouting shard : newState.getRoutingTable().allShards()) { + if (RoutingPool.getShardPool(shard, allocation) == RoutingPool.REMOTE_CAPABLE) { + assertTrue(shard.unassigned()); + if (shard.primary()) { + final UnassignedInfo.AllocationStatus expect = throttle ? DECIDERS_THROTTLED : DECIDERS_NO; + assertEquals(expect, shard.unassignedInfo().getLastAllocationStatus()); + } else { + assertEquals(NO_ATTEMPT, shard.unassignedInfo().getLastAllocationStatus()); + } + } else { + assertFalse(shard.unassigned()); + } + } + } + /** * Test remote unassigned shard allocation when remote capable nodes fail to come up. */ diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java index dbb08a999877d..a1db6cd83ab6c 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java @@ -20,7 +20,9 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.RoutingNodes; +import org.opensearch.cluster.routing.RoutingPool; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.UnassignedInfo; @@ -28,6 +30,7 @@ import org.opensearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.opensearch.cluster.routing.allocation.decider.AllocationDecider; import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.opensearch.cluster.routing.allocation.decider.Decision; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; @@ -201,6 +204,36 @@ public AllocationService createRemoteCapableAllocationService(String excludeNode ); } + public AllocationService createRejectRemoteAllocationService(boolean throttle) { + Settings settings = Settings.Builder.EMPTY_SETTINGS; + return new OpenSearchAllocationTestCase.MockAllocationService( + createRejectRemoteAllocationDeciders(throttle), + new TestGatewayAllocator(), + createShardAllocator(settings), + EmptyClusterInfoService.INSTANCE, + SNAPSHOT_INFO_SERVICE_WITH_NO_SHARD_SIZES + ); + } + + public AllocationDeciders createRejectRemoteAllocationDeciders(boolean throttle) { + Settings settings = Settings.Builder.EMPTY_SETTINGS; + List deciders = new ArrayList<>( + ClusterModule.createAllocationDeciders(settings, EMPTY_CLUSTER_SETTINGS, 
Collections.emptyList()) + ); + deciders.add(new AllocationDecider() { + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { + if (RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getShardPool(shardRouting, allocation))) { + return throttle ? Decision.THROTTLE : Decision.NO; + } else { + return Decision.ALWAYS; + } + } + }); + Collections.shuffle(deciders, random()); + return new AllocationDeciders(deciders); + } + public AllocationDeciders createAllocationDeciders() { Settings settings = Settings.Builder.EMPTY_SETTINGS; return randomAllocationDeciders(settings, EMPTY_CLUSTER_SETTINGS, random()); diff --git a/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java b/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java index ab51cafb039c2..de4bdcac6c2b2 100644 --- a/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java +++ b/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java @@ -474,13 +474,29 @@ public List getTransportInterceptors( try { transportInterceptor.interceptHandler("foo/bar/boom", null, true, null); } catch (Exception e) { - assertEquals(0, called.get()); + assertEquals(1, called.get()); assertEquals(1, called1.get()); } + + coreTransportInterceptors = new ArrayList<>(); + coreTransportInterceptors.add(interceptor); + module = newNetworkModule(settings, coreTransportInterceptors, new NetworkPlugin() { + @Override + public List getTransportInterceptors( + NamedWriteableRegistry namedWriteableRegistry, + ThreadContext threadContext + ) { + assertNotNull(threadContext); + return Collections.singletonList(interceptor1); + } + }); + + transportInterceptor = module.getTransportInterceptor(); + try { transportInterceptor.interceptHandler("foo/baz/boom", null, false, null); } catch (Exception e) { - assertEquals(0, called.get()); + assertEquals(1, called.get()); assertEquals(2, called1.get()); } } diff --git a/server/src/test/java/org/opensearch/index/IndexServiceTests.java b/server/src/test/java/org/opensearch/index/IndexServiceTests.java index db9f4bd305c79..14451ef21726e 100644 --- a/server/src/test/java/org/opensearch/index/IndexServiceTests.java +++ b/server/src/test/java/org/opensearch/index/IndexServiceTests.java @@ -452,12 +452,7 @@ public void testAsyncTranslogTrimTaskOnClosedIndex() throws Exception { assertTrue(indexService.getTrimTranslogTask().mustReschedule()); final Engine readOnlyEngine = getEngine(indexService.getShard(0)); - assertBusy( - () -> assertThat( - readOnlyEngine.translogManager().getTranslogStats().getTranslogSizeInBytes(), - equalTo((long) Translog.DEFAULT_HEADER_SIZE_IN_BYTES) - ) - ); + assertBusy(() -> assertTrue(isTranslogEmpty(readOnlyEngine))); assertAcked(client().admin().indices().prepareOpen("test").setWaitForActiveShards(ActiveShardCount.DEFAULT)); @@ -467,6 +462,12 @@ public void testAsyncTranslogTrimTaskOnClosedIndex() throws Exception { assertThat(translog.stats().estimatedNumberOfOperations(), equalTo(0)); } + boolean isTranslogEmpty(Engine engine) { + long tlogSize = engine.translogManager().getTranslogStats().getTranslogSizeInBytes(); + // translog contains 1(or 2 in some corner cases) empty readers. 
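+        // Each empty reader contributes exactly one file header, so the size is a small multiple of DEFAULT_HEADER_SIZE_IN_BYTES.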
+ return tlogSize == Translog.DEFAULT_HEADER_SIZE_IN_BYTES || tlogSize == 2 * Translog.DEFAULT_HEADER_SIZE_IN_BYTES; + } + public void testIllegalFsyncInterval() { Settings settings = Settings.builder() .put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), "0ms") // disable diff --git a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java index 81d8bccb86c60..5b586524d0bfc 100644 --- a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java @@ -7801,7 +7801,9 @@ public void testMaxDocsOnPrimary() throws Exception { assertNotNull(result.getFailure()); assertThat( result.getFailure().getMessage(), - containsString("Number of documents in the index can't exceed [" + maxDocs + "]") + containsString( + "Number of documents in shard " + shardId + " exceeds the limit of [" + maxDocs + "] documents per shard" + ) ); assertThat(result.getSeqNo(), equalTo(UNASSIGNED_SEQ_NO)); assertThat(engine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(maxSeqNo)); diff --git a/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java b/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java index 98c7b8e4b2bde..52b272094cd86 100644 --- a/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java +++ b/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java @@ -35,6 +35,7 @@ import org.opensearch.action.search.SearchPhase; import org.opensearch.action.search.SearchPhaseContext; import org.opensearch.action.search.SearchPhaseName; +import org.opensearch.action.search.SearchRequestOperationsListenerSupport; import org.opensearch.action.search.SearchRequestStats; import org.opensearch.index.search.stats.SearchStats.Stats; import org.opensearch.test.OpenSearchTestCase; @@ -47,7 +48,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -public class SearchStatsTests extends OpenSearchTestCase { +public class SearchStatsTests extends OpenSearchTestCase implements SearchRequestOperationsListenerSupport { // https://github.com/elastic/elasticsearch/issues/7644 public void testShardLevelSearchGroupStats() throws Exception { @@ -84,8 +85,8 @@ public void testShardLevelSearchGroupStats() throws Exception { when(mockSearchPhase.getStartTimeInNanos()).thenReturn(System.nanoTime() - TimeUnit.SECONDS.toNanos(paramValue)); when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); for (int iterator = 0; iterator < paramValue; iterator++) { - testRequestStats.onPhaseStart(ctx); - testRequestStats.onPhaseEnd(ctx, null /* not needed */); + onPhaseStart(testRequestStats, ctx); + onPhaseEnd(testRequestStats, ctx); } } searchStats1.setSearchRequestStats(testRequestStats); diff --git a/server/src/test/java/org/opensearch/index/shard/CloseableRetryableRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/ReleasableRetryableRefreshListenerTests.java similarity index 66% rename from server/src/test/java/org/opensearch/index/shard/CloseableRetryableRefreshListenerTests.java rename to server/src/test/java/org/opensearch/index/shard/ReleasableRetryableRefreshListenerTests.java index 01242063caa77..a0641c365a2a1 100644 --- a/server/src/test/java/org/opensearch/index/shard/CloseableRetryableRefreshListenerTests.java +++ 
b/server/src/test/java/org/opensearch/index/shard/ReleasableRetryableRefreshListenerTests.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.unit.TimeValue; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; @@ -26,9 +27,9 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -public class CloseableRetryableRefreshListenerTests extends OpenSearchTestCase { +public class ReleasableRetryableRefreshListenerTests extends OpenSearchTestCase { - private static final Logger logger = LogManager.getLogger(CloseableRetryableRefreshListenerTests.class); + private static final Logger logger = LogManager.getLogger(ReleasableRetryableRefreshListenerTests.class); private ThreadPool threadPool; @@ -43,7 +44,7 @@ public void init() { public void testPerformAfterRefresh() throws IOException { CountDownLatch countDownLatch = new CountDownLatch(2); - CloseableRetryableRefreshListener testRefreshListener = new CloseableRetryableRefreshListener(mock(ThreadPool.class)) { + ReleasableRetryableRefreshListener testRefreshListener = new ReleasableRetryableRefreshListener(mock(ThreadPool.class)) { @Override protected boolean performAfterRefreshWithPermit(boolean didRefresh) { countDownLatch.countDown(); @@ -66,7 +67,7 @@ protected Logger getLogger() { // Second invocation of afterRefresh method testRefreshListener.afterRefresh(true); assertEquals(0, countDownLatch.getCount()); - testRefreshListener.close(); + testRefreshListener.drainRefreshes(); } /** @@ -75,7 +76,7 @@ protected Logger getLogger() { public void testCloseAfterRefresh() throws IOException { final int initialCount = randomIntBetween(10, 100); final CountDownLatch countDownLatch = new CountDownLatch(initialCount); - CloseableRetryableRefreshListener testRefreshListener = new CloseableRetryableRefreshListener(mock(ThreadPool.class)) { + ReleasableRetryableRefreshListener testRefreshListener = new ReleasableRetryableRefreshListener(mock(ThreadPool.class)) { @Override protected boolean performAfterRefreshWithPermit(boolean didRefresh) { countDownLatch.countDown(); @@ -98,7 +99,7 @@ protected Logger getLogger() { assertEquals(initialCount - refreshCount, countDownLatch.getCount()); // Closing the refresh listener so that no further afterRefreshes are executed going forward - testRefreshListener.close(); + testRefreshListener.drainRefreshes(); for (int i = 0; i < initialCount - refreshCount; i++) { testRefreshListener.afterRefresh(true); @@ -112,7 +113,7 @@ protected Logger getLogger() { public void testNoRetry() throws IOException { int initialCount = randomIntBetween(10, 100); final CountDownLatch countDownLatch = new CountDownLatch(initialCount); - CloseableRetryableRefreshListener testRefreshListener = new CloseableRetryableRefreshListener(mock(ThreadPool.class)) { + ReleasableRetryableRefreshListener testRefreshListener = new ReleasableRetryableRefreshListener(mock(ThreadPool.class)) { @Override protected boolean performAfterRefreshWithPermit(boolean didRefresh) { countDownLatch.countDown(); @@ -129,9 +130,9 @@ protected Logger getLogger() { }; testRefreshListener.afterRefresh(true); assertEquals(initialCount - 1, countDownLatch.getCount()); - testRefreshListener.close(); + testRefreshListener.drainRefreshes(); - testRefreshListener = new CloseableRetryableRefreshListener(threadPool) { + testRefreshListener = new 
ReleasableRetryableRefreshListener(threadPool) { @Override protected boolean performAfterRefreshWithPermit(boolean didRefresh) { countDownLatch.countDown(); @@ -148,9 +149,9 @@ protected Logger getLogger() { }; testRefreshListener.afterRefresh(true); assertEquals(initialCount - 2, countDownLatch.getCount()); - testRefreshListener.close(); + testRefreshListener.drainRefreshes(); - testRefreshListener = new CloseableRetryableRefreshListener(threadPool) { + testRefreshListener = new ReleasableRetryableRefreshListener(threadPool) { @Override protected boolean performAfterRefreshWithPermit(boolean didRefresh) { countDownLatch.countDown(); @@ -172,9 +173,9 @@ protected Logger getLogger() { }; testRefreshListener.afterRefresh(true); assertEquals(initialCount - 3, countDownLatch.getCount()); - testRefreshListener.close(); + testRefreshListener.drainRefreshes(); - testRefreshListener = new CloseableRetryableRefreshListener(threadPool) { + testRefreshListener = new ReleasableRetryableRefreshListener(threadPool) { @Override protected boolean performAfterRefreshWithPermit(boolean didRefresh) { countDownLatch.countDown(); @@ -196,7 +197,7 @@ protected Logger getLogger() { }; testRefreshListener.afterRefresh(true); assertEquals(initialCount - 4, countDownLatch.getCount()); - testRefreshListener.close(); + testRefreshListener.drainRefreshes(); } /** @@ -205,7 +206,7 @@ protected Logger getLogger() { public void testRetry() throws Exception { int initialCount = randomIntBetween(10, 20); final CountDownLatch countDownLatch = new CountDownLatch(initialCount); - CloseableRetryableRefreshListener testRefreshListener = new CloseableRetryableRefreshListener(threadPool) { + ReleasableRetryableRefreshListener testRefreshListener = new ReleasableRetryableRefreshListener(threadPool) { @Override protected boolean performAfterRefreshWithPermit(boolean didRefresh) { countDownLatch.countDown(); @@ -237,7 +238,7 @@ protected boolean isRetryEnabled() { }; testRefreshListener.afterRefresh(true); assertBusy(() -> assertEquals(0, countDownLatch.getCount())); - testRefreshListener.close(); + testRefreshListener.drainRefreshes(); } /** @@ -246,7 +247,7 @@ protected boolean isRetryEnabled() { public void testCloseWithRetryPending() throws IOException { int initialCount = randomIntBetween(10, 20); final CountDownLatch countDownLatch = new CountDownLatch(initialCount); - CloseableRetryableRefreshListener testRefreshListener = new CloseableRetryableRefreshListener(threadPool) { + ReleasableRetryableRefreshListener testRefreshListener = new ReleasableRetryableRefreshListener(threadPool) { @Override protected boolean performAfterRefreshWithPermit(boolean didRefresh) { countDownLatch.countDown(); @@ -272,13 +273,14 @@ protected Logger getLogger() { } }; testRefreshListener.afterRefresh(randomBoolean()); - testRefreshListener.close(); + testRefreshListener.drainRefreshes(); assertNotEquals(0, countDownLatch.getCount()); + assertRefreshListenerClosed(testRefreshListener); } public void testCloseWaitsForAcquiringAllPermits() throws Exception { final CountDownLatch countDownLatch = new CountDownLatch(1); - CloseableRetryableRefreshListener testRefreshListener = new CloseableRetryableRefreshListener(threadPool) { + ReleasableRetryableRefreshListener testRefreshListener = new ReleasableRetryableRefreshListener(threadPool) { @Override protected boolean performAfterRefreshWithPermit(boolean didRefresh) { try { @@ -307,13 +309,14 @@ protected Logger getLogger() { }); thread.start(); assertBusy(() -> assertEquals(0, 
countDownLatch.getCount())); - testRefreshListener.close(); + testRefreshListener.drainRefreshes(); + assertRefreshListenerClosed(testRefreshListener); } public void testScheduleRetryAfterClose() throws Exception { // This tests that once the listener has been closed, even the retries would not be scheduled. final AtomicLong runCount = new AtomicLong(); - CloseableRetryableRefreshListener testRefreshListener = new CloseableRetryableRefreshListener(threadPool) { + ReleasableRetryableRefreshListener testRefreshListener = new ReleasableRetryableRefreshListener(threadPool) { @Override protected boolean performAfterRefreshWithPermit(boolean didRefresh) { try { @@ -358,8 +361,8 @@ protected TimeValue getNextRetryInterval() { Thread thread2 = new Thread(() -> { try { Thread.sleep(500); - testRefreshListener.close(); - } catch (IOException | InterruptedException e) { + testRefreshListener.drainRefreshes(); + } catch (InterruptedException e) { throw new AssertionError(e); } }); @@ -368,13 +371,14 @@ protected TimeValue getNextRetryInterval() { thread1.join(); thread2.join(); assertBusy(() -> assertEquals(1, runCount.get())); + assertRefreshListenerClosed(testRefreshListener); } public void testConcurrentScheduleRetry() throws Exception { // This tests that there can be only 1 retry that can be scheduled at a time. final AtomicLong runCount = new AtomicLong(); final AtomicInteger retryCount = new AtomicInteger(0); - CloseableRetryableRefreshListener testRefreshListener = new CloseableRetryableRefreshListener(threadPool) { + ReleasableRetryableRefreshListener testRefreshListener = new ReleasableRetryableRefreshListener(threadPool) { @Override protected boolean performAfterRefreshWithPermit(boolean didRefresh) { retryCount.incrementAndGet(); @@ -408,7 +412,8 @@ protected boolean isRetryEnabled() { testRefreshListener.afterRefresh(true); testRefreshListener.afterRefresh(true); assertBusy(() -> assertEquals(3, runCount.get())); - testRefreshListener.close(); + testRefreshListener.drainRefreshes(); + assertRefreshListenerClosed(testRefreshListener); } public void testExceptionDuringThreadPoolSchedule() throws Exception { @@ -417,7 +422,7 @@ public void testExceptionDuringThreadPoolSchedule() throws Exception { AtomicInteger runCount = new AtomicInteger(); ThreadPool mockThreadPool = mock(ThreadPool.class); when(mockThreadPool.schedule(any(), any(), any())).thenThrow(new RuntimeException()); - CloseableRetryableRefreshListener testRefreshListener = new CloseableRetryableRefreshListener(mockThreadPool) { + ReleasableRetryableRefreshListener testRefreshListener = new ReleasableRetryableRefreshListener(mockThreadPool) { @Override protected boolean performAfterRefreshWithPermit(boolean didRefresh) { runCount.incrementAndGet(); @@ -450,7 +455,121 @@ protected boolean isRetryEnabled() { assertThrows(RuntimeException.class, () -> testRefreshListener.afterRefresh(true)); assertBusy(() -> assertFalse(testRefreshListener.getRetryScheduledStatus())); assertEquals(1, runCount.get()); - testRefreshListener.close(); + testRefreshListener.drainRefreshes(); + assertRefreshListenerClosed(testRefreshListener); + } + + public void testTimeoutDuringClose() throws Exception { + // This test checks the expected behaviour when the drainRefreshes times out. 
+ ReleasableRetryableRefreshListener testRefreshListener = new ReleasableRetryableRefreshListener(mock(ThreadPool.class)) { + @Override + protected boolean performAfterRefreshWithPermit(boolean didRefresh) { + try { + Thread.sleep(TimeValue.timeValueSeconds(2).millis()); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + return true; + } + + @Override + public void beforeRefresh() {} + + @Override + protected Logger getLogger() { + return logger; + } + + @Override + TimeValue getDrainTimeout() { + return TimeValue.timeValueSeconds(1); + } + }; + Thread thread1 = new Thread(() -> { + try { + testRefreshListener.afterRefresh(true); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + thread1.start(); + assertBusy(() -> assertEquals(0, testRefreshListener.availablePermits())); + RuntimeException ex = assertThrows(RuntimeException.class, testRefreshListener::drainRefreshes); + assertEquals("Failed to acquire all permits", ex.getMessage()); + thread1.join(); + } + + public void testThreadInterruptDuringClose() throws Exception { + // This test checks the expected behaviour when the thread performing the drainRefresh is interrupted. + CountDownLatch latch = new CountDownLatch(2); + ReleasableRetryableRefreshListener testRefreshListener = new ReleasableRetryableRefreshListener(mock(ThreadPool.class)) { + @Override + protected boolean performAfterRefreshWithPermit(boolean didRefresh) { + try { + Thread.sleep(TimeValue.timeValueSeconds(2).millis()); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + return true; + } + + @Override + public void beforeRefresh() {} + + @Override + protected Logger getLogger() { + return logger; + } + + @Override + TimeValue getDrainTimeout() { + return TimeValue.timeValueSeconds(2); + } + }; + Thread thread1 = new Thread(() -> { + try { + testRefreshListener.afterRefresh(true); + latch.countDown(); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + Thread thread2 = new Thread(() -> { + RuntimeException ex = assertThrows(RuntimeException.class, testRefreshListener::drainRefreshes); + assertEquals("Failed to acquire all permits", ex.getMessage()); + latch.countDown(); + }); + thread1.start(); + assertBusy(() -> assertEquals(0, testRefreshListener.availablePermits())); + thread2.start(); + thread2.interrupt(); + thread1.join(); + thread2.join(); + assertEquals(0, latch.getCount()); + } + + public void testResumeRefreshesAfterDrainRefreshes() { + // This test checks the expected behaviour when the refresh listener is drained, but then refreshes are resumed again + // by closing the releasables acquired by calling the drainRefreshes method. 
+ ReleasableRetryableRefreshListener testRefreshListener = new ReleasableRetryableRefreshListener(mock(ThreadPool.class)) { + @Override + protected boolean performAfterRefreshWithPermit(boolean didRefresh) { + return true; + } + + @Override + public void beforeRefresh() {} + + @Override + protected Logger getLogger() { + return logger; + } + }; + assertRefreshListenerOpen(testRefreshListener); + Releasable releasable = testRefreshListener.drainRefreshes(); + assertRefreshListenerClosed(testRefreshListener); + releasable.close(); + assertRefreshListenerOpen(testRefreshListener); } @After @@ -458,4 +577,14 @@ public void tearDown() throws Exception { super.tearDown(); terminate(threadPool); } + + private void assertRefreshListenerClosed(ReleasableRetryableRefreshListener testRefreshListener) { + assertTrue(testRefreshListener.isClosed()); + assertEquals(0, testRefreshListener.availablePermits()); + } + + private void assertRefreshListenerOpen(ReleasableRetryableRefreshListener testRefreshListener) { + assertFalse(testRefreshListener.isClosed()); + assertEquals(1, testRefreshListener.availablePermits()); + } } diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java index 51814283c5eb3..811d6a722d0f6 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java @@ -33,6 +33,7 @@ import org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils; import org.opensearch.index.store.Store; import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; +import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.threadpool.ThreadPool; @@ -237,7 +238,7 @@ public void testAfterMultipleCommits() throws IOException { setup(true, 3); assertDocs(indexShard, "1", "2", "3"); - for (int i = 0; i < RemoteStoreRefreshListener.LAST_N_METADATA_FILES_TO_KEEP + 3; i++) { + for (int i = 0; i < indexShard.getRecoverySettings().getMinRemoteSegmentMetadataFiles() + 3; i++) { indexDocs(4 * (i + 1), 4); flushShard(indexShard); } @@ -319,7 +320,7 @@ public void testRefreshSuccessOnFirstAttempt() throws Exception { // This is the case of isRetry=false, shouldRetry=false // Succeed on 1st attempt int succeedOnAttempt = 1; - // We spy on IndexShard.getReplicationTracker() to validate that we have tried running remote time as per the expectation. + // We spy on IndexShard.isPrimaryStarted() to validate that we have tried running remote time as per the expectation. CountDownLatch refreshCountLatch = new CountDownLatch(succeedOnAttempt); // We spy on IndexShard.getEngine() to validate that we have successfully hit the terminal code for ascertaining successful upload. // Value has been set as 3 as during a successful upload IndexShard.getEngine() is hit thrice and with mockito we are counting down @@ -340,7 +341,7 @@ public void testRefreshSuccessOnSecondAttempt() throws Exception { // This covers 2 cases - 1) isRetry=false, shouldRetry=true 2) isRetry=true, shouldRetry=false // Succeed on 2nd attempt int succeedOnAttempt = 2; - // We spy on IndexShard.getReplicationTracker() to validate that we have tried running remote time as per the expectation. 
+ // We spy on IndexShard.isPrimaryStarted() to validate that we have tried running remote time as per the expectation. CountDownLatch refreshCountLatch = new CountDownLatch(succeedOnAttempt); // We spy on IndexShard.getEngine() to validate that we have successfully hit the terminal code for ascertaining successful upload. // Value has been set as 3 as during a successful upload IndexShard.getEngine() is hit thrice and with mockito we are counting down @@ -364,7 +365,7 @@ public void testRefreshSuccessOnSecondAttempt() throws Exception { public void testRefreshSuccessAfterFailureInFirstAttemptAfterSnapshotAndMetadataUpload() throws Exception { int succeedOnAttempt = 1; int checkpointPublishSucceedOnAttempt = 2; - // We spy on IndexShard.getReplicationTracker() to validate that we have tried running remote time as per the expectation. + // We spy on IndexShard.isPrimaryStarted() to validate that we have tried running remote time as per the expectation. CountDownLatch refreshCountLatch = new CountDownLatch(succeedOnAttempt); // We spy on IndexShard.getEngine() to validate that we have successfully hit the terminal code for ascertaining successful upload. // Value has been set as 6 as during a successful upload IndexShard.getEngine() is hit thrice and here we are running the flow twice @@ -386,7 +387,7 @@ public void testRefreshSuccessOnThirdAttempt() throws Exception { // This covers 3 cases - 1) isRetry=false, shouldRetry=true 2) isRetry=true, shouldRetry=false 3) isRetry=True, shouldRetry=true // Succeed on 3rd attempt int succeedOnAttempt = 3; - // We spy on IndexShard.getReplicationTracker() to validate that we have tried running remote time as per the expectation. + // We spy on IndexShard.isPrimaryStarted() to validate that we have tried running remote time as per the expectation. CountDownLatch refreshCountLatch = new CountDownLatch(succeedOnAttempt); // We spy on IndexShard.getEngine() to validate that we have successfully hit the terminal code for ascertaining successful upload. 
// Value has been set as 3 as during a successful upload IndexShard.getEngine() is hit thrice and with mockito we are counting down @@ -478,6 +479,7 @@ private Tuple mockIn IndexShard shard = mock(IndexShard.class); Store store = mock(Store.class); when(shard.store()).thenReturn(store); + when(shard.state()).thenReturn(IndexShardState.STARTED); when(store.directory()).thenReturn(indexShard.store().directory()); // Mock (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore().directory()) @@ -499,13 +501,12 @@ private Tuple mockIn when(shard.getThreadPool()).thenReturn(threadPool); // Mock indexShard.getReplicationTracker().isPrimaryMode() - doAnswer(invocation -> { if (Objects.nonNull(refreshCountLatch)) { refreshCountLatch.countDown(); } - return indexShard.getReplicationTracker(); - }).when(shard).getReplicationTracker(); + return true; + }).when(shard).isStartedPrimary(); AtomicLong counter = new AtomicLong(); // Mock indexShard.getSegmentInfosSnapshot() @@ -550,6 +551,9 @@ private Tuple mockIn RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory = indexShard.getRemoteStoreStatsTrackerFactory(); when(shard.indexSettings()).thenReturn(indexShard.indexSettings()); when(shard.shardId()).thenReturn(indexShard.shardId()); + RecoverySettings recoverySettings = mock(RecoverySettings.class); + when(recoverySettings.getMinRemoteSegmentMetadataFiles()).thenReturn(10); + when(shard.getRecoverySettings()).thenReturn(recoverySettings); RemoteSegmentTransferTracker tracker = remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(indexShard.shardId()); RemoteStoreRefreshListener refreshListener = new RemoteStoreRefreshListener(shard, emptyCheckpointPublisher, tracker); refreshListener.afterRefresh(true); diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java index 3cb65610fab58..6bfab278993ed 100644 --- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java @@ -25,6 +25,8 @@ import org.opensearch.common.blobstore.fs.FsBlobContainer; import org.opensearch.common.blobstore.fs.FsBlobStore; import org.opensearch.common.bytes.ReleasableBytesReference; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.AbstractRunnable; @@ -125,7 +127,6 @@ public class RemoteFsTranslogTests extends OpenSearchTestCase { private ThreadPool threadPool; private final static String METADATA_DIR = "metadata"; private final static String DATA_DIR = "data"; - AtomicInteger writeCalls = new AtomicInteger(); BlobStoreRepository repository; @@ -133,6 +134,8 @@ public class RemoteFsTranslogTests extends OpenSearchTestCase { TestTranslog.FailSwitch fail; + TestTranslog.SlowDownWriteSwitch slowDown; + private LongConsumer getPersistedSeqNoConsumer() { return seqNo -> { final LongConsumer consumer = persistedSeqNoConsumer.get(); @@ -228,13 +231,15 @@ private BlobStoreRepository createRepository() { final ClusterService clusterService = BlobStoreTestUtil.mockClusterService(repositoryMetadata); fail = new TestTranslog.FailSwitch(); fail.failNever(); + slowDown = new TestTranslog.SlowDownWriteSwitch(); final FsRepository repository = new ThrowingBlobRepository( 
repositoryMetadata, createEnvironment(), xContentRegistry(), clusterService, new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), - fail + fail, + slowDown ) { @Override protected void assertSnapshotOrGenericThread() { @@ -819,6 +824,79 @@ public void testMetadataFileDeletion() throws Exception { } } + public void testDrainSync() throws Exception { + // This test checks the following scenarios - + // 1. During ongoing uploads, the available permits are 0. + // 2. During an upload, if drainSync is called, it will wait until it acquires all permits and the available permits are 0. + // 3. After drainSync, if trimUnreferencedReaders is attempted, we do not delete from remote store. + // 4. After drainSync, if an upload is attempted, we do not upload to remote store. + ArrayList<Translog.Operation> ops = new ArrayList<>(); + assertEquals(0, translog.allUploaded().size()); + assertEquals(1, translog.readers.size()); + + addToTranslogAndListAndUpload(translog, ops, new Translog.Index(String.valueOf(0), 0, primaryTerm.get(), new byte[] { 1 })); + assertEquals(4, translog.allUploaded().size()); + assertEquals(2, translog.readers.size()); + assertBusy(() -> assertEquals(1, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size())); + + translog.setMinSeqNoToKeep(0); + translog.trimUnreferencedReaders(); + assertEquals(1, translog.readers.size()); + + // Case 1 - During ongoing uploads, the available permits are 0. + slowDown.setSleepSeconds(2); + CountDownLatch latch = new CountDownLatch(1); + Thread thread1 = new Thread(() -> { + try { + addToTranslogAndListAndUpload(translog, ops, new Translog.Index(String.valueOf(1), 1, primaryTerm.get(), new byte[] { 1 })); + assertEquals(2, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size()); + latch.countDown(); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + thread1.start(); + assertBusy(() -> assertEquals(0, translog.availablePermits())); + // Case 2 - During an upload, if drainSync is called, it will wait until it acquires all permits and the available permits are 0. + Releasable releasable = translog.drainSync(); + assertBusy(() -> assertEquals(0, latch.getCount())); + assertEquals(0, translog.availablePermits()); + slowDown.setSleepSeconds(0); + assertEquals(6, translog.allUploaded().size()); + assertEquals(2, translog.readers.size()); + Set<String> mdFiles = blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)); + + // Case 3 - After drainSync, if trimUnreferencedReaders is attempted, we do not delete from remote store. + translog.setMinSeqNoToKeep(1); + translog.trimUnreferencedReaders(); + assertEquals(1, translog.readers.size()); + assertEquals(6, translog.allUploaded().size()); + assertEquals(mdFiles, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR))); + + // Case 4 - After drainSync, if an upload is attempted, we do not upload to remote store.
+ Translog.Location loc = addToTranslogAndListAndUpload( + translog, + ops, + new Translog.Index(String.valueOf(2), 2, primaryTerm.get(), new byte[] { 1 }) + ); + assertEquals(1, translog.readers.size()); + assertEquals(6, translog.allUploaded().size()); + assertEquals(mdFiles, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR))); + + // Refill the permits back + Releasables.close(releasable); + addToTranslogAndListAndUpload(translog, ops, new Translog.Index(String.valueOf(3), 3, primaryTerm.get(), new byte[] { 1 })); + assertEquals(2, translog.readers.size()); + assertEquals(8, translog.allUploaded().size()); + assertEquals(3, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size()); + + translog.setMinSeqNoToKeep(3); + translog.trimUnreferencedReaders(); + assertEquals(1, translog.readers.size()); + assertBusy(() -> assertEquals(4, translog.allUploaded().size())); + assertBusy(() -> assertEquals(1, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size())); + } + private BlobPath getTranslogDirectory() { return repository.basePath().add(shardId.getIndex().getUUID()).add(String.valueOf(shardId.id())).add(TRANSLOG); } @@ -1624,9 +1702,10 @@ public void testDownloadWithRetries() throws IOException { } public class ThrowingBlobRepository extends FsRepository { - private final Environment environment; - private TestTranslog.FailSwitch fail; + private final Environment environment; + private final TestTranslog.FailSwitch fail; + private final TestTranslog.SlowDownWriteSwitch slowDown; public ThrowingBlobRepository( RepositoryMetadata metadata, @@ -1634,33 +1713,43 @@ public ThrowingBlobRepository( NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, RecoverySettings recoverySettings, - TestTranslog.FailSwitch fail + TestTranslog.FailSwitch fail, + TestTranslog.SlowDownWriteSwitch slowDown ) { super(metadata, environment, namedXContentRegistry, clusterService, recoverySettings); this.environment = environment; this.fail = fail; + this.slowDown = slowDown; } protected BlobStore createBlobStore() throws Exception { final String location = REPOSITORIES_LOCATION_SETTING.get(getMetadata().settings()); final Path locationFile = environment.resolveRepoFile(location); - return new ThrowingBlobStore(bufferSize, locationFile, isReadOnly(), fail); + return new ThrowingBlobStore(bufferSize, locationFile, isReadOnly(), fail, slowDown); } } private class ThrowingBlobStore extends FsBlobStore { - private TestTranslog.FailSwitch fail; + private final TestTranslog.FailSwitch fail; + private final TestTranslog.SlowDownWriteSwitch slowDown; - public ThrowingBlobStore(int bufferSizeInBytes, Path path, boolean readonly, TestTranslog.FailSwitch fail) throws IOException { + public ThrowingBlobStore( + int bufferSizeInBytes, + Path path, + boolean readonly, + TestTranslog.FailSwitch fail, + TestTranslog.SlowDownWriteSwitch slowDown + ) throws IOException { super(bufferSizeInBytes, path, readonly); this.fail = fail; + this.slowDown = slowDown; } @Override public BlobContainer blobContainer(BlobPath path) { try { - return new ThrowingBlobContainer(this, path, buildAndCreate(path), fail); + return new ThrowingBlobContainer(this, path, buildAndCreate(path), fail, slowDown); } catch (IOException ex) { throw new OpenSearchException("failed to create blob container", ex); } @@ -1670,17 +1759,33 @@ public BlobContainer blobContainer(BlobPath path) { private class ThrowingBlobContainer extends FsBlobContainer { private 
TestTranslog.FailSwitch fail; - - public ThrowingBlobContainer(FsBlobStore blobStore, BlobPath blobPath, Path path, TestTranslog.FailSwitch fail) { + private final TestTranslog.SlowDownWriteSwitch slowDown; + + public ThrowingBlobContainer( + FsBlobStore blobStore, + BlobPath blobPath, + Path path, + TestTranslog.FailSwitch fail, + TestTranslog.SlowDownWriteSwitch slowDown + ) { super(blobStore, blobPath, path); this.fail = fail; + this.slowDown = slowDown; } + @Override public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize, boolean failIfAlreadyExists) throws IOException { if (fail.fail()) { throw new IOException("blob container throwing error"); } + if (slowDown.getSleepSeconds() > 0) { + try { + Thread.sleep(slowDown.getSleepSeconds() * 1000L); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } super.writeBlobAtomic(blobName, inputStream, blobSize, failIfAlreadyExists); } } diff --git a/server/src/test/java/org/opensearch/index/translog/TestTranslog.java b/server/src/test/java/org/opensearch/index/translog/TestTranslog.java index fd4be1d7a8635..01c8844b51b02 100644 --- a/server/src/test/java/org/opensearch/index/translog/TestTranslog.java +++ b/server/src/test/java/org/opensearch/index/translog/TestTranslog.java @@ -335,6 +335,18 @@ public void onceFailedFailAlways() { } } + static class SlowDownWriteSwitch { + private volatile int sleepSeconds; + + public void setSleepSeconds(int sleepSeconds) { + this.sleepSeconds = sleepSeconds; + } + + public int getSleepSeconds() { + return sleepSeconds; + } + } + static class SortedSnapshot implements Translog.Snapshot { private final Translog.Snapshot snapshot; private List operations = null; diff --git a/server/src/test/java/org/opensearch/indices/recovery/RecoverySettingsDynamicUpdateTests.java b/server/src/test/java/org/opensearch/indices/recovery/RecoverySettingsDynamicUpdateTests.java index 75639661f539d..18e7dfb375132 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RecoverySettingsDynamicUpdateTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RecoverySettingsDynamicUpdateTests.java @@ -96,4 +96,49 @@ public void testInternalLongActionTimeout() { ); assertEquals(new TimeValue(duration, timeUnit), recoverySettings.internalActionLongTimeout()); } + + public void testSegmentMetadataRetention() { + // Default value + assertEquals(10, recoverySettings.getMinRemoteSegmentMetadataFiles()); + + // Setting value < default (10) + clusterSettings.applySettings( + Settings.builder().put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 5).build() + ); + assertEquals(5, recoverySettings.getMinRemoteSegmentMetadataFiles()); + + // Setting min value + clusterSettings.applySettings( + Settings.builder().put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), -1).build() + ); + assertEquals(-1, recoverySettings.getMinRemoteSegmentMetadataFiles()); + + // Setting value > default (10) + clusterSettings.applySettings( + Settings.builder().put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 15).build() + ); + assertEquals(15, recoverySettings.getMinRemoteSegmentMetadataFiles()); + + // Setting value to 0 should fail and retain the existing value + assertThrows( + IllegalArgumentException.class, + () -> clusterSettings.applySettings( + Settings.builder() + 
.put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 0) + .build() + ) + ); + assertEquals(15, recoverySettings.getMinRemoteSegmentMetadataFiles()); + + // Setting value < -1 should fail and retain the existing value + assertThrows( + IllegalArgumentException.class, + () -> clusterSettings.applySettings( + Settings.builder() + .put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), -5) + .build() + ) + ); + assertEquals(15, recoverySettings.getMinRemoteSegmentMetadataFiles()); + } } diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index 252f3975bab25..f284a425a417b 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -12,11 +12,18 @@ import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.Version; +import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; @@ -24,6 +31,7 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.transport.TransportResponse; +import org.opensearch.index.IndexService; import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.replication.TestReplicationSource; import org.opensearch.index.shard.IndexShard; @@ -51,6 +59,7 @@ import java.io.IOException; import java.util.Collections; import java.util.List; +import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; @@ -91,6 +100,8 @@ public class SegmentReplicationTargetServiceTests extends IndexShardTestCase { private SegmentReplicationState state; private ReplicationCheckpoint initialCheckpoint; + private ClusterState clusterState; + private static final long TRANSPORT_TIMEOUT = 30000;// 30sec @Override @@ -129,7 +140,7 @@ public void setUp() throws Exception { indicesService = mock(IndicesService.class); ClusterService clusterService = mock(ClusterService.class); - ClusterState clusterState = mock(ClusterState.class); + clusterState = mock(ClusterState.class); RoutingTable mockRoutingTable = mock(RoutingTable.class); when(clusterService.state()).thenReturn(clusterState); when(clusterState.routingTable()).thenReturn(mockRoutingTable); @@ -465,9 +476,22 @@ public void testStartReplicationListenerFailure() throws InterruptedException { verify(spy, (never())).updateVisibleCheckpoint(eq(0L), eq(replicaShard)); } - public void 
testDoNotProcessLatestCheckpointIfItIsbehind() { - sut.updateLatestReceivedCheckpoint(replicaShard.getLatestReplicationCheckpoint(), replicaShard); - assertFalse(sut.processLatestReceivedCheckpoint(replicaShard, null)); + public void testDoNotProcessLatestCheckpointIfCheckpointIsBehind() { + SegmentReplicationTargetService service = spy(sut); + doReturn(mock(SegmentReplicationTarget.class)).when(service).startReplication(any(), any(), any()); + ReplicationCheckpoint checkpoint = replicaShard.getLatestReplicationCheckpoint(); + service.updateLatestReceivedCheckpoint(checkpoint, replicaShard); + service.processLatestReceivedCheckpoint(replicaShard, null); + verify(service, times(0)).startReplication(eq(replicaShard), eq(checkpoint), any()); + } + + public void testProcessLatestCheckpointIfCheckpointAhead() { + SegmentReplicationTargetService service = spy(sut); + doNothing().when(service).startReplication(any()); + doReturn(mock(SegmentReplicationTarget.class)).when(service).startReplication(any(), any(), any()); + service.updateLatestReceivedCheckpoint(aheadCheckpoint, replicaShard); + service.processLatestReceivedCheckpoint(replicaShard, null); + verify(service, times(1)).startReplication(eq(replicaShard), eq(aheadCheckpoint), any()); } public void testOnNewCheckpointInvokedOnClosedShardDoesNothing() throws IOException { @@ -617,4 +641,46 @@ public void onReplicationFailure(SegmentReplicationState state, ReplicationFaile target.cancel("test"); sut.startReplication(target); } + + public void testProcessCheckpointOnClusterStateUpdate() { + // set up mocks on indices & index service to return our replica's index & shard. + IndexService indexService = mock(IndexService.class); + when(indexService.iterator()).thenReturn(Set.of(replicaShard).iterator()); + when(indexService.getIndexSettings()).thenReturn(replicaShard.indexSettings()); + when(indexService.index()).thenReturn(replicaShard.routingEntry().index()); + when(indicesService.iterator()).thenReturn(Set.of(indexService).iterator()); + + // create old & new cluster states + final String targetNodeId = "targetNodeId"; + ShardRouting initialRouting = primaryShard.routingEntry().relocate(targetNodeId, 0L); + assertEquals(ShardRoutingState.RELOCATING, initialRouting.state()); + + ShardRouting targetRouting = ShardRouting.newUnassigned( + primaryShard.shardId(), + true, + RecoverySource.PeerRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.REINITIALIZED, "test") + ).initialize(targetNodeId, initialRouting.allocationId().getId(), 0L).moveToStarted(); + assertEquals(targetNodeId, targetRouting.currentNodeId()); + assertEquals(ShardRoutingState.STARTED, targetRouting.state()); + ClusterState oldState = ClusterState.builder(ClusterName.DEFAULT) + .routingTable( + RoutingTable.builder() + .add(IndexRoutingTable.builder(primaryShard.shardId().getIndex()).addShard(initialRouting).build()) + .build() + ) + .build(); + ClusterState newState = ClusterState.builder(ClusterName.DEFAULT) + .routingTable( + RoutingTable.builder() + .add(IndexRoutingTable.builder(primaryShard.shardId().getIndex()).addShard(targetRouting).build()) + .build() + ) + .build(); + + // spy so we can verify process is invoked + SegmentReplicationTargetService spy = spy(sut); + spy.clusterChanged(new ClusterChangedEvent("ignored", oldState, newState)); + verify(spy, times(1)).processLatestReceivedCheckpoint(eq(replicaShard), any()); + } } diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlServiceTests.java
b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlServiceTests.java index bac4eaf3fd677..7a67ffc8c7c5d 100644 --- a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlServiceTests.java +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlServiceTests.java @@ -12,9 +12,10 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.ratelimitting.admissioncontrol.controllers.AdmissionController; -import org.opensearch.ratelimitting.admissioncontrol.controllers.CPUBasedAdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.controllers.CpuBasedAdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; -import org.opensearch.ratelimitting.admissioncontrol.settings.CPUBasedAdmissionControllerSettings; +import org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -46,13 +47,13 @@ public void tearDown() throws Exception { } public void testWhenAdmissionControllerRegistered() { - admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService.getClusterSettings(), threadPool); + admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService, threadPool, null); assertEquals(admissionControlService.getAdmissionControllers().size(), 1); } public void testRegisterInvalidAdmissionController() { String test = "TEST"; - admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService.getClusterSettings(), threadPool); + admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService, threadPool, null); assertEquals(admissionControlService.getAdmissionControllers().size(), 1); IllegalArgumentException ex = expectThrows( IllegalArgumentException.class, @@ -62,12 +63,12 @@ public void testRegisterInvalidAdmissionController() { } public void testAdmissionControllerSettings() { - admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService.getClusterSettings(), threadPool); + admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService, threadPool, null); AdmissionControlSettings admissionControlSettings = admissionControlService.admissionControlSettings; List<AdmissionController> admissionControllerList = admissionControlService.getAdmissionControllers(); assertEquals(admissionControllerList.size(), 1); - CPUBasedAdmissionController cpuBasedAdmissionController = (CPUBasedAdmissionController) admissionControlService - .getAdmissionController(CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER); + CpuBasedAdmissionController cpuBasedAdmissionController = (CpuBasedAdmissionController) admissionControlService + .getAdmissionController(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER); assertEquals( admissionControlSettings.isTransportLayerAdmissionControlEnabled(), cpuBasedAdmissionController.isEnabledForTransportLayer( @@ -90,7 +91,7 @@ public void testAdmissionControllerSettings() { Settings newSettings = Settings.builder() .put(settings) .put( - CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), +
CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.ENFORCED.getMode() ) .build(); @@ -105,36 +106,53 @@ public void testAdmissionControllerSettings() { public void testApplyAdmissionControllerDisabled() { this.action = "indices:data/write/bulk[s][p]"; - admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService.getClusterSettings(), threadPool); - admissionControlService.applyTransportAdmissionControl(this.action); + admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService, threadPool, null); + admissionControlService.applyTransportAdmissionControl(this.action, null); List<AdmissionController> admissionControllerList = admissionControlService.getAdmissionControllers(); - admissionControllerList.forEach(admissionController -> { assertEquals(admissionController.getRejectionCount(), 0); }); + admissionControllerList.forEach(admissionController -> { + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 0); + }); } public void testApplyAdmissionControllerEnabled() { this.action = "indices:data/write/bulk[s][p]"; - admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService.getClusterSettings(), threadPool); - admissionControlService.applyTransportAdmissionControl(this.action); + admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService, threadPool, null); + admissionControlService.applyTransportAdmissionControl(this.action, null); assertEquals( - admissionControlService.getAdmissionController(CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER) - .getRejectionCount(), + admissionControlService.getAdmissionController(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER) + .getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 0 ); Settings settings = Settings.builder() .put( - CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.MONITOR.getMode() ) .build(); clusterService.getClusterSettings().applySettings(settings); - admissionControlService.applyTransportAdmissionControl(this.action); List<AdmissionController> admissionControllerList = admissionControlService.getAdmissionControllers(); assertEquals(admissionControllerList.size(), 1); + } + + public void testApplyAdmissionControllerEnforced() { + this.action = "indices:data/write/bulk[s][p]"; + admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService, threadPool, null); + admissionControlService.applyTransportAdmissionControl(this.action, null); assertEquals( - admissionControlService.getAdmissionController(CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER) - .getRejectionCount(), - 1 + admissionControlService.getAdmissionController(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER) + .getRejectionCount(AdmissionControlActionType.INDEXING.getType()), + 0 ); + + Settings settings = Settings.builder() + .put( + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.MONITOR.getMode() + ) + .build(); + clusterService.getClusterSettings().applySettings(settings); + List<AdmissionController> admissionControllerList = admissionControlService.getAdmissionControllers(); + assertEquals(admissionControllerList.size(), 1); } } diff --git
a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSingleNodeTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSingleNodeTests.java new file mode 100644 index 0000000000000..a1694b2c3cee2 --- /dev/null +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSingleNodeTests.java @@ -0,0 +1,203 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol; + +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.admin.cluster.state.ClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.action.bulk.BulkRequestBuilder; +import org.opensearch.action.bulk.BulkResponse; +import org.opensearch.action.search.SearchPhaseExecutionException; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.action.ActionFuture; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.node.ResourceUsageCollectorService; +import org.opensearch.node.resource.tracker.ResourceTrackerSettings; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.ratelimitting.admissioncontrol.stats.AdmissionControllerStats; +import org.opensearch.test.OpenSearchSingleNodeTestCase; +import org.junit.After; + +import static org.opensearch.ratelimitting.admissioncontrol.AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE; +import static org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT; +import static org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.hamcrest.Matchers.is; + +/** + * Single node integration tests for admission control + */ +public class AdmissionControlSingleNodeTests extends OpenSearchSingleNodeTestCase { + + @Override + protected boolean resetNodeAfterTest() { + return true; + } + + @After + public void cleanup() { + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().putNull("*")) + .setTransientSettings(Settings.builder().putNull("*")) + ); + } + + @Override + protected Settings nodeSettings() { + return Settings.builder() + .put(super.nodeSettings()) + .put(ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), TimeValue.timeValueMillis(500)) + .put(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), TimeValue.timeValueMillis(500)) + .put(ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.ENFORCED) + .put(SEARCH_CPU_USAGE_LIMIT.getKey(), 0) + .put(INDEXING_CPU_USAGE_LIMIT.getKey(), 0) + .build(); + } + + public void testAdmissionControlRejectionEnforcedMode() throws Exception { + ensureGreen(); + assertBusy(() -> assertEquals(1, getInstanceFromNode(ResourceUsageCollectorService.class).getAllNodeStatistics().size())); + // Thread.sleep(700); + 
client().admin().indices().prepareCreate("index").execute().actionGet(); + BulkRequestBuilder bulk = client().prepareBulk(); + for (int i = 0; i < 3; i++) { + bulk.add(client().prepareIndex("index").setSource("foo", "bar " + i)); + } + // Verify that cluster state is updated + ActionFuture<ClusterStateResponse> future2 = client().admin().cluster().state(new ClusterStateRequest()); + assertThat(future2.isDone(), is(true)); + + // verify bulk request hits 429 + BulkResponse res = client().bulk(bulk.request()).actionGet(); + assertEquals(429, res.getItems()[0].getFailure().getStatus().getStatus()); + AdmissionControlService admissionControlService = getInstanceFromNode(AdmissionControlService.class); + AdmissionControllerStats acStats = admissionControlService.stats().getAdmissionControllerStatsList().get(0); + assertEquals(1, (long) acStats.getRejectionCount().get(AdmissionControlActionType.INDEXING.getType())); + client().admin().indices().prepareRefresh("index").get(); + + // verify search request hits 429 + SearchRequest searchRequest = new SearchRequest("index"); + try { + client().search(searchRequest).actionGet(); + } catch (Exception e) { + assertTrue(((SearchPhaseExecutionException) e).getDetailedMessage().contains("OpenSearchRejectedExecutionException")); + } + acStats = admissionControlService.stats().getAdmissionControllerStatsList().get(0); + assertEquals(1, (long) acStats.getRejectionCount().get(AdmissionControlActionType.SEARCH.getType())); + } + + public void testAdmissionControlRejectionMonitorOnlyMode() throws Exception { + assertBusy(() -> assertEquals(1, getInstanceFromNode(ResourceUsageCollectorService.class).getAllNodeStatistics().size())); + // Verify that cluster state is updated + ActionFuture<ClusterStateResponse> future2 = client().admin().cluster().state(new ClusterStateRequest()); + assertThat(future2.isDone(), is(true)); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.transientSettings( + Settings.builder() + .put(super.nodeSettings()) + .put(ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.MONITOR.getMode()) + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + BulkRequestBuilder bulk = client().prepareBulk(); + for (int i = 0; i < 3; i++) { + bulk.add(client().prepareIndex("index").setSource("foo", "bar " + i)); + } + // verify the bulk request succeeds but admission control records rejection stats + BulkResponse res = client().bulk(bulk.request()).actionGet(); + assertFalse(res.hasFailures()); + AdmissionControlService admissionControlService = getInstanceFromNode(AdmissionControlService.class); + AdmissionControllerStats acStats = admissionControlService.stats().getAdmissionControllerStatsList().get(0); + assertEquals(1, (long) acStats.getRejectionCount().get(AdmissionControlActionType.INDEXING.getType())); + client().admin().indices().prepareRefresh("index").get(); + + // verify the search request succeeds but admission control records rejection stats + SearchRequest searchRequest = new SearchRequest("index"); + SearchResponse searchResponse = client().search(searchRequest).actionGet(); + assertEquals(3, searchResponse.getHits().getHits().length); + acStats = admissionControlService.stats().getAdmissionControllerStatsList().get(0); + assertEquals(1, (long) acStats.getRejectionCount().get(AdmissionControlActionType.SEARCH.getType())); + } + + public void testAdmissionControlRejectionDisabledMode() throws Exception { + assertBusy(() -> assertEquals(1,
getInstanceFromNode(ResourceUsageCollectorService.class).getAllNodeStatistics().size())); + // Verify that cluster state is updated + ActionFuture<ClusterStateResponse> future2 = client().admin().cluster().state(new ClusterStateRequest()); + assertThat(future2.isDone(), is(true)); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.transientSettings( + Settings.builder().put(super.nodeSettings()).put(ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.DISABLED) + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + BulkRequestBuilder bulk = client().prepareBulk(); + for (int i = 0; i < 3; i++) { + bulk.add(client().prepareIndex("index").setSource("foo", "bar " + i)); + } + // verify bulk request success and no rejections + BulkResponse res = client().bulk(bulk.request()).actionGet(); + assertFalse(res.hasFailures()); + AdmissionControlService admissionControlService = getInstanceFromNode(AdmissionControlService.class); + AdmissionControllerStats acStats = admissionControlService.stats().getAdmissionControllerStatsList().get(0); + assertEquals(0, acStats.getRejectionCount().size()); + client().admin().indices().prepareRefresh("index").get(); + + // verify search request success and no rejections + SearchRequest searchRequest = new SearchRequest("index"); + SearchResponse searchResponse = client().search(searchRequest).actionGet(); + assertEquals(3, searchResponse.getHits().getHits().length); + acStats = admissionControlService.stats().getAdmissionControllerStatsList().get(0); + assertEquals(0, acStats.getRejectionCount().size()); + + } + + public void testAdmissionControlWithinLimits() throws Exception { + assertBusy(() -> assertEquals(1, getInstanceFromNode(ResourceUsageCollectorService.class).getAllNodeStatistics().size())); + // Verify that cluster state is updated + ActionFuture<ClusterStateResponse> future2 = client().admin().cluster().state(new ClusterStateRequest()); + assertThat(future2.isDone(), is(true)); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.transientSettings( + Settings.builder() + .put(super.nodeSettings()) + .put(ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.ENFORCED) + .put(SEARCH_CPU_USAGE_LIMIT.getKey(), 101) + .put(INDEXING_CPU_USAGE_LIMIT.getKey(), 101) + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + BulkRequestBuilder bulk = client().prepareBulk(); + for (int i = 0; i < 3; i++) { + bulk.add(client().prepareIndex("index").setSource("foo", "bar " + i)); + } + // verify bulk request success and no rejections + BulkResponse res = client().bulk(bulk.request()).actionGet(); + assertFalse(res.hasFailures()); + AdmissionControlService admissionControlService = getInstanceFromNode(AdmissionControlService.class); + AdmissionControllerStats acStats = admissionControlService.stats().getAdmissionControllerStatsList().get(0); + assertEquals(0, acStats.getRejectionCount().size()); + client().admin().indices().prepareRefresh("index").get(); + + // verify search request success and no rejections + SearchRequest searchRequest = new SearchRequest("index"); + SearchResponse searchResponse = client().search(searchRequest).actionGet(); + assertEquals(3, searchResponse.getHits().getHits().length); + acStats = admissionControlService.stats().getAdmissionControllerStatsList().get(0); + assertEquals(0, acStats.getRejectionCount().size()); + } +} diff --git
a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CPUBasedAdmissionControllerTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CPUBasedAdmissionControllerTests.java deleted file mode 100644 index af6ec0749e709..0000000000000 --- a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CPUBasedAdmissionControllerTests.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.ratelimitting.admissioncontrol.controllers; - -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Settings; -import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; -import org.opensearch.ratelimitting.admissioncontrol.settings.CPUBasedAdmissionControllerSettings; -import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.threadpool.TestThreadPool; -import org.opensearch.threadpool.ThreadPool; - -public class CPUBasedAdmissionControllerTests extends OpenSearchTestCase { - private ClusterService clusterService; - private ThreadPool threadPool; - CPUBasedAdmissionController admissionController = null; - - String action = "TEST_ACTION"; - - @Override - public void setUp() throws Exception { - super.setUp(); - threadPool = new TestThreadPool("admission_controller_settings_test"); - clusterService = new ClusterService( - Settings.EMPTY, - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - threadPool - ); - } - - @Override - public void tearDown() throws Exception { - super.tearDown(); - threadPool.shutdownNow(); - } - - public void testCheckDefaultParameters() { - admissionController = new CPUBasedAdmissionController( - CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER, - Settings.EMPTY, - clusterService.getClusterSettings() - ); - assertEquals(admissionController.getName(), CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER); - assertEquals(admissionController.getRejectionCount(), 0); - assertEquals(admissionController.settings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.DISABLED); - assertFalse( - admissionController.isEnabledForTransportLayer(admissionController.settings.getTransportLayerAdmissionControllerMode()) - ); - } - - public void testCheckUpdateSettings() { - admissionController = new CPUBasedAdmissionController( - CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER, - Settings.EMPTY, - clusterService.getClusterSettings() - ); - Settings settings = Settings.builder() - .put( - CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), - AdmissionControlMode.ENFORCED.getMode() - ) - .build(); - clusterService.getClusterSettings().applySettings(settings); - - assertEquals(admissionController.getName(), CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER); - assertEquals(admissionController.getRejectionCount(), 0); - assertEquals(admissionController.settings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.ENFORCED); - assertTrue(admissionController.isEnabledForTransportLayer(admissionController.settings.getTransportLayerAdmissionControllerMode())); - } - - public void testApplyControllerWithDefaultSettings() 
{ - admissionController = new CPUBasedAdmissionController( - CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER, - Settings.EMPTY, - clusterService.getClusterSettings() - ); - assertEquals(admissionController.getRejectionCount(), 0); - assertEquals(admissionController.settings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.DISABLED); - action = "indices:data/write/bulk[s][p]"; - admissionController.apply(action); - assertEquals(admissionController.getRejectionCount(), 0); - } - - public void testApplyControllerWhenSettingsEnabled() { - Settings settings = Settings.builder() - .put( - CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), - AdmissionControlMode.ENFORCED.getMode() - ) - .build(); - admissionController = new CPUBasedAdmissionController( - CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER, - settings, - clusterService.getClusterSettings() - ); - assertTrue(admissionController.isEnabledForTransportLayer(admissionController.settings.getTransportLayerAdmissionControllerMode())); - assertEquals(admissionController.getRejectionCount(), 0); - action = "indices:data/write/bulk[s][p]"; - admissionController.apply(action); - assertEquals(admissionController.getRejectionCount(), 1); - } -} diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CpuBasedAdmissionControllerTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CpuBasedAdmissionControllerTests.java new file mode 100644 index 0000000000000..e72c0cd58ed64 --- /dev/null +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CpuBasedAdmissionControllerTests.java @@ -0,0 +1,143 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ratelimitting.admissioncontrol.controllers; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.node.ResourceUsageCollectorService; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import org.mockito.Mockito; + +public class CpuBasedAdmissionControllerTests extends OpenSearchTestCase { + private ClusterService clusterService; + private ThreadPool threadPool; + CpuBasedAdmissionController admissionController = null; + String action = "TEST_ACTION"; + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool("admission_controller_settings_test"); + clusterService = new ClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testCheckDefaultParameters() { + admissionController = new CpuBasedAdmissionController( + CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER, + null, + clusterService, + Settings.EMPTY + ); + assertEquals(admissionController.getName(), CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 0); + assertEquals(admissionController.settings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.DISABLED); + assertFalse( + admissionController.isEnabledForTransportLayer(admissionController.settings.getTransportLayerAdmissionControllerMode()) + ); + } + + public void testCheckUpdateSettings() { + admissionController = new CpuBasedAdmissionController( + CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER, + null, + clusterService, + Settings.EMPTY + ); + Settings settings = Settings.builder() + .put( + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .build(); + clusterService.getClusterSettings().applySettings(settings); + + assertEquals(admissionController.getName(), CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 0); + assertEquals(admissionController.settings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.ENFORCED); + assertTrue(admissionController.isEnabledForTransportLayer(admissionController.settings.getTransportLayerAdmissionControllerMode())); + } + + public void testApplyControllerWithDefaultSettings() { + ResourceUsageCollectorService rs = Mockito.mock(ResourceUsageCollectorService.class); + admissionController = new CpuBasedAdmissionController( + CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER, + rs, + clusterService, + Settings.EMPTY + ); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 0); + assertEquals(admissionController.settings.getTransportLayerAdmissionControllerMode(), 
AdmissionControlMode.DISABLED); + action = "indices:data/write/bulk[s][p]"; + admissionController.apply(action, AdmissionControlActionType.INDEXING); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 0); + } + + public void testApplyControllerWhenSettingsEnabled() throws Exception { + Settings settings = Settings.builder() + .put( + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .build(); + ResourceUsageCollectorService rs = Mockito.mock(ResourceUsageCollectorService.class); + admissionController = new CpuBasedAdmissionController( + CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER, + rs, + clusterService, + settings + ); + assertTrue(admissionController.isEnabledForTransportLayer(admissionController.settings.getTransportLayerAdmissionControllerMode())); + assertTrue( + admissionController.isAdmissionControllerEnforced(admissionController.settings.getTransportLayerAdmissionControllerMode()) + ); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 0); + // we can assert admission control and rejections as part of ITs + } + + public void testRejectionCount() { + Settings settings = Settings.builder() + .put( + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .build(); + ResourceUsageCollectorService rs = Mockito.mock(ResourceUsageCollectorService.class); + admissionController = new CpuBasedAdmissionController( + CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER, + rs, + clusterService, + settings + ); + admissionController.addRejectionCount(AdmissionControlActionType.SEARCH.getType(), 1); + admissionController.addRejectionCount(AdmissionControlActionType.INDEXING.getType(), 3); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.SEARCH.getType()), 1); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 3); + admissionController.addRejectionCount(AdmissionControlActionType.SEARCH.getType(), 1); + admissionController.addRejectionCount(AdmissionControlActionType.INDEXING.getType(), 2); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.SEARCH.getType()), 2); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 5); + } +} diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/enums/TransportActionTypeTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlActionTypeTests.java similarity index 53% rename from server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/enums/TransportActionTypeTests.java rename to server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlActionTypeTests.java index 02f582c26f54e..15a25e6cbca1c 100644 --- a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/enums/TransportActionTypeTests.java +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlActionTypeTests.java @@ -10,18 +10,18 @@ import org.opensearch.test.OpenSearchTestCase; -public class TransportActionTypeTests extends OpenSearchTestCase { +public class AdmissionControlActionTypeTests extends OpenSearchTestCase { public void testValidActionType() { - 
assertEquals(TransportActionType.SEARCH.getType(), "search"); - assertEquals(TransportActionType.INDEXING.getType(), "indexing"); - assertEquals(TransportActionType.fromName("search"), TransportActionType.SEARCH); - assertEquals(TransportActionType.fromName("indexing"), TransportActionType.INDEXING); + assertEquals(AdmissionControlActionType.SEARCH.getType(), "search"); + assertEquals(AdmissionControlActionType.INDEXING.getType(), "indexing"); + assertEquals(AdmissionControlActionType.fromName("search"), AdmissionControlActionType.SEARCH); + assertEquals(AdmissionControlActionType.fromName("indexing"), AdmissionControlActionType.INDEXING); } public void testInValidActionType() { String name = "test"; - IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> TransportActionType.fromName(name)); + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> AdmissionControlActionType.fromName(name)); assertEquals(ex.getMessage(), "Not Supported TransportAction Type: " + name); } } diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControlSettingsTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControlSettingsTests.java index 43103926a69a2..11688e2f30d4b 100644 --- a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControlSettingsTests.java +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControlSettingsTests.java @@ -47,16 +47,16 @@ public void testSettingsExists() { "All the cpu based admission controller settings should be supported built in settings", settings.containsAll( Arrays.asList( - CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, - CPUBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT, - CPUBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, + CpuBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT, + CpuBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT ) ) ); } public void testDefaultSettings() { - CPUBasedAdmissionControllerSettings cpuBasedAdmissionControllerSettings = new CPUBasedAdmissionControllerSettings( + CpuBasedAdmissionControllerSettings cpuBasedAdmissionControllerSettings = new CpuBasedAdmissionControllerSettings( clusterService.getClusterSettings(), Settings.EMPTY ); @@ -64,7 +64,6 @@ public void testDefaultSettings() { assertEquals(cpuBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.DISABLED); assertEquals(cpuBasedAdmissionControllerSettings.getIndexingCPULimit().longValue(), percent); assertEquals(cpuBasedAdmissionControllerSettings.getSearchCPULimit().longValue(), percent); - assertEquals(cpuBasedAdmissionControllerSettings.getTransportActionsList(), Arrays.asList("indexing", "search")); } public void testGetConfiguredSettings() { @@ -72,13 +71,13 @@ public void testGetConfiguredSettings() { long indexingPercent = 85; Settings settings = Settings.builder() .put( - CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.ENFORCED.getMode() ) - .put(CPUBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT.getKey(), indexingPercent) + 
.put(CpuBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT.getKey(), indexingPercent) .build(); - CPUBasedAdmissionControllerSettings cpuBasedAdmissionControllerSettings = new CPUBasedAdmissionControllerSettings( + CpuBasedAdmissionControllerSettings cpuBasedAdmissionControllerSettings = new CpuBasedAdmissionControllerSettings( clusterService.getClusterSettings(), settings ); @@ -90,16 +89,16 @@ public void testGetConfiguredSettings() { public void testUpdateAfterGetDefaultSettings() { long percent = 95; long searchPercent = 80; - CPUBasedAdmissionControllerSettings cpuBasedAdmissionControllerSettings = new CPUBasedAdmissionControllerSettings( + CpuBasedAdmissionControllerSettings cpuBasedAdmissionControllerSettings = new CpuBasedAdmissionControllerSettings( clusterService.getClusterSettings(), Settings.EMPTY ); Settings settings = Settings.builder() .put( - CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.ENFORCED.getMode() ) - .put(CPUBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT.getKey(), searchPercent) + .put(CpuBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT.getKey(), searchPercent) .build(); clusterService.getClusterSettings().applySettings(settings); assertEquals(cpuBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.ENFORCED); @@ -113,13 +112,13 @@ public void testUpdateAfterGetConfiguredSettings() { long searchPercent = 80; Settings settings = Settings.builder() .put( - CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.ENFORCED.getMode() ) - .put(CPUBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT.getKey(), searchPercent) + .put(CpuBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT.getKey(), searchPercent) .build(); - CPUBasedAdmissionControllerSettings cpuBasedAdmissionControllerSettings = new CPUBasedAdmissionControllerSettings( + CpuBasedAdmissionControllerSettings cpuBasedAdmissionControllerSettings = new CpuBasedAdmissionControllerSettings( clusterService.getClusterSettings(), settings ); @@ -129,10 +128,10 @@ public void testUpdateAfterGetConfiguredSettings() { Settings updatedSettings = Settings.builder() .put( - CPUBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.MONITOR.getMode() ) - .put(CPUBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT.getKey(), indexingPercent) + .put(CpuBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT.getKey(), indexingPercent) .build(); clusterService.getClusterSettings().applySettings(updatedSettings); assertEquals(cpuBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.MONITOR); @@ -143,7 +142,7 @@ public void testUpdateAfterGetConfiguredSettings() { updatedSettings = Settings.builder() .put(updatedSettings) - .put(CPUBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT.getKey(), searchPercent) + .put(CpuBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT.getKey(), searchPercent) .build(); clusterService.getClusterSettings().applySettings(updatedSettings); diff --git 
a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControlStatsTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControlStatsTests.java new file mode 100644 index 0000000000000..7b4db5f787d6e --- /dev/null +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControlStatsTests.java @@ -0,0 +1,92 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.stats; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.node.ResourceUsageCollectorService; +import org.opensearch.ratelimitting.admissioncontrol.controllers.AdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.controllers.CpuBasedAdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.mockito.Mockito.mock; + +public class AdmissionControlStatsTests extends OpenSearchTestCase { + AdmissionController admissionController; + AdmissionControllerStats admissionControllerStats; + AdmissionControlStats admissionControlStats; + private ThreadPool threadPool; + + @Override + public void setUp() throws Exception { + super.setUp(); + Settings settings = Settings.builder() + .put( + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .build(); + threadPool = new TestThreadPool("admission_controller_settings_test"); + ClusterService clusterService = new ClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + admissionController = new CpuBasedAdmissionController( + CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER, + mock(ResourceUsageCollectorService.class), + clusterService, + settings + ); + admissionControllerStats = new AdmissionControllerStats(admissionController); + List<AdmissionControllerStats> admissionControllerStats = new ArrayList<>(); + admissionControlStats = new AdmissionControlStats(admissionControllerStats); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testDefaults() throws IOException { + assertEquals(admissionControlStats.getAdmissionControllerStatsList().size(), 0); + } + + public void testRejectionCount() throws IOException { + admissionController.addRejectionCount(AdmissionControlActionType.SEARCH.getType(), 11); + admissionController.addRejectionCount(AdmissionControlActionType.INDEXING.getType(), 1); + admissionControllerStats = new AdmissionControllerStats(admissionController); + 
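// the stats object is a point-in-time snapshot, so it is rebuilt after recording rejections + 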
admissionControlStats = new AdmissionControlStats(List.of(admissionControllerStats)); + XContentBuilder builder = JsonXContent.contentBuilder(); + builder.startObject(); + builder = admissionControlStats.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + String response = builder.toString(); + assertEquals( + response, + "{\"admission_control\":{\"global_cpu_usage\":{\"transport\":{\"rejection_count\":{\"search\":11,\"indexing\":1}}}}}" + ); + AdmissionControlStats admissionControlStats1 = admissionControlStats; + assertEquals(admissionControlStats.hashCode(), admissionControlStats1.hashCode()); + } +} diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControllerStatsTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControllerStatsTests.java new file mode 100644 index 0000000000000..fe0399e79a5f4 --- /dev/null +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControllerStatsTests.java @@ -0,0 +1,82 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.stats; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.node.ResourceUsageCollectorService; +import org.opensearch.ratelimitting.admissioncontrol.controllers.AdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.controllers.CpuBasedAdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import java.io.IOException; + +import static org.mockito.Mockito.mock; + +public class AdmissionControllerStatsTests extends OpenSearchTestCase { + AdmissionController admissionController; + AdmissionControllerStats admissionControllerStats; + private ThreadPool threadPool; + + @Override + public void setUp() throws Exception { + super.setUp(); + Settings settings = Settings.builder() + .put( + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .build(); + threadPool = new TestThreadPool("admission_controller_settings_test"); + ClusterService clusterService = new ClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + admissionController = new CpuBasedAdmissionController("TEST", mock(ResourceUsageCollectorService.class), clusterService, settings); + admissionControllerStats = new AdmissionControllerStats(admissionController); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testDefaults() throws IOException { + 
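// a freshly constructed stats object carries the controller name but no rejection counts + 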
assertEquals(admissionControllerStats.getRejectionCount().size(), 0); + assertEquals(admissionControllerStats.getAdmissionControllerName(), "TEST"); + } + + public void testRejectionCount() throws IOException { + admissionController.addRejectionCount(AdmissionControlActionType.SEARCH.getType(), 11); + admissionController.addRejectionCount(AdmissionControlActionType.INDEXING.getType(), 1); + admissionControllerStats = new AdmissionControllerStats(admissionController); + long searchRejection = admissionControllerStats.getRejectionCount().getOrDefault(AdmissionControlActionType.SEARCH.getType(), 0L); + long indexingRejection = admissionControllerStats.getRejectionCount() + .getOrDefault(AdmissionControlActionType.INDEXING.getType(), 0L); + assertEquals(searchRejection, 11); + assertEquals(indexingRejection, 1); + XContentBuilder builder = JsonXContent.contentBuilder(); + builder = admissionControllerStats.toXContent(builder, ToXContent.EMPTY_PARAMS); + String response = builder.toString(); + assertEquals(response, "{\"transport\":{\"rejection_count\":{\"search\":11,\"indexing\":1}}}"); + AdmissionControllerStats admissionControllerStats1 = admissionControllerStats; + assertEquals(admissionControllerStats.hashCode(), admissionControllerStats1.hashCode()); + } +} diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportHandlerTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportHandlerTests.java index 03d4819a94045..0c95769e19489 100644 --- a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportHandlerTests.java +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportHandlerTests.java @@ -29,7 +29,8 @@ public void testHandlerInvoked() throws Exception { action, handler, mock(AdmissionControlService.class), - false + false, + null ); admissionControlTransportHandler.messageReceived(mock(TransportRequest.class), mock(TransportChannel.class), mock(Task.class)); assertEquals(1, handler.count); @@ -38,33 +39,32 @@ public void testHandlerInvokedRejectedException() throws Exception { String action = "TEST"; AdmissionControlService admissionControlService = mock(AdmissionControlService.class); - doThrow(new OpenSearchRejectedExecutionException()).when(admissionControlService).applyTransportAdmissionControl(action); + doThrow(new OpenSearchRejectedExecutionException()).when(admissionControlService).applyTransportAdmissionControl(action, null); InterceptingRequestHandler<TransportRequest> handler = new InterceptingRequestHandler<>(action); admissionControlTransportHandler = new AdmissionControlTransportHandler<TransportRequest>( action, handler, admissionControlService, - false + false, + null ); - try { - admissionControlTransportHandler.messageReceived(mock(TransportRequest.class), mock(TransportChannel.class), mock(Task.class)); - } catch (OpenSearchRejectedExecutionException exception) { - assertEquals(0, handler.count); - handler.messageReceived(mock(TransportRequest.class), mock(TransportChannel.class), mock(Task.class)); - } + admissionControlTransportHandler.messageReceived(mock(TransportRequest.class), mock(TransportChannel.class), mock(Task.class)); + assertEquals(0, handler.count); + handler.messageReceived(mock(TransportRequest.class), mock(TransportChannel.class), mock(Task.class)); assertEquals(1, handler.count); } public void 
testHandlerInvokedRandomException() throws Exception { String action = "TEST"; AdmissionControlService admissionControlService = mock(AdmissionControlService.class); - doThrow(new NullPointerException()).when(admissionControlService).applyTransportAdmissionControl(action); + doThrow(new NullPointerException()).when(admissionControlService).applyTransportAdmissionControl(action, null); InterceptingRequestHandler<TransportRequest> handler = new InterceptingRequestHandler<>(action); admissionControlTransportHandler = new AdmissionControlTransportHandler<TransportRequest>( action, handler, admissionControlService, - false + false, + null ); try { admissionControlTransportHandler.messageReceived(mock(TransportRequest.class), mock(TransportChannel.class), mock(Task.class)); diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java index 37cd7a42c7cdf..dda053af78b30 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java @@ -969,6 +969,7 @@ private void indexSampleData(List<ZonedDateTime> dataset, RandomIndexWriter inde for (final ZonedDateTime date : dataset) { final long instant = date.toInstant().toEpochMilli(); document.add(new SortedNumericDocValuesField(DATE_FIELD, instant)); + document.add(new LongPoint(DATE_FIELD, instant)); document.add(new LongPoint(INSTANT_FIELD, instant)); document.add(new SortedNumericDocValuesField(NUMERIC_FIELD, i)); indexWriter.addDocument(document); diff --git a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java index 98d2a7e84d672..f5851e669a2da 100644 --- a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java +++ b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java @@ -41,6 +41,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.breaker.CircuitBreaker; import org.opensearch.core.common.breaker.NoopCircuitBreaker; @@ -68,6 +69,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import static org.mockito.ArgumentMatchers.anyString; @@ -1378,4 +1380,92 @@ public void testExtraParameterInProcessorConfig() { fail("Wrong exception type: " + e.getClass()); } } + + private static class FakeStatefulRequestProcessor extends AbstractProcessor implements StatefulSearchRequestProcessor { + private final String type; + private final Consumer<PipelineProcessingContext> stateConsumer; + + public FakeStatefulRequestProcessor(String type, Consumer<PipelineProcessingContext> stateConsumer) { + super(null, null, false); + this.type = type; + this.stateConsumer = stateConsumer; + } + + @Override + public String getType() { + return type; + } + + @Override + public SearchRequest processRequest(SearchRequest request, PipelineProcessingContext requestContext) throws Exception { + stateConsumer.accept(requestContext); + return request; + } + } + + private static 
class FakeStatefulResponseProcessor extends AbstractProcessor implements StatefulSearchResponseProcessor { + private final String type; + private final Consumer<PipelineProcessingContext> stateConsumer; + + public FakeStatefulResponseProcessor(String type, Consumer<PipelineProcessingContext> stateConsumer) { + super(null, null, false); + this.type = type; + this.stateConsumer = stateConsumer; + } + + @Override + public String getType() { + return type; + } + + @Override + public SearchResponse processResponse(SearchRequest request, SearchResponse response, PipelineProcessingContext requestContext) + throws Exception { + stateConsumer.accept(requestContext); + return response; + } + } + + public void testStatefulProcessors() throws Exception { + AtomicReference<String> contextHolder = new AtomicReference<>(); + SearchPipelineService searchPipelineService = createWithProcessors( + Map.of( + "write_context", + (pf, t, d, igf, cfg, ctx) -> new FakeStatefulRequestProcessor("write_context", (c) -> c.setAttribute("a", "b")) + ), + Map.of( + "read_context", + (pf, t, d, igf, cfg, ctx) -> new FakeStatefulResponseProcessor( + "read_context", + (c) -> contextHolder.set((String) c.getAttribute("a")) + ) + ), + Collections.emptyMap() + ); + + SearchPipelineMetadata metadata = new SearchPipelineMetadata( + Map.of( + "p1", + new PipelineConfiguration( + "p1", + new BytesArray( + "{\"request_processors\" : [ { \"write_context\": {} } ], \"response_processors\": [ { \"read_context\": {} }] }" + ), + XContentType.JSON + ) + ) + ); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + ClusterState previousState = clusterState; + clusterState = ClusterState.builder(clusterState) + .metadata(Metadata.builder().putCustom(SearchPipelineMetadata.TYPE, metadata)) + .build(); + searchPipelineService.applyClusterState(new ClusterChangedEvent("", clusterState, previousState)); + + PipelinedRequest request = searchPipelineService.resolvePipeline(new SearchRequest().pipeline("p1")); + assertNull(contextHolder.get()); + syncExecutePipeline(request, new SearchResponse(null, null, 0, 0, 0, 0, null, null)); + assertNotNull(contextHolder.get()); + assertEquals("b", contextHolder.get()); + } } diff --git a/settings.gradle b/settings.gradle index 13cc6669e3d33..139d45013710f 100644 --- a/settings.gradle +++ b/settings.gradle @@ -10,7 +10,7 @@ */ plugins { - id "com.gradle.enterprise" version "3.14.1" + id "com.gradle.enterprise" version "3.15.1" } ext.disableBuildCache = hasProperty('DISABLE_BUILD_CACHE') || System.getenv().containsKey('DISABLE_BUILD_CACHE') diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 13345fcb20de1..2b56a0fc3f993 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -65,10 +65,10 @@ dependencies { api "org.eclipse.jetty:jetty-server:${versions.jetty}" api "org.eclipse.jetty.websocket:javax-websocket-server-impl:${versions.jetty}" api 'org.apache.zookeeper:zookeeper:3.9.1' - api "org.apache.commons:commons-text:1.10.0" - api "commons-net:commons-net:3.9.0" + api "org.apache.commons:commons-text:1.11.0" + api "commons-net:commons-net:3.10.0" runtimeOnly "com.google.guava:guava:${versions.guava}" - runtimeOnly("com.squareup.okhttp3:okhttp:4.11.0") { + runtimeOnly("com.squareup.okhttp3:okhttp:4.12.0") { exclude group: "com.squareup.okio" } runtimeOnly "com.squareup.okio:okio:3.6.0" diff --git a/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java 
b/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java index 2ba4de5e54a67..1ad6083074025 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java +++ b/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java @@ -123,7 +123,8 @@ List<NodeStats> adjustNodesStats(List<NodeStats> nodesStats) { nodeStats.getTaskCancellationStats(), nodeStats.getSearchPipelineStats(), nodeStats.getSegmentReplicationRejectionStats(), - nodeStats.getRepositoriesStats() + nodeStats.getRepositoriesStats(), + nodeStats.getAdmissionControlStats() ); }).collect(Collectors.toList()); } diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index 952cd6c085966..c2b964aa96212 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java @@ -2735,6 +2735,7 @@ public void ensureEstimatedStats() { false, false, false, + false, false ); assertThat(