diff --git a/.github/release-please.yml b/.github/release-please.yml
index be8096d847d..2853b1763cf 100644
--- a/.github/release-please.yml
+++ b/.github/release-please.yml
@@ -38,3 +38,7 @@ branches:
bumpMinorPreMajor: true
handleGHRelease: true
branch: 6.67.x
+ - releaseType: java-backport
+ bumpMinorPreMajor: true
+ handleGHRelease: true
+ branch: 6.66.x
diff --git a/.github/sync-repo-settings.yaml b/.github/sync-repo-settings.yaml
index d0bf28ee0f1..7ab67b223ec 100644
--- a/.github/sync-repo-settings.yaml
+++ b/.github/sync-repo-settings.yaml
@@ -140,6 +140,25 @@ branchProtectionRules:
- checkstyle
- compile (8)
- compile (11)
+ - pattern: 6.66.x
+ isAdminEnforced: true
+ requiredApprovingReviewCount: 1
+ requiresCodeOwnerReviews: true
+ requiresStrictStatusChecks: false
+ requiredStatusCheckContexts:
+ - dependencies (17)
+ - lint
+ - javadoc
+ - units (8)
+ - units (11)
+ - 'Kokoro - Test: Integration'
+ - 'Kokoro - Test: Integration with Multiplexed Sessions'
+ - cla/google
+ - checkstyle
+ - compile (8)
+ - compile (11)
+ - units-with-multiplexed-session (8)
+ - units-with-multiplexed-session (11)
permissionRules:
- team: yoshi-admins
permission: admin
diff --git a/.github/workflows/hermetic_library_generation.yaml b/.github/workflows/hermetic_library_generation.yaml
index 7146cc3dc1c..8b479f3a455 100644
--- a/.github/workflows/hermetic_library_generation.yaml
+++ b/.github/workflows/hermetic_library_generation.yaml
@@ -17,9 +17,12 @@ name: Hermetic library generation upon generation config change through pull req
on:
pull_request:
+env:
+ HEAD_REF: ${{ github.head_ref }}
+
jobs:
library_generation:
- # skip pull requests coming from a forked repository
+ # skip pull requests that come from a forked repository
if: github.event.pull_request.head.repo.full_name == github.repository
runs-on: ubuntu-latest
steps:
@@ -35,6 +38,6 @@ jobs:
[ -z "$(git config user.name)" ] && git config --global user.name "cloud-java-bot"
bash .github/scripts/hermetic_library_generation.sh \
--target_branch ${{ github.base_ref }} \
- --current_branch ${{ github.head_ref }}
+ --current_branch $HEAD_REF
env:
GH_TOKEN: ${{ secrets.CLOUD_JAVA_BOT_TOKEN }}
diff --git a/.github/workflows/unmanaged_dependency_check.yaml b/.github/workflows/unmanaged_dependency_check.yaml
index eb740eb8ba0..2e6ec1a12bf 100644
--- a/.github/workflows/unmanaged_dependency_check.yaml
+++ b/.github/workflows/unmanaged_dependency_check.yaml
@@ -17,6 +17,6 @@ jobs:
# repository
.kokoro/build.sh
- name: Unmanaged dependency check
- uses: googleapis/sdk-platform-java/java-shared-dependencies/unmanaged-dependency-check@google-cloud-shared-dependencies/v3.32.0
+ uses: googleapis/sdk-platform-java/java-shared-dependencies/unmanaged-dependency-check@google-cloud-shared-dependencies/v3.33.0
with:
bom-path: google-cloud-spanner-bom/pom.xml
diff --git a/.kokoro/build.sh b/.kokoro/build.sh
index d3eaf9922bb..f8ae5a96f37 100755
--- a/.kokoro/build.sh
+++ b/.kokoro/build.sh
@@ -48,6 +48,16 @@ if [[ ! -z "${GOOGLE_APPLICATION_CREDENTIALS}" && "${GOOGLE_APPLICATION_CREDENTI
export GOOGLE_APPLICATION_CREDENTIALS=$(realpath ${KOKORO_GFILE_DIR}/${GOOGLE_APPLICATION_CREDENTIALS})
fi
+# Start the Spanner emulator if the environment variable for it has been set.
+# TODO: Change if statement once the env var can be set in the config.
+#if [[ ! -z "${SPANNER_EMULATOR_HOST}" ]]; then
+if [[ "$JOB_TYPE" == "graalvm" ]] || [[ "$JOB_TYPE" == "graalvm17" ]]; then
+ echo "Starting emulator"
+ export SPANNER_EMULATOR_HOST=localhost:9010
+ docker pull gcr.io/cloud-spanner-emulator/emulator
+ docker run -d --rm --name spanner-emulator -p 9010:9010 -p 9020:9020 gcr.io/cloud-spanner-emulator/emulator
+fi
+
# Kokoro integration test uses both JDK 11 and JDK 8. We ensure the generated class files
# are compatible with Java 8 when running tests.
if [ -n "${JAVA8_HOME}" ]; then
@@ -233,6 +243,11 @@ clirr)
;;
esac
+if [[ ! -z "${SPANNER_EMULATOR_HOST}" ]]; then
+ echo "Stopping emulator"
+ docker container stop spanner-emulator
+fi
+
if [ "${REPORT_COVERAGE}" == "true" ]
then
bash ${KOKORO_GFILE_DIR}/codecov.sh
diff --git a/.kokoro/presubmit/graalvm-native-17.cfg b/.kokoro/presubmit/graalvm-native-17.cfg
index 7d5ab3a25c4..7008a721567 100644
--- a/.kokoro/presubmit/graalvm-native-17.cfg
+++ b/.kokoro/presubmit/graalvm-native-17.cfg
@@ -3,7 +3,7 @@
# Configure the docker image for kokoro-trampoline.
env_vars: {
key: "TRAMPOLINE_IMAGE"
- value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_b:3.32.0"
+ value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_b:3.33.0"
}
env_vars: {
diff --git a/.kokoro/presubmit/graalvm-native.cfg b/.kokoro/presubmit/graalvm-native.cfg
index 519c2e3ce37..931f9bb0052 100644
--- a/.kokoro/presubmit/graalvm-native.cfg
+++ b/.kokoro/presubmit/graalvm-native.cfg
@@ -3,7 +3,7 @@
# Configure the docker image for kokoro-trampoline.
env_vars: {
key: "TRAMPOLINE_IMAGE"
- value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_a:3.32.0"
+ value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_a:3.33.0"
}
env_vars: {
diff --git a/.repo-metadata.json b/.repo-metadata.json
index 5a0cd00cfe6..7848b32f2b6 100644
--- a/.repo-metadata.json
+++ b/.repo-metadata.json
@@ -5,7 +5,7 @@
"api_description": "is a fully managed, mission-critical, relational database service that offers transactional consistency at global scale, \\nschemas, SQL (ANSI 2011 with extensions), and automatic, synchronous replication \\nfor high availability.\\n\\nBe sure to activate the Cloud Spanner API on the Developer's Console to\\nuse Cloud Spanner from your project.",
"client_documentation": "https://cloud.google.com/java/docs/reference/google-cloud-spanner/latest/history",
"release_level": "stable",
- "transport": "both",
+ "transport": "grpc",
"language": "java",
"repo": "googleapis/java-spanner",
"repo_short": "java-spanner",
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e996cccc5a4..fc0757052a6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,36 @@
# Changelog
+## [6.72.0](https://github.com/googleapis/java-spanner/compare/v6.71.0...v6.72.0) (2024-08-07)
+
+
+### Features
+
+* Add `RESOURCE_EXHAUSTED` to the list of retryable error codes ([e859b29](https://github.com/googleapis/java-spanner/commit/e859b29ccf4e68b1ab62cffdd4cf197011ba9878))
+* Add field order_by in spanner.proto ([e859b29](https://github.com/googleapis/java-spanner/commit/e859b29ccf4e68b1ab62cffdd4cf197011ba9878))
+* Add QueryCancellationAction message in executor protos ([e859b29](https://github.com/googleapis/java-spanner/commit/e859b29ccf4e68b1ab62cffdd4cf197011ba9878))
+* Add SessionPoolOptions, SpannerOptions protos in executor protos ([e859b29](https://github.com/googleapis/java-spanner/commit/e859b29ccf4e68b1ab62cffdd4cf197011ba9878))
+* Add support for multi region encryption config ([e859b29](https://github.com/googleapis/java-spanner/commit/e859b29ccf4e68b1ab62cffdd4cf197011ba9878))
+* Enable hermetic library generation ([#3129](https://github.com/googleapis/java-spanner/issues/3129)) ([94b2a86](https://github.com/googleapis/java-spanner/commit/94b2a8610ac02d2b4212c421f03b4e9561ec9949))
+* **spanner:** Add samples for instance partitions ([#3221](https://github.com/googleapis/java-spanner/issues/3221)) ([bc48bf2](https://github.com/googleapis/java-spanner/commit/bc48bf212e37441221b3b6c8742b07ff601f6c41))
+* **spanner:** Add support for Cloud Spanner Scheduled Backups ([e859b29](https://github.com/googleapis/java-spanner/commit/e859b29ccf4e68b1ab62cffdd4cf197011ba9878))
+* **spanner:** Adding `EXPECTED_FULFILLMENT_PERIOD` to the indicate instance creation times (with `FULFILLMENT_PERIOD_NORMAL` or `FULFILLMENT_PERIOD_EXTENDED` ENUM) with the extended instance creation time triggered by On-Demand Capacity Feature ([e859b29](https://github.com/googleapis/java-spanner/commit/e859b29ccf4e68b1ab62cffdd4cf197011ba9878))
+* **spanner:** Set manual affinity incase of gRPC-GCP extenstion ([#3215](https://github.com/googleapis/java-spanner/issues/3215)) ([86b306a](https://github.com/googleapis/java-spanner/commit/86b306a4189483a5fd2746052bed817443630567))
+* Support Read RPC OrderBy ([#3180](https://github.com/googleapis/java-spanner/issues/3180)) ([735bca5](https://github.com/googleapis/java-spanner/commit/735bca523e4ea53a24929fb2c27d282c41350e91))
+
+
+### Bug Fixes
+
+* Make sure commitAsync always finishes ([#3216](https://github.com/googleapis/java-spanner/issues/3216)) ([440c88b](https://github.com/googleapis/java-spanner/commit/440c88bd67e1c9d08445fe26b01bf243f7fd1ca4))
+* SessionPoolOptions.Builder#toBuilder() skipped useMultiplexedSessions ([#3197](https://github.com/googleapis/java-spanner/issues/3197)) ([027f92c](https://github.com/googleapis/java-spanner/commit/027f92cf32fee8217d2075db61fe0be58d43a40d))
+
+
+### Dependencies
+
+* Bump sdk-platform-java-config to 3.33.0 ([#3243](https://github.com/googleapis/java-spanner/issues/3243)) ([35907c6](https://github.com/googleapis/java-spanner/commit/35907c63ae981612ba24dd9605db493b5b864217))
+* Update dependencies to latest ([#3250](https://github.com/googleapis/java-spanner/issues/3250)) ([d1d566b](https://github.com/googleapis/java-spanner/commit/d1d566b096915a537e0978715c81bfca00e34ceb))
+* Update dependency com.google.auto.value:auto-value-annotations to v1.11.0 ([#3191](https://github.com/googleapis/java-spanner/issues/3191)) ([065cd48](https://github.com/googleapis/java-spanner/commit/065cd489964aaee42fffe1e71327906bde907205))
+* Update dependency com.google.cloud:google-cloud-trace to v2.47.0 ([#3067](https://github.com/googleapis/java-spanner/issues/3067)) ([e336ab8](https://github.com/googleapis/java-spanner/commit/e336ab81a1d392d56386f9302bf51bf14e385dad))
+
## [6.71.0](https://github.com/googleapis/java-spanner/compare/v6.70.0...v6.71.0) (2024-07-03)
diff --git a/README.md b/README.md
index 95846aead8e..352b0c44967 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,7 @@ If you are using Maven with [BOM][libraries-bom], add this to your pom.xml file:
com.google.cloudlibraries-bom
- 26.39.0
+ 26.43.0pomimport
@@ -42,7 +42,7 @@ If you are using Maven without the BOM, add this to your dependencies:
com.google.cloudgoogle-cloud-spanner
- 6.67.0
+ 6.71.0
```
@@ -621,7 +621,7 @@ To get help, follow the instructions in the [shared Troubleshooting document][tr
## Transport
-Cloud Spanner uses both gRPC and HTTP/JSON for the transport layer.
+Cloud Spanner uses gRPC for the transport layer.
## Supported Java Versions
diff --git a/benchmarks/pom.xml b/benchmarks/pom.xml
index 14131d2342d..971cf7023a8 100644
--- a/benchmarks/pom.xml
+++ b/benchmarks/pom.xml
@@ -24,7 +24,7 @@
com.google.cloudgoogle-cloud-spanner-parent
- 6.71.1-SNAPSHOT
+ 6.72.1-SNAPSHOT
@@ -33,8 +33,8 @@
1.8UTF-8UTF-8
- 2.9.1
- 1.39.0
+ 2.10.0
+ 1.40.0
@@ -49,12 +49,12 @@
com.google.cloud.opentelemetryexporter-trace
- 0.29.0
+ 0.31.0com.google.cloud.opentelemetryexporter-metrics
- 0.29.0
+ 0.31.0
@@ -85,14 +85,14 @@
io.opentelemetryopentelemetry-bom
- 1.39.0
+ 1.40.0pomimportcom.google.cloudgoogle-cloud-spanner
- 6.67.0
+ 6.71.0commons-cli
@@ -102,7 +102,7 @@
com.google.auto.valueauto-value-annotations
- 1.10.4
+ 1.11.0com.kohlschutter.junixsocket
@@ -133,7 +133,7 @@
org.codehaus.mojoexec-maven-plugin
- 3.3.0
+ 3.4.0com.google.cloud.spanner.benchmark.LatencyBenchmarkfalse
diff --git a/generation_config.yaml b/generation_config.yaml
index 5db47f1db7c..07eb1e3ac06 100644
--- a/generation_config.yaml
+++ b/generation_config.yaml
@@ -1,5 +1,5 @@
-gapic_generator_version: 2.42.0
-googleapis_commitish: 19577edb4d439db98d2fb1f6f48f2e1b29fba099
+gapic_generator_version: 2.43.0
+googleapis_commitish: 7314e20f5e3b2550b2e10a8c53f58ae57c511773
libraries_bom_version: 26.43.0
libraries:
- api_shortname: spanner
diff --git a/google-cloud-spanner-bom/pom.xml b/google-cloud-spanner-bom/pom.xml
index 8f975232a56..7678b407b23 100644
--- a/google-cloud-spanner-bom/pom.xml
+++ b/google-cloud-spanner-bom/pom.xml
@@ -3,12 +3,12 @@
4.0.0com.google.cloudgoogle-cloud-spanner-bom
- 6.71.1-SNAPSHOT
+ 6.72.1-SNAPSHOTpomcom.google.cloudsdk-platform-java-config
- 3.32.0
+ 3.33.0Google Cloud Spanner BOM
@@ -53,43 +53,43 @@
com.google.cloudgoogle-cloud-spanner
- 6.71.1-SNAPSHOT
+ 6.72.1-SNAPSHOTcom.google.cloudgoogle-cloud-spannertest-jar
- 6.71.1-SNAPSHOT
+ 6.72.1-SNAPSHOTcom.google.api.grpcgrpc-google-cloud-spanner-v1
- 6.71.1-SNAPSHOT
+ 6.72.1-SNAPSHOTcom.google.api.grpcgrpc-google-cloud-spanner-admin-instance-v1
- 6.71.1-SNAPSHOT
+ 6.72.1-SNAPSHOTcom.google.api.grpcgrpc-google-cloud-spanner-admin-database-v1
- 6.71.1-SNAPSHOT
+ 6.72.1-SNAPSHOTcom.google.api.grpcproto-google-cloud-spanner-admin-instance-v1
- 6.71.1-SNAPSHOT
+ 6.72.1-SNAPSHOTcom.google.api.grpcproto-google-cloud-spanner-v1
- 6.71.1-SNAPSHOT
+ 6.72.1-SNAPSHOTcom.google.api.grpcproto-google-cloud-spanner-admin-database-v1
- 6.71.1-SNAPSHOT
+ 6.72.1-SNAPSHOT
diff --git a/google-cloud-spanner-executor/pom.xml b/google-cloud-spanner-executor/pom.xml
index 6df4fe5830c..62c9474ef27 100644
--- a/google-cloud-spanner-executor/pom.xml
+++ b/google-cloud-spanner-executor/pom.xml
@@ -5,14 +5,14 @@
4.0.0com.google.cloudgoogle-cloud-spanner-executor
- 6.71.1-SNAPSHOT
+ 6.72.1-SNAPSHOTjarGoogle Cloud Spanner Executorcom.google.cloudgoogle-cloud-spanner-parent
- 6.71.1-SNAPSHOT
+ 6.72.1-SNAPSHOT
@@ -188,7 +188,7 @@
org.apache.maven.pluginsmaven-failsafe-plugin
- 3.3.0
+ 3.3.1
diff --git a/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/stub/SpannerExecutorProxyStubSettings.java b/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/stub/SpannerExecutorProxyStubSettings.java
index 2b8c17ada97..88843026f4f 100644
--- a/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/stub/SpannerExecutorProxyStubSettings.java
+++ b/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/stub/SpannerExecutorProxyStubSettings.java
@@ -17,6 +17,7 @@
package com.google.cloud.spanner.executor.v1.stub;
import com.google.api.core.ApiFunction;
+import com.google.api.core.ObsoleteApi;
import com.google.api.gax.core.GaxProperties;
import com.google.api.gax.core.GoogleCredentialsProvider;
import com.google.api.gax.core.InstantiatingExecutorProvider;
@@ -119,6 +120,7 @@ public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuild
}
/** Returns the default service endpoint. */
+ @ObsoleteApi("Use getEndpoint() instead")
public static String getDefaultEndpoint() {
return "spanner-cloud-executor.googleapis.com:443";
}
diff --git a/google-cloud-spanner-executor/src/main/resources/META-INF/native-image/com.google.cloud.spanner.executor.v1/reflect-config.json b/google-cloud-spanner-executor/src/main/resources/META-INF/native-image/com.google.cloud.spanner.executor.v1/reflect-config.json
index f0594591386..e28206a3de9 100644
--- a/google-cloud-spanner-executor/src/main/resources/META-INF/native-image/com.google.cloud.spanner.executor.v1/reflect-config.json
+++ b/google-cloud-spanner-executor/src/main/resources/META-INF/native-image/com.google.cloud.spanner.executor.v1/reflect-config.json
@@ -2222,6 +2222,24 @@
"allDeclaredClasses": true,
"allPublicClasses": true
},
+ {
+ "name": "com.google.spanner.admin.database.v1.IncrementalBackupSpec",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.spanner.admin.database.v1.IncrementalBackupSpec$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
{
"name": "com.google.spanner.admin.database.v1.ListBackupOperationsRequest",
"queryAllDeclaredConstructors": true,
diff --git a/google-cloud-spanner/pom.xml b/google-cloud-spanner/pom.xml
index 8a79e70362e..b63f9be06d6 100644
--- a/google-cloud-spanner/pom.xml
+++ b/google-cloud-spanner/pom.xml
@@ -3,7 +3,7 @@
4.0.0com.google.cloudgoogle-cloud-spanner
- 6.71.1-SNAPSHOT
+ 6.72.1-SNAPSHOTjarGoogle Cloud Spannerhttps://github.com/googleapis/java-spanner
@@ -11,7 +11,7 @@
com.google.cloudgoogle-cloud-spanner-parent
- 6.71.1-SNAPSHOT
+ 6.72.1-SNAPSHOTgoogle-cloud-spanner
@@ -107,7 +107,7 @@
com.google.cloud.spanner.ParallelIntegrationTest
- 8
+ 12truecom.google.cloud.spanner.ParallelIntegrationTest
@@ -266,12 +266,12 @@
com.google.cloudgoogle-cloud-monitoring
- 3.38.0
+ 3.48.0com.google.api.grpcproto-google-cloud-monitoring-v3
- 3.38.0
+ 3.48.0com.google.auth
diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/CompositeTracer.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/CompositeTracer.java
index eed687c416d..07d1310e91b 100644
--- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/CompositeTracer.java
+++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/CompositeTracer.java
@@ -114,6 +114,13 @@ public void attemptFailed(Throwable error, Duration delay) {
}
}
+ @Override
+ public void attemptFailedDuration(Throwable error, java.time.Duration delay) {
+ for (ApiTracer child : children) {
+ child.attemptFailedDuration(error, delay);
+ }
+ }
+
@Override
public void attemptFailedRetriesExhausted(Throwable error) {
for (ApiTracer child : children) {
diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionPool.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionPool.java
index f36da57a816..1819224495d 100644
--- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionPool.java
+++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionPool.java
@@ -65,7 +65,6 @@
import com.google.cloud.spanner.SpannerImpl.ClosedException;
import com.google.cloud.spanner.spi.v1.SpannerRpc;
import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Function;
import com.google.common.base.MoreObjects;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
@@ -107,9 +106,10 @@
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Function;
import java.util.logging.Level;
import java.util.logging.Logger;
+import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.annotation.concurrent.GuardedBy;
import org.threeten.bp.Duration;
@@ -144,14 +144,6 @@ void maybeWaitOnMinSessions() {
ErrorCode.DEADLINE_EXCEEDED,
"Timed out after waiting " + timeoutMillis + "ms for session pool creation");
}
-
- if (useMultiplexedSessions()
- && !waitOnMultiplexedSessionsLatch.await(timeoutNanos, TimeUnit.NANOSECONDS)) {
- final long timeoutMillis = options.getWaitForMinSessions().toMillis();
- throw SpannerExceptionFactory.newSpannerException(
- ErrorCode.DEADLINE_EXCEEDED,
- "Timed out after waiting " + timeoutMillis + "ms for multiplexed session creation");
- }
} catch (InterruptedException e) {
throw SpannerExceptionFactory.propagateInterrupt(e);
}
@@ -241,7 +233,7 @@ public ApiFuture setCallback(Executor exec, ReadyCallback cb) {
private AutoClosingReadContext(
Function delegateSupplier,
SessionPool sessionPool,
- SessionReplacementHandler sessionReplacementHandler,
+ SessionReplacementHandler sessionReplacementHandler,
I session,
boolean isSingleUse) {
this.readContextDelegateSupplier = delegateSupplier;
@@ -554,7 +546,7 @@ private static class AutoClosingReadTransaction
AutoClosingReadTransaction(
Function txnSupplier,
SessionPool sessionPool,
- SessionReplacementHandler sessionReplacementHandler,
+ SessionReplacementHandler sessionReplacementHandler,
I session,
boolean isSingleUse) {
super(txnSupplier, sessionPool, sessionReplacementHandler, session, isSingleUse);
@@ -590,23 +582,6 @@ public PooledSessionFuture replaceSession(
}
}
- static class MultiplexedSessionReplacementHandler
- implements SessionReplacementHandler {
- @Override
- public MultiplexedSessionFuture replaceSession(
- SessionNotFoundException e, MultiplexedSessionFuture session) {
- /**
- * For multiplexed sessions, we would never obtain a {@link SessionNotFoundException}. Hence,
- * this method will ideally never be invoked.
- */
- logger.log(
- Level.WARNING,
- String.format(
- "Replace session invoked for multiplexed session => %s", session.getName()));
- throw e;
- }
- }
-
interface SessionNotFoundHandler {
/**
* Handles the given {@link SessionNotFoundException} by possibly converting it to a different
@@ -781,10 +756,13 @@ public ApiFuture bufferAsync(Iterable mutations) {
return delegate.bufferAsync(mutations);
}
+ @SuppressWarnings("deprecation")
@Override
public ResultSetStats analyzeUpdate(
Statement statement, QueryAnalyzeMode analyzeMode, UpdateOption... options) {
- return analyzeUpdateStatement(statement, analyzeMode, options).getStats();
+ try (ResultSet resultSet = analyzeUpdateStatement(statement, analyzeMode, options)) {
+ return resultSet.getStats();
+ }
}
@Override
@@ -870,7 +848,7 @@ private static class AutoClosingTransactionManager
AutoClosingTransactionManager(
T session,
- SessionReplacementHandler sessionReplacementHandler,
+ SessionReplacementHandler sessionReplacementHandler,
TransactionOption... options) {
this.session = session;
this.options = options;
@@ -1000,7 +978,7 @@ private static final class SessionPoolTransactionRunner
private SessionPoolTransactionRunner(
I session,
- SessionReplacementHandler sessionReplacementHandler,
+ SessionReplacementHandler sessionReplacementHandler,
TransactionOption... options) {
this.session = session;
this.options = options;
@@ -1032,6 +1010,7 @@ public T run(TransactionCallable callable) {
session.get().markUsed();
return result;
} catch (SpannerException e) {
+ //noinspection ThrowableNotThrown
session.get().setLastException(e);
throw e;
} finally {
@@ -1064,7 +1043,7 @@ private static class SessionPoolAsyncRunner implements
private SessionPoolAsyncRunner(
I session,
- SessionReplacementHandler sessionReplacementHandler,
+ SessionReplacementHandler sessionReplacementHandler,
TransactionOption... options) {
this.session = session;
this.options = options;
@@ -1100,7 +1079,6 @@ public ApiFuture runAsync(final AsyncWork work, Executor executor) {
session =
sessionReplacementHandler.replaceSession(
(SessionNotFoundException) se, session);
- se = null;
} catch (SessionNotFoundException e) {
exception = e;
break;
@@ -1266,39 +1244,6 @@ public PooledSessionFuture get() {
}
}
- class MultiplexedSessionFutureWrapper implements SessionFutureWrapper {
- private ISpan span;
- private volatile MultiplexedSessionFuture multiplexedSessionFuture;
-
- public MultiplexedSessionFutureWrapper(ISpan span) {
- this.span = span;
- }
-
- @Override
- public MultiplexedSessionFuture get() {
- if (resourceNotFoundException != null) {
- span.addAnnotation("Database has been deleted");
- throw SpannerExceptionFactory.newSpannerException(
- ErrorCode.NOT_FOUND,
- String.format(
- "The session pool has been invalidated because a previous RPC returned 'Database not found': %s",
- resourceNotFoundException.getMessage()),
- resourceNotFoundException);
- }
- if (multiplexedSessionFuture == null) {
- synchronized (lock) {
- if (multiplexedSessionFuture == null) {
- // Creating a new reference where the request's span state can be stored.
- MultiplexedSessionFuture multiplexedSessionFuture = new MultiplexedSessionFuture(span);
- this.multiplexedSessionFuture = multiplexedSessionFuture;
- return multiplexedSessionFuture;
- }
- }
- }
- return multiplexedSessionFuture;
- }
- }
-
interface SessionFuture extends Session {
/**
@@ -1318,8 +1263,8 @@ class PooledSessionFuture extends SimpleForwardingListenableFuture(this, pooledSessionReplacementHandler, options);
}
@Override
@@ -1563,7 +1508,7 @@ PooledSession get(final boolean eligibleForLongRunning) {
res.markBusy(span);
span.addAnnotation("Using Session", "sessionId", res.getName());
synchronized (lock) {
- incrementNumSessionsInUse(false);
+ incrementNumSessionsInUse();
checkedOutSessions.add(this);
}
res.eligibleForLongRunning = eligibleForLongRunning;
@@ -1581,247 +1526,6 @@ PooledSession get(final boolean eligibleForLongRunning) {
}
}
- class MultiplexedSessionFuture implements SessionFuture {
-
- private final ISpan span;
- private volatile MultiplexedSession multiplexedSession;
-
- MultiplexedSessionFuture(ISpan span) {
- this.span = span;
- }
-
- @Override
- public Timestamp write(Iterable mutations) throws SpannerException {
- return writeWithOptions(mutations).getCommitTimestamp();
- }
-
- @Override
- public CommitResponse writeWithOptions(
- Iterable mutations, TransactionOption... options) throws SpannerException {
- try {
- return get().writeWithOptions(mutations, options);
- } finally {
- close();
- }
- }
-
- @Override
- public Timestamp writeAtLeastOnce(Iterable mutations) throws SpannerException {
- return writeAtLeastOnceWithOptions(mutations).getCommitTimestamp();
- }
-
- @Override
- public CommitResponse writeAtLeastOnceWithOptions(
- Iterable mutations, TransactionOption... options) throws SpannerException {
- try {
- return get().writeAtLeastOnceWithOptions(mutations, options);
- } finally {
- close();
- }
- }
-
- @Override
- public ServerStream batchWriteAtLeastOnce(
- Iterable mutationGroups, TransactionOption... options)
- throws SpannerException {
- try {
- return get().batchWriteAtLeastOnce(mutationGroups, options);
- } finally {
- close();
- }
- }
-
- @Override
- public ReadContext singleUse() {
- try {
- return new AutoClosingReadContext<>(
- session -> {
- MultiplexedSession multiplexedSession = session.get();
- return multiplexedSession.getDelegate().singleUse();
- },
- SessionPool.this,
- multiplexedSessionReplacementHandler,
- this,
- true);
- } catch (Exception e) {
- close();
- throw e;
- }
- }
-
- @Override
- public ReadContext singleUse(final TimestampBound bound) {
- try {
- return new AutoClosingReadContext<>(
- session -> {
- MultiplexedSession multiplexedSession = session.get();
- return multiplexedSession.getDelegate().singleUse(bound);
- },
- SessionPool.this,
- multiplexedSessionReplacementHandler,
- this,
- true);
- } catch (Exception e) {
- close();
- throw e;
- }
- }
-
- @Override
- public ReadOnlyTransaction singleUseReadOnlyTransaction() {
- return internalReadOnlyTransaction(
- session -> {
- MultiplexedSession multiplexedSession = session.get();
- return multiplexedSession.getDelegate().singleUseReadOnlyTransaction();
- },
- true);
- }
-
- @Override
- public ReadOnlyTransaction singleUseReadOnlyTransaction(final TimestampBound bound) {
- return internalReadOnlyTransaction(
- session -> {
- MultiplexedSession multiplexedSession = session.get();
- return multiplexedSession.getDelegate().singleUseReadOnlyTransaction(bound);
- },
- true);
- }
-
- @Override
- public ReadOnlyTransaction readOnlyTransaction() {
- return internalReadOnlyTransaction(
- session -> {
- MultiplexedSession multiplexedSession = session.get();
- return multiplexedSession.getDelegate().readOnlyTransaction();
- },
- false);
- }
-
- @Override
- public ReadOnlyTransaction readOnlyTransaction(final TimestampBound bound) {
- return internalReadOnlyTransaction(
- session -> {
- MultiplexedSession multiplexedSession = session.get();
- return multiplexedSession.getDelegate().readOnlyTransaction(bound);
- },
- false);
- }
-
- private ReadOnlyTransaction internalReadOnlyTransaction(
- Function transactionSupplier,
- boolean isSingleUse) {
- try {
- return new AutoClosingReadTransaction<>(
- transactionSupplier,
- SessionPool.this,
- multiplexedSessionReplacementHandler,
- this,
- isSingleUse);
- } catch (Exception e) {
- close();
- throw e;
- }
- }
-
- @Override
- public TransactionRunner readWriteTransaction(TransactionOption... options) {
- return new SessionPoolTransactionRunner<>(
- this, multiplexedSessionReplacementHandler, options);
- }
-
- @Override
- public TransactionManager transactionManager(TransactionOption... options) {
- return new AutoClosingTransactionManager<>(
- this, multiplexedSessionReplacementHandler, options);
- }
-
- @Override
- public AsyncRunner runAsync(TransactionOption... options) {
- return new SessionPoolAsyncRunner(this, multiplexedSessionReplacementHandler, options);
- }
-
- @Override
- public AsyncTransactionManager transactionManagerAsync(TransactionOption... options) {
- return new SessionPoolAsyncTransactionManager<>(
- multiplexedSessionReplacementHandler, this, options);
- }
-
- @Override
- public long executePartitionedUpdate(Statement stmt, UpdateOption... options) {
- try {
- return get().executePartitionedUpdate(stmt, options);
- } finally {
- close();
- }
- }
-
- @Override
- public String getName() {
- return get().getName();
- }
-
- @Override
- public void close() {
- try {
- asyncClose().get();
- } catch (InterruptedException e) {
- throw SpannerExceptionFactory.propagateInterrupt(e);
- } catch (ExecutionException e) {
- throw asSpannerException(e.getCause());
- }
- }
-
- @Override
- public ApiFuture asyncClose() {
- MultiplexedSession delegate = getOrNull();
- if (delegate != null) {
- return delegate.asyncClose();
- }
- return ApiFutures.immediateFuture(Empty.getDefaultInstance());
- }
-
- private MultiplexedSession getOrNull() {
- try {
- return get();
- } catch (Throwable ignore) {
- // this exception will never be thrown for a multiplexed session since the Future
- // object is already initialised.
- return null;
- }
- }
-
- @Override
- public MultiplexedSession get() {
- try {
- if (multiplexedSession == null) {
- boolean created = false;
- synchronized (this) {
- if (multiplexedSession == null) {
- SessionImpl sessionImpl =
- new SessionImpl(
- sessionClient.getSpanner(), currentMultiplexedSessionReference.get().get());
- MultiplexedSession multiplexedSession = new MultiplexedSession(sessionImpl);
- multiplexedSession.markBusy(span);
- span.addAnnotation("Using Session", "sessionId", multiplexedSession.getName());
- this.multiplexedSession = multiplexedSession;
- created = true;
- }
- }
- if (created) {
- synchronized (lock) {
- incrementNumSessionsInUse(true);
- }
- }
- }
- return multiplexedSession;
- } catch (ExecutionException e) {
- throw SpannerExceptionFactory.newSpannerException(e.getCause());
- } catch (InterruptedException e) {
- throw SpannerExceptionFactory.propagateInterrupt(e);
- }
- }
- }
-
interface CachedSession extends Session {
SessionImpl getDelegate();
@@ -1832,9 +1536,6 @@ interface CachedSession extends Session {
SpannerException setLastException(SpannerException exception);
- // TODO This method can be removed once we fully migrate to multiplexed sessions.
- boolean isAllowReplacing();
-
AsyncTransactionManagerImpl transactionManagerAsync(TransactionOption... options);
void setAllowReplacing(boolean b);
@@ -2024,7 +1725,7 @@ public void close() {
if ((lastException != null && isSessionNotFound(lastException)) || isRemovedFromPool) {
invalidateSession(this);
} else {
- if (lastException != null && isDatabaseOrInstanceNotFound(lastException)) {
+ if (isDatabaseOrInstanceNotFound(lastException)) {
// Mark this session pool as no longer valid and then release the session into the pool as
// there is nothing we can do with it anyways.
synchronized (lock) {
@@ -2116,8 +1817,7 @@ public SpannerException setLastException(SpannerException exception) {
return exception;
}
- @Override
- public boolean isAllowReplacing() {
+ boolean isAllowReplacing() {
return this.allowReplacing;
}
@@ -2127,172 +1827,12 @@ public TransactionManager transactionManager(TransactionOption... options) {
}
}
- class MultiplexedSession implements CachedSession {
- final SessionImpl delegate;
- private volatile SpannerException lastException;
-
- MultiplexedSession(SessionImpl session) {
- this.delegate = session;
- }
-
- @Override
- public boolean isAllowReplacing() {
- // for multiplexed session there is only 1 session, hence there is nothing that we
- // can replace.
- return false;
- }
-
- @Override
- public void setAllowReplacing(boolean allowReplacing) {
- // for multiplexed session there is only 1 session, there is nothing that can be replaced.
- // hence this is no-op.
- }
-
- @Override
- public void markBusy(ISpan span) {
- this.delegate.setCurrentSpan(span);
- }
-
- @Override
- public void markUsed() {
- // no-op for a multiplexed session since we don't track the last-used time
- // in case of multiplexed session
- }
-
- @Override
- public SpannerException setLastException(SpannerException exception) {
- this.lastException = exception;
- return exception;
- }
-
- @Override
- public SessionImpl getDelegate() {
- return delegate;
- }
-
- @Override
- public Timestamp write(Iterable mutations) throws SpannerException {
- throw SpannerExceptionFactory.newSpannerException(
- ErrorCode.UNIMPLEMENTED, "Unimplemented with Multiplexed Session");
- }
-
- @Override
- public CommitResponse writeWithOptions(
- Iterable mutations, TransactionOption... options) throws SpannerException {
- throw SpannerExceptionFactory.newSpannerException(
- ErrorCode.UNIMPLEMENTED, "Unimplemented with Multiplexed Session");
- }
-
- @Override
- public Timestamp writeAtLeastOnce(Iterable mutations) throws SpannerException {
- throw SpannerExceptionFactory.newSpannerException(
- ErrorCode.UNIMPLEMENTED, "Unimplemented with Multiplexed Session");
- }
-
- @Override
- public CommitResponse writeAtLeastOnceWithOptions(
- Iterable mutations, TransactionOption... options) throws SpannerException {
- throw SpannerExceptionFactory.newSpannerException(
- ErrorCode.UNIMPLEMENTED, "Unimplemented with Multiplexed Session");
- }
-
- @Override
- public ServerStream batchWriteAtLeastOnce(
- Iterable mutationGroups, TransactionOption... options)
- throws SpannerException {
- throw SpannerExceptionFactory.newSpannerException(
- ErrorCode.UNIMPLEMENTED, "Unimplemented with Multiplexed Session");
- }
-
- @Override
- public ReadContext singleUse() {
- return delegate.singleUse();
- }
-
- @Override
- public ReadContext singleUse(TimestampBound bound) {
- return delegate.singleUse(bound);
- }
-
- @Override
- public ReadOnlyTransaction singleUseReadOnlyTransaction() {
- return delegate.singleUseReadOnlyTransaction();
- }
-
- @Override
- public ReadOnlyTransaction singleUseReadOnlyTransaction(TimestampBound bound) {
- return delegate.singleUseReadOnlyTransaction(bound);
- }
-
- @Override
- public ReadOnlyTransaction readOnlyTransaction() {
- return delegate.readOnlyTransaction();
- }
-
- @Override
- public ReadOnlyTransaction readOnlyTransaction(TimestampBound bound) {
- return delegate.readOnlyTransaction(bound);
- }
-
- @Override
- public TransactionRunner readWriteTransaction(TransactionOption... options) {
- throw SpannerExceptionFactory.newSpannerException(
- ErrorCode.UNIMPLEMENTED, "Unimplemented with Multiplexed Session");
- }
-
- @Override
- public TransactionManager transactionManager(TransactionOption... options) {
- throw SpannerExceptionFactory.newSpannerException(
- ErrorCode.UNIMPLEMENTED, "Unimplemented with Multiplexed Session");
- }
-
- @Override
- public AsyncRunner runAsync(TransactionOption... options) {
- throw SpannerExceptionFactory.newSpannerException(
- ErrorCode.UNIMPLEMENTED, "Unimplemented with Multiplexed Session");
- }
-
- @Override
- public AsyncTransactionManagerImpl transactionManagerAsync(TransactionOption... options) {
- throw SpannerExceptionFactory.newSpannerException(
- ErrorCode.UNIMPLEMENTED, "Unimplemented with Multiplexed Session");
- }
-
- @Override
- public long executePartitionedUpdate(Statement stmt, UpdateOption... options) {
- throw SpannerExceptionFactory.newSpannerException(
- ErrorCode.UNIMPLEMENTED, "Unimplemented with Multiplexed Session");
- }
-
- @Override
- public String getName() {
- return delegate.getName();
- }
-
- @Override
- public void close() {
- synchronized (lock) {
- if (lastException != null && isDatabaseOrInstanceNotFound(lastException)) {
- SessionPool.this.resourceNotFoundException =
- MoreObjects.firstNonNull(
- SessionPool.this.resourceNotFoundException,
- (ResourceNotFoundException) lastException);
- }
- }
- }
-
- @Override
- public ApiFuture asyncClose() {
- close();
- return ApiFutures.immediateFuture(Empty.getDefaultInstance());
- }
- }
-
private final class WaiterFuture extends ForwardingListenableFuture {
private static final long MAX_SESSION_WAIT_TIMEOUT = 240_000L;
private final SettableFuture waiter = SettableFuture.create();
@Override
+ @Nonnull
protected ListenableFuture extends PooledSession> delegate() {
return waiter;
}
@@ -2310,7 +1850,7 @@ public PooledSession get() {
long currentTimeout = options.getInitialWaitForSessionTimeoutMillis();
while (true) {
ISpan span = tracer.spanBuilder(WAIT_FOR_SESSION);
- try (IScope waitScope = tracer.withSpan(span)) {
+ try (IScope ignore = tracer.withSpan(span)) {
PooledSession s =
pollUninterruptiblyWithTimeout(currentTimeout, options.getAcquireSessionTimeout());
if (s == null) {
@@ -2395,9 +1935,6 @@ private PooledSession pollUninterruptiblyWithTimeout(
*/
final class PoolMaintainer {
- // Delay post which the maintainer will retry creating/replacing the current multiplexed session
- private final Duration multiplexedSessionCreationRetryDelay = Duration.ofMinutes(10);
-
// Length of the window in millis over which we keep track of maximum number of concurrent
// sessions in use.
private final Duration windowLength = Duration.ofMillis(TimeUnit.MINUTES.toMillis(10));
@@ -2421,8 +1958,6 @@ final class PoolMaintainer {
*/
@VisibleForTesting Instant lastExecutionTime;
- @VisibleForTesting Instant multiplexedSessionReplacementAttemptTime;
-
/**
* The previous numSessionsAcquired seen by the maintainer. This is used to calculate the
* transactions per second, which again is used to determine whether to randomize the order of
@@ -2440,7 +1975,6 @@ final class PoolMaintainer {
void init() {
lastExecutionTime = clock.instant();
- multiplexedSessionReplacementAttemptTime = clock.instant();
// Scheduled pool maintenance worker.
synchronized (lock) {
@@ -2483,7 +2017,6 @@ void maintainPool() {
this.prevNumSessionsAcquired = SessionPool.this.numSessionsAcquired;
}
Instant currTime = clock.instant();
- maintainMultiplexedSession(currTime);
removeIdleSessions(currTime);
// Now go over all the remaining sessions and see if they need to be kept alive explicitly.
keepAliveSessions(currTime);
@@ -2652,44 +2185,6 @@ private void removeLongRunningSessions(
}
}
}
-
- void maintainMultiplexedSession(Instant currentTime) {
- try {
- if (useMultiplexedSessions()) {
- if (currentMultiplexedSessionReference.get().isDone()) {
- SessionReference sessionReference = getMultiplexedSessionInstance();
- if (sessionReference != null
- && isMultiplexedSessionStale(sessionReference, currentTime)) {
- final Instant minExecutionTime =
- multiplexedSessionReplacementAttemptTime.plus(
- multiplexedSessionCreationRetryDelay);
- if (currentTime.isBefore(minExecutionTime)) {
- return;
- }
- /*
- This will attempt to create a new multiplexed session. if successfully created then
- the existing session will be replaced. Note that there maybe active transactions
- running on the stale session. Hence, it is important that we only replace the reference
- and not invoke a DeleteSession RPC.
- */
- maybeCreateMultiplexedSession(multiplexedMaintainerConsumer);
-
- // update this only after we have attempted to replace the multiplexed session
- multiplexedSessionReplacementAttemptTime = currentTime;
- }
- }
- }
- } catch (final Throwable t) {
- logger.log(Level.WARNING, "Failed to maintain multiplexed session", t);
- }
- }
-
- boolean isMultiplexedSessionStale(SessionReference sessionReference, Instant currentTime) {
- final Duration durationFromCreationTime =
- Duration.between(sessionReference.getCreateTime(), currentTime);
- return durationFromCreationTime.compareTo(options.getMultiplexedSessionMaintenanceDuration())
- > 0;
- }
}
enum Position {
@@ -2754,9 +2249,6 @@ enum Position {
@GuardedBy("lock")
private ResourceNotFoundException resourceNotFoundException;
- @GuardedBy("lock")
- private boolean stopAutomaticPrepare;
-
@GuardedBy("lock")
private final LinkedList sessions = new LinkedList<>();
@@ -2766,9 +2258,6 @@ enum Position {
@GuardedBy("lock")
private int numSessionsBeingCreated = 0;
- @GuardedBy("lock")
- private boolean multiplexedSessionBeingCreated = false;
-
@GuardedBy("lock")
private int numSessionsInUse = 0;
@@ -2790,10 +2279,7 @@ enum Position {
@GuardedBy("lock")
private long numLeakedSessionsRemoved = 0;
- private AtomicLong numWaiterTimeouts = new AtomicLong();
-
- private final AtomicReference>
- currentMultiplexedSessionReference = new AtomicReference<>(SettableApiFuture.create());
+ private final AtomicLong numWaiterTimeouts = new AtomicLong();
@GuardedBy("lock")
private final Set allSessions = new HashSet<>();
@@ -2807,21 +2293,12 @@ enum Position {
private final SessionConsumer sessionConsumer = new SessionConsumerImpl();
- private final MultiplexedSessionInitializationConsumer multiplexedSessionInitializationConsumer =
- new MultiplexedSessionInitializationConsumer();
- private final MultiplexedSessionMaintainerConsumer multiplexedMaintainerConsumer =
- new MultiplexedSessionMaintainerConsumer();
-
@VisibleForTesting Function idleSessionRemovedListener;
@VisibleForTesting Function longRunningSessionRemovedListener;
- @VisibleForTesting Function multiplexedSessionRemovedListener;
private final CountDownLatch waitOnMinSessionsLatch;
- private final CountDownLatch waitOnMultiplexedSessionsLatch;
- private final SessionReplacementHandler pooledSessionReplacementHandler =
+ private final PooledSessionReplacementHandler pooledSessionReplacementHandler =
new PooledSessionReplacementHandler();
- private static final SessionReplacementHandler multiplexedSessionReplacementHandler =
- new MultiplexedSessionReplacementHandler();
/**
* Create a session pool with the given options and for the given database. It will also start
@@ -2965,13 +2442,6 @@ private SessionPool(
openTelemetry, attributes, numMultiplexedSessionsAcquired, numMultiplexedSessionsReleased);
this.waitOnMinSessionsLatch =
options.getMinSessions() > 0 ? new CountDownLatch(1) : new CountDownLatch(0);
- this.waitOnMultiplexedSessionsLatch = new CountDownLatch(1);
- }
-
- // TODO: Remove once all code for multiplexed sessions has been removed from the pool.
- private boolean useMultiplexedSessions() {
- // Multiplexed sessions have moved to MultiplexedSessionDatabaseClient
- return false;
}
/**
@@ -3007,7 +2477,7 @@ Dialect getDialect() {
}
}
- SessionReplacementHandler getPooledSessionReplacementHandler() {
+ PooledSessionReplacementHandler getPooledSessionReplacementHandler() {
return pooledSessionReplacementHandler;
}
@@ -3087,13 +2557,6 @@ int getTotalSessionsPlusNumSessionsBeingCreated() {
}
}
- @VisibleForTesting
- boolean isMultiplexedSessionBeingCreated() {
- synchronized (lock) {
- return multiplexedSessionBeingCreated;
- }
- }
-
@VisibleForTesting
long getNumWaiterTimeouts() {
return numWaiterTimeouts.get();
@@ -3105,9 +2568,6 @@ private void initPool() {
if (options.getMinSessions() > 0) {
createSessions(options.getMinSessions(), true);
}
- if (useMultiplexedSessions()) {
- maybeCreateMultiplexedSession(multiplexedSessionInitializationConsumer);
- }
}
}
@@ -3173,36 +2633,8 @@ boolean isValid() {
* Returns a multiplexed session. The method fallbacks to a regular session if {@link
* SessionPoolOptions#getUseMultiplexedSession} is not set.
*/
- SessionFutureWrapper getMultiplexedSessionWithFallback() throws SpannerException {
- if (useMultiplexedSessions()) {
- ISpan span = tracer.getCurrentSpan();
- try {
- return getWrappedMultiplexedSessionFuture(span);
- } catch (Throwable t) {
- span.addAnnotation("No multiplexed session available.");
- throw asSpannerException(t.getCause());
- }
- } else {
- return new PooledSessionFutureWrapper(getSession());
- }
- }
-
- SessionFutureWrapper getWrappedMultiplexedSessionFuture(ISpan span) {
- return new MultiplexedSessionFutureWrapper(span);
- }
-
- /**
- * This method is a blocking method. It will block until the underlying {@code
- * SettableApiFuture} is resolved.
- */
- SessionReference getMultiplexedSessionInstance() {
- try {
- return currentMultiplexedSessionReference.get().get();
- } catch (InterruptedException e) {
- throw SpannerExceptionFactory.propagateInterrupt(e);
- } catch (ExecutionException e) {
- throw asSpannerException(e.getCause());
- }
+ PooledSessionFutureWrapper getMultiplexedSessionWithFallback() throws SpannerException {
+ return new PooledSessionFutureWrapper(getSession());
}
/**
@@ -3271,14 +2703,12 @@ private PooledSessionFuture checkoutSession(
return res;
}
- private void incrementNumSessionsInUse(boolean isMultiplexed) {
+ private void incrementNumSessionsInUse() {
synchronized (lock) {
- if (!isMultiplexed) {
- if (maxSessionsInUse < ++numSessionsInUse) {
- maxSessionsInUse = numSessionsInUse;
- }
- numSessionsAcquired++;
+ if (maxSessionsInUse < ++numSessionsInUse) {
+ maxSessionsInUse = numSessionsInUse;
}
+ numSessionsAcquired++;
}
}
@@ -3496,7 +2926,7 @@ static boolean isUnbalanced(
private void handleCreateSessionsFailure(SpannerException e, int count) {
synchronized (lock) {
for (int i = 0; i < count; i++) {
- if (waiters.size() > 0) {
+ if (!waiters.isEmpty()) {
waiters.poll().put(e);
} else {
break;
@@ -3638,20 +3068,6 @@ private boolean canCreateSession() {
}
}
- private void maybeCreateMultiplexedSession(SessionConsumer sessionConsumer) {
- synchronized (lock) {
- if (!multiplexedSessionBeingCreated) {
- logger.log(Level.FINE, String.format("Creating multiplexed sessions"));
- try {
- multiplexedSessionBeingCreated = true;
- sessionClient.asyncCreateMultiplexedSession(sessionConsumer);
- } catch (Throwable ignore) {
- // such an exception will never be thrown. the exception will be passed onto the consumer.
- }
- }
- }
- }
-
private void createSessions(final int sessionCount, boolean distributeOverChannels) {
logger.log(Level.FINE, String.format("Creating %d sessions", sessionCount));
synchronized (lock) {
@@ -3674,99 +3090,6 @@ private void createSessions(final int sessionCount, boolean distributeOverChanne
}
}
- /**
- * Callback interface which is invoked when a multiplexed session is being replaced by the
- * background maintenance thread. When a multiplexed session creation fails during background
- * thread, it would simply log the exception and retry the session creation in the next background
- * thread invocation.
- *
- *
This consumer is not used when the multiplexed session is getting initialized for the first
- * time during application startup. We instead use {@link
- * MultiplexedSessionInitializationConsumer} for the first time when multiplexed session is
- * getting created.
- */
- class MultiplexedSessionMaintainerConsumer implements SessionConsumer {
- @Override
- public void onSessionReady(SessionImpl sessionImpl) {
- final SessionReference sessionReference = sessionImpl.getSessionReference();
- final SettableFuture settableFuture = SettableFuture.create();
- settableFuture.set(sessionReference);
-
- synchronized (lock) {
- SessionReference oldSession = null;
- if (currentMultiplexedSessionReference.get().isDone()) {
- oldSession = getMultiplexedSessionInstance();
- }
- SettableApiFuture settableApiFuture = SettableApiFuture.create();
- settableApiFuture.set(sessionReference);
- currentMultiplexedSessionReference.set(settableApiFuture);
- if (oldSession != null) {
- logger.log(
- Level.INFO,
- String.format(
- "Removed Multiplexed Session => %s created at => %s",
- oldSession.getName(), oldSession.getCreateTime()));
- if (multiplexedSessionRemovedListener != null) {
- multiplexedSessionRemovedListener.apply(oldSession);
- }
- }
- multiplexedSessionBeingCreated = false;
- }
- }
-
- /**
- * Method which logs the exception so that session creation can be re-attempted in the next
- * background thread invocation.
- */
- @Override
- public void onSessionCreateFailure(Throwable t, int createFailureForSessionCount) {
- synchronized (lock) {
- multiplexedSessionBeingCreated = false;
- }
- logger.log(
- Level.WARNING,
- String.format(
- "Failed to create multiplexed session. "
- + "Pending replacing stale multiplexed session",
- t));
- }
- }
-
- /**
- * Callback interface which is invoked when a multiplexed session is getting initialised for the
- * first time when a session is getting created.
- */
- class MultiplexedSessionInitializationConsumer implements SessionConsumer {
- @Override
- public void onSessionReady(SessionImpl sessionImpl) {
- final SessionReference sessionReference = sessionImpl.getSessionReference();
- synchronized (lock) {
- SettableApiFuture settableApiFuture =
- currentMultiplexedSessionReference.get();
- settableApiFuture.set(sessionReference);
- multiplexedSessionBeingCreated = false;
- waitOnMultiplexedSessionsLatch.countDown();
- }
- }
-
- /**
- * When a multiplexed session fails during initialization we would like all pending threads to
- * receive the exception and throw the error. This is done because at the time of start up there
- * is no other multiplexed session which could have been assigned to the pending requests.
- */
- @Override
- public void onSessionCreateFailure(Throwable t, int createFailureForSessionCount) {
- synchronized (lock) {
- multiplexedSessionBeingCreated = false;
- if (isDatabaseOrInstanceNotFound(asSpannerException(t))) {
- setResourceNotFoundException((ResourceNotFoundException) t);
- poolMaintainer.close();
- }
- currentMultiplexedSessionReference.get().setException(asSpannerException(t));
- }
- }
- }
-
/**
* {@link SessionConsumer} that receives the created sessions from a {@link SessionClient} and
* releases these into the pool. The session pool only needs one instance of this, as all sessions
diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionRunnerImpl.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionRunnerImpl.java
index 692a60e97b5..7219389e775 100644
--- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionRunnerImpl.java
+++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionRunnerImpl.java
@@ -306,12 +306,23 @@ private void createTxnAsync(final SettableApiFuture res) {
void commit() {
try {
- commitResponse = commitAsync().get();
- } catch (InterruptedException e) {
+ // Normally, Gax will take care of any timeouts, but we add a timeout for getting the value
+ // from the future here as well to make sure the call always finishes, even if the future
+ // never resolves.
+ commitResponse =
+ commitAsync()
+ .get(
+ rpc.getCommitRetrySettings().getTotalTimeout().getSeconds() + 5,
+ TimeUnit.SECONDS);
+ } catch (InterruptedException | TimeoutException e) {
if (commitFuture != null) {
commitFuture.cancel(true);
}
- throw SpannerExceptionFactory.propagateInterrupt(e);
+ if (e instanceof InterruptedException) {
+ throw SpannerExceptionFactory.propagateInterrupt((InterruptedException) e);
+ } else {
+ throw SpannerExceptionFactory.propagateTimeout((TimeoutException) e);
+ }
} catch (ExecutionException e) {
throw SpannerExceptionFactory.newSpannerException(e.getCause() == null ? e : e.getCause());
}
@@ -422,6 +433,14 @@ public void run() {
commitFuture.addListener(
() -> {
try (IScope ignore = tracer.withSpan(opSpan)) {
+ if (!commitFuture.isDone()) {
+ // This should not be possible, considering that we are in a listener for the
+ // future, but we add a result here as well as a safety precaution.
+ res.setException(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INTERNAL, "commitFuture is not done"));
+ return;
+ }
com.google.spanner.v1.CommitResponse proto = commitFuture.get();
if (!proto.hasCommitTimestamp()) {
throw newSpannerException(
@@ -430,20 +449,28 @@ public void run() {
span.addAnnotation("Commit Done");
opSpan.end();
res.set(new CommitResponse(proto));
- } catch (Throwable e) {
- if (e instanceof ExecutionException) {
- e =
- SpannerExceptionFactory.newSpannerException(
- e.getCause() == null ? e : e.getCause());
- } else if (e instanceof InterruptedException) {
- e = SpannerExceptionFactory.propagateInterrupt((InterruptedException) e);
- } else {
- e = SpannerExceptionFactory.newSpannerException(e);
+ } catch (Throwable throwable) {
+ SpannerException resultException;
+ try {
+ if (throwable instanceof ExecutionException) {
+ resultException =
+ SpannerExceptionFactory.asSpannerException(
+ throwable.getCause() == null ? throwable : throwable.getCause());
+ } else if (throwable instanceof InterruptedException) {
+ resultException =
+ SpannerExceptionFactory.propagateInterrupt(
+ (InterruptedException) throwable);
+ } else {
+ resultException = SpannerExceptionFactory.asSpannerException(throwable);
+ }
+ span.addAnnotation("Commit Failed", resultException);
+ opSpan.setStatus(resultException);
+ opSpan.end();
+ res.setException(onError(resultException, false));
+ } catch (Throwable unexpectedError) {
+ // This is a safety precaution to make sure that a result is always returned.
+ res.setException(unexpectedError);
}
- span.addAnnotation("Commit Failed", e);
- opSpan.setStatus(e);
- opSpan.end();
- res.setException(onError((SpannerException) e, false));
}
},
MoreExecutors.directExecutor());
@@ -451,9 +478,6 @@ public void run() {
res.setException(SpannerExceptionFactory.propagateInterrupt(e));
} catch (TimeoutException e) {
res.setException(SpannerExceptionFactory.propagateTimeout(e));
- } catch (ExecutionException e) {
- res.setException(
- SpannerExceptionFactory.newSpannerException(e.getCause() == null ? e : e.getCause()));
} catch (Throwable e) {
res.setException(
SpannerExceptionFactory.newSpannerException(e.getCause() == null ? e : e.getCause()));
diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/DatabaseAdminStubSettings.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/DatabaseAdminStubSettings.java
index 7ee75687221..2865fcd8d08 100644
--- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/DatabaseAdminStubSettings.java
+++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/DatabaseAdminStubSettings.java
@@ -26,6 +26,7 @@
import com.google.api.core.ApiFunction;
import com.google.api.core.ApiFuture;
import com.google.api.core.BetaApi;
+import com.google.api.core.ObsoleteApi;
import com.google.api.gax.core.GaxProperties;
import com.google.api.gax.core.GoogleCredentialsProvider;
import com.google.api.gax.core.InstantiatingExecutorProvider;
@@ -774,6 +775,7 @@ public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuild
}
/** Returns the default service endpoint. */
+ @ObsoleteApi("Use getEndpoint() instead")
public static String getDefaultEndpoint() {
return "spanner.googleapis.com:443";
}
diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/InstanceAdminStubSettings.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/InstanceAdminStubSettings.java
index 9a00b312c61..a74094a3149 100644
--- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/InstanceAdminStubSettings.java
+++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/InstanceAdminStubSettings.java
@@ -25,6 +25,7 @@
import com.google.api.core.ApiFunction;
import com.google.api.core.ApiFuture;
import com.google.api.core.BetaApi;
+import com.google.api.core.ObsoleteApi;
import com.google.api.gax.core.GaxProperties;
import com.google.api.gax.core.GoogleCredentialsProvider;
import com.google.api.gax.core.InstantiatingExecutorProvider;
@@ -738,6 +739,7 @@ public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuild
}
/** Returns the default service endpoint. */
+ @ObsoleteApi("Use getEndpoint() instead")
public static String getDefaultEndpoint() {
return "spanner.googleapis.com:443";
}
diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ReadWriteTransaction.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ReadWriteTransaction.java
index 520a2e180e5..0362ffc2050 100644
--- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ReadWriteTransaction.java
+++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ReadWriteTransaction.java
@@ -461,7 +461,10 @@ public void run() {
CallType.SYNC,
SELECT1_STATEMENT,
AnalyzeMode.NONE,
- Options.tag("connection.transaction-keep-alive"));
+ Options.tag(
+ System.getProperty(
+ "spanner.connection.keep_alive_query_tag",
+ "connection.transaction-keep-alive")));
future.addListener(
ReadWriteTransaction.this::maybeScheduleKeepAlivePing, MoreExecutors.directExecutor());
}
diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/GapicSpannerRpc.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/GapicSpannerRpc.java
index b6016f04f78..00ae72f169a 100644
--- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/GapicSpannerRpc.java
+++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/GapicSpannerRpc.java
@@ -56,6 +56,7 @@
import com.google.api.pathtemplate.PathTemplate;
import com.google.cloud.RetryHelper;
import com.google.cloud.RetryHelper.RetryHelperException;
+import com.google.cloud.grpc.GcpManagedChannel;
import com.google.cloud.grpc.GcpManagedChannelBuilder;
import com.google.cloud.grpc.GcpManagedChannelOptions;
import com.google.cloud.grpc.GcpManagedChannelOptions.GcpMetricsOptions;
@@ -240,6 +241,7 @@ public class GapicSpannerRpc implements SpannerRpc {
private final Set executeQueryRetryableCodes;
private final RetrySettings readRetrySettings;
private final Set readRetryableCodes;
+ private final RetrySettings commitRetrySettings;
private final SpannerStub partitionedDmlStub;
private final RetrySettings partitionedDmlRetrySettings;
private final InstanceAdminStubSettings instanceAdminStubSettings;
@@ -266,6 +268,8 @@ public class GapicSpannerRpc implements SpannerRpc {
private static final ConcurrentMap ADMINISTRATIVE_REQUESTS_RATE_LIMITERS =
new ConcurrentHashMap<>();
private final boolean leaderAwareRoutingEnabled;
+ private final int numChannels;
+ private final boolean isGrpcGcpExtensionEnabled;
public static GapicSpannerRpc create(SpannerOptions options) {
return new GapicSpannerRpc(options);
@@ -317,6 +321,8 @@ public GapicSpannerRpc(final SpannerOptions options) {
this.callCredentialsProvider = options.getCallCredentialsProvider();
this.compressorName = options.getCompressorName();
this.leaderAwareRoutingEnabled = options.isLeaderAwareRoutingEnabled();
+ this.numChannels = options.getNumChannels();
+ this.isGrpcGcpExtensionEnabled = options.isGrpcGcpExtensionEnabled();
if (initializeStubs) {
// First check if SpannerOptions provides a TransportChannelProvider. Create one
@@ -398,6 +404,8 @@ public GapicSpannerRpc(final SpannerOptions options) {
options.getSpannerStubSettings().executeStreamingSqlSettings().getRetrySettings();
this.executeQueryRetryableCodes =
options.getSpannerStubSettings().executeStreamingSqlSettings().getRetryableCodes();
+ this.commitRetrySettings =
+ options.getSpannerStubSettings().commitSettings().getRetrySettings();
partitionedDmlRetrySettings =
options
.getSpannerStubSettings()
@@ -508,6 +516,8 @@ public UnaryCallable createUnaryCalla
this.readRetryableCodes = null;
this.executeQueryRetrySettings = null;
this.executeQueryRetryableCodes = null;
+ this.commitRetrySettings =
+ SpannerStubSettings.newBuilder().commitSettings().getRetrySettings();
this.partitionedDmlStub = null;
this.databaseAdminStubSettings = null;
this.instanceAdminStubSettings = null;
@@ -1801,6 +1811,11 @@ public CommitResponse commit(CommitRequest commitRequest, @Nullable Map
*/
public Builder clearEncryptionInfo() {
- bitField0_ = (bitField0_ & ~0x00000100);
+ bitField0_ = (bitField0_ & ~0x00000400);
encryptionInfo_ = null;
if (encryptionInfoBuilder_ != null) {
encryptionInfoBuilder_.dispose();
@@ -3515,7 +3944,7 @@ public Builder clearEncryptionInfo() {
*
*/
public com.google.spanner.admin.database.v1.EncryptionInfo.Builder getEncryptionInfoBuilder() {
- bitField0_ |= 0x00000100;
+ bitField0_ |= 0x00000400;
onChanged();
return getEncryptionInfoFieldBuilder().getBuilder();
}
@@ -3572,11 +4001,11 @@ public com.google.spanner.admin.database.v1.EncryptionInfo.Builder getEncryption
encryptionInformation_ = java.util.Collections.emptyList();
private void ensureEncryptionInformationIsMutable() {
- if (!((bitField0_ & 0x00000200) != 0)) {
+ if (!((bitField0_ & 0x00000800) != 0)) {
encryptionInformation_ =
new java.util.ArrayList(
encryptionInformation_);
- bitField0_ |= 0x00000200;
+ bitField0_ |= 0x00000800;
}
}
@@ -3873,7 +4302,7 @@ public Builder addAllEncryptionInformation(
public Builder clearEncryptionInformation() {
if (encryptionInformationBuilder_ == null) {
encryptionInformation_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000200);
+ bitField0_ = (bitField0_ & ~0x00000800);
onChanged();
} else {
encryptionInformationBuilder_.clear();
@@ -4050,7 +4479,7 @@ public Builder removeEncryptionInformation(int index) {
com.google.spanner.admin.database.v1.EncryptionInfo.Builder,
com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder>(
encryptionInformation_,
- ((bitField0_ & 0x00000200) != 0),
+ ((bitField0_ & 0x00000800) != 0),
getParentForChildren(),
isClean());
encryptionInformation_ = null;
@@ -4092,7 +4521,7 @@ public int getDatabaseDialectValue() {
*/
public Builder setDatabaseDialectValue(int value) {
databaseDialect_ = value;
- bitField0_ |= 0x00000400;
+ bitField0_ |= 0x00001000;
onChanged();
return this;
}
@@ -4135,7 +4564,7 @@ public Builder setDatabaseDialect(com.google.spanner.admin.database.v1.DatabaseD
if (value == null) {
throw new NullPointerException();
}
- bitField0_ |= 0x00000400;
+ bitField0_ |= 0x00001000;
databaseDialect_ = value.getNumber();
onChanged();
return this;
@@ -4154,7 +4583,7 @@ public Builder setDatabaseDialect(com.google.spanner.admin.database.v1.DatabaseD
* @return This builder for chaining.
*/
public Builder clearDatabaseDialect() {
- bitField0_ = (bitField0_ & ~0x00000400);
+ bitField0_ = (bitField0_ & ~0x00001000);
databaseDialect_ = 0;
onChanged();
return this;
@@ -4167,7 +4596,7 @@ private void ensureReferencingBackupsIsMutable() {
if (!referencingBackups_.isModifiable()) {
referencingBackups_ = new com.google.protobuf.LazyStringArrayList(referencingBackups_);
}
- bitField0_ |= 0x00000800;
+ bitField0_ |= 0x00002000;
}
/**
*
@@ -4287,7 +4716,7 @@ public Builder setReferencingBackups(int index, java.lang.String value) {
}
ensureReferencingBackupsIsMutable();
referencingBackups_.set(index, value);
- bitField0_ |= 0x00000800;
+ bitField0_ |= 0x00002000;
onChanged();
return this;
}
@@ -4317,7 +4746,7 @@ public Builder addReferencingBackups(java.lang.String value) {
}
ensureReferencingBackupsIsMutable();
referencingBackups_.add(value);
- bitField0_ |= 0x00000800;
+ bitField0_ |= 0x00002000;
onChanged();
return this;
}
@@ -4344,7 +4773,7 @@ public Builder addReferencingBackups(java.lang.String value) {
public Builder addAllReferencingBackups(java.lang.Iterable values) {
ensureReferencingBackupsIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(values, referencingBackups_);
- bitField0_ |= 0x00000800;
+ bitField0_ |= 0x00002000;
onChanged();
return this;
}
@@ -4369,7 +4798,7 @@ public Builder addAllReferencingBackups(java.lang.Iterable val
*/
public Builder clearReferencingBackups() {
referencingBackups_ = com.google.protobuf.LazyStringArrayList.emptyList();
- bitField0_ = (bitField0_ & ~0x00000800);
+ bitField0_ = (bitField0_ & ~0x00002000);
;
onChanged();
return this;
@@ -4401,7 +4830,7 @@ public Builder addReferencingBackupsBytes(com.google.protobuf.ByteString value)
checkByteStringIsUtf8(value);
ensureReferencingBackupsIsMutable();
referencingBackups_.add(value);
- bitField0_ |= 0x00000800;
+ bitField0_ |= 0x00002000;
onChanged();
return this;
}
@@ -4430,7 +4859,7 @@ public Builder addReferencingBackupsBytes(com.google.protobuf.ByteString value)
* @return Whether the maxExpireTime field is set.
*/
public boolean hasMaxExpireTime() {
- return ((bitField0_ & 0x00001000) != 0);
+ return ((bitField0_ & 0x00004000) != 0);
}
/**
*
@@ -4482,7 +4911,7 @@ public Builder setMaxExpireTime(com.google.protobuf.Timestamp value) {
} else {
maxExpireTimeBuilder_.setMessage(value);
}
- bitField0_ |= 0x00001000;
+ bitField0_ |= 0x00004000;
onChanged();
return this;
}
@@ -4507,7 +4936,7 @@ public Builder setMaxExpireTime(com.google.protobuf.Timestamp.Builder builderFor
} else {
maxExpireTimeBuilder_.setMessage(builderForValue.build());
}
- bitField0_ |= 0x00001000;
+ bitField0_ |= 0x00004000;
onChanged();
return this;
}
@@ -4528,7 +4957,7 @@ public Builder setMaxExpireTime(com.google.protobuf.Timestamp.Builder builderFor
*/
public Builder mergeMaxExpireTime(com.google.protobuf.Timestamp value) {
if (maxExpireTimeBuilder_ == null) {
- if (((bitField0_ & 0x00001000) != 0)
+ if (((bitField0_ & 0x00004000) != 0)
&& maxExpireTime_ != null
&& maxExpireTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) {
getMaxExpireTimeBuilder().mergeFrom(value);
@@ -4539,7 +4968,7 @@ public Builder mergeMaxExpireTime(com.google.protobuf.Timestamp value) {
maxExpireTimeBuilder_.mergeFrom(value);
}
if (maxExpireTime_ != null) {
- bitField0_ |= 0x00001000;
+ bitField0_ |= 0x00004000;
onChanged();
}
return this;
@@ -4560,7 +4989,7 @@ public Builder mergeMaxExpireTime(com.google.protobuf.Timestamp value) {
*
*/
public Builder clearMaxExpireTime() {
- bitField0_ = (bitField0_ & ~0x00001000);
+ bitField0_ = (bitField0_ & ~0x00004000);
maxExpireTime_ = null;
if (maxExpireTimeBuilder_ != null) {
maxExpireTimeBuilder_.dispose();
@@ -4585,7 +5014,7 @@ public Builder clearMaxExpireTime() {
*
*/
public com.google.protobuf.Timestamp.Builder getMaxExpireTimeBuilder() {
- bitField0_ |= 0x00001000;
+ bitField0_ |= 0x00004000;
onChanged();
return getMaxExpireTimeFieldBuilder().getBuilder();
}
@@ -4652,7 +5081,7 @@ private void ensureBackupSchedulesIsMutable() {
if (!backupSchedules_.isModifiable()) {
backupSchedules_ = new com.google.protobuf.LazyStringArrayList(backupSchedules_);
}
- bitField0_ |= 0x00002000;
+ bitField0_ |= 0x00008000;
}
/**
*
@@ -4777,7 +5206,7 @@ public Builder setBackupSchedules(int index, java.lang.String value) {
}
ensureBackupSchedulesIsMutable();
backupSchedules_.set(index, value);
- bitField0_ |= 0x00002000;
+ bitField0_ |= 0x00008000;
onChanged();
return this;
}
@@ -4808,7 +5237,7 @@ public Builder addBackupSchedules(java.lang.String value) {
}
ensureBackupSchedulesIsMutable();
backupSchedules_.add(value);
- bitField0_ |= 0x00002000;
+ bitField0_ |= 0x00008000;
onChanged();
return this;
}
@@ -4836,7 +5265,7 @@ public Builder addBackupSchedules(java.lang.String value) {
public Builder addAllBackupSchedules(java.lang.Iterable values) {
ensureBackupSchedulesIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(values, backupSchedules_);
- bitField0_ |= 0x00002000;
+ bitField0_ |= 0x00008000;
onChanged();
return this;
}
@@ -4862,7 +5291,7 @@ public Builder addAllBackupSchedules(java.lang.Iterable values
*/
public Builder clearBackupSchedules() {
backupSchedules_ = com.google.protobuf.LazyStringArrayList.emptyList();
- bitField0_ = (bitField0_ & ~0x00002000);
+ bitField0_ = (bitField0_ & ~0x00008000);
;
onChanged();
return this;
@@ -4895,10 +5324,389 @@ public Builder addBackupSchedulesBytes(com.google.protobuf.ByteString value) {
checkByteStringIsUtf8(value);
ensureBackupSchedulesIsMutable();
backupSchedules_.add(value);
- bitField0_ |= 0x00002000;
+ bitField0_ |= 0x00008000;
+ onChanged();
+ return this;
+ }
+
+ private java.lang.Object incrementalBackupChainId_ = "";
+ /**
+ *
+ *
+ *
+ * Output only. Populated only for backups in an incremental backup chain.
+ * Backups share the same chain id if and only if they belong to the same
+ * incremental backup chain. Use this field to determine which backups are
+ * part of the same incremental backup chain. The ordering of backups in the
+ * chain can be determined by ordering the backup `version_time`.
+ *
+ * Output only. Populated only for backups in an incremental backup chain.
+ * Backups share the same chain id if and only if they belong to the same
+ * incremental backup chain. Use this field to determine which backups are
+ * part of the same incremental backup chain. The ordering of backups in the
+ * chain can be determined by ordering the backup `version_time`.
+ *
+ * Output only. Populated only for backups in an incremental backup chain.
+ * Backups share the same chain id if and only if they belong to the same
+ * incremental backup chain. Use this field to determine which backups are
+ * part of the same incremental backup chain. The ordering of backups in the
+ * chain can be determined by ordering the backup `version_time`.
+ *
+ *
+ * string incremental_backup_chain_id = 17 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ *
+ *
+ * @param value The incrementalBackupChainId to set.
+ * @return This builder for chaining.
+ */
+ public Builder setIncrementalBackupChainId(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ incrementalBackupChainId_ = value;
+ bitField0_ |= 0x00010000;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Output only. Populated only for backups in an incremental backup chain.
+ * Backups share the same chain id if and only if they belong to the same
+ * incremental backup chain. Use this field to determine which backups are
+ * part of the same incremental backup chain. The ordering of backups in the
+ * chain can be determined by ordering the backup `version_time`.
+ *
+ * Output only. Populated only for backups in an incremental backup chain.
+ * Backups share the same chain id if and only if they belong to the same
+ * incremental backup chain. Use this field to determine which backups are
+ * part of the same incremental backup chain. The ordering of backups in the
+ * chain can be determined by ordering the backup `version_time`.
+ *
+ * Output only. Data deleted at a time older than this is guaranteed not to be
+ * retained in order to support this backup. For a backup in an incremental
+ * backup chain, this is the version time of the oldest backup that exists or
+ * ever existed in the chain. For all other backups, this is the version time
+ * of the backup. This field can be used to understand what data is being
+ * retained by the backup system.
+ *
+ * Output only. Data deleted at a time older than this is guaranteed not to be
+ * retained in order to support this backup. For a backup in an incremental
+ * backup chain, this is the version time of the oldest backup that exists or
+ * ever existed in the chain. For all other backups, this is the version time
+ * of the backup. This field can be used to understand what data is being
+ * retained by the backup system.
+ *
+ * Output only. Data deleted at a time older than this is guaranteed not to be
+ * retained in order to support this backup. For a backup in an incremental
+ * backup chain, this is the version time of the oldest backup that exists or
+ * ever existed in the chain. For all other backups, this is the version time
+ * of the backup. This field can be used to understand what data is being
+ * retained by the backup system.
+ *
+ * Output only. Data deleted at a time older than this is guaranteed not to be
+ * retained in order to support this backup. For a backup in an incremental
+ * backup chain, this is the version time of the oldest backup that exists or
+ * ever existed in the chain. For all other backups, this is the version time
+ * of the backup. This field can be used to understand what data is being
+ * retained by the backup system.
+ *
+ * Output only. Data deleted at a time older than this is guaranteed not to be
+ * retained in order to support this backup. For a backup in an incremental
+ * backup chain, this is the version time of the oldest backup that exists or
+ * ever existed in the chain. For all other backups, this is the version time
+ * of the backup. This field can be used to understand what data is being
+ * retained by the backup system.
+ *
+ * Output only. Data deleted at a time older than this is guaranteed not to be
+ * retained in order to support this backup. For a backup in an incremental
+ * backup chain, this is the version time of the oldest backup that exists or
+ * ever existed in the chain. For all other backups, this is the version time
+ * of the backup. This field can be used to understand what data is being
+ * retained by the backup system.
+ *
+ * Output only. Data deleted at a time older than this is guaranteed not to be
+ * retained in order to support this backup. For a backup in an incremental
+ * backup chain, this is the version time of the oldest backup that exists or
+ * ever existed in the chain. For all other backups, this is the version time
+ * of the backup. This field can be used to understand what data is being
+ * retained by the backup system.
+ *
+ * Output only. Data deleted at a time older than this is guaranteed not to be
+ * retained in order to support this backup. For a backup in an incremental
+ * backup chain, this is the version time of the oldest backup that exists or
+ * ever existed in the chain. For all other backups, this is the version time
+ * of the backup. This field can be used to understand what data is being
+ * retained by the backup system.
+ *
+ * Output only. Data deleted at a time older than this is guaranteed not to be
+ * retained in order to support this backup. For a backup in an incremental
+ * backup chain, this is the version time of the oldest backup that exists or
+ * ever existed in the chain. For all other backups, this is the version time
+ * of the backup. This field can be used to understand what data is being
+ * retained by the backup system.
+ *
+ * Output only. The number of bytes that will be freed by deleting this
+ * backup. This value will be zero if, for example, this backup is part of an
+ * incremental backup chain and younger backups in the chain require that we
+ * keep its data. For backups not in an incremental backup chain, this is
+ * always the size of the backup. This value may change if backups on the same
+ * chain get created, deleted or expired.
+ *
+ * Output only. For a backup in an incremental backup chain, this is the
+ * storage space needed to keep the data that has changed since the previous
+ * backup. For all other backups, this is always the size of the backup. This
+ * value may change if backups on the same chain get deleted or expired.
+ *
+ * This field can be used to calculate the total storage space used by a set
+ * of backups. For example, the total space used by all backups of a database
+ * can be computed by summing up this field.
+ *
+ *
+ * int64 exclusive_size_bytes = 16 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ *
+ * @return The exclusiveSizeBytes.
+ */
+ long getExclusiveSizeBytes();
+
/**
*
*
@@ -762,4 +800,95 @@ com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder getEncryptionInform
* @return The bytes of the backupSchedules at the given index.
*/
com.google.protobuf.ByteString getBackupSchedulesBytes(int index);
+
+ /**
+ *
+ *
+ *
+ * Output only. Populated only for backups in an incremental backup chain.
+ * Backups share the same chain id if and only if they belong to the same
+ * incremental backup chain. Use this field to determine which backups are
+ * part of the same incremental backup chain. The ordering of backups in the
+ * chain can be determined by ordering the backup `version_time`.
+ *
+ * Output only. Populated only for backups in an incremental backup chain.
+ * Backups share the same chain id if and only if they belong to the same
+ * incremental backup chain. Use this field to determine which backups are
+ * part of the same incremental backup chain. The ordering of backups in the
+ * chain can be determined by ordering the backup `version_time`.
+ *
+ * Output only. Data deleted at a time older than this is guaranteed not to be
+ * retained in order to support this backup. For a backup in an incremental
+ * backup chain, this is the version time of the oldest backup that exists or
+ * ever existed in the chain. For all other backups, this is the version time
+ * of the backup. This field can be used to understand what data is being
+ * retained by the backup system.
+ *
+ * Output only. Data deleted at a time older than this is guaranteed not to be
+ * retained in order to support this backup. For a backup in an incremental
+ * backup chain, this is the version time of the oldest backup that exists or
+ * ever existed in the chain. For all other backups, this is the version time
+ * of the backup. This field can be used to understand what data is being
+ * retained by the backup system.
+ *
+ * Output only. Data deleted at a time older than this is guaranteed not to be
+ * retained in order to support this backup. For a backup in an incremental
+ * backup chain, this is the version time of the oldest backup that exists or
+ * ever existed in the chain. For all other backups, this is the version time
+ * of the backup. This field can be used to understand what data is being
+ * retained by the backup system.
+ *
+ * The schedule creates incremental backup chains.
+ *
+ *
+ * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8;
+ *
+ */
+ com.google.spanner.admin.database.v1.IncrementalBackupSpecOrBuilder
+ getIncrementalBackupSpecOrBuilder();
+
/**
*
*
diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleProto.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleProto.java
index d2227049ae4..9f018585afd 100644
--- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleProto.java
+++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleProto.java
@@ -83,7 +83,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
+ "/v1/backup.proto\"i\n\022BackupScheduleSpec\022B"
+ "\n\tcron_spec\030\001 \001(\0132-.google.spanner.admin"
+ ".database.v1.CrontabSpecH\000B\017\n\rschedule_s"
- + "pec\"\310\004\n\016BackupSchedule\022\021\n\004name\030\001 \001(\tB\003\340A"
+ + "pec\"\244\005\n\016BackupSchedule\022\021\n\004name\030\001 \001(\tB\003\340A"
+ "\010\022G\n\004spec\030\006 \001(\01324.google.spanner.admin.d"
+ "atabase.v1.BackupScheduleSpecB\003\340A\001\022:\n\022re"
+ "tention_duration\030\003 \001(\0132\031.google.protobuf"
@@ -91,43 +91,46 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
+ "\0132>.google.spanner.admin.database.v1.Cre"
+ "ateBackupEncryptionConfigB\003\340A\001\022L\n\020full_b"
+ "ackup_spec\030\007 \001(\01320.google.spanner.admin."
- + "database.v1.FullBackupSpecH\000\0224\n\013update_t"
- + "ime\030\t \001(\0132\032.google.protobuf.TimestampB\003\340"
- + "A\003:\245\001\352A\241\001\n%spanner.googleapis.com/Backup"
- + "Schedule\022Wprojects/{project}/instances/{"
- + "instance}/databases/{database}/backupSch"
- + "edules/{schedule}*\017backupSchedules2\016back"
- + "upScheduleB\022\n\020backup_type_spec\"q\n\013Cronta"
- + "bSpec\022\021\n\004text\030\001 \001(\tB\003\340A\002\022\026\n\ttime_zone\030\002 "
- + "\001(\tB\003\340A\003\0227\n\017creation_window\030\003 \001(\0132\031.goog"
- + "le.protobuf.DurationB\003\340A\003\"\307\001\n\033CreateBack"
- + "upScheduleRequest\0227\n\006parent\030\001 \001(\tB\'\340A\002\372A"
- + "!\n\037spanner.googleapis.com/Database\022\037\n\022ba"
- + "ckup_schedule_id\030\002 \001(\tB\003\340A\002\022N\n\017backup_sc"
- + "hedule\030\003 \001(\01320.google.spanner.admin.data"
- + "base.v1.BackupScheduleB\003\340A\002\"W\n\030GetBackup"
- + "ScheduleRequest\022;\n\004name\030\001 \001(\tB-\340A\002\372A\'\n%s"
- + "panner.googleapis.com/BackupSchedule\"Z\n\033"
- + "DeleteBackupScheduleRequest\022;\n\004name\030\001 \001("
+ + "database.v1.FullBackupSpecH\000\022Z\n\027incremen"
+ + "tal_backup_spec\030\010 \001(\01327.google.spanner.a"
+ + "dmin.database.v1.IncrementalBackupSpecH\000"
+ + "\0224\n\013update_time\030\t \001(\0132\032.google.protobuf."
+ + "TimestampB\003\340A\003:\245\001\352A\241\001\n%spanner.googleapi"
+ + "s.com/BackupSchedule\022Wprojects/{project}"
+ + "/instances/{instance}/databases/{databas"
+ + "e}/backupSchedules/{schedule}*\017backupSch"
+ + "edules2\016backupScheduleB\022\n\020backup_type_sp"
+ + "ec\"q\n\013CrontabSpec\022\021\n\004text\030\001 \001(\tB\003\340A\002\022\026\n\t"
+ + "time_zone\030\002 \001(\tB\003\340A\003\0227\n\017creation_window\030"
+ + "\003 \001(\0132\031.google.protobuf.DurationB\003\340A\003\"\307\001"
+ + "\n\033CreateBackupScheduleRequest\0227\n\006parent\030"
+ + "\001 \001(\tB\'\340A\002\372A!\n\037spanner.googleapis.com/Da"
+ + "tabase\022\037\n\022backup_schedule_id\030\002 \001(\tB\003\340A\002\022"
+ + "N\n\017backup_schedule\030\003 \001(\01320.google.spanne"
+ + "r.admin.database.v1.BackupScheduleB\003\340A\002\""
+ + "W\n\030GetBackupScheduleRequest\022;\n\004name\030\001 \001("
+ "\tB-\340A\002\372A\'\n%spanner.googleapis.com/Backup"
- + "Schedule\"\206\001\n\032ListBackupSchedulesRequest\022"
- + "7\n\006parent\030\001 \001(\tB\'\340A\002\372A!\n\037spanner.googlea"
- + "pis.com/Database\022\026\n\tpage_size\030\002 \001(\005B\003\340A\001"
- + "\022\027\n\npage_token\030\004 \001(\tB\003\340A\001\"\202\001\n\033ListBackup"
- + "SchedulesResponse\022J\n\020backup_schedules\030\001 "
- + "\003(\01320.google.spanner.admin.database.v1.B"
- + "ackupSchedule\022\027\n\017next_page_token\030\002 \001(\t\"\243"
- + "\001\n\033UpdateBackupScheduleRequest\022N\n\017backup"
- + "_schedule\030\001 \001(\01320.google.spanner.admin.d"
- + "atabase.v1.BackupScheduleB\003\340A\002\0224\n\013update"
- + "_mask\030\002 \001(\0132\032.google.protobuf.FieldMaskB"
- + "\003\340A\002B\205\002\n$com.google.spanner.admin.databa"
- + "se.v1B\023BackupScheduleProtoP\001ZFcloud.goog"
- + "le.com/go/spanner/admin/database/apiv1/d"
- + "atabasepb;databasepb\252\002&Google.Cloud.Span"
- + "ner.Admin.Database.V1\312\002&Google\\Cloud\\Spa"
- + "nner\\Admin\\Database\\V1\352\002+Google::Cloud::"
- + "Spanner::Admin::Database::V1b\006proto3"
+ + "Schedule\"Z\n\033DeleteBackupScheduleRequest\022"
+ + ";\n\004name\030\001 \001(\tB-\340A\002\372A\'\n%spanner.googleapi"
+ + "s.com/BackupSchedule\"\206\001\n\032ListBackupSched"
+ + "ulesRequest\0227\n\006parent\030\001 \001(\tB\'\340A\002\372A!\n\037spa"
+ + "nner.googleapis.com/Database\022\026\n\tpage_siz"
+ + "e\030\002 \001(\005B\003\340A\001\022\027\n\npage_token\030\004 \001(\tB\003\340A\001\"\202\001"
+ + "\n\033ListBackupSchedulesResponse\022J\n\020backup_"
+ + "schedules\030\001 \003(\01320.google.spanner.admin.d"
+ + "atabase.v1.BackupSchedule\022\027\n\017next_page_t"
+ + "oken\030\002 \001(\t\"\243\001\n\033UpdateBackupScheduleReque"
+ + "st\022N\n\017backup_schedule\030\001 \001(\01320.google.spa"
+ + "nner.admin.database.v1.BackupScheduleB\003\340"
+ + "A\002\0224\n\013update_mask\030\002 \001(\0132\032.google.protobu"
+ + "f.FieldMaskB\003\340A\002B\205\002\n$com.google.spanner."
+ + "admin.database.v1B\023BackupScheduleProtoP\001"
+ + "ZFcloud.google.com/go/spanner/admin/data"
+ + "base/apiv1/databasepb;databasepb\252\002&Googl"
+ + "e.Cloud.Spanner.Admin.Database.V1\312\002&Goog"
+ + "le\\Cloud\\Spanner\\Admin\\Database\\V1\352\002+Goo"
+ + "gle::Cloud::Spanner::Admin::Database::V1"
+ + "b\006proto3"
};
descriptor =
com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom(
@@ -159,6 +162,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"RetentionDuration",
"EncryptionConfig",
"FullBackupSpec",
+ "IncrementalBackupSpec",
"UpdateTime",
"BackupTypeSpec",
});
diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/IncrementalBackupSpec.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/IncrementalBackupSpec.java
new file mode 100644
index 00000000000..36a605c8aad
--- /dev/null
+++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/IncrementalBackupSpec.java
@@ -0,0 +1,443 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/spanner/admin/database/v1/backup.proto
+
+// Protobuf Java Version: 3.25.3
+package com.google.spanner.admin.database.v1;
+
+/**
+ *
+ *
+ *
+ * The specification for incremental backup chains.
+ * An incremental backup stores the delta of changes between a previous
+ * backup and the database contents at a given version time. An
+ * incremental backup chain consists of a full backup and zero or more
+ * successive incremental backups. The first backup created for an
+ * incremental backup chain is always a full backup.
+ *
+ * The specification for incremental backup chains.
+ * An incremental backup stores the delta of changes between a previous
+ * backup and the database contents at a given version time. An
+ * incremental backup chain consists of a full backup and zero or more
+ * successive incremental backups. The first backup created for an
+ * incremental backup chain is always a full backup.
+ *
+ *
+ * Protobuf type {@code google.spanner.admin.database.v1.IncrementalBackupSpec}
+ */
+ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder
+ implements
+ // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.IncrementalBackupSpec)
+ com.google.spanner.admin.database.v1.IncrementalBackupSpecOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+ return com.google.spanner.admin.database.v1.BackupProto
+ .internal_static_google_spanner_admin_database_v1_IncrementalBackupSpec_descriptor;
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.spanner.admin.database.v1.BackupProto
+ .internal_static_google_spanner_admin_database_v1_IncrementalBackupSpec_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.spanner.admin.database.v1.IncrementalBackupSpec.class,
+ com.google.spanner.admin.database.v1.IncrementalBackupSpec.Builder.class);
+ }
+
+ // Construct using com.google.spanner.admin.database.v1.IncrementalBackupSpec.newBuilder()
+ private Builder() {}
+
+ private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ super(parent);
+ }
+
+ @java.lang.Override
+ public Builder clear() {
+ super.clear();
+ return this;
+ }
+
+ @java.lang.Override
+ public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
+ return com.google.spanner.admin.database.v1.BackupProto
+ .internal_static_google_spanner_admin_database_v1_IncrementalBackupSpec_descriptor;
+ }
+
+ @java.lang.Override
+ public com.google.spanner.admin.database.v1.IncrementalBackupSpec getDefaultInstanceForType() {
+ return com.google.spanner.admin.database.v1.IncrementalBackupSpec.getDefaultInstance();
+ }
+
+ @java.lang.Override
+ public com.google.spanner.admin.database.v1.IncrementalBackupSpec build() {
+ com.google.spanner.admin.database.v1.IncrementalBackupSpec result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ @java.lang.Override
+ public com.google.spanner.admin.database.v1.IncrementalBackupSpec buildPartial() {
+ com.google.spanner.admin.database.v1.IncrementalBackupSpec result =
+ new com.google.spanner.admin.database.v1.IncrementalBackupSpec(this);
+ onBuilt();
+ return result;
+ }
+
+ @java.lang.Override
+ public Builder clone() {
+ return super.clone();
+ }
+
+ @java.lang.Override
+ public Builder setField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+ return super.setField(field, value);
+ }
+
+ @java.lang.Override
+ public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
+ return super.clearField(field);
+ }
+
+ @java.lang.Override
+ public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+ return super.clearOneof(oneof);
+ }
+
+ @java.lang.Override
+ public Builder setRepeatedField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
+ return super.setRepeatedField(field, index, value);
+ }
+
+ @java.lang.Override
+ public Builder addRepeatedField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+ return super.addRepeatedField(field, value);
+ }
+
+ @java.lang.Override
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof com.google.spanner.admin.database.v1.IncrementalBackupSpec) {
+ return mergeFrom((com.google.spanner.admin.database.v1.IncrementalBackupSpec) other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(com.google.spanner.admin.database.v1.IncrementalBackupSpec other) {
+ if (other == com.google.spanner.admin.database.v1.IncrementalBackupSpec.getDefaultInstance())
+ return this;
+ this.mergeUnknownFields(other.getUnknownFields());
+ onChanged();
+ return this;
+ }
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ @java.lang.Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ if (extensionRegistry == null) {
+ throw new java.lang.NullPointerException();
+ }
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default:
+ {
+ if (!super.parseUnknownField(input, extensionRegistry, tag)) {
+ done = true; // was an endgroup tag
+ }
+ break;
+ } // default:
+ } // switch (tag)
+ } // while (!done)
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.unwrapIOException();
+ } finally {
+ onChanged();
+ } // finally
+ return this;
+ }
+
+ @java.lang.Override
+ public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFields(unknownFields);
+ }
+
+ @java.lang.Override
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+ // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.IncrementalBackupSpec)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.IncrementalBackupSpec)
+ private static final com.google.spanner.admin.database.v1.IncrementalBackupSpec DEFAULT_INSTANCE;
+
+ static {
+ DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.IncrementalBackupSpec();
+ }
+
+ public static com.google.spanner.admin.database.v1.IncrementalBackupSpec getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ private static final com.google.protobuf.Parser PARSER =
+ new com.google.protobuf.AbstractParser() {
+ @java.lang.Override
+ public IncrementalBackupSpec parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ Builder builder = newBuilder();
+ try {
+ builder.mergeFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(builder.buildPartial());
+ } catch (com.google.protobuf.UninitializedMessageException e) {
+ throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(e)
+ .setUnfinishedMessage(builder.buildPartial());
+ }
+ return builder.buildPartial();
+ }
+ };
+
+ public static com.google.protobuf.Parser parser() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public com.google.protobuf.Parser getParserForType() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public com.google.spanner.admin.database.v1.IncrementalBackupSpec getDefaultInstanceForType() {
+ return DEFAULT_INSTANCE;
+ }
+}
diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/IncrementalBackupSpecOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/IncrementalBackupSpecOrBuilder.java
new file mode 100644
index 00000000000..fa0ee3f7e71
--- /dev/null
+++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/IncrementalBackupSpecOrBuilder.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/spanner/admin/database/v1/backup.proto
+
+// Protobuf Java Version: 3.25.3
+package com.google.spanner.admin.database.v1;
+
+public interface IncrementalBackupSpecOrBuilder
+ extends
+ // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.IncrementalBackupSpec)
+ com.google.protobuf.MessageOrBuilder {}
diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/backup.proto b/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/backup.proto
index f684a4c605e..842ab0ff1e8 100644
--- a/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/backup.proto
+++ b/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/backup.proto
@@ -103,6 +103,24 @@ message Backup {
// Output only. Size of the backup in bytes.
int64 size_bytes = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
+ // Output only. The number of bytes that will be freed by deleting this
+ // backup. This value will be zero if, for example, this backup is part of an
+ // incremental backup chain and younger backups in the chain require that we
+ // keep its data. For backups not in an incremental backup chain, this is
+ // always the size of the backup. This value may change if backups on the same
+ // chain get created, deleted or expired.
+ int64 freeable_size_bytes = 15 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. For a backup in an incremental backup chain, this is the
+ // storage space needed to keep the data that has changed since the previous
+ // backup. For all other backups, this is always the size of the backup. This
+ // value may change if backups on the same chain get deleted or expired.
+ //
+ // This field can be used to calculate the total storage space used by a set
+ // of backups. For example, the total space used by all backups of a database
+ // can be computed by summing up this field.
+ int64 exclusive_size_bytes = 16 [(google.api.field_behavior) = OUTPUT_ONLY];
+
// Output only. The current state of the backup.
State state = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
@@ -168,6 +186,23 @@ message Backup {
// single backup schedule URI associated with creating this backup.
repeated string backup_schedules = 14
[(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Populated only for backups in an incremental backup chain.
+ // Backups share the same chain id if and only if they belong to the same
+ // incremental backup chain. Use this field to determine which backups are
+ // part of the same incremental backup chain. The ordering of backups in the
+ // chain can be determined by ordering the backup `version_time`.
+ string incremental_backup_chain_id = 17
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Data deleted at a time older than this is guaranteed not to be
+ // retained in order to support this backup. For a backup in an incremental
+ // backup chain, this is the version time of the oldest backup that exists or
+ // ever existed in the chain. For all other backups, this is the version time
+ // of the backup. This field can be used to understand what data is being
+ // retained by the backup system.
+ google.protobuf.Timestamp oldest_version_time = 18
+ [(google.api.field_behavior) = OUTPUT_ONLY];
}
// The request for
@@ -705,3 +740,11 @@ message CopyBackupEncryptionConfig {
// A full backup stores the entire contents of the database at a given
// version time.
message FullBackupSpec {}
+
+// The specification for incremental backup chains.
+// An incremental backup stores the delta of changes between a previous
+// backup and the database contents at a given version time. An
+// incremental backup chain consists of a full backup and zero or more
+// successive incremental backups. The first backup created for an
+// incremental backup chain is always a full backup.
+message IncrementalBackupSpec {}
diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/backup_schedule.proto b/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/backup_schedule.proto
index 9ef4587f814..c9b5e7e3f4b 100644
--- a/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/backup_schedule.proto
+++ b/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/backup_schedule.proto
@@ -83,6 +83,9 @@ message BackupSchedule {
oneof backup_type_spec {
// The schedule creates only full backups.
FullBackupSpec full_backup_spec = 7;
+
+ // The schedule creates incremental backup chains.
+ IncrementalBackupSpec incremental_backup_spec = 8;
}
// Output only. The timestamp at which the schedule was last updated.
diff --git a/proto-google-cloud-spanner-admin-instance-v1/pom.xml b/proto-google-cloud-spanner-admin-instance-v1/pom.xml
index 22bb2eb1c10..507fe2e57a5 100644
--- a/proto-google-cloud-spanner-admin-instance-v1/pom.xml
+++ b/proto-google-cloud-spanner-admin-instance-v1/pom.xml
@@ -4,13 +4,13 @@
4.0.0com.google.api.grpcproto-google-cloud-spanner-admin-instance-v1
- 6.71.1-SNAPSHOT
+ 6.72.1-SNAPSHOTproto-google-cloud-spanner-admin-instance-v1PROTO library for proto-google-cloud-spanner-admin-instance-v1com.google.cloudgoogle-cloud-spanner-parent
- 6.71.1-SNAPSHOT
+ 6.72.1-SNAPSHOT
diff --git a/proto-google-cloud-spanner-executor-v1/pom.xml b/proto-google-cloud-spanner-executor-v1/pom.xml
index f4e05e244a6..f4dc5b5bf24 100644
--- a/proto-google-cloud-spanner-executor-v1/pom.xml
+++ b/proto-google-cloud-spanner-executor-v1/pom.xml
@@ -4,13 +4,13 @@
4.0.0com.google.api.grpcproto-google-cloud-spanner-executor-v1
- 6.71.1-SNAPSHOT
+ 6.72.1-SNAPSHOTproto-google-cloud-spanner-executor-v1Proto library for google-cloud-spannercom.google.cloudgoogle-cloud-spanner-parent
- 6.71.1-SNAPSHOT
+ 6.72.1-SNAPSHOT
diff --git a/proto-google-cloud-spanner-v1/pom.xml b/proto-google-cloud-spanner-v1/pom.xml
index e554f747927..c0c5da690a9 100644
--- a/proto-google-cloud-spanner-v1/pom.xml
+++ b/proto-google-cloud-spanner-v1/pom.xml
@@ -4,13 +4,13 @@
4.0.0com.google.api.grpcproto-google-cloud-spanner-v1
- 6.71.1-SNAPSHOT
+ 6.72.1-SNAPSHOTproto-google-cloud-spanner-v1PROTO library for proto-google-cloud-spanner-v1com.google.cloudgoogle-cloud-spanner-parent
- 6.71.1-SNAPSHOT
+ 6.72.1-SNAPSHOT
diff --git a/samples/install-without-bom/pom.xml b/samples/install-without-bom/pom.xml
index 79d9c665884..56b1c708c8c 100644
--- a/samples/install-without-bom/pom.xml
+++ b/samples/install-without-bom/pom.xml
@@ -23,8 +23,8 @@
1.8UTF-80.31.1
- 2.41.0
- 3.44.0
+ 2.47.0
+ 3.48.0
@@ -33,7 +33,7 @@
com.google.cloudgoogle-cloud-spanner
- 6.67.0
+ 6.71.0
@@ -100,7 +100,7 @@
com.google.truthtruth
- 1.4.2
+ 1.4.4test
@@ -116,7 +116,7 @@
org.codehaus.mojobuild-helper-maven-plugin
- 3.5.0
+ 3.6.0add-snippets-source
@@ -145,7 +145,7 @@
org.apache.maven.pluginsmaven-failsafe-plugin
- 3.3.0
+ 3.3.1java-client-integration-tests
diff --git a/samples/native-image/pom.xml b/samples/native-image/pom.xml
index e1cedfbafae..b34fc048486 100644
--- a/samples/native-image/pom.xml
+++ b/samples/native-image/pom.xml
@@ -29,7 +29,7 @@
com.google.cloudlibraries-bom
- 26.39.0
+ 26.43.0pomimport
@@ -51,7 +51,7 @@
com.google.truthtruth
- 1.4.2
+ 1.4.4test
@@ -62,7 +62,7 @@
org.apache.maven.pluginsmaven-jar-plugin
- 3.4.1
+ 3.4.2
@@ -104,7 +104,7 @@
org.junit.vintagejunit-vintage-engine
- 5.10.2
+ 5.10.3test
diff --git a/samples/native-image/src/main/java/com/example/spanner/InstanceOperations.java b/samples/native-image/src/main/java/com/example/spanner/InstanceOperations.java
index 75efd6b45b1..cbda19236c0 100644
--- a/samples/native-image/src/main/java/com/example/spanner/InstanceOperations.java
+++ b/samples/native-image/src/main/java/com/example/spanner/InstanceOperations.java
@@ -35,7 +35,7 @@ static void createTestInstance(
InstanceInfo instanceInfo =
InstanceInfo.newBuilder(InstanceId.of(projectId, instanceId))
- .setInstanceConfigId(InstanceConfigId.of(projectId, "regional-us-central1"))
+ .setInstanceConfigId(InstanceConfigId.of(projectId, "regional-us-east4"))
.setNodeCount(1)
.setDisplayName(instanceId)
.build();
diff --git a/samples/pom.xml b/samples/pom.xml
index e99ce19e6b0..07c91c62ad0 100644
--- a/samples/pom.xml
+++ b/samples/pom.xml
@@ -48,7 +48,7 @@
org.sonatype.pluginsnexus-staging-maven-plugin
- 1.6.13
+ 1.7.0true
diff --git a/samples/snapshot/pom.xml b/samples/snapshot/pom.xml
index 3f37886a324..df9f082403d 100644
--- a/samples/snapshot/pom.xml
+++ b/samples/snapshot/pom.xml
@@ -23,8 +23,8 @@
1.8UTF-80.31.1
- 2.41.0
- 3.44.0
+ 2.47.0
+ 3.48.0
@@ -32,7 +32,7 @@
com.google.cloudgoogle-cloud-spanner
- 6.71.1-SNAPSHOT
+ 6.72.1-SNAPSHOT
@@ -99,7 +99,7 @@
com.google.truthtruth
- 1.4.2
+ 1.4.4test
@@ -115,7 +115,7 @@
org.codehaus.mojobuild-helper-maven-plugin
- 3.5.0
+ 3.6.0add-snippets-source
@@ -144,7 +144,7 @@
org.apache.maven.pluginsmaven-failsafe-plugin
- 3.3.0
+ 3.3.1java-client-integration-tests
diff --git a/samples/snippets/pom.xml b/samples/snippets/pom.xml
index ced7df0a375..c96c572d509 100644
--- a/samples/snippets/pom.xml
+++ b/samples/snippets/pom.xml
@@ -34,7 +34,7 @@
com.google.cloudlibraries-bom
- 26.39.0
+ 26.43.0pomimport
@@ -111,7 +111,7 @@
com.google.truthtruth
- 1.4.2
+ 1.4.4test
@@ -175,7 +175,7 @@
org.apache.maven.pluginsmaven-failsafe-plugin
- 3.3.0
+ 3.3.1java-client-integration-tests
@@ -192,7 +192,7 @@
org.apache.maven.pluginsmaven-checkstyle-plugin
- 3.3.1
+ 3.4.0**/SingerProto.java
diff --git a/samples/snippets/src/main/java/com/example/spanner/CreateInstanceExample.java b/samples/snippets/src/main/java/com/example/spanner/CreateInstanceExample.java
index b53727ba6d5..0bfb3a54c82 100644
--- a/samples/snippets/src/main/java/com/example/spanner/CreateInstanceExample.java
+++ b/samples/snippets/src/main/java/com/example/spanner/CreateInstanceExample.java
@@ -47,9 +47,9 @@ static void createInstance(String projectId, String instanceId) {
.setDisplayName(displayName)
.setNodeCount(nodeCount)
.setConfig(
- InstanceConfigName.of(projectId, "regional-us-central1").toString())
+ InstanceConfigName.of(projectId, "regional-us-east4").toString())
.build();
-
+
try (Spanner spanner =
SpannerOptions.newBuilder()
.setProjectId(projectId)
@@ -74,4 +74,4 @@ static void createInstance(String projectId, String instanceId) {
}
}
}
-//[END spanner_create_instance]
\ No newline at end of file
+//[END spanner_create_instance]
diff --git a/samples/snippets/src/main/java/com/example/spanner/CreateInstancePartitionSample.java b/samples/snippets/src/main/java/com/example/spanner/CreateInstancePartitionSample.java
new file mode 100644
index 00000000000..0e547bdaf7e
--- /dev/null
+++ b/samples/snippets/src/main/java/com/example/spanner/CreateInstancePartitionSample.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.spanner;
+
+// [START spanner_create_instance_partition]
+
+import com.google.cloud.spanner.Spanner;
+import com.google.cloud.spanner.SpannerOptions;
+import com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient;
+import com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest;
+import com.google.spanner.admin.instance.v1.InstanceConfigName;
+import com.google.spanner.admin.instance.v1.InstanceName;
+import com.google.spanner.admin.instance.v1.InstancePartition;
+import java.util.concurrent.ExecutionException;
+
+class CreateInstancePartitionSample {
+
+ static void createInstancePartition() {
+ // TODO(developer): Replace these variables before running the sample.
+ String projectId = "my-project";
+ String instanceId = "my-instance";
+ String instancePartitionId = "my-instance-partition";
+ createInstancePartition(projectId, instanceId, instancePartitionId);
+ }
+
+ static void createInstancePartition(
+ String projectId, String instanceId, String instancePartitionId) {
+ // Set instance partition configuration.
+ int nodeCount = 1;
+ String displayName = "Descriptive name";
+
+ // Create an InstancePartition object that will be used to create the instance partition.
+ InstancePartition instancePartition =
+ InstancePartition.newBuilder()
+ .setDisplayName(displayName)
+ .setNodeCount(nodeCount)
+ .setConfig(InstanceConfigName.of(projectId, "nam3").toString())
+ .build();
+
+ try (Spanner spanner =
+ SpannerOptions.newBuilder().setProjectId(projectId).build().getService();
+ InstanceAdminClient instanceAdminClient = spanner.createInstanceAdminClient()) {
+
+ // Wait for the createInstancePartition operation to finish.
+ InstancePartition createdInstancePartition =
+ instanceAdminClient
+ .createInstancePartitionAsync(
+ CreateInstancePartitionRequest.newBuilder()
+ .setParent(InstanceName.of(projectId, instanceId).toString())
+ .setInstancePartitionId(instancePartitionId)
+ .setInstancePartition(instancePartition)
+ .build())
+ .get();
+ System.out.printf(
+ "Instance partition %s was successfully created%n", createdInstancePartition.getName());
+ } catch (ExecutionException e) {
+ System.out.printf(
+ "Error: Creating instance partition %s failed with error message %s%n",
+ instancePartition.getName(), e.getMessage());
+ } catch (InterruptedException e) {
+ System.out.println(
+ "Error: Waiting for createInstancePartition operation to finish was interrupted");
+ }
+ }
+}
+// [END spanner_create_instance_partition]
diff --git a/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithAutoscalingConfigExample.java b/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithAutoscalingConfigExample.java
index dc62dd7a684..0a6e21ea620 100644
--- a/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithAutoscalingConfigExample.java
+++ b/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithAutoscalingConfigExample.java
@@ -45,7 +45,7 @@ static void createInstance(String projectId, String instanceId) {
.getService();
InstanceAdminClient instanceAdminClient = spanner.createInstanceAdminClient()) {
// Set Instance configuration.
- String configId = "regional-us-central1";
+ String configId = "regional-us-east4";
String displayName = "Descriptive name";
// Create an autoscaling config.
diff --git a/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithProcessingUnitsExample.java b/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithProcessingUnitsExample.java
index 293c10249c5..51133194744 100644
--- a/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithProcessingUnitsExample.java
+++ b/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithProcessingUnitsExample.java
@@ -44,7 +44,7 @@ static void createInstance(String projectId, String instanceId) {
InstanceAdminClient instanceAdminClient = spanner.createInstanceAdminClient()) {
// Set Instance configuration.
- String configId = "regional-us-central1";
+ String configId = "regional-us-east4";
// This will create an instance with the processing power of 0.2 nodes.
int processingUnits = 500;
String displayName = "Descriptive name";
diff --git a/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceExample.java b/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceExample.java
index 15b33ae8927..a17784d874b 100644
--- a/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceExample.java
+++ b/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceExample.java
@@ -42,7 +42,7 @@ static void createInstance(String projectId, String instanceId) {
InstanceAdminClient instanceAdminClient = spanner.getInstanceAdminClient();
// Set Instance configuration.
- String configId = "regional-us-central1";
+ String configId = "regional-us-east4";
int nodeCount = 2;
String displayName = "Descriptive name";
diff --git a/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceWithAutoscalingConfigExample.java b/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceWithAutoscalingConfigExample.java
index 3fe60c554bb..f8a683865ac 100644
--- a/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceWithAutoscalingConfigExample.java
+++ b/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceWithAutoscalingConfigExample.java
@@ -44,7 +44,7 @@ static void createInstance(String projectId, String instanceId) {
InstanceAdminClient instanceAdminClient = spanner.getInstanceAdminClient();
// Set Instance configuration.
- String configId = "regional-us-central1";
+ String configId = "regional-us-east4";
// Create an autoscaling config.
AutoscalingConfig autoscalingConfig =
AutoscalingConfig.newBuilder()
diff --git a/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceWithProcessingUnitsExample.java b/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceWithProcessingUnitsExample.java
index f688b4cdbf9..95d4f1b6737 100644
--- a/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceWithProcessingUnitsExample.java
+++ b/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceWithProcessingUnitsExample.java
@@ -42,7 +42,7 @@ static void createInstance(String projectId, String instanceId) {
InstanceAdminClient instanceAdminClient = spanner.getInstanceAdminClient();
// Set Instance configuration.
- String configId = "regional-us-central1";
+ String configId = "regional-us-east4";
// This will create an instance with the processing power of 0.2 nodes.
int processingUnits = 500;
String displayName = "Descriptive name";
diff --git a/samples/snippets/src/test/java/com/example/spanner/CreateInstancePartitionSampleIT.java b/samples/snippets/src/test/java/com/example/spanner/CreateInstancePartitionSampleIT.java
new file mode 100644
index 00000000000..3038d29750d
--- /dev/null
+++ b/samples/snippets/src/test/java/com/example/spanner/CreateInstancePartitionSampleIT.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.spanner;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import com.google.cloud.spanner.InstanceAdminClient;
+import com.google.cloud.spanner.InstanceConfigId;
+import com.google.cloud.spanner.InstanceId;
+import com.google.cloud.spanner.InstanceInfo;
+import com.google.spanner.admin.instance.v1.InstancePartitionName;
+import org.junit.Test;
+
+public class CreateInstancePartitionSampleIT extends SampleTestBaseV2 {
+
+ @Test
+ public void testCreateInstancePartition() throws Exception {
+ String instanceId = idGenerator.generateInstanceId();
+ InstanceAdminClient instanceAdminClient = spanner.getInstanceAdminClient();
+ instanceAdminClient
+ .createInstance(
+ InstanceInfo.newBuilder(InstanceId.of(projectId, instanceId))
+ .setDisplayName("Geo-partitioning test instance")
+ .setInstanceConfigId(InstanceConfigId.of(projectId, "regional-us-central1"))
+ .setNodeCount(1)
+ .build())
+ .get();
+
+ String instancePartitionId = "my-instance-partition";
+ String out =
+ SampleRunner.runSample(
+ () ->
+ CreateInstancePartitionSample.createInstancePartition(
+ projectId, instanceId, instancePartitionId));
+ assertThat(out)
+ .contains(
+ String.format(
+ "Instance partition %s",
+ InstancePartitionName.of(projectId, instanceId, instancePartitionId).toString()));
+ }
+}
diff --git a/versions.txt b/versions.txt
index 786797cb742..1fa5cb386e5 100644
--- a/versions.txt
+++ b/versions.txt
@@ -1,13 +1,13 @@
# Format:
# module:released-version:current-version
-proto-google-cloud-spanner-admin-instance-v1:6.71.0:6.71.1-SNAPSHOT
-proto-google-cloud-spanner-v1:6.71.0:6.71.1-SNAPSHOT
-proto-google-cloud-spanner-admin-database-v1:6.71.0:6.71.1-SNAPSHOT
-grpc-google-cloud-spanner-v1:6.71.0:6.71.1-SNAPSHOT
-grpc-google-cloud-spanner-admin-instance-v1:6.71.0:6.71.1-SNAPSHOT
-grpc-google-cloud-spanner-admin-database-v1:6.71.0:6.71.1-SNAPSHOT
-google-cloud-spanner:6.71.0:6.71.1-SNAPSHOT
-google-cloud-spanner-executor:6.71.0:6.71.1-SNAPSHOT
-proto-google-cloud-spanner-executor-v1:6.71.0:6.71.1-SNAPSHOT
-grpc-google-cloud-spanner-executor-v1:6.71.0:6.71.1-SNAPSHOT
+proto-google-cloud-spanner-admin-instance-v1:6.72.0:6.72.1-SNAPSHOT
+proto-google-cloud-spanner-v1:6.72.0:6.72.1-SNAPSHOT
+proto-google-cloud-spanner-admin-database-v1:6.72.0:6.72.1-SNAPSHOT
+grpc-google-cloud-spanner-v1:6.72.0:6.72.1-SNAPSHOT
+grpc-google-cloud-spanner-admin-instance-v1:6.72.0:6.72.1-SNAPSHOT
+grpc-google-cloud-spanner-admin-database-v1:6.72.0:6.72.1-SNAPSHOT
+google-cloud-spanner:6.72.0:6.72.1-SNAPSHOT
+google-cloud-spanner-executor:6.72.0:6.72.1-SNAPSHOT
+proto-google-cloud-spanner-executor-v1:6.72.0:6.72.1-SNAPSHOT
+grpc-google-cloud-spanner-executor-v1:6.72.0:6.72.1-SNAPSHOT