 import com.google.api.gax.rpc.StatusCode.Code;
 import com.google.cloud.firestore.v1.FirestoreSettings;
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.MoreExecutors;
 import java.util.ArrayList;
 import java.util.List;
@@ -82,6 +83,9 @@ enum OperationType {
   /** The maximum number of writes that can be in a single batch. */
   public static final int MAX_BATCH_SIZE = 20;
 
+  /** The maximum number of writes that can be in a batch containing retries. */
+  public static final int RETRY_MAX_BATCH_SIZE = 10;
+
   /**
    * The maximum number of retries that will be attempted with backoff before stopping all retry
    * attempts.
@@ -237,7 +241,7 @@ public boolean onError(BulkWriterException error) {
             : Executors.newSingleThreadScheduledExecutor();
     this.successExecutor = MoreExecutors.directExecutor();
     this.errorExecutor = MoreExecutors.directExecutor();
-    this.bulkCommitBatch = new BulkCommitBatch(firestore, bulkWriterExecutor);
+    this.bulkCommitBatch = new BulkCommitBatch(firestore, bulkWriterExecutor, maxBatchSize);
 
     if (!options.getThrottlingEnabled()) {
       this.rateLimiter =
@@ -962,7 +966,7 @@ private void scheduleCurrentBatchLocked(final boolean flush) {
     if (bulkCommitBatch.getMutationsSize() == 0) return;
 
     final BulkCommitBatch pendingBatch = bulkCommitBatch;
-    bulkCommitBatch = new BulkCommitBatch(firestore, bulkWriterExecutor);
+    bulkCommitBatch = new BulkCommitBatch(firestore, bulkWriterExecutor, maxBatchSize);
 
     // Use the write with the longest backoff duration when determining backoff.
     int highestBackoffDuration = 0;
@@ -1025,7 +1029,10 @@ public void run() {
 
   @VisibleForTesting
   void setMaxBatchSize(int size) {
+    Preconditions.checkState(
+        bulkCommitBatch.getMutationsSize() == 0, "BulkCommitBatch should be empty");
     maxBatchSize = size;
+    bulkCommitBatch = new BulkCommitBatch(firestore, bulkWriterExecutor, size);
   }
 
   @VisibleForTesting
@@ -1050,6 +1057,16 @@ void setMaxPendingOpCount(int newMax) {
   private void sendOperationLocked(
       ApiFunction<BulkCommitBatch, ApiFuture<WriteResult>> enqueueOperationOnBatchCallback,
       final BulkWriterOperation op) {
+    // A backoff duration greater than 0 implies that this batch is a retry.
+    // Retried writes are sent with a batch size of 10 in order to guarantee
+    // that the batch is under the 10MiB limit.
+    if (op.getBackoffDuration() > 0) {
+      if (bulkCommitBatch.getMutationsSize() >= RETRY_MAX_BATCH_SIZE) {
+        scheduleCurrentBatchLocked(/* flush= */ false);
+      }
+      bulkCommitBatch.setMaxBatchSize(RETRY_MAX_BATCH_SIZE);
+    }
+
     if (bulkCommitBatch.has(op.getDocumentReference())) {
       // Create a new batch since the backend doesn't support batches with two writes to the same
       // document.
@@ -1062,7 +1079,7 @@ private void sendOperationLocked(
     bulkCommitBatch.enqueueOperation(op);
     enqueueOperationOnBatchCallback.apply(bulkCommitBatch);
 
-    if (bulkCommitBatch.getMutationsSize() == maxBatchSize) {
+    if (bulkCommitBatch.getMutationsSize() == bulkCommitBatch.getMaxBatchSize()) {
      scheduleCurrentBatchLocked(/* flush= */ false);
    }
  }
0 commit comments