data,
* must have {@code data[i].shape = indices[i].shape + constant}. In terms of this
* {@code constant}, the output shape is
*
- * merged.shape = [max(indices)] + constant
+ * merged.shape = [max(indices) + 1] + constant
*
* Values are merged in order, so if an index appears in both {@code indices[m][i]} and
* {@code indices[n][j]} for {@code (m,i) < (n,j)} the slice {@code data[n][j]} will appear in the
@@ -7599,7 +7599,7 @@ public TensorStridedSliceUpdate tensorSt
*
*
* @param data type for {@code output} output
- * @param input 1-D or higher.
+ * @param input Can be of any rank.
* @param multiples 1-D. Length must be the same as the number of dimensions in {@code input}
* @param data type for {@code Tile} output and operands
* @return a new instance of Tile
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/TpuOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/TpuOps.java
index 83e6f9d60b0..9e2411bc0a1 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/TpuOps.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/TpuOps.java
@@ -24,19 +24,29 @@
import org.tensorflow.op.tpu.CollateTPUEmbeddingMemory;
import org.tensorflow.op.tpu.Compile;
import org.tensorflow.op.tpu.CompileSucceededAssert;
+import org.tensorflow.op.tpu.ComputeDedupDataSize;
import org.tensorflow.op.tpu.ConfigureAndInitializeGlobalTPU;
import org.tensorflow.op.tpu.ConfigureTPUEmbeddingHost;
import org.tensorflow.op.tpu.ConfigureTPUEmbeddingMemory;
import org.tensorflow.op.tpu.ConnectTPUEmbeddingHosts;
+import org.tensorflow.op.tpu.ConvertToCooTensor;
import org.tensorflow.op.tpu.DTensorRestore;
import org.tensorflow.op.tpu.Execute;
import org.tensorflow.op.tpu.ExecuteAndUpdateVariables;
import org.tensorflow.op.tpu.ExecuteTPUEmbeddingPartitioner;
import org.tensorflow.op.tpu.FinalizeTPUEmbedding;
+import org.tensorflow.op.tpu.GetMinibatchSplitsWithPhysicalReplica;
+import org.tensorflow.op.tpu.GetMinibatchesInCsrWithPhysicalReplica;
+import org.tensorflow.op.tpu.GlobalIterId;
import org.tensorflow.op.tpu.PartitionedOutput;
import org.tensorflow.op.tpu.ShutdownTPUSystem;
+import org.tensorflow.op.tpu.StoreMinibatchStatisticsInFdo;
+import org.tensorflow.op.tpu.TPUAnnotateTensorsWithDynamicShape;
+import org.tensorflow.op.tpu.TPUCopyWithDynamicShape;
import org.tensorflow.op.tpu.TPURoundRobin;
import org.tensorflow.op.tpu.TpuHandleToProtoKey;
+import org.tensorflow.types.TFloat32;
+import org.tensorflow.types.TInt32;
import org.tensorflow.types.TInt64;
import org.tensorflow.types.TString;
import org.tensorflow.types.family.TType;
@@ -111,6 +121,19 @@ public CompileSucceededAssert compileSucceededAssert(Operand compilatio
return CompileSucceededAssert.create(scope, compilationStatus);
}
+ /**
+ * An op computes the size of the deduplication data from embedding core and returns the updated config.
+ * This op is to compute size of the deduplication data so to provide this
+ * information to the op that computes the tuple mask of deduplication data can
+ * have static output shape.
+ *
+ * @param config Serialized TPUEmbeddingConfiguration proto.
+ * @return a new instance of ComputeDedupDataSize
+ */
+ public ComputeDedupDataSize computeDedupDataSize(String config) {
+ return ComputeDedupDataSize.create(scope, config);
+ }
+
/**
* An op that sets up the centralized structures for a distributed TPU system.
*
@@ -163,6 +186,21 @@ public ConnectTPUEmbeddingHosts connectTPUEmbeddingHosts(
return ConnectTPUEmbeddingHosts.create(scope, networkConfigs);
}
+ /**
+ * The ConvertToCooTensor operation
+ *
+ * @param indicesOrRowSplits The indicesOrRowSplits value
+ * @param values The values value
+ * @param weights The weights value
+ * @param sampleCount The value of the sampleCount attribute
+ * @param combiner The value of the combiner attribute
+ * @return a new instance of ConvertToCooTensor
+ */
+ public ConvertToCooTensor convertToCooTensor(Operand<TInt32> indicesOrRowSplits,
+ Operand<TInt32> values, Operand<TFloat32> weights, Long sampleCount, String combiner) {
+ return ConvertToCooTensor.create(scope, indicesOrRowSplits, values, weights, sampleCount, combiner);
+ }
+
/**
* The DTensorRestoreV2 operation
*
@@ -244,6 +282,66 @@ public FinalizeTPUEmbedding finalizeTPUEmbedding(Operand commonConfig,
return FinalizeTPUEmbedding.create(scope, commonConfig, memoryConfig);
}
+ /**
+ * The GetMinibatchSplitsWithPhysicalReplica operation
+ *
+ * @param programKey The programKey value
+ * @param rowIds The rowIds value
+ * @param colIds The colIds value
+ * @param gains The gains value
+ * @param sampleCount The value of the sampleCount attribute
+ * @param numReplica The value of the numReplica attribute
+ * @param tableVocabSize The value of the tableVocabSize attribute
+ * @param featureWidth The value of the featureWidth attribute
+ * @param numScPerChip The value of the numScPerChip attribute
+ * @param tableName The value of the tableName attribute
+ * @param miniBatchSplits The value of the miniBatchSplits attribute
+ * @return a new instance of GetMinibatchSplitsWithPhysicalReplica
+ */
+ public GetMinibatchSplitsWithPhysicalReplica getMinibatchSplitsWithPhysicalReplica(
+ Operand<TString> programKey, Operand<TInt32> rowIds, Operand<TInt32> colIds,
+ Operand<TFloat32> gains, Long sampleCount, Long numReplica, Long tableVocabSize,
+ Long featureWidth, Long numScPerChip, String tableName, String miniBatchSplits) {
+ return GetMinibatchSplitsWithPhysicalReplica.create(scope, programKey, rowIds, colIds, gains, sampleCount, numReplica, tableVocabSize, featureWidth, numScPerChip, tableName, miniBatchSplits);
+ }
+
+ /**
+ * The GetMinibatchesInCsrWithPhysicalReplica operation
+ *
+ * @param programKey The programKey value
+ * @param rowIds The rowIds value
+ * @param colIds The colIds value
+ * @param gains The gains value
+ * @param splits The splits value
+ * @param idCounts The idCounts value
+ * @param sampleCount The value of the sampleCount attribute
+ * @param numReplica The value of the numReplica attribute
+ * @param maxMinibatchesPerSc The value of the maxMinibatchesPerSc attribute
+ * @param maxIdsPerChipPerSample The value of the maxIdsPerChipPerSample attribute
+ * @param tableVocabSize The value of the tableVocabSize attribute
+ * @param featureWidth The value of the featureWidth attribute
+ * @param numScPerChip The value of the numScPerChip attribute
+ * @param tableName The value of the tableName attribute
+ * @param miniBatchInCsr The value of the miniBatchInCsr attribute
+ * @return a new instance of GetMinibatchesInCsrWithPhysicalReplica
+ */
+ public GetMinibatchesInCsrWithPhysicalReplica getMinibatchesInCsrWithPhysicalReplica(
+ Operand<TString> programKey, Operand<TInt32> rowIds, Operand<TInt32> colIds,
+ Operand<TFloat32> gains, Operand<TInt64> splits, Operand<TInt32> idCounts, Long sampleCount,
+ Long numReplica, Long maxMinibatchesPerSc, Long maxIdsPerChipPerSample, Long tableVocabSize,
+ Long featureWidth, Long numScPerChip, String tableName, String miniBatchInCsr) {
+ return GetMinibatchesInCsrWithPhysicalReplica.create(scope, programKey, rowIds, colIds, gains, splits, idCounts, sampleCount, numReplica, maxMinibatchesPerSc, maxIdsPerChipPerSample, tableVocabSize, featureWidth, numScPerChip, tableName, miniBatchInCsr);
+ }
+
+ /**
+ * The GlobalIterId operation
+ *
+ * @return a new instance of GlobalIterId
+ */
+ public GlobalIterId globalIterId() {
+ return GlobalIterId.create(scope);
+ }
+
/**
* An op that demultiplexes a tensor to be sharded by XLA to a list of partitioned
* outputs outside the XLA computation. Supports ND sharding.
@@ -270,6 +368,50 @@ public ShutdownTPUSystem shutdownTPUSystem() {
return ShutdownTPUSystem.create(scope);
}
+ /**
+ * The StoreMinibatchStatisticsInFdo operation
+ *
+ * @param programKey The programKey value
+ * @param maxIds The maxIds value
+ * @param maxUniques The maxUniques value
+ * @param sampleCount The value of the sampleCount attribute
+ * @param numReplica The value of the numReplica attribute
+ * @param featureWidth The value of the featureWidth attribute
+ * @param numScPerChip The value of the numScPerChip attribute
+ * @param tableName The value of the tableName attribute
+ * @param miniBatchSplits The value of the miniBatchSplits attribute
+ * @return a new instance of StoreMinibatchStatisticsInFdo
+ */
+ public StoreMinibatchStatisticsInFdo storeMinibatchStatisticsInFdo(Operand<TString> programKey,
+ Operand<TInt32> maxIds, Operand<TInt32> maxUniques, Long sampleCount, Long numReplica,
+ Long featureWidth, Long numScPerChip, String tableName, String miniBatchSplits) {
+ return StoreMinibatchStatisticsInFdo.create(scope, programKey, maxIds, maxUniques, sampleCount, numReplica, featureWidth, numScPerChip, tableName, miniBatchSplits);
+ }
+
+ /**
+ * The TPUAnnotateTensorsWithDynamicShape operation
+ *
+ * @param tensors The tensors value
+ * @return a new instance of TPUAnnotateTensorsWithDynamicShape
+ */
+ public TPUAnnotateTensorsWithDynamicShape tPUAnnotateTensorsWithDynamicShape(
+ Iterable<Operand<?>> tensors) {
+ return TPUAnnotateTensorsWithDynamicShape.create(scope, tensors);
+ }
+
+ /**
+ * Op that copies host tensor to device with dynamic shape support.
+ * For internal use only.
+ *
+ * @param tensors The tensors value
+ * @param unpaddedSizes The unpaddedSizes value
+ * @return a new instance of TPUCopyWithDynamicShape
+ */
+ public TPUCopyWithDynamicShape tPUCopyWithDynamicShape(Iterable<Operand<?>> tensors,
+ Iterable<Operand<TInt32>> unpaddedSizes) {
+ return TPUCopyWithDynamicShape.create(scope, tensors, unpaddedSizes);
+ }
+
/**
* Round-robin load balancing on TPU cores.
* A load balancing op that round-robins among TPU cores.
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/XlaOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/XlaOps.java
index 1136fb98bf1..116388309ba 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/XlaOps.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/XlaOps.java
@@ -24,6 +24,20 @@
import org.tensorflow.op.xla.XlaHostCompute;
import org.tensorflow.op.xla.XlaRecvFromHost;
import org.tensorflow.op.xla.XlaSendToHost;
+import org.tensorflow.op.xla.XlaSparseCoreAdagrad;
+import org.tensorflow.op.xla.XlaSparseCoreAdagradMomentum;
+import org.tensorflow.op.xla.XlaSparseCoreAdam;
+import org.tensorflow.op.xla.XlaSparseCoreFtrl;
+import org.tensorflow.op.xla.XlaSparseCoreSgd;
+import org.tensorflow.op.xla.XlaSparseDenseMatmul;
+import org.tensorflow.op.xla.XlaSparseDenseMatmulGradWithAdagradAndCsrInput;
+import org.tensorflow.op.xla.XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput;
+import org.tensorflow.op.xla.XlaSparseDenseMatmulGradWithAdamAndCsrInput;
+import org.tensorflow.op.xla.XlaSparseDenseMatmulGradWithFtrlAndCsrInput;
+import org.tensorflow.op.xla.XlaSparseDenseMatmulGradWithSgdAndCsrInput;
+import org.tensorflow.op.xla.XlaSparseDenseMatmulWithCsrInput;
+import org.tensorflow.types.TFloat32;
+import org.tensorflow.types.TInt32;
import org.tensorflow.types.family.TType;
/**
@@ -94,6 +108,304 @@ public XlaSendToHost xlaSendToHost(Operand extends TType> input, String key) {
return XlaSendToHost.create(scope, input, key);
}
+ /**
+ * The XlaSparseCoreAdagrad operation
+ *
+ * @param indices The indices value
+ * @param gradient The gradient value
+ * @param learningRate The learningRate value
+ * @param accumulator The accumulator value
+ * @param embeddingTable The embeddingTable value
+ * @param featureWidth The value of the featureWidth attribute
+ * @return a new instance of XlaSparseCoreAdagrad
+ */
+ public XlaSparseCoreAdagrad xlaSparseCoreAdagrad(Operand<TInt32> indices,
+ Operand<TFloat32> gradient, Operand<TFloat32> learningRate, Operand<TFloat32> accumulator,
+ Operand<TFloat32> embeddingTable, Long featureWidth) {
+ return XlaSparseCoreAdagrad.create(scope, indices, gradient, learningRate, accumulator, embeddingTable, featureWidth);
+ }
+
+ /**
+ * The XlaSparseCoreAdagradMomentum operation
+ *
+ * @param indices The indices value
+ * @param gradient The gradient value
+ * @param learningRate The learningRate value
+ * @param beta1 The beta1 value
+ * @param epsilon The epsilon value
+ * @param accumulator The accumulator value
+ * @param momentum The momentum value
+ * @param embeddingTable The embeddingTable value
+ * @param featureWidth The value of the featureWidth attribute
+ * @param useNesterov The value of the useNesterov attribute
+ * @param beta2 The value of the beta2 attribute
+ * @param exponent The value of the exponent attribute
+ * @return a new instance of XlaSparseCoreAdagradMomentum
+ */
+ public XlaSparseCoreAdagradMomentum xlaSparseCoreAdagradMomentum(Operand<TInt32> indices,
+ Operand<TFloat32> gradient, Operand<TFloat32> learningRate, Operand<TFloat32> beta1,
+ Operand<TFloat32> epsilon, Operand<TFloat32> accumulator, Operand<TFloat32> momentum,
+ Operand<TFloat32> embeddingTable, Long featureWidth, Boolean useNesterov, Float beta2,
+ Float exponent) {
+ return XlaSparseCoreAdagradMomentum.create(scope, indices, gradient, learningRate, beta1, epsilon, accumulator, momentum, embeddingTable, featureWidth, useNesterov, beta2, exponent);
+ }
+
+ /**
+ * The XlaSparseCoreAdam operation
+ *
+ * @param embeddingTable The embeddingTable value
+ * @param indices The indices value
+ * @param gradient The gradient value
+ * @param learningRate The learningRate value
+ * @param momentum The momentum value
+ * @param velocity The velocity value
+ * @param beta1 The beta1 value
+ * @param beta2 The beta2 value
+ * @param epsilon The epsilon value
+ * @param featureWidth The value of the featureWidth attribute
+ * @param useSumInsideSqrt The value of the useSumInsideSqrt attribute
+ * @return a new instance of XlaSparseCoreAdam
+ */
+ public XlaSparseCoreAdam xlaSparseCoreAdam(Operand<TFloat32> embeddingTable,
+ Operand<TInt32> indices, Operand<TFloat32> gradient, Operand<TFloat32> learningRate,
+ Operand<TFloat32> momentum, Operand<TFloat32> velocity, Operand<TFloat32> beta1,
+ Operand<TFloat32> beta2, Operand<TFloat32> epsilon, Long featureWidth,
+ Boolean useSumInsideSqrt) {
+ return XlaSparseCoreAdam.create(scope, embeddingTable, indices, gradient, learningRate, momentum, velocity, beta1, beta2, epsilon, featureWidth, useSumInsideSqrt);
+ }
+
+ /**
+ * The XlaSparseCoreFtrl operation
+ *
+ * @param embeddingTable The embeddingTable value
+ * @param accumulator The accumulator value
+ * @param linear The linear value
+ * @param learningRate The learningRate value
+ * @param indices The indices value
+ * @param gradient The gradient value
+ * @param beta The beta value
+ * @param learningRatePower The learningRatePower value
+ * @param l2RegularizationStrength The l2RegularizationStrength value
+ * @param featureWidth The value of the featureWidth attribute
+ * @param multiplyLinearByLearningRate The value of the multiplyLinearByLearningRate attribute
+ * @param l1RegularizationStrength The value of the l1RegularizationStrength attribute
+ * @return a new instance of XlaSparseCoreFtrl
+ */
+ public XlaSparseCoreFtrl xlaSparseCoreFtrl(Operand<TFloat32> embeddingTable,
+ Operand<TFloat32> accumulator, Operand<TFloat32> linear, Operand<TFloat32> learningRate,
+ Operand<TInt32> indices, Operand<TFloat32> gradient, Operand<TFloat32> beta,
+ Operand<TFloat32> learningRatePower, Operand<TFloat32> l2RegularizationStrength,
+ Long featureWidth, Boolean multiplyLinearByLearningRate, Float l1RegularizationStrength) {
+ return XlaSparseCoreFtrl.create(scope, embeddingTable, accumulator, linear, learningRate, indices, gradient, beta, learningRatePower, l2RegularizationStrength, featureWidth, multiplyLinearByLearningRate, l1RegularizationStrength);
+ }
+
+ /**
+ * The XlaSparseCoreSgd operation
+ *
+ * @param indices The indices value
+ * @param gradient The gradient value
+ * @param learningRate The learningRate value
+ * @param embeddingTable The embeddingTable value
+ * @param featureWidth The value of the featureWidth attribute
+ * @return a new instance of XlaSparseCoreSgd
+ */
+ public XlaSparseCoreSgd xlaSparseCoreSgd(Operand<TInt32> indices, Operand<TFloat32> gradient,
+ Operand<TFloat32> learningRate, Operand<TFloat32> embeddingTable, Long featureWidth) {
+ return XlaSparseCoreSgd.create(scope, indices, gradient, learningRate, embeddingTable, featureWidth);
+ }
+
+ /**
+ * The XlaSparseDenseMatmul operation
+ *
+ * @param rowIds The rowIds value
+ * @param colIds The colIds value
+ * @param values The values value
+ * @param offsets The offsets value
+ * @param embeddingTable The embeddingTable value
+ * @param maxIdsPerPartition The value of the maxIdsPerPartition attribute
+ * @param maxUniqueIdsPerPartition The value of the maxUniqueIdsPerPartition attribute
+ * @param inputSize The value of the inputSize attribute
+ * @return a new instance of XlaSparseDenseMatmul
+ */
+ public XlaSparseDenseMatmul xlaSparseDenseMatmul(Operand<TInt32> rowIds,
+ Operand<? extends TType> colIds, Operand<TInt32> values, Operand<? extends TType> offsets,
+ Operand<TFloat32> embeddingTable, Long maxIdsPerPartition, Long maxUniqueIdsPerPartition,
+ Long inputSize) {
+ return XlaSparseDenseMatmul.create(scope, rowIds, colIds, values, offsets, embeddingTable, maxIdsPerPartition, maxUniqueIdsPerPartition, inputSize);
+ }
+
+ /**
+ * The XlaSparseDenseMatmulGradWithAdagradAndCsrInput operation
+ *
+ * @param rowPointers The rowPointers value
+ * @param sortedSampleIds The sortedSampleIds value
+ * @param sortedTokenIds The sortedTokenIds value
+ * @param sortedGains The sortedGains value
+ * @param activationGradients The activationGradients value
+ * @param learningRate The learningRate value
+ * @param embeddingTable The embeddingTable value
+ * @param accumulator The accumulator value
+ * @param numMinibatchesPerPhysicalSparseCore The numMinibatchesPerPhysicalSparseCore value
+ * @param tableName The value of the tableName attribute
+ * @param options carries optional attribute values
+ * @return a new instance of XlaSparseDenseMatmulGradWithAdagradAndCsrInput
+ */
+ public XlaSparseDenseMatmulGradWithAdagradAndCsrInput xlaSparseDenseMatmulGradWithAdagradAndCsrInput(
+ Operand<TInt32> rowPointers, Operand<TInt32> sortedSampleIds, Operand<TInt32> sortedTokenIds,
+ Operand<TFloat32> sortedGains, Operand<TFloat32> activationGradients,
+ Operand<TFloat32> learningRate, Operand<TFloat32> embeddingTable,
+ Operand<TFloat32> accumulator, Operand<TInt32> numMinibatchesPerPhysicalSparseCore,
+ String tableName, XlaSparseDenseMatmulGradWithAdagradAndCsrInput.Options... options) {
+ return XlaSparseDenseMatmulGradWithAdagradAndCsrInput.create(scope, rowPointers, sortedSampleIds, sortedTokenIds, sortedGains, activationGradients, learningRate, embeddingTable, accumulator, numMinibatchesPerPhysicalSparseCore, tableName, options);
+ }
+
+ /**
+ * The XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput operation
+ *
+ * @param rowPointers The rowPointers value
+ * @param sortedSampleIds The sortedSampleIds value
+ * @param sortedTokenIds The sortedTokenIds value
+ * @param sortedGains The sortedGains value
+ * @param activationGradients The activationGradients value
+ * @param learningRate The learningRate value
+ * @param embeddingTable The embeddingTable value
+ * @param accumulator The accumulator value
+ * @param momenta The momenta value
+ * @param numMinibatchesPerPhysicalSparseCore The numMinibatchesPerPhysicalSparseCore value
+ * @param useNesterov The value of the useNesterov attribute
+ * @param exponent The value of the exponent attribute
+ * @param beta1 The value of the beta1 attribute
+ * @param beta2 The value of the beta2 attribute
+ * @param epsilon The value of the epsilon attribute
+ * @param tableName The value of the tableName attribute
+ * @param options carries optional attribute values
+ * @return a new instance of XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput
+ */
+ public XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput xlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput(
+ Operand<TInt32> rowPointers, Operand<TInt32> sortedSampleIds, Operand<TInt32> sortedTokenIds,
+ Operand<TFloat32> sortedGains, Operand<TFloat32> activationGradients,
+ Operand<TFloat32> learningRate, Operand<TFloat32> embeddingTable,
+ Operand<TFloat32> accumulator, Operand<TFloat32> momenta,
+ Operand<TInt32> numMinibatchesPerPhysicalSparseCore, Boolean useNesterov, Float exponent,
+ Float beta1, Float beta2, Float epsilon, String tableName,
+ XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput.Options... options) {
+ return XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput.create(scope, rowPointers, sortedSampleIds, sortedTokenIds, sortedGains, activationGradients, learningRate, embeddingTable, accumulator, momenta, numMinibatchesPerPhysicalSparseCore, useNesterov, exponent, beta1, beta2, epsilon, tableName, options);
+ }
+
+ /**
+ * The XlaSparseDenseMatmulGradWithAdamAndCsrInput operation
+ *
+ * @param rowPointers The rowPointers value
+ * @param sortedSampleIds The sortedSampleIds value
+ * @param sortedTokenIds The sortedTokenIds value
+ * @param sortedGains The sortedGains value
+ * @param activationGradients The activationGradients value
+ * @param learningRate The learningRate value
+ * @param embeddingTable The embeddingTable value
+ * @param momenta The momenta value
+ * @param velocity The velocity value
+ * @param numMinibatchesPerPhysicalSparseCore The numMinibatchesPerPhysicalSparseCore value
+ * @param useSumInsideSqrt The value of the useSumInsideSqrt attribute
+ * @param beta1 The value of the beta1 attribute
+ * @param beta2 The value of the beta2 attribute
+ * @param epsilon The value of the epsilon attribute
+ * @param tableName The value of the tableName attribute
+ * @param options carries optional attribute values
+ * @return a new instance of XlaSparseDenseMatmulGradWithAdamAndCsrInput
+ */
+ public XlaSparseDenseMatmulGradWithAdamAndCsrInput xlaSparseDenseMatmulGradWithAdamAndCsrInput(
+ Operand<TInt32> rowPointers, Operand<TInt32> sortedSampleIds, Operand<TInt32> sortedTokenIds,
+ Operand<TFloat32> sortedGains, Operand<TFloat32> activationGradients,
+ Operand<TFloat32> learningRate, Operand<TFloat32> embeddingTable, Operand<TFloat32> momenta,
+ Operand<TFloat32> velocity, Operand<TInt32> numMinibatchesPerPhysicalSparseCore,
+ Boolean useSumInsideSqrt, Float beta1, Float beta2, Float epsilon, String tableName,
+ XlaSparseDenseMatmulGradWithAdamAndCsrInput.Options... options) {
+ return XlaSparseDenseMatmulGradWithAdamAndCsrInput.create(scope, rowPointers, sortedSampleIds, sortedTokenIds, sortedGains, activationGradients, learningRate, embeddingTable, momenta, velocity, numMinibatchesPerPhysicalSparseCore, useSumInsideSqrt, beta1, beta2, epsilon, tableName, options);
+ }
+
+ /**
+ * The XlaSparseDenseMatmulGradWithFtrlAndCsrInput operation
+ *
+ * @param rowPointers The rowPointers value
+ * @param sortedSampleIds The sortedSampleIds value
+ * @param sortedTokenIds The sortedTokenIds value
+ * @param sortedGains The sortedGains value
+ * @param activationGradients The activationGradients value
+ * @param learningRate The learningRate value
+ * @param embeddingTable The embeddingTable value
+ * @param accumulator The accumulator value
+ * @param linear The linear value
+ * @param numMinibatchesPerPhysicalSparseCore The numMinibatchesPerPhysicalSparseCore value
+ * @param multiplyLinearByLearningRate The value of the multiplyLinearByLearningRate attribute
+ * @param beta The value of the beta attribute
+ * @param learningRatePower The value of the learningRatePower attribute
+ * @param l1RegularizationStrength The value of the l1RegularizationStrength attribute
+ * @param l2RegularizationStrength The value of the l2RegularizationStrength attribute
+ * @param tableName The value of the tableName attribute
+ * @param options carries optional attribute values
+ * @return a new instance of XlaSparseDenseMatmulGradWithFtrlAndCsrInput
+ */
+ public XlaSparseDenseMatmulGradWithFtrlAndCsrInput xlaSparseDenseMatmulGradWithFtrlAndCsrInput(
+ Operand<TInt32> rowPointers, Operand<TInt32> sortedSampleIds, Operand<TInt32> sortedTokenIds,
+ Operand<TFloat32> sortedGains, Operand<TFloat32> activationGradients,
+ Operand<TFloat32> learningRate, Operand<TFloat32> embeddingTable,
+ Operand<TFloat32> accumulator, Operand<TFloat32> linear,
+ Operand<TInt32> numMinibatchesPerPhysicalSparseCore, Boolean multiplyLinearByLearningRate,
+ Float beta, Float learningRatePower, Float l1RegularizationStrength,
+ Float l2RegularizationStrength, String tableName,
+ XlaSparseDenseMatmulGradWithFtrlAndCsrInput.Options... options) {
+ return XlaSparseDenseMatmulGradWithFtrlAndCsrInput.create(scope, rowPointers, sortedSampleIds, sortedTokenIds, sortedGains, activationGradients, learningRate, embeddingTable, accumulator, linear, numMinibatchesPerPhysicalSparseCore, multiplyLinearByLearningRate, beta, learningRatePower, l1RegularizationStrength, l2RegularizationStrength, tableName, options);
+ }
+
+ /**
+ * The XlaSparseDenseMatmulGradWithSgdAndCsrInput operation
+ *
+ * @param rowPointers The rowPointers value
+ * @param sortedSampleIds The sortedSampleIds value
+ * @param sortedTokenIds The sortedTokenIds value
+ * @param sortedGains The sortedGains value
+ * @param activationGradients The activationGradients value
+ * @param learningRate The learningRate value
+ * @param embeddingTable The embeddingTable value
+ * @param numMinibatchesPerPhysicalSparseCore The numMinibatchesPerPhysicalSparseCore value
+ * @param tableName The value of the tableName attribute
+ * @param options carries optional attribute values
+ * @return a new instance of XlaSparseDenseMatmulGradWithSgdAndCsrInput
+ */
+ public XlaSparseDenseMatmulGradWithSgdAndCsrInput xlaSparseDenseMatmulGradWithSgdAndCsrInput(
+ Operand<TInt32> rowPointers, Operand<TInt32> sortedSampleIds, Operand<TInt32> sortedTokenIds,
+ Operand<TFloat32> sortedGains, Operand<TFloat32> activationGradients,
+ Operand<TFloat32> learningRate, Operand<TFloat32> embeddingTable,
+ Operand<TInt32> numMinibatchesPerPhysicalSparseCore, String tableName,
+ XlaSparseDenseMatmulGradWithSgdAndCsrInput.Options... options) {
+ return XlaSparseDenseMatmulGradWithSgdAndCsrInput.create(scope, rowPointers, sortedSampleIds, sortedTokenIds, sortedGains, activationGradients, learningRate, embeddingTable, numMinibatchesPerPhysicalSparseCore, tableName, options);
+ }
+
+ /**
+ * The XlaSparseDenseMatmulWithCsrInput operation
+ *
+ * @param rowPointers The rowPointers value
+ * @param sortedSampleIds The sortedSampleIds value
+ * @param sortedTokenIds The sortedTokenIds value
+ * @param sortedGains The sortedGains value
+ * @param embeddingTable The embeddingTable value
+ * @param numMinibatchesPerPhysicalSparseCore The numMinibatchesPerPhysicalSparseCore value
+ * @param inputSize The value of the inputSize attribute
+ * @param quantizationConfigLow The value of the quantizationConfigLow attribute
+ * @param quantizationConfigHigh The value of the quantizationConfigHigh attribute
+ * @param quantizationConfigNumBuckets The value of the quantizationConfigNumBuckets attribute
+ * @param tableName The value of the tableName attribute
+ * @return a new instance of XlaSparseDenseMatmulWithCsrInput
+ */
+ public XlaSparseDenseMatmulWithCsrInput xlaSparseDenseMatmulWithCsrInput(
+ Operand<TInt32> rowPointers, Operand<TInt32> sortedSampleIds, Operand<TInt32> sortedTokenIds,
+ Operand<TFloat32> sortedGains, Operand<TFloat32> embeddingTable,
+ Operand<TInt32> numMinibatchesPerPhysicalSparseCore, Long inputSize,
+ Float quantizationConfigLow, Float quantizationConfigHigh, Long quantizationConfigNumBuckets,
+ String tableName) {
+ return XlaSparseDenseMatmulWithCsrInput.create(scope, rowPointers, sortedSampleIds, sortedTokenIds, sortedGains, embeddingTable, numMinibatchesPerPhysicalSparseCore, inputSize, quantizationConfigLow, quantizationConfigHigh, quantizationConfigNumBuckets, tableName);
+ }
+
/**
* Get the parent {@link Ops} object.
*/
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicStitch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicStitch.java
index 258aabce0e0..9aba2968627 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicStitch.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicStitch.java
@@ -54,7 +54,7 @@
* must have {@code data[i].shape = indices[i].shape + constant}. In terms of this
* {@code constant}, the output shape is
*
- * merged.shape = [max(indices)] + constant
+ * merged.shape = [max(indices) + 1] + constant
*
* Values are merged in order, so if an index appears in both {@code indices[m][i]} and
* {@code indices[n][j]} for {@code (m,i) < (n,j)} the slice {@code data[n][j]} will appear in the
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Tile.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Tile.java
index fa25cd34464..c9a58b9158c 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Tile.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Tile.java
@@ -93,7 +93,7 @@ public Tile(Operation operation) {
* Factory method to create a class wrapping a new Tile operation.
*
* @param scope current scope
- * @param input 1-D or higher.
+ * @param input Can be of any rank.
* @param multiples 1-D. Length must be the same as the number of dimensions in {@code input}
* @param data type for {@code Tile} output and operands
* @return a new instance of Tile
@@ -128,7 +128,7 @@ public Output asOutput() {
)
public static class Inputs extends RawOpInputs> {
/**
- * 1-D or higher.
+ * Can be of any rank.
*/
public final Operand input;
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetFingerprint.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetFingerprint.java
new file mode 100644
index 00000000000..573670b6b26
--- /dev/null
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DatasetFingerprint.java
@@ -0,0 +1,107 @@
+/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+=======================================================================*/
+
+// This class has been generated, DO NOT EDIT!
+
+package org.tensorflow.op.data;
+
+import java.util.Arrays;
+import org.tensorflow.GraphOperation;
+import org.tensorflow.Operand;
+import org.tensorflow.Operation;
+import org.tensorflow.OperationBuilder;
+import org.tensorflow.Output;
+import org.tensorflow.op.RawOp;
+import org.tensorflow.op.RawOpInputs;
+import org.tensorflow.op.Scope;
+import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
+import org.tensorflow.op.annotation.Operator;
+import org.tensorflow.types.family.TType;
+
+/**
+ * Returns the fingerprint of {@code input_dataset}.
+ * Returns the fingerprint of {@code input_dataset}.
+ */
+@OpMetadata(
+ opType = DatasetFingerprint.OP_NAME,
+ inputsClass = DatasetFingerprint.Inputs.class
+)
+@Operator(
+ group = "data"
+)
+public final class DatasetFingerprint extends RawOp implements Operand<TType> {
+ /**
+ * The name of this op, as known by TensorFlow core engine
+ */
+ public static final String OP_NAME = "DatasetFingerprint";
+
+ private Output<? extends TType> fingerprint;
+
+ @SuppressWarnings("unchecked")
+ public DatasetFingerprint(Operation operation) {
+ super(operation, OP_NAME);
+ int outputIdx = 0;
+ fingerprint = operation.output(outputIdx++);
+ }
+
+ /**
+ * Factory method to create a class wrapping a new DatasetFingerprint operation.
+ *
+ * @param scope current scope
+ * @param inputDataset A variant tensor representing the dataset to return fingerprint for.
+ * @return a new instance of DatasetFingerprint
+ */
+ @Endpoint(
+ describeByClass = true
+ )
+ public static DatasetFingerprint create(Scope scope, Operand<? extends TType> inputDataset) {
+ OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "DatasetFingerprint");
+ opBuilder.addInput(inputDataset.asOutput());
+ return new DatasetFingerprint(opBuilder.build());
+ }
+
+ /**
+ * Gets fingerprint.
+ * The fingerprint of {@code input_dataset} in {@code uint64}
+ * @return fingerprint.
+ */
+ public Output<? extends TType> fingerprint() {
+ return fingerprint;
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public Output<TType> asOutput() {
+ return (Output<TType>) fingerprint;
+ }
+
+ @OpInputsMetadata(
+ outputsClass = DatasetFingerprint.class
+ )
+ public static class Inputs extends RawOpInputs<DatasetFingerprint> {
+ /**
+ * A variant tensor representing the dataset to return fingerprint for.
+ */
+ public final Operand<? extends TType> inputDataset;
+
+ public Inputs(GraphOperation op) {
+ super(new DatasetFingerprint(op), op, Arrays.asList());
+ int inputIndex = 0;
+ inputDataset = (Operand<? extends TType>) op.input(inputIndex++);
+ }
+ }
+}
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ListSnapshotChunksDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ListSnapshotChunksDataset.java
new file mode 100644
index 00000000000..0fe1bbb447b
--- /dev/null
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/ListSnapshotChunksDataset.java
@@ -0,0 +1,132 @@
+/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+=======================================================================*/
+
+// This class has been generated, DO NOT EDIT!
+
+package org.tensorflow.op.data;
+
+import java.util.Arrays;
+import java.util.List;
+import org.tensorflow.GraphOperation;
+import org.tensorflow.Operand;
+import org.tensorflow.Operation;
+import org.tensorflow.OperationBuilder;
+import org.tensorflow.Output;
+import org.tensorflow.ndarray.Shape;
+import org.tensorflow.op.Operands;
+import org.tensorflow.op.RawOp;
+import org.tensorflow.op.RawOpInputs;
+import org.tensorflow.op.Scope;
+import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
+import org.tensorflow.op.annotation.Operator;
+import org.tensorflow.proto.DataType;
+import org.tensorflow.types.TString;
+import org.tensorflow.types.family.TType;
+
+/**
+ * The ListSnapshotChunksDataset operation
+ */
+@OpMetadata(
+ opType = ListSnapshotChunksDataset.OP_NAME,
+ inputsClass = ListSnapshotChunksDataset.Inputs.class
+)
+@Operator(
+ group = "data"
+)
+public final class ListSnapshotChunksDataset extends RawOp implements Operand {
+ /**
+ * The name of this op, as known by TensorFlow core engine
+ */
+ public static final String OP_NAME = "ListSnapshotChunksDataset";
+
+ private Output extends TType> handle;
+
+ @SuppressWarnings("unchecked")
+ public ListSnapshotChunksDataset(Operation operation) {
+ super(operation, OP_NAME);
+ int outputIdx = 0;
+ handle = operation.output(outputIdx++);
+ }
+
+ /**
+ * Factory method to create a class wrapping a new ListSnapshotChunksDataset operation.
+ *
+ * @param scope current scope
+ * @param snapshotPath The snapshotPath value
+ * @param outputTypes The value of the outputTypes attribute
+ * @param outputShapes The value of the outputShapes attribute
+ * @return a new instance of ListSnapshotChunksDataset
+ */
+ @Endpoint(
+ describeByClass = true
+ )
+ public static ListSnapshotChunksDataset create(Scope scope, Operand snapshotPath,
+ List> outputTypes, List outputShapes) {
+ OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "ListSnapshotChunksDataset");
+ opBuilder.addInput(snapshotPath.asOutput());
+ opBuilder.setAttr("output_types", Operands.toDataTypes(outputTypes));
+ Shape[] outputShapesArray = new Shape[outputShapes.size()];
+ for (int i = 0 ; i < outputShapesArray.length ; i++) {
+ outputShapesArray[i] = outputShapes.get(i);
+ }
+ opBuilder.setAttr("output_shapes", outputShapesArray);
+ return new ListSnapshotChunksDataset(opBuilder.build());
+ }
+
+ /**
+ * Gets handle.
+ *
+ * @return handle.
+ */
+ public Output extends TType> handle() {
+ return handle;
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public Output asOutput() {
+ return (Output) handle;
+ }
+
+ @OpInputsMetadata(
+ outputsClass = ListSnapshotChunksDataset.class
+ )
+ public static class Inputs extends RawOpInputs {
+ /**
+ * The snapshotPath input
+ */
+ public final Operand snapshotPath;
+
+ /**
+ * The outputTypes attribute
+ */
+ public final DataType[] outputTypes;
+
+ /**
+ * The outputShapes attribute
+ */
+ public final Shape[] outputShapes;
+
+ public Inputs(GraphOperation op) {
+ super(new ListSnapshotChunksDataset(op), op, Arrays.asList("output_types", "output_shapes"));
+ int inputIndex = 0;
+ snapshotPath = (Operand) op.input(inputIndex++);
+ outputTypes = op.attributes().getAttrTypeList("output_types");
+ outputShapes = op.attributes().getAttrShapeList("output_shapes");
+ }
+ }
+}
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatMul.java
index 90a74daac9f..a984a502a7a 100644
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatMul.java
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatMul.java
@@ -91,6 +91,12 @@ public static MatMul create(Scope scope, Operand a, Oper
if (opts.transposeB != null) {
opBuilder.setAttr("transpose_b", opts.transposeB);
}
+ if (opts.gradA != null) {
+ opBuilder.setAttr("grad_a", opts.gradA);
+ }
+ if (opts.gradB != null) {
+ opBuilder.setAttr("grad_b", opts.gradB);
+ }
}
}
return new MatMul<>(opBuilder.build());
@@ -116,6 +122,26 @@ public static Options transposeB(Boolean transposeB) {
return new Options().transposeB(transposeB);
}
+ /**
+ * Sets the gradA option.
+ *
+ * @param gradA the gradA option
+ * @return this Options instance.
+ */
+ public static Options gradA(Boolean gradA) {
+ return new Options().gradA(gradA);
+ }
+
+ /**
+ * Sets the gradB option.
+ *
+ * @param gradB the gradB option
+ * @return this Options instance.
+ */
+ public static Options gradB(Boolean gradB) {
+ return new Options().gradB(gradB);
+ }
+
/**
* Gets product.
*
@@ -138,6 +164,10 @@ public static class Options {
private Boolean transposeB;
+ private Boolean gradA;
+
+ private Boolean gradB;
+
private Options() {
}
@@ -162,6 +192,28 @@ public Options transposeB(Boolean transposeB) {
this.transposeB = transposeB;
return this;
}
+
+ /**
+ * Sets the gradA option.
+ *
+ * @param gradA the gradA option
+ * @return this Options instance.
+ */
+ public Options gradA(Boolean gradA) {
+ this.gradA = gradA;
+ return this;
+ }
+
+ /**
+ * Sets the gradB option.
+ *
+ * @param gradB the gradB option
+ * @return this Options instance.
+ */
+ public Options gradB(Boolean gradB) {
+ this.gradB = gradB;
+ return this;
+ }
}
@OpInputsMetadata(
@@ -193,14 +245,26 @@ public static class Inputs extends RawOpInputs> {
*/
public final DataType T;
+ /**
+ * The gradA attribute
+ */
+ public final boolean gradA;
+
+ /**
+ * The gradB attribute
+ */
+ public final boolean gradB;
+
public Inputs(GraphOperation op) {
- super(new MatMul<>(op), op, Arrays.asList("transpose_a", "transpose_b", "T"));
+ super(new MatMul<>(op), op, Arrays.asList("transpose_a", "transpose_b", "T", "grad_a", "grad_b"));
int inputIndex = 0;
a = (Operand) op.input(inputIndex++);
b = (Operand) op.input(inputIndex++);
transposeA = op.attributes().getAttrBool("transpose_a");
transposeB = op.attributes().getAttrBool("transpose_b");
T = op.attributes().getAttrType("T");
+ gradA = op.attributes().getAttrBool("grad_a");
+ gradB = op.attributes().getAttrBool("grad_b");
}
}
}
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ComputeDedupDataSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ComputeDedupDataSize.java
new file mode 100644
index 00000000000..4305affd43e
--- /dev/null
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ComputeDedupDataSize.java
@@ -0,0 +1,107 @@
+/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+=======================================================================*/
+
+// This class has been generated, DO NOT EDIT!
+
+package org.tensorflow.op.tpu;
+
+import java.util.Arrays;
+import org.tensorflow.GraphOperation;
+import org.tensorflow.Operand;
+import org.tensorflow.Operation;
+import org.tensorflow.OperationBuilder;
+import org.tensorflow.Output;
+import org.tensorflow.op.RawOp;
+import org.tensorflow.op.RawOpInputs;
+import org.tensorflow.op.Scope;
+import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
+import org.tensorflow.op.annotation.Operator;
+import org.tensorflow.types.TInt32;
+
+/**
+ * An op computes the size of the deduplication data from embedding core and returns the updated config.
+ * This op is to compute size of the deduplication data so to provide this
+ * information to the op that computes the tuple mask of deduplication data can
+ * have static output shape.
+ */
+@OpMetadata(
+    opType = ComputeDedupDataSize.OP_NAME,
+    inputsClass = ComputeDedupDataSize.Inputs.class
+)
+@Operator(
+    group = "tpu"
+)
+public final class ComputeDedupDataSize extends RawOp implements Operand<TInt32> {
+  /**
+   * The name of this op, as known by TensorFlow core engine
+   */
+  public static final String OP_NAME = "ComputeDedupDataSize";
+
+  private Output<TInt32> numElements;
+
+  public ComputeDedupDataSize(Operation operation) {
+    super(operation, OP_NAME);
+    int outputIdx = 0;
+    numElements = operation.output(outputIdx++);
+  }
+
+  /**
+   * Factory method to create a class wrapping a new ComputeDedupDataSize operation.
+   *
+   * @param scope current scope
+   * @param config Serialized TPUEmbeddingConfiguration proto.
+   * @return a new instance of ComputeDedupDataSize
+   */
+  @Endpoint(
+      describeByClass = true
+  )
+  public static ComputeDedupDataSize create(Scope scope, String config) {
+    OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "ComputeDedupDataSize");
+    opBuilder.setAttr("config", config);
+    return new ComputeDedupDataSize(opBuilder.build());
+  }
+
+  /**
+   * Gets numElements.
+   * The size of the deduplicated data from infeed.
+   * @return numElements.
+   */
+  public Output<TInt32> numElements() {
+    return numElements;
+  }
+
+  @Override
+  public Output<TInt32> asOutput() {
+    return numElements;
+  }
+
+  @OpInputsMetadata(
+      outputsClass = ComputeDedupDataSize.class
+  )
+  public static class Inputs extends RawOpInputs<ComputeDedupDataSize> {
+    /**
+     * Serialized TPUEmbeddingConfiguration proto.
+     */
+    public final String config;
+
+    public Inputs(GraphOperation op) {
+      super(new ComputeDedupDataSize(op), op, Arrays.asList("config"));
+      int inputIndex = 0;
+      config = op.attributes().getAttrString("config");
+    }
+  }
+}
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConvertToCooTensor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConvertToCooTensor.java
new file mode 100644
index 00000000000..efec4caa44a
--- /dev/null
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ConvertToCooTensor.java
@@ -0,0 +1,157 @@
+/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+=======================================================================*/
+
+// This class has been generated, DO NOT EDIT!
+
+package org.tensorflow.op.tpu;
+
+import java.util.Arrays;
+import org.tensorflow.GraphOperation;
+import org.tensorflow.Operand;
+import org.tensorflow.Operation;
+import org.tensorflow.OperationBuilder;
+import org.tensorflow.Output;
+import org.tensorflow.op.RawOp;
+import org.tensorflow.op.RawOpInputs;
+import org.tensorflow.op.Scope;
+import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
+import org.tensorflow.op.annotation.Operator;
+import org.tensorflow.types.TFloat32;
+import org.tensorflow.types.TInt32;
+
+/**
+ * The ConvertToCooTensor operation
+ */
+@OpMetadata(
+    opType = ConvertToCooTensor.OP_NAME,
+    inputsClass = ConvertToCooTensor.Inputs.class
+)
+@Operator(
+    group = "tpu"
+)
+public final class ConvertToCooTensor extends RawOp {
+  /**
+   * The name of this op, as known by TensorFlow core engine
+   */
+  public static final String OP_NAME = "ConvertToCooTensor";
+
+  private Output<TInt32> rowIds;
+
+  private Output<TInt32> colIds;
+
+  private Output<TFloat32> gains;
+
+  public ConvertToCooTensor(Operation operation) {
+    super(operation, OP_NAME);
+    int outputIdx = 0;
+    rowIds = operation.output(outputIdx++);
+    colIds = operation.output(outputIdx++);
+    gains = operation.output(outputIdx++);
+  }
+
+  /**
+   * Factory method to create a class wrapping a new ConvertToCooTensor operation.
+   *
+   * @param scope current scope
+   * @param indicesOrRowSplits The indicesOrRowSplits value
+   * @param values The values value
+   * @param weights The weights value
+   * @param sampleCount The value of the sampleCount attribute
+   * @param combiner The value of the combiner attribute
+   * @return a new instance of ConvertToCooTensor
+   */
+  @Endpoint(
+      describeByClass = true
+  )
+  public static ConvertToCooTensor create(Scope scope, Operand<TInt32> indicesOrRowSplits,
+      Operand<TInt32> values, Operand<TFloat32> weights, Long sampleCount, String combiner) {
+    OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "ConvertToCooTensor");
+    opBuilder.addInput(indicesOrRowSplits.asOutput());
+    opBuilder.addInput(values.asOutput());
+    opBuilder.addInput(weights.asOutput());
+    opBuilder.setAttr("sample_count", sampleCount);
+    opBuilder.setAttr("combiner", combiner);
+    return new ConvertToCooTensor(opBuilder.build());
+  }
+
+  /**
+   * Gets rowIds.
+   *
+   * @return rowIds.
+   */
+  public Output<TInt32> rowIds() {
+    return rowIds;
+  }
+
+  /**
+   * Gets colIds.
+   *
+   * @return colIds.
+   */
+  public Output<TInt32> colIds() {
+    return colIds;
+  }
+
+  /**
+   * Gets gains.
+   *
+   * @return gains.
+   */
+  public Output<TFloat32> gains() {
+    return gains;
+  }
+
+  @OpInputsMetadata(
+      outputsClass = ConvertToCooTensor.class
+  )
+  public static class Inputs extends RawOpInputs<ConvertToCooTensor> {
+    /**
+     * The indicesOrRowSplits input
+     */
+    public final Operand<TInt32> indicesOrRowSplits;
+
+    /**
+     * The values input
+     */
+    public final Operand<TInt32> values;
+
+    /**
+     * The weights input
+     */
+    public final Operand<TFloat32> weights;
+
+    /**
+     * The sampleCount attribute
+     */
+    public final long sampleCount;
+
+    /**
+     * The combiner attribute
+     */
+    public final String combiner;
+
+    public Inputs(GraphOperation op) {
+      super(new ConvertToCooTensor(op), op, Arrays.asList("sample_count", "combiner"));
+      int inputIndex = 0;
+      indicesOrRowSplits = (Operand<TInt32>) op.input(inputIndex++);
+      values = (Operand<TInt32>) op.input(inputIndex++);
+      weights = (Operand<TFloat32>) op.input(inputIndex++);
+      sampleCount = op.attributes().getAttrInt("sample_count");
+      combiner = op.attributes().getAttrString("combiner");
+    }
+  }
+}
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/GetMinibatchSplitsWithPhysicalReplica.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/GetMinibatchSplitsWithPhysicalReplica.java
new file mode 100644
index 00000000000..7746ebadb48
--- /dev/null
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/GetMinibatchSplitsWithPhysicalReplica.java
@@ -0,0 +1,257 @@
+/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+=======================================================================*/
+
+// This class has been generated, DO NOT EDIT!
+
+package org.tensorflow.op.tpu;
+
+import java.util.Arrays;
+import org.tensorflow.GraphOperation;
+import org.tensorflow.Operand;
+import org.tensorflow.Operation;
+import org.tensorflow.OperationBuilder;
+import org.tensorflow.Output;
+import org.tensorflow.op.RawOp;
+import org.tensorflow.op.RawOpInputs;
+import org.tensorflow.op.Scope;
+import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
+import org.tensorflow.op.annotation.Operator;
+import org.tensorflow.types.TFloat32;
+import org.tensorflow.types.TInt32;
+import org.tensorflow.types.TInt64;
+import org.tensorflow.types.TString;
+
+/**
+ * The GetMinibatchSplitsWithPhysicalReplica operation
+ */
+@OpMetadata(
+    opType = GetMinibatchSplitsWithPhysicalReplica.OP_NAME,
+    inputsClass = GetMinibatchSplitsWithPhysicalReplica.Inputs.class
+)
+@Operator(
+    group = "tpu"
+)
+public final class GetMinibatchSplitsWithPhysicalReplica extends RawOp {
+  /**
+   * The name of this op, as known by TensorFlow core engine
+   */
+  public static final String OP_NAME = "GetMinibatchSplitsWithPhysicalReplica";
+
+  private Output<TInt32> sortedRowIds;
+
+  private Output<TInt32> sortedColIds;
+
+  private Output<TFloat32> sortedGains;
+
+  private Output<TInt64> splits;
+
+  private Output<TInt32> idCounts;
+
+  private Output<TInt32> maxIds;
+
+  private Output<TInt32> maxUniques;
+
+  public GetMinibatchSplitsWithPhysicalReplica(Operation operation) {
+    super(operation, OP_NAME);
+    int outputIdx = 0;
+    sortedRowIds = operation.output(outputIdx++);
+    sortedColIds = operation.output(outputIdx++);
+    sortedGains = operation.output(outputIdx++);
+    splits = operation.output(outputIdx++);
+    idCounts = operation.output(outputIdx++);
+    maxIds = operation.output(outputIdx++);
+    maxUniques = operation.output(outputIdx++);
+  }
+
+  /**
+   * Factory method to create a class wrapping a new GetMinibatchSplitsWithPhysicalReplica operation.
+   *
+   * @param scope current scope
+   * @param programKey The programKey value
+   * @param rowIds The rowIds value
+   * @param colIds The colIds value
+   * @param gains The gains value
+   * @param sampleCount The value of the sampleCount attribute
+   * @param numReplica The value of the numReplica attribute
+   * @param tableVocabSize The value of the tableVocabSize attribute
+   * @param featureWidth The value of the featureWidth attribute
+   * @param numScPerChip The value of the numScPerChip attribute
+   * @param tableName The value of the tableName attribute
+   * @param miniBatchSplits The value of the miniBatchSplits attribute
+   * @return a new instance of GetMinibatchSplitsWithPhysicalReplica
+   */
+  @Endpoint(
+      describeByClass = true
+  )
+  public static GetMinibatchSplitsWithPhysicalReplica create(Scope scope,
+      Operand<TString> programKey, Operand<TInt32> rowIds, Operand<TInt32> colIds,
+      Operand<TFloat32> gains, Long sampleCount, Long numReplica, Long tableVocabSize,
+      Long featureWidth, Long numScPerChip, String tableName, String miniBatchSplits) {
+    OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "GetMinibatchSplitsWithPhysicalReplica");
+    opBuilder.addInput(programKey.asOutput());
+    opBuilder.addInput(rowIds.asOutput());
+    opBuilder.addInput(colIds.asOutput());
+    opBuilder.addInput(gains.asOutput());
+    opBuilder.setAttr("sample_count", sampleCount);
+    opBuilder.setAttr("num_replica", numReplica);
+    opBuilder.setAttr("table_vocab_size", tableVocabSize);
+    opBuilder.setAttr("feature_width", featureWidth);
+    opBuilder.setAttr("num_sc_per_chip", numScPerChip);
+    opBuilder.setAttr("table_name", tableName);
+    opBuilder.setAttr("mini_batch_splits", miniBatchSplits);
+    return new GetMinibatchSplitsWithPhysicalReplica(opBuilder.build());
+  }
+
+  /**
+   * Gets sortedRowIds.
+   *
+   * @return sortedRowIds.
+   */
+  public Output<TInt32> sortedRowIds() {
+    return sortedRowIds;
+  }
+
+  /**
+   * Gets sortedColIds.
+   *
+   * @return sortedColIds.
+   */
+  public Output<TInt32> sortedColIds() {
+    return sortedColIds;
+  }
+
+  /**
+   * Gets sortedGains.
+   *
+   * @return sortedGains.
+   */
+  public Output<TFloat32> sortedGains() {
+    return sortedGains;
+  }
+
+  /**
+   * Gets splits.
+   *
+   * @return splits.
+   */
+  public Output<TInt64> splits() {
+    return splits;
+  }
+
+  /**
+   * Gets idCounts.
+   *
+   * @return idCounts.
+   */
+  public Output<TInt32> idCounts() {
+    return idCounts;
+  }
+
+  /**
+   * Gets maxIds.
+   *
+   * @return maxIds.
+   */
+  public Output<TInt32> maxIds() {
+    return maxIds;
+  }
+
+  /**
+   * Gets maxUniques.
+   *
+   * @return maxUniques.
+   */
+  public Output<TInt32> maxUniques() {
+    return maxUniques;
+  }
+
+  @OpInputsMetadata(
+      outputsClass = GetMinibatchSplitsWithPhysicalReplica.class
+  )
+  public static class Inputs extends RawOpInputs<GetMinibatchSplitsWithPhysicalReplica> {
+    /**
+     * The programKey input
+     */
+    public final Operand<TString> programKey;
+
+    /**
+     * The rowIds input
+     */
+    public final Operand<TInt32> rowIds;
+
+    /**
+     * The colIds input
+     */
+    public final Operand<TInt32> colIds;
+
+    /**
+     * The gains input
+     */
+    public final Operand<TFloat32> gains;
+
+    /**
+     * The sampleCount attribute
+     */
+    public final long sampleCount;
+
+    /**
+     * The numReplica attribute
+     */
+    public final long numReplica;
+
+    /**
+     * The tableVocabSize attribute
+     */
+    public final long tableVocabSize;
+
+    /**
+     * The featureWidth attribute
+     */
+    public final long featureWidth;
+
+    /**
+     * The numScPerChip attribute
+     */
+    public final long numScPerChip;
+
+    /**
+     * The tableName attribute
+     */
+    public final String tableName;
+
+    /**
+     * The miniBatchSplits attribute
+     */
+    public final String miniBatchSplits;
+
+    public Inputs(GraphOperation op) {
+      super(new GetMinibatchSplitsWithPhysicalReplica(op), op, Arrays.asList("sample_count", "num_replica", "table_vocab_size", "feature_width", "num_sc_per_chip", "table_name", "mini_batch_splits"));
+      int inputIndex = 0;
+      programKey = (Operand<TString>) op.input(inputIndex++);
+      rowIds = (Operand<TInt32>) op.input(inputIndex++);
+      colIds = (Operand<TInt32>) op.input(inputIndex++);
+      gains = (Operand<TFloat32>) op.input(inputIndex++);
+      sampleCount = op.attributes().getAttrInt("sample_count");
+      numReplica = op.attributes().getAttrInt("num_replica");
+      tableVocabSize = op.attributes().getAttrInt("table_vocab_size");
+      featureWidth = op.attributes().getAttrInt("feature_width");
+      numScPerChip = op.attributes().getAttrInt("num_sc_per_chip");
+      tableName = op.attributes().getAttrString("table_name");
+      miniBatchSplits = op.attributes().getAttrString("mini_batch_splits");
+    }
+  }
+}
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/GetMinibatchesInCsrWithPhysicalReplica.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/GetMinibatchesInCsrWithPhysicalReplica.java
new file mode 100644
index 00000000000..d51f1c3959f
--- /dev/null
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/GetMinibatchesInCsrWithPhysicalReplica.java
@@ -0,0 +1,290 @@
+/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+=======================================================================*/
+
+// This class has been generated, DO NOT EDIT!
+
+package org.tensorflow.op.tpu;
+
+import java.util.Arrays;
+import org.tensorflow.GraphOperation;
+import org.tensorflow.Operand;
+import org.tensorflow.Operation;
+import org.tensorflow.OperationBuilder;
+import org.tensorflow.Output;
+import org.tensorflow.op.RawOp;
+import org.tensorflow.op.RawOpInputs;
+import org.tensorflow.op.Scope;
+import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
+import org.tensorflow.op.annotation.Operator;
+import org.tensorflow.types.TFloat32;
+import org.tensorflow.types.TInt32;
+import org.tensorflow.types.TInt64;
+import org.tensorflow.types.TString;
+
+/**
+ * The GetMinibatchesInCsrWithPhysicalReplica operation
+ */
+@OpMetadata(
+    opType = GetMinibatchesInCsrWithPhysicalReplica.OP_NAME,
+    inputsClass = GetMinibatchesInCsrWithPhysicalReplica.Inputs.class
+)
+@Operator(
+    group = "tpu"
+)
+public final class GetMinibatchesInCsrWithPhysicalReplica extends RawOp {
+  /**
+   * The name of this op, as known by TensorFlow core engine
+   */
+  public static final String OP_NAME = "GetMinibatchesInCsrWithPhysicalReplica";
+
+  private Output<TInt32> rowPointers;
+
+  private Output<TInt32> sortedSampleIds;
+
+  private Output<TInt32> sortedTokenIds;
+
+  private Output<TFloat32> sortedGains;
+
+  private Output<TInt32> rowPointersUnpaddedSize;
+
+  private Output<TInt32> idsUnpaddedSize;
+
+  private Output<TInt32> numMinibatchesPerPhysicalSparseCore;
+
+  public GetMinibatchesInCsrWithPhysicalReplica(Operation operation) {
+    super(operation, OP_NAME);
+    int outputIdx = 0;
+    rowPointers = operation.output(outputIdx++);
+    sortedSampleIds = operation.output(outputIdx++);
+    sortedTokenIds = operation.output(outputIdx++);
+    sortedGains = operation.output(outputIdx++);
+    rowPointersUnpaddedSize = operation.output(outputIdx++);
+    idsUnpaddedSize = operation.output(outputIdx++);
+    numMinibatchesPerPhysicalSparseCore = operation.output(outputIdx++);
+  }
+
+  /**
+   * Factory method to create a class wrapping a new GetMinibatchesInCsrWithPhysicalReplica operation.
+   *
+   * @param scope current scope
+   * @param programKey The programKey value
+   * @param rowIds The rowIds value
+   * @param colIds The colIds value
+   * @param gains The gains value
+   * @param splits The splits value
+   * @param idCounts The idCounts value
+   * @param sampleCount The value of the sampleCount attribute
+   * @param numReplica The value of the numReplica attribute
+   * @param maxMinibatchesPerSc The value of the maxMinibatchesPerSc attribute
+   * @param maxIdsPerChipPerSample The value of the maxIdsPerChipPerSample attribute
+   * @param tableVocabSize The value of the tableVocabSize attribute
+   * @param featureWidth The value of the featureWidth attribute
+   * @param numScPerChip The value of the numScPerChip attribute
+   * @param tableName The value of the tableName attribute
+   * @param miniBatchInCsr The value of the miniBatchInCsr attribute
+   * @return a new instance of GetMinibatchesInCsrWithPhysicalReplica
+   */
+  @Endpoint(
+      describeByClass = true
+  )
+  public static GetMinibatchesInCsrWithPhysicalReplica create(Scope scope,
+      Operand<TString> programKey, Operand<TInt32> rowIds, Operand<TInt32> colIds,
+      Operand<TFloat32> gains, Operand<TInt64> splits, Operand<TInt32> idCounts, Long sampleCount,
+      Long numReplica, Long maxMinibatchesPerSc, Long maxIdsPerChipPerSample, Long tableVocabSize,
+      Long featureWidth, Long numScPerChip, String tableName, String miniBatchInCsr) {
+    OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "GetMinibatchesInCsrWithPhysicalReplica");
+    opBuilder.addInput(programKey.asOutput());
+    opBuilder.addInput(rowIds.asOutput());
+    opBuilder.addInput(colIds.asOutput());
+    opBuilder.addInput(gains.asOutput());
+    opBuilder.addInput(splits.asOutput());
+    opBuilder.addInput(idCounts.asOutput());
+    opBuilder.setAttr("sample_count", sampleCount);
+    opBuilder.setAttr("num_replica", numReplica);
+    opBuilder.setAttr("max_minibatches_per_sc", maxMinibatchesPerSc);
+    opBuilder.setAttr("max_ids_per_chip_per_sample", maxIdsPerChipPerSample);
+    opBuilder.setAttr("table_vocab_size", tableVocabSize);
+    opBuilder.setAttr("feature_width", featureWidth);
+    opBuilder.setAttr("num_sc_per_chip", numScPerChip);
+    opBuilder.setAttr("table_name", tableName);
+    opBuilder.setAttr("mini_batch_in_csr", miniBatchInCsr);
+    return new GetMinibatchesInCsrWithPhysicalReplica(opBuilder.build());
+  }
+
+  /**
+   * Gets rowPointers.
+   *
+   * @return rowPointers.
+   */
+  public Output<TInt32> rowPointers() {
+    return rowPointers;
+  }
+
+  /**
+   * Gets sortedSampleIds.
+   *
+   * @return sortedSampleIds.
+   */
+  public Output<TInt32> sortedSampleIds() {
+    return sortedSampleIds;
+  }
+
+  /**
+   * Gets sortedTokenIds.
+   *
+   * @return sortedTokenIds.
+   */
+  public Output<TInt32> sortedTokenIds() {
+    return sortedTokenIds;
+  }
+
+  /**
+   * Gets sortedGains.
+   *
+   * @return sortedGains.
+   */
+  public Output<TFloat32> sortedGains() {
+    return sortedGains;
+  }
+
+  /**
+   * Gets rowPointersUnpaddedSize.
+   *
+   * @return rowPointersUnpaddedSize.
+   */
+  public Output<TInt32> rowPointersUnpaddedSize() {
+    return rowPointersUnpaddedSize;
+  }
+
+  /**
+   * Gets idsUnpaddedSize.
+   *
+   * @return idsUnpaddedSize.
+   */
+  public Output<TInt32> idsUnpaddedSize() {
+    return idsUnpaddedSize;
+  }
+
+  /**
+   * Gets numMinibatchesPerPhysicalSparseCore.
+   *
+   * @return numMinibatchesPerPhysicalSparseCore.
+   */
+  public Output<TInt32> numMinibatchesPerPhysicalSparseCore() {
+    return numMinibatchesPerPhysicalSparseCore;
+  }
+
+  @OpInputsMetadata(
+      outputsClass = GetMinibatchesInCsrWithPhysicalReplica.class
+  )
+  public static class Inputs extends RawOpInputs<GetMinibatchesInCsrWithPhysicalReplica> {
+    /**
+     * The programKey input
+     */
+    public final Operand<TString> programKey;
+
+    /**
+     * The rowIds input
+     */
+    public final Operand<TInt32> rowIds;
+
+    /**
+     * The colIds input
+     */
+    public final Operand<TInt32> colIds;
+
+    /**
+     * The gains input
+     */
+    public final Operand<TFloat32> gains;
+
+    /**
+     * The splits input
+     */
+    public final Operand<TInt64> splits;
+
+    /**
+     * The idCounts input
+     */
+    public final Operand<TInt32> idCounts;
+
+    /**
+     * The sampleCount attribute
+     */
+    public final long sampleCount;
+
+    /**
+     * The numReplica attribute
+     */
+    public final long numReplica;
+
+    /**
+     * The maxMinibatchesPerSc attribute
+     */
+    public final long maxMinibatchesPerSc;
+
+    /**
+     * The maxIdsPerChipPerSample attribute
+     */
+    public final long maxIdsPerChipPerSample;
+
+    /**
+     * The tableVocabSize attribute
+     */
+    public final long tableVocabSize;
+
+    /**
+     * The featureWidth attribute
+     */
+    public final long featureWidth;
+
+    /**
+     * The numScPerChip attribute
+     */
+    public final long numScPerChip;
+
+    /**
+     * The tableName attribute
+     */
+    public final String tableName;
+
+    /**
+     * The miniBatchInCsr attribute
+     */
+    public final String miniBatchInCsr;
+
+    public Inputs(GraphOperation op) {
+      super(new GetMinibatchesInCsrWithPhysicalReplica(op), op, Arrays.asList("sample_count", "num_replica", "max_minibatches_per_sc", "max_ids_per_chip_per_sample", "table_vocab_size", "feature_width", "num_sc_per_chip", "table_name", "mini_batch_in_csr"));
+      int inputIndex = 0;
+      programKey = (Operand<TString>) op.input(inputIndex++);
+      rowIds = (Operand<TInt32>) op.input(inputIndex++);
+      colIds = (Operand<TInt32>) op.input(inputIndex++);
+      gains = (Operand<TFloat32>) op.input(inputIndex++);
+      splits = (Operand<TInt64>) op.input(inputIndex++);
+      idCounts = (Operand<TInt32>) op.input(inputIndex++);
+      sampleCount = op.attributes().getAttrInt("sample_count");
+      numReplica = op.attributes().getAttrInt("num_replica");
+      maxMinibatchesPerSc = op.attributes().getAttrInt("max_minibatches_per_sc");
+      maxIdsPerChipPerSample = op.attributes().getAttrInt("max_ids_per_chip_per_sample");
+      tableVocabSize = op.attributes().getAttrInt("table_vocab_size");
+      featureWidth = op.attributes().getAttrInt("feature_width");
+      numScPerChip = op.attributes().getAttrInt("num_sc_per_chip");
+      tableName = op.attributes().getAttrString("table_name");
+      miniBatchInCsr = op.attributes().getAttrString("mini_batch_in_csr");
+    }
+  }
+}
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/GlobalIterId.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/GlobalIterId.java
new file mode 100644
index 00000000000..f0f71accb37
--- /dev/null
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/GlobalIterId.java
@@ -0,0 +1,96 @@
+/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+=======================================================================*/
+
+// This class has been generated, DO NOT EDIT!
+
+package org.tensorflow.op.tpu;
+
+import java.util.Arrays;
+import org.tensorflow.GraphOperation;
+import org.tensorflow.Operand;
+import org.tensorflow.Operation;
+import org.tensorflow.OperationBuilder;
+import org.tensorflow.Output;
+import org.tensorflow.op.RawOp;
+import org.tensorflow.op.RawOpInputs;
+import org.tensorflow.op.Scope;
+import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
+import org.tensorflow.op.annotation.Operator;
+import org.tensorflow.types.TInt64;
+
+/**
+ * The GlobalIterId operation
+ */
+@OpMetadata(
+    opType = GlobalIterId.OP_NAME,
+    inputsClass = GlobalIterId.Inputs.class
+)
+@Operator(
+    group = "tpu"
+)
+public final class GlobalIterId extends RawOp implements Operand<TInt64> {
+  /**
+   * The name of this op, as known by TensorFlow core engine
+   */
+  public static final String OP_NAME = "GlobalIterId";
+
+  private Output<TInt64> iterId;
+
+  public GlobalIterId(Operation operation) {
+    super(operation, OP_NAME);
+    int outputIdx = 0;
+    iterId = operation.output(outputIdx++);
+  }
+
+  /**
+   * Factory method to create a class wrapping a new GlobalIterId operation.
+   *
+   * @param scope current scope
+   * @return a new instance of GlobalIterId
+   */
+  @Endpoint(
+      describeByClass = true
+  )
+  public static GlobalIterId create(Scope scope) {
+    OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "GlobalIterId");
+    return new GlobalIterId(opBuilder.build());
+  }
+
+  /**
+   * Gets iterId.
+   *
+   * @return iterId.
+   */
+  public Output<TInt64> iterId() {
+    return iterId;
+  }
+
+  @Override
+  public Output<TInt64> asOutput() {
+    return iterId;
+  }
+
+  @OpInputsMetadata(
+      outputsClass = GlobalIterId.class
+  )
+  public static class Inputs extends RawOpInputs<GlobalIterId> {
+    public Inputs(GraphOperation op) {
+      super(new GlobalIterId(op), op, Arrays.asList());
+      int inputIndex = 0;
+    }
+  }
+}
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/StoreMinibatchStatisticsInFdo.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/StoreMinibatchStatisticsInFdo.java
new file mode 100644
index 00000000000..a3c05fd31fb
--- /dev/null
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/StoreMinibatchStatisticsInFdo.java
@@ -0,0 +1,152 @@
+/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+=======================================================================*/
+
+// This class has been generated, DO NOT EDIT!
+
+package org.tensorflow.op.tpu;
+
+import java.util.Arrays;
+import org.tensorflow.GraphOperation;
+import org.tensorflow.Operand;
+import org.tensorflow.Operation;
+import org.tensorflow.OperationBuilder;
+import org.tensorflow.op.RawOp;
+import org.tensorflow.op.RawOpInputs;
+import org.tensorflow.op.Scope;
+import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
+import org.tensorflow.op.annotation.Operator;
+import org.tensorflow.types.TInt32;
+import org.tensorflow.types.TString;
+
+/**
+ * The StoreMinibatchStatisticsInFdo operation
+ */
+@OpMetadata(
+    opType = StoreMinibatchStatisticsInFdo.OP_NAME,
+    inputsClass = StoreMinibatchStatisticsInFdo.Inputs.class
+)
+@Operator(
+    group = "tpu"
+)
+public final class StoreMinibatchStatisticsInFdo extends RawOp {
+  /**
+   * The name of this op, as known by TensorFlow core engine
+   */
+  public static final String OP_NAME = "StoreMinibatchStatisticsInFdo";
+
+  public StoreMinibatchStatisticsInFdo(Operation operation) {
+    super(operation, OP_NAME);
+  }
+
+  /**
+   * Factory method to create a class wrapping a new StoreMinibatchStatisticsInFdo operation.
+   *
+   * @param scope current scope
+   * @param programKey The programKey value
+   * @param maxIds The maxIds value
+   * @param maxUniques The maxUniques value
+   * @param sampleCount The value of the sampleCount attribute
+   * @param numReplica The value of the numReplica attribute
+   * @param featureWidth The value of the featureWidth attribute
+   * @param numScPerChip The value of the numScPerChip attribute
+   * @param tableName The value of the tableName attribute
+   * @param miniBatchSplits The value of the miniBatchSplits attribute
+   * @return a new instance of StoreMinibatchStatisticsInFdo
+   */
+  @Endpoint(
+      describeByClass = true
+  )
+  public static StoreMinibatchStatisticsInFdo create(Scope scope, Operand<TString> programKey,
+      Operand<TInt32> maxIds, Operand<TInt32> maxUniques, Long sampleCount, Long numReplica,
+      Long featureWidth, Long numScPerChip, String tableName, String miniBatchSplits) {
+    OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "StoreMinibatchStatisticsInFdo");
+    opBuilder.addInput(programKey.asOutput());
+    opBuilder.addInput(maxIds.asOutput());
+    opBuilder.addInput(maxUniques.asOutput());
+    opBuilder.setAttr("sample_count", sampleCount);
+    opBuilder.setAttr("num_replica", numReplica);
+    opBuilder.setAttr("feature_width", featureWidth);
+    opBuilder.setAttr("num_sc_per_chip", numScPerChip);
+    opBuilder.setAttr("table_name", tableName);
+    opBuilder.setAttr("mini_batch_splits", miniBatchSplits);
+    return new StoreMinibatchStatisticsInFdo(opBuilder.build());
+  }
+
+  @OpInputsMetadata(
+      outputsClass = StoreMinibatchStatisticsInFdo.class
+  )
+  public static class Inputs extends RawOpInputs<StoreMinibatchStatisticsInFdo> {
+    /**
+     * The programKey input
+     */
+    public final Operand<TString> programKey;
+
+    /**
+     * The maxIds input
+     */
+    public final Operand<TInt32> maxIds;
+
+    /**
+     * The maxUniques input
+     */
+    public final Operand<TInt32> maxUniques;
+
+    /**
+     * The sampleCount attribute
+     */
+    public final long sampleCount;
+
+    /**
+     * The numReplica attribute
+     */
+    public final long numReplica;
+
+    /**
+     * The featureWidth attribute
+     */
+    public final long featureWidth;
+
+    /**
+     * The numScPerChip attribute
+     */
+    public final long numScPerChip;
+
+    /**
+     * The tableName attribute
+     */
+    public final String tableName;
+
+    /**
+     * The miniBatchSplits attribute
+     */
+    public final String miniBatchSplits;
+
+    public Inputs(GraphOperation op) {
+      super(new StoreMinibatchStatisticsInFdo(op), op, Arrays.asList("sample_count", "num_replica", "feature_width", "num_sc_per_chip", "table_name", "mini_batch_splits"));
+      int inputIndex = 0;
+      programKey = (Operand<TString>) op.input(inputIndex++);
+      maxIds = (Operand<TInt32>) op.input(inputIndex++);
+      maxUniques = (Operand<TInt32>) op.input(inputIndex++);
+      sampleCount = op.attributes().getAttrInt("sample_count");
+      numReplica = op.attributes().getAttrInt("num_replica");
+      featureWidth = op.attributes().getAttrInt("feature_width");
+      numScPerChip = op.attributes().getAttrInt("num_sc_per_chip");
+      tableName = op.attributes().getAttrString("table_name");
+      miniBatchSplits = op.attributes().getAttrString("mini_batch_splits");
+    }
+  }
+}
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUAnnotateTensorsWithDynamicShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUAnnotateTensorsWithDynamicShape.java
new file mode 100644
index 00000000000..a4dc34f7fc4
--- /dev/null
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUAnnotateTensorsWithDynamicShape.java
@@ -0,0 +1,121 @@
+/* Copyright 2018-2023 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+=======================================================================*/
+
+// This class has been generated, DO NOT EDIT!
+
+package org.tensorflow.op.tpu;
+
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+import org.tensorflow.GraphOperation;
+import org.tensorflow.Operand;
+import org.tensorflow.Operation;
+import org.tensorflow.OperationBuilder;
+import org.tensorflow.Output;
+import org.tensorflow.op.Operands;
+import org.tensorflow.op.RawOp;
+import org.tensorflow.op.RawOpInputs;
+import org.tensorflow.op.Scope;
+import org.tensorflow.op.annotation.Endpoint;
+import org.tensorflow.op.annotation.OpInputsMetadata;
+import org.tensorflow.op.annotation.OpMetadata;
+import org.tensorflow.op.annotation.Operator;
+import org.tensorflow.proto.DataType;
+import org.tensorflow.types.family.TType;
+
+/**
+ * The TPUAnnotateTensorsWithDynamicShape operation
+ */
+@OpMetadata(
+ opType = TPUAnnotateTensorsWithDynamicShape.OP_NAME,
+ inputsClass = TPUAnnotateTensorsWithDynamicShape.Inputs.class
+)
+@Operator(
+ group = "tpu"
+)
+public final class TPUAnnotateTensorsWithDynamicShape extends RawOp implements Iterable> {
+ /**
+ * The name of this op, as known by TensorFlow core engine
+ */
+ public static final String OP_NAME = "TPUAnnotateTensorsWithDynamicShape";
+
+ private List