diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/BitwiseOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/BitwiseOps.java index 21073274635..5cf8e620d72 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/BitwiseOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/BitwiseOps.java @@ -61,7 +61,6 @@ public final class BitwiseOps { * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE * * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code BitwiseAnd} output and operands @@ -91,7 +90,6 @@ public BitwiseAnd bitwiseAnd(Operand x, Operand y) * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE * * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code BitwiseOr} output and operands @@ -121,7 +119,6 @@ public BitwiseOr bitwiseOr(Operand x, Operand y) { * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE * * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code BitwiseXor} output and operands @@ -172,7 +169,6 @@ public BitwiseXor bitwiseXor(Operand x, Operand y) * tf.assert_equal(tf.cast(inverted, tf.float32), tf.cast(expected, tf.float32)) * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Invert} output and operands * @return a new instance of Invert @@ -212,7 +208,6 @@ public Invert invert(Operand x) { * # <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2, 64, 101, 32], dtype=int8)> * * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code LeftShift} output and operands @@ -255,7 +250,6 @@ public LeftShift leftShift(Operand x, Operand y) { * # <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2, 64, 101, 32], dtype=int8)> * * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code RightShift} output and operands diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/CollectiveOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/CollectiveOps.java index 23a96e4bfdf..de786dc95fe 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/CollectiveOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/CollectiveOps.java @@ -49,7 +49,6 @@ public final class CollectiveOps { /** * Mutually exchanges multiple tensors of identical type and shape. * - * @param data type for {@code data} output * @param input The input value * @param communicator The communicator value * @param groupAssignment The groupAssignment value @@ -79,7 +78,6 @@ public CollectiveAssignGroup collectiveAssignGroup(Operand groupAssignme /** * Receives a tensor value broadcast from another device. * - * @param data type for {@code data} output * @param groupSize The groupSize value * @param groupKey The groupKey value * @param instanceKey The instanceKey value @@ -98,7 +96,6 @@ public CollectiveBcastRecv collectiveBcastRecv(Operand data type for {@code data} output * @param input The input value * @param groupSize The groupSize value * @param groupKey The groupKey value @@ -119,7 +116,6 @@ public CollectiveBcastSend collectiveBcastSend(Operand i * collective ops. 
In this case, keys that are unique at runtime * (e.g. {@code instance_key}) should be used to distinguish collective groups. * - * @param data type for {@code data} output * @param input The input value * @param groupSize The groupSize value * @param groupKey The groupKey value @@ -157,7 +153,6 @@ public CollectiveInitializeCommunicator collectiveInitializeCommunicator(Operand * source_target_pairs={@code [[0,1],[1,2],[2,3],[3,0]]} gets the outputs: * {@code [D, A, B, C]}. * - * @param data type for {@code output} output * @param input The local input to be permuted. Currently only supports float and * bfloat16. * @param sourceTargetPairs A tensor with shape [num_pairs, 2]. @@ -172,7 +167,6 @@ public CollectivePermute collectivePermute(Operand input /** * Mutually reduces multiple tensors of identical type and shape. * - * @param data type for {@code data} output * @param input The input value * @param communicator The communicator value * @param groupAssignment The groupAssignment value @@ -193,7 +187,6 @@ public CollectiveReduce collectiveReduce(Operand input * collective ops. In this case, keys that are unique at runtime * (e.g. {@code instance_key}) should be used to distinguish collective groups. * - * @param data type for {@code data} output * @param input The input value * @param groupSize The groupSize value * @param groupKey The groupKey value diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DataOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DataOps.java index 49f7e238a3f..8047510d2c2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DataOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DataOps.java @@ -987,7 +987,6 @@ public LatencyStatsDataset latencyStatsDataset(Operand inputDat /** * Computes rectified linear gradients for a LeakyRelu operation. * - * @param data type for {@code backprops} output * @param gradients The backpropagated gradients to the corresponding LeakyRelu operation. * @param features The features passed as input to the corresponding LeakyRelu operation, * OR the outputs of that operation (both work equivalently). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DebuggingOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DebuggingOps.java index b50f697f8d5..4ea1efd10db 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DebuggingOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DebuggingOps.java @@ -43,7 +43,6 @@ public final class DebuggingOps { * tensor. Unlike CheckNumerics (V1), CheckNumericsV2 distinguishes -Inf and +Inf * in the errors it throws. * - * @param data type for {@code output} output * @param tensor The tensor value * @param message Prefix of the error message. 
* @param data type for {@code CheckNumericsV2} output and operands diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DistributeOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DistributeOps.java index e5a6c71c20a..4f30df6352d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DistributeOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DistributeOps.java @@ -52,7 +52,6 @@ public final class DistributeOps { * num_devices: The number of devices participating in this reduction. * shared_name: Identifier that shared between ops of the same reduction. * - * @param data type for {@code data} output * @param input The input value * @param reduction The value of the reduction attribute * @param numDevices The value of the numDevices attribute @@ -74,7 +73,6 @@ public NcclAllReduce ncclAllReduce(Operand input, Stri * output: The same as input. * shape: The shape of the input tensor. * - * @param data type for {@code output} output * @param input The input value * @param shape The value of the shape attribute * @param data type for {@code NcclBroadcast} output and operands @@ -93,7 +91,6 @@ public NcclBroadcast ncclBroadcast(Operand input, Shap * data: the value of the reduction across all {@code num_devices} devices. * reduction: the reduction operation to perform. * - * @param data type for {@code data} output * @param input The input value * @param reduction The value of the reduction attribute * @param data type for {@code NcclReduce} output and operands diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DtypesOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DtypesOps.java index 3ef6847d4f7..42f59c161d7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DtypesOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DtypesOps.java @@ -69,7 +69,6 @@ public AsString asString(Operand input, AsString.Options... opt /** * Cast x of type SrcT to y of DstT. * - * @param data type for {@code y} output * @param x The x value * @param DstT The value of the DstT attribute * @param options carries optional attribute values @@ -95,7 +94,6 @@ public Cast cast(Operand x, Class DstT, * tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] * * - * @param data type for {@code out} output * @param real The real value * @param imag The imag value * @param Tout The value of the Tout attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ImageOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ImageOps.java index 559ffc0d80a..f3fa3e6bbc0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ImageOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ImageOps.java @@ -93,7 +93,6 @@ public final class ImageOps { * channel and then adjusts each component of each pixel to * {@code (x - mean) * contrast_factor + mean}. * - * @param data type for {@code output} output * @param images Images to adjust. At least 3-D. * @param contrastFactor A float multiplier for adjusting contrast. * @param data type for {@code AdjustContrastv2} output and operands @@ -112,7 +111,6 @@ public AdjustContrast adjustContrast(Operand images, * colors are first mapped into HSV. 
A delta is then applied all the hue values, * and then remapped back to RGB colorspace. * - * @param data type for {@code output} output * @param images Images to adjust. At least 3-D. * @param delta A float delta to add to the hue. * @param data type for {@code AdjustHue} output and operands @@ -130,7 +128,6 @@ public AdjustHue adjustHue(Operand images, Operand data type for {@code output} output * @param images Images to adjust. At least 3-D. * @param scale A float scale to add to the saturation. * @param data type for {@code AdjustSaturation} output and operands @@ -250,7 +247,6 @@ public CropAndResizeGradBoxes cropAndResizeGradBoxes(Operand grads, /** * Computes the gradient of the crop_and_resize op wrt the input image tensor. * - * @param data type for {@code output} output * @param grads A 4-D tensor of shape {@code [num_boxes, crop_height, crop_width, depth]}. * @param boxes A 2-D tensor of shape {@code [num_boxes, 4]}. The {@code i}-th row of the tensor * specifies the coordinates of a box in the {@code box_ind[i]} image and is specified @@ -357,7 +353,6 @@ public DecodeGif decodeGif(Operand contents) { * first frame that does not occupy the entire canvas, it uses the previous * frame to fill the unoccupied areas. * - * @param data type for {@code image} output * @param contents 0-D. The encoded image bytes. * @param options carries optional attribute values * @return a new instance of DecodeImage, with default output types @@ -384,7 +379,6 @@ public DecodeImage decodeImage(Operand contents, DecodeImage.Op * first frame that does not occupy the entire canvas, it uses the previous * frame to fill the unoccupied areas. * - * @param data type for {@code image} output * @param contents 0-D. The encoded image bytes. * @param dtype The desired DType of the returned Tensor. * @param options carries optional attribute values @@ -438,7 +432,6 @@ public DecodeJpeg decodeJpeg(Operand contents, DecodeJpeg.Options... op *
This op also supports decoding JPEGs and non-animated GIFs since the interface * is the same, though it is cleaner to use {@code tf.io.decode_image}. * - * @param data type for {@code image} output * @param contents 0-D. The PNG-encoded image. * @param options carries optional attribute values * @return a new instance of DecodePng, with default output types @@ -463,7 +456,6 @@ public DecodePng decodePng(Operand contents, DecodePng.Options[ *
This op also supports decoding JPEGs and non-animated GIFs since the interface * is the same, though it is cleaner to use {@code tf.io.decode_image}. * - * @param data type for {@code image} output * @param contents 0-D. The PNG-encoded image. * @param dtype The value of the dtype attribute * @param options carries optional attribute values @@ -487,7 +479,6 @@ public DecodePng decodePng(Operand contents, Cla * the bounding box will be {@code (40, 10)} to {@code (100, 50)} (in (x,y) coordinates). *
Parts of the bounding box may fall outside the image. * - * @param data type for {@code output} output * @param images 4-D with shape {@code [batch, height, width, depth]}. A batch of images. * @param boxes 3-D with shape {@code [batch, num_bounding_boxes, 4]} containing bounding * boxes. @@ -602,7 +593,6 @@ public ExtractGlimpse extractGlimpse(Operand input, Operand si /** * Extract {@code patches} from {@code images} and put them in the "depth" output dimension. * - * @param data type for {@code patches} output * @param images 4-D Tensor with shape {@code [batch, in_rows, in_cols, depth]}. * @param ksizes The size of the sliding window for each dimension of {@code images}. * @param strides How far the centers of two consecutive patches are in @@ -626,7 +616,6 @@ public ExtractImagePatches extractImagePatches(Operand i * Extract the shape information of a JPEG-encoded image. * This op only parses the image header, so it is much faster than DecodeJpeg. * - * @param data type for {@code image_shape} output * @param contents 0-D. The JPEG-encoded image. * @return a new instance of ExtractJpegShape, with default output types */ @@ -638,7 +627,6 @@ public ExtractJpegShape extractJpegShape(Operand contents) { * Extract the shape information of a JPEG-encoded image. * This op only parses the image header, so it is much faster than DecodeJpeg. * - * @param data type for {@code image_shape} output * @param contents 0-D. The JPEG-encoded image. * @param outputType (Optional) The output type of the operation (int32 or int64). * Defaults to int32. @@ -691,7 +679,6 @@ public GenerateBoundingBoxProposals generateBoundingBoxProposals(OperandSee {@code rgb_to_hsv} for a description of the HSV encoding. * - * @param data type for {@code output} output * @param images 1-D or higher rank. HSV data to convert. Last dimension must be size 3. * @param data type for {@code HSVToRGB} output and operands * @return a new instance of HsvToRgb @@ -708,7 +695,6 @@ public HsvToRgb hsvToRgb(Operand images) { * {@code k = c0 x + c1 y + 1}. If the transformed point lays outside of the input * image, the output pixel is set to 0. * - * @param data type for {@code transformed_images} output * @param images 4-D with shape {@code [batch, height, width, channels]}. * @param transforms 2-D Tensor, {@code [batch, 8]} or {@code [1, 8]} matrix, where each row corresponds to a 3 x 3 * projective transformation matrix, with the last entry assumed to be 1. If there @@ -733,7 +719,6 @@ public ImageProjectiveTransformV2 imageProjectiveTransfor * {@code k = c0 x + c1 y + 1}. If the transformed point lays outside of the input * image, the output pixel is set to fill_value. * - * @param data type for {@code transformed_images} output * @param images 4-D with shape {@code [batch, height, width, channels]}. * @param transforms 2-D Tensor, {@code [batch, 8]} or {@code [1, 8]} matrix, where each row corresponds to a 3 x 3 * projective transformation matrix, with the last entry assumed to be 1. If there @@ -794,7 +779,6 @@ public NearestNeighbors nearestNeighbors(Operand points, Operand data type for {@code selected_scores} output * @param boxes A 2-D float tensor of shape {@code [num_boxes, 4]}. * @param scores A 1-D float tensor of shape {@code [num_boxes]} representing a single * score corresponding to each box (each row of boxes). @@ -854,7 +838,6 @@ public NonMaxSuppressionWithOverlaps nonMaxSuppressionWithOverlaps(Operand data type for {@code resized_images} output * @param images 4-D with shape {@code [batch, height, width, channels]}. 
* @param sizeOutput = A 1-D int32 Tensor of 2 elements: {@code new_height, new_width}. The * new size for the images. @@ -878,7 +861,6 @@ public QuantizedResizeBilinear quantizedResizeBilinear(Op * rectangle from that location. The random location is picked so the cropped * area will fit inside the original image. * - * @param data type for {@code output} output * @param image 3-D of shape {@code [height, width, channels]}. * @param sizeOutput 1-D of length 2 containing: {@code crop_height}, {@code crop_width}.. * @param options carries optional attribute values @@ -931,7 +913,6 @@ public ResizeBicubic resizeBicubic(Operand images, Operand data type for {@code output} output * @param grads 4-D with shape {@code [batch, height, width, channels]}. * @param originalImage 4-D with shape {@code [batch, orig_height, orig_width, channels]}, * The image tensor that was resized. @@ -962,7 +943,6 @@ public ResizeBilinear resizeBilinear(Operand images, /** * Computes the gradient of bilinear interpolation. * - * @param data type for {@code output} output * @param grads 4-D with shape {@code [batch, height, width, channels]}. * @param originalImage 4-D with shape {@code [batch, orig_height, orig_width, channels]}, * The image tensor that was resized. @@ -978,7 +958,6 @@ public ResizeBilinearGrad resizeBilinearGrad(Operand data type for {@code resized_images} output * @param images 4-D with shape {@code [batch, height, width, channels]}. * @param sizeOutput = A 1-D int32 Tensor of 2 elements: {@code new_height, new_width}. The * new size for the images. @@ -994,7 +973,6 @@ public ResizeNearestNeighbor resizeNearestNeighbor(Operan /** * Computes the gradient of nearest neighbor interpolation. * - * @param data type for {@code output} output * @param grads 4-D with shape {@code [batch, height, width, channels]}. * @param sizeOutput = A 1-D int32 Tensor of 2 elements: {@code orig_height, orig_width}. The * original input size. @@ -1031,7 +1009,6 @@ public ResizeNearestNeighborGrad resizeNearestNeighborGra * * * - * @param data type for {@code output} output * @param images 1-D or higher rank. RGB data to convert. Last dimension must be size 3. * @param data type for {@code RGBToHSV} output and operands * @return a new instance of RgbToHsv @@ -1076,7 +1053,6 @@ public RgbToHsv rgbToHsv(Operand images) { * bounding box covering the whole image. If {@code use_image_if_no_bounding_boxes} is * false and no bounding boxes are supplied, an error is raised. * - * @param data type for {@code begin} output * @param imageSize 1-D, containing {@code [height, width, channels]}. * @param boundingBoxes 3-D with shape {@code [batch, N, 4]} describing the N bounding boxes * associated with the image. @@ -1113,7 +1089,6 @@ public ScaleAndTranslate scaleAndTranslate(Operand images, /** * The ScaleAndTranslateGrad operation * - * @param data type for {@code output} output * @param grads The grads value * @param originalImage The originalImage value * @param scale The scale value @@ -1189,7 +1164,6 @@ public ScaleAndTranslateGrad scaleAndTranslateGrad(Operan * bounding box covering the whole image. If {@code use_image_if_no_bounding_boxes} is * false and no bounding boxes are supplied, an error is raised. * - * @param data type for {@code begin} output * @param imageSize 1-D, containing {@code [height, width, channels]}. * @param boundingBoxes 3-D with shape {@code [batch, N, 4]} describing the N bounding boxes * associated with the image. 
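For reference (not part of this change), the generated ImageOps bindings shown above are normally driven through an Ops instance; the sketch below assumes the eager tensorflow-core-api, and the class name and tensor values are illustrative only.

import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TFloat32;

// Illustrative sketch only: adjust contrast of a dummy batch, then resize it.
public final class ImageOpsSketch {
  public static void main(String[] args) {
    Ops tf = Ops.create();  // eager execution
    // One 4x4 RGB image filled with 0.5f: shape [batch, height, width, channels].
    Operand<TFloat32> images = tf.fill(tf.constant(new int[] {1, 4, 4, 3}), tf.constant(0.5f));
    Operand<TFloat32> adjusted = tf.image.adjustContrast(images, tf.constant(1.5f));
    Operand<TFloat32> resized = tf.image.resizeBilinear(adjusted, tf.constant(new int[] {8, 8}));
    System.out.println(resized.asTensor().shape());  // [1, 8, 8, 3]
  }
}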
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/IoOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/IoOps.java index e038446af4a..5c33c56e962 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/IoOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/IoOps.java @@ -160,7 +160,6 @@ public DecodeJsonExample decodeJsonExample(Operand jsonExamples) { /** * Reinterpret the bytes of a string as a vector of numbers. * - * @param data type for {@code output} output * @param inputBytes Tensor of string to be decoded. * @param fixedLength Length in bytes for each element of the decoded output. Must be a multiple * of the size of the output type. @@ -177,7 +176,6 @@ public DecodePaddedRaw decodePaddedRaw(Operand i /** * Reinterpret the bytes of a string as a vector of numbers. * - * @param data type for {@code output} output * @param bytes All the elements must have the same length. * @param outType The value of the outType attribute * @param options carries optional attribute values @@ -231,7 +229,6 @@ public DecodeRaw decodeRaw(Operand bytes, Class * shape = [2 50] * * - * @param data type for {@code sparse_values} output * @param serializedSparse 2-D, The {@code N} serialized {@code SparseTensor} objects. * Must have 3 columns. * @param dtype The {@code dtype} of the serialized {@code SparseTensor} objects. @@ -581,7 +578,6 @@ public ParseSingleSequenceExample parseSingleSequenceExample(Operand se /** * Transforms a serialized tensorflow.TensorProto proto into a Tensor. * - * @param data type for {@code output} output * @param serialized A scalar string containing a serialized TensorProto proto. * @param outType The type of the serialized tensor. The provided type must match the * type of the serialized tensor and no implicit conversion will take place. @@ -883,7 +879,6 @@ public ReaderSerializeState readerSerializeState(Operand reader * rank {@code R-1}. *
The minibatch size {@code N} is extracted from {@code sparse_shape[0]}. * - * @param data type for {@code serialized_sparse} output * @param sparseIndices 2-D. The {@code indices} of the minibatch {@code SparseTensor}. * @param sparseValues 1-D. The {@code values} of the minibatch {@code SparseTensor}. * @param sparseShape 1-D. The {@code shape} of the minibatch {@code SparseTensor}. @@ -903,7 +898,6 @@ public SerializeManySparse serializeManySparse(Operand sparseIn * rank {@code R-1}. *
The minibatch size {@code N} is extracted from {@code sparse_shape[0]}. * - * @param data type for {@code serialized_sparse} output * @param sparseIndices 2-D. The {@code indices} of the minibatch {@code SparseTensor}. * @param sparseValues 1-D. The {@code values} of the minibatch {@code SparseTensor}. * @param sparseShape 1-D. The {@code shape} of the minibatch {@code SparseTensor}. @@ -920,7 +914,6 @@ public SerializeManySparse serializeManySparse(Operand data type for {@code serialized_sparse} output * @param sparseIndices 2-D. The {@code indices} of the {@code SparseTensor}. * @param sparseValues 1-D. The {@code values} of the {@code SparseTensor}. * @param sparseShape 1-D. The {@code shape} of the {@code SparseTensor}. @@ -934,7 +927,6 @@ public SerializeSparse serializeSparse(Operand sparseIndices, /** * Serialize a {@code SparseTensor} into a {@code [3]} {@code Tensor} object. * - * @param data type for {@code serialized_sparse} output * @param sparseIndices 2-D. The {@code indices} of the {@code SparseTensor}. * @param sparseValues 1-D. The {@code values} of the {@code SparseTensor}. * @param sparseShape 1-D. The {@code shape} of the {@code SparseTensor}. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/LinalgOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/LinalgOps.java index 87d87f85dcf..b83771a2930 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/LinalgOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/LinalgOps.java @@ -127,7 +127,6 @@ public final class LinalgOps { * tf.linalg.band_part(input, 0, 0) ==> Diagonal. * * - * @param data type for {@code band} output * @param input Rank {@code k} tensor. * @param numLower 0-D tensor. Number of subdiagonals to keep. If negative, keep entire * lower triangle. 
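As a cross-check (illustrative only, not part of this change), the band-selection rule documented above for bandPart can be exercised as follows under the eager Ops API; the class name and tensor values are assumptions made for the example.

import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TInt32;

// Illustrative sketch only: keep the main diagonal plus one superdiagonal.
public final class BandPartSketch {
  public static void main(String[] args) {
    Ops tf = Ops.create();  // eager execution
    Operand<TInt32> m = tf.constant(new int[][] {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
    // numLower = 0 keeps no subdiagonals; numUpper = 1 keeps one superdiagonal.
    Operand<TInt32> band = tf.linalg.bandPart(m, tf.constant(0), tf.constant(1));
    // band ==> [[1, 2, 0], [0, 5, 6], [0, 0, 9]]
    System.out.println(band.asTensor().getInt(0, 1));  // 2
  }
}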
@@ -145,7 +144,6 @@ public BandPart bandPart(Operand inpu /** * The BandedTriangularSolve operation * - * @param data type for {@code output} output * @param matrix The matrix value * @param rhs The rhs value * @param options carries optional attribute values @@ -160,7 +158,6 @@ public BandedTriangularSolve bandedTriangularSolve(Operand< /** * The BatchCholesky operation * - * @param data type for {@code output} output * @param input The input value * @param data type for {@code BatchCholesky} output and operands * @return a new instance of BatchCholesky @@ -172,7 +169,6 @@ public BatchCholesky batchCholesky(Operand input) { /** * The BatchCholeskyGrad operation * - * @param data type for {@code output} output * @param l The l value * @param grad The grad value * @param data type for {@code BatchCholeskyGrad} output and operands @@ -185,7 +181,6 @@ public BatchCholeskyGrad batchCholeskyGrad(Operand l, /** * The BatchMatrixBandPart operation * - * @param data type for {@code band} output * @param input The input value * @param numLower The numLower value * @param numUpper The numUpper value @@ -200,7 +195,6 @@ public BatchMatrixBandPart batchMatrixBandPart(Operand i /** * The BatchMatrixDeterminant operation * - * @param data type for {@code output} output * @param input The input value * @param data type for {@code BatchMatrixDeterminant} output and operands * @return a new instance of BatchMatrixDeterminant @@ -212,7 +206,6 @@ public BatchMatrixDeterminant batchMatrixDeterminant(Operan /** * The BatchMatrixDiag operation * - * @param data type for {@code output} output * @param diagonal The diagonal value * @param data type for {@code BatchMatrixDiag} output and operands * @return a new instance of BatchMatrixDiag @@ -224,7 +217,6 @@ public BatchMatrixDiag batchMatrixDiag(Operand diagonal) /** * The BatchMatrixDiagPart operation * - * @param data type for {@code diagonal} output * @param input The input value * @param data type for {@code BatchMatrixDiagPart} output and operands * @return a new instance of BatchMatrixDiagPart @@ -236,7 +228,6 @@ public BatchMatrixDiagPart batchMatrixDiagPart(Operand i /** * The BatchMatrixInverse operation * - * @param data type for {@code output} output * @param input The input value * @param options carries optional attribute values * @param data type for {@code BatchMatrixInverse} output and operands @@ -250,7 +241,6 @@ public BatchMatrixInverse batchMatrixInverse(Operand i /** * The BatchMatrixSetDiag operation * - * @param data type for {@code output} output * @param input The input value * @param diagonal The diagonal value * @param data type for {@code BatchMatrixSetDiag} output and operands @@ -264,7 +254,6 @@ public BatchMatrixSetDiag batchMatrixSetDiag(Operand inp /** * The BatchMatrixSolve operation * - * @param data type for {@code output} output * @param matrix The matrix value * @param rhs The rhs value * @param options carries optional attribute values @@ -279,7 +268,6 @@ public BatchMatrixSolve batchMatrixSolve(Operand matri /** * The BatchMatrixSolveLs operation * - * @param data type for {@code output} output * @param matrix The matrix value * @param rhs The rhs value * @param l2Regularizer The l2Regularizer value @@ -295,7 +283,6 @@ public BatchMatrixSolveLs batchMatrixSolveLs(Operand m /** * The BatchMatrixTriangularSolve operation * - * @param data type for {@code output} output * @param matrix The matrix value * @param rhs The rhs value * @param options carries optional attribute values @@ -310,7 +297,6 @@ public 
BatchMatrixTriangularSolve batchMatrixTriangularSo /** * The BatchSelfAdjointEigV2 operation * - * @param data type for {@code e} output * @param input The input value * @param options carries optional attribute values * @param data type for {@code BatchSelfAdjointEigV2} output and operands @@ -324,7 +310,6 @@ public BatchSelfAdjointEig batchSelfAdjointEig(Operand /** * The BatchSvd operation * - * @param data type for {@code s} output * @param input The input value * @param options carries optional attribute values * @param data type for {@code BatchSvd} output and operands @@ -347,7 +332,6 @@ public BatchSvd batchSvd(Operand input, BatchSvd.Options * not for large batch dimensions when the submatrices are small. In this * case it might be faster to use the CPU. * - * @param data type for {@code output} output * @param input Shape is {@code [..., M, M]}. * @param data type for {@code Cholesky} output and operands * @return a new instance of Cholesky @@ -361,7 +345,6 @@ public Cholesky cholesky(Operand input) { * For an explanation see "Differentiation of the Cholesky algorithm" by * Iain Murray http://arxiv.org/abs/1602.07527. * - * @param data type for {@code output} output * @param l Output of batch Cholesky algorithm l = cholesky(A). Shape is {@code [..., M, M]}. * Algorithm depends only on lower triangular part of the innermost matrices of * this tensor. @@ -381,7 +364,6 @@ public CholeskyGrad choleskyGrad(Operand l, Operand * {@code y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]} * {@code y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])} * - * @param data type for {@code y} output * @param x The x value * @param perm The perm value * @param data type for {@code ConjugateTranspose} output and operands @@ -398,7 +380,6 @@ public ConjugateTranspose conjugateTranspose(Operand x, * or any shape where the innermost dimension is 3. In the latter case, each pair * of corresponding 3-element vectors is cross-multiplied independently. * - * @param data type for {@code product} output * @param a A tensor containing 3-element vectors. * @param b Another tensor, of same type and shape as {@code a}. * @param data type for {@code Cross} output and operands @@ -414,7 +395,6 @@ public Cross cross(Operand a, Operand b) { * form square matrices. The output is a tensor containing the determinants * for all input submatrices {@code [..., :, :]}. * - * @param data type for {@code output} output * @param input Shape is {@code [..., M, M]}. * @param data type for {@code MatrixDeterminant} output and operands * @return a new instance of Det @@ -436,7 +416,6 @@ public Det det(Operand input) { * e = eig(a, compute_v=False) * * - * @param data type for {@code e} output * @param input {@code Tensor} input of shape {@code [N, N]}. * @param Tout The value of the Tout attribute * @param options carries optional attribute values @@ -514,7 +493,6 @@ public Eig eig(Operand input, Class Tou *
{@literal @}end_compatibility * * - * @param data type for {@code output} output * @param inputs List of 1 or 2 Tensors. * @param equation String describing the Einstein Summation operation; in the format of np.einsum. * @param data type for {@code Einsum} output and operands @@ -531,7 +509,6 @@ public Einsum einsum(Iterable> inputs, String eq * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. * - * @param data type for {@code output} output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range * {@code [-rank(input), rank(input))}. @@ -554,7 +531,6 @@ public EuclideanNorm euclideanNorm(Operand input, * may detect the condition and raise an exception or it may simply return a * garbage result. * - * @param data type for {@code output} output * @param input Shape is {@code [..., M, M]}. * @param options carries optional attribute values * @param data type for {@code MatrixInverse} output and operands @@ -632,7 +608,6 @@ public LoadAndRemapMatrix loadAndRemapMatrix(Operand ckptPath, * is the {@code LU} decomposition of the input and {@code P} is the corresponding * permutation matrix. * - * @param data type for {@code sign} output * @param input Shape is {@code [N, M, M]}. * @param data type for {@code LogMatrixDeterminant} output and operands * @return a new instance of LogMatrixDeterminant @@ -657,8 +632,6 @@ public LogMatrixDeterminant logMatrixDeterminant(Operand * and {@code M-1}, inclusive. If P_mat denotes the permutation matrix corresponding to * P, then the L, U and P satisfies P_mat * input = L * U. * - * @param data type for {@code lu} output - * @param data type for {@code p} output * @param input A tensor of shape {@code [..., M, M]} whose inner-most 2 dimensions form matrices of * size {@code [M, M]}. * @param data type for {@code Lu} output and operands @@ -684,8 +657,6 @@ public Lu lu(Operand input) { * and {@code M-1}, inclusive. If P_mat denotes the permutation matrix corresponding to * P, then the L, U and P satisfies P_mat * input = L * U. * - * @param data type for {@code lu} output - * @param data type for {@code p} output * @param input A tensor of shape {@code [..., M, M]} whose inner-most 2 dimensions form matrices of * size {@code [M, M]}. * @param outputIdxType The value of the outputIdxType attribute @@ -707,7 +678,6 @@ public Lu lu(Operand input, *
Note: The default kernel implementation for MatMul on GPUs uses * cublas. * - * @param data type for {@code product} output * @param a The a value * @param b The b value * @param options carries optional attribute values @@ -801,7 +771,6 @@ public MatMul matMul(Operand a, Operand b, MatMul.Opt * [9, 2]] * * - * @param data type for {@code output} output * @param diagonal Rank {@code r}, where {@code r >= 1} * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main * diagonal, and negative value means subdiagonals. {@code k} can be a single integer @@ -886,7 +855,6 @@ public MatrixDiag matrixDiag(Operand diagonal, Operand * - * @param data type for {@code diagonal} output * @param input Rank {@code r} tensor where {@code r >= 2}. * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main * diagonal, and negative value means subdiagonals. {@code k} can be a single integer @@ -995,7 +963,6 @@ public MatrixDiagPart matrixDiagPart(Operand input, Oper * * * - * @param data type for {@code diagonal} output * @param input Rank {@code r} tensor where {@code r >= 2}. * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main * diagonal, and negative value means subdiagonals. {@code k} can be a single integer @@ -1123,7 +1090,6 @@ public MatrixDiagPartV3 matrixDiagPartV3(Operand input, * * * - * @param data type for {@code output} output * @param diagonal Rank {@code r}, where {@code r >= 1} * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main * diagonal, and negative value means subdiagonals. {@code k} can be a single integer @@ -1150,7 +1116,6 @@ public MatrixDiagV3 matrixDiagV3(Operand diagonal, Opera /** * Deprecated, use python implementation tf.linalg.matrix_exponential. * - * @param data type for {@code output} output * @param input The input value * @param data type for {@code MatrixExponential} output and operands * @return a new instance of MatrixExponential @@ -1173,7 +1138,6 @@ public MatrixExponential matrixExponential(Operand input * form square matrices. The output is a tensor of the same shape as the input * containing the exponential for all input submatrices {@code [..., :, :]}. * - * @param data type for {@code output} output * @param input Shape is {@code [..., M, M]}. * @param data type for {@code MatrixLogarithm} output and operands * @return a new instance of MatrixLogarithm @@ -1281,7 +1245,6 @@ public MatrixLogarithm matrixLogarithm(Operand input) { * * * - * @param data type for {@code output} output * @param input Rank {@code r+1}, where {@code r >= 1}. * @param diagonal Rank {@code r} when {@code k} is an integer or {@code k[0] == k[1]}. Otherwise, it has rank {@code r+1}. * {@code k >= 1}. @@ -1331,7 +1294,6 @@ public MatrixSetDiag matrixSetDiag(Operand input, Operan * typically 6-7 times slower than the fast path. If {@code fast} is {@code False} then * {@code l2_regularizer} is ignored. * - * @param data type for {@code output} output * @param matrix Shape is {@code [..., M, N]}. * @param rhs Shape is {@code [..., M, K]}. * @param l2Regularizer Scalar tensor. @@ -1362,7 +1324,6 @@ public MatrixSolveLs matrixSolveLs(Operand matrix, Opera * q_full, r_full = qr(a, full_matrices=True) * * - * @param data type for {@code q} output * @param input A tensor of shape {@code [..., M, N]} whose inner-most 2 dimensions * form matrices of size {@code [M, N]}. Let {@code P} be the minimum of {@code M} and {@code N}. 
* @param options carries optional attribute values @@ -1380,7 +1341,6 @@ public Qr qr(Operand input, Qr.Options... options) { * outer dimension of {@code b} (after being transposed if {@code transposed_b} is * non-zero). * - * @param data type for {@code out} output * @param a Must be a two-dimensional tensor. * @param b Must be a two-dimensional tensor. * @param minA The float value that the lowest quantized {@code a} value represents. @@ -1411,7 +1371,6 @@ public QuantizedMatMul quantizedMatMul * non-zero). Then do broadcast add operation with bias values on the matrix * multiplication result. The bias size must match inner dimension of {@code b}. * - * @param data type for {@code out} output * @param a A matrix to be multiplied. Must be a two-dimensional tensor of type {@code quint8}. * @param b A matrix to be multiplied and must be a two-dimensional tensor of type {@code qint8}. * @param bias A 1D bias tensor with size matching inner dimension of {@code b} (after being @@ -1442,7 +1401,6 @@ public QuantizedMatMulWithBias quantizedMatMulWithBias( * multiplication result. The bias size must match inner dimension of {@code b}. Then do * relu activation to get non-negative result. * - * @param data type for {@code out} output * @param a A matrix to be multiplied. Must be a two-dimensional tensor of type {@code quint8}. * @param b A matrix to be multiplied and must be a two-dimensional tensor of type {@code qint8}. * @param bias A 1D bias tensor with size matching with inner dimension of {@code b} (after being @@ -1474,7 +1432,6 @@ public QuantizedMatMulWithBiasAndRelu quantizedMatMulWith * relu activation to get non-negative result. Then do requantize operation to get * final uint8 result. * - * @param data type for {@code out} output * @param a A matrix to be multiplied. Must be a two-dimensional tensor of type {@code quint8}. * @param b A matrix to be multiplied and must be a two-dimensional tensor of type {@code qint8}. * @param bias A 1D bias tensor with size matching with inner dimension of {@code b} (after being @@ -1512,7 +1469,6 @@ public QuantizedMatMulWithBiasAndReluAndRequantize quanti * e = self_adjoint_eig(a, compute_v=False) * * - * @param data type for {@code e} output * @param input {@code Tensor} input of shape {@code [N, N]}. * @param options carries optional attribute values * @param data type for {@code SelfAdjointEigV2} output and operands @@ -1532,7 +1488,6 @@ public SelfAdjointEig selfAdjointEig(Operand input, * If {@code adjoint} is {@code True} then each output matrix satisfies * {@code adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]}. * - * @param data type for {@code output} output * @param matrix Shape is {@code [..., M, M]}. * @param rhs Shape is {@code [..., M, K]}. * @param options carries optional attribute values @@ -1559,7 +1514,6 @@ public Solve solve(Operand matrix, Operand rhs, * form square matrices. The output is a tensor of the same shape as the input * containing the matrix square root for all input submatrices {@code [..., :, :]}. * - * @param data type for {@code output} output * @param input Shape is {@code [..., M, M]}. * @param data type for {@code MatrixSquareRoot} output and operands * @return a new instance of Sqrtm @@ -1581,7 +1535,6 @@ public Sqrtm sqrtm(Operand input) { * s, _, _ = svd(a, compute_uv=False) * * - * @param data type for {@code s} output * @param input A tensor of shape {@code [..., M, N]} whose inner-most 2 dimensions * form matrices of size {@code [M, N]}. 
Let {@code P} be the minimum of {@code M} and {@code N}. * @param options carries optional attribute values @@ -1608,7 +1561,6 @@ public Svd svd(Operand input, Svd.Options... options) { * [0, 0, 0, 4]] * * - * @param data type for {@code output} output * @param diagonal Rank k tensor where k is at most 1. * @param data type for {@code Diag} output and operands * @return a new instance of TensorDiag @@ -1634,7 +1586,6 @@ public TensorDiag tensorDiag(Operand diagonal) { * tf.diag_part(input) ==> [1, 2, 3, 4] * * - * @param data type for {@code diagonal} output * @param input Rank k tensor where k is even and not zero. * @param data type for {@code DiagPart} output and operands * @return a new instance of TensorDiagPart @@ -1648,7 +1599,6 @@ public TensorDiagPart tensorDiagPart(Operand input) { * The output {@code y} has the same rank as {@code x}. The shapes of {@code x} and {@code y} satisfy: * {@code y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]} * - * @param data type for {@code y} output * @param x The x value * @param perm The perm value * @param data type for {@code Transpose} output and operands @@ -1703,7 +1653,6 @@ public Transpose transpose(Operand x, Operand * - * @param data type for {@code output} output * @param matrix Shape is {@code [..., M, M]}. * @param rhs Shape is {@code [..., M, K]}. * @param options carries optional attribute values @@ -1719,7 +1668,6 @@ public TriangularSolve triangularSolve(Operand matrix, O * Calculate product with tridiagonal matrix. * Calculates product of two matrices, where left matrix is a tridiagonal matrix. * - * @param data type for {@code output} output * @param superdiag Tensor of shape {@code [..., 1, M]}, representing superdiagonals of * tri-diagonal matrices to the left of multiplication. Last element is ignored. * @param maindiag Tensor of shape {@code [..., 1, M]}, representing main diagonals of tri-diagonal @@ -1746,7 +1694,6 @@ public TridiagonalMatMul tridiagonalMatMul(Operand super * library is used: https://docs.nvidia.com/cuda/cusparse/index.html#gtsv * Partial pivoting is not yet supported by XLA backends. * - * @param data type for {@code output} output * @param diagonals Tensor of shape {@code [..., 3, M]} whose innermost 2 dimensions represent the * tridiagonal matrices with three rows being the superdiagonal, diagonals, and * subdiagonals, in order. The last element of the superdiagonal and the first diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/LinalgSparseOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/LinalgSparseOps.java index ed8c4fdbb90..7210249ba1f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/LinalgSparseOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/LinalgSparseOps.java @@ -59,7 +59,6 @@ public final class LinalgSparseOps { * This op is meant only for debugging / testing, and its interface is not expected * to be stable. * - * @param data type for {@code values} output * @param csrSparseMatrix A batched CSRSparseMatrix. * @param index The index in {@code csr_sparse_matrix}'s batch. * @param type The value of the type attribute @@ -74,7 +73,6 @@ public CSRSparseMatrixComponents cSRSparseMatrixComponents( /** * Convert a (possibly batched) CSRSparseMatrix to dense. * - * @param data type for {@code dense_output} output * @param sparseInput A batched CSRSparseMatrix. 
* @param type The value of the type attribute * @param data type for {@code CSRSparseMatrixToDense} output and operands @@ -88,7 +86,6 @@ public CSRSparseMatrixToDense cSRSparseMatrixToDense( /** * Converts a (possibly batched) CSRSparesMatrix to a SparseTensor. * - * @param data type for {@code values} output * @param sparseMatrix A (possibly batched) CSRSparseMatrix. * @param type The value of the type attribute * @param data type for {@code CSRSparseMatrixToSparseTensor} output and operands @@ -152,7 +149,6 @@ public SparseMatrixAdd sparseMatrixAdd(Operand * - * @param data type for {@code output} output * @param a A CSRSparseMatrix. * @param b A dense tensor. * @param options carries optional attribute values diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/MathOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/MathOps.java index ee2e3a46c27..d3dcfc686ad 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/MathOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/MathOps.java @@ -168,7 +168,6 @@ public final class MathOps { * value of each element in {@code x}. For example, if x is an input element and y is * an output element, this operation computes \(y = |x|\). * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Abs} output and operands * @return a new instance of Abs @@ -186,7 +185,6 @@ public Abs abs(Operand x) { *
Unlike the original {@code accumulate_n}, {@code accumulate_n_v2} is differentiable. *
Returns a {@code Tensor} of same shape and type as the elements of {@code inputs}. * - * @param data type for {@code sum} output * @param inputs A list of {@code Tensor} objects, each with same shape and type. * @param shape Shape of elements of {@code inputs}. * @param data type for {@code AccumulateNV2} output and operands @@ -201,7 +199,6 @@ public AccumulateN accumulateN(Iterable> inputs, * Provided an input tensor, the {@code tf.math.acos} operation returns the inverse cosine of each element of the tensor. If {@code y = tf.math.cos(x)} then, {@code x = tf.math.acos(y)}. *
Input range is {@code [-1, 1]} and the output has a range of {@code [0, pi]}. * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Acos} output and operands * @return a new instance of Acos @@ -219,7 +216,6 @@ public Acos acos(Operand x) { * tf.math.acosh(x) ==> [nan nan 0. 0.62236255 5.9914584 9.903487 inf] * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Acosh} output and operands * @return a new instance of Acosh @@ -235,7 +231,6 @@ public Acosh acosh(Operand x) { *
Given two input tensors, the {@code tf.add} operation computes the sum for every element in the tensor. *
Both input and output have a range {@code (-inf, inf)}. * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code Add} output and operands @@ -253,7 +248,6 @@ public Add add(Operand x, Operand y) { * tf.math.add_n(x) ==> 26 * * - * @param data type for {@code sum} output * @param inputs The inputs value * @param data type for {@code AddN} output and operands * @return a new instance of AddN @@ -278,7 +272,6 @@ public AddN addN(Iterable> inputs) { * Equivalent to np.angle. *
{@literal @}end_compatibility * - * @param data type for {@code output} output * @param input The input value * @return a new instance of Angle, with default output types */ @@ -302,7 +295,6 @@ public Angle angle(Operand input) { * Equivalent to np.angle. *
{@literal @}end_compatibility * - * @param data type for {@code output} output * @param input The input value * @param Tout The value of the Tout attribute * @param data type for {@code Angle} output and operands @@ -339,7 +331,6 @@ public ApproximateEqual approximateEqual(Operand x, Operand * # here a[4] = 166.32 which is the largest element of a across axis 0 * * - * @param data type for {@code output} output * @param input The input value * @param dimension int16, int32 or int64, must be in the range {@code [-rank(input), rank(input))}. * Describes which dimension of the input Tensor to reduce across. For vectors, @@ -364,7 +355,6 @@ public ArgMax argMax(Operand input, * # here a[4] = 166.32 which is the largest element of a across axis 0 * * - * @param data type for {@code output} output * @param input The input value * @param dimension int16, int32 or int64, must be in the range {@code [-rank(input), rank(input))}. * Describes which dimension of the input Tensor to reduce across. For vectors, @@ -391,7 +381,6 @@ public ArgMax argMax(Operand input, * # here a[0] = 1 which is the smallest element of a across axis 0 * * - * @param data type for {@code output} output * @param input The input value * @param dimension int32 or int64, must be in the range {@code [-rank(input), rank(input))}. * Describes which dimension of the input Tensor to reduce across. For vectors, @@ -416,7 +405,6 @@ public ArgMin argMin(Operand input, * # here a[0] = 1 which is the smallest element of a across axis 0 * * - * @param data type for {@code output} output * @param input The input value * @param dimension int32 or int64, must be in the range {@code [-rank(input), rank(input))}. * Describes which dimension of the input Tensor to reduce across. For vectors, @@ -445,7 +433,6 @@ public ArgMin argMin(Operand input, * tf.math.asin(y) # [1.047, 0.785] = x * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Asin} output and operands * @return a new instance of Asin @@ -464,7 +451,6 @@ public Asin asin(Operand x) { * tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 9.903487 inf] * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Asinh} output and operands * @return a new instance of Asinh @@ -488,7 +474,6 @@ public Asinh asinh(Operand x) { * tf.math.atan(y) # [1.047, 0.785] = x * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Atan} output and operands * @return a new instance of Atan @@ -516,7 +501,6 @@ public Atan atan(Operand x) { * * * - * @param data type for {@code z} output * @param y The y value * @param x The x value * @param data type for {@code Atan2} output and operands @@ -538,7 +522,6 @@ public Atan2 atan2(Operand y, Operand x) { * tf.math.atanh(x) ==> [nan -inf -0.54930615 inf 0. 
0.54930615 nan nan] * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Atanh} output and operands * @return a new instance of Atanh @@ -550,7 +533,6 @@ public Atanh atanh(Operand x) { /** * The BesselI0 operation * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code BesselI0} output and operands * @return a new instance of BesselI0 @@ -562,7 +544,6 @@ public BesselI0 besselI0(Operand x) { /** * The BesselI0e operation * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code BesselI0e} output and operands * @return a new instance of BesselI0e @@ -574,7 +555,6 @@ public BesselI0e besselI0e(Operand x) { /** * The BesselI1 operation * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code BesselI1} output and operands * @return a new instance of BesselI1 @@ -586,7 +566,6 @@ public BesselI1 besselI1(Operand x) { /** * The BesselI1e operation * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code BesselI1e} output and operands * @return a new instance of BesselI1e @@ -604,7 +583,6 @@ public BesselI1e besselI1e(Operand x) { *
is the incomplete beta function and \(B(a, b)\) is the complete * beta function. * - * @param data type for {@code z} output * @param a The a value * @param b The b value * @param x The x value @@ -624,7 +602,6 @@ public Betainc betainc(Operand a, Operand b, Operan * {@code i}. *
Values in {@code arr} outside of the range [0, size) are ignored. * - * @param data type for {@code bins} output * @param arr int32 {@code Tensor}. * @param sizeOutput non-negative int32 scalar {@code Tensor}. * @param weights is an int32, int64, float32, or float64 {@code Tensor} with the same @@ -641,7 +618,6 @@ public Bincount bincount(Operand arr, Operand data type for {@code y} output * @param x The x value * @param data type for {@code Ceil} output and operands * @return a new instance of Ceil @@ -667,7 +643,6 @@ public Ceil ceil(Operand x) { * * * - * @param data type for {@code y} output * @param x The x value * @return a new instance of ComplexAbs, with default output types */ @@ -692,7 +667,6 @@ public ComplexAbs complexAbs(Operand x) { * * * - * @param data type for {@code y} output * @param x The x value * @param Tout The value of the Tout attribute * @param data type for {@code ComplexAbs} output and operands @@ -715,7 +689,6 @@ public ComplexAbs complexAbs(Operand x, * tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] * * - * @param data type for {@code output} output * @param input The input value * @param data type for {@code Conj} output and operands * @return a new instance of Conj @@ -735,7 +708,6 @@ public Conj conj(Operand input) { * tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 0.48718765 -0.95215535 nan] * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Cos} output and operands * @return a new instance of Cos @@ -754,7 +726,6 @@ public Cos cos(Operand x) { * tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 3.7621956e+00 1.1013233e+04 inf] * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Cosh} output and operands * @return a new instance of Cosh @@ -786,7 +757,6 @@ public Cosh cosh(Operand x) { * tf.cumprod([a, b, c], exclusive=True, reverse=True) # => [b * c, c, 1] * * - * @param data type for {@code out} output * @param x A {@code Tensor}. Must be one of the following types: {@code float32}, {@code float64}, * {@code int64}, {@code int32}, {@code uint8}, {@code uint16}, {@code int16}, {@code int8}, {@code complex64}, * {@code complex128}, {@code qint8}, {@code quint8}, {@code qint32}, {@code half}. @@ -824,7 +794,6 @@ public Cumprod cumprod(Operand x, Operand * - * @param data type for {@code out} output * @param x A {@code Tensor}. Must be one of the following types: {@code float32}, {@code float64}, * {@code int64}, {@code int32}, {@code uint8}, {@code uint16}, {@code int16}, {@code int8}, {@code complex64}, * {@code complex128}, {@code qint8}, {@code quint8}, {@code qint32}, {@code half}. @@ -858,7 +827,6 @@ public Cumsum cumsum(Operand x, OperandBy setting the {@code reverse} kwarg to {@code True}, the cumulative log-sum-exp is performed in the * opposite direction. * - * @param data type for {@code out} output * @param x A {@code Tensor}. Must be one of the following types: {@code float16}, {@code float32}, {@code float64}. * @param axis A {@code Tensor} of type {@code int32} (default: 0). Must be in the range * {@code [-rank(x), rank(x))}. @@ -880,7 +848,6 @@ public CumulativeLogsumexp cumulativeLogsumexp(Operand * {@code i}. *
Values in {@code arr} outside of the range [0, size) are ignored. * - * @param data type for {@code output} output * @param input 1D or 2D int {@code Tensor}. * @param sizeOutput non-negative int scalar {@code Tensor}. * @param weights is an int32, int64, float32, or float64 {@code Tensor} with the same @@ -900,7 +867,6 @@ public DenseBincount denseBincount(Ope * Computes Psi, the derivative of Lgamma (the log of the absolute value of * {@code Gamma(x)}), element-wise. * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Digamma} output and operands * @return a new instance of Digamma @@ -914,7 +880,6 @@ public Digamma digamma(Operand x) { * NOTE: {@code math.Div} supports broadcasting. More about broadcasting * here * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code Div} output and operands @@ -929,7 +894,6 @@ public Div div(Operand x, Operand y) { * NOTE: {@code math.DivNoNan} supports broadcasting. More about broadcasting * here * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code DivNoNan} output and operands @@ -966,7 +930,6 @@ public Equal equal(Operand x, Operand y, Equal.Options.. /** * Computes the Gauss error function of {@code x} element-wise. In statistics, for non-negative values of $x$, the error function has the following interpretation: for a random variable $Y$ that is normally distributed with mean 0 and variance $1/\sqrt{2}$, $erf(x)$ is the probability that $Y$ falls in the range $[−x, x]$. * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Erf} output and operands * @return a new instance of Erf @@ -978,7 +941,6 @@ public Erf erf(Operand x) { /** * Computes the complementary error function of {@code x} element-wise. * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Erfc} output and operands * @return a new instance of Erfc @@ -990,7 +952,6 @@ public Erfc erfc(Operand x) { /** * The Erfinv operation * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Erfinv} output and operands * @return a new instance of erfinv @@ -1023,7 +984,6 @@ public erfinv erfinv(Operand x) { * tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Exp} output and operands * @return a new instance of Exp @@ -1047,7 +1007,6 @@ public Exp exp(Operand x) { * tf.math.expm1(x) ==> (0.46869393991588515+2.2873552871788423j) * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Expm1} output and operands * @return a new instance of Expm1 @@ -1068,7 +1027,6 @@ public Fact fact() { /** * Returns element-wise largest integer not greater than x. * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Floor} output and operands * @return a new instance of Floor @@ -1082,7 +1040,6 @@ public Floor floor(Operand x) { * NOTE: {@code math.FloorDiv} supports broadcasting. More about broadcasting * here * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code FloorDiv} output and operands @@ -1100,7 +1057,6 @@ public FloorDiv floorDiv(Operand x, Operand y) { *

NOTE: {@code math.FloorMod} supports broadcasting. More about broadcasting * here * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code FloorMod} output and operands @@ -1168,7 +1124,6 @@ public GreaterEqual greaterEqual(Operand x, Operand y) *

Note, above {@code Q(a, x)} ({@code Igammac}) is the upper regularized complete * Gamma function. * - * @param data type for {@code z} output * @param a The a value * @param x The x value * @param data type for {@code Igamma} output and operands @@ -1181,7 +1136,6 @@ public Igamma igamma(Operand a, Operand x) { /** * Computes the gradient of {@code igamma(a, x)} wrt {@code a}. * - * @param data type for {@code z} output * @param a The a value * @param x The x value * @param data type for {@code IgammaGradA} output and operands @@ -1201,7 +1155,6 @@ public IgammaGradA igammaGradA(Operand a, Operand x *

Note, above {@code P(a, x)} ({@code Igamma}) is the lower regularized complete * Gamma function. * - * @param data type for {@code z} output * @param a The a value * @param x The x value * @param data type for {@code Igammac} output and operands @@ -1223,7 +1176,6 @@ public Igammac igammac(Operand a, Operand x) { * tf.imag(input) ==> [4.75, 5.75] * * - * @param data type for {@code output} output * @param input The input value * @return a new instance of Imag, with default output types */ @@ -1243,7 +1195,6 @@ public Imag imag(Operand input) { * tf.imag(input) ==> [4.75, 5.75] * * - * @param data type for {@code output} output * @param input The input value * @param Tout The value of the Tout attribute * @param data type for {@code Imag} output and operands @@ -1267,7 +1218,6 @@ public Imag imag(Operand input, Class * invert_permutation(x) ==> [2, 4, 3, 0, 1] * * - * @param data type for {@code y} output * @param x 1-D. * @param data type for {@code InvertPermutation} output and operands * @return a new instance of InvertPermutation @@ -1388,7 +1338,6 @@ public LessEqual lessEqual(Operand x, Operand y) { * tf.math.lgamma(x) ==> [inf, 0.5723649, 0., 2.4537368, inf, -4.6477685] * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Lgamma} output and operands * @return a new instance of Lgamma @@ -1406,7 +1355,6 @@ public Lgamma lgamma(Operand x) { * tf.math.log(x) ==> [-inf, -0.6931472, 0. , 1.609438] * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Log} output and operands * @return a new instance of Log @@ -1424,7 +1372,6 @@ public Log log(Operand x) { * tf.math.log1p(x) ==> [0., 0.4054651, 0.6931472, 1.7917595] * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Log1p} output and operands * @return a new instance of Log1p @@ -1474,7 +1421,6 @@ public LogicalOr logicalOr(Operand x, Operand y) { * NOTE: {@code math.Maximum} supports broadcasting. More about broadcasting * here * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code Maximum} output and operands @@ -1491,7 +1437,6 @@ public Maximum maximum(Operand x, Operand y) { * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. * - * @param data type for {@code output} output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range * {@code [-rank(input), rank(input))}. @@ -1509,7 +1454,6 @@ public Mean mean(Operand input, OperandNOTE: {@code math.Minimum} supports broadcasting. More about broadcasting * here * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code Minimum} output and operands @@ -1526,7 +1470,6 @@ public Minimum minimum(Operand x, Operand y) { *
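As a sketch of the axis-reduction behavior described for Mean above (same assumed eager tf handle; values chosen for illustration):

    TFloat32 columnMeans = tf.math.mean(
            tf.constant(new float[][] {{1f, 2f}, {3f, 4f}}),
            tf.constant(0))                                   // reduce over axis 0
        .asTensor();
    // columnMeans.getFloat(0) == 2.0f, columnMeans.getFloat(1) == 3.0f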

NOTE: {@code math.Mod} supports broadcasting. More about broadcasting * here * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code Mod} output and operands @@ -1541,7 +1484,6 @@ public Mod mod(Operand x, Operand y) { * NOTE: {@code math.Mul} supports broadcasting. More about broadcasting * here * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code Mul} output and operands @@ -1556,7 +1498,6 @@ public Mul mul(Operand x, Operand y) { * NOTE: {@code math.MulNoNan} supports broadcasting. More about broadcasting * here * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code MulNoNan} output and operands @@ -1569,7 +1510,6 @@ public MulNoNan mulNoNan(Operand x, Operand y) { /** * The Ndtri operation * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Ndtri} output and operands * @return a new instance of Ndtri @@ -1582,7 +1522,6 @@ public Ndtri ndtri(Operand x) { * Computes numerical negative value element-wise. * I.e., \(y = -x\). * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Neg} output and operands * @return a new instance of Neg @@ -1599,7 +1538,6 @@ public Neg neg(Operand x) { * Equivalent to C++ std::nextafter function. *
{@literal @}end_compatibility * - * @param data type for {@code output} output * @param x1 The x1 value * @param x2 The x2 value * @param data type for {@code NextAfter} output and operands @@ -1632,7 +1570,6 @@ public NotEqual notEqual(Operand x, Operand y, *

where \(\psi(x)\) is the digamma function. * The polygamma function is defined only for non-negative integer orders \a\. * - * @param data type for {@code z} output * @param a The a value * @param x The x value * @param data type for {@code Polygamma} output and operands @@ -1667,7 +1604,6 @@ public PopulationCount populationCount(Operand x) { * tf.pow(x, y) ==> [[256, 65536], [9, 27]] * * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code Pow} output and operands @@ -1680,7 +1616,6 @@ public Pow pow(Operand x, Operand y) { /** * Returns x + y element-wise, working on quantized buffers. * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param minX The float value that the lowest quantized {@code x} value represents. @@ -1700,7 +1635,6 @@ public QuantizedAdd quantizedAdd(Operand data type for {@code z} output * @param x The x value * @param y The y value * @param minX The float value that the lowest quantized {@code x} value represents. @@ -1729,7 +1663,6 @@ public QuantizedMul quantizedMul(Operand * - * @param data type for {@code output} output * @param input The input value * @return a new instance of Real, with default output types */ @@ -1749,7 +1682,6 @@ public Real real(Operand input) { * tf.real(input) ==> [-2.25, 3.25] * * - * @param data type for {@code output} output * @param input The input value * @param Tout The value of the Tout attribute * @param data type for {@code Real} output and operands @@ -1765,7 +1697,6 @@ public Real real(Operand input, Class *
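A short sketch mirroring the Pow example in the Javadoc above (assumed eager tf handle; not part of this patch):

    TInt32 powers = tf.math.pow(
            tf.constant(new int[] {2, 3}),
            tf.constant(new int[] {8, 3}))
        .asTensor();
    // powers.getInt(0) == 256, powers.getInt(1) == 27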

NOTE: {@code Div} supports broadcasting. More about broadcasting * here * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code RealDiv} output and operands @@ -1779,7 +1710,6 @@ public RealDiv realDiv(Operand x, Operand y) { * Computes the reciprocal of x element-wise. * I.e., \(y = 1 / x\). * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Reciprocal} output and operands * @return a new instance of Reciprocal @@ -1793,7 +1723,6 @@ public Reciprocal reciprocal(Operand x) { * Specifically, {@code grad = -dy * y*y}, where {@code y = 1/x}, and {@code dy} * is the corresponding input gradient. * - * @param data type for {@code z} output * @param y The y value * @param dy The dy value * @param data type for {@code ReciprocalGrad} output and operands @@ -1822,7 +1751,6 @@ public RequantizationRangePerChannel requantizationRangePerChannel( /** * Requantizes input with min and max values known per channel. * - * @param data type for {@code output} output * @param input The original input tensor. * @param inputMin The minimum value of the input tensor * @param inputMax The maximum value of the input tensor. @@ -1850,7 +1778,6 @@ public RequantizePerChannel requantizePerChannel( * rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.] * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Rint} output and operands * @return a new instance of Rint @@ -1864,7 +1791,6 @@ public Rint rint(Operand x) { * Rounds half to even. Also known as bankers rounding. If you want to round * according to the current system rounding mode use std::cint. * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Round} output and operands * @return a new instance of Round @@ -1877,7 +1803,6 @@ public Round round(Operand x) { * Computes reciprocal of square root of x element-wise. * I.e., \(y = 1 / \sqrt{x}\). * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Rsqrt} output and operands * @return a new instance of Rsqrt @@ -1891,7 +1816,6 @@ public Rsqrt rsqrt(Operand x) { * Specifically, {@code grad = dy * -0.5 * y^3}, where {@code y = rsqrt(x)}, and {@code dy} * is the corresponding input gradient. * - * @param data type for {@code z} output * @param y The y value * @param dy The dy value * @param data type for {@code RsqrtGrad} output and operands @@ -1942,7 +1866,6 @@ public RsqrtGrad rsqrtGrad(Operand y, Operand dy) { * * * - * @param data type for {@code output} output * @param data The data value * @param segmentIds A 1-D tensor whose size is equal to the size of {@code data}'s * first dimension. Values should be sorted and can be repeated. @@ -1989,7 +1912,6 @@ public SegmentMax segmentMax(Operand data, * * * - * @param data type for {@code output} output * @param data The data value * @param segmentIds A 1-D tensor whose size is equal to the size of {@code data}'s * first dimension. Values should be sorted and can be repeated. @@ -2044,7 +1966,6 @@ public SegmentMean segmentMean(Operand data, * * * - * @param data type for {@code output} output * @param data The data value * @param segmentIds A 1-D tensor whose size is equal to the size of {@code data}'s * first dimension. Values should be sorted and can be repeated. 
@@ -2093,7 +2014,6 @@ public SegmentMin segmentMin(Operand data, * * * - * @param data type for {@code output} output * @param data The data value * @param segmentIds A 1-D tensor whose size is equal to the size of {@code data}'s * first dimension. Values should be sorted and can be repeated. @@ -2119,9 +2039,7 @@ public SegmentProd segmentProd(Operand data, * that {@code segment_ids[j] == i}. *

If the sum is empty for a given segment ID {@code i}, {@code output[i] = 0}. *
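For the sorted segment reductions documented in this region (segmentSum shown here), a sketch under the same assumed eager setup:

    // segment_ids [0, 0, 1]: rows 0 and 1 fall into segment 0, row 2 into segment 1
    TInt32 sums = tf.math.segmentSum(
            tf.constant(new int[] {1, 2, 3}),
            tf.constant(new int[] {0, 0, 1}))
        .asTensor();
    // sums.getInt(0) == 3, sums.getInt(1) == 3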

Note that this op is currently only supported with jit_compile=True. - * * - * @param data type for {@code output} output * @param data The data value * @param segmentIds A 1-D tensor whose size is equal to the size of {@code data}'s * first dimension. Values should be sorted and can be repeated. @@ -2141,7 +2059,6 @@ public SegmentSum segmentSum(Operand data, * Computes sigmoid of {@code x} element-wise. * Specifically, {@code y = 1 / (1 + exp(-x))}. * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Sigmoid} output and operands * @return a new instance of Sigmoid @@ -2155,7 +2072,6 @@ public Sigmoid sigmoid(Operand x) { * Specifically, {@code grad = dy * y * (1 - y)}, where {@code y = sigmoid(x)}, and * {@code dy} is the corresponding input gradient. * - * @param data type for {@code z} output * @param y The y value * @param dy The dy value * @param data type for {@code SigmoidGrad} output and operands @@ -2179,7 +2095,6 @@ public SigmoidGrad sigmoidGrad(Operand y, Operand dy) * * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Sign} output and operands * @return a new instance of Sign @@ -2198,7 +2113,6 @@ public Sign sign(Operand x) { * tf.math.sin(x) ==> [nan -0.4121185 -0.47942555 0.84147096 0.9320391 -0.87329733 -0.54402107 nan] * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Sin} output and operands * @return a new instance of Sin @@ -2217,7 +2131,6 @@ public Sin sin(Operand x) { * tf.math.sinh(x) ==> [-inf -4.0515420e+03 -5.2109528e-01 1.1752012e+00 1.5094614e+00 3.6268604e+00 1.1013232e+04 inf] * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Sinh} output and operands * @return a new instance of Sinh @@ -2231,7 +2144,6 @@ public Sinh sinh(Operand x) { * Creates a Sobol sequence with {@code num_results} samples. Each sample has dimension * {@code dim}. Skips the first {@code skip} samples. * - * @param data type for {@code samples} output * @param dim Positive scalar {@code Tensor} representing each sample's dimension. * @param numResults Positive scalar {@code Tensor} of dtype int32. The number of Sobol points to return * in the output. @@ -2249,7 +2161,6 @@ public SobolSample sobolSample(Operand dim, Operand nu * Creates a Sobol sequence with {@code num_results} samples. Each sample has dimension * {@code dim}. Skips the first {@code skip} samples. * - * @param data type for {@code samples} output * @param dim Positive scalar {@code Tensor} representing each sample's dimension. * @param numResults Positive scalar {@code Tensor} of dtype int32. The number of Sobol points to return * in the output. @@ -2267,7 +2178,6 @@ public SobolSample sobolSample(Operand dim, /** * The Softplus operation * - * @param data type for {@code activations} output * @param features The features value * @param data type for {@code Softplus} output and operands * @return a new instance of Softplus @@ -2279,7 +2189,6 @@ public Softplus softplus(Operand features) { /** * Computes softplus gradients for a softplus operation. * - * @param data type for {@code backprops} output * @param gradients The backpropagated gradients to the corresponding softplus operation. * @param features The features passed as input to the corresponding softplus operation. 
* @param data type for {@code SoftplusGrad} output and operands @@ -2294,7 +2203,6 @@ public SoftplusGrad softplusGrad(Operand gradients, * Computes square root of x element-wise. * I.e., \(y = \sqrt{x} = x^{1/2}\). * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Sqrt} output and operands * @return a new instance of Sqrt @@ -2308,7 +2216,6 @@ public Sqrt sqrt(Operand x) { * Specifically, {@code grad = dy * 0.5 / y}, where {@code y = sqrt(x)}, and {@code dy} * is the corresponding input gradient. * - * @param data type for {@code z} output * @param y The y value * @param dy The dy value * @param data type for {@code SqrtGrad} output and operands @@ -2322,7 +2229,6 @@ public SqrtGrad sqrtGrad(Operand y, Operand dy) { * Computes square of x element-wise. * I.e., \(y = x * x = x^2\). * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Square} output and operands * @return a new instance of Square @@ -2336,7 +2242,6 @@ public Square square(Operand x) { * NOTE: {@code math.SquaredDifference} supports broadcasting. More about broadcasting * here * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code SquaredDifference} output and operands @@ -2351,7 +2256,6 @@ public SquaredDifference squaredDifference(Operand x, Op * NOTE: {@code math.Sub} supports broadcasting. More about broadcasting * here * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code Sub} output and operands @@ -2372,7 +2276,6 @@ public Sub sub(Operand x, Operand y) { * tf.math.tan(x) ==> [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 0.32097113 nan] * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Tan} output and operands * @return a new instance of Tan @@ -2398,7 +2301,6 @@ public Tan tan(Operand x) { * * * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Tanh} output and operands * @return a new instance of Tanh @@ -2412,7 +2314,6 @@ public Tanh tanh(Operand x) { * Specifically, {@code grad = dy * (1 - y*y)}, where {@code y = tanh(x)}, and {@code dy} * is the corresponding input gradient. * - * @param data type for {@code z} output * @param y The y value * @param dy The dy value * @param data type for {@code TanhGrad} output and operands @@ -2431,7 +2332,6 @@ public TanhGrad tanhGrad(Operand y, Operand dy) { *
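A one-liner for SquaredDifference from the group above, again assuming the illustrative eager tf handle:

    TFloat32 sqDiff = tf.math.squaredDifference(
            tf.constant(new float[] {3f, 5f}),
            tf.constant(new float[] {1f, 1f}))
        .asTensor();
    // (x - y)^2 element-wise: sqDiff.getFloat(0) == 4.0f, sqDiff.getFloat(1) == 16.0f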

NOTE: {@code math.TruncateDiv} supports broadcasting. More about broadcasting * here * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code TruncateDiv} output and operands @@ -2447,7 +2347,6 @@ public TruncateDiv truncateDiv(Operand x, Operand y) *

NOTE: {@code math.TruncateMod} supports broadcasting. More about broadcasting * here * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code TruncateMod} output and operands @@ -2475,7 +2374,6 @@ public TruncateMod truncateMod(Operand x, Operand y * if {@code operand.quantization_axis} >= 0 and {@code output.quantization_axis} >= 0, * {@code operand.dims} - {@code operand.quantization_axis} must be equal to {@code output.dims} - {@code output.quantization_axis}. * - * @param data type for {@code output} output * @param lhs Must be a quantized tensor. * @param rhs Must be a quantized tensor. * @param lhsScales The float value(s) used as scale factors when quantizing the original data that {@code lhs} represents. @@ -2547,7 +2445,6 @@ public UniformQuantizedAdd uniformQuantizedAdd(Operand * * * - * @param data type for {@code output} output * @param data The data value * @param segmentIds A tensor whose shape is a prefix of {@code data.shape}. * The values must be less than {@code num_segments}. @@ -2594,7 +2491,6 @@ public UnsortedSegmentMax unsortedSegmentMax(Operand d * out-of-bound indices or outputting a tensor with a 0 stored in the first * dimension of its shape if {@code num_segments} is 0. * - * @param data type for {@code output} output * @param data The data value * @param segmentIds A tensor whose shape is a prefix of {@code data.shape}. * The values must be less than {@code num_segments}. @@ -2640,7 +2536,6 @@ public UnsortedSegmentMin unsortedSegmentMin(Operand d * out-of-bound indices or outputting a tensor with a 0 stored in the first * dimension of its shape if {@code num_segments} is 0. * - * @param data type for {@code output} output * @param data The data value * @param segmentIds A tensor whose shape is a prefix of {@code data.shape}. * The values must be less than {@code num_segments}. @@ -2689,7 +2584,6 @@ public UnsortedSegmentProd unsortedSegmentProd(Operand d * * * - * @param data type for {@code output} output * @param data The data value * @param segmentIds A tensor whose shape is a prefix of {@code data.shape}. * The values must be less than {@code num_segments}. @@ -2707,7 +2601,6 @@ public UnsortedSegmentSum unsortedSegmentSum(Operand dat /** * Returns 0 if x == 0, and x / y otherwise, elementwise. * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code Xdivy} output and operands @@ -2720,7 +2613,6 @@ public Xdivy xdivy(Operand x, Operand y) { /** * Returns 0 if x == 0, and x * log1p(y) otherwise, elementwise. * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code Xlog1py} output and operands @@ -2733,7 +2625,6 @@ public Xlog1py xlog1py(Operand x, Operand y) { /** * Returns 0 if x == 0, and x * log(y) otherwise, elementwise. * - * @param data type for {@code z} output * @param x The x value * @param y The y value * @param data type for {@code Xlogy} output and operands @@ -2748,7 +2639,6 @@ public Xlogy xlogy(Operand x, Operand y) { * The Hurwitz zeta function is defined as: *

\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\) * - * @param data type for {@code z} output * @param x The x value * @param q The q value * @param data type for {@code Zeta} output and operands diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/MathSpecialOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/MathSpecialOps.java index 05af5fe921d..e486615af1b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/MathSpecialOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/MathSpecialOps.java @@ -51,7 +51,6 @@ public final class MathSpecialOps { /** * The BesselJ0 operation * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code BesselJ0} output and operands * @return a new instance of BesselJ0 @@ -63,7 +62,6 @@ public BesselJ0 besselJ0(Operand x) { /** * The BesselJ1 operation * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code BesselJ1} output and operands * @return a new instance of BesselJ1 @@ -75,7 +73,6 @@ public BesselJ1 besselJ1(Operand x) { /** * The BesselK0 operation * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code BesselK0} output and operands * @return a new instance of BesselK0 @@ -87,7 +84,6 @@ public BesselK0 besselK0(Operand x) { /** * The BesselK0e operation * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code BesselK0e} output and operands * @return a new instance of BesselK0e @@ -99,7 +95,6 @@ public BesselK0e besselK0e(Operand x) { /** * The BesselK1 operation * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code BesselK1} output and operands * @return a new instance of BesselK1 @@ -111,7 +106,6 @@ public BesselK1 besselK1(Operand x) { /** * The BesselK1e operation * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code BesselK1e} output and operands * @return a new instance of BesselK1e @@ -123,7 +117,6 @@ public BesselK1e besselK1e(Operand x) { /** * The BesselY0 operation * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code BesselY0} output and operands * @return a new instance of BesselY0 @@ -135,7 +128,6 @@ public BesselY0 besselY0(Operand x) { /** * The BesselY1 operation * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code BesselY1} output and operands * @return a new instance of BesselY1 @@ -147,7 +139,6 @@ public BesselY1 besselY1(Operand x) { /** * The Dawsn operation * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Dawsn} output and operands * @return a new instance of Dawsn @@ -159,7 +150,6 @@ public Dawsn dawsn(Operand x) { /** * The Expint operation * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Expint} output and operands * @return a new instance of Expint @@ -171,7 +161,6 @@ public Expint expint(Operand x) { /** * The FresnelCos operation * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code FresnelCos} output and operands * @return a new instance of FresnelCos @@ -183,7 +172,6 @@ public FresnelCos fresnelCos(Operand x) { /** * The FresnelSin operation * - * @param data type for {@code y} output * @param x The x value * @param data 
type for {@code FresnelSin} output and operands * @return a new instance of FresnelSin @@ -195,7 +183,6 @@ public FresnelSin fresnelSin(Operand x) { /** * The Spence operation * - * @param data type for {@code y} output * @param x The x value * @param data type for {@code Spence} output and operands * @return a new instance of Spence diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java index 2e20b52b946..9859a308562 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java @@ -155,7 +155,6 @@ public final class NnOps { * Each entry in {@code output} is the mean of the corresponding size {@code ksize} * window in {@code value}. * - * @param data type for {@code output} output * @param value 4-D with shape {@code [batch, height, width, channels]}. * @param ksize The size of the sliding window for each dimension of {@code value}. * @param strides The stride of the sliding window for each dimension of {@code value}. @@ -174,7 +173,6 @@ public AvgPool avgPool(Operand value, List ksize * Each entry in {@code output} is the mean of the corresponding size {@code ksize} window in * {@code value}. * - * @param data type for {@code output} output * @param input Shape {@code [batch, depth, rows, cols, channels]} tensor to pool over. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of * the input tensor. Must have {@code ksize[0] = ksize[4] = 1}. @@ -193,7 +191,6 @@ public AvgPool3d avgPool3d(Operand input, List k /** * Computes gradients of average pooling function. * - * @param data type for {@code output} output * @param origInputShape The original input dimensions. * @param grad Output backprop of shape {@code [batch, depth, rows, cols, channels]}. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of @@ -214,7 +211,6 @@ public AvgPool3dGrad avgPool3dGrad(Operand origIn /** * Computes gradients of the average pooling function. * - * @param data type for {@code output} output * @param origInputShape 1-D. Shape of the original input to {@code avg_pool}. * @param grad 4-D with shape {@code [batch, height, width, channels]}. Gradients w.r.t. * the output of {@code avg_pool}. @@ -235,7 +231,6 @@ public AvgPoolGrad avgPoolGrad(Operand origInputS * Batch normalization. * This op is deprecated. Prefer {@code tf.nn.batch_normalization}. * - * @param data type for {@code result} output * @param t A 4D input Tensor. * @param m A 1D mean Tensor with size matching the last dimension of t. * This is the first output from tf.nn.moments, @@ -264,7 +259,6 @@ public BatchNormWithGlobalNormalization batchNormWithGlobal * Gradients for batch normalization. * This op is deprecated. See {@code tf.nn.batch_normalization}. * - * @param data type for {@code dx} output * @param t A 4D input Tensor. * @param m A 1D mean Tensor with size matching the last dimension of t. * This is the first output from tf.nn.moments, @@ -293,7 +287,6 @@ public BatchNormWithGlobalNormalizationGrad batchNormWithGl * This is a special case of {@code tf.add} where {@code bias} is restricted to be 1-D. * Broadcasting is supported, so {@code value} may have any number of dimensions. * - * @param data type for {@code output} output * @param value Any number of dimensions. 
* @param bias 1-D with size the last dimension of {@code value}. * @param options carries optional attribute values @@ -311,7 +304,6 @@ public BiasAdd biasAdd(Operand value, Operand bias, * For NHWC data format, the feature dimension is the last. For NCHW data format, * the feature dimension is the third-to-last. * - * @param data type for {@code output} output * @param outBackprop Any number of dimensions. * @param options carries optional attribute values * @param data type for {@code BiasAddGrad} output and operands @@ -345,7 +337,6 @@ public BiasAddGrad biasAddGrad(Operand outBackprop, * all gate-related outputs should be reordered. * * - * @param data type for {@code i} output * @param seqLenMax Maximum time length actually used by this input. Outputs are padded * with zeros beyond this length. * @param x The sequence input to the LSTM, shape (timelen, batch_size, num_inputs). @@ -370,7 +361,6 @@ public BlockLSTM blockLSTM(Operand seqLenMax, Ope * Computes the LSTM cell backward propagation for the entire time sequence. * This implementation is to be used in conjunction of BlockLSTMV2. * - * @param data type for {@code x_grad} output * @param seqLenMax Maximum time length actually used by this input. Outputs are padded * with zeros beyond this length. * @param x The sequence input to the LSTM, shape (timelen, batch_size, num_inputs). @@ -445,7 +435,6 @@ public ComputeAccidentalHits computeAccidentalHits(Operand trueClasses, * General function for computing a N-D convolution. It is required that * {@code 1 <= N <= 3}. * - * @param data type for {@code output} output * @param input Tensor of type T and shape {@code batch_shape + spatial_shape + [in_channels]} in the * case that {@code channels_last_format = true} or shape * {@code batch_shape + [in_channels] + spatial_shape} if {@code channels_last_format = false}. @@ -490,7 +479,6 @@ public Conv conv(Operand input, Operand filter, Lis *
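For the BiasAdd broadcasting rule described above (bias is 1-D and matches the last dimension of value), a sketch with the same assumed eager tf handle:

    TFloat32 biased = tf.nn.biasAdd(
            tf.constant(new float[][] {{1f, 2f}, {3f, 4f}}),  // value, last dimension = 2
            tf.constant(new float[] {10f, 20f}))              // bias, broadcast over rows
        .asTensor();
    // biased.getFloat(0, 0) == 11.0f, biased.getFloat(1, 1) == 24.0f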

Must have {@code strides[0] = strides[3] = 1}. For the most common case of the same * horizontal and vertices strides, {@code strides = [1, stride, stride, 1]}. * - * @param data type for {@code output} output * @param input A 4-D tensor. The dimension order is interpreted according to the value * of {@code data_format}, see below for details. * @param filter A 4-D tensor of shape @@ -511,7 +499,6 @@ public Conv2d conv2d(Operand input, Operand filter, /** * Computes the gradients of convolution with respect to the filter. * - * @param data type for {@code output} output * @param input 4-D with shape {@code [batch, in_height, in_width, in_channels]}. * @param filterSizes An integer vector representing the tensor shape of {@code filter}, * where {@code filter} is a 4-D @@ -535,7 +522,6 @@ public Conv2dBackpropFilter conv2dBackpropFilter(Operand< /** * Computes the gradients of convolution with respect to the input. * - * @param data type for {@code output} output * @param inputSizes An integer vector representing the shape of {@code input}, * where {@code input} is a 4-D {@code [batch, height, width, channels]} tensor. * @param filter 4-D with shape @@ -563,7 +549,6 @@ public Conv2dBackpropInput conv2dBackpropInput(OperandOur Conv3D implements a form of cross-correlation. * - * @param data type for {@code output} output * @param input Shape {@code [batch, in_depth, in_height, in_width, in_channels]}. * @param filter Shape {@code [filter_depth, filter_height, filter_width, in_channels, out_channels]}. {@code in_channels} must match between {@code input} and {@code filter}. * @param strides 1-D tensor of length 5. The stride of the sliding window for each @@ -581,7 +566,6 @@ public Conv3d conv3d(Operand input, Operand filter, /** * Computes the gradients of 3-D convolution with respect to the filter. * - * @param data type for {@code output} output * @param input Shape {@code [batch, depth, rows, cols, in_channels]}. * @param filterSizes An integer vector representing the tensor shape of {@code filter}, * where {@code filter} is a 5-D @@ -604,7 +588,6 @@ public Conv3dBackpropFilter conv3dBackpropFilter(Operand< /** * Computes the gradients of 3-D convolution with respect to the input. * - * @param data type for {@code output} output * @param inputSizes An integer vector representing the tensor shape of {@code input}, * where {@code input} is a 5-D * {@code [batch, depth, rows, cols, in_channels]} tensor. @@ -632,7 +615,6 @@ public Conv3dBackpropInput conv3dBackpropInput( * "A B" is returned if merge_repeated = True but "A B B B B" is * returned if merge_repeated = False. * - * @param data type for {@code log_probability} output * @param inputs 3-D, shape: {@code (max_time x batch_size x num_classes)}, the logits. * @param sequenceLength A vector containing sequence lengths, size {@code (batch)}. * @param beamWidth A scalar >= 0 (beam search beam width). @@ -658,7 +640,6 @@ public CtcBeamSearchDecoder ctcBeamSearchDecoder(Operand< * time and batch corresponds to the blank, index {@code (num_classes - 1)}, no new * element is emitted. * - * @param data type for {@code log_probability} output * @param inputs 3-D, shape: {@code (max_time x batch_size x num_classes)}, the logits. * @param sequenceLength A vector containing sequence lengths, size {@code (batch_size)}. * @param options carries optional attribute values @@ -675,7 +656,6 @@ public CtcGreedyDecoder ctcGreedyDecoder(Operand input * the gradient. This class performs the softmax operation for you, so inputs * should be e.g. 
linear projections of outputs by an LSTM. * - * @param data type for {@code loss} output * @param inputs 3-D, shape: {@code (max_time x batch_size x num_classes)}, the logits. * @param labelsIndices The indices of a {@code SparseTensor}. * {@code labels_indices(i, :) == [b, t]} means {@code labels_values(i)} stores the id for @@ -730,7 +710,6 @@ public CtcLoss ctcLoss(Operand inputs, Operand * reserve_space: An opaque tensor that can be used in backprop calculation. It * is only produced if is_training is true. * - * @param data type for {@code output} output * @param input The input value * @param inputH The inputH value * @param inputC The inputC value @@ -795,7 +774,6 @@ public CudnnRNN cudnnRNN(Operand input, Operand inp * params_backprop: The backprop to the params buffer in the forward pass. Has the * same shape as params. * - * @param data type for {@code input_backprop} output * @param input The input value * @param inputH The inputH value * @param inputC The inputC value @@ -852,7 +830,6 @@ public CudnnRNNBackprop cudnnRNNBackprop(Operand input * num_proj: The output dimensionality for the projection matrices. If None or 0, * no projection is performed. * - * @param data type for {@code params} output * @param numLayers The numLayers value * @param numUnits The numUnits value * @param inputSize The inputSize value @@ -900,7 +877,6 @@ public CudnnRNNCanonicalToParams cudnnRNNCanonicalToParam * num_proj: The output dimensionality for the projection matrices. If None or 0, * no projection is performed. * - * @param data type for {@code weights} output * @param numLayers The numLayers value * @param numUnits The numUnits value * @param inputSize The inputSize value @@ -941,7 +917,6 @@ public CudnnRNNParamsToCanonical cudnnRNNParamsToCanonica * CudnnRNNParamsBiases to save and restore them in a way that is compatible * across different runs. * - * @param data type for {@code params_size} output * @param numLayers The numLayers value * @param numUnits The numUnits value * @param inputSize The inputSize value @@ -962,7 +937,6 @@ public CudnnRnnParamsSize cudnnRnnPara * Returns the dimension index in the destination data format given the one in * the source data format. * - * @param data type for {@code y} output * @param x A Tensor with each element as a dimension index in source data format. * Must be in the range [-4, 4). * @param options carries optional attribute values @@ -1006,7 +980,6 @@ public DataFormatDimMap dataFormatDimMap(Operand x, * [1, 2] * * - * @param data type for {@code y} output * @param x Tensor of rank 1 or 2 in source data format. * @param options carries optional attribute values * @param data type for {@code DataFormatVecPermute} output and operands @@ -1094,7 +1067,6 @@ public DataFormatVecPermute dataFormatVecPermute(Operand< * * * - * @param data type for {@code output} output * @param input The input value * @param blockSize The size of the spatial block, same as in Space2Depth. * @param options carries optional attribute values @@ -1125,7 +1097,6 @@ public DepthToSpace depthToSpace(Operand input, Long blo *

Must have {@code strides[0] = strides[3] = 1}. For the most common case of the same * horizontal and vertices strides, {@code strides = [1, stride, stride, 1]}. * - * @param data type for {@code output} output * @param input The input value * @param filter The filter value * @param strides 1-D of length 4. The stride of the sliding window for each dimension @@ -1144,7 +1115,6 @@ public DepthwiseConv2dNative depthwiseConv2dNative(Operan /** * Computes the gradients of depthwise convolution with respect to the filter. * - * @param data type for {@code output} output * @param input 4-D with shape based on {@code data_format}. For example, if * {@code data_format} is 'NHWC' then {@code input} is a 4-D {@code [batch, in_height, in_width, in_channels]} tensor. * @param filterSizes An integer vector representing the tensor shape of {@code filter}, @@ -1170,7 +1140,6 @@ public DepthwiseConv2dNativeBackpropFilter depthwiseConv2 /** * Computes the gradients of depthwise convolution with respect to the input. * - * @param data type for {@code output} output * @param inputSizes An integer vector representing the shape of {@code input}, based * on {@code data_format}. For example, if {@code data_format} is 'NHWC' then * {@code input} is a 4-D {@code [batch, height, width, channels]} tensor. @@ -1217,7 +1186,6 @@ public DepthwiseConv2dNativeBackpropInput depthwiseConv2d *

Note on duality: The dilation of {@code input} by the {@code filter} is equal to the * negation of the erosion of {@code -input} by the reflected {@code filter}. * - * @param data type for {@code output} output * @param input 4-D with shape {@code [batch, in_height, in_width, depth]}. * @param filter 3-D with shape {@code [filter_height, filter_width, depth]}. * @param strides The stride of the sliding window for each dimension of the input @@ -1236,7 +1204,6 @@ public Dilation2d dilation2d(Operand input, Operand /** * Computes the gradient of morphological 2-D dilation with respect to the filter. * - * @param data type for {@code filter_backprop} output * @param input 4-D with shape {@code [batch, in_height, in_width, depth]}. * @param filter 3-D with shape {@code [filter_height, filter_width, depth]}. * @param outBackprop 4-D with shape {@code [batch, out_height, out_width, depth]}. @@ -1257,7 +1224,6 @@ public Dilation2dBackpropFilter dilation2dBackpropFilter( /** * Computes the gradient of morphological 2-D dilation with respect to the input. * - * @param data type for {@code in_backprop} output * @param input 4-D with shape {@code [batch, in_height, in_width, depth]}. * @param filter 3-D with shape {@code [filter_height, filter_width, depth]}. * @param outBackprop 4-D with shape {@code [batch, out_height, out_width, depth]}. @@ -1298,7 +1264,6 @@ public Dilation2dBackpropInput dilation2dBackpropInput(Op *

See Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) * * - * @param data type for {@code activations} output * @param features The features value * @param data type for {@code Elu} output and operands * @return a new instance of Elu @@ -1310,7 +1275,6 @@ public Elu elu(Operand features) { /** * Computes gradients for the exponential linear (Elu) operation. * - * @param data type for {@code backprops} output * @param gradients The backpropagated gradients to the corresponding Elu operation. * @param outputs The outputs of the corresponding Elu operation. * @param data type for {@code EluGrad} output and operands @@ -1358,7 +1322,6 @@ public FixedUnigramCandidateSampler fixedUnigramCandidateSampler(Operand * generated, a mean operation is performed instead of a max operation in each * pooling region. * - * @param data type for {@code output} output * @param value 4-D with shape {@code [batch, height, width, channels]}. * @param poolingRatio Pooling ratio for each dimension of {@code value}, currently only * supports row and col dimension and should be >= 1.0. For example, a valid @@ -1383,7 +1346,6 @@ public FractionalAvgPool fractionalAvgPool(Operand val * just need to know the shape of original input tensor, instead of the whole * tensor. * - * @param data type for {@code output} output * @param origInputTensorShape Original input tensor shape for {@code fractional_avg_pool} * @param outBackprop 4-D with shape {@code [batch, height, width, channels]}. Gradients * w.r.t. the output of {@code fractional_avg_pool}. @@ -1431,7 +1393,6 @@ public FractionalAvgPoolGrad fractionalAvgPoolGrad( *
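A sketch of the Elu activation documented above (exp(x) - 1 for negative inputs, identity otherwise), under the same assumed eager setup:

    TFloat32 activated = tf.nn.elu(tf.constant(new float[] {-1f, 0f, 2f})).asTensor();
    // activated.getFloat(0) is roughly -0.632f, activated.getFloat(2) == 2.0f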

For more details on fractional max pooling, see this paper: * Benjamin Graham, Fractional Max-Pooling * - * @param data type for {@code output} output * @param value 4-D with shape {@code [batch, height, width, channels]}. * @param poolingRatio Pooling ratio for each dimension of {@code value}, currently only * supports row and col dimension and should be >= 1.0. For example, a valid @@ -1451,7 +1412,6 @@ public FractionalMaxPool fractionalMaxPool(Operand val /** * Computes gradient of the FractionalMaxPool function. * - * @param data type for {@code output} output * @param origInput Original input for {@code fractional_max_pool} * @param origOutput Original output for {@code fractional_max_pool} * @param outBackprop 4-D with shape {@code [batch, height, width, channels]}. Gradients @@ -1475,8 +1435,6 @@ public FractionalMaxPoolGrad fractionalMaxPoolGrad(Operan * Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". * The size of 1D Tensors matches the dimension C of the 4D Tensors. * - * @param data type for {@code y} output - * @param data type for {@code batch_mean} output * @param x A 4D Tensor for input data. * @param scale A 1D Tensor for scaling factor, to scale the normalized x. * @param offset A 1D Tensor for offset, to shift to the normalized x. @@ -1500,8 +1458,6 @@ public FusedBatchNorm fusedBatchNor * Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". * The size of 1D Tensors matches the dimension C of the 4D Tensors. * - * @param data type for {@code x_backprop} output - * @param data type for {@code scale_backprop} output * @param yBackprop A 4D Tensor for the gradient with respect to y. * @param x A 4D Tensor for input data. * @param scale A 1D Tensor for scaling factor, to scale the normalized x. @@ -1542,7 +1498,6 @@ public FusedBatchNormGrad fusedBatc * will block if multiple versions are being run in parallel. This is because this * operator is primarily an optimization to minimize memory usage. * - * @param data type for {@code output} output * @param input 4-D with shape {@code [batch, in_height, in_width, in_channels]}. * @param paddings A two-column matrix specifying the padding sizes. The number of * rows must be the same as the rank of {@code input}. @@ -1574,7 +1529,6 @@ public FusedPadConv2d fusedPadConv2d(Operand input, * will block if multiple versions are being run in parallel. This is because this * operator is primarily an optimization to minimize memory usage. * - * @param data type for {@code output} output * @param input 4-D with shape {@code [batch, in_height, in_width, in_channels]}. * @param sizeOutput A 1-D int32 Tensor of 2 elements: {@code new_height, new_width}. The * new size for the images. @@ -1637,7 +1591,6 @@ public FusedResizeAndPadConv2d fusedResizeAndPadConv2d(Op * h = (1-u) \circ c + u \circ h_prev * * - * @param data type for {@code r} output * @param x The x value * @param hPrev The hPrev value * @param wRu The wRu value @@ -1728,7 +1681,6 @@ public GRUBlockCell gRUBlockCell(Operand x, Operand * d_b_c = sum of d_c_bar along axis = 0 * * - * @param data type for {@code d_x} output * @param x The x value * @param hPrev The hPrev value * @param wRu The wRu value @@ -1778,7 +1730,6 @@ public InTopK inTopK(Operand predictions, Operand< * Specifically, {@code grad = -dy * y*y}, where {@code y = 1/x}, and {@code dy} * is the corresponding input gradient. 
* - * @param data type for {@code z} output * @param y The y value * @param dy The dy value * @param data type for {@code InvGrad} output and operands @@ -1791,7 +1742,6 @@ public InvGrad invGrad(Operand y, Operand dy) { /** * Solves a batch of isotonic regression problems. * - * @param data type for {@code output} output * @param input A (batch_size, dim)-tensor holding a batch of inputs. * @return a new instance of IsotonicRegression, with default output types */ @@ -1802,7 +1752,6 @@ public IsotonicRegression isotonicRegression(Operand data type for {@code output} output * @param input A (batch_size, dim)-tensor holding a batch of inputs. * @param outputDtype Dtype of output. * @param data type for {@code IsotonicRegression} output and operands @@ -1820,7 +1769,6 @@ public IsotonicRegression isotonicRegression( * output = sum(t ** 2) / 2 * * - * @param data type for {@code output} output * @param t Typically 2-D, but may have any dimensions. * @param data type for {@code L2Loss} output and operands * @return a new instance of L2Loss @@ -1854,7 +1802,6 @@ public L2Loss l2Loss(Operand t) { * h = co .* o * * - * @param data type for {@code i} output * @param x The input to the LSTM cell, shape (batch_size, num_inputs). * @param csPrev Value of the cell state at previous time step. * @param hPrev Output of the previous cell at previous time step. @@ -1877,7 +1824,6 @@ public LSTMBlockCell lSTMBlockCell(Operand x, Operand< * Computes the LSTM cell backward propagation for 1 timestep. * This implementation is to be used in conjunction of LSTMBlockCell. * - * @param data type for {@code cs_prev_grad} output * @param x The input to the LSTM cell, shape (batch_size, num_inputs). * @param csPrev The previous cell state. * @param hPrev The previous h state. @@ -1908,7 +1854,6 @@ public LSTMBlockCellGrad lSTMBlockCellGrad(Operand x, /** * Computes rectified linear: {@code max(features, features * alpha)}. * - * @param data type for {@code activations} output * @param features The features value * @param options carries optional attribute values * @param data type for {@code LeakyRelu} output and operands @@ -1960,7 +1905,6 @@ public LearnedUnigramCandidateSampler learnedUnigramCandidateSampler(OperandFor details, see Krizhevsky et al., ImageNet classification with deep * convolutional neural networks (NIPS 2012) . * - * @param data type for {@code output} output * @param input 4-D. * @param options carries optional attribute values * @param data type for {@code LRN} output and operands @@ -1974,7 +1918,6 @@ public LocalResponseNormalization localResponseNormalizat /** * Gradients for Local Response Normalization. * - * @param data type for {@code output} output * @param inputGrads 4-D with shape {@code [batch, height, width, channels]}. * @param inputImage 4-D with shape {@code [batch, height, width, channels]}. * @param outputImage 4-D with shape {@code [batch, height, width, channels]}. @@ -1995,7 +1938,6 @@ public LocalResponseNormalizationGrad localResponseNormal * logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))) * * - * @param data type for {@code logsoftmax} output * @param logits 2-D with shape {@code [batch_size, num_classes]}. * @param data type for {@code LogSoftmax} output and operands * @return a new instance of LogSoftmax @@ -2007,7 +1949,6 @@ public LogSoftmax logSoftmax(Operand logits) { /** * Performs max pooling on the input. * - * @param data type for {@code output} output * @param input 4-D input to pool over. 
* @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the @@ -2025,7 +1966,6 @@ public MaxPool maxPool(Operand input, Operand /** * Performs 3D max pooling on the input. * - * @param data type for {@code output} output * @param input Shape {@code [batch, depth, rows, cols, channels]} tensor to pool over. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of * the input tensor. Must have {@code ksize[0] = ksize[4] = 1}. @@ -2044,7 +1984,6 @@ public MaxPool3d maxPool3d(Operand input, List k /** * Computes gradients of 3D max pooling function. * - * @param data type for {@code output} output * @param origInput The original input tensor. * @param origOutput The original output tensor. * @param grad Output backprop of shape {@code [batch, depth, rows, cols, channels]}. @@ -2067,7 +2006,6 @@ public MaxPool3dGrad maxPool3dGrad(Ope /** * Computes second-order gradients of the maxpooling function. * - * @param data type for {@code output} output * @param origInput The original input tensor. * @param origOutput The original output tensor. * @param grad Output backprop of shape {@code [batch, depth, rows, cols, channels]}. @@ -2089,7 +2027,6 @@ public MaxPool3dGradGrad maxPool3dGradGrad(Operand ori /** * Computes gradients of the maxpooling function. * - * @param data type for {@code output} output * @param origInput The original input tensor. * @param origOutput The original output tensor. * @param grad 4-D. Gradients w.r.t. the output of {@code max_pool}. @@ -2110,7 +2047,6 @@ public MaxPoolGrad maxPoolGrad(Operand origInput, Oper /** * Computes second-order gradients of the maxpooling function. * - * @param data type for {@code output} output * @param origInput The original input tensor. * @param origOutput The original output tensor. * @param grad 4-D. Gradients of gradients w.r.t. the input of {@code max_pool}. @@ -2131,7 +2067,6 @@ public MaxPoolGradGrad maxPoolGradGrad(Operand origInp /** * Computes second-order gradients of the maxpooling function. * - * @param data type for {@code output} output * @param input The original input. * @param grad 4-D with shape {@code [batch, height, width, channels]}. Gradients w.r.t. the * input of {@code max_pool}. @@ -2153,7 +2088,6 @@ public MaxPoolGradGradWithArgmax maxPoolGradGradWithArgma /** * Computes gradients of the maxpooling function. * - * @param data type for {@code output} output * @param input The original input. * @param grad 4-D with shape {@code [batch, height, width, channels]}. Gradients w.r.t. the * output of {@code max_pool}. @@ -2183,8 +2117,6 @@ public MaxPoolGradWithArgmax maxPoolGradWithArgmax(Operan * (either negative or too large). This is a bug, but fixing it is difficult to do * in a safe backwards compatible way, especially due to flattening. * - * @param data type for {@code output} output - * @param data type for {@code argmax} output * @param input 4-D with shape {@code [batch, height, width, channels]}. Input to pool over. * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the @@ -2210,8 +2142,6 @@ public MaxPoolWithArgmax maxPoolWithArgmax(Operan * (either negative or too large). This is a bug, but fixing it is difficult to do * in a safe backwards compatible way, especially due to flattening. 
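A pooling sketch for MaxPool, assuming the wrapper takes tensor-valued ksize/strides and a String padding as the signatures in this file suggest, plus the core fill op and the illustrative eager tf handle; verify against the generated NnOps before relying on it:

    // 1x4x4x1 input of ones; 2x2 window and stride with VALID padding -> 1x2x2x1 output of ones
    TFloat32 pooled = tf.nn.maxPool(
            tf.fill(tf.constant(new int[] {1, 4, 4, 1}), tf.constant(1.0f)),
            tf.constant(new int[] {1, 2, 2, 1}),   // ksize
            tf.constant(new int[] {1, 2, 2, 1}),   // strides
            "VALID")
        .asTensor();
    // pooled has shape [1, 2, 2, 1]; every element is 1.0f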
* - * @param data type for {@code output} output - * @param data type for {@code argmax} output * @param input 4-D with shape {@code [batch, height, width, channels]}. Input to pool over. * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the @@ -2239,7 +2169,6 @@ public MaxPoolWithArgmax maxPoolWit * values.shape = input.shape[:-1] * * - * @param data type for {@code values} output * @param input 1-D or higher with last dimension at least {@code n+1}. * @param n 0-D. Position of sorted vector to select along the last dimension (along * each row for matrices). Valid range of n is {@code [0, input.shape[:-1])} @@ -2255,7 +2184,6 @@ public NthElement nthElement(Operand input, Operand data type for {@code output} output * @param input 4-D with shape {@code [batch, height, width, channels]}. * @param minInput The float value that the lowest quantized input value represents. * @param maxInput The float value that the highest quantized input value represents. @@ -2278,7 +2206,6 @@ public QuantizedAvgPool quantizedAvgPool(Operand input * This op is deprecated and will be removed in the future. Prefer * {@code tf.nn.batch_normalization}. * - * @param data type for {@code result} output * @param t A 4D input Tensor. * @param tMin The value represented by the lowest quantized input. * @param tMax The value represented by the highest quantized input. @@ -2322,7 +2249,6 @@ public QuantizedBatchNormWithGlobalNormal * Adds Tensor 'bias' to Tensor 'input' for Quantized types. * Broadcasts the values of bias on dimensions 0..N-2 of 'input'. * - * @param data type for {@code output} output * @param input The input value * @param bias A 1D bias Tensor with size matching the last dimension of 'input'. * @param minInput The float value that the lowest quantized input value represents. @@ -2342,7 +2268,6 @@ public QuantizedBiasAdd quantizedBiasAdd(Operand data type for {@code output} output * @param input The input value * @param filter The filter value * @param minInput The minInput value @@ -2367,7 +2292,6 @@ public QuantizedConv2DAndRelu quantizedConv2DAndRelu( /** * The QuantizedConv2DAndReluAndRequantize operation * - * @param data type for {@code output} output * @param input The input value * @param filter The filter value * @param minInput The minInput value @@ -2395,7 +2319,6 @@ public QuantizedConv2DAndReluAndRequantize quantizedConv2 /** * The QuantizedConv2DAndRequantize operation * - * @param data type for {@code output} output * @param input The input value * @param filter The filter value * @param minInput The minInput value @@ -2423,7 +2346,6 @@ public QuantizedConv2DAndRequantize quantizedConv2DAndReq /** * Computes QuantizedConv2D per channel. * - * @param data type for {@code output} output * @param input The original input tensor. * @param filter The original filter tensor. 
* @param minInput The minimum value of the input tensor @@ -2448,7 +2370,6 @@ public QuantizedConv2DPerChannel quantizedConv2DPerChanne /** * The QuantizedConv2DWithBias operation * - * @param data type for {@code output} output * @param input The input value * @param filter The filter value * @param bias The bias value @@ -2474,7 +2395,6 @@ public QuantizedConv2DWithBias quantizedConv2DWithBias( /** * The QuantizedConv2DWithBiasAndRelu operation * - * @param data type for {@code output} output * @param input The input value * @param filter The filter value * @param bias The bias value @@ -2500,7 +2420,6 @@ public QuantizedConv2DWithBiasAndRelu quantizedConv2DWith /** * The QuantizedConv2DWithBiasAndReluAndRequantize operation * - * @param data type for {@code output} output * @param input The input value * @param filter The filter value * @param bias The bias value @@ -2529,7 +2448,6 @@ public QuantizedConv2DWithBiasAndReluAndRequantize quanti /** * The QuantizedConv2DWithBiasAndRequantize operation * - * @param data type for {@code output} output * @param input The input value * @param filter The filter value * @param bias The bias value @@ -2558,7 +2476,6 @@ public QuantizedConv2DWithBiasAndRequantize quantizedConv /** * The QuantizedConv2DWithBiasSignedSumAndReluAndRequantize operation * - * @param data type for {@code output} output * @param input The input value * @param filter The filter value * @param bias The bias value @@ -2592,7 +2509,6 @@ public QuantizedConv2DWithBiasSignedSumAndReluAndRequantize< /** * The QuantizedConv2DWithBiasSumAndRelu operation * - * @param data type for {@code output} output * @param input The input value * @param filter The filter value * @param bias The bias value @@ -2619,7 +2535,6 @@ public QuantizedConv2DWithBiasSumAndRelu quantizedConv2DW /** * The QuantizedConv2DWithBiasSumAndReluAndRequantize operation * - * @param data type for {@code output} output * @param input The input value * @param filter The filter value * @param bias The bias value @@ -2657,7 +2572,6 @@ public QuantizedConv2DWithBiasSumAndReluAndRequantize qua * This means that you can only interpret the quantized output in the same way, by * taking the returned minimum and maximum values into account. * - * @param data type for {@code output} output * @param input The input value * @param filter filter's input_depth dimension must match input's depth dimensions. * @param minInput The float value that the lowest quantized input value represents. @@ -2682,7 +2596,6 @@ public QuantizedConv2d quantizedConv2d(Operand data type for {@code output} output * @param input The original input tensor. * @param filter The original filter tensor. * @param minInput The float value that the minimum quantized input value represents. @@ -2707,7 +2620,6 @@ public QuantizedDepthwiseConv2D quantizedDepthwiseConv2D( /** * Computes quantized depthwise Conv2D with Bias. * - * @param data type for {@code output} output * @param input The original input tensor. * @param filter The original filter tensor. * @param bias The original bias tensor. @@ -2733,7 +2645,6 @@ public QuantizedDepthwiseConv2DWithBias quantizedDepthwis /** * Computes quantized depthwise Conv2D with Bias and Relu. * - * @param data type for {@code output} output * @param input The original input tensor. * @param filter The original filter tensor. * @param bias The original bias tensor. @@ -2759,7 +2670,6 @@ public QuantizedDepthwiseConv2DWithBiasAndRelu quantizedD /** * Computes quantized depthwise Conv2D with Bias, Relu and Requantize. 
* - * @param data type for {@code output} output * @param input The original input tensor. * @param filter The original filter tensor. * @param bias The original bias tensor. @@ -2788,7 +2698,6 @@ public QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize< /** * Quantized Instance normalization. * - * @param data type for {@code y} output * @param x A 4D input Tensor. * @param xMin The value represented by the lowest quantized input. * @param xMax The value represented by the highest quantized input. @@ -2804,7 +2713,6 @@ public QuantizedInstanceNorm quantizedInstanceNorm(Operan /** * Produces the max pool of the input tensor for quantized types. * - * @param data type for {@code output} output * @param input The 4D (batch x rows x cols x depth) Tensor to MaxReduce over. * @param minInput The float value that the lowest quantized input value represents. * @param maxInput The float value that the highest quantized input value represents. @@ -2825,7 +2733,6 @@ public QuantizedMaxPool quantizedMaxPool(Operand input /** * Computes Quantized Rectified Linear: {@code max(features, 0)} * - * @param data type for {@code activations} output * @param features The features value * @param minFeatures The float value that the lowest quantized value represents. * @param maxFeatures The float value that the highest quantized value represents. @@ -2841,7 +2748,6 @@ public QuantizedRelu quantizedRelu(Operand data type for {@code activations} output * @param features The features value * @param minFeatures The float value that the lowest quantized value represents. * @param maxFeatures The float value that the highest quantized value represents. @@ -2857,7 +2763,6 @@ public QuantizedRelu6 quantizedRelu6(Operand data type for {@code activations} output * @param features The features value * @param maxValue The maxValue value * @param minFeatures The float value that the lowest quantized value represents. @@ -2885,7 +2790,6 @@ public QuantizedReluX quantizedReluX(Operand * * - * @param data type for {@code activations} output * @param features The features value * @param data type for {@code Relu} output and operands * @return a new instance of Relu @@ -2897,7 +2801,6 @@ public Relu relu(Operand features) { /** * Computes rectified linear 6: {@code min(max(features, 0), 6)}. * - * @param data type for {@code activations} output * @param features The features value * @param data type for {@code Relu6} output and operands * @return a new instance of Relu6 @@ -2909,7 +2812,6 @@ public Relu6 relu6(Operand features) { /** * Computes rectified linear 6 gradients for a Relu6 operation. * - * @param data type for {@code backprops} output * @param gradients The backpropagated gradients to the corresponding Relu6 operation. * @param features The features passed as input to the corresponding Relu6 operation, or * its output; using either one produces the same result. @@ -2923,7 +2825,6 @@ public Relu6Grad relu6Grad(Operand gradients, Operand< /** * Computes rectified linear gradients for a Relu operation. * - * @param data type for {@code backprops} output * @param gradients The backpropagated gradients to the corresponding Relu operation. * @param features The features passed as input to the corresponding Relu operation, OR * the outputs of that operation (both work equivalently). @@ -2942,7 +2843,6 @@ public ReluGrad reluGrad(Operand gradients, Operand * For correct dropout, use {@code tf.contrib.nn.alpha_dropout}. *

See Self-Normalizing Neural Networks * - * @param data type for {@code activations} output * @param features The features value * @param data type for {@code Selu} output and operands * @return a new instance of Selu @@ -2954,7 +2854,6 @@ public Selu selu(Operand features) { /** * Computes gradients for the scaled exponential linear (Selu) operation. * - * @param data type for {@code backprops} output * @param gradients The backpropagated gradients to the corresponding Selu operation. * @param outputs The outputs of the corresponding Selu operation. * @param data type for {@code SeluGrad} output and operands @@ -2971,7 +2870,6 @@ public SeluGrad seluGrad(Operand gradients, Operand * $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$ * * - * @param data type for {@code softmax} output * @param logits 2-D with shape {@code [batch_size, num_classes]}. * @param data type for {@code Softmax} output and operands * @return a new instance of Softmax @@ -2984,7 +2882,6 @@ public Softmax softmax(Operand logits) { * Computes softmax cross entropy cost and gradients to backpropagate. * Inputs are the logits, not probabilities. * - * @param data type for {@code loss} output * @param features batch_size x num_classes matrix * @param labels batch_size x num_classes matrix * The caller must ensure that each batch of labels represents a valid @@ -3000,7 +2897,6 @@ public SoftmaxCrossEntropyWithLogits softmaxCrossEntropyW /** * Computes softsign: {@code features / (abs(features) + 1)}. * - * @param data type for {@code activations} output * @param features The features value * @param data type for {@code Softsign} output and operands * @return a new instance of Softsign @@ -3012,7 +2908,6 @@ public Softsign softsign(Operand features) { /** * Computes softsign gradients for a softsign operation. * - * @param data type for {@code backprops} output * @param gradients The backpropagated gradients to the corresponding softsign operation. * @param features The features passed as input to the corresponding softsign operation. * @param data type for {@code SoftsignGrad} output and operands @@ -3090,7 +2985,6 @@ public SoftsignGrad softsignGrad(Operand gradients, *

Among others, this operation is useful for reducing atrous convolution into * regular convolution. * - * @param data type for {@code output} output * @param input 4-D with shape {@code [batch, height, width, depth]}. * @param paddings 2-D tensor of non-negative integers with shape {@code [2, 2]}. It specifies * the padding of the input with zeros across the spatial dimensions as follows: @@ -3182,7 +3076,6 @@ public SpaceToBatch spaceToBatch(Operand input, * [13, 14, 15, 16]]]] * * - * @param data type for {@code output} output * @param input The input value * @param blockSize The size of the spatial block. * @param options carries optional attribute values @@ -3202,7 +3095,6 @@ public SpaceToDepth spaceToDepth(Operand input, Long blo * given row. *

Inputs are the logits, not probabilities. * - * @param data type for {@code loss} output * @param features batch_size x num_classes matrix * @param labels batch_size vector with values in [0, num_classes). * This is the label for the given minibatch entry. @@ -3226,8 +3118,6 @@ public SparseSoftmaxCrossEntropyWithLogits sparseSoftmaxC * *
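
For orientation, a minimal eager-mode sketch of the softmax endpoint whose Javadoc is adjusted above; the class name, session setup and tensor values are illustrative only, not part of the generated API.

import org.tensorflow.EagerSession;
import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TFloat32;

public final class SoftmaxSketch {
  public static void main(String[] args) {
    try (EagerSession session = EagerSession.create()) {
      Ops tf = Ops.create(session);
      // 2-D logits with shape [batch_size, num_classes] (made-up values).
      Operand<TFloat32> logits = tf.constant(new float[][] {{1f, 2f, 3f}, {1f, 1f, 1f}});
      // softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))
      Operand<TFloat32> probs = tf.nn.softmax(logits);
      System.out.println(probs.asTensor().getFloat(0, 2)); // ~0.665
    }
  }
}
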

If two elements are equal, the lower-index element appears first. * - * @param data type for {@code values} output - * @param data type for {@code indices} output * @param input 1-D or higher with last dimension at least {@code k}. * @param k 0-D. Number of top elements to look for along the last dimension (along each * row for matrices). @@ -3252,8 +3142,6 @@ public TopK topK(Operand input, Operand *

If two elements are equal, the lower-index element appears first. * - * @param data type for {@code values} output - * @param data type for {@code indices} output * @param input 1-D or higher with last dimension at least {@code k}. * @param k 0-D. Number of top elements to look for along the last dimension (along each * row for matrices). @@ -3287,7 +3175,6 @@ public TopK topK(Operand input, *
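
A correspondingly small sketch of topK, reusing the eager `tf` handle from the softmax sketch; the two-argument overload shown in this hunk is assumed, and the values are made up.

// Top-2 values (their positions are available via indices()) along the last dimension.
Operand<TFloat32> scores = tf.constant(new float[] {0.1f, 0.7f, 0.2f, 0.9f});
Operand<TFloat32> top2 = tf.nn.topK(scores, tf.constant(2)).values(); // [0.9, 0.7]
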

{@code output} is also quantized, using the same formula. * If {@code rhs} is per-tensor quantized, {@code output} must be also per-tensor quantized. * - * @param data type for {@code output} output * @param lhs Must be a quantized tensor, rank >= 3. * @param rhs Must be a quantized tensor, same rank as {@code lhs}. * @param lhsScales The float value(s) used as scale factors when quantizing the original data that {@code lhs} represents. @@ -3358,7 +3245,6 @@ public UniformQuantizedConvolution uni *

{@code rhs} must be quantized Tensor, where its data value is quantized using the formula: * quantized_data = clip(original_data / scale + zero_point, quantization_min_val, quantization_max_val). * - * @param data type for {@code output} output * @param lhs Must be a non-quantized Tensor of {@code Tlhs}, rank >= 3. * @param rhs Must be a quantized Tensor of {@code Trhs}, same rank as {@code lhs}. * @param rhsScales The float value(s) used as scale factors when quantizing the original data that {@code rhs} represents. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java index 93a6a3eb05c..765a67f27e8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java @@ -407,10 +407,10 @@ public final class Ops { public final CollectiveOps collective; - public final AudioOps audio; - public final DistributeOps distribute; + public final AudioOps audio; + public final SignalOps signal; public final TrainOps train; @@ -419,10 +419,10 @@ public final class Ops { public final SummaryOps summary; - public final ImageOps image; - public final RaggedOps ragged; + public final ImageOps image; + public final ShapeOps shape; public final IoOps io; @@ -450,14 +450,14 @@ public final class Ops { bitwise = new BitwiseOps(this); debugging = new DebuggingOps(this); collective = new CollectiveOps(this); - audio = new AudioOps(this); distribute = new DistributeOps(this); + audio = new AudioOps(this); signal = new SignalOps(this); train = new TrainOps(this); quantization = new QuantizationOps(this); summary = new SummaryOps(this); - image = new ImageOps(this); ragged = new RaggedOps(this); + image = new ImageOps(this); shape = new ShapeOps(this); io = new IoOps(this); dtypes = new DtypesOps(this); @@ -618,7 +618,6 @@ public Any any(Operand input, Operand axis, Any.Option * See https://arxiv.org/abs/2206.14286 for the algorithm details. * This op is only optimized on TPU currently. * - * @param data type for {@code values} output * @param input Array to search. Must be at least 1-D of the floating type * @param k Specifies the number of min/max-k. * @param options carries optional attribute values @@ -732,7 +731,6 @@ public AssertThat assertThat(Operand condition, Iterable> data * This operation outputs "ref" after the assignment is done. * This makes it easier to chain operations that need to use the reset value. * - * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. May be uninitialized. * @param value The value to be assigned to the variable. * @param options carries optional attribute values @@ -749,7 +747,6 @@ public Assign assign(Operand ref, Operand value, * This operation outputs "ref" after the update is done. * This makes it easier to chain operations that need to use the reset value. * - * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. * @param value The value to be added to the variable. * @param options carries optional attribute values @@ -780,7 +777,6 @@ public AssignAddVariableOp assignAddVariableOp(Operand resource * This operation outputs "ref" after the update is done. * This makes it easier to chain operations that need to use the reset value. * - * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. 
* @param value The value to be subtracted to the variable. * @param options carries optional attribute values @@ -1027,7 +1023,6 @@ public BatchFunction batchFunction(Iterable> inTensors, * dimension are moved in spatial blocks to the {@code height} and {@code width} dimensions, * followed by cropping along the {@code height} and {@code width} dimensions. * - * @param data type for {@code output} output * @param input 4-D tensor with shape * {@code [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, depth]}. Note that the batch size of the input tensor must be divisible by * {@code block_size * block_size}. @@ -1055,7 +1050,6 @@ public BatchToSpace batchToSpace(Operand input, * optionally cropped according to {@code crops} to produce the output. This is the * reverse of SpaceToBatch. See below for a precise description. * - * @param data type for {@code output} output * @param input N-D with shape {@code input_shape = [batch] + spatial_shape + remaining_shape}, * where spatial_shape has M dimensions. * @param blockShape 1-D with shape {@code [M]}, all values must be >= 1. @@ -1221,7 +1215,6 @@ public BatchToSpaceNd batchToSpaceNd(Operand input, * buffer is made on BE machines when types are of different sizes in order to get * the same casting results as on LE machines. * - * @param data type for {@code output} output * @param input The input value * @param type The value of the type attribute * @param data type for {@code Bitcast} output and operands @@ -1292,7 +1285,6 @@ public Operand booleanMaskUpdate(Operand tensor, Operand * Given {@code s0} and {@code s1}, tensors that represent shapes, compute {@code r0}, the * broadcasted shape. {@code s0}, {@code s1} and {@code r0} are all integer vectors. * - * @param data type for {@code r0} output * @param s0 The s0 value * @param s1 The s1 value * @param data type for {@code BroadcastArgs} output and operands @@ -1307,7 +1299,6 @@ public BroadcastDynamicShape broadcastDynamicShape(Operan * Return the reduction indices for computing gradients of s0 op s1 with broadcast. * This is typically used by gradient computations for a broadcasting operation. * - * @param data type for {@code r0} output * @param s0 The s0 value * @param s1 The s1 value * @param data type for {@code BroadcastGradientArgs} output and operands @@ -1357,7 +1348,6 @@ public BroadcastGradientArgs broadcastGradientArgs(Operan * shape. (In a graph context, {@code broadcast_to} might be fused to * subsequent operation and then be optimized away, however.) * - * @param data type for {@code output} output * @param input A Tensor to broadcast. * @param shape An 1-D {@code int} Tensor. The shape of the desired output. * @param data type for {@code BroadcastTo} output and operands @@ -1458,7 +1448,6 @@ public Case caseOp(Operand branchIndex, Iterable> input, * Any values less than {@code clip_value_min} are set to {@code clip_value_min}. Any values * greater than {@code clip_value_max} are set to {@code clip_value_max}. * - * @param data type for {@code output} output * @param t A {@code Tensor}. * @param clipValueMin A 0-D (scalar) {@code Tensor}, or a {@code Tensor} with the same shape * as {@code t}. The minimum value to clip by. @@ -1508,7 +1497,6 @@ public CompositeTensorVariantToComponents compositeTensorVariantToComponents( /** * Concatenates tensors along one dimension. * - * @param data type for {@code output} output * @param values List of {@code N} Tensors to concatenate. 
Their ranks and types must match, * and their sizes must match in all dimensions except {@code concat_dim}. * @param axis 0-D. The dimension along which to concatenate. Must be in the @@ -1538,7 +1526,6 @@ public Concat concat(Iterable> values, * *
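
The concat endpoint above can be exercised the same way (illustrative shapes, `tf` as before):

// Concatenate two [1, 2] tensors along dimension 0 -> shape [2, 2].
Operand<TFloat32> a = tf.constant(new float[][] {{1f, 2f}});
Operand<TFloat32> b = tf.constant(new float[][] {{3f, 4f}});
Operand<TFloat32> merged = tf.concat(java.util.Arrays.asList(a, b), tf.constant(0));
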

This is typically used by gradient computations for a concat operation. * - * @param data type for {@code offset} output * @param concatDim The dimension along which to concatenate. * @param shape The {@code N} int32 or int64 vectors representing shape of tensors being concatenated. * @param data type for {@code ConcatOffset} output and operands @@ -2262,11 +2249,7 @@ public Constant constant(Class type, Shape shape, ByteDa /** * Create a constant by making an immutable copy of {@code tensor}. {@code tensor} may be closed - * afterwards without issue. - * - *

Note: this endpoint cannot be simply called {@code constant} since it will conflict with - * other endpoints accepting an NdArray in parameter {e.g. {@link #tensorOf(Scope, - * FloatNdArray)}}. + * afterward without issue. * * @param tensor a Tensor holding the constant value * @return a constant of the same data type as `tensor` @@ -2318,7 +2301,6 @@ public ControlTrigger controlTrigger() { /** * The CopyToMesh operation * - * @param data type for {@code output} output * @param input The input value * @param mesh The value of the mesh attribute * @param data type for {@code CopyToMesh} output and operands @@ -2331,7 +2313,6 @@ public CopyToMesh copyToMesh(Operand input, String mesh) /** * The CopyToMeshGrad operation * - * @param data type for {@code output} output * @param input The input value * @param forwardInput The forwardInput value * @param data type for {@code CopyToMeshGrad} output and operands @@ -2345,7 +2326,6 @@ public CopyToMeshGrad copyToMeshGrad(Operand input, /** * Increments 'ref' until it reaches 'limit'. * - * @param data type for {@code output} output * @param ref Should be from a scalar {@code Variable} node. * @param limit If incrementing ref would bring it above limit, instead generates an * 'OutOfRange' error. @@ -2440,7 +2420,6 @@ public DecodeProto decodeProto(Operand bytes, String messageType, /** * Makes a copy of {@code x}. * - * @param data type for {@code y} output * @param x The source tensor of type {@code T}. * @param data type for {@code DeepCopy} output and operands * @return a new instance of DeepCopy @@ -2482,7 +2461,6 @@ public DestroyResourceOp destroyResourceOp(Operand resource, * using control dependencies. *

Outputs the final value of the tensor pointed to by 'ref'. * - * @param data type for {@code value} output * @param ref A reference to the temporary variable tensor. * @param varName Name of the temporary variable, usually the name of the matching * 'TemporaryVariable' op. @@ -2560,7 +2538,6 @@ public DummyMemoryCache dummyMemoryCache() { * * * - * @param data type for {@code outputs} output * @param data The data value * @param partitions Any shape. Indices in the range {@code [0, num_partitions)}. * @param numPartitions The number of partitions to output. @@ -2628,7 +2605,6 @@ public DynamicPartition dynamicPartition(Operand data, * * * - * @param data type for {@code merged} output * @param indices The indices value * @param data The data value * @param data type for {@code DynamicStitch} output and operands @@ -2672,7 +2648,6 @@ public EditDistance editDistance(Operand hypothesisInd * Creates a tensor with the given shape. *

This operation creates a tensor of {@code shape} and {@code dtype}. * - * @param data type for {@code output} output * @param shape 1-D. Represents the shape of the output tensor. * @param dtype The value of the dtype attribute * @param options carries optional attribute values @@ -2778,7 +2753,6 @@ public EncodeProto encodeProto(Operand sizes, Iterable> value * Raises an error if the input tensor's shape does not match the specified shape. * Returns the input tensor otherwise. * - * @param data type for {@code output} output * @param input A tensor, whose shape is to be validated. * @param shape The expected (possibly partially specified) shape of the input tensor. * @param data type for {@code EnsureShape} output and operands @@ -2796,7 +2770,6 @@ public EnsureShape ensureShape(Operand input, Shape shap * it may be changed in the child frame. At most {@code parallel_iterations} iterations * are run in parallel in the child frame. * - * @param data type for {@code output} output * @param data The tensor to be made available to the child frame. * @param frameName The name of the child frame. * @param options carries optional attribute values @@ -2812,7 +2785,6 @@ public Enter enter(Operand data, String frameName, * Exits the current frame to its parent frame. * Exit makes its input {@code data} available to the parent frame. * - * @param data type for {@code output} output * @param data The tensor to be made available to the parent frame. * @param data type for {@code Exit} output and operands * @return a new instance of Exit @@ -2847,7 +2819,6 @@ public Exit exit(Operand data) { *

This operation is related to {@code squeeze()}, which removes dimensions of * size 1. * - * @param data type for {@code output} output * @param input The input value * @param axis 0-D (scalar). Specifies the dimension index at which to * expand the shape of {@code input}. Must be in the range @@ -2863,7 +2834,6 @@ public ExpandDims expandDims(Operand input, /** * Extract {@code patches} from {@code input} and put them in the {@code "depth"} output dimension. 3D extension of {@code extract_image_patches}. * - * @param data type for {@code patches} output * @param input 5-D Tensor with shape {@code [batch, in_planes, in_rows, in_cols, depth]}. * @param ksizes The size of the sliding window for each dimension of {@code input}. * @param strides 1-D of length 5. How far the centers of two consecutive patches are in @@ -2888,7 +2858,6 @@ public ExtractVolumePatches extractVolumePatches(Operand< * function input) or guaranteed not to be used (e.g. if mirroring an * intermediate output needed for the gradient computation of the other branch). * - * @param data type for {@code output} output * @param dtype The type of the output. * @param shape

    *  The purported shape of the output. This is only used for shape inference;
@@ -2934,7 +2903,6 @@ public FileSystemSetConfiguration fileSystemSetConfiguration(Operand sc
    *  based on other runtime Tensors, unlike {@code tf.constant}.
    *  
    *
-   * @param  data type for {@code output} output
    * @param dims 1-D. Represents the shape of the output tensor.
    * @param value 0-D (scalar). Value to fill the returned tensor.
    *  

{@literal @}compatibility(numpy)
@@ -3030,7 +2998,6 @@ public For forOp(Operand start, Operand limit, Operand d * corresponding output value. *

See also {@code tf.batch_gather} and {@code tf.gather_nd}. * - * @param data type for {@code output} output * @param params The tensor from which to gather values. Must be at least rank * {@code axis + 1}. * @param indices Index tensor. Must be in range {@code [0, params.shape[axis])}. @@ -3137,7 +3104,6 @@ public Gather gather(Operand params, Operand *

See also {@code tf.gather} and {@code tf.batch_gather}. * - * @param data type for {@code output} output * @param params The tensor from which to gather values. * @param indices Index tensor. * @param data type for {@code GatherNd} output and operands @@ -3185,7 +3151,6 @@ public GetSessionHandle getSessionHandle(Operand value) { /** * Get the value of the tensor specified by its handle. * - * @param data type for {@code value} output * @param handle The handle for a tensor stored in the session state. * @param dtype The type of the output value. * @param data type for {@code GetSessionTensor} output and operands @@ -3250,7 +3215,6 @@ public Gradients gradients(Iterable> y, IterableReturns the input tensor without modification. * - * @param data type for {@code output} output * @param input The input value * @param data type for {@code GuaranteeConst} output and operands * @return a new instance of GuaranteeConst @@ -3294,7 +3258,6 @@ public HashTable hashTable(Class keyDtype, * sess.run(hist) => [2, 1, 1, 0, 2] *
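
A hedged usage sketch for gather with an explicit axis, reusing the same eager `tf` handle (values are made up):

// Gather rows 2 and 0 of a [3, 2] tensor along axis 0 -> shape [2, 2].
Operand<TFloat32> params = tf.constant(new float[][] {{1f, 2f}, {3f, 4f}, {5f, 6f}});
Operand<TFloat32> picked = tf.gather(params, tf.constant(new int[] {2, 0}), tf.constant(0));
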

* - * @param data type for {@code out} output * @param values Numeric {@code Tensor}. * @param valueRange Shape [2] {@code Tensor} of same {@code dtype} as {@code values}. * values <= value_range[0] will be mapped to hist[0], @@ -3325,7 +3288,6 @@ public HistogramFixedWidth histogramFixedWidth(Opera * sess.run(hist) => [2, 1, 1, 0, 2] * * - * @param data type for {@code out} output * @param values Numeric {@code Tensor}. * @param valueRange Shape [2] {@code Tensor} of same {@code dtype} as {@code values}. * values <= value_range[0] will be mapped to hist[0], @@ -3344,7 +3306,6 @@ public HistogramFixedWidth histogramFi /** * Returns a constant tensor on the host. Only for writing C++ tests. * - * @param data type for {@code output} output * @param value Attr {@code value} is the tensor to return. * @param dtype The value of the dtype attribute * @param data type for {@code HostConst} output and operands @@ -3357,7 +3318,6 @@ public HostConst hostConst(Tensor value, Class dtype) { /** * Return a tensor with the same shape and contents as the input tensor or value. * - * @param data type for {@code output} output * @param input The input value * @param data type for {@code Identity} output and operands * @return a new instance of Identity @@ -3425,7 +3385,6 @@ public If ifOp(Operand cond, Iterable> input, * Returns immutable tensor from memory region. * The current implementation memmaps the tensor from a file. * - * @param data type for {@code tensor} output * @param dtype Type of the returned tensor. * @param shape Shape of the returned tensor. * @param memoryRegionName Name of readonly memory region used by the tensor, see @@ -3485,7 +3444,6 @@ public InitializeTableFromTextFile initializeTableFromTextFile( * Computes y = x; y[i, :] += v; return y. * * - * @param data type for {@code y} output * @param x A {@code Tensor} of type T. * @param i A vector. Indices into the left-most dimension of {@code x}. * @param v A {@code Tensor} of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. @@ -3503,7 +3461,6 @@ public InplaceAdd inplaceAdd(Operand x, Operand * Computes y = x; y[i, :] -= v; return y. * * - * @param data type for {@code y} output * @param x A {@code Tensor} of type T. * @param i A vector. Indices into the left-most dimension of {@code x}. * @param v A {@code Tensor} of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. @@ -3520,7 +3477,6 @@ public InplaceSub inplaceSub(Operand x, Operand *

Originally this function is mutative however for compilation we make this * operation create / operate on a copy of {@code x}. * - * @param data type for {@code y} output * @param x A tensor of type {@code T}. * @param i A vector. Indices into the left-most dimension of {@code x}. * @param v A {@code Tensor} of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. @@ -3578,7 +3534,6 @@ public KthOrderStatistic kthOrderStatistic(Operand input, Long k) { * tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0 11.0 12.0] * * - * @param data type for {@code output} output * @param start 0-D tensor. First entry in the range. * @param stop 0-D tensor. Last entry in the range. * @param num 0-D tensor. Number of values to generate. @@ -3593,8 +3548,6 @@ public LinSpace linSpace(Operand start, Operand sto /** * Outputs all keys and values in the table. * - * @param data type for {@code keys} output - * @param data type for {@code values} output * @param tableHandle Handle to the table. * @param Tkeys The value of the Tkeys attribute * @param Tvalues The value of the Tvalues attribute @@ -3614,7 +3567,6 @@ public LookupTableExport lookupTableExp *
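
Mirroring the tf.linspace example in the Javadoc above (same assumptions as the earlier sketches):

// Three evenly spaced values between 10 and 12.
Operand<TFloat32> line = tf.linSpace(tf.constant(10.0f), tf.constant(12.0f), tf.constant(3)); // [10.0, 11.0, 12.0]
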

The scalar {@code default_value} is the value output for keys not present in the * table. It must also be of the same type as the table values. * - * @param data type for {@code values} output * @param tableHandle Handle to the table. * @param keys Any shape. Keys to look up. * @param defaultValue The defaultValue value @@ -3708,7 +3660,6 @@ public LoopCond loopCond(Operand input) { *

result == [[1, 2, 2], * [0, 1, 5]] * - * @param data type for {@code output} output * @param sortedInputs 2-D Tensor where each row is ordered. * @param values 2-D Tensor with the same numbers of rows as {@code sorted_search_values}. Contains * the values that will be searched for in {@code sorted_search_values}. @@ -3736,7 +3687,6 @@ public LowerBound lowerBound(Operand sortedInputs, *

result == [[1, 2, 2], * [0, 1, 5]] * - * @param data type for {@code output} output * @param sortedInputs 2-D Tensor where each row is ordered. * @param values 2-D Tensor with the same numbers of rows as {@code sorted_search_values}. Contains * the values that will be searched for in {@code sorted_search_values}. @@ -3901,7 +3851,6 @@ public MapUnstageNoKey mapUnstageNoKey(Operand indices, * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. * - * @param data type for {@code output} output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range * {@code [-rank(input), rank(input))}. @@ -3921,7 +3870,6 @@ public Max max(Operand input, Operand{@code Merge} forwards the first tensor to become available to {@code output}, and sets * {@code value_index} to its index in {@code inputs}. * - * @param data type for {@code output} output * @param inputs The input tensors, exactly one of which will become available. * @param data type for {@code Merge} output and operands * @return a new instance of Merge @@ -3937,7 +3885,6 @@ public Merge merge(Iterable> inputs) { * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. * - * @param data type for {@code output} output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range * {@code [-rank(input), rank(input))}. @@ -3974,7 +3921,6 @@ public Min min(Operand input, Operand * - * @param data type for {@code output} output * @param input The input tensor to be padded. * @param paddings A two-column matrix specifying the padding sizes. The number of * rows must be the same as the rank of {@code input}. @@ -4008,7 +3954,6 @@ public MirrorPad mirrorPad(Operand input, * [11, 28]] * * - * @param data type for {@code output} output * @param input The input tensor to be folded. * @param paddings A two-column matrix specifying the padding sizes. The number of * rows must be the same as the rank of {@code input}. @@ -4187,7 +4132,6 @@ public MutexLock mutexLock(Operand mutex) { * num_devices: The number of devices participating in this reduction. * shared_name: Identifier that shared between ops of the same reduction. * - * @param data type for {@code data} output * @deprecated use {@link org.tensorflow.op.distribute.NcclAllReduce} instead * @param input The input value * @param reduction The value of the reduction attribute @@ -4211,7 +4155,6 @@ public NcclAllReduce ncclAllReduce(Operand input, Stri * output: The same as input. * shape: The shape of the input tensor. * - * @param data type for {@code output} output * @deprecated use {@link org.tensorflow.op.distribute.NcclBroadcast} instead * @param input The input value * @param shape The value of the shape attribute @@ -4232,7 +4175,6 @@ public NcclBroadcast ncclBroadcast(Operand input, Shap * data: the value of the reduction across all {@code num_devices} devices. * reduction: the reduction operation to perform. * - * @param data type for {@code data} output * @deprecated use {@link org.tensorflow.op.distribute.NcclReduce} instead * @param input The input value * @param reduction The value of the reduction attribute @@ -4248,7 +4190,6 @@ public NcclReduce ncclReduce(Iterable> input, /** * Makes its input available to the next iteration. * - * @param data type for {@code output} output * @param data The tensor to be made available to the next iteration. 
* @param data type for {@code NextIteration} output and operands * @return a new instance of NextIteration @@ -4343,7 +4284,6 @@ public NoOp noOp() { * ] * * - * @param data type for {@code output} output * @param indices A tensor of indices. * @param depth A scalar defining the depth of the one hot dimension. * @param onValue A scalar defining the value to fill in output when {@code indices[j] = i}. @@ -4372,7 +4312,6 @@ public Ones ones(Operand dims, Class /** * Returns a tensor of ones with the same shape and type as x. * - * @param data type for {@code y} output * @param x a tensor of type T. * @param data type for {@code OnesLike} output and operands * @return a new instance of OnesLike @@ -4506,7 +4445,6 @@ public OrderedMapUnstageNoKey orderedMapUnstageNoKey(Operand indices, * [0, 0, 0, 0, 0, 0]] * * - * @param data type for {@code output} output * @param input The input value * @param paddings The paddings value * @param constantValues The constantValues value @@ -4534,7 +4472,6 @@ public Pad pad(Operand input, Operand * will copy pieces of the input into the output as they become available, in * some situations this can provide a performance benefit. * - * @param data type for {@code output} output * @param values Tensors to be concatenated. All must have size 1 in the first dimension * and same shape. * @param shape the final shape of the result; should be equal to the shapes of any input @@ -4602,7 +4539,6 @@ public ParallelConcat parallelConcat(Iterable> v * * * - * @param data type for {@code merged} output * @param indices The indices value * @param data The data value * @param data type for {@code ParallelDynamicStitch} output and operands @@ -4641,7 +4577,6 @@ public PartitionedCall partitionedCall(Iterable> args, * intended as a way to represent a value that will always be fed, and to * provide attrs that enable the fed value to be checked at runtime. * - * @param data type for {@code output} output * @param dtype The type of elements in the tensor. * @param options carries optional attribute values * @param data type for {@code Placeholder} output and operands @@ -4655,7 +4590,6 @@ public Placeholder placeholder(Class dtype, /** * A placeholder op that passes through {@code input} when its output is not fed. * - * @param data type for {@code output} output * @param input The default value to produce when {@code output} is not fed. * @param shape The (possibly partial) shape of the tensor. * @param data type for {@code PlaceholderWithDefault} output and operands @@ -4685,7 +4619,6 @@ public Print print(Operand input, Print.Options... options) { * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. * - * @param data type for {@code output} output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range * {@code [-rank(input), rank(input))}. @@ -4701,7 +4634,6 @@ public Prod prod(Operand input, Operand data type for {@code output} output * @param tensor The tensor value * @param shape Defines the shape of the output tensor. * @param inputMin The minimum value of the input. @@ -4721,7 +4653,6 @@ public QuantizedReshape quantizedReshape(Operand tensor, * first dimension must match. *

The outputs are deterministic. * - * @param data type for {@code output} output * @param index A scalar tensor or a vector of dtype {@code dtype}. The index (or indices) to be shuffled. Must be within [0, max_index]. * @param seed A tensor of dtype {@code Tseed} and shape [3] or [n, 3]. The random seed. * @param maxIndex A scalar tensor or vector of dtype {@code dtype}. The upper bound(s) of the interval (inclusive). @@ -4746,7 +4677,6 @@ public RandomIndexShuffle randomIndexShuffle(Operand i * tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] * * - * @param data type for {@code output} output * @param start 0-D (scalar). First entry in the sequence. * @param limit 0-D (scalar). Upper limit of sequence, exclusive. * @param delta 0-D (scalar). Optional. Default is 1. Number that increments {@code start}. @@ -4785,7 +4715,6 @@ public Rank rank(Operand input) { * influenced by any of the writes which depend directly or indirectly on this * operation. * - * @param data type for {@code value} output * @param resource handle to the resource in which to store the variable. * @param dtype the dtype of the value. * @param data type for {@code ReadVariableOp} output and operands @@ -4799,7 +4728,6 @@ public ReadVariableOp readVariableOp(Operand data type for {@code tensor} output * @param tensorType The value of the tensorType attribute * @param tensorName The name of the tensor to receive. * @param sendDevice The name of the device sending the tensor. @@ -4857,7 +4785,6 @@ public ReduceAny reduceAny(Operand input, Operand axis * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. * - * @param data type for {@code output} output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range * {@code [-rank(input), rank(input))}. @@ -4877,7 +4804,6 @@ public ReduceMax reduceMax(Operand input, * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. * - * @param data type for {@code output} output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range * {@code [-rank(input), rank(input))}. @@ -4897,7 +4823,6 @@ public ReduceMin reduceMin(Operand input, * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. * - * @param data type for {@code output} output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range * {@code [-rank(input), rank(input))}. @@ -4917,7 +4842,6 @@ public ReduceProd reduceProd(Operand input, * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. * - * @param data type for {@code output} output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range * {@code [-rank(input), rank(input))}. @@ -4937,7 +4861,6 @@ public ReduceSum reduceSum(Operand input, Operand data type for {@code output} output * @param data The tensor to be made available to the child frame. * @param frameName The name of the child frame. * @param options carries optional attribute values @@ -4953,7 +4876,6 @@ public RefEnter refEnter(Operand data, String frameName, * Exits the current frame to its parent frame. * Exit makes its input {@code data} available to the parent frame. * - * @param data type for {@code output} output * @param data The tensor to be made available to the parent frame. 
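
The range and reduction endpoints touched above can be combined as follows (assumes org.tensorflow.types.TInt32 is imported, `tf` as before):

// Build [3, 6, 9, 12, 15] and sum it along axis 0.
Operand<TInt32> seq = tf.range(tf.constant(3), tf.constant(18), tf.constant(3));
Operand<TInt32> total = tf.reduceSum(seq, tf.constant(0)); // 45
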
* @param data type for {@code RefExit} output and operands * @return a new instance of RefExit @@ -4965,7 +4887,6 @@ public RefExit refExit(Operand data) { /** * Return the same ref tensor as the input ref tensor. * - * @param data type for {@code output} output * @param input The input value * @param data type for {@code RefIdentity} output and operands * @return a new instance of RefIdentity @@ -4981,7 +4902,6 @@ public RefIdentity refIdentity(Operand input) { *

{@code Merge} forwards the first tensor for become available to {@code output}, and sets * {@code value_index} to its index in {@code inputs}. * - * @param data type for {@code output} output * @param inputs The input tensors, exactly one of which will become available. * @param data type for {@code RefMerge} output and operands * @return a new instance of RefMerge @@ -4993,7 +4913,6 @@ public RefMerge refMerge(Iterable> inputs) { /** * Makes its input available to the next iteration. * - * @param data type for {@code output} output * @param data The tensor to be made available to the next iteration. * @param data type for {@code RefNextIteration} output and operands * @return a new instance of RefNextIteration @@ -5005,7 +4924,6 @@ public RefNextIteration refNextIteration(Operand data) { /** * Forwards the {@code index}th element of {@code inputs} to {@code output}. * - * @param data type for {@code output} output * @param index A scalar that determines the input that gets selected. * @param inputs A list of ref tensors, one of which will be forwarded to {@code output}. * @param data type for {@code RefSelect} output and operands @@ -5022,7 +4940,6 @@ public RefSelect refSelect(Operand index, * the data goes to {@code output_false}. *

See also {@code Switch} and {@code Merge}. * - * @param data type for {@code output_false} output * @param data The ref tensor to be forwarded to the appropriate output. * @param pred A scalar that specifies which output port will receive data. * @param data type for {@code RefSwitch} output and operands @@ -5035,7 +4952,6 @@ public RefSwitch refSwitch(Operand data, Operand /** * The Relayout operation * - * @param data type for {@code output} output * @param input The input value * @param layout The value of the layout attribute * @param data type for {@code Relayout} output and operands @@ -5048,7 +4964,6 @@ public Relayout relayout(Operand input, String layout) { /** * The RelayoutLike operation * - * @param data type for {@code output} output * @param input The input value * @param layoutInput The layoutInput value * @param data type for {@code RelayoutLike} output and operands @@ -5130,7 +5045,6 @@ public RemoteCall remoteCall(Operand target, Iterable> args, * reshape(t, []) ==> 7 * * - * @param data type for {@code output} output * @param tensor The tensor value * @param shape Defines the shape of the output tensor. * @param data type for {@code Reshape} output and operands @@ -5143,7 +5057,6 @@ public Reshape reshape(Operand tensor, Operand data type for {@code output} output * @param resource Should be from a scalar {@code Variable} node. * @param limit If incrementing ref would bring it above limit, instead generates an * 'OutOfRange' error. @@ -5171,7 +5084,6 @@ public ResourceCountUpTo resourceCountUpTo( * output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :] * * - * @param data type for {@code output} output * @param resource The resource value * @param indices The indices value * @param dtype The value of the dtype attribute @@ -5187,7 +5099,6 @@ public ResourceGather resourceGather(Operand data type for {@code output} output * @param resource The resource value * @param indices The indices value * @param dtype The value of the dtype attribute @@ -5633,7 +5544,6 @@ public ResourceStridedSliceAssign resourceStridedSliceAssign * [12, 13, 14, 15]]]] * * - * @param data type for {@code output} output * @param tensor Up to 8-D. * @param axis 1-D. The indices of the dimensions to reverse. Must be in the range * {@code [-rank(tensor), rank(tensor))}. @@ -5695,7 +5605,6 @@ public Reverse reverse(Operand tensor, Operand * - * @param data type for {@code output} output * @param input The input to reverse. * @param seqLengths 1-D with length {@code input.dims(batch_dim)} and * {@code max(seq_lengths) <= input.dims(seq_dim)} @@ -5730,7 +5639,6 @@ public ReverseSequence reverseSequence(Operand input, * roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]] * * - * @param data type for {@code output} output * @param input The input value * @param shift Dimension must be 0-D or 1-D. {@code shift[i]} specifies the number of places by which * elements are shifted positively (towards larger indices) along the dimension @@ -5770,7 +5678,6 @@ public Roll roll(Operand input, Operand * * - * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. * @param indices A tensor of indices into the first dimension of {@code ref}. * @param updates A tensor of updated values to add to {@code ref}. @@ -5802,7 +5709,6 @@ public ScatterAdd scatterAdd(Operand ref, * the same location, their contributions divide. *
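
For the reshape Javadoc above, an illustrative call (same assumptions):

// Reshape a length-9 vector into a 3x3 matrix.
Operand<TInt32> t = tf.constant(new int[] {1, 2, 3, 4, 5, 6, 7, 8, 9});
Operand<TInt32> grid = tf.reshape(t, tf.constant(new int[] {3, 3}));
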

Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = []}. * - * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. * @param indices A tensor of indices into the first dimension of {@code ref}. * @param updates A tensor of values that {@code ref} is divided by. @@ -5837,7 +5743,6 @@ public ScatterDiv scatterDiv(Operand ref, * * * - * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. * @param indices A tensor of indices into the first dimension of {@code ref}. * @param updates A tensor of updated values to reduce into {@code ref}. @@ -5872,7 +5777,6 @@ public ScatterMax scatterMax(Operand ref, * * * - * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. * @param indices A tensor of indices into the first dimension of {@code ref}. * @param updates A tensor of updated values to reduce into {@code ref}. @@ -5904,7 +5808,6 @@ public ScatterMin scatterMin(Operand ref, * the same location, their contributions multiply. *

Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = []}. * - * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. * @param indices A tensor of indices into the first dimension of {@code ref}. * @param updates A tensor of updated values to multiply to {@code ref}. @@ -5993,7 +5896,6 @@ public ScatterMul scatterMul(Operand ref, *

Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, the index is ignored. * - * @param data type for {@code output} output * @param indices Tensor of indices. * @param updates Values to scatter into the output tensor. * @param shape 1-D. The shape of the output tensor. @@ -6035,7 +5937,6 @@ public ScatterNd scatterNd(Operand in *

See {@code tf.scatter_nd} for more details about how to make updates to * slices. * - * @param data type for {@code output_ref} output * @param ref A mutable Tensor. Should be from a Variable node. * @param indices A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. @@ -6053,7 +5954,6 @@ public ScatterNdAdd scatterNdAdd(Operand ref, /** * Computes element-wise maximum. * - * @param data type for {@code output_ref} output * @param ref A mutable Tensor. Should be from a Variable node. * @param indices A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. @@ -6071,7 +5971,6 @@ public ScatterNdMax scatterNdMax(Operand ref, /** * Computes element-wise minimum. * - * @param data type for {@code output_ref} output * @param ref A mutable Tensor. Should be from a Variable node. * @param indices A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. @@ -6116,7 +6015,6 @@ public ScatterNdMin scatterNdMin(Operand ref, * *
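
A small sketch of scatterNd matching the classic scatter_nd example (illustrative values, `tf` as before):

// Scatter four updates into a zeroed vector of shape [8]
// -> [0, 11, 0, 10, 9, 0, 0, 12].
Operand<TInt32> indices = tf.constant(new int[][] {{4}, {3}, {1}, {7}});
Operand<TFloat32> updates = tf.constant(new float[] {9f, 10f, 11f, 12f});
Operand<TFloat32> scattered = tf.scatterNd(indices, updates, tf.constant(new int[] {8}));
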

See {@code tf.scatter_nd} for more details about how to make updates to slices. * - * @param data type for {@code output} output * @param input A Tensor. * @param indices A Tensor. Must be one of the following types: {@code int32}, {@code int64}. * A tensor of indices into {@code input}. @@ -6160,7 +6058,6 @@ public ScatterNdNonAliasingAdd scatterNdNonAliasingAdd(Oper *

See {@code tf.scatter_nd} for more details about how to make updates to * slices. * - * @param data type for {@code output_ref} output * @param ref A mutable Tensor. Should be from a Variable node. * @param indices A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. @@ -6204,7 +6101,6 @@ public ScatterNdSub scatterNdSub(Operand ref, * slices. *

See also {@code tf.scatter_update} and {@code tf.batch_scatter_update}. * - * @param data type for {@code output_ref} output * @param ref A mutable Tensor. Should be from a Variable node. * @param indices A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. @@ -6240,7 +6136,6 @@ public ScatterNdUpdate scatterNdUpdate(Operand ref, * * * - * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. * @param indices A tensor of indices into the first dimension of {@code ref}. * @param updates A tensor of updated values to subtract from {@code ref}. @@ -6277,7 +6172,6 @@ public ScatterSub scatterSub(Operand ref, * *

See also {@code tf.batch_scatter_update} and {@code tf.scatter_nd_update}. * - * @param data type for {@code output_ref} output * @param ref Should be from a {@code Variable} node. * @param indices A tensor of indices into the first dimension of {@code ref}. * @param updates A tensor of updated values to store in {@code ref}. @@ -6293,7 +6187,6 @@ public ScatterUpdate scatterUpdate(Operand ref, /** * The SelectV2 operation * - * @param data type for {@code output} output * @param condition The condition value * @param t The t value * @param e The e value @@ -6339,8 +6232,6 @@ public Send send(Operand tensor, String tensorName, String send * idx ==> [1, 3, 5] * * - * @param data type for {@code out} output - * @param data type for {@code idx} output * @param x 1-D. Values to keep. * @param y 1-D. Values to remove. * @param data type for {@code ListDiff} output and operands @@ -6369,8 +6260,6 @@ public SetDiff1d setDiff1d(Operand x, Operand * idx ==> [1, 3, 5] * * - * @param data type for {@code out} output - * @param data type for {@code idx} output * @param x 1-D. Values to keep. * @param y 1-D. Values to remove. * @param outIdx The value of the outIdx attribute @@ -6412,7 +6301,6 @@ public SetSize setSize(Operand setIndices, Operand setV * shape(t) ==> [2, 2, 3] * * - * @param data type for {@code output} output * @param input The input value * @return a new instance of Shape, with default output types */ @@ -6429,7 +6317,6 @@ public org.tensorflow.op.core.Shape shape(Operand input * shape(t) ==> [2, 2, 3] * * - * @param data type for {@code output} output * @param input The input value * @param outType The value of the outType attribute * @param data type for {@code Shape} output and operands @@ -6444,7 +6331,6 @@ public org.tensorflow.op.core.Shape shape(Operand data type for {@code output} output * @param input The input value * @return a new instance of ShapeN, with default output types */ @@ -6456,7 +6342,6 @@ public ShapeN shapeN(Iterable> input) { * Returns shape of tensors. * This operation returns N 1-D integer tensors representing shape of {@code input[i]s}. * - * @param data type for {@code output} output * @param input The input value * @param outType The value of the outType attribute * @param data type for {@code ShapeN} output and operands @@ -6477,7 +6362,6 @@ public ShapeN shapeN(Iterable> i * size(t) ==> 12 * * - * @param data type for {@code output} output * @param input The input value * @return a new instance of Size, with default output types */ @@ -6495,7 +6379,6 @@ public Size size(Operand input) { * size(t) ==> 12 * * - * @param data type for {@code output} output * @param input The input value * @param outType The value of the outType attribute * @param data type for {@code Size} output and operands @@ -6525,7 +6408,6 @@ public Skipgram skipgram(String filename, Long batchSize, Skipgram.Options... op *

Requirements: * 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n) * - * @param data type for {@code output} output * @param input The input value * @param begin begin[i] specifies the offset into the 'i'th dimension of * 'input' to slice from. @@ -6545,7 +6427,6 @@ public Slice slice(Operand input, Ope /** * Returns a copy of the input tensor. * - * @param data type for {@code output} output * @param input The input value * @param data type for {@code Snapshot} output and operands * @return a new instance of Snapshot @@ -6653,7 +6534,6 @@ public Snapshot snapshot(Operand input) { *
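
Slice, as documented above, in a minimal form (same assumptions):

// Take a [1, 2] window starting at row 0, column 1 of a [2, 3] tensor -> [[2, 3]].
Operand<TInt32> m = tf.constant(new int[][] {{1, 2, 3}, {4, 5, 6}});
Operand<TInt32> window = tf.slice(m, tf.constant(new int[] {0, 1}), tf.constant(new int[] {1, 2}));
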

Among others, this operation is useful for reducing atrous convolution into * regular convolution. * - * @param data type for {@code output} output * @param input N-D with shape {@code input_shape = [batch] + spatial_shape + remaining_shape}, * where spatial_shape has {@code M} dimensions. * @param blockShape 1-D with shape {@code [M]}, all values must be >= 1. @@ -6672,7 +6552,6 @@ public SpaceToBatchNd spaceToBatchNd(Operand input, /** * Splits a tensor into {@code num_split} tensors along one dimension. * - * @param data type for {@code output} output * @param axis 0-D. The dimension along which to split. Must be in the range * {@code [-rank(value), rank(value))}. * @param value The tensor to split. @@ -6688,7 +6567,6 @@ public Split split(Operand axis, Operand value, /** * Splits a tensor into {@code num_split} tensors along one dimension. * - * @param data type for {@code output} output * @param value The tensor to split. * @param sizeSplits list containing the sizes of each output tensor along the split * dimension. Must sum to the dimension of value along split_dim. @@ -6721,7 +6599,6 @@ public SplitV splitV(Operand value, Operand * - * @param data type for {@code output} output * @param input The {@code input} to squeeze. * @param options carries optional attribute values * @param data type for {@code Squeeze} output and operands @@ -6749,7 +6626,6 @@ public Squeeze squeeze(Operand input, Squeeze.Options... * *

This is the opposite of {@code unpack}. * - * @param data type for {@code output} output * @param values Must be of same shape and type. * @param options carries optional attribute values * @param data type for {@code Pack} output and operands @@ -6787,7 +6663,6 @@ public StackCreate stackCreate(Operand maxSize, Class< /** * Pop the element at the top of the stack. * - * @param data type for {@code elem} output * @param handle The handle to a stack. * @param elemType The type of the elem that is popped. * @param data type for {@code StackPopV2} output and operands @@ -6801,7 +6676,6 @@ public StackPop stackPop(Operand handle, /** * Push an element onto the stack. * - * @param data type for {@code output} output * @param handle The handle to a stack. * @param elem The tensor to be pushed onto the stack. * @param options carries optional attribute values @@ -7083,7 +6957,6 @@ public StatelessWhile statelessWhile(Iterable> input, ConcreteFunctio * The values are cast with a deterministic pseudo-random tensor from a uniform distribution generated from user given key, counter, algorithm. Values will saturate if out of the specified integer type range, and will become zero if inputs are NaN. *
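
Stack (the Pack op described above) packs inputs along a new leading axis; an illustrative call with made-up values:

// Pack two [2]-shaped tensors along a new axis 0 -> shape [2, 2].
Operand<TInt32> x = tf.constant(new int[] {1, 4});
Operand<TInt32> y = tf.constant(new int[] {2, 5});
Operand<TInt32> packed = tf.stack(java.util.Arrays.asList(x, y)); // [[1, 4], [2, 5]]
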

The outputs are a deterministic function of {@code input}, {@code key}, {@code counter}, {@code alg}. * - * @param data type for {@code output} output * @param input The operand to stochastically cast to int. * @param key Key for the counter-based RNG algorithm (shape uint64[1]). * @param counter Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. @@ -7151,7 +7024,6 @@ public StochasticCastToInt stochasticCastToInt( * example generation process. * * - * @param data type for {@code output} output * @param input The input value * @param data type for {@code StopGradient} output and operands * @return a new instance of StopGradient @@ -7169,16 +7041,17 @@ public StopGradient stopGradient(Operand input) { * equal to `n`, but this need not be the case. Each range specification entry can be one of the * following: * - *

- An ellipsis (...) using {@link Indices#ellipsis()}. Ellipses are used to imply zero or - * more dimensions of full-dimension selection. For example, {@code stridedSlice(foo, - * Indices.ellipsis()} is the identity slice. + *

- An ellipsis (...) using {@link org.tensorflow.ndarray.index.Indices#ellipsis()}. Ellipses + * are used to imply zero or more dimensions of full-dimension selection. For example, {@code + * stridedSlice(foo, Indices.ellipsis()} is the identity slice. * - *

- A new axis using {@link Indices#newAxis()}. This is used to insert a new shape=1 - * dimension. For example, `{@code stridedSlice(foo, Indices.newAxis())} where {@code foo} is - * shape {@code (3, 4)} produces a {@code (1, 3, 4)} tensor. + *

- A new axis using {@link org.tensorflow.ndarray.index.Indices#newAxis()}. This is used to + * insert a new shape=1 dimension. For example, `{@code stridedSlice(foo, Indices.newAxis())} + * where {@code foo} is shape {@code (3, 4)} produces a {@code (1, 3, 4)} tensor. * - *

- A range {@code begin:end:stride} using {@link Indices#slice(Long, Long, long)} - * Index.slice()} or {@link Indices#all()}. This is used to specify how much to choose from a + *

- A range {@code begin:end:stride} using {@link + * org.tensorflow.ndarray.index.Indices#slice(Long, Long, long)} Index.slice()} or {@link + * org.tensorflow.ndarray.index.Indices#all()}. This is used to specify how much to choose from a * given dimension. {@code stride} can be any integer but 0. {@code begin} is an integer which * represents the index of the first value to select while {@code end} represents the index of the * last value to select (exclusive). Begin and end can be null, in which case the index begins or @@ -7195,10 +7068,11 @@ public StopGradient stopGradient(Operand input) { * elements). For example {@code foo = [1,2,3,4]; stridedSlice(foo, Indices.slice(-2, null, -1)} * is {@code [4,3]}. * - *

- A single index using {@link Indices#at(long)}. This is used to keep only elements that - * have a given index. For example ({@code stridedSlice(foo, Indices.at(2))} on a shape {@code - * (5,6)} tensor produces a shape {@code (6,)} tensor. The dimension can be kept with size one - * using {@link Indices#at(long, boolean)}. + *

- A single index using {@link org.tensorflow.ndarray.index.Indices#at(long)}. This is used + * to keep only elements that have a given index. For example ({@code stridedSlice(foo, + * Indices.at(2))} on a shape {@code (5,6)} tensor produces a shape {@code (6,)} tensor. The + * dimension can be kept with size one using {@link org.tensorflow.ndarray.index.Indices#at(long, + * boolean)}. * *

These semantics generally follow NumPy's indexing semantics, which can be found here: https://numpy.org/doc/stable/reference/arrays.indexing.html @@ -7206,9 +7080,9 @@ public StopGradient stopGradient(Operand input) { *

Requirements: `0 != strides[i] for i in [0, m)` Only one ellipsis. * * @param data type for {@code output()} output - * @param indices The indices to slice. See {@link Indices}. + * @param indices The indices to slice. See {@link org.tensorflow.ndarray.index.Indices}. * @return a new instance of StridedSlice - * @see Indices + * @see org.tensorflow.ndarray.index.Indices */ public StridedSlice stridedSlice(Operand input, Index... indices) { return StridedSliceHelper.stridedSlice(scope, input, indices); @@ -7314,7 +7188,6 @@ public StridedSlice stridedSlice(Operand input, Index... * {@code 0 != strides[i] for i in [0, m)} * {@code ellipsis_mask must be a power of two (only one ellipsis)} * - * @param data type for {@code output} output * @param input The input value * @param begin {@code begin[k]} specifies the offset into the {@code k}th range specification. * The exact dimension this corresponds to will be determined by context. @@ -7351,9 +7224,10 @@ public StridedSlice stridedSlice(Operand * @param data type for {@code outputRef()} output * @param ref the tensor to assign to. * @param value the value to assign. - * @param indices The indices to slice. See {@link Indices}. + * @param indices The indices to slice. See {@link org.tensorflow.ndarray.index.Indices}. * @return a new instance of StridedSliceAssign - * @see org.tensorflow.op.Ops#stridedSlice(Operand, Index...) + * @see org.tensorflow.op.Ops#stridedSlice(org.tensorflow.Operand, + * org.tensorflow.ndarray.index.Index...) */ public StridedSliceAssign stridedSliceAssign(Operand ref, Operand value, Index... indices) { @@ -7368,7 +7242,6 @@ public StridedSliceAssign stridedSliceAssign(Operand ref *
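[Editor's note] To make the {@code Indices}-based slicing spelled out in the hunks above concrete, here is a minimal usage sketch of the {@code stridedSlice(Operand, Index...)} helper they document. It is an illustration only, not part of the diff; the eager {@code Ops.create()} setup and the sample tensor values are assumed.

    import org.tensorflow.Operand;
    import org.tensorflow.ndarray.index.Indices;
    import org.tensorflow.op.Ops;
    import org.tensorflow.op.core.StridedSlice;
    import org.tensorflow.types.TInt32;

    Ops tf = Ops.create();  // eager execution, assumed for illustration
    Operand<TInt32> foo = tf.constant(new int[][] {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});  // shape (3, 3)

    // Single index: keep only row 1 -> shape (3,), values [4, 5, 6]
    StridedSlice<TInt32> row = tf.stridedSlice(foo, Indices.at(1));

    // Range with null bounds and a negative stride: reverse the columns of every row
    StridedSlice<TInt32> reversed = tf.stridedSlice(foo, Indices.all(), Indices.slice(null, null, -1));

    // New axis: the result has shape (1, 3, 3)
    StridedSlice<TInt32> expanded = tf.stridedSlice(foo, Indices.newAxis());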

NOTE this op currently does not support broadcasting and so {@code value}'s * shape must be exactly the shape produced by the slice of {@code ref}. * - * @param data type for {@code output_ref} output * @param ref The ref value * @param begin The begin value * @param end The end value @@ -7395,7 +7268,6 @@ public StridedSliceAssign stridedSliceAs * {@code dy} is the input gradient to be propagated and {@code shape} is the * shape of {@code StridedSlice}'s {@code input}. * - * @param data type for {@code output} output * @param shape The shape value * @param begin The begin value * @param end The end value @@ -7419,7 +7291,6 @@ public StridedSliceGrad stridedSliceGrad * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. * - * @param data type for {@code output} output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range * {@code [-rank(input), rank(input))}. @@ -7438,7 +7309,6 @@ public Sum sum(Operand input, Operand * the data goes to {@code output_false}. *
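[Editor's note] Since the {@code Sum} hunk above describes axis-wise reduction with an optional {@code keep_dims}, a short sketch may help. The {@code Sum.keepDims} option factory and the eager setup are assumptions about the generated API, not stated in the diff.

    import org.tensorflow.Operand;
    import org.tensorflow.op.Ops;
    import org.tensorflow.op.core.Sum;
    import org.tensorflow.types.TInt32;

    Ops tf = Ops.create();
    Operand<TInt32> m = tf.constant(new int[][] {{1, 2, 3}, {4, 5, 6}});

    // Reduce along axis 0 (down the rows): [5, 7, 9]
    Sum<TInt32> colSums = tf.sum(m, tf.constant(0));

    // Reduce along axis 1 and keep the reduced dimension with length 1: [[6], [15]]
    Sum<TInt32> rowSums = tf.sum(m, tf.constant(1), Sum.keepDims(true));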

See also {@code RefSwitch} and {@code Merge}. * - * @param data type for {@code output_false} output * @param data The tensor to be forwarded to the appropriate output. * @param pred A scalar that specifies which output port will receive data. * @param data type for {@code Switch} output and operands @@ -7473,7 +7343,6 @@ public SyncDevice syncDevice() { * var = state_ops.assign_add(var, [[6.0, 7.0]]) * final = state_ops._destroy_temporary_variable(var, var_name=var_name) * - * @param data type for {@code ref} output * @param shape The shape of the variable tensor. * @param dtype The type of elements in the variable tensor. * @param options carries optional attribute values @@ -7524,7 +7393,6 @@ public TensorArrayClose tensorArrayClose(Operand handle) { * *

All elements must have the same shape (excepting the first dimension). * - * @param data type for {@code value} output * @param handle The handle to a TensorArray. * @param flowIn A float scalar that enforces proper chaining of operations. * @param dtype The type of the elem that is returned. @@ -7541,7 +7409,6 @@ public TensorArrayConcat tensorArrayConcat(Operand data type for {@code value} output * @param handle The handle to a TensorArray. * @param indices The locations in the TensorArray from which to read tensor elements. * @param flowIn A float scalar that enforces proper chaining of operations. @@ -7622,7 +7489,6 @@ public TensorArrayGradWithShape tensorArrayGradWithShape(Operand data type for {@code value} output * @param handle The handle value * @param flowIn The flowIn value * @param dtype The value of the dtype attribute @@ -7638,7 +7504,6 @@ public TensorArrayPack tensorArrayPack(Operand han /** * Read an element from the TensorArray into output {@code value}. * - * @param data type for {@code value} output * @param handle The handle to a TensorArray. * @param index The index value * @param flowIn A float scalar that enforces proper chaining of operations. @@ -7750,7 +7615,6 @@ public TensorArrayWrite tensorArrayWrite(Operand handle, Operan * tensor: The concated result. * lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient. * - * @param data type for {@code tensor} output * @param inputHandle The inputHandle value * @param elementShape The elementShape value * @param leadingDims The leadingDims value @@ -7783,7 +7647,6 @@ public TensorListConcatLists tensorListConcatLists( * input_handle: the list * element_shape: the shape of elements of the list * - * @param data type for {@code element_shape} output * @param inputHandle The inputHandle value * @param shapeType The value of the shapeType attribute * @param data type for {@code TensorListElementShape} output and operands @@ -7817,7 +7680,6 @@ public TensorListFromTensor tensorListFromTensor(Operand tensor * indices: The indices used to index into the list. * values: The tensor. * - * @param data type for {@code values} output * @param inputHandle The inputHandle value * @param indices The indices value * @param elementShape The elementShape value @@ -7837,7 +7699,6 @@ public TensorListGather tensorListGather( * index: the position in the list from which an element will be retrieved * item: the element at that position * - * @param data type for {@code item} output * @param inputHandle The inputHandle value * @param index The index value * @param elementShape The elementShape value @@ -7871,7 +7732,6 @@ public TensorListLength tensorListLength(Operand inputHandle) { * element_dtype: the type of elements in the list * element_shape: the shape of the output tensor * - * @param data type for {@code tensor} output * @param inputHandle The inputHandle value * @param elementShape The elementShape value * @param elementDtype The value of the elementDtype attribute @@ -8033,7 +7893,6 @@ public TensorListSplit tensorListSplit(Operand tensor, * tensor: the gathered result * num_elements: optional. If not -1, the number of elements in the list. 
* - * @param data type for {@code tensor} output * @param inputHandle The inputHandle value * @param elementShape The elementShape value * @param elementDtype The value of the elementDtype attribute @@ -8101,7 +7960,6 @@ public TensorMapInsert tensorMapInsert(Operand inputHandle, * key: the key to be looked up * value: the value found from the given key * - * @param data type for {@code value} output * @param inputHandle The inputHandle value * @param key The key value * @param valueDtype The value of the valueDtype attribute @@ -8130,7 +7988,6 @@ public TensorMapSize tensorMapSize(Operand inputHandle) { * input_handle: the input map * keys: the returned Tensor of all keys in the map * - * @param data type for {@code keys} output * @param inputHandle The inputHandle value * @param keyDtype The value of the keyDtype attribute * @param data type for {@code TensorMapStackKeys} output and operands @@ -8204,7 +8061,6 @@ public TensorMapStackKeys tensorMapStackKeys( *

Note: on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, the index is ignored. * - * @param data type for {@code output} output * @param tensor Tensor to copy/update. * @param indices Index tensor. * @param updates Updates to scatter into output. @@ -8233,7 +8089,6 @@ public TensorScatterNdAdd tensorScatterNdAdd(Operand ten * *

Refer to {@code tf.tensor_scatter_nd_update} for more details. * - * @param data type for {@code output} output * @param tensor Tensor to update. * @param indices Index tensor. * @param updates Updates to scatter into output. @@ -8248,7 +8103,6 @@ public TensorScatterNdMax tensorScatterNdMax(Operand ten /** * The TensorScatterMin operation * - * @param data type for {@code output} output * @param tensor Tensor to update. * @param indices Index tensor. * @param updates Updates to scatter into output. @@ -8318,7 +8172,6 @@ public TensorScatterNdMin tensorScatterNdMin(Operand ten *

Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, the index is ignored. * - * @param data type for {@code output} output * @param tensor Tensor to copy/update. * @param indices Index tensor. * @param updates Updates to scatter into output. @@ -8362,7 +8215,6 @@ public TensorScatterNdSub tensorScatterNdSub(Operand ten * *

For usage examples see the python tf.tensor_scatter_nd_update {@link org.tensorflow.op.Ops#tensorScatterNdUpdate} function * - * @param data type for {@code output} output * @param tensor Tensor to copy/update. * @param indices Index tensor. * @param updates Updates to scatter into output. @@ -8382,7 +8234,6 @@ public TensorScatterNdUpdate tensorScatterNdUpdate(Operand< *
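[Editor's note] As a concrete companion to the {@code TensorScatterNdUpdate} hunk above (and the add/sub/min/max variants before it), here is a small sketch; the tensor values are invented for illustration and the eager setup is assumed.

    import org.tensorflow.Operand;
    import org.tensorflow.op.Ops;
    import org.tensorflow.op.core.TensorScatterNdUpdate;
    import org.tensorflow.types.TInt32;

    Ops tf = Ops.create();
    Operand<TInt32> tensor  = tf.constant(new int[] {0, 0, 0, 0, 0, 0, 0, 0});
    Operand<TInt32> indices = tf.constant(new int[][] {{1}, {4}, {6}});
    Operand<TInt32> updates = tf.constant(new int[] {9, 10, 11});

    // Copies `tensor` and writes each update at its index: [0, 9, 0, 0, 10, 0, 11, 0]
    TensorScatterNdUpdate<TInt32> out = tf.tensorScatterNdUpdate(tensor, indices, updates);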

NOTE this op currently does not support broadcasting and so {@code value}'s shape * must be exactly the shape produced by the slice of {@code input}. * - * @param data type for {@code output} output * @param input The input value * @param begin The begin value * @param end The end value @@ -8433,7 +8284,6 @@ public TensorStridedSliceUpdate tensorSt * * * - * @param data type for {@code output} output * @param input Can be of any rank. * @param multiples 1-D. Length must be the same as the number of dimensions in {@code input} * @param data type for {@code Tile} output and operands @@ -8522,7 +8372,6 @@ public TopKWithUnique topKWithUnique(Operand input, Long k) { * assumed to possibly belong to the same batch. If left empty, the op name will * be used as the shared name. * - * @param data type for {@code unbatched_tensor} output * @param batchedTensor The batchedTensor value * @param batchIndex The batchIndex value * @param id The id value @@ -8552,7 +8401,6 @@ public Unbatch unbatch(Operand batchedTensor, Operand data type for {@code batched_grad} output * @param originalInput The originalInput value * @param batchIndex The batchIndex value * @param grad The grad value @@ -8573,7 +8421,6 @@ public UnbatchGrad unbatchGrad(Operand originalInput, * If quantization_axis is -1 (per-tensor quantized), the entire operand is clipped using scalar min, max. * Otherwise (per-channel quantized), the clipping is also done per-channel. * - * @param data type for {@code output} output * @param operand Must be a Tensor of T. * @param min The min value(s) to clip operand. Must be a Tensor of T. * Must be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (operand.dim_size(quantization_axis),) (per-axis quantization). @@ -8635,8 +8482,6 @@ public UniformQuantizedClipByValue uniformQuantizedClipBy * idx ==> [0, 1, 1] * * - * @param data type for {@code y} output - * @param data type for {@code idx} output * @param x A {@code Tensor}. * @param axis A {@code Tensor} of type {@code int32} (default: None). The axis of the Tensor to * find the unique elements. @@ -8686,8 +8531,6 @@ public Unique unique(Operand x, Operand * - * @param data type for {@code y} output - * @param data type for {@code idx} output * @param x A {@code Tensor}. * @param axis A {@code Tensor} of type {@code int32} (default: None). The axis of the Tensor to * find the unique elements. @@ -8744,8 +8587,6 @@ public Unique unique(Operand x, * count ==> [1, 2] * * - * @param data type for {@code y} output - * @param data type for {@code idx} output * @param x A {@code Tensor}. * @param axis A {@code Tensor} of type {@code int32} (default: None). The axis of the Tensor to * find the unique elements. @@ -8800,8 +8641,6 @@ public UniqueWithCounts uniqueWithCounts(Operand * count ==> [1, 2] * * - * @param data type for {@code y} output - * @param data type for {@code idx} output * @param x A {@code Tensor}. * @param axis A {@code Tensor} of type {@code int32} (default: None). The axis of the Tensor to * find the unique elements. @@ -8835,7 +8674,6 @@ public UniqueWithCounts uniqueWithCou * Equivalent to np.unravel_index *
{@literal @}end_compatibility * - * @param data type for {@code output} output * @param indices An 0-D or 1-D {@code int} Tensor whose elements are indices into the * flattened version of an array of dimensions dims. * @param dims An 1-D {@code int} Tensor. The shape of the array to use for unraveling @@ -8859,7 +8697,6 @@ public UnravelIndex unravelIndex(Operand indices, Oper * Etc. *
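[Editor's note] The {@code Unique}/{@code UniqueWithCounts} hunks above show the y/idx (and count) outputs; this is a minimal sketch, assuming the generated accessors follow the output names and passing axis 0 explicitly.

    import org.tensorflow.Operand;
    import org.tensorflow.op.Ops;
    import org.tensorflow.op.core.Unique;
    import org.tensorflow.types.TInt32;

    Ops tf = Ops.create();
    Operand<TInt32> x = tf.constant(new int[] {1, 1, 2, 4, 4, 4, 7, 8, 8});

    Unique<TInt32, TInt32> u = tf.unique(x, tf.constant(0));
    Operand<TInt32> y   = u.y();    // [1, 2, 4, 7, 8]             -- the unique values
    Operand<TInt32> idx = u.idx();  // [0, 0, 1, 2, 2, 2, 3, 4, 4] -- position of each x[i] in y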

This is the opposite of {@code pack}. * - * @param data type for {@code output} output * @param value 1-D or higher, with {@code axis} dimension size equal to {@code num}. * @param num The value of the num attribute * @param options carries optional attribute values @@ -8900,7 +8737,6 @@ public Unstage unstage(List> dtypes, Unstage.Options... o *

result == [[1, 2, 4], * [0, 2, 5]] * - * @param data type for {@code output} output * @param sortedInputs 2-D Tensor where each row is ordered. * @param values 2-D Tensor with the same numbers of rows as {@code sorted_search_values}. Contains * the values that will be searched for in {@code sorted_search_values}. @@ -8928,7 +8764,6 @@ public UpperBound upperBound(Operand sortedInputs, *

result == [[1, 2, 4], * [0, 2, 5]] * - * @param data type for {@code output} output * @param sortedInputs 2-D Tensor where each row is ordered. * @param values 2-D Tensor with the same numbers of rows as {@code sorted_search_values}. Contains * the values that will be searched for in {@code sorted_search_values}. @@ -8988,7 +8823,6 @@ public Variable variable(Operand init, Variable.Options. * TODO(zhifengc/mrry): Adds a pointer to a more detail document * about sharing states in tensorflow. * - * @param data type for {@code ref} output * @param shape The shape of the variable tensor. * @param dtype The type of elements in the variable tensor. * @param options carries optional attribute values @@ -9009,7 +8843,6 @@ public Variable variable(Shape shape, Class dtype, * shape(t) ==> [2, 2, 3] * * - * @param data type for {@code output} output * @param input The input value * @return a new instance of VariableShape, with default output types */ @@ -9026,7 +8859,6 @@ public VariableShape variableShape(Operand input) { * shape(t) ==> [2, 2, 3] * * - * @param data type for {@code output} output * @param input The input value * @param outType The value of the outType attribute * @param data type for {@code VariableShape} output and operands @@ -9147,7 +8979,6 @@ public Zeros zeros(Operand dims, Class data type for {@code y} output * @param x a tensor of type T. * @param data type for {@code ZerosLike} output and operands * @return a new instance of ZerosLike diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/QuantizationOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/QuantizationOps.java index 99f3648ea27..c01f6462dac 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/QuantizationOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/QuantizationOps.java @@ -107,7 +107,6 @@ public final class QuantizationOps { * max_range / max_expected_T); * * - * @param data type for {@code output} output * @param input The input value * @param minRange The minimum scalar value possibly produced for the input. * @param maxRange The maximum scalar value possibly produced for the input. @@ -165,7 +164,6 @@ public Dequantize dequantize(Operand input, * max_range / max_expected_T); * * - * @param data type for {@code output} output * @param input The input value * @param minRange The minimum scalar value possibly produced for the input. * @param maxRange The maximum scalar value possibly produced for the input. @@ -456,7 +454,6 @@ public FakeQuantWithMinMaxVarsPerChannelGradient fakeQuantWithMinMaxVarsPerChann * The legacy default value for this is 0.01, but it is strongly suggested to * set it to 0 for new uses. * - * @param data type for {@code output} output * @param input The input value * @param minRange The minimum value of the quantization range. This value may be adjusted by the * op depending on other parameters. The adjusted value is written to {@code output_min}. @@ -482,7 +479,6 @@ public Quantize quantize(Operand input, * This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a * tensor, so its value can change during training. 
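[Editor's note] Reusing the very data quoted in the {@code UpperBound} hunks above, a usage sketch; the eager setup is assumed and the default index type is taken to be int32.

    import org.tensorflow.Operand;
    import org.tensorflow.op.Ops;
    import org.tensorflow.op.core.UpperBound;
    import org.tensorflow.types.TInt32;

    Ops tf = Ops.create();
    Operand<TInt32> sortedInputs = tf.constant(new int[][] {{0, 3, 9, 9, 10}, {1, 2, 3, 4, 5}});
    Operand<TInt32> values       = tf.constant(new int[][] {{2, 4, 9}, {0, 2, 6}});

    // Per row, the index of the first element strictly greater than the searched value:
    // result == [[1, 2, 4], [0, 2, 5]]
    UpperBound<TInt32> result = tf.upperBound(sortedInputs, values);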
* - * @param data type for {@code output} output * @param input The input value * @param inputMin The inputMin value * @param inputMax The inputMax value @@ -502,7 +498,6 @@ public QuantizeAndDequantize quantizeAndDequantize(Operan * This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a * tensor, so its value can change during training. * - * @param data type for {@code output} output * @param input The input value * @param inputMin The inputMin value * @param inputMax The inputMax value @@ -522,7 +517,6 @@ public QuantizeAndDequantizeV3 quantizeAndDequantizeV3(Op * This is almost identical to QuantizeAndDequantizeV2, except that it returns a * gradient of 1 for inputs that are within the quantization range, or 0 otherwise. * - * @param data type for {@code output} output * @param input Tensor to quantize and then dequantize. * @param inputMin If {@code range_given == True}, this specifies the minimum input value that needs to * be represented, otherwise it is determined from the min value of the {@code input} @@ -544,7 +538,6 @@ public QuantizeAndDequantizeV4 quantizeAndDequantizeV4(Op * Returns a gradient of 1 for inputs that are within the quantization range, * or 0 otherwise. * - * @param data type for {@code input_backprop} output * @param gradients The gradients value * @param input The input value * @param inputMin The inputMin value @@ -581,7 +574,6 @@ public QuantizeAndDequantizeV4Grad quantizeAndDequantizeV * that output into this operator, we can reduce it from 32 bits down to 8 with * minimal loss of accuracy. * - * @param data type for {@code output} output * @param input The input value * @param inputMin The float value that the minimum quantized input value represents. * @param inputMax The float value that the maximum quantized input value represents. @@ -598,7 +590,6 @@ public QuantizeDownAndShrinkRange quantizeDownAndShrinkRa /** * Concatenates quantized tensors along one dimension. * - * @param data type for {@code output} output * @param concatDim 0-D. The dimension along which to concatenate. Must be in the * range [0, rank(values)). * @param values The {@code N} Tensors to concatenate. Their ranks and types must match, @@ -617,7 +608,6 @@ public QuantizedConcat quantizedConcat(Operand conc /** * The QuantizedMatMulWithBiasAndDequantize operation * - * @param data type for {@code out} output * @param a The a value * @param b The b value * @param bias The bias value @@ -644,7 +634,6 @@ public QuantizedMatMulWithBiasAndDequantize quantizedMatM /** * The QuantizedMatMulWithBiasAndRequantize operation * - * @param data type for {@code out} output * @param a The a value * @param b The b value * @param bias The bias value @@ -694,7 +683,6 @@ public RequantizationRange requantizationRange(Operand input, * {@code input_max} is 1.0f, and we are dealing with {@code quint16} quantized data, then a 0 * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. * - * @param data type for {@code output} output * @param input The input value * @param inputMin The float value that the minimum quantized input value represents. * @param inputMax The float value that the maximum quantized input value represents. @@ -715,7 +703,6 @@ public Requantize requantize(Operand i * Given quantized {@code input} which was quantized using {@code scales} and {@code zero_points}, performs dequantization using the formula: * dequantized_data = (quantized_data - zero_point) * scale. 
* - * @param data type for {@code output} output * @param input Must be a Tensor of Tin. * @param scales The float value(s) used as scale(s) when quantizing original data that input represents. * Must be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (input.dim_size(quantization_axis),) (per-axis quantization). @@ -746,7 +733,6 @@ public UniformDequantize uniformDequantize( * Given {@code input}, {@code scales} and {@code zero_points}, performs quantization using the formula: * quantized_data = floor(input_data * (1.0f / scale) + 0.5f) + zero_point * - * @param data type for {@code output} output * @param input Must be a Tensor of Tin. * @param scales The float value(s) to use as scale(s) to quantize {@code input}. * Must be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (input.dim_size(quantization_axis),) (per-axis quantization). @@ -780,7 +766,6 @@ public UniformQuantize uniformQuantize(Operand data type for {@code output} output * @param lhs Must be a 2D Tensor of Tin. * @param rhs Must be a 2D Tensor of Tin. * @param lhsScales The float value(s) used as scale when quantizing original data that lhs represents. @@ -833,7 +818,6 @@ public UniformQuantizedDot uniformQuan * {@code rhs} must be quantized Tensor, where its data value is quantized using the formula: * quantized_data = clip(original_data / scale + zero_point, quantization_min_val, quantization_max_val). * - * @param data type for {@code output} output * @param lhs Must be a 2D Tensor of Tlhs. * @param rhs Must be a 2D Tensor of Trhs. * @param rhsScales The float value(s) used as scale when quantizing original data that rhs represents. @@ -873,7 +857,6 @@ public UniformQuantizedDotHybrid uniformQuantizedDotHybri * i.e. At least one among input_quantization_axis and output_quantization_axis must be -1, or two must be equal. * * - * @param data type for {@code output} output * @param input Must be a Tensor of Tin. * @param inputScales The float value(s) used as scale(s) when quantizing original data that {@code input} represents. * Must be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (input.dim_size(quantization_axis),) (per-axis quantization). diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/RaggedOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/RaggedOps.java index 83bf63f461f..43b18f0cf57 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/RaggedOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/RaggedOps.java @@ -60,7 +60,6 @@ public final class RaggedOps { * {@code i}. *
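[Editor's note] To make the uniform quantize/dequantize formulas quoted in the {@code UniformQuantize}/{@code UniformDequantize} hunks above tangible, here is the round trip in plain Java arithmetic, with a per-tensor scale and zero point assumed purely for illustration.

    // Assumed per-tensor parameters (illustration only)
    float scale = 0.5f;
    int zeroPoint = 10;
    float original = 3.2f;

    // quantized_data = floor(input_data * (1.0f / scale) + 0.5f) + zero_point
    int quantized = (int) Math.floor(original * (1.0f / scale) + 0.5f) + zeroPoint;  // floor(6.9) + 10 = 16

    // dequantized_data = (quantized_data - zero_point) * scale
    float dequantized = (quantized - zeroPoint) * scale;  // (16 - 10) * 0.5 = 3.0, within scale/2 of 3.2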

Values in {@code arr} outside of the range [0, size) are ignored. * - * @param data type for {@code output} output * @param splits 1D int64 {@code Tensor}. * @param values 2D int {@code Tensor}. * @param sizeOutput non-negative int scalar {@code Tensor}. @@ -82,7 +81,6 @@ public RaggedBincount raggedBincount( * Performs sparse-output bin counting for a ragged tensor input. * Counts the number of times each value occurs in the input. * - * @param data type for {@code output_values} output * @param splits Tensor containing the row splits of the ragged tensor to count. * @param values Tensor containing values of the sparse tensor to count. * @param weights A Tensor of the same shape as indices containing per-index weight values. @@ -102,8 +100,6 @@ public RaggedCountSparseOutput raggedCountSparseOutput( * Generates a feature cross from a list of tensors, and returns it as a * RaggedTensor. See {@code tf.ragged.cross} for more details. * - * @param data type for {@code output_values} output - * @param data type for {@code output_row_splits} output * @param raggedValues The values tensor for each RaggedTensor input. * @param raggedRowSplits The row_splits tensor for each RaggedTensor input. * @param sparseIndices The indices tensor for each SparseTensor input. @@ -135,7 +131,6 @@ public RaggedCross raggedCross( /** * The RaggedFillEmptyRows operation * - * @param data type for {@code output_values} output * @param valueRowids The valueRowids value * @param values The values value * @param nrows The nrows value @@ -151,7 +146,6 @@ public RaggedFillEmptyRows raggedFillEmptyRows(Operand data type for {@code d_values} output * @param reverseIndexMap The reverseIndexMap value * @param gradValues The gradValues value * @param data type for {@code RaggedFillEmptyRowsGrad} output and operands @@ -183,8 +177,6 @@ public RaggedFillEmptyRowsGrad raggedFillEmptyRowsGrad( *

(Note: This c++ op is used to implement the higher-level python * {@code tf.ragged.gather} op, which also supports ragged indices.) * - * @param data type for {@code output_nested_splits} output - * @param data type for {@code output_dense_values} output * @param paramsNestedSplits The {@code nested_row_splits} tensors that define the row-partitioning for the * {@code params} RaggedTensor input. * @param paramsDenseValues The {@code flat_values} for the {@code params} RaggedTensor. There was a terminology change @@ -221,8 +213,6 @@ public RaggedGather raggedGather( * The vector inputs must all have the same size. Scalar inputs are broadcast * to match the size of the vector inputs. * - * @param data type for {@code rt_nested_splits} output - * @param data type for {@code rt_dense_values} output * @param starts The starts of each range. * @param limits The limits of each range. * @param deltas The deltas of each range. @@ -250,8 +240,6 @@ public RaggedRange raggedRange(Operand starts, * The vector inputs must all have the same size. Scalar inputs are broadcast * to match the size of the vector inputs. * - * @param data type for {@code rt_nested_splits} output - * @param data type for {@code rt_dense_values} output * @param starts The starts of each range. * @param limits The limits of each range. * @param deltas The deltas of each range. @@ -279,8 +267,6 @@ public RaggedRange raggedRange(Oper * inferred as {@code output_ragged_rank} - {@code rank(encoded_ragged)}. See * {@code RaggedTensorToVariant} for the corresponding encoding logic. * - * @param data type for {@code output_nested_splits} output - * @param data type for {@code output_dense_values} output * @param encodedRagged A {@code variant} Tensor containing encoded {@code RaggedTensor}s. * @param inputRaggedRank The ragged rank of each encoded {@code RaggedTensor} component in the input. If set to * -1, this is inferred as {@code output_ragged_rank} - {@code rank(encoded_ragged)} @@ -310,8 +296,6 @@ public RaggedTensorFromVariant raggedTensorFromVari * inferred as {@code output_ragged_rank} - {@code rank(encoded_ragged)}. See * {@code RaggedTensorToVariant} for the corresponding encoding logic. * - * @param data type for {@code output_nested_splits} output - * @param data type for {@code output_dense_values} output * @param encodedRagged A {@code variant} Tensor containing encoded {@code RaggedTensor}s. * @param inputRaggedRank The ragged rank of each encoded {@code RaggedTensor} component in the input. If set to * -1, this is inferred as {@code output_ragged_rank} - {@code rank(encoded_ragged)} @@ -335,7 +319,6 @@ public RaggedTensorFromVariant ragged * output=SparseTensor(indices=sparse_indices, values=sparse_values, * dense_shape=sparse_dense_shape) * - * @param data type for {@code sparse_values} output * @param rtNestedSplits The {@code row_splits} for the {@code RaggedTensor}. * @param rtDenseValues The {@code flat_values} for the {@code RaggedTensor}. * @param data type for {@code RaggedTensorToSparse} output and operands @@ -365,7 +348,6 @@ public RaggedTensorToSparse raggedTensorToSparse( * is preceded by "FIRST_DIM_SIZE". * * - * @param data type for {@code result} output * @param shape The desired shape of the output tensor. If left unspecified (empty), * the minimal shape required to contain all the elements in the ragged tensor * (the natural shape) will be used. 
If some dimensions are left unspecified, then @@ -438,7 +420,6 @@ public RaggedTensorToVariant raggedTensorToVariant( * the outer row-splits and the shape of the dense-values that were provided as * inputs to the RaggedTensorToVariant op. * - * @param data type for {@code dense_values_grad} output * @param encodedRaggedGrad A {@code variant} Tensor containing encoded {@code RaggedTensor} gradients. * @param rowSplits Outermost row-splits that were used as input to the RaggedTensorToVariant op. * @param denseValuesShape Shape of the dense_values that was used as an input to the diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/RandomExperimentalOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/RandomExperimentalOps.java index 09a2b385b6f..34d3585f270 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/RandomExperimentalOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/RandomExperimentalOps.java @@ -49,7 +49,6 @@ public final class RandomExperimentalOps { * *
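[Editor's note] The {@code RaggedRange} hunks above describe per-row ranges returned as nested row splits plus dense values; the sketch below assumes the default row-splits type is int64 and that the package and accessor names follow the generated naming convention.

    import org.tensorflow.Operand;
    import org.tensorflow.op.Ops;
    import org.tensorflow.op.ragged.RaggedRange;
    import org.tensorflow.types.TInt32;
    import org.tensorflow.types.TInt64;

    Ops tf = Ops.create();
    Operand<TInt32> starts = tf.constant(new int[] {2, 5, 8});
    Operand<TInt32> limits = tf.constant(new int[] {3, 5, 12});
    Operand<TInt32> deltas = tf.constant(new int[] {1, 1, 2});

    // Ragged result [[2], [], [8, 10]]
    RaggedRange<TInt64, TInt32> r = tf.ragged.raggedRange(starts, limits, deltas);
    Operand<TInt64> splits = r.rtNestedSplits();  // [0, 1, 1, 3]
    Operand<TInt32> values = r.rtDenseValues();   // [2, 8, 10]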

The outputs are a deterministic function of {@code value}, {@code key}, {@code counter} and {@code alg}. * - * @param data type for {@code output} output * @param value The tensor to be shuffled. * @param key Key for the counter-based RNG algorithm (shape uint64[1]). * @param counter Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/RandomOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/RandomOps.java index 3c62a3b57a1..c5ff9a489a0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/RandomOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/RandomOps.java @@ -203,7 +203,6 @@ public LogUniformCandidateSampler logUniformCandidateSampler(Operand tru /** * Draws samples from a multinomial distribution. * - * @param data type for {@code output} output * @param logits 2-D Tensor with shape {@code [batch_size, num_classes]}. Each slice {@code [i, :]} * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. @@ -218,7 +217,6 @@ public Multinomial multinomial(Operand logits, /** * Draws samples from a multinomial distribution. * - * @param data type for {@code output} output * @param logits 2-D Tensor with shape {@code [batch_size, num_classes]}. Each slice {@code [i, :]} * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. @@ -236,7 +234,6 @@ public Multinomial multinomial(Operand * Non-deterministically generates some integers. * This op may use some OS-provided source of non-determinism (e.g. an RNG), so each execution will give different results. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @return a new instance of NonDeterministicInts, with default output types */ @@ -248,7 +245,6 @@ public NonDeterministicInts nonDeterministicInts(Operand data type for {@code output} output * @param shape The shape of the output tensor. * @param dtype The type of the output. * @param data type for {@code NonDeterministicInts} output and operands @@ -264,7 +260,6 @@ public NonDeterministicInts nonDeterministicInts( * scalar which applies to the entire output, or a vector of length shape[0] which * stores the parameters for each batch. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. Batches are indexed by the 0th dimension. * @param means The mean parameter of each batch. * @param stdevs The standard deviation parameter of each batch. Must be greater than 0. @@ -287,7 +282,6 @@ public ParameterizedTruncatedNormal parameterizedTruncate * transformation-rejection from pairs of uniform and normal random variables. * See http://dl.acm.org/citation.cfm?id=358414 * - * @param data type for {@code output} output * @param shape 1-D integer tensor. Shape of independent samples to draw from each * distribution described by the shape parameters given in alpha. * @param alpha A tensor in which each scalar is a "shape" parameter describing the @@ -304,7 +298,6 @@ public RandomGamma randomGamma(Operand /** * Computes the derivative of a Gamma random sample w.r.t. {@code alpha}. 
* - * @param data type for {@code output} output * @param alpha The alpha value * @param sample The sample value * @param data type for {@code RandomGammaGrad} output and operands @@ -326,7 +319,6 @@ public RandomGammaGrad randomGammaGrad(Operand alpha, * See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer * Programming, Volume 2. Addison Wesley * - * @param data type for {@code output} output * @param shape 1-D integer tensor. Shape of independent samples to draw from each * distribution described by the shape parameters given in rate. * @param rate A tensor in which each scalar is a "rate" parameter describing the @@ -350,7 +342,6 @@ public RandomPoisson randomPoisson(Operand shape, * See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer * Programming, Volume 2. Addison Wesley * - * @param data type for {@code output} output * @param shape 1-D integer tensor. Shape of independent samples to draw from each * distribution described by the shape parameters given in rate. * @param rate A tensor in which each scalar is a "rate" parameter describing the @@ -376,7 +367,6 @@ public RandomPoisson randomPoisson(Operand * - * @param data type for {@code output} output * @param value The tensor to be shuffled. * @param options carries optional attribute values * @param data type for {@code RandomShuffle} output and operands @@ -391,7 +381,6 @@ public RandomShuffle randomShuffle(Operand value, * Outputs random values from a normal distribution. * The generated values will have mean 0 and standard deviation 1. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param dtype The type of the output. * @param options carries optional attribute values @@ -408,7 +397,6 @@ public RandomStandardNormal randomStandardNormal( * The generated values follow a uniform distribution in the range {@code [0, 1)}. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param dtype The type of the output. * @param options carries optional attribute values @@ -429,7 +417,6 @@ public RandomUniform randomUniform(Operand data type for {@code output} output * @param shape The shape of the output tensor. * @param minval 0-D. Inclusive lower bound on the generated integers. * @param maxval 0-D. Exclusive upper bound on the generated integers. @@ -491,7 +478,6 @@ public RngSkip rngSkip(Operand resource, Operand algori /** * The StatefulRandomBinomial operation * - * @param data type for {@code output} output * @param resource The resource value * @param algorithm The algorithm value * @param shape The shape value @@ -509,7 +495,6 @@ public StatefulRandomBinomial statefulRandomBinomial /** * The StatefulRandomBinomial operation * - * @param data type for {@code output} output * @param resource The resource value * @param algorithm The algorithm value * @param shape The shape value @@ -530,7 +515,6 @@ public StatefulRandomBinomial stateful * Outputs random values from a normal distribution. * The generated values will have mean 0 and standard deviation 1. * - * @param data type for {@code output} output * @param resource The handle of the resource variable that stores the state of the RNG. * @param algorithm The RNG algorithm. * @param shape The shape of the output tensor. 
@@ -545,7 +529,6 @@ public StatefulStandardNormal statefulStandardNormal(Operand data type for {@code output} output * @param resource The handle of the resource variable that stores the state of the RNG. * @param algorithm The RNG algorithm. * @param shape The shape of the output tensor. @@ -565,7 +548,6 @@ public StatefulStandardNormal statefulStandardNormal( * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. * - * @param data type for {@code output} output * @param resource The handle of the resource variable that stores the state of the RNG. * @param algorithm The RNG algorithm. * @param shape The shape of the output tensor. @@ -583,7 +565,6 @@ public StatefulTruncatedNormal statefulTruncatedNormal( * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. * - * @param data type for {@code output} output * @param resource The handle of the resource variable that stores the state of the RNG. * @param algorithm The RNG algorithm. * @param shape The shape of the output tensor. @@ -602,7 +583,6 @@ public StatefulTruncatedNormal statefulTruncatedNormal( * The generated values follow a uniform distribution in the range {@code [0, 1)}. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. * - * @param data type for {@code output} output * @param resource The handle of the resource variable that stores the state of the RNG. * @param algorithm The RNG algorithm. * @param shape The shape of the output tensor. @@ -618,7 +598,6 @@ public StatefulUniform statefulUniform(Operand resour * The generated values follow a uniform distribution in the range {@code [0, 1)}. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. * - * @param data type for {@code output} output * @param resource The handle of the resource variable that stores the state of the RNG. * @param algorithm The RNG algorithm. * @param shape The shape of the output tensor. @@ -635,7 +614,6 @@ public StatefulUniform statefulUniform(Operand data type for {@code output} output * @param resource The handle of the resource variable that stores the state of the RNG. * @param algorithm The RNG algorithm. * @param shape The shape of the output tensor. @@ -658,7 +636,6 @@ public StatefulUniformFullInt statefulUniformFullInt( * power of two. The bias is small for values of {@code maxval - minval} significantly * smaller than the range of the output (either {@code 2^32} or {@code 2^64}). * - * @param data type for {@code output} output * @param resource The handle of the resource variable that stores the state of the RNG. * @param algorithm The RNG algorithm. * @param shape The shape of the output tensor. @@ -676,7 +653,6 @@ public StatefulUniformInt statefulUniformInt( /** * Draws samples from a multinomial distribution. * - * @param data type for {@code output} output * @param logits 2-D Tensor with shape {@code [batch_size, num_classes]}. Each slice {@code [i, :]} * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. @@ -691,7 +667,6 @@ public StatelessMultinomial statelessMultinomial(Operand data type for {@code output} output * @param logits 2-D Tensor with shape {@code [batch_size, num_classes]}. Each slice {@code [i, :]} * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. 
Number of independent samples to draw for each row slice. @@ -709,7 +684,6 @@ public StatelessMultinomial statelessMultinomial( /** * The StatelessParameterizedTruncatedNormal operation * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). * @param means The mean parameter of each batch. @@ -731,7 +705,6 @@ public StatelessParameterizedTruncatedNormal statelessPar * Outputs random values from a binomial distribution. *

The outputs are a deterministic function of {@code shape}, {@code seed}, {@code counts}, and {@code probs}. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). * @param counts The counts of the binomial distribution. Must be broadcastable with {@code probs}, @@ -752,7 +725,6 @@ public StatelessRandomBinomial statelessRandomBinomi * Outputs random values from a binomial distribution. *

The outputs are a deterministic function of {@code shape}, {@code seed}, {@code counts}, and {@code probs}. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). * @param counts The counts of the binomial distribution. Must be broadcastable with {@code probs}, @@ -775,7 +747,6 @@ public StatelessRandomBinomial statele * Outputs random values from a gamma distribution. *

The outputs are a deterministic function of the inputs. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param key Key for the counter-based RNG algorithm (shape uint64[1]). * @param counter Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. @@ -830,7 +801,6 @@ public StatelessRandomGetKeyCounterAlg statelessRandomGetKeyCounterAlg( * The generated values will have mean 0 and standard deviation 1. *

The outputs are a deterministic function of {@code shape} and {@code seed}. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). * @return a new instance of StatelessRandomNormal, with default output types @@ -845,7 +815,6 @@ public StatelessRandomNormal statelessRandomNormal(OperandThe outputs are a deterministic function of {@code shape} and {@code seed}. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). * @param dtype The type of the output. @@ -862,7 +831,6 @@ public StatelessRandomNormal statelessRandomNormal( * The generated values will have mean 0 and standard deviation 1. *
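[Editor's note] Because the {@code StatelessRandomNormal} hunks above stress that the output is a pure function of shape and seed, a short determinism sketch follows; the eager setup and the int32 shape/seed constants are assumptions for illustration.

    import org.tensorflow.Operand;
    import org.tensorflow.op.Ops;
    import org.tensorflow.op.random.StatelessRandomNormal;
    import org.tensorflow.types.TFloat32;
    import org.tensorflow.types.TFloat64;
    import org.tensorflow.types.TInt32;

    Ops tf = Ops.create();
    Operand<TInt32> shape = tf.constant(new int[] {2, 3});
    Operand<TInt32> seed  = tf.constant(new int[] {42, 7});  // 2 seeds, shape [2]

    // Same shape + same seed -> identical pseudo-random values on every call
    StatelessRandomNormal<TFloat32> a = tf.random.statelessRandomNormal(shape, seed);
    StatelessRandomNormal<TFloat32> b = tf.random.statelessRandomNormal(shape, seed);

    // The dtype overload selects a different output type
    StatelessRandomNormal<TFloat64> c = tf.random.statelessRandomNormal(shape, seed, TFloat64.class);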

The outputs are a deterministic function of {@code shape}, {@code key}, {@code counter} and {@code alg}. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param key Key for the counter-based RNG algorithm (shape uint64[1]). * @param counter Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. @@ -879,7 +847,6 @@ public StatelessRandomNormalV2 statelessRandomNormalV2(OperandThe outputs are a deterministic function of {@code shape}, {@code key}, {@code counter} and {@code alg}. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param key Key for the counter-based RNG algorithm (shape uint64[1]). * @param counter Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. @@ -899,7 +866,6 @@ public StatelessRandomNormalV2 statelessRandomNormalV2( * Outputs random values from a Poisson distribution. *

The outputs are a deterministic function of {@code shape}, {@code seed}, and {@code lam}. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). * @param lam The rate of the Poisson distribution. Shape must match the rightmost dimensions @@ -920,7 +886,6 @@ public StatelessRandomPoisson statelessRandomPoisson( * lower bound 0 is included in the range, while the upper bound 1 is excluded. *

The outputs are a deterministic function of {@code shape} and {@code seed}. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). * @return a new instance of StatelessRandomUniform, with default output types @@ -936,7 +901,6 @@ public StatelessRandomUniform statelessRandomUniform(OperandThe outputs are a deterministic function of {@code shape} and {@code seed}. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). * @param dtype The type of the output. @@ -953,7 +917,6 @@ public StatelessRandomUniform statelessRandomUniform( * The generated values are uniform integers covering the whole range of {@code dtype}. *

The outputs are a deterministic function of {@code shape} and {@code seed}. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). * @param dtype The type of the output. @@ -970,7 +933,6 @@ public StatelessRandomUniformFullInt statelessRandomUnifo * The generated values are uniform integers covering the whole range of {@code dtype}. *

The outputs are a deterministic function of {@code shape}, {@code key}, {@code counter} and {@code alg}. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param key Key for the counter-based RNG algorithm (shape uint64[1]). * @param counter Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. @@ -990,7 +952,6 @@ public StatelessRandomUniformFullIntV2 statelessRandomUni * The generated values follow a uniform distribution in the range {@code [minval, maxval)}. *

The outputs are a deterministic function of {@code shape}, {@code seed}, {@code minval}, and {@code maxval}. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). * @param minval Minimum value (inclusive, scalar). @@ -1009,7 +970,6 @@ public StatelessRandomUniformInt statelessRandomUniformIn * The generated values follow a uniform distribution in the range {@code [minval, maxval)}. *

The outputs are a deterministic function of {@code shape}, {@code key}, {@code counter}, {@code alg}, {@code minval} and {@code maxval}. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param key Key for the counter-based RNG algorithm (shape uint64[1]). * @param counter Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. @@ -1031,7 +991,6 @@ public StatelessRandomUniformIntV2 statelessRandomUniform * lower bound 0 is included in the range, while the upper bound 1 is excluded. *

The outputs are a deterministic function of {@code shape}, {@code key}, {@code counter} and {@code alg}. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param key Key for the counter-based RNG algorithm (shape uint64[1]). * @param counter Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. @@ -1050,7 +1009,6 @@ public StatelessRandomUniformV2 statelessRandomUniformV2( * lower bound 0 is included in the range, while the upper bound 1 is excluded. *

The outputs are a deterministic function of {@code shape}, {@code key}, {@code counter} and {@code alg}. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param key Key for the counter-based RNG algorithm (shape uint64[1]). * @param counter Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. @@ -1072,7 +1030,6 @@ public StatelessRandomUniformV2 statelessRandomUniformV2( * deviations from the mean are dropped and re-picked. *

The outputs are a deterministic function of {@code shape} and {@code seed}. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). * @return a new instance of StatelessTruncatedNormal, with default output types @@ -1089,7 +1046,6 @@ public StatelessTruncatedNormal statelessTruncatedNormal( * deviations from the mean are dropped and re-picked. *

The outputs are a deterministic function of {@code shape} and {@code seed}. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). * @param dtype The type of the output. @@ -1108,7 +1064,6 @@ public StatelessTruncatedNormal statelessTruncatedNormal( * deviations from the mean are dropped and re-picked. *

The outputs are a deterministic function of {@code shape}, {@code key}, {@code counter} and {@code alg}. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param key Key for the counter-based RNG algorithm (shape uint64[1]). * @param counter Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. @@ -1128,7 +1083,6 @@ public StatelessTruncatedNormalV2 statelessTruncatedNormalV2( * deviations from the mean are dropped and re-picked. *

The outputs are a deterministic function of {@code shape}, {@code key}, {@code counter} and {@code alg}. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param key Key for the counter-based RNG algorithm (shape uint64[1]). * @param counter Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. @@ -1176,7 +1130,6 @@ public ThreadUnsafeUnigramCandidateSampler threadUnsafeUnigramCandidateSampler( * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. * - * @param data type for {@code output} output * @param shape The shape of the output tensor. * @param dtype The type of the output. * @param options carries optional attribute values diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ShapeOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ShapeOps.java index c9cdae676a4..68cb802f86d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ShapeOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ShapeOps.java @@ -388,7 +388,8 @@ public Operand tail(Shape shape, Class type) { * shape. * * @param shape the TensorFlow shape - * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() + * @param n the number of leading dimensions to get, must be less than or equal to the shape's + * numDimensions() * @return a 1-dimensional operand with the dimensions matching the first n dimensions of the * shape */ @@ -401,7 +402,8 @@ public Operand take(Shape shape, Operand n) { * shape. * * @param shape the TensorFlow shape - * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() + * @param n the number of leading dimensions to get, must be less than or equal to the shape's + * numDimensions() * @param type the shape datatype. * @param the shape datatype. * @return a 1-dimensional operand with the dimensions matching * the first n dimensions of the @@ -416,7 +418,8 @@ public Operand take(Shape shape, Operand n, Class Operand takeLast(Shape shape, Operand * shape. * * @param shape the TensorFlow shape - * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() + * @param n the number of leading dimensions to get, must be less than or equal to the shape's + * numDimensions() * @param type the shape datatype. * @param the shape datatype. * @return a 1-dimensional operand containing the dimensions matching the last n dimensions of the diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/SignalOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/SignalOps.java index 33e2cd4d920..ac5703c264a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/SignalOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/SignalOps.java @@ -125,7 +125,6 @@ public BatchIfft3d batchIfft3d(Operand input) { * Computes the 1-dimensional discrete Fourier transform over the inner-most * dimension of {@code input}. * - * @param data type for {@code output} output * @param input A complex tensor. 
* @param data type for {@code FFT} output and operands * @return a new instance of Fft @@ -139,7 +138,6 @@ public Fft fft(Operand input) { * Computes the 2-dimensional discrete Fourier transform over the inner-most * 2 dimensions of {@code input}. * - * @param data type for {@code output} output * @param input A complex tensor. * @param data type for {@code FFT2D} output and operands * @return a new instance of Fft2d @@ -153,7 +151,6 @@ public Fft2d fft2d(Operand input) { * Computes the 3-dimensional discrete Fourier transform over the inner-most 3 * dimensions of {@code input}. * - * @param data type for {@code output} output * @param input A complex tensor. * @param data type for {@code FFT3D} output and operands * @return a new instance of Fft3d @@ -173,7 +170,6 @@ public Fft3d fft3d(Operand input) { *

Axes mean the dimensions to perform the transform on. Default is to perform on * all axes. * - * @param data type for {@code output} output * @param input A complex tensor. * @param fftLength An int32 tensor. The FFT length for each dimension. * @param axes An int32 tensor with a same shape as fft_length. Axes to perform the transform. @@ -190,7 +186,6 @@ public FftNd fftNd(Operand input, Operand fftLen * Computes the inverse 1-dimensional discrete Fourier transform over the * inner-most dimension of {@code input}. * - * @param data type for {@code output} output * @param input A complex tensor. * @param data type for {@code IFFT} output and operands * @return a new instance of Ifft @@ -204,7 +199,6 @@ public Ifft ifft(Operand input) { * Computes the inverse 2-dimensional discrete Fourier transform over the * inner-most 2 dimensions of {@code input}. * - * @param data type for {@code output} output * @param input A complex tensor. * @param data type for {@code IFFT2D} output and operands * @return a new instance of Ifft2d @@ -218,7 +212,6 @@ public Ifft2d ifft2d(Operand input) { * Computes the inverse 3-dimensional discrete Fourier transform over the * inner-most 3 dimensions of {@code input}. * - * @param data type for {@code output} output * @param input A complex tensor. * @param data type for {@code IFFT3D} output and operands * @return a new instance of Ifft3d @@ -238,7 +231,6 @@ public Ifft3d ifft3d(Operand input) { *

Axes mean the dimensions to perform the transform on. Default is to perform on * all axes. * - * @param data type for {@code output} output * @param input A complex tensor. * @param fftLength An int32 tensor. The FFT length for each dimension. * @param axes An int32 tensor with a same shape as fft_length. Axes to perform the transform. @@ -264,7 +256,6 @@ public IfftNd ifftNd(Operand input, Operand fftL * than the corresponding dimension of {@code input}, the dimension is cropped. If it is * larger, the dimension is padded with zeros. * - * @param data type for {@code output} output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [1]. The FFT length. * @return a new instance of Irfft, with default output types @@ -287,7 +278,6 @@ public Irfft irfft(Operand input, Operand fft * than the corresponding dimension of {@code input}, the dimension is cropped. If it is * larger, the dimension is padded with zeros. * - * @param data type for {@code output} output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [1]. The FFT length. * @param Treal The value of the Treal attribute @@ -314,7 +304,6 @@ public Irfft irfft(Operand input, * corresponding dimension of {@code input}, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param data type for {@code output} output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. * @return a new instance of Irfft2d, with default output types @@ -338,7 +327,6 @@ public Irfft2d irfft2d(Operand input, Operand * corresponding dimension of {@code input}, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param data type for {@code output} output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. * @param Treal The value of the Treal attribute @@ -365,7 +353,6 @@ public Irfft2d irfft2d(Operand input, * corresponding dimension of {@code input}, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param data type for {@code output} output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. * @return a new instance of Irfft3d, with default output types @@ -389,7 +376,6 @@ public Irfft3d irfft3d(Operand input, Operand * corresponding dimension of {@code input}, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param data type for {@code output} output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. * @param Treal The value of the Treal attribute @@ -413,7 +399,6 @@ public Irfft3d irfft3d(Operand input, *

Axes mean the dimensions to perform the transform on. Default is to perform on * all axes. * - * @param data type for {@code output} output * @param input A complex tensor. * @param fftLength An int32 tensor. The FFT length for each dimension. * @param axes An int32 tensor with a same shape as fft_length. Axes to perform the transform. @@ -436,7 +421,6 @@ public IrfftNd irfftNd(Operand input, Operand *

Axes mean the dimensions to perform the transform on. Default is to perform on * all axes. * - * @param data type for {@code output} output * @param input A complex tensor. * @param fftLength An int32 tensor. The FFT length for each dimension. * @param axes An int32 tensor with a same shape as fft_length. Axes to perform the transform. @@ -460,7 +444,6 @@ public IrfftNd irfftNd(Operand input, * corresponding dimension of {@code input}, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param data type for {@code output} output * @param input A float32 tensor. * @param fftLength An int32 tensor of shape [1]. The FFT length. * @param Tcomplex The value of the Tcomplex attribute @@ -484,7 +467,6 @@ public Rfft rfft(Operand input, Operand< * corresponding dimension of {@code input}, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param data type for {@code output} output * @param input A float32 tensor. * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. * @param Tcomplex The value of the Tcomplex attribute @@ -508,7 +490,6 @@ public Rfft2d rfft2d(Operand input, * corresponding dimension of {@code input}, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param data type for {@code output} output * @param input A float32 tensor. * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. * @param Tcomplex The value of the Tcomplex attribute @@ -532,7 +513,6 @@ public Rfft3d rfft3d(Operand input, *
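A minimal usage sketch for the signal ops touched above (the fft/ifft pair and the rfft/irfft variants are all reached through the {@code tf.signal} group), assuming an eager {@code Ops} instance and the {@code tf.dtypes.complex} helper to build a complex input; values are illustrative only:

    import org.tensorflow.op.Ops;
    import org.tensorflow.op.dtypes.Complex;
    import org.tensorflow.op.signal.Fft;
    import org.tensorflow.op.signal.Ifft;
    import org.tensorflow.types.TComplex64;

    public class FftSketch {
      public static void main(String[] args) {
        Ops tf = Ops.create();  // eager execution

        // Build a small complex signal from separate real and imaginary parts.
        Complex<TComplex64> signal = tf.dtypes.complex(
            tf.constant(new float[] {1f, 2f, 3f, 4f}),
            tf.constant(new float[] {0f, 0f, 0f, 0f}),
            TComplex64.class);

        // Forward and inverse 1-D transforms over the inner-most dimension.
        Fft<TComplex64> spectrum = tf.signal.fft(signal);
        Ifft<TComplex64> roundTrip = tf.signal.ifft(spectrum);
        System.out.println(roundTrip.asTensor().shape());  // [4]
      }
    }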

Axes mean the dimensions to perform the transform on. Default is to perform on * all axes. * - * @param data type for {@code output} output * @param input A complex tensor. * @param fftLength An int32 tensor. The FFT length for each dimension. * @param axes An int32 tensor with a same shape as fft_length. Axes to perform the transform. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/SparseOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/SparseOps.java index 91726a9a693..6660a42449f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/SparseOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/SparseOps.java @@ -155,7 +155,6 @@ public AddSparseToTensorsMap addSparseToTensorsMap(Operand sparseIndices * Performs sparse-output bin counting for a tf.tensor input. * Counts the number of times each value occurs in the input. * - * @param data type for {@code output_values} output * @param values Tensor containing data to count. * @param weights A Tensor of the same shape as indices containing per-index weight values. May * also be the empty tensor if no weights are used. @@ -179,7 +178,6 @@ public DenseCountSparseOutput denseCountSparseOutput( * dimension contains the result of {@code set_operation} applied to the corresponding * {@code [0...n-1]} dimension of {@code set}. * - * @param data type for {@code result_values} output * @param set1 {@code Tensor} with rank {@code n}. 1st {@code n-1} dimensions must be the same as {@code set2}. * Dimension {@code n} contains values in a set, duplicates are allowed but ignored. * @param set2 {@code Tensor} with rank {@code n}. 1st {@code n-1} dimensions must be the same as {@code set1}. @@ -209,7 +207,6 @@ public DenseToDenseSetOperation denseToDenseSetOperation(Op * dimension contains the result of {@code set_operation} applied to the corresponding * {@code [0...n-1]} dimension of {@code set}. * - * @param data type for {@code result_values} output * @param set1 {@code Tensor} with rank {@code n}. 1st {@code n-1} dimensions must be the same as {@code set2}. * Dimension {@code n} contains values in a set, duplicates are allowed but ignored. * @param set2Indices 2D {@code Tensor}, indices of a {@code SparseTensor}. Must be in row-major @@ -272,7 +269,6 @@ public DenseToSparseSetOperation denseToSparseSetOperation( * shape = [2 50] * * - * @param data type for {@code sparse_values} output * @param serializedSparse The serialized {@code SparseTensor} objects. The last dimension * must have 3 columns. * @param dtype The {@code dtype} of the serialized {@code SparseTensor} objects. @@ -317,7 +313,6 @@ public SparseAccumulatorApplyGradient sparseAccumulatorApplyGradient(Operand data type for {@code values} output * @param handle The handle to a SparseConditionalAccumulator. * @param numRequired Number of gradients required before we return an aggregate. * @param dtype The data type of accumulated gradients. Needs to correspond to the type @@ -344,7 +339,6 @@ public SparseAccumulatorTakeGradient sparseAccumulatorTakeG * only for a positive value. *

In the following shapes, {@code nnz} is the count after taking {@code thresh} into account. * - * @param data type for {@code sum_values} output * @param aIndices 2-D. The {@code indices} of the first {@code SparseTensor}, size {@code [nnz, ndims]} Matrix. * @param aValues 1-D. The {@code values} of the first {@code SparseTensor}, size {@code [nnz]} Vector. * @param aShape 1-D. The {@code shape} of the first {@code SparseTensor}, size {@code [ndims]} Vector. @@ -369,7 +363,6 @@ public SparseAdd sparseAdd(Operand aIndices, Operan * non-empty values of the sum, and outputs the gradients w.r.t. the non-empty * values of A and B. * - * @param data type for {@code a_val_grad} output * @param backpropValGrad 1-D with shape {@code [nnz(sum)]}. The gradient with respect to * the non-empty values of the sum. * @param aIndices 2-D. The {@code indices} of the {@code SparseTensor} A, size {@code [nnz(A), ndims]}. @@ -393,7 +386,6 @@ public SparseAddGrad sparseAddGrad(Operand backpropValGr * {@code i}. *
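A minimal sketch of calling {@code sparseAdd} through the {@code tf.sparse} group, assuming eager execution; the two operands are tiny hand-built COO tensors (indices {@code [nnz, ndims]}, values {@code [nnz]}, shape {@code [ndims]}) with illustrative values:

    import org.tensorflow.op.Ops;
    import org.tensorflow.op.core.Constant;
    import org.tensorflow.op.sparse.SparseAdd;
    import org.tensorflow.types.TFloat32;
    import org.tensorflow.types.TInt64;

    public class SparseAddSketch {
      public static void main(String[] args) {
        Ops tf = Ops.create();

        // Two 2x3 SparseTensors in COO form.
        Constant<TInt64> aIndices = tf.constant(new long[][] {{0L, 0L}, {1L, 2L}});
        Constant<TFloat32> aValues = tf.constant(new float[] {1f, 2f});
        Constant<TInt64> aShape = tf.constant(new long[] {2L, 3L});
        Constant<TInt64> bIndices = tf.constant(new long[][] {{0L, 0L}, {0L, 1L}});
        Constant<TFloat32> bValues = tf.constant(new float[] {3f, 4f});
        Constant<TInt64> bShape = tf.constant(new long[] {2L, 3L});

        // thresh filters near-zero sums out of the result (0 keeps every non-zero entry).
        SparseAdd<TFloat32> sum = tf.sparse.sparseAdd(
            aIndices, aValues, aShape, bIndices, bValues, bShape, tf.constant(0f));
        System.out.println(sum.sumValues().asTensor().shape());  // [3]
      }
    }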

Values in {@code arr} outside of the range [0, size) are ignored. * - * @param data type for {@code output} output * @param indices 2D int64 {@code Tensor}. * @param values 1D int {@code Tensor}. * @param denseShape 1D int64 {@code Tensor}. @@ -452,7 +444,6 @@ public SparseBincount sparseBincount( * [b c ] [ ] [b c ] * * - * @param data type for {@code output_values} output * @param indices 2-D. Indices of each input {@code SparseTensor}. * @param values 1-D. Non-empty values of each {@code SparseTensor}. * @param shapes 1-D. Shapes of each {@code SparseTensor}. @@ -490,7 +481,6 @@ public SparseConditionalAccumulator sparseConditionalAccumulat * Performs sparse-output bin counting for a sparse tensor input. * Counts the number of times each value occurs in the input. * - * @param data type for {@code output_values} output * @param indices Tensor containing the indices of the sparse tensor to count. * @param values Tensor containing values of the sparse tensor to count. * @param denseShape Tensor containing the dense shape of the sparse tensor to count. @@ -624,7 +614,6 @@ public SparseCrossHashed sparseCrossHashed(Iterable> indices, * indices and shape, but possibly with different non-zero values. The output of * this Op is the resultant non-zero values. * - * @param data type for {@code output} output * @param spIndices 2-D. {@code N x R} matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. * @param spValues 1-D. {@code N} non-empty values corresponding to {@code sp_indices}. @@ -643,7 +632,6 @@ public SparseDenseCwiseAdd sparseDenseCwiseAdd(OperandLimitation: this Op only broadcasts the dense side to the sparse side, but not * the other direction. * - * @param data type for {@code output} output * @param spIndices 2-D. {@code N x R} matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. * @param spValues 1-D. {@code N} non-empty values corresponding to {@code sp_indices}. @@ -665,7 +653,6 @@ public SparseDenseCwiseDiv sparseDenseCwiseDiv(OperandLimitation: this Op only broadcasts the dense side to the sparse side, but not * the other direction. * - * @param data type for {@code output} output * @param spIndices 2-D. {@code N x R} matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. * @param spValues 1-D. {@code N} non-empty values corresponding to {@code sp_indices}. @@ -716,7 +703,6 @@ public SparseDenseCwiseMul sparseDenseCwiseMul(Operand * - * @param data type for {@code output_values} output * @param indices 2-D. the indices of the sparse tensor. * @param values 1-D. the values of the sparse tensor. * @param denseShape 1-D. the shape of the sparse tensor. @@ -741,7 +727,6 @@ public SparseFillEmptyRows sparseFillEmptyRows(Operand data type for {@code d_values} output * @param reverseIndexMap 1-D. The reverse index map from SparseFillEmptyRows. * @param gradValues 1-D. The gradients from backprop. * @param data type for {@code SparseFillEmptyRowsGrad} output and operands @@ -786,7 +771,6 @@ public SparseMatMul sparseMatMul(Operand a, Operand data type for {@code output} output * @param inputIndices 2-D. {@code N x R} matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. * @param inputValues 1-D. {@code N} non-empty values corresponding to {@code input_indices}. @@ -815,7 +799,6 @@ public SparseReduceMax sparseReduceMax(Operand in * with a single element is returned. 
Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. * - * @param data type for {@code output_values} output * @param inputIndices 2-D. {@code N x R} matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. * @param inputValues 1-D. {@code N} non-empty values corresponding to {@code input_indices}. @@ -844,7 +827,6 @@ public SparseReduceMaxSparse sparseReduceMaxSparse( * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. * - * @param data type for {@code output} output * @param inputIndices 2-D. {@code N x R} matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. * @param inputValues 1-D. {@code N} non-empty values corresponding to {@code input_indices}. @@ -873,7 +855,6 @@ public SparseReduceSum sparseReduceSum(Operand inpu * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. * - * @param data type for {@code output_values} output * @param inputIndices 2-D. {@code N x R} matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. * @param inputValues 1-D. {@code N} non-empty values corresponding to {@code input_indices}. @@ -898,7 +879,6 @@ public SparseReduceSumSparse sparseReduceSumSparse( *

If the tensor has rank {@code R} and {@code N} non-empty values, {@code input_indices} has * shape {@code [N, R]}, input_values has length {@code N}, and input_shape has length {@code R}. * - * @param data type for {@code output_values} output * @param inputIndices 2-D. {@code N x R} matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. * @param inputValues 1-D. {@code N} non-empty values corresponding to {@code input_indices}. @@ -943,7 +923,6 @@ public SparseReshape sparseReshape(Operand inputIndices, Operand *

Like {@code SegmentMean}, but {@code segment_ids} can have rank less than {@code data}'s first * dimension, selecting a subset of dimension 0, specified by {@code indices}. * - * @param data type for {@code output} output * @param data The data value * @param indices A 1-D tensor. Has same rank as {@code segment_ids}. * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. @@ -963,8 +942,6 @@ public SparseSegmentMean sparseSegmentMean(Operand dat * value is the number of unique indexes in "indices". Also returns vector * "sorted_unique_indices" containing the corresponding indexes from "indices". * - * @param data type for {@code output} output - * @param data type for {@code sorted_unique_indices} output * @param grad gradient propagated to the SparseSegmentMean op. * @param indices indices passed to the corresponding SparseSegmentMean op. * @param segmentIds segment_ids passed to the corresponding SparseSegmentMean op. @@ -987,7 +964,6 @@ public SparseSegmentMeanGrad sparse * the section on segmentation * for an explanation of segments. * - * @param data type for {@code output} output * @param data The data value * @param indices A 1-D tensor. Has same rank as {@code segment_ids}. * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. @@ -1007,7 +983,6 @@ public SparseSegmentMeanWithNumSegments sparseSegmentMean * N is the size of the segment being reduced. *

See {@code tf.sparse.segment_sum} for usage examples. * - * @param data type for {@code output} output * @param data The data value * @param indices A 1-D tensor. Has same rank as {@code segment_ids}. * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. @@ -1027,8 +1002,6 @@ public SparseSegmentSqrtN sparseSegmentSqrtN(Operand d * value is the number of unique indexes in "indices". Also returns vector * "sorted_unique_indices" containing the corresponding indexes from "indices". * - * @param data type for {@code output} output - * @param data type for {@code sorted_unique_indices} output * @param grad gradient propagated to the SparseSegmentSqrtN op. * @param indices indices passed to the corresponding SparseSegmentSqrtN op. * @param segmentIds segment_ids passed to the corresponding SparseSegmentSqrtN op. @@ -1052,7 +1025,6 @@ public SparseSegmentSqrtNGrad spars * the section on segmentation * for an explanation of segments. * - * @param data type for {@code output} output * @param data The data value * @param indices A 1-D tensor. Has same rank as {@code segment_ids}. * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. @@ -1097,7 +1069,6 @@ public SparseSegmentSqrtNWithNumSegments sparseSegmentSqr * tf.segment_sum(c, tf.constant([0, 0, 1])) * * - * @param data type for {@code output} output * @param data The data value * @param indices A 1-D tensor. Has same rank as {@code segment_ids}. * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. @@ -1117,8 +1088,6 @@ public SparseSegmentSum sparseSegmentSum(Operand data, * value is the number of unique indexes in "indices". Also returns vector * "sorted_unique_indices" containing the corresponding indexes from "indices". * - * @param data type for {@code output} output - * @param data type for {@code sorted_unique_indices} output * @param grad gradient propagated to the SparseSegmentSum op. * @param indices indices passed to the corresponding SparseSegmentSum op. * @param segmentIds segment_ids passed to the corresponding SparseSegmentSum op. @@ -1160,7 +1129,6 @@ public SparseSegmentSumGrad sparseS * # [ 0 0 0 0]] * * - * @param data type for {@code output} output * @param data The data value * @param indices A 1-D tensor. Has same rank as {@code segment_ids}. * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. @@ -1194,7 +1162,6 @@ public SparseSegmentSumWithNumSegments sparseSegmentSumWi * [ ] * * - * @param data type for {@code output_values} output * @param indices 2-D tensor represents the indices of the sparse tensor. * @param values 1-D tensor represents the values of the sparse tensor. * @param shape 1-D. tensor represents the shape of the sparse tensor. @@ -1216,7 +1183,6 @@ public SparseSlice sparseSlice(Operand indices, Ope * the sliced {@code SparseTensor}, and outputs the gradients w.r.t. * the non-empty values of input {@code SparseTensor}. * - * @param data type for {@code val_grad} output * @param backpropValGrad 1-D. The gradient with respect to * the non-empty values of the sliced {@code SparseTensor}. * @param inputIndices 2-D. The {@code indices} of the input {@code SparseTensor}. @@ -1245,7 +1211,6 @@ public SparseSliceGrad sparseSliceGrad(Operand backpropV *
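The {@code tf.sparse.segment_sum}-style example above translates fairly directly to the generated Java API; a minimal sketch, assuming eager execution:

    import org.tensorflow.op.Ops;
    import org.tensorflow.op.core.Constant;
    import org.tensorflow.op.sparse.SparseSegmentSum;
    import org.tensorflow.types.TInt32;

    public class SparseSegmentSumSketch {
      public static void main(String[] args) {
        Ops tf = Ops.create();

        Constant<TInt32> c = tf.constant(new int[][] {
            {1, 2, 3, 4}, {-1, -2, -3, -4}, {5, 6, 7, 8}});

        // Select rows 0 and 1 of c and add them into a single segment:
        // the result is [[0, 0, 0, 0]].
        SparseSegmentSum<TInt32> summed = tf.sparse.sparseSegmentSum(
            c, tf.constant(new int[] {0, 1}), tf.constant(new int[] {0, 0}));
        System.out.println(summed.asTensor().shape());  // [1, 4]
      }
    }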

Hence, the {@code SparseTensor} result has exactly the same non-zero indices and * shape. * - * @param data type for {@code output} output * @param spIndices 2-D. {@code NNZ x R} matrix with the indices of non-empty values in a * SparseTensor, in canonical ordering. * @param spValues 1-D. {@code NNZ} non-empty values corresponding to {@code sp_indices}. @@ -1262,7 +1227,6 @@ public SparseSoftmax sparseSoftmax(Operand spIndi * Returns the element-wise max of two SparseTensors. * Assumes the two SparseTensors have the same shape, i.e., no broadcasting. * - * @param data type for {@code output_values} output * @param aIndices 2-D. {@code N x R} matrix with the indices of non-empty values in a * SparseTensor, in the canonical lexicographic ordering. * @param aValues 1-D. {@code N} non-empty values corresponding to {@code a_indices}. @@ -1283,7 +1247,6 @@ public SparseSparseMaximum sparseSparseMaximum(Operand data type for {@code output_values} output * @param aIndices 2-D. {@code N x R} matrix with the indices of non-empty values in a * SparseTensor, in the canonical lexicographic ordering. * @param aValues 1-D. {@code N} non-empty values corresponding to {@code a_indices}. @@ -1321,7 +1284,6 @@ public SparseSparseMinimum sparseSparseMinimum(Operand * - * @param data type for {@code output_values} output * @param splitDim 0-D. The dimension along which to split. Must be in the range * {@code [0, rank(shape))}. * @param indices 2-D tensor represents the indices of the sparse tensor. @@ -1342,7 +1304,6 @@ public SparseSplit sparseSplit(Operand splitDim, * Adds up a {@code SparseTensor} and a dense {@code Tensor}, producing a dense {@code Tensor}. * This Op does not require {@code a_indices} be sorted in standard lexicographic order. * - * @param data type for {@code output} output * @param aIndices 2-D. The {@code indices} of the {@code SparseTensor}, with shape {@code [nnz, ndims]}. * @param aValues 1-D. The {@code values} of the {@code SparseTensor}, with shape {@code [nnz]}. * @param aShape 1-D. The {@code shape} of the {@code SparseTensor}, with shape {@code [ndims]}. @@ -1367,7 +1328,6 @@ public SparseTensorDenseAdd sparseTensor * A should be sorted in order of increasing dimension 1 (i.e., "column major" * order instead of "row major" order). * - * @param data type for {@code product} output * @param aIndices 2-D. The {@code indices} of the {@code SparseTensor}, size {@code [nnz, 2]} Matrix. * @param aValues 1-D. The {@code values} of the {@code SparseTensor}, size {@code [nnz]} Vector. * @param aShape 1-D. The {@code shape} of the {@code SparseTensor}, size {@code [2]} Vector. @@ -1401,7 +1361,6 @@ public SparseTensorDenseMatMul sparseTensorDenseMatMul( * contain any repeats. If {@code validate_indices} is true, these properties * are checked during execution. * - * @param data type for {@code dense} output * @param sparseIndices 0-D, 1-D, or 2-D. {@code sparse_indices[i]} contains the complete * index where {@code sparse_values[i]} will be placed. * @param outputShape 1-D. Shape of the dense output tensor. @@ -1441,7 +1400,6 @@ public SparseToDense sparseToDense( * dimension contains the result of {@code set_operation} applied to the corresponding * {@code [0...n-1]} dimension of {@code set}. * - * @param data type for {@code result_values} output * @param set1Indices 2D {@code Tensor}, indices of a {@code SparseTensor}. Must be in row-major * order. * @param set1Values 1D {@code Tensor}, values of a {@code SparseTensor}. 
Must be in row-major @@ -1511,7 +1469,6 @@ public SparseToSparseSetOperation sparseToSparseSetOperatio * shape = [2 50] * * - * @param data type for {@code sparse_values} output * @param sparseHandles 1-D, The {@code N} serialized {@code SparseTensor} objects. * Shape: {@code [N]}. * @param dtype The {@code dtype} of the {@code SparseTensor} objects stored in the diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/StringsOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/StringsOps.java index b7d38d58553..56a82c2dbf6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/StringsOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/StringsOps.java @@ -260,7 +260,6 @@ public StringLength stringLength(Operand input, StringLength.Options... * strings and outputs a ragged tensor with 1 ragged dimension containing ngrams * of that string, joined along the innermost axis. * - * @param data type for {@code ngrams_splits} output * @param data The values tensor of the ragged string tensor to make ngrams out of. Must be a * 1D string tensor. * @param dataSplits The splits tensor of the ragged string tensor to make ngrams out of. @@ -510,7 +509,6 @@ public ToHashBucketStrong toHashBucketStrong(Operand input, Long numBuc * * * - * @param data type for {@code output} output * @param stringTensor The stringTensor value * @return a new instance of ToNumber, with default output types */ @@ -533,7 +531,6 @@ public ToNumber toNumber(Operand stringTensor) { * * * - * @param data type for {@code output} output * @param stringTensor The stringTensor value * @param outType The numeric type to interpret each string in {@code string_tensor} as. * @param data type for {@code StringToNumber} output and operands @@ -559,7 +556,6 @@ public ToNumber toNumber(Operand stringTensor, C * string (in row-major order). * * - * @param data type for {@code row_splits} output * @param input The text to be decoded. Can have any shape. Note that the output is flattened * to a vector of char values. * @param inputEncoding Text encoding of the input strings. This is any of the encodings supported @@ -588,7 +584,6 @@ public UnicodeDecode unicodeDecode(Operand input, String inputE * string (in row-major order). * * - * @param data type for {@code row_splits} output * @param input The text to be decoded. Can have any shape. Note that the output is flattened * to a vector of char values. * @param inputEncoding Text encoding of the input strings. This is any of the encodings supported @@ -623,7 +618,6 @@ public UnicodeDecode unicodeDecode(Operand input * string (in row-major order). * * - * @param data type for {@code row_splits} output * @param input The text to be decoded. Can have any shape. Note that the output is flattened * to a vector of char values. * @param inputEncoding Text encoding of the input strings. This is any of the encodings supported @@ -656,7 +650,6 @@ public UnicodeDecodeWithOffsets unicodeDecodeWithOffsets(Operand * * - * @param data type for {@code row_splits} output * @param input The text to be decoded. Can have any shape. Note that the output is flattened * to a vector of char values. * @param inputEncoding Text encoding of the input strings. 
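For the string ops in this file, a minimal eager sketch using {@code toNumber} (default {@code TFloat32} output) and {@code stringLength}; the inputs are illustrative scalars:

    import org.tensorflow.op.Ops;
    import org.tensorflow.op.strings.StringLength;
    import org.tensorflow.op.strings.ToNumber;
    import org.tensorflow.types.TFloat32;

    public class StringsSketch {
      public static void main(String[] args) {
        Ops tf = Ops.create();

        // Parse a decimal string into a float32 scalar.
        ToNumber<TFloat32> parsed = tf.strings.toNumber(tf.constant("3.14"));

        // Length in bytes of a scalar string.
        StringLength length = tf.strings.stringLength(tf.constant("hello"));

        System.out.println(parsed.asTensor().getFloat());  // 3.14
        System.out.println(length.asTensor().getInt());    // 5
      }
    }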
This is any of the encodings supported diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/TpuOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/TpuOps.java index 59a9f973858..72356814850 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/TpuOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/TpuOps.java @@ -158,7 +158,6 @@ public final class TpuOps { *

replica 0's output: {@code [[A], [C]]} * replica 1's output: {@code [[B], [D]]} * - * @param data type for {@code output} output * @param input The local input to the sum. * @param groupAssignment An int32 tensor with shape * [num_groups, num_replicas_per_group]. {@code group_assignment[i]} represents the @@ -365,7 +364,6 @@ public ConvertToCooTensor convertToCooTensor(Operand indicesOrRowSplits, * and {@code B, D, F, H} as group 1. Thus we get the outputs: * {@code [A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H]}. * - * @param data type for {@code output} output * @param input The local input to the sum. * @param groupAssignment An int32 tensor with shape * [num_groups, num_replicas_per_group]. {@code group_assignment[i]} represents the @@ -801,7 +799,6 @@ public GlobalIterId globalIterId() { /** * A placeholder op for a value that will be fed into the computation. * - * @param data type for {@code output} output * @param dtype The type of elements in the tensor. * @param shape The shape of the tensor. * @param data type for {@code InfeedDequeue} output and operands @@ -1252,7 +1249,6 @@ public OrdinalSelector ordinalSelector() { * Retrieves a single tensor from the computation outfeed. * This operation will block indefinitely until data is available. * - * @param data type for {@code output} output * @param dtype The type of elements in the tensor. * @param shape The shape of the tensor. * @param options carries optional attribute values @@ -1302,7 +1298,6 @@ public OutfeedDequeueTupleV2 outfeedDequeueTupleV2(Operand deviceOrdinal * tensor allowing dynamic outfeed. * This operation will block indefinitely until data is available. * - * @param data type for {@code output} output * @param deviceOrdinal An int scalar tensor, representing the TPU device to use. This should be -1 when * the Op is running on a TPU device, and >= 0 when the Op is running on the CPU * device. @@ -1355,7 +1350,6 @@ public PartitionedCall partitionedCall(Iterable> args, Operand data type for {@code output} output * @param inputs A list of partitioned inputs which must have the same shape. * @param partitionDims A list of integers describing how each dimension is partitioned. Emptiness * indicates the inputs are replicated. @@ -1372,7 +1366,6 @@ public PartitionedInput partitionedInput(Iterable data type for {@code output} output * @param inputs A tensor which represents the full shape of partitioned tensors. * @param numSplits The value of the numSplits attribute * @param partitionDims A list of integers describing how each dimension is partitioned. Emptiness @@ -1454,7 +1447,6 @@ public ReplicateMetadata replicateMetadata(Long numReplicas, * *

The above computation has a replicated input of two replicas. * - * @param data type for {@code output} output * @param inputs The inputs value * @param options carries optional attribute values * @param data type for {@code TPUReplicatedInput} output and operands @@ -1476,7 +1468,6 @@ public ReplicatedInput replicatedInput(Iterable> * *

The above computation has a replicated output of two replicas. * - * @param data type for {@code outputs} output * @param input The input value * @param numReplicas The value of the numReplicas attribute * @param data type for {@code TPUReplicatedOutput} output and operands @@ -1784,8 +1775,6 @@ public ShutdownTPUSystem shutdownTPUSystem() { * values. This op is to split these values into two groups for two types, and * construct each group as one tensor to return. * - * @param data type for {@code integer_tensor} output - * @param data type for {@code float_tensor} output * @param input An XLA tuple including integer and float elements as deduplication data tuple. * @param integerType integer_tensor type. Allowed types: int32, int64, uint32, uint64. * @param floatType float_tensor type. Allowed types: half, bfloat16, float. @@ -1913,7 +1902,6 @@ public TPUReplicateMetadata tPUReplicateMetadata(Long numReplicas, * *

The above computation has a replicated input of two replicas. * - * @param data type for {@code output} output * @deprecated use {@link org.tensorflow.op.tpu.ReplicatedInput} instead * @param inputs The inputs value * @param options carries optional attribute values @@ -1937,7 +1925,6 @@ public TPUReplicatedInput tPUReplicatedInput(Iterable *

The above computation has a replicated output of two replicas. * - * @param data type for {@code outputs} output * @deprecated use {@link org.tensorflow.op.tpu.ReplicatedOutput} instead * @param input The input value * @param numReplicas The value of the numReplicas attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/TrainOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/TrainOps.java index 0442b896828..3ee5b8de813 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/TrainOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/TrainOps.java @@ -166,7 +166,6 @@ public AccumulatorSetGlobalStep accumulatorSetGlobalStep(Operand handle * the accumulated gradients. Also automatically increments the recorded * global_step in the accumulator by 1, and resets the aggregate to 0. * - * @param data type for {@code average} output * @param handle The handle to an accumulator. * @param numRequired Number of gradients required before we return an aggregate. * @param dtype The data type of accumulated gradients. Needs to correspond to the type @@ -185,7 +184,6 @@ public AccumulatorTakeGradient accumulatorTakeGradient( * v_t <- max(beta2 * v_{t-1}, abs(g)) * variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon) * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param m Should be from a Variable(). * @param v Should be from a Variable(). @@ -212,7 +210,6 @@ public ApplyAdaMax applyAdaMax(Operand var, Operand m * update_accum = rho() * update_accum + (1 - rho()) * update.square(); * var -= update; * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param accumUpdate Should be from a Variable(). @@ -235,7 +232,6 @@ public ApplyAdadelta applyAdadelta(Operand var, Operand< * accum += grad * grad * var -= lr * grad * (1 / sqrt(accum)) * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. @@ -252,7 +248,6 @@ public ApplyAdagrad applyAdagrad(Operand var, Operand /** * Update '*var' according to the proximal adagrad scheme. * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param gradientAccumulator Should be from a Variable(). * @param gradientSquaredAccumulator Should be from a Variable(). @@ -277,7 +272,6 @@ public ApplyAdagradDa applyAdagradDa(Operand var, * accum += grad * grad * var -= lr * grad * (1 / sqrt(accum)) * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. @@ -299,7 +293,6 @@ public ApplyAdagradV2 applyAdagradV2(Operand var, Operan * $$v_t := \beta_2 \cdot v_{t-1} + (1 - \beta_2) \cdot g^2$$ * $$\text{var} := \begin{cases} \text{var} - (m_t \beta_1 + g \cdot (1 - \beta_1))\cdot\text{lr}_t/(\sqrt{v_t} + \epsilon), &\text{if use_nesterov}\\ \text{var} - m_t \cdot \text{lr}_t /(\sqrt{v_t} + \epsilon), &\text{otherwise} \end{cases}$$ * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param m Should be from a Variable(). * @param v Should be from a Variable(). 
@@ -326,7 +319,6 @@ public ApplyAdam applyAdam(Operand var, Operand m, Op * update <- (alpha + sign_decay * sign(g) *sign(m)) * g * variable <- variable - lr_t * update * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param m Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. @@ -361,7 +353,6 @@ public ApplyAddSign applyAddSign(Operand var, Operand * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) * var <- var - mom * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param mg Should be from a Variable(). * @param ms Should be from a Variable(). @@ -392,7 +383,6 @@ public ApplyCenteredRmsProp applyCenteredRmsProp(Operand * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param linear Should be from a Variable(). @@ -415,7 +405,6 @@ public ApplyFtrl applyFtrl(Operand var, Operand accum /** * Update '*var' by subtracting 'alpha' * 'delta' from it. * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param alpha Scaling factor. Must be a scalar. * @param delta The change. @@ -434,7 +423,6 @@ public ApplyGradientDescent applyGradientDescent(Operand *
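A minimal graph-mode sketch of the plain {@code applyGradientDescent} update ({@code var -= alpha * delta}), assuming the ref-variable API ({@code tf.variable}/{@code tf.assign}); the learning rate and delta values are illustrative:

    import org.tensorflow.Graph;
    import org.tensorflow.Session;
    import org.tensorflow.ndarray.Shape;
    import org.tensorflow.op.Ops;
    import org.tensorflow.op.core.Assign;
    import org.tensorflow.op.core.Variable;
    import org.tensorflow.op.train.ApplyGradientDescent;
    import org.tensorflow.types.TFloat32;

    public class ApplyGradientDescentSketch {
      public static void main(String[] args) {
        try (Graph g = new Graph()) {
          Ops tf = Ops.create(g);

          // A ref-typed variable, as required by the non-resource apply* ops.
          Variable<TFloat32> var = tf.variable(Shape.of(3), TFloat32.class);
          Assign<TFloat32> init = tf.assign(var, tf.constant(new float[] {1f, 2f, 3f}));

          // var -= alpha * delta
          ApplyGradientDescent<TFloat32> step = tf.train.applyGradientDescent(
              var, tf.constant(0.1f), tf.constant(new float[] {0.5f, 0.5f, 0.5f}));

          try (Session s = new Session(g)) {
            s.runner().addTarget(init).run();
            s.runner().fetch(step).run();  // applies one update and returns the new var value
          }
        }
      }
    }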

accum = accum * momentum + grad * var -= lr * accum * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. @@ -455,7 +443,6 @@ public ApplyMomentum applyMomentum(Operand var, Operand< * update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g * variable <- variable - lr_t * update * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param m Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. @@ -479,7 +466,6 @@ public ApplyPowerSign applyPowerSign(Operand var, Operan * prox_v = var - lr * grad * (1 / sqrt(accum)) * var = sign(prox_v)/(1+lrl2) * max{|prox_v|-lrl1,0} * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. @@ -501,7 +487,6 @@ public ApplyProximalAdagrad applyProximalAdagrad(Operand * prox_v = var - alpha * delta * var = sign(prox_v)/(1+alphal2) * max{|prox_v|-alphal1,0} * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param alpha Scaling factor. Must be a scalar. * @param l1 L1 regularization. Must be a scalar. @@ -528,7 +513,6 @@ public ApplyProximalGradientDescent applyProximalGradientDe * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) * var <- var - mom * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param ms Should be from a Variable(). * @param mom Should be from a Variable(). @@ -570,7 +554,6 @@ public ApplyRmsProp applyRmsProp(Operand var, Operand * about broadcasting * here . * - * @param data type for {@code output} output * @param x 2-D or higher with shape {@code [..., r_x, c_x]}. * @param y 2-D or higher with shape {@code [..., r_y, c_y]}. * @param Tout If not spcified, Tout is the same type to input type. @@ -717,7 +700,6 @@ public NegTrain negTrain(Operand wIn, Operand wOut, Operand< * op exists to prevent subtle bugs from silently returning unimplemented * gradients in some corner cases. * - * @param data type for {@code output} output * @param input any tensor. * @param options carries optional attribute values * @param data type for {@code PreventGradient} output and operands @@ -776,7 +758,6 @@ public ResourceAccumulatorSetGlobalStep resourceAccumulatorSetGlobalStep( * the accumulated gradients. Also automatically increments the recorded * global_step in the accumulator by 1, and resets the aggregate to 0. * - * @param data type for {@code average} output * @param handle The handle to an accumulator. * @param numRequired Number of gradients required before we return an aggregate. * @param dtype The data type of accumulated gradients. Needs to correspond to the type @@ -1535,7 +1516,6 @@ public Restore restore(Operand prefix, Operand tensorNames, *

The {@code shape_and_slice} input has the same format as the * elements of the {@code shapes_and_slices} input of the {@code SaveSlices} op. * - * @param data type for {@code tensor} output * @param filePattern Must have a single element. The pattern of the files from * which we read the tensor. * @param tensorName Must have a single element. The name of the tensor to be @@ -1687,7 +1667,6 @@ public SdcaShrinkL1 sdcaShrinkL1(Iterable> weights, Float l1, /** * var: Should be from a Variable(). * - * @param data type for {@code out} output * @param var The var value * @param accum Should be from a Variable(). * @param accumUpdate : Should be from a Variable(). @@ -1712,7 +1691,6 @@ public SparseApplyAdadelta sparseApplyAdadelta(Operand v * $$accum += grad * grad$$ * $$var -= lr * grad * (1 / sqrt(accum))$$ * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Learning rate. Must be a scalar. @@ -1732,7 +1710,6 @@ public SparseApplyAdagrad sparseApplyAdagrad(Operand var /** * Update entries in '*var' and '*accum' according to the proximal adagrad scheme. * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param gradientAccumulator Should be from a Variable(). * @param gradientSquaredAccumulator Should be from a Variable(). @@ -1769,7 +1746,6 @@ public SparseApplyAdagradDa sparseApplyAdagradDa(Operand * $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ * $$var <- var - mom$$ * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param mg Should be from a Variable(). * @param ms Should be from a Variable(). @@ -1802,7 +1778,6 @@ public SparseApplyCenteredRmsProp sparseApplyCenteredRmsPro * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param linear Should be from a Variable(). @@ -1831,7 +1806,6 @@ public SparseApplyFtrl sparseApplyFtrl(Operand var, Oper *

$$accum = accum * momentum + grad$$ * $$var -= lr * accum$$ * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Learning rate. Must be a scalar. @@ -1856,7 +1830,6 @@ public SparseApplyMomentum sparseApplyMomentum(Operand v * $$prox_v -= lr * grad * (1 / sqrt(accum))$$ * $$var = sign(prox_v)/(1+lrl2) * max{|prox_v|-lrl1,0}$$ * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Learning rate. Must be a scalar. @@ -1880,7 +1853,6 @@ public SparseApplyProximalAdagrad sparseApplyProximalAdagra * $$prox_v = var - alpha * grad$$ * $$var = sign(prox_v)/(1+alphal2) * max{|prox_v|-alphal1,0}$$ * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param alpha Scaling factor. Must be a scalar. * @param l1 L1 regularization. Must be a scalar. @@ -1908,7 +1880,6 @@ public SparseApplyProximalGradientDescent sparseApplyProxim * $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ * $$var <- var - mom$$ * - * @param data type for {@code out} output * @param var Should be from a Variable(). * @param ms Should be from a Variable(). * @param mom Should be from a Variable(). @@ -1960,7 +1931,6 @@ public SymbolicGradient symbolicGradient(Iterable> input, * along each dimension, {@code train.TileGrad} takes in {@code multiples} and aggregates * each repeated tile of {@code input} into {@code output}. * - * @param data type for {@code output} output * @param input The input value * @param multiples The multiples value * @param data type for {@code TileGrad} output and operands diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/XlaOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/XlaOps.java index 75f9104ce4b..3d303810698 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/XlaOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/XlaOps.java @@ -149,7 +149,6 @@ public AssignVariableConcatND assignVariableConcatND(Operand re * [8, 9, 10]] * * - * @param data type for {@code output} output * @param inputs Input tensor slices in row-major order to merge across all dimensions. All * inputs must have the same shape. * } @@ -199,7 +198,6 @@ public ConcatND concatND(Iterable> inputs, List< * [0, 0]] * * - * @param data type for {@code outputs} output * @param resource Resource variable of input tensor to split across all dimensions. * } * out_arg { @@ -252,7 +250,6 @@ public ReadVariableSplitND readVariableSplitND( * [0, 0]] * * - * @param data type for {@code outputs} output * @param input Input tensor to split across all dimensions. * } * out_arg { @@ -298,7 +295,6 @@ public XlaHostCompute xlaHostCompute(Iterable> inputs, * shape: shape for output. * key: A unique identifier for this region used to match up host transfers. 
* - * @param data type for {@code output} output * @param Toutput The value of the Toutput attribute * @param shape The value of the shape attribute * @param key The value of the key attribute diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseAnd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseAnd.java index 34789dce80c..7fea36a03b3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseAnd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseAnd.java @@ -52,8 +52,6 @@ * res = bitwise_ops.bitwise_and(lhs, rhs) * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE * - * - * @param data type for {@code z} output */ @OpMetadata( opType = BitwiseAnd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseOr.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseOr.java index afa384f6e38..1e57451698b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseOr.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseOr.java @@ -52,8 +52,6 @@ * res = bitwise_ops.bitwise_or(lhs, rhs) * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE * - * - * @param data type for {@code z} output */ @OpMetadata( opType = BitwiseOr.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseXor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseXor.java index dc26dc145aa..52953422482 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseXor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/BitwiseXor.java @@ -52,8 +52,6 @@ * res = bitwise_ops.bitwise_xor(lhs, rhs) * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE * - * - * @param data type for {@code z} output */ @OpMetadata( opType = BitwiseXor.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/Invert.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/Invert.java index a2d9a985bae..8dcb5a72de7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/Invert.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/Invert.java @@ -73,8 +73,6 @@ * expected = tf.constant([dtype.max - x for x in inputs], dtype=tf.float32) * tf.assert_equal(tf.cast(inverted, tf.float32), tf.cast(expected, tf.float32)) * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Invert.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/LeftShift.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/LeftShift.java index 5874dc12979..ccf41c473f8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/LeftShift.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/LeftShift.java @@ -63,8 +63,6 @@ * bitwise_ops.left_shift(lhs, rhs) * # <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2, 64, 101, 32], dtype=int8)> * - * - * @param data type for {@code z} output */ @OpMetadata( opType = LeftShift.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/RightShift.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/RightShift.java index 22c95c81136..6c1407b9d19 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/RightShift.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/bitwise/RightShift.java @@ -65,8 +65,6 @@ * bitwise_ops.right_shift(lhs, rhs) * # <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2, 64, 101, 32], dtype=int8)> * - * - * @param data type for {@code z} output */ @OpMetadata( opType = RightShift.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveAllToAll.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveAllToAll.java index 99ccff79289..9c513486b9b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveAllToAll.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveAllToAll.java @@ -37,8 +37,6 @@ /** * Mutually exchanges multiple tensors of identical type and shape. - * - * @param data type for {@code data} output */ @OpMetadata( opType = CollectiveAllToAll.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveBcastRecv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveBcastRecv.java index 332b5dcf9ab..a66995e4d4e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveBcastRecv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveBcastRecv.java @@ -38,8 +38,6 @@ /** * Receives a tensor value broadcast from another device. - * - * @param data type for {@code data} output */ @OpMetadata( opType = CollectiveBcastRecv.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveBcastSend.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveBcastSend.java index ee495b56951..df7a315413f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveBcastSend.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveBcastSend.java @@ -36,8 +36,6 @@ /** * Broadcasts a tensor value to one or more other devices. - * - * @param data type for {@code data} output */ @OpMetadata( opType = CollectiveBcastSend.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveGather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveGather.java index d3997e8743f..57a2b134ff6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveGather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveGather.java @@ -41,8 +41,6 @@ * {@code is_stateless} means each op does not need control dependencies to other * collective ops. In this case, keys that are unique at runtime * (e.g. {@code instance_key}) should be used to distinguish collective groups. 
- * - * @param data type for {@code data} output */ @OpMetadata( opType = CollectiveGather.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectivePermute.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectivePermute.java index 9fd029facf3..380a949a664 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectivePermute.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectivePermute.java @@ -40,8 +40,6 @@ *

For example, suppose there are 4 TPU instances: {@code [A, B, C, D]}. Passing * source_target_pairs={@code [[0,1],[1,2],[2,3],[3,0]]} gets the outputs: * {@code [D, A, B, C]}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = CollectivePermute.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveReduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveReduce.java index 7eab3bb0f17..8f6c26778e1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveReduce.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveReduce.java @@ -37,8 +37,6 @@ /** * Mutually reduces multiple tensors of identical type and shape. - * - * @param data type for {@code data} output */ @OpMetadata( opType = CollectiveReduce.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveReduceScatter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveReduceScatter.java index 5ab06edf273..8b89dbaf183 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveReduceScatter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/collective/CollectiveReduceScatter.java @@ -41,8 +41,6 @@ * {@code is_stateless} means each op does not need control dependencies to other * collective ops. In this case, keys that are unique at runtime * (e.g. {@code instance_key}) should be used to distinguish collective groups. - * - * @param data type for {@code data} output */ @OpMetadata( opType = CollectiveReduceScatter.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ApproxTopK.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ApproxTopK.java index 1daca9f077e..48f4f94315b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ApproxTopK.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ApproxTopK.java @@ -38,8 +38,6 @@ * Returns min/max k values and their indices of the input operand in an approximate manner. * See https://arxiv.org/abs/2206.14286 for the algorithm details. * This op is only optimized on TPU currently. - * - * @param data type for {@code values} output */ @OpMetadata( opType = ApproxTopK.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Assign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Assign.java index a8001c6103a..e49f3eafacc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Assign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Assign.java @@ -37,8 +37,6 @@ * Update 'ref' by assigning 'value' to it. * This operation outputs "ref" after the assignment is done. * This makes it easier to chain operations that need to use the reset value. 
- * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = Assign.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignAdd.java index 2b6f78046ca..848231d569a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignAdd.java @@ -37,8 +37,6 @@ * Update 'ref' by adding 'value' to it. * This operation outputs "ref" after the update is done. * This makes it easier to chain operations that need to use the reset value. - * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = AssignAdd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignSub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignSub.java index 162fc069e92..cc96d634945 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignSub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignSub.java @@ -37,8 +37,6 @@ * Update 'ref' by subtracting 'value' from it. * This operation outputs "ref" after the update is done. * This makes it easier to chain operations that need to use the reset value. - * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = AssignSub.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpace.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpace.java index 889bd521e0d..09fa1d49bcb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpace.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpace.java @@ -42,8 +42,6 @@ * this op outputs a copy of the input tensor where values from the {@code batch} * dimension are moved in spatial blocks to the {@code height} and {@code width} dimensions, * followed by cropping along the {@code height} and {@code width} dimensions. - * - * @param data type for {@code output} output */ @OpMetadata( opType = BatchToSpace.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpaceNd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpaceNd.java index c7cf592d517..65a98188342 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpaceNd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BatchToSpaceNd.java @@ -42,8 +42,6 @@ * the input. The spatial dimensions of this intermediate result are then * optionally cropped according to {@code crops} to produce the output. This is the * reverse of SpaceToBatch. See below for a precise description. - * - * @param data type for {@code output} output */ @OpMetadata( opType = BatchToSpaceNd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Bitcast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Bitcast.java index c1bd2421b15..82a2a99d295 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Bitcast.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Bitcast.java @@ -96,8 +96,6 @@ * endian orderings will give different results. 
A copy from input buffer to output * buffer is made on BE machines when types are of different sizes in order to get * the same casting results as on LE machines. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Bitcast.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastDynamicShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastDynamicShape.java index 96cfa009842..165e7e12b9a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastDynamicShape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastDynamicShape.java @@ -37,8 +37,6 @@ * Return the shape of s0 op s1 with broadcast. * Given {@code s0} and {@code s1}, tensors that represent shapes, compute {@code r0}, the * broadcasted shape. {@code s0}, {@code s1} and {@code r0} are all integer vectors. - * - * @param data type for {@code r0} output */ @OpMetadata( opType = BroadcastDynamicShape.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastGradientArgs.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastGradientArgs.java index fe9cf0e7039..f29d66c8de6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastGradientArgs.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastGradientArgs.java @@ -36,8 +36,6 @@ /** * Return the reduction indices for computing gradients of s0 op s1 with broadcast. * This is typically used by gradient computations for a broadcasting operation. - * - * @param data type for {@code r0} output */ @OpMetadata( opType = BroadcastGradientArgs.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastTo.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastTo.java index d9ada9ae323..f27247cd37a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastTo.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/BroadcastTo.java @@ -72,8 +72,6 @@ * The newly-created tensor takes the full memory of the broadcasted * shape. (In a graph context, {@code broadcast_to} might be fused to * subsequent operation and then be optimized away, however.) - * - * @param data type for {@code output} output */ @OpMetadata( opType = BroadcastTo.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ClipByValue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ClipByValue.java index 4477b0d4924..2ae7185a7e5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ClipByValue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ClipByValue.java @@ -39,8 +39,6 @@ * shape as {@code t} with its values clipped to {@code clip_value_min} and {@code clip_value_max}. * Any values less than {@code clip_value_min} are set to {@code clip_value_min}. Any values * greater than {@code clip_value_max} are set to {@code clip_value_max}. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = ClipByValue.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Concat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Concat.java index 894b3a574be..cf3b735f4be 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Concat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Concat.java @@ -37,8 +37,6 @@ /** * Concatenates tensors along one dimension. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Concat.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ConcatOffset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ConcatOffset.java index df14b30a11b..d8082c3b6ef 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ConcatOffset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ConcatOffset.java @@ -53,8 +53,6 @@ * * *
This is typically used by gradient computations for a concat operation. - * - * @param data type for {@code offset} output */ @OpMetadata( opType = ConcatOffset.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Copy.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Copy.java index 9b55fac9069..a04de48877b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Copy.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Copy.java @@ -42,8 +42,6 @@ * deep-copying. See the documentation of Debug* ops for more details. *
Unlike the CopyHost Op, this op does not have HostMemory constraint on its * input or output. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Copy.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyHost.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyHost.java index 59af18c8b33..055c9d878bf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyHost.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyHost.java @@ -40,8 +40,6 @@ * gRPC gating status, the output will simply forward the input tensor without * deep-copying. See the documentation of Debug* ops for more details. *
Unlike the Copy Op, this op has HostMemory constraint on its input or output. - * - * @param data type for {@code output} output */ @OpMetadata( opType = CopyHost.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyToMesh.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyToMesh.java index f83d6c6ad61..166d4613d54 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyToMesh.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyToMesh.java @@ -35,8 +35,6 @@ /** * The CopyToMesh operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = CopyToMesh.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyToMeshGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyToMeshGrad.java index fa3467cd849..095d5b5d7ce 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyToMeshGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CopyToMeshGrad.java @@ -35,8 +35,6 @@ /** * The CopyToMeshGrad operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = CopyToMeshGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CountUpTo.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CountUpTo.java index 7a81a4419e6..0f404fa1419 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CountUpTo.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/CountUpTo.java @@ -35,8 +35,6 @@ /** * Increments 'ref' until it reaches 'limit'. - * - * @param data type for {@code output} output */ @OpMetadata( opType = CountUpTo.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeepCopy.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeepCopy.java index ca15dbb9a55..f0b9b3927a8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeepCopy.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DeepCopy.java @@ -35,8 +35,6 @@ /** * Makes a copy of {@code x}. - * - * @param data type for {@code y} output */ @OpMetadata( opType = DeepCopy.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DestroyTemporaryVariable.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DestroyTemporaryVariable.java index cc8f2bafb2f..876a1e46ee5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DestroyTemporaryVariable.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DestroyTemporaryVariable.java @@ -41,8 +41,6 @@ * This is typically achieved by chaining the ref through each assign op, or by * using control dependencies. *
Outputs the final value of the tensor pointed to by 'ref'. - * - * @param data type for {@code value} output */ @OpMetadata( opType = DestroyTemporaryVariable.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicPartition.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicPartition.java index b851e0cccdf..d7d7bf7c328 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicPartition.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicPartition.java @@ -78,8 +78,6 @@ * * * - * - * @param data type for {@code outputs} output */ @OpMetadata( opType = DynamicPartition.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicStitch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicStitch.java index 9aba2968627..d160ab8255c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicStitch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DynamicStitch.java @@ -90,8 +90,6 @@ *
* *
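As a point of reference for the op documented above, a minimal usage sketch of {@code DynamicStitch} through the {@code Ops} API could look as follows. This is an illustrative assumption only (eager execution via {@code Ops.create()}; the class name, values, and expected output are not part of the generated sources or of this diff):

import java.util.Arrays;
import java.util.List;
import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.op.core.DynamicStitch;
import org.tensorflow.types.TInt32;

public class DynamicStitchSketch {
  public static void main(String[] args) {
    Ops tf = Ops.create();  // eager execution
    // The index tensors decide where each data element lands in the merged result.
    List<Operand<TInt32>> indices =
        Arrays.asList(tf.constant(new int[] {0, 2}), tf.constant(new int[] {1, 3}));
    List<Operand<TInt32>> data =
        Arrays.asList(tf.constant(new int[] {10, 30}), tf.constant(new int[] {20, 40}));
    DynamicStitch<TInt32> merged = tf.dynamicStitch(indices, data);
    System.out.println(merged.asTensor());  // expected: [10, 20, 30, 40]
  }
}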
- * - * @param data type for {@code merged} output */ @OpMetadata( opType = DynamicStitch.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Empty.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Empty.java index 02c76780ba2..6f7d74d94e5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Empty.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Empty.java @@ -38,8 +38,6 @@ /** * Creates a tensor with the given shape. *
This operation creates a tensor of {@code shape} and {@code dtype}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Empty.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EnsureShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EnsureShape.java index 131285dc0e6..bbada3714ac 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EnsureShape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/EnsureShape.java @@ -38,8 +38,6 @@ * Ensures that the tensor's shape matches the expected shape. * Raises an error if the input tensor's shape does not match the specified shape. * Returns the input tensor otherwise. - * - * @param data type for {@code output} output */ @OpMetadata( opType = EnsureShape.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Enter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Enter.java index baed3b18053..309e5700eb1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Enter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Enter.java @@ -40,8 +40,6 @@ * {@code is_constant} is true, {@code output} is a constant in the child frame; otherwise * it may be changed in the child frame. At most {@code parallel_iterations} iterations * are run in parallel in the child frame. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Enter.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Exit.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Exit.java index c1535016b59..8dea6a66fe6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Exit.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Exit.java @@ -36,8 +36,6 @@ /** * Exits the current frame to its parent frame. * Exit makes its input {@code data} available to the parent frame. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Exit.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExpandDims.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExpandDims.java index bf17427d228..0f0e030b71d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExpandDims.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExpandDims.java @@ -59,8 +59,6 @@ *
{@code -1-input.dims() <= dim <= input.dims()} *
This operation is related to {@code squeeze()}, which removes dimensions of * size 1. - * - * @param data type for {@code output} output */ @OpMetadata( opType = ExpandDims.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExtractVolumePatches.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExtractVolumePatches.java index 12afb6060b3..350c416e235 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExtractVolumePatches.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ExtractVolumePatches.java @@ -36,8 +36,6 @@ /** * Extract {@code patches} from {@code input} and put them in the {@code "depth"} output dimension. 3D extension of {@code extract_image_patches}. - * - * @param data type for {@code patches} output */ @OpMetadata( opType = ExtractVolumePatches.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/FakeParam.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/FakeParam.java index ee07de5268d..79e63958dda 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/FakeParam.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/FakeParam.java @@ -40,8 +40,6 @@ * valid output when run, so must either be removed (e.g. replaced with a * function input) or guaranteed not to be used (e.g. if mirroring an * intermediate output needed for the gradient computation of the other branch). - * - * @param data type for {@code output} output */ @OpMetadata( opType = FakeParam.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Fill.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Fill.java index 5ba5931795e..8634981f57c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Fill.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Fill.java @@ -53,8 +53,6 @@ *
  • Because {@code tf.fill} evaluates at graph runtime, it supports dynamic shapes * based on other runtime Tensors, unlike {@code tf.constant}.
  • * - * - * @param data type for {@code output} output */ @OpMetadata( opType = Fill.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Gather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Gather.java index 1b1e3f888ee..cbd71fe5e36 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Gather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Gather.java @@ -58,8 +58,6 @@ * On GPU, if an out of bound index is found, a 0 is stored in the * corresponding output value. *
    See also {@code tf.batch_gather} and {@code tf.gather_nd}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Gather.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GatherNd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GatherNd.java index b1a05118129..406323aea9c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GatherNd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GatherNd.java @@ -125,8 +125,6 @@ * output = [['b0', 'b1'], ['d0', 'c1']] * *
    See also {@code tf.gather} and {@code tf.batch_gather}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = GatherNd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetSessionTensor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetSessionTensor.java index a2445004e6d..0cccfb42045 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetSessionTensor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GetSessionTensor.java @@ -37,8 +37,6 @@ /** * Get the value of the tensor specified by its handle. - * - * @param data type for {@code value} output */ @OpMetadata( opType = GetSessionTensor.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GuaranteeConst.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GuaranteeConst.java index 8839f77471f..c4235de8ff2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GuaranteeConst.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/GuaranteeConst.java @@ -39,8 +39,6 @@ *
    Only accepts value typed tensors as inputs and rejects resource variable handles * as input. *
    Returns the input tensor without modification. - * - * @param data type for {@code output} output */ @OpMetadata( opType = GuaranteeConst.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HistogramFixedWidth.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HistogramFixedWidth.java index 0846ac056c0..782cfc69f05 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HistogramFixedWidth.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HistogramFixedWidth.java @@ -51,8 +51,6 @@ * variables.global_variables_initializer().run() * sess.run(hist) => [2, 1, 1, 0, 2] * - * - * @param data type for {@code out} output */ @OpMetadata( opType = HistogramFixedWidth.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HostConst.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HostConst.java index 8aa7bf2e13c..82f5ef8f295 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HostConst.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/HostConst.java @@ -37,8 +37,6 @@ /** * Returns a constant tensor on the host. Only for writing C++ tests. - * - * @param data type for {@code output} output */ @OpMetadata( opType = HostConst.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Identity.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Identity.java index 12c84344373..d0729ab93da 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Identity.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Identity.java @@ -35,8 +35,6 @@ /** * Return a tensor with the same shape and contents as the input tensor or value. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Identity.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ImmutableConst.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ImmutableConst.java index 47cbe749ee9..12d647268ba 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ImmutableConst.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ImmutableConst.java @@ -38,8 +38,6 @@ /** * Returns immutable tensor from memory region. * The current implementation memmaps the tensor from a file. - * - * @param data type for {@code tensor} output */ @OpMetadata( opType = ImmutableConst.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceAdd.java index c42388fc55c..78f37851589 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceAdd.java @@ -39,8 +39,6 @@ *
      * Computes y = x; y[i, :] += v; return y.
      *
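To make the {@code y = x; y[i, :] += v} semantics above concrete, a minimal eager-mode sketch using the {@code Ops} API might look like this (the class name and the concrete values are illustrative assumptions, not part of the generated sources or of this diff):

import org.tensorflow.op.Ops;
import org.tensorflow.op.core.InplaceAdd;
import org.tensorflow.types.TFloat32;

public class InplaceAddSketch {
  public static void main(String[] args) {
    Ops tf = Ops.create();  // eager execution
    // y = x; y[0, :] += {10, 20}; rows not listed in i are left unchanged.
    InplaceAdd<TFloat32> y = tf.inplaceAdd(
        tf.constant(new float[][] {{1f, 2f}, {3f, 4f}}),  // x
        tf.constant(new int[] {0}),                       // i (row indices)
        tf.constant(new float[][] {{10f, 20f}}));         // v
    System.out.println(y.asTensor());  // expected: [[11.0, 22.0], [3.0, 4.0]]
  }
}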
    - * - * @param data type for {@code y} output */ @OpMetadata( opType = InplaceAdd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceSub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceSub.java index a39bf6d741b..31d0287aab2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceSub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceSub.java @@ -40,8 +40,6 @@ * * Computes y = x; y[i, :] -= v; return y. * - * - * @param data type for {@code y} output */ @OpMetadata( opType = InplaceSub.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceUpdate.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceUpdate.java index 8aecb6edf8c..d34e0f15011 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceUpdate.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/InplaceUpdate.java @@ -39,8 +39,6 @@ * Computes {@code x[i, :] = v; return x}. *
    Originally this function is mutative however for compilation we make this * operation create / operate on a copy of {@code x}. - * - * @param data type for {@code y} output */ @OpMetadata( opType = InplaceUpdate.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LinSpace.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LinSpace.java index 3473ddf487e..317eb054e29 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LinSpace.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LinSpace.java @@ -42,8 +42,6 @@ *
      * tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0  11.0  12.0]
      *
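The {@code tf.linspace(10.0, 12.0, 3)} example quoted above can be reproduced from Java roughly as follows (eager mode; the class name is an illustrative assumption, not part of the generated sources or of this diff):

import org.tensorflow.op.Ops;
import org.tensorflow.op.core.LinSpace;
import org.tensorflow.types.TFloat32;

public class LinSpaceSketch {
  public static void main(String[] args) {
    Ops tf = Ops.create();  // eager execution
    // Three evenly spaced values from 10.0 to 12.0, as in the Javadoc example.
    LinSpace<TFloat32> values =
        tf.linSpace(tf.constant(10.0f), tf.constant(12.0f), tf.constant(3));
    System.out.println(values.asTensor());  // expected: [10.0, 11.0, 12.0]
  }
}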
    - * - * @param data type for {@code output} output */ @OpMetadata( opType = LinSpace.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableExport.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableExport.java index 7406671423c..7546b26f8f4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableExport.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableExport.java @@ -36,10 +36,6 @@ /** * Outputs all keys and values in the table. - * - * @param data type for {@code keys} output - * - * @param data type for {@code values} output */ @OpMetadata( opType = LookupTableExport.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableFind.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableFind.java index b097f2ee81d..1155c94662f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableFind.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LookupTableFind.java @@ -39,8 +39,6 @@ * The output {@code values} is of the type of the table values. *
    The scalar {@code default_value} is the value output for keys not present in the * table. It must also be of the same type as the table values. - * - * @param data type for {@code values} output */ @OpMetadata( opType = LookupTableFind.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LowerBound.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LowerBound.java index 8cf633e2d7f..2a4b761a8fd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LowerBound.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/LowerBound.java @@ -51,8 +51,6 @@ *
    result = LowerBound(sorted_sequence, values) *
    result == [[1, 2, 2], * [0, 1, 5]] - * - * @param data type for {@code output} output */ @OpMetadata( opType = LowerBound.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Max.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Max.java index fb03ee5c942..04c4f1481d3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Max.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Max.java @@ -39,8 +39,6 @@ * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Max.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Merge.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Merge.java index 7e4c77434b9..f5a189c9c58 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Merge.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Merge.java @@ -41,8 +41,6 @@ * It is usually combined with {@code Switch} to implement branching. *
    {@code Merge} forwards the first tensor to become available to {@code output}, and sets * {@code value_index} to its index in {@code inputs}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Merge.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Min.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Min.java index f3db8fedac0..89ac31b5854 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Min.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Min.java @@ -39,8 +39,6 @@ * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Min.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPad.java index e63036ec117..751bec8fd66 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPad.java @@ -57,8 +57,6 @@ * [5, 4, 4, 5, 6, 6, 5] * [5, 4, 4, 5, 6, 6, 5]] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = MirrorPad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPadGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPadGrad.java index 64235a34e0a..d1286e4bd89 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPadGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/MirrorPadGrad.java @@ -50,8 +50,6 @@ * pad(t, paddings) ==> [[ 1, 5] * [11, 28]] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = MirrorPadGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclAllReduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclAllReduce.java index d49045a1bad..5e8f5709b65 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclAllReduce.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclAllReduce.java @@ -46,8 +46,6 @@ * num_devices: The number of devices participating in this reduction. * shared_name: Identifier that shared between ops of the same reduction. * - * @param data type for {@code data} output - * * @deprecated use {@link org.tensorflow.op.distribute.NcclAllReduce} instead */ @OpMetadata( diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclBroadcast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclBroadcast.java index 4d5c2d771de..5e6c2a583ef 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclBroadcast.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclBroadcast.java @@ -43,8 +43,6 @@ * output: The same as input. * shape: The shape of the input tensor. 
* - * @param data type for {@code output} output - * * @deprecated use {@link org.tensorflow.op.distribute.NcclBroadcast} instead */ @OpMetadata( diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclReduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclReduce.java index 8b050aba7e3..cd3dea3af6f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclReduce.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NcclReduce.java @@ -43,8 +43,6 @@ * data: the value of the reduction across all {@code num_devices} devices. * reduction: the reduction operation to perform. * - * @param data type for {@code data} output - * * @deprecated use {@link org.tensorflow.op.distribute.NcclReduce} instead */ @OpMetadata( diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NextIteration.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NextIteration.java index 33e50ce1b5d..1f0f73c672f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NextIteration.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/NextIteration.java @@ -35,8 +35,6 @@ /** * Makes its input available to the next iteration. - * - * @param data type for {@code output} output */ @OpMetadata( opType = NextIteration.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OneHot.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OneHot.java index 09f55f7eaff..8ed3c25bd8e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OneHot.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OneHot.java @@ -111,8 +111,6 @@ * [0.0, 0.0, 0.0] // one_hot(-1) * ] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = OneHot.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OnesLike.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OnesLike.java index b69df0d0952..51178e062f4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OnesLike.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/OnesLike.java @@ -35,8 +35,6 @@ /** * Returns a tensor of ones with the same shape and type as x. 
- * - * @param data type for {@code y} output */ @OpMetadata( opType = OnesLike.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Pad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Pad.java index d80e87f0f2d..60ddbcf6817 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Pad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Pad.java @@ -56,8 +56,6 @@ * [0, 0, 2, 2, 0, 0] * [0, 0, 0, 0, 0, 0]] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = Pad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelConcat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelConcat.java index c5cbde1618c..b12c3b896aa 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelConcat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelConcat.java @@ -50,8 +50,6 @@ * that the input shapes be known during graph construction. Parallel concat * will copy pieces of the input into the output as they become available, in * some situations this can provide a performance benefit. - * - * @param data type for {@code output} output */ @OpMetadata( opType = ParallelConcat.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelDynamicStitch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelDynamicStitch.java index a23c3d135a8..c9fd16880ca 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelDynamicStitch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ParallelDynamicStitch.java @@ -89,8 +89,6 @@ *
    * *
    - * - * @param data type for {@code merged} output */ @OpMetadata( opType = ParallelDynamicStitch.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Placeholder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Placeholder.java index 634500dcfc0..f4c450973da 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Placeholder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Placeholder.java @@ -40,8 +40,6 @@ * N.B. This operation will fail with an error if it is executed. It is * intended as a way to represent a value that will always be fed, and to * provide attrs that enable the fed value to be checked at runtime. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Placeholder.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/PlaceholderWithDefault.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/PlaceholderWithDefault.java index 9604ea0a92a..202d4cc476c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/PlaceholderWithDefault.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/PlaceholderWithDefault.java @@ -36,8 +36,6 @@ /** * A placeholder op that passes through {@code input} when its output is not fed. - * - * @param data type for {@code output} output */ @OpMetadata( opType = PlaceholderWithDefault.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Prod.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Prod.java index 71c7f986eb6..3f1c696a0bc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Prod.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Prod.java @@ -40,8 +40,6 @@ * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Prod.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/QuantizedReshape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/QuantizedReshape.java index 6e92b83bf89..84816c6893f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/QuantizedReshape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/QuantizedReshape.java @@ -37,8 +37,6 @@ /** * Reshapes a quantized tensor as per the Reshape op. - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedReshape.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RandomIndexShuffle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RandomIndexShuffle.java index 68cd7f9f0eb..76538abf9cb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RandomIndexShuffle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RandomIndexShuffle.java @@ -39,8 +39,6 @@ *
    If multiple inputs are vectors (matrix in case of seed) then the size of the * first dimension must match. *
    The outputs are deterministic. - * - * @param data type for {@code output} output */ @OpMetadata( opType = RandomIndexShuffle.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Range.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Range.java index 0699bd59b09..702214095a5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Range.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Range.java @@ -44,8 +44,6 @@ * # 'delta' is 3 * tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = Range.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReadVariableOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReadVariableOp.java index 236991942ee..f57c2781c3f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReadVariableOp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReadVariableOp.java @@ -41,8 +41,6 @@ * writes on which this operation depends directly or indirectly, and to not be * influenced by any of the writes which depend directly or indirectly on this * operation. - * - * @param data type for {@code value} output */ @OpMetadata( opType = ReadVariableOp.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Recv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Recv.java index 1853328543d..5b3caab37b8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Recv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Recv.java @@ -36,8 +36,6 @@ /** * Receives the named tensor from send_device on recv_device. - * - * @param data type for {@code tensor} output */ @OpMetadata( opType = Recv.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMax.java index 529841fd5fa..dca6c6a5ffc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMax.java @@ -39,8 +39,6 @@ * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. - * - * @param data type for {@code output} output */ @OpMetadata( opType = ReduceMax.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMin.java index f349357096b..a7e544cfaab 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceMin.java @@ -39,8 +39,6 @@ * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = ReduceMin.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceProd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceProd.java index 49008ad1a36..3dc53ad9c58 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceProd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceProd.java @@ -40,8 +40,6 @@ * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. - * - * @param data type for {@code output} output */ @OpMetadata( opType = ReduceProd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceSum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceSum.java index 05851e60764..bbe161f9210 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceSum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReduceSum.java @@ -40,8 +40,6 @@ * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. - * - * @param data type for {@code output} output */ @OpMetadata( opType = ReduceSum.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefEnter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefEnter.java index 888c0ee977b..218092a2563 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefEnter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefEnter.java @@ -39,8 +39,6 @@ * {@code is_constant} is true, {@code output} is a constant in the child frame; otherwise * it may be changed in the child frame. At most {@code parallel_iterations} iterations * are run in parallel in the child frame. - * - * @param data type for {@code output} output */ @OpMetadata( opType = RefEnter.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefExit.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefExit.java index c23ff2d03d7..9a840da2c3d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefExit.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefExit.java @@ -36,8 +36,6 @@ /** * Exits the current frame to its parent frame. * Exit makes its input {@code data} available to the parent frame. - * - * @param data type for {@code output} output */ @OpMetadata( opType = RefExit.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefIdentity.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefIdentity.java index 53d515be8e1..c3bb004b548 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefIdentity.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefIdentity.java @@ -35,8 +35,6 @@ /** * Return the same ref tensor as the input ref tensor. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = RefIdentity.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefMerge.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefMerge.java index 9354cb2847b..4baf6cc6260 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefMerge.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefMerge.java @@ -41,8 +41,6 @@ * It is usually combined with {@code Switch} to implement branching. *
    {@code Merge} forwards the first tensor for become available to {@code output}, and sets * {@code value_index} to its index in {@code inputs}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = RefMerge.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefNextIteration.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefNextIteration.java index 5c7f1d2c4b7..ef647c70cd6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefNextIteration.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefNextIteration.java @@ -35,8 +35,6 @@ /** * Makes its input available to the next iteration. - * - * @param data type for {@code output} output */ @OpMetadata( opType = RefNextIteration.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSelect.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSelect.java index 02c6ddc8e2f..d7ffa33956e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSelect.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSelect.java @@ -37,8 +37,6 @@ /** * Forwards the {@code index}th element of {@code inputs} to {@code output}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = RefSelect.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSwitch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSwitch.java index 04a2d4811ab..2e97b2bbcad 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSwitch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RefSwitch.java @@ -39,8 +39,6 @@ * If {@code pred} is true, the {@code data} input is forwarded to {@code output_true}. Otherwise, * the data goes to {@code output_false}. *
    See also {@code Switch} and {@code Merge}. - * - * @param data type for {@code output_false} output */ @OpMetadata( opType = RefSwitch.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Relayout.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Relayout.java index 959987e6200..503d3cfe42a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Relayout.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Relayout.java @@ -35,8 +35,6 @@ /** * The Relayout operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = Relayout.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RelayoutLike.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RelayoutLike.java index 7fd8a91fb8b..499cb8d6c72 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RelayoutLike.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/RelayoutLike.java @@ -35,8 +35,6 @@ /** * The RelayoutLike operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = RelayoutLike.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reshape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reshape.java index 4b1ce466a7d..54c0aba057e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reshape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reshape.java @@ -90,8 +90,6 @@ * # shape `[]` reshapes to a scalar * reshape(t, []) ==> 7 * - * - * @param data type for {@code output} output */ @OpMetadata( opType = Reshape.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceCountUpTo.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceCountUpTo.java index f8e5cf5abef..0ca0faa179e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceCountUpTo.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceCountUpTo.java @@ -37,8 +37,6 @@ /** * Increments variable pointed to by 'resource' until it reaches 'limit'. - * - * @param data type for {@code output} output */ @OpMetadata( opType = ResourceCountUpTo.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGather.java index 5dff2d95dc2..c458bacea4c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGather.java @@ -49,8 +49,6 @@ * # Higher rank indices * output[i, ..., j, :, ... 
:] = params[indices[i, ..., j], :, ..., :] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = ResourceGather.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGatherNd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGatherNd.java index 1a86a282ab9..f9c6b72b544 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGatherNd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ResourceGatherNd.java @@ -37,8 +37,6 @@ /** * The ResourceGatherNd operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = ResourceGatherNd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reverse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reverse.java index 65a6ac9ab0c..711b7148209 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reverse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Reverse.java @@ -76,8 +76,6 @@ * [16, 17, 18, 19], * [12, 13, 14, 15]]]] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = Reverse.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReverseSequence.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReverseSequence.java index b7eb3fb25a2..e18f16874f0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReverseSequence.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ReverseSequence.java @@ -84,8 +84,6 @@ * output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...] * output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = ReverseSequence.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Roll.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Roll.java index a2f04750d53..e190730b970 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Roll.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Roll.java @@ -54,8 +54,6 @@ * # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]] * roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = Roll.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterAdd.java index bc66b56b3d1..9f0bc6a526f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterAdd.java @@ -55,8 +55,6 @@ *
    * *
    - * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = ScatterAdd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterDiv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterDiv.java index 083f4de2a81..902d11400e5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterDiv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterDiv.java @@ -52,8 +52,6 @@ *
    Duplicate entries are handled correctly: if multiple {@code indices} reference * the same location, their contributions divide. *
    Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = []}. - * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = ScatterDiv.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMax.java index 162556fb11c..9b761e52419 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMax.java @@ -54,8 +54,6 @@ *
    * *
    - * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = ScatterMax.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMin.java index 4264f92bc7e..7f725ad19d0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMin.java @@ -54,8 +54,6 @@ *
    * *
    - * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = ScatterMin.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMul.java index 7fb20e9d36e..ae8bbca9670 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterMul.java @@ -52,8 +52,6 @@ *
    Duplicate entries are handled correctly: if multiple {@code indices} reference * the same location, their contributions multiply. *
    Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = []}. - * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = ScatterMul.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNd.java index 34487ebf9d7..45b242e6259 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNd.java @@ -109,8 +109,6 @@ * *
    Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, the index is ignored. - * - * @param data type for {@code output} output */ @OpMetadata( opType = ScatterNd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdAdd.java index aef9eed4a32..f427c60fe5e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdAdd.java @@ -62,8 +62,6 @@ * *
    See {@code tf.scatter_nd} for more details about how to make updates to * slices. - * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = ScatterNdAdd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMax.java index 0adccafee2a..1acea47e149 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMax.java @@ -36,8 +36,6 @@ /** * Computes element-wise maximum. - * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = ScatterNdMax.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMin.java index d2780381fcb..357ba576649 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdMin.java @@ -36,8 +36,6 @@ /** * Computes element-wise minimum. - * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = ScatterNdMin.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdNonAliasingAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdNonAliasingAdd.java index 4d29ef748d8..2945b588d8a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdNonAliasingAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdNonAliasingAdd.java @@ -63,8 +63,6 @@ * [1, 13, 3, 14, 14, 6, 7, 20] * *
    See {@code tf.scatter_nd} for more details about how to make updates to slices. - * - * @param data type for {@code output} output */ @OpMetadata( opType = ScatterNdNonAliasingAdd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdSub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdSub.java index b2018d27511..54a8a410f78 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdSub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdSub.java @@ -63,8 +63,6 @@ * *
    See {@code tf.scatter_nd} for more details about how to make updates to * slices. - * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = ScatterNdSub.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdUpdate.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdUpdate.java index 56427f20fac..716b09592cc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdUpdate.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterNdUpdate.java @@ -62,8 +62,6 @@ *
    See {@code tf.scatter_nd} for more details about how to make updates to * slices. *
    See also {@code tf.scatter_update} and {@code tf.batch_scatter_update}. - * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = ScatterNdUpdate.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterSub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterSub.java index 06d274ff356..4686a81470f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterSub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterSub.java @@ -54,8 +54,6 @@ *

    * *
    - * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = ScatterSub.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterUpdate.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterUpdate.java index 711cbf7485f..60e22039589 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterUpdate.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ScatterUpdate.java @@ -57,8 +57,6 @@ * * *

    See also {@code tf.batch_scatter_update} and {@code tf.scatter_nd_update}. - * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = ScatterUpdate.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Select.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Select.java index 71caff86d14..c88ea468f39 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Select.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Select.java @@ -36,8 +36,6 @@ /** * The SelectV2 operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = Select.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SetDiff1d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SetDiff1d.java index 61af8e762a2..562b2088b93 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SetDiff1d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SetDiff1d.java @@ -54,10 +54,6 @@ * out ==> [2, 4, 6] * idx ==> [1, 3, 5] * - * - * @param data type for {@code out} output - * - * @param data type for {@code idx} output */ @OpMetadata( opType = SetDiff1d.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Shape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Shape.java index 4f9f9115847..2f7592fbc03 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Shape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Shape.java @@ -44,8 +44,6 @@ * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] * shape(t) ==> [2, 2, 3] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = Shape.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ShapeN.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ShapeN.java index b56a39452d5..b53a00a1a82 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ShapeN.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ShapeN.java @@ -41,8 +41,6 @@ /** * Returns shape of tensors. * This operation returns N 1-D integer tensors representing shape of {@code input[i]s}. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = ShapeN.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Size.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Size.java index 1ad02bc0f9b..2be90850900 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Size.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Size.java @@ -45,8 +45,6 @@ * # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] * size(t) ==> 12 * - * - * @param data type for {@code output} output */ @OpMetadata( opType = Size.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Slice.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Slice.java index b53cae539a0..37a168fb6f7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Slice.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Slice.java @@ -41,8 +41,6 @@ * 'begin'. *

    Requirements: * 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n) - * - * @param data type for {@code output} output */ @OpMetadata( opType = Slice.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Snapshot.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Snapshot.java index d8b1ed563d9..bafca31221f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Snapshot.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Snapshot.java @@ -35,8 +35,6 @@ /** * Returns a copy of the input tensor. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Snapshot.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SpaceToBatchNd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SpaceToBatchNd.java index d56e6ef8709..2a366e46641 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SpaceToBatchNd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SpaceToBatchNd.java @@ -132,8 +132,6 @@ * *
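// Editor's sketch (not part of the generated source): the Shape, Size and Slice ops documented
// above, exercised through the Ops facade. The tensor follows the Javadoc examples
// (shape(t) ==> [2, 2, 3], size(t) ==> 12); the setup code and names are illustrative assumptions.
import org.tensorflow.EagerSession;
import org.tensorflow.op.Ops;
import org.tensorflow.op.core.Constant;
import org.tensorflow.op.core.Shape;
import org.tensorflow.op.core.Size;
import org.tensorflow.op.core.Slice;
import org.tensorflow.types.TInt32;

public final class ShapeSizeSliceSketch {
  public static void main(String[] args) {
    try (EagerSession session = EagerSession.create()) {
      Ops tf = Ops.create(session);
      // 't' from the Shape/Size Javadoc: shape [2, 2, 3], 12 elements in total.
      Constant<TInt32> t = tf.constant(new int[][][] {
          {{1, 1, 1}, {2, 2, 2}}, {{3, 3, 3}, {4, 4, 4}}});
      Shape<TInt32> shape = tf.shape(t); // ==> [2, 2, 3]
      Size<TInt32> size = tf.size(t);    // ==> 12
      // Slice a 1x2x3 block starting at [1, 0, 0]; each begin[i] + size[i] stays within
      // dimension Di, satisfying the Slice requirement quoted above.
      Constant<TInt32> begin = tf.constant(new int[] {1, 0, 0});
      Constant<TInt32> sliceSize = tf.constant(new int[] {1, 2, 3});
      Slice<TInt32> block = tf.slice(t, begin, sliceSize); // ==> [[[3, 3, 3], [4, 4, 4]]]
      System.out.println(shape.asTensor().getInt(2) + " " + size.asTensor().getInt()); // "3 12"
    }
  }
}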

    Among others, this operation is useful for reducing atrous convolution into * regular convolution. - * - * @param data type for {@code output} output */ @OpMetadata( opType = SpaceToBatchNd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Split.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Split.java index f6a01ed1950..dc4fad88677 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Split.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Split.java @@ -38,8 +38,6 @@ /** * Splits a tensor into {@code num_split} tensors along one dimension. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Split.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SplitV.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SplitV.java index 8d1beb3fc5b..cc0525e9645 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SplitV.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SplitV.java @@ -39,8 +39,6 @@ /** * Splits a tensor into {@code num_split} tensors along one dimension. - * - * @param data type for {@code output} output */ @OpMetadata( opType = SplitV.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Squeeze.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Squeeze.java index 3ccc9dff638..52155b47d43 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Squeeze.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Squeeze.java @@ -50,8 +50,6 @@ * # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] * shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = Squeeze.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Stack.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Stack.java index 0022997321a..976a86955b3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Stack.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Stack.java @@ -51,8 +51,6 @@ * pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]] * *

    This is the opposite of {@code unpack}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Stack.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StackPop.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StackPop.java index a6a3021ce14..502cfcc8c06 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StackPop.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StackPop.java @@ -36,8 +36,6 @@ /** * Pop the element at the top of the stack. - * - * @param data type for {@code elem} output */ @OpMetadata( opType = StackPop.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StackPush.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StackPush.java index c43aa1de30e..f9f05ff1912 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StackPush.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StackPush.java @@ -35,8 +35,6 @@ /** * Push an element onto the stack. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StackPush.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StochasticCastToInt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StochasticCastToInt.java index 29da2cb9a53..a06a2c8017d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StochasticCastToInt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StochasticCastToInt.java @@ -40,8 +40,6 @@ * Stochastically cast a given tensor from floats to ints. * The values are cast with a deterministic pseudo-random tensor from a uniform distribution generated from user given key, counter, algorithm. Values will saturate if out of the specified integer type range, and will become zero if inputs are NaN. *

    The outputs are a deterministic function of {@code input}, {@code key}, {@code counter}, {@code alg}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StochasticCastToInt.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StopGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StopGradient.java index c2086cb3e92..fb486c42253 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StopGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StopGradient.java @@ -85,8 +85,6 @@ *

  • Adversarial training, where no backprop should happen through the adversarial * example generation process.
  • * - * - * @param data type for {@code output} output */ @OpMetadata( opType = StopGradient.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSlice.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSlice.java index 6b8953f7995..ec55dae1c24 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSlice.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSlice.java @@ -133,8 +133,6 @@ *

    Requirements: * {@code 0 != strides[i] for i in [0, m)} * {@code ellipsis_mask must be a power of two (only one ellipsis)} - * - * @param data type for {@code output} output */ @OpMetadata( opType = StridedSlice.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceAssign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceAssign.java index b2ab8d606e2..2911a675905 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceAssign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceAssign.java @@ -41,8 +41,6 @@ * {@code begin}, {@code end}, {@code strides}, etc. work exactly as in {@code StridedSlice}. *

    NOTE this op currently does not support broadcasting and so {@code value}'s * shape must be exactly the shape produced by the slice of {@code ref}. - * - * @param data type for {@code output_ref} output */ @OpMetadata( opType = StridedSliceAssign.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceGrad.java index 2a234c9ab7a..fcd7518dd87 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/StridedSliceGrad.java @@ -43,8 +43,6 @@ *

    Arguments are the same as StridedSliceGrad with the exception that * {@code dy} is the input gradient to be propagated and {@code shape} is the * shape of {@code StridedSlice}'s {@code input}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StridedSliceGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Sum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Sum.java index 15957ea2189..abcdb1ee9ef 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Sum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Sum.java @@ -40,8 +40,6 @@ * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Sum.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SwitchCond.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SwitchCond.java index c6a8f810467..c6842c9ab87 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SwitchCond.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/SwitchCond.java @@ -39,8 +39,6 @@ * If {@code pred} is true, the {@code data} input is forwarded to {@code output_true}. Otherwise, * the data goes to {@code output_false}. *

    See also {@code RefSwitch} and {@code Merge}. - * - * @param data type for {@code output_false} output */ @OpMetadata( opType = SwitchCond.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TemporaryVariable.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TemporaryVariable.java index 3e8c8a70ec8..d66021bb728 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TemporaryVariable.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TemporaryVariable.java @@ -48,8 +48,6 @@ * var = state_ops.assign(var, [[4.0, 5.0]]) * var = state_ops.assign_add(var, [[6.0, 7.0]]) * final = state_ops._destroy_temporary_variable(var, var_name=var_name) - * - * @param data type for {@code ref} output */ @OpMetadata( opType = TemporaryVariable.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayConcat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayConcat.java index b3dbc08ef3e..75ba48a0102 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayConcat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayConcat.java @@ -48,8 +48,6 @@ * (n0 + n1 + ... + n(T-1) x d0 x d1 x ...) * *

    All elements must have the same shape (excepting the first dimension). - * - * @param data type for {@code value} output */ @OpMetadata( opType = TensorArrayConcat.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGather.java index 0f7fd351089..60d8b437b00 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayGather.java @@ -40,8 +40,6 @@ /** * Gather specific elements from the TensorArray into output {@code value}. * All elements selected by {@code indices} must have the same shape. - * - * @param data type for {@code value} output */ @OpMetadata( opType = TensorArrayGather.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayPack.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayPack.java index 6e52e6ef906..d1cf5c89e65 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayPack.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayPack.java @@ -39,8 +39,6 @@ /** * The TensorArrayPack operation - * - * @param data type for {@code value} output */ @OpMetadata( opType = TensorArrayPack.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayRead.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayRead.java index 6765205c463..f5a0aa073a7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayRead.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorArrayRead.java @@ -38,8 +38,6 @@ /** * Read an element from the TensorArray into output {@code value}. - * - * @param data type for {@code value} output */ @OpMetadata( opType = TensorArrayRead.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListConcat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListConcat.java index 664783a09c5..70ef65f9314 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListConcat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListConcat.java @@ -48,8 +48,6 @@ * is not already set. * tensor: The concated result. * lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient. - * - * @param data type for {@code tensor} output */ @OpMetadata( opType = TensorListConcat.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListElementShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListElementShape.java index d955a6a636d..6190f9c1c01 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListElementShape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListElementShape.java @@ -39,8 +39,6 @@ * The shape of the elements of the given list, as a tensor. 
* input_handle: the list * element_shape: the shape of elements of the list - * - * @param data type for {@code element_shape} output */ @OpMetadata( opType = TensorListElementShape.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGather.java index 27a627b4759..ac725c72b97 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGather.java @@ -42,8 +42,6 @@ *

    input_handle: The input tensor list. * indices: The indices used to index into the list. * values: The tensor. - * - * @param data type for {@code values} output */ @OpMetadata( opType = TensorListGather.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGetItem.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGetItem.java index 1ea76d2101e..244704b5754 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGetItem.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListGetItem.java @@ -40,8 +40,6 @@ * input_handle: the list * index: the position in the list from which an element will be retrieved * item: the element at that position - * - * @param data type for {@code item} output */ @OpMetadata( opType = TensorListGetItem.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPopBack.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPopBack.java index ee7a5cde1c9..af805e71f9b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPopBack.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListPopBack.java @@ -42,8 +42,6 @@ * tensor: the withdrawn last element of the list * element_dtype: the type of elements in the list * element_shape: the shape of the output tensor - * - * @param data type for {@code tensor} output */ @OpMetadata( opType = TensorListPopBack.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListStack.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListStack.java index fec4f942658..2d058b8e00d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListStack.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorListStack.java @@ -41,8 +41,6 @@ *

    input_handle: the input list * tensor: the gathered result * num_elements: optional. If not -1, the number of elements in the list. - * - * @param data type for {@code tensor} output */ @OpMetadata( opType = TensorListStack.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapLookup.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapLookup.java index dccdc1ee996..a3e8b54e888 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapLookup.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapLookup.java @@ -39,8 +39,6 @@ * input_handle: the input map * key: the key to be looked up * value: the value found from the given key - * - * @param data type for {@code value} output */ @OpMetadata( opType = TensorMapLookup.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapStackKeys.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapStackKeys.java index b2a217c98e6..8942b2f9f8b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapStackKeys.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorMapStackKeys.java @@ -38,8 +38,6 @@ * Returns a Tensor stack of all keys in a tensor map. * input_handle: the input map * keys: the returned Tensor of all keys in the map - * - * @param data type for {@code keys} output */ @OpMetadata( opType = TensorMapStackKeys.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdAdd.java index a72a1defde1..38cfdb3a764 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdAdd.java @@ -96,8 +96,6 @@ * *

    Note: on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, the index is ignored. - * - * @param data type for {@code output} output */ @OpMetadata( opType = TensorScatterNdAdd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMax.java index ceddda24a20..48adcec0d0b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMax.java @@ -50,8 +50,6 @@ * * *

    Refer to {@code tf.tensor_scatter_nd_update} for more details. - * - * @param data type for {@code output} output */ @OpMetadata( opType = TensorScatterNdMax.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMin.java index b6da07b4c31..b69849d62ee 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMin.java @@ -36,8 +36,6 @@ /** * The TensorScatterMin operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = TensorScatterNdMin.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdSub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdSub.java index 3623707e77e..a7fd984a513 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdSub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdSub.java @@ -91,8 +91,6 @@ * *

    Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, the index is ignored. - * - * @param data type for {@code output} output */ @OpMetadata( opType = TensorScatterNdSub.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdUpdate.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdUpdate.java index 3c53fca7eab..10e10aea036 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdUpdate.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdUpdate.java @@ -65,8 +65,6 @@ * indices.shape[:-1] + tensor.shape[indices.shape[-1]:] * *

    For usage examples see the python tf.tensor_scatter_nd_update {@link org.tensorflow.op.Ops#tensorScatterNdUpdate} function - * - * @param data type for {@code output} output */ @OpMetadata( opType = TensorScatterNdUpdate.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorStridedSliceUpdate.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorStridedSliceUpdate.java index 23b2d386a05..de80c141d72 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorStridedSliceUpdate.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorStridedSliceUpdate.java @@ -41,8 +41,6 @@ * {@code strides} etc. work exactly as in {@code StridedSlice}. *
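// Editor's sketch (not part of the generated source): a minimal use of
// Ops#tensorScatterNdUpdate, the method the TensorScatterNdUpdate Javadoc above points to.
// The setup, class name and values are illustrative assumptions.
import org.tensorflow.EagerSession;
import org.tensorflow.op.Ops;
import org.tensorflow.op.core.Constant;
import org.tensorflow.op.core.TensorScatterNdUpdate;
import org.tensorflow.types.TInt32;

public final class TensorScatterNdUpdateSketch {
  public static void main(String[] args) {
    try (EagerSession session = EagerSession.create()) {
      Ops tf = Ops.create(session);
      Constant<TInt32> tensor = tf.constant(new int[] {0, 0, 0, 0, 0, 0, 0, 0});
      Constant<TInt32> indices = tf.constant(new int[][] {{4}, {3}, {1}, {7}});
      Constant<TInt32> updates = tf.constant(new int[] {9, 10, 11, 12});
      // Writes each update at the position named by its index into a new tensor (the input
      // is not modified in place): expected result is [0, 11, 0, 10, 9, 0, 0, 12].
      TensorScatterNdUpdate<TInt32> result = tf.tensorScatterNdUpdate(tensor, indices, updates);
      System.out.println(result.asTensor().getInt(4)); // 9
    }
  }
}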

    NOTE this op currently does not support broadcasting and so {@code value}'s shape * must be exactly the shape produced by the slice of {@code input}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = TensorStridedSliceUpdate.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Tile.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Tile.java index c9a58b9158c..7339fdbb3de 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Tile.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Tile.java @@ -67,8 +67,6 @@ * * * - * - * @param data type for {@code output} output */ @OpMetadata( opType = Tile.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unbatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unbatch.java index a49747c48ca..fa4c04f3c27 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unbatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unbatch.java @@ -53,8 +53,6 @@ * shared_name: Instances of Unbatch with the same container and shared_name are * assumed to possibly belong to the same batch. If left empty, the op name will * be used as the shared name. - * - * @param data type for {@code unbatched_tensor} output */ @OpMetadata( opType = Unbatch.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnbatchGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnbatchGrad.java index 912e08c3a6b..25418f3986f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnbatchGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnbatchGrad.java @@ -49,8 +49,6 @@ * shared_name: Instances of UnbatchGrad with the same container and shared_name * are assumed to possibly belong to the same batch. If left empty, the op name * will be used as the shared name. - * - * @param data type for {@code batched_grad} output */ @OpMetadata( opType = UnbatchGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UniformQuantizedClipByValue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UniformQuantizedClipByValue.java index ca3c5dfdd14..f1a4eb739d1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UniformQuantizedClipByValue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UniformQuantizedClipByValue.java @@ -40,8 +40,6 @@ * Given quantized {@code operand} which was quantized using {@code scales} and {@code zero_points}, performs clip by value using {@code min} and {@code max} values. * If quantization_axis is -1 (per-tensor quantized), the entire operand is clipped using scalar min, max. * Otherwise (per-channel quantized), the clipping is also done per-channel. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = UniformQuantizedClipByValue.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unique.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unique.java index c4324a9f324..4d17cf9f141 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unique.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unique.java @@ -74,10 +74,6 @@ * [2, 0]] * idx ==> [0, 1, 1] * - * - * @param data type for {@code y} output - * - * @param data type for {@code idx} output */ @OpMetadata( opType = Unique.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UniqueWithCounts.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UniqueWithCounts.java index 80a1804887f..8046082f95b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UniqueWithCounts.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UniqueWithCounts.java @@ -78,10 +78,6 @@ * idx ==> [0, 1, 1] * count ==> [1, 2] * - * - * @param data type for {@code y} output - * - * @param data type for {@code idx} output */ @OpMetadata( opType = UniqueWithCounts.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnravelIndex.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnravelIndex.java index 5393635bc69..ec7c8f8c6e9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnravelIndex.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UnravelIndex.java @@ -52,8 +52,6 @@ *

{@literal @}compatibility(numpy)
    * Equivalent to np.unravel_index *
    {@literal @}end_compatibility - * - * @param data type for {@code output} output */ @OpMetadata( opType = UnravelIndex.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unstack.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unstack.java index fd20a76940d..64c8de23911 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unstack.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Unstack.java @@ -46,8 +46,6 @@ * and each tensor in {@code output} will have shape {@code (A, C, D)}. * Etc. *

    This is the opposite of {@code pack}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Unstack.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UpperBound.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UpperBound.java index d5e939ffde6..78e45391c8a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UpperBound.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/UpperBound.java @@ -51,8 +51,6 @@ *

    result = UpperBound(sorted_sequence, values) *

    result == [[1, 2, 4], * [0, 2, 5]] - * - * @param data type for {@code output} output */ @OpMetadata( opType = UpperBound.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Variable.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Variable.java index a0febf9c223..d8b09bfddde 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Variable.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/Variable.java @@ -40,8 +40,6 @@ * Outputs a ref to the tensor state so it may be read or modified. * TODO(zhifengc/mrry): Adds a pointer to a more detail document * about sharing states in tensorflow. - * - * @param data type for {@code ref} output */ @OpMetadata( opType = Variable.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VariableShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VariableShape.java index 3f94b9efbd6..abfd8d7c504 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VariableShape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/VariableShape.java @@ -44,8 +44,6 @@ * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] * shape(t) ==> [2, 2, 3] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = VariableShape.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ZerosLike.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ZerosLike.java index 792a37d112c..497cf5128b8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ZerosLike.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/ZerosLike.java @@ -35,8 +35,6 @@ /** * Returns a tensor of zeros with the same shape and type as x. - * - * @param data type for {@code y} output */ @OpMetadata( opType = ZerosLike.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LeakyReluGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LeakyReluGrad.java index a42cc0f51d2..131903f2fc1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LeakyReluGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/LeakyReluGrad.java @@ -35,8 +35,6 @@ /** * Computes rectified linear gradients for a LeakyRelu operation. - * - * @param data type for {@code backprops} output */ @OpMetadata( opType = LeakyReluGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/CheckNumerics.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/CheckNumerics.java index d1aae3e74ad..86215fa9a9c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/CheckNumerics.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/CheckNumerics.java @@ -39,8 +39,6 @@ * that are not a number (NaN) or infinity (Inf). Otherwise, returns the input * tensor. Unlike CheckNumerics (V1), CheckNumericsV2 distinguishes -Inf and +Inf * in the errors it throws. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = CheckNumerics.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientIdentity.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientIdentity.java index 37f2fec7d91..776a971ef27 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientIdentity.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientIdentity.java @@ -37,8 +37,6 @@ * This op is hidden from public in Python. It is used by TensorFlow Debugger to * register gradient tensors for gradient debugging. * This op operates on non-reference-type tensors. - * - * @param data type for {@code output} output */ @OpMetadata( opType = DebugGradientIdentity.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientRefIdentity.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientRefIdentity.java index 5071299a66a..76a9e9029ca 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientRefIdentity.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugGradientRefIdentity.java @@ -37,8 +37,6 @@ * This op is hidden from public in Python. It is used by TensorFlow Debugger to * register gradient tensors for gradient debugging. * This op operates on reference-type tensors. - * - * @param data type for {@code output} output */ @OpMetadata( opType = DebugGradientRefIdentity.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugIdentity.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugIdentity.java index 63c7105e3c8..10edd71d4b1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugIdentity.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugIdentity.java @@ -36,8 +36,6 @@ /** * Provides an identity mapping of the non-Ref type input tensor for debugging. * Provides an identity mapping of the non-Ref type input tensor for debugging. - * - * @param data type for {@code output} output */ @OpMetadata( opType = DebugIdentity.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugNumericsSummary.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugNumericsSummary.java index ec63e9da708..4ff0f11c7bc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugNumericsSummary.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/debugging/DebugNumericsSummary.java @@ -40,8 +40,6 @@ * Computes a numeric summary of the input tensor. The shape of the output * depends on the tensor_debug_mode attribute. * This op is used internally by TensorFlow Debugger (tfdbg) v2. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = DebugNumericsSummary.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclAllReduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclAllReduce.java index c5416746198..7cc17dd9d36 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclAllReduce.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclAllReduce.java @@ -45,8 +45,6 @@ * reduction: the reduction operation to perform. * num_devices: The number of devices participating in this reduction. * shared_name: Identifier that shared between ops of the same reduction. - * - * @param data type for {@code data} output */ @OpMetadata( opType = NcclAllReduce.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclBroadcast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclBroadcast.java index 3824d6a10dd..41a2050e44f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclBroadcast.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclBroadcast.java @@ -42,8 +42,6 @@ *

    input: The input to the broadcast. * output: The same as input. * shape: The shape of the input tensor. - * - * @param data type for {@code output} output */ @OpMetadata( opType = NcclBroadcast.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclReduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclReduce.java index 2a80593be6c..8fcf62bf4cc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclReduce.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/distribute/NcclReduce.java @@ -42,8 +42,6 @@ *

    input: The input to the reduction. * data: the value of the reduction across all {@code num_devices} devices. * reduction: the reduction operation to perform. - * - * @param data type for {@code data} output */ @OpMetadata( opType = NcclReduce.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Cast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Cast.java index 806ad99e2ea..af516490d88 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Cast.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Cast.java @@ -36,8 +36,6 @@ /** * Cast x of type SrcT to y of DstT. - * - * @param data type for {@code y} output */ @OpMetadata( opType = Cast.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Complex.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Complex.java index 6b0a717157c..0da2678549f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Complex.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/dtypes/Complex.java @@ -48,8 +48,6 @@ * # tensor `imag` is [4.75, 5.75] * tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] * - * - * @param data type for {@code out} output */ @OpMetadata( opType = Complex.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustContrast.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustContrast.java index 0a6a141c036..123c74afd50 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustContrast.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustContrast.java @@ -43,8 +43,6 @@ *
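// Editor's sketch (not part of the generated source): the dtypes/Cast op documented above,
// converting TInt32 to TFloat32 through tf.dtypes.cast. The eager setup and names are
// illustrative assumptions.
import org.tensorflow.EagerSession;
import org.tensorflow.op.Ops;
import org.tensorflow.op.core.Constant;
import org.tensorflow.op.dtypes.Cast;
import org.tensorflow.types.TFloat32;
import org.tensorflow.types.TInt32;

public final class CastSketch {
  public static void main(String[] args) {
    try (EagerSession session = EagerSession.create()) {
      Ops tf = Ops.create(session);
      Constant<TInt32> x = tf.constant(new int[] {1, 2, 3});
      // SrcT is inferred from x; DstT is selected by the target type's class literal.
      Cast<TFloat32> y = tf.dtypes.cast(x, TFloat32.class);
      System.out.println(y.asTensor().getFloat(0)); // 1.0
    }
  }
}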

    For each channel, the Op first computes the mean of the image pixels in the * channel and then adjusts each component of each pixel to * {@code (x - mean) * contrast_factor + mean}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = AdjustContrast.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustHue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustHue.java index 45fe50175c4..b0001085638 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustHue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustHue.java @@ -41,8 +41,6 @@ *
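// Editor's sketch (not part of the generated source): contrast adjustment as described above,
// i.e. each channel is rescaled to (x - mean) * contrast_factor + mean. It assumes the ImageOps
// facade exposes adjustContrast for the generated class; shapes, values and names are illustrative.
import org.tensorflow.EagerSession;
import org.tensorflow.op.Ops;
import org.tensorflow.op.core.Constant;
import org.tensorflow.op.image.AdjustContrast;
import org.tensorflow.types.TFloat32;

public final class AdjustContrastSketch {
  public static void main(String[] args) {
    try (EagerSession session = EagerSession.create()) {
      Ops tf = Ops.create(session);
      // A single 1x2x2x1 "image" with pixel values 0, 1, 2, 3, so the channel mean is 1.5.
      Constant<TFloat32> images = tf.constant(new float[][][][] {
          {{{0f}, {1f}}, {{2f}, {3f}}}});
      Constant<TFloat32> contrastFactor = tf.constant(2.0f);
      // (x - 1.5) * 2 + 1.5  ==>  -1.5, 0.5, 2.5, 4.5
      AdjustContrast<TFloat32> adjusted = tf.image.adjustContrast(images, contrastFactor);
      System.out.println(adjusted.asTensor().getFloat(0, 0, 0, 0)); // -1.5
    }
  }
}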

    The input image is considered in the RGB colorspace. Conceptually, the RGB * colors are first mapped into HSV. A delta is then applied all the hue values, * and then remapped back to RGB colorspace. - * - * @param data type for {@code output} output */ @OpMetadata( opType = AdjustHue.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustSaturation.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustSaturation.java index a7fea42d8fb..5f0c063dc1d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustSaturation.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/AdjustSaturation.java @@ -41,8 +41,6 @@ *

    The input image is considered in the RGB colorspace. Conceptually, the RGB * colors are first mapped into HSV. A scale is then applied all the saturation * values, and then remapped back to RGB colorspace. - * - * @param data type for {@code output} output */ @OpMetadata( opType = AdjustSaturation.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResizeGradImage.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResizeGradImage.java index 59e98a3252d..e639b0f2cb7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResizeGradImage.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/CropAndResizeGradImage.java @@ -38,8 +38,6 @@ /** * Computes the gradient of the crop_and_resize op wrt the input image tensor. - * - * @param data type for {@code output} output */ @OpMetadata( opType = CropAndResizeGradImage.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeImage.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeImage.java index ae91e89973a..a5c7ee7845e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeImage.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodeImage.java @@ -53,8 +53,6 @@ * unoccupied areas (in the first frame) with zeros (black). For frames after the * first frame that does not occupy the entire canvas, it uses the previous * frame to fill the unoccupied areas. - * - * @param data type for {@code image} output */ @OpMetadata( opType = DecodeImage.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodePng.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodePng.java index db44c3b3146..dd6384caf7c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodePng.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DecodePng.java @@ -51,8 +51,6 @@ * of color channels. *

    This op also supports decoding JPEGs and non-animated GIFs since the interface * is the same, though it is cleaner to use {@code tf.io.decode_image}. - * - * @param data type for {@code image} output */ @OpMetadata( opType = DecodePng.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DrawBoundingBoxes.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DrawBoundingBoxes.java index 8033cecb4c9..56c64a5e50c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DrawBoundingBoxes.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/DrawBoundingBoxes.java @@ -45,8 +45,6 @@ * box is {@code [0.1, 0.2, 0.5, 0.9]}, the upper-left and bottom-right coordinates of * the bounding box will be {@code (40, 10)} to {@code (100, 50)} (in (x,y) coordinates). *

    Parts of the bounding box may fall outside the image. - * - * @param data type for {@code output} output */ @OpMetadata( opType = DrawBoundingBoxes.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractImagePatches.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractImagePatches.java index 69492ac2873..54395a44acc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractImagePatches.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractImagePatches.java @@ -36,8 +36,6 @@ /** * Extract {@code patches} from {@code images} and put them in the "depth" output dimension. - * - * @param data type for {@code patches} output */ @OpMetadata( opType = ExtractImagePatches.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractJpegShape.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractJpegShape.java index 368fe5cfd02..4ca887e7e72 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractJpegShape.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ExtractJpegShape.java @@ -39,8 +39,6 @@ /** * Extract the shape information of a JPEG-encoded image. * This op only parses the image header, so it is much faster than DecodeJpeg. - * - * @param data type for {@code image_shape} output */ @OpMetadata( opType = ExtractJpegShape.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/HsvToRgb.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/HsvToRgb.java index 6e32b95ca11..abd3d53d884 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/HsvToRgb.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/HsvToRgb.java @@ -39,8 +39,6 @@ * value of the pixels. The output is only well defined if the value in {@code images} * are in {@code [0,1]}. *

    See {@code rgb_to_hsv} for a description of the HSV encoding. - * - * @param data type for {@code output} output */ @OpMetadata( opType = HsvToRgb.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV2.java index 572b3e59d16..cef590ad519 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV2.java @@ -42,8 +42,6 @@ * {@code (x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)}, where * {@code k = c0 x + c1 y + 1}. If the transformed point lays outside of the input * image, the output pixel is set to 0. - * - * @param data type for {@code transformed_images} output */ @OpMetadata( opType = ImageProjectiveTransformV2.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV3.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV3.java index 2c448fc9397..59f06c2b982 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV3.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ImageProjectiveTransformV3.java @@ -42,8 +42,6 @@ * {@code (x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)}, where * {@code k = c0 x + c1 y + 1}. If the transformed point lays outside of the input * image, the output pixel is set to fill_value. - * - * @param data type for {@code transformed_images} output */ @OpMetadata( opType = ImageProjectiveTransformV3.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/NonMaxSuppression.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/NonMaxSuppression.java index 65c6f7f7f2a..f682bfd1f5a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/NonMaxSuppression.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/NonMaxSuppression.java @@ -58,8 +58,6 @@ * of other overlapping boxes instead of directly causing them to be pruned. * To enable this Soft-NMS mode, set the {@code soft_nms_sigma} parameter to be * larger than 0. - * - * @param data type for {@code selected_scores} output */ @OpMetadata( opType = NonMaxSuppression.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/QuantizedResizeBilinear.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/QuantizedResizeBilinear.java index def6ca5246e..94b4e077416 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/QuantizedResizeBilinear.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/QuantizedResizeBilinear.java @@ -38,8 +38,6 @@ /** * Resize quantized {@code images} to {@code size} using quantized bilinear interpolation. * Input images and output images must be quantized types. 
- * - * @param data type for {@code resized_images} output */ @OpMetadata( opType = QuantizedResizeBilinear.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RandomCrop.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RandomCrop.java index 966401d271c..063b7b8f529 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RandomCrop.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RandomCrop.java @@ -41,8 +41,6 @@ *

    This Op picks a random location in {@code image} and crops a {@code height} by {@code width} * rectangle from that location. The random location is picked so the cropped * area will fit inside the original image. - * - * @param data type for {@code output} output */ @OpMetadata( opType = RandomCrop.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBicubicGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBicubicGrad.java index 16d5af61802..c04fe6d13e0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBicubicGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBicubicGrad.java @@ -36,8 +36,6 @@ /** * Computes the gradient of bicubic interpolation. - * - * @param data type for {@code output} output */ @OpMetadata( opType = ResizeBicubicGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBilinearGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBilinearGrad.java index dbd172bfbf2..166d6b46de6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBilinearGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeBilinearGrad.java @@ -36,8 +36,6 @@ /** * Computes the gradient of bilinear interpolation. - * - * @param data type for {@code output} output */ @OpMetadata( opType = ResizeBilinearGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighbor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighbor.java index 1fc40174782..355ac564de1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighbor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighbor.java @@ -36,8 +36,6 @@ /** * Resize {@code images} to {@code size} using nearest neighbor interpolation. - * - * @param data type for {@code resized_images} output */ @OpMetadata( opType = ResizeNearestNeighbor.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighborGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighborGrad.java index 485aa4ba63b..36df9e12b2d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighborGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ResizeNearestNeighborGrad.java @@ -36,8 +36,6 @@ /** * Computes the gradient of nearest neighbor interpolation. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = ResizeNearestNeighborGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RgbToHsv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RgbToHsv.java index 3709f0bd4f7..be3c84d9b66 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RgbToHsv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/RgbToHsv.java @@ -56,8 +56,6 @@ * * * - * - * @param data type for {@code output} output */ @OpMetadata( opType = RgbToHsv.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/SampleDistortedBoundingBox.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/SampleDistortedBoundingBox.java index 152f96ce75f..a7378278309 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/SampleDistortedBoundingBox.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/SampleDistortedBoundingBox.java @@ -70,8 +70,6 @@ * {@code use_image_if_no_bounding_boxes = true} will assume there is a single implicit * bounding box covering the whole image. If {@code use_image_if_no_bounding_boxes} is * false and no bounding boxes are supplied, an error is raised. - * - * @param data type for {@code begin} output */ @OpMetadata( opType = SampleDistortedBoundingBox.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ScaleAndTranslateGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ScaleAndTranslateGrad.java index 55dae2a4ae8..1749d046b37 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ScaleAndTranslateGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/ScaleAndTranslateGrad.java @@ -36,8 +36,6 @@ /** * The ScaleAndTranslateGrad operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = ScaleAndTranslateGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/StatelessSampleDistortedBoundingBox.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/StatelessSampleDistortedBoundingBox.java index ac9dfdfe74d..31c4de5388d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/StatelessSampleDistortedBoundingBox.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/image/StatelessSampleDistortedBoundingBox.java @@ -95,8 +95,6 @@ * {@code use_image_if_no_bounding_boxes = true} will assume there is a single implicit * bounding box covering the whole image. If {@code use_image_if_no_bounding_boxes} is * false and no bounding boxes are supplied, an error is raised. - * - * @param data type for {@code begin} output */ @OpMetadata( opType = StatelessSampleDistortedBoundingBox.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodePaddedRaw.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodePaddedRaw.java index 0ef81b9eff2..07eac6679d4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodePaddedRaw.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodePaddedRaw.java @@ -38,8 +38,6 @@ /** * Reinterpret the bytes of a string as a vector of numbers. 
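The one-line summary above ("reinterpret the bytes of a string as a vector of numbers") applies to both DecodePaddedRaw here and DecodeRaw in the neighbouring hunk. A small eager sketch of the simpler DecodeRaw form follows; the tf.io.decodeRaw(bytes, outType) call shape and the TUint8 output type are assumptions based on the generated IoOps endpoint, and the input string is made up:

import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TString;
import org.tensorflow.types.TUint8;

public class DecodeRawExample {
  public static void main(String[] args) {
    Ops tf = Ops.create();  // eager execution
    // A scalar string whose bytes are reinterpreted as unsigned 8-bit integers.
    Operand<TString> bytes = tf.constant("ABCD");
    Operand<TUint8> decoded = tf.io.decodeRaw(bytes, TUint8.class);
    // Decoding appends one innermost dimension holding the elements: shape [4] here.
    System.out.println(decoded.shape());
  }
}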
- * - * @param data type for {@code output} output */ @OpMetadata( opType = DecodePaddedRaw.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeRaw.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeRaw.java index 068d203c2b0..217c843796f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeRaw.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeRaw.java @@ -37,8 +37,6 @@ /** * Reinterpret the bytes of a string as a vector of numbers. - * - * @param data type for {@code output} output */ @OpMetadata( opType = DecodeRaw.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DeserializeManySparse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DeserializeManySparse.java index 1ff234ea6b6..9704bd78d15 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DeserializeManySparse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DeserializeManySparse.java @@ -77,8 +77,6 @@ * values = [1, 2, 3, 4, 5] * shape = [2 50] * - * - * @param data type for {@code sparse_values} output */ @OpMetadata( opType = DeserializeManySparse.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseTensor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseTensor.java index 66a64b13c0b..039ff1546f0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseTensor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/ParseTensor.java @@ -37,8 +37,6 @@ /** * Transforms a serialized tensorflow.TensorProto proto into a Tensor. - * - * @param data type for {@code output} output */ @OpMetadata( opType = ParseTensor.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeManySparse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeManySparse.java index b0e447608f3..70f9327d112 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeManySparse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeManySparse.java @@ -44,8 +44,6 @@ * {@code SparseTensor} objects going into each row of {@code serialized_sparse} will have * rank {@code R-1}. *
    The minibatch size {@code N} is extracted from {@code sparse_shape[0]}. - * - * @param data type for {@code serialized_sparse} output */ @OpMetadata( opType = SerializeManySparse.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeSparse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeSparse.java index 2f450dcf3bd..b0c2b5935bf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeSparse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/SerializeSparse.java @@ -38,8 +38,6 @@ /** * Serialize a {@code SparseTensor} into a {@code [3]} {@code Tensor} object. - * - * @param data type for {@code serialized_sparse} output */ @OpMetadata( opType = SerializeSparse.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandPart.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandPart.java index 34f179ed2b0..a521e77b040 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandPart.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandPart.java @@ -65,8 +65,6 @@ * tf.linalg.band_part(input, -1, 0) ==> Lower triangular part. * tf.linalg.band_part(input, 0, 0) ==> Diagonal. * - * - * @param data type for {@code band} output */ @OpMetadata( opType = BandPart.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandedTriangularSolve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandedTriangularSolve.java index 9dc6dba4348..532d4fe148b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandedTriangularSolve.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BandedTriangularSolve.java @@ -35,8 +35,6 @@ /** * The BandedTriangularSolve operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = BandedTriangularSolve.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholesky.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholesky.java index 0016839b211..b43cf15b48e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholesky.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholesky.java @@ -35,8 +35,6 @@ /** * The BatchCholesky operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = BatchCholesky.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholeskyGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholeskyGrad.java index d9ce332f7e2..5e917e740b8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholeskyGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchCholeskyGrad.java @@ -35,8 +35,6 @@ /** * The BatchCholeskyGrad operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = BatchCholeskyGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixBandPart.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixBandPart.java index 55e8a0d6a75..99cb57ff97f 
100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixBandPart.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixBandPart.java @@ -36,8 +36,6 @@ /** * The BatchMatrixBandPart operation - * - * @param data type for {@code band} output */ @OpMetadata( opType = BatchMatrixBandPart.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDeterminant.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDeterminant.java index c50a706e073..7f1bd32a749 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDeterminant.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDeterminant.java @@ -35,8 +35,6 @@ /** * The BatchMatrixDeterminant operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = BatchMatrixDeterminant.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiag.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiag.java index bba3cae6292..edc731b1f36 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiag.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiag.java @@ -35,8 +35,6 @@ /** * The BatchMatrixDiag operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = BatchMatrixDiag.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiagPart.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiagPart.java index 63e7e0e3026..ac379b960aa 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiagPart.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixDiagPart.java @@ -35,8 +35,6 @@ /** * The BatchMatrixDiagPart operation - * - * @param data type for {@code diagonal} output */ @OpMetadata( opType = BatchMatrixDiagPart.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixInverse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixInverse.java index 081dab67e8b..5ecaf005f3d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixInverse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixInverse.java @@ -35,8 +35,6 @@ /** * The BatchMatrixInverse operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = BatchMatrixInverse.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSetDiag.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSetDiag.java index 67a97a485c0..eaea0c7db31 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSetDiag.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSetDiag.java @@ -35,8 +35,6 @@ /** * The BatchMatrixSetDiag operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = BatchMatrixSetDiag.OP_NAME, diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolve.java index dc65bb1dce1..5b6749c53e4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolve.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolve.java @@ -35,8 +35,6 @@ /** * The BatchMatrixSolve operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = BatchMatrixSolve.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolveLs.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolveLs.java index 801c5262946..7cb6714696f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolveLs.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixSolveLs.java @@ -36,8 +36,6 @@ /** * The BatchMatrixSolveLs operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = BatchMatrixSolveLs.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixTriangularSolve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixTriangularSolve.java index ae63e405dd7..d7b326bae21 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixTriangularSolve.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchMatrixTriangularSolve.java @@ -35,8 +35,6 @@ /** * The BatchMatrixTriangularSolve operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = BatchMatrixTriangularSolve.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSelfAdjointEig.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSelfAdjointEig.java index 1d6588ac785..637625bd5db 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSelfAdjointEig.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSelfAdjointEig.java @@ -35,8 +35,6 @@ /** * The BatchSelfAdjointEigV2 operation - * - * @param data type for {@code e} output */ @OpMetadata( opType = BatchSelfAdjointEig.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSvd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSvd.java index cf723ceeedc..a2411601e63 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSvd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/BatchSvd.java @@ -35,8 +35,6 @@ /** * The BatchSvd operation - * - * @param data type for {@code s} output */ @OpMetadata( opType = BatchSvd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cholesky.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cholesky.java index 294a41889da..ef6d0ca1a3d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cholesky.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cholesky.java @@ -45,8 +45,6 @@ *
    Note: The gradient computation on GPU is faster for large matrices but * not for large batch dimensions when the submatrices are small. In this * case it might be faster to use the CPU. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Cholesky.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/CholeskyGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/CholeskyGrad.java index f2529b61318..ce7975bbb29 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/CholeskyGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/CholeskyGrad.java @@ -37,8 +37,6 @@ * Computes the reverse mode backpropagated gradient of the Cholesky algorithm. * For an explanation see "Differentiation of the Cholesky algorithm" by * Iain Murray http://arxiv.org/abs/1602.07527. - * - * @param data type for {@code output} output */ @OpMetadata( opType = CholeskyGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/ConjugateTranspose.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/ConjugateTranspose.java index e14f2e71ef9..561e4fecbf1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/ConjugateTranspose.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/ConjugateTranspose.java @@ -39,8 +39,6 @@ * The output {@code y} has the same rank as {@code x}. The shapes of {@code x} and {@code y} satisfy: * {@code y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]} * {@code y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])} - * - * @param data type for {@code y} output */ @OpMetadata( opType = ConjugateTranspose.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cross.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cross.java index 68ee2a65439..5c942c1e41b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cross.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Cross.java @@ -38,8 +38,6 @@ * {@code a} and {@code b} must be the same shape; they can either be simple 3-element vectors, * or any shape where the innermost dimension is 3. In the latter case, each pair * of corresponding 3-element vectors is cross-multiplied independently. - * - * @param data type for {@code product} output */ @OpMetadata( opType = Cross.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Det.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Det.java index 62aafcde736..d63118c9f73 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Det.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Det.java @@ -38,8 +38,6 @@ * The input is a tensor of shape {@code [..., M, M]} whose inner-most 2 dimensions * form square matrices. The output is a tensor containing the determinants * for all input submatrices {@code [..., :, :]}. 
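To connect the Cholesky and determinant descriptions above to the generated endpoints, here is a minimal eager sketch; the 2x2 symmetric positive-definite matrix is illustrative, and tf.linalg.cholesky / tf.linalg.det are assumed to be the LinalgOps endpoints generated for these ops:

import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TFloat32;

public class CholeskyDetExample {
  public static void main(String[] args) {
    Ops tf = Ops.create();  // eager execution
    // Symmetric positive-definite input, shape [2, 2].
    Operand<TFloat32> a = tf.constant(new float[][] {{4f, 2f}, {2f, 3f}});
    // Lower-triangular factor L with a = L * L^T.
    Operand<TFloat32> l = tf.linalg.cholesky(a);
    // Determinant of the innermost 2x2 matrix: 4*3 - 2*2 = 8.
    Operand<TFloat32> det = tf.linalg.det(a);
    System.out.println(l.asTensor().getFloat(0, 0));  // 2.0
    System.out.println(det.asTensor().getFloat());    // 8.0
  }
}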
- * - * @param data type for {@code output} output */ @OpMetadata( opType = Det.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Eig.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Eig.java index 783950dfde9..3276bbb78fe 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Eig.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Eig.java @@ -46,8 +46,6 @@ * e, v = eig(a) * e = eig(a, compute_v=False) * - * - * @param data type for {@code e} output */ @OpMetadata( opType = Eig.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Einsum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Einsum.java index 51d3eeb3fa6..5b57bad8aa4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Einsum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Einsum.java @@ -99,8 +99,6 @@ * supported by {@code numpy.einsum}. *
    {@literal @}end_compatibility * - * - * @param data type for {@code output} output */ @OpMetadata( opType = Einsum.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/EuclideanNorm.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/EuclideanNorm.java index ab6f58f4885..f544381e1a7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/EuclideanNorm.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/EuclideanNorm.java @@ -40,8 +40,6 @@ * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. - * - * @param data type for {@code output} output */ @OpMetadata( opType = EuclideanNorm.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Inv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Inv.java index 6b02bc2a059..93338f1df07 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Inv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Inv.java @@ -42,8 +42,6 @@ *
    If a matrix is not invertible there is no guarantee what the op does. It * may detect the condition and raise an exception or it may simply return a * garbage result. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Inv.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/LogMatrixDeterminant.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/LogMatrixDeterminant.java index 298e01306db..a144ac2d31c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/LogMatrixDeterminant.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/LogMatrixDeterminant.java @@ -43,8 +43,6 @@ * The {@code log_abs_determinant} is computed as {@code det(P)*sum(log(diag(LU)))} where {@code LU} * is the {@code LU} decomposition of the input and {@code P} is the corresponding * permutation matrix. - * - * @param data type for {@code sign} output */ @OpMetadata( opType = LogMatrixDeterminant.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Lu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Lu.java index 480ed23e696..9063fab1875 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Lu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Lu.java @@ -51,10 +51,6 @@ *
    P represents a permutation matrix encoded as a list of indices each between {@code 0} * and {@code M-1}, inclusive. If P_mat denotes the permutation matrix corresponding to * P, then the L, U and P satisfies P_mat * input = L * U. - * - * @param data type for {@code lu} output - * - * @param data type for {@code p} output */ @OpMetadata( opType = Lu.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatMul.java index a592a65396a..c817cbc9037 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatMul.java @@ -41,8 +41,6 @@ * true). *
    Note: The default kernel implementation for MatMul on GPUs uses * cublas. - * - * @param data type for {@code product} output */ @OpMetadata( opType = MatMul.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiag.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiag.java index 0a292c9d1b1..5241708f71a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiag.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiag.java @@ -116,8 +116,6 @@ * [1, 9], * [9, 2]] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = MatrixDiag.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPart.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPart.java index 084c946193e..a818b134cbe 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPart.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPart.java @@ -96,8 +96,6 @@ * [3, 4, 9], * [4, 3, 8]]] * - * - * @param data type for {@code diagonal} output */ @OpMetadata( opType = MatrixDiagPart.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPartV3.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPartV3.java index d4794ab7571..c6ecab46bab 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPartV3.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagPartV3.java @@ -126,8 +126,6 @@ * [4, 3, 8]]] * * - * - * @param data type for {@code diagonal} output */ @OpMetadata( opType = MatrixDiagPartV3.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagV3.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagV3.java index 943b92e2c95..67b5b3b74b0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagV3.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixDiagV3.java @@ -144,8 +144,6 @@ * [9, 2]] * * - * - * @param data type for {@code output} output */ @OpMetadata( opType = MatrixDiagV3.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixExponential.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixExponential.java index 961f57037f4..9332cd02b3e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixExponential.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixExponential.java @@ -35,8 +35,6 @@ /** * Deprecated, use python implementation tf.linalg.matrix_exponential. 
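A minimal eager sketch of the MatMul endpoint described above; the values are made up and the transpose attributes are left at their defaults:

import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TFloat32;

public class MatMulExample {
  public static void main(String[] args) {
    Ops tf = Ops.create();  // eager execution
    Operand<TFloat32> a = tf.constant(new float[][] {{1f, 2f}, {3f, 4f}});
    Operand<TFloat32> b = tf.constant(new float[][] {{5f, 6f}, {7f, 8f}});
    // Plain 2-D matrix product; result shape [2, 2].
    Operand<TFloat32> product = tf.linalg.matMul(a, b);
    System.out.println(product.asTensor().getFloat(0, 0));  // 1*5 + 2*7 = 19.0
  }
}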
- * - * @param data type for {@code output} output */ @OpMetadata( opType = MatrixExponential.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixLogarithm.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixLogarithm.java index b3876d3a572..f1529a1c264 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixLogarithm.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixLogarithm.java @@ -46,8 +46,6 @@ *
    The input is a tensor of shape {@code [..., M, M]} whose inner-most 2 dimensions * form square matrices. The output is a tensor of the same shape as the input * containing the exponential for all input submatrices {@code [..., :, :]}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = MatrixLogarithm.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSetDiag.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSetDiag.java index 0ae2c206569..1ec3a1444f5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSetDiag.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSetDiag.java @@ -132,8 +132,6 @@ * [7, 4, 2, 4]]] * * - * - * @param data type for {@code output} output */ @OpMetadata( opType = MatrixSetDiag.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSolveLs.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSolveLs.java index 3b340034827..d0601c6ee57 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSolveLs.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/MatrixSolveLs.java @@ -66,8 +66,6 @@ * least-squares solution, even when \(A\) is rank deficient. This path is * typically 6-7 times slower than the fast path. If {@code fast} is {@code False} then * {@code l2_regularizer} is ignored. - * - * @param data type for {@code output} output */ @OpMetadata( opType = MatrixSolveLs.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Qr.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Qr.java index 9e73edaf6b8..037f024d04b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Qr.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Qr.java @@ -47,8 +47,6 @@ * q, r = qr(a) * q_full, r_full = qr(a, full_matrices=True) * - * - * @param data type for {@code q} output */ @OpMetadata( opType = Qr.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMul.java index 93ca4112092..d3136668a39 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMul.java @@ -41,8 +41,6 @@ * {@code a} (after being transposed if {@code transpose_a} is non-zero) must match the * outer dimension of {@code b} (after being transposed if {@code transposed_b} is * non-zero). 
- * - * @param data type for {@code out} output */ @OpMetadata( opType = QuantizedMatMul.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBias.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBias.java index 4ff470d2594..0cc43361bf4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBias.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBias.java @@ -43,8 +43,6 @@ * match the outer dimension of {@code b} (after being transposed if {@code transposed_b} is * non-zero). Then do broadcast add operation with bias values on the matrix * multiplication result. The bias size must match inner dimension of {@code b}. - * - * @param data type for {@code out} output */ @OpMetadata( opType = QuantizedMatMulWithBias.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndRelu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndRelu.java index ad1182c50de..eee116597b9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndRelu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndRelu.java @@ -44,8 +44,6 @@ * non-zero). Then do broadcast add operation with bias values on the matrix * multiplication result. The bias size must match inner dimension of {@code b}. Then do * relu activation to get non-negative result. - * - * @param data type for {@code out} output */ @OpMetadata( opType = QuantizedMatMulWithBiasAndRelu.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndReluAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndReluAndRequantize.java index 91eefc72f1b..82bdde439f1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndReluAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/QuantizedMatMulWithBiasAndReluAndRequantize.java @@ -45,8 +45,6 @@ * multiplication result. The bias size must match inner dimension of {@code b}. Then do * relu activation to get non-negative result. Then do requantize operation to get * final uint8 result. 
- * - * @param data type for {@code out} output */ @OpMetadata( opType = QuantizedMatMulWithBiasAndReluAndRequantize.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/SelfAdjointEig.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/SelfAdjointEig.java index 2d64ddb4dda..75c06a99f2a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/SelfAdjointEig.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/SelfAdjointEig.java @@ -45,8 +45,6 @@ * e, v = self_adjoint_eig(a) * e = self_adjoint_eig(a, compute_v=False) * - * - * @param data type for {@code e} output */ @OpMetadata( opType = SelfAdjointEig.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Solve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Solve.java index a0f41eda3f5..d1057183227 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Solve.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Solve.java @@ -41,8 +41,6 @@ * satisfies {@code matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]}. * If {@code adjoint} is {@code True} then each output matrix satisfies * {@code adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Solve.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Sqrtm.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Sqrtm.java index 224688c8e1d..cf48c52605a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Sqrtm.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Sqrtm.java @@ -47,8 +47,6 @@ *
    The input is a tensor of shape {@code [..., M, M]} whose inner-most 2 dimensions * form square matrices. The output is a tensor of the same shape as the input * containing the matrix square root for all input submatrices {@code [..., :, :]}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Sqrtm.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Svd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Svd.java index b17b01cf88e..b11eafdccfc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Svd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Svd.java @@ -45,8 +45,6 @@ * s, u, v = svd(a) * s, _, _ = svd(a, compute_uv=False) * - * - * @param data type for {@code s} output */ @OpMetadata( opType = Svd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiag.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiag.java index 6292194a118..69ee9258392 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiag.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiag.java @@ -48,8 +48,6 @@ * [0, 0, 3, 0] * [0, 0, 0, 4]] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = TensorDiag.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiagPart.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiagPart.java index ae21a73b071..838a036f84b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiagPart.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TensorDiagPart.java @@ -49,8 +49,6 @@ * * tf.diag_part(input) ==> [1, 2, 3, 4] * - * - * @param data type for {@code diagonal} output */ @OpMetadata( opType = TensorDiagPart.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Transpose.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Transpose.java index 65f22dfe32b..712576c0989 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Transpose.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/Transpose.java @@ -38,8 +38,6 @@ * Shuffle dimensions of x according to a permutation. * The output {@code y} has the same rank as {@code x}. The shapes of {@code x} and {@code y} satisfy: * {@code y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]} - * - * @param data type for {@code y} output */ @OpMetadata( opType = Transpose.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TriangularSolve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TriangularSolve.java index 891f4e1f608..026fbfb70bf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TriangularSolve.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TriangularSolve.java @@ -77,8 +77,6 @@ * # [4. 
], * # [1.9999999]], dtype=float32)> * - * - * @param data type for {@code output} output */ @OpMetadata( opType = TriangularSolve.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalMatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalMatMul.java index bd69ed483e4..a6122dabc83 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalMatMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalMatMul.java @@ -36,8 +36,6 @@ /** * Calculate product with tridiagonal matrix. * Calculates product of two matrices, where left matrix is a tridiagonal matrix. - * - * @param data type for {@code output} output */ @OpMetadata( opType = TridiagonalMatMul.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalSolve.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalSolve.java index 57c0864ef7d..6b0a890d12e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalSolve.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/TridiagonalSolve.java @@ -42,8 +42,6 @@ * pivoting, depending on {@code partial_pivoting} attribute. On GPU, Nvidia's cuSPARSE * library is used: https://docs.nvidia.com/cuda/cusparse/index.html#gtsv * Partial pivoting is not yet supported by XLA backends. - * - * @param data type for {@code output} output */ @OpMetadata( opType = TridiagonalSolve.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixComponents.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixComponents.java index 27d77557bfb..7fd47c7c6f5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixComponents.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixComponents.java @@ -39,8 +39,6 @@ * Reads out the CSR components at batch {@code index}. * This op is meant only for debugging / testing, and its interface is not expected * to be stable. - * - * @param data type for {@code values} output */ @OpMetadata( opType = CSRSparseMatrixComponents.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToDense.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToDense.java index 51bed06f6ba..97fb87d7250 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToDense.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToDense.java @@ -36,8 +36,6 @@ /** * Convert a (possibly batched) CSRSparseMatrix to dense. 
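For the TriangularSolve op shown a few hunks above, a small eager sketch; it assumes the generated tf.linalg.triangularSolve endpoint with its default attributes (lower=true, adjoint=false), and the 2x2 system is made up:

import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TFloat32;

public class TriangularSolveExample {
  public static void main(String[] args) {
    Ops tf = Ops.create();  // eager execution
    // Lower-triangular system matrix, shape [2, 2], and right-hand side, shape [2, 1].
    Operand<TFloat32> matrix = tf.constant(new float[][] {{2f, 0f}, {1f, 3f}});
    Operand<TFloat32> rhs = tf.constant(new float[][] {{4f}, {7f}});
    // With the defaults this solves matrix * x = rhs forward-substitution style.
    Operand<TFloat32> x = tf.linalg.triangularSolve(matrix, rhs);
    System.out.println(x.asTensor().getFloat(0, 0));  // 4 / 2 = 2.0
    System.out.println(x.asTensor().getFloat(1, 0));  // (7 - 1*2) / 3 = 1.6667
  }
}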
- * - * @param data type for {@code dense_output} output */ @OpMetadata( opType = CSRSparseMatrixToDense.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToSparseTensor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToSparseTensor.java index 5c111887894..ad365783cea 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToSparseTensor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/CSRSparseMatrixToSparseTensor.java @@ -37,8 +37,6 @@ /** * Converts a (possibly batched) CSRSparesMatrix to a SparseTensor. - * - * @param data type for {@code values} output */ @OpMetadata( opType = CSRSparseMatrixToSparseTensor.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixMatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixMatMul.java index 2de2e93ec3b..5d9ed9bbbf2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixMatMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/linalg/sparse/SparseMatrixMatMul.java @@ -56,8 +56,6 @@ * C = conjugate(transpose(A . B)) = conjugate(transpose(B)) . * conjugate(transpose(A)) * - * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseMatrixMatMul.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Abs.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Abs.java index ef53c5f5693..0f4ee840704 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Abs.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Abs.java @@ -38,8 +38,6 @@ * Given a tensor {@code x}, this operation returns a tensor containing the absolute * value of each element in {@code x}. For example, if x is an input element and y is * an output element, this operation computes \(y = |x|\). - * - * @param data type for {@code y} output */ @OpMetadata( opType = Abs.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AccumulateN.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AccumulateN.java index 61d1df63943..3a0e466e8cd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AccumulateN.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AccumulateN.java @@ -43,8 +43,6 @@ * storage is proportional to the output size rather than the inputs size. *
Unlike the original {@code accumulate_n}, {@code accumulate_n_v2} is differentiable. *
    Returns a {@code Tensor} of same shape and type as the elements of {@code inputs}. - * - * @param data type for {@code sum} output */ @OpMetadata( opType = AccumulateN.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acos.java index 078326e1891..915e5b98b63 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acos.java @@ -37,8 +37,6 @@ * Computes acos of x element-wise. * Provided an input tensor, the {@code tf.math.acos} operation returns the inverse cosine of each element of the tensor. If {@code y = tf.math.cos(x)} then, {@code x = tf.math.acos(y)}. *
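A quick eager sketch of the tf.math.acos endpoint described above; the input values are illustrative:

import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TFloat32;

public class AcosExample {
  public static void main(String[] args) {
    Ops tf = Ops.create();  // eager execution
    Operand<TFloat32> y = tf.constant(new float[] {1f, 0f, -1f});
    // Element-wise inverse cosine; outputs lie in [0, pi].
    Operand<TFloat32> x = tf.math.acos(y);
    System.out.println(x.asTensor().getFloat(1));  // ~1.5708 (pi / 2)
  }
}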
    Input range is {@code [-1, 1]} and the output has a range of {@code [0, pi]}. - * - * @param data type for {@code y} output */ @OpMetadata( opType = Acos.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acosh.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acosh.java index 60edbd7880f..8ade37b1990 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acosh.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Acosh.java @@ -41,8 +41,6 @@ * x = tf.constant([-2, -0.5, 1, 1.2, 200, 10000, float("inf")]) * tf.math.acosh(x) ==> [nan nan 0. 0.62236255 5.9914584 9.903487 inf] * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Acosh.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Add.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Add.java index 4f32acd9ee1..61db4d2e4ec 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Add.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Add.java @@ -39,8 +39,6 @@ * here *
Given two input tensors, the {@code tf.add} operation computes the sum for every element in the tensor. *
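A minimal eager sketch of tf.math.add with the broadcasting behaviour described above; shapes and values are made up:

import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TFloat32;

public class AddExample {
  public static void main(String[] args) {
    Ops tf = Ops.create();  // eager execution
    Operand<TFloat32> x = tf.constant(new float[][] {{1f, 2f}, {3f, 4f}});
    Operand<TFloat32> y = tf.constant(new float[] {10f, 20f});  // broadcast across rows
    Operand<TFloat32> z = tf.math.add(x, y);
    System.out.println(z.asTensor().getFloat(1, 1));  // 4 + 20 = 24.0
  }
}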
    Both input and output have a range {@code (-inf, inf)}. - * - * @param data type for {@code z} output */ @OpMetadata( opType = Add.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AddN.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AddN.java index 6cd47212eef..f2ef9209796 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AddN.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/AddN.java @@ -41,8 +41,6 @@ * x = [9, 7, 10] * tf.math.add_n(x) ==> 26 * - * - * @param data type for {@code sum} output */ @OpMetadata( opType = AddN.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Angle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Angle.java index a9c7814636f..6ad1ff84bba 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Angle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Angle.java @@ -51,8 +51,6 @@ *
{@literal @}compatibility(numpy)
* Equivalent to np.angle. *
    {@literal @}end_compatibility - * - * @param data type for {@code output} output */ @OpMetadata( opType = Angle.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMax.java index 5a7b5adec69..c222f3d54d5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMax.java @@ -48,8 +48,6 @@ * # c = 4 * # here a[4] = 166.32 which is the largest element of a across axis 0 * - * - * @param data type for {@code output} output */ @OpMetadata( opType = ArgMax.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMin.java index ff138655b1f..41aa45a10ad 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ArgMin.java @@ -48,8 +48,6 @@ * # c = 0 * # here a[0] = 1 which is the smallest element of a across axis 0 * - * - * @param data type for {@code output} output */ @OpMetadata( opType = ArgMin.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asin.java index 050107db969..810aeb5fa3b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asin.java @@ -47,8 +47,6 @@ * * tf.math.asin(y) # [1.047, 0.785] = x * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Asin.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asinh.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asinh.java index d4170db292a..918518f2b82 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asinh.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Asinh.java @@ -42,8 +42,6 @@ * x = tf.constant([-float("inf"), -2, -0.5, 1, 1.2, 200, 10000, float("inf")]) * tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 9.903487 inf] * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Asinh.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan.java index aab73783c10..8979ab75d9e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan.java @@ -47,8 +47,6 @@ * * tf.math.atan(y) # [1.047, 0.785] = x * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Atan.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan2.java index dfff4a48676..2d566d3cc22 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atan2.java @@ -51,8 +51,6 @@ * * * - * - * @param data type for {@code z} output */ @OpMetadata( opType = 
Atan2.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atanh.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atanh.java index ea5729193bf..c4dd0f1ead2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atanh.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Atanh.java @@ -44,8 +44,6 @@ * x = tf.constant([-float("inf"), -1, -0.5, 1, 0, 0.5, 10, float("inf")]) * tf.math.atanh(x) ==> [nan -inf -0.54930615 inf 0. 0.54930615 nan nan] * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Atanh.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0.java index d3782706f20..945d2107a39 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0.java @@ -35,8 +35,6 @@ /** * The BesselI0 operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = BesselI0.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0e.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0e.java index eec8b3281a3..7e27d3e4263 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0e.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI0e.java @@ -35,8 +35,6 @@ /** * The BesselI0e operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = BesselI0e.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1.java index bb59dc19f5c..28304567e86 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1.java @@ -35,8 +35,6 @@ /** * The BesselI1 operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = BesselI1.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1e.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1e.java index fe929e32eb1..df3b3f937e8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1e.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/BesselI1e.java @@ -35,8 +35,6 @@ /** * The BesselI1e operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = BesselI1e.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Betainc.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Betainc.java index f7b9904c100..1a895c89f00 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Betainc.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Betainc.java @@ -41,8 +41,6 @@ *
\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\) *
    is the incomplete beta function and \(B(a, b)\) is the complete * beta function. - * - * @param data type for {@code z} output */ @OpMetadata( opType = Betainc.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Bincount.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Bincount.java index 6e78f0799fc..463dc277eae 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Bincount.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Bincount.java @@ -42,8 +42,6 @@ * the value in {@code weights} at each index where the corresponding value in {@code arr} is * {@code i}. *
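For the weighted counting behaviour that the Bincount description above sets out, a small eager sketch; it assumes the generated tf.math.bincount endpoint takes (arr, size, weights) in that order, mirroring the raw op's inputs, and the values are made up:

import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TFloat32;
import org.tensorflow.types.TInt32;

public class BincountExample {
  public static void main(String[] args) {
    Ops tf = Ops.create();  // eager execution
    Operand<TInt32> arr = tf.constant(new int[] {1, 1, 2, 3});
    Operand<TInt32> size = tf.constant(4);  // bins 0..3
    Operand<TFloat32> weights = tf.constant(new float[] {0.5f, 1.5f, 2f, 3f});
    // bins[i] sums the weights at every position where arr equals i.
    Operand<TFloat32> bins = tf.math.bincount(arr, size, weights);
    System.out.println(bins.asTensor().getFloat(1));  // 0.5 + 1.5 = 2.0
  }
}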
    Values in {@code arr} outside of the range [0, size) are ignored. - * - * @param data type for {@code bins} output */ @OpMetadata( opType = Bincount.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ceil.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ceil.java index 3db46461d7c..1a69b94a8e4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ceil.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ceil.java @@ -35,8 +35,6 @@ /** * Returns element-wise smallest integer not less than x. - * - * @param data type for {@code y} output */ @OpMetadata( opType = Ceil.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ComplexAbs.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ComplexAbs.java index 798b2a9cb1a..9461d599888 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ComplexAbs.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ComplexAbs.java @@ -52,8 +52,6 @@ * * * - * - * @param data type for {@code y} output */ @OpMetadata( opType = ComplexAbs.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Conj.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Conj.java index 266da810658..d46b7f2ae5b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Conj.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Conj.java @@ -45,8 +45,6 @@ * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = Conj.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cos.java index 0ab0152ff02..b6b5b9595c5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cos.java @@ -43,8 +43,6 @@ * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")]) * tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 0.48718765 -0.95215535 nan] * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Cos.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cosh.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cosh.java index 76a98abe533..391d2efd7ab 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cosh.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cosh.java @@ -42,8 +42,6 @@ * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")]) * tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 3.7621956e+00 1.1013233e+04 inf] * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Cosh.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumprod.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumprod.java index 3e901959c5d..90bdcdc0038 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumprod.java +++ 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumprod.java @@ -56,8 +56,6 @@ *
      * tf.cumprod([a, b, c], exclusive=True, reverse=True)  # => [b * c, c, 1]
      * 
    - * - * @param data type for {@code out} output */ @OpMetadata( opType = Cumprod.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumsum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumsum.java index 12b3346db25..ff8dca235c9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumsum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Cumsum.java @@ -56,8 +56,6 @@ *
      * tf.cumsum([a, b, c], exclusive=True, reverse=True)  # => [b + c, c, 0]
      * 
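The {@code exclusive} and {@code reverse} options shown in the snippets above are exposed unchanged by the generated Java wrapper. A minimal sketch through the eager Ops entry point of tensorflow-core-api (the class name and sample values below are illustrative, not part of this patch):

    import org.tensorflow.op.Ops;
    import org.tensorflow.op.core.Constant;
    import org.tensorflow.op.math.Cumsum;
    import org.tensorflow.types.TFloat32;

    public class CumsumSketch {
      public static void main(String[] args) {
        Ops tf = Ops.create();  // default eager execution environment
        Constant<TFloat32> x = tf.constant(new float[] {1f, 2f, 3f});
        // exclusive + reverse cumulative sum along axis 0: [2 + 3, 3, 0]
        Cumsum<TFloat32> out = tf.math.cumsum(
            x, tf.constant(0), Cumsum.exclusive(true), Cumsum.reverse(true));
        System.out.println(out.asTensor().getFloat(0));  // 5.0
      }
    }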
    - * - * @param data type for {@code out} output */ @OpMetadata( opType = Cumsum.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/CumulativeLogsumexp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/CumulativeLogsumexp.java index 52595f56eea..f7367703a41 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/CumulativeLogsumexp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/CumulativeLogsumexp.java @@ -51,8 +51,6 @@ * floating point type is used instead. *
    By setting the {@code reverse} kwarg to {@code True}, the cumulative log-sum-exp is performed in the * opposite direction. - * - * @param data type for {@code out} output */ @OpMetadata( opType = CumulativeLogsumexp.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DenseBincount.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DenseBincount.java index ff9a38ba24d..808be372c5f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DenseBincount.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DenseBincount.java @@ -41,8 +41,6 @@ * the value in {@code weights} at each index where the corresponding value in {@code arr} is * {@code i}. *
    Values in {@code arr} outside of the range [0, size) are ignored. - * - * @param data type for {@code output} output */ @OpMetadata( opType = DenseBincount.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Digamma.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Digamma.java index 37117f4e1b8..3a48d548bd4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Digamma.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Digamma.java @@ -36,8 +36,6 @@ /** * Computes Psi, the derivative of Lgamma (the log of the absolute value of * {@code Gamma(x)}), element-wise. - * - * @param data type for {@code y} output */ @OpMetadata( opType = Digamma.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Div.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Div.java index 62a15f37da7..8ad37113d3f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Div.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Div.java @@ -37,8 +37,6 @@ * Returns x / y element-wise. * NOTE: {@code math.Div} supports broadcasting. More about broadcasting * here - * - * @param data type for {@code z} output */ @OpMetadata( opType = Div.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DivNoNan.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DivNoNan.java index bb098cfdf14..43047bad3c6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DivNoNan.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/DivNoNan.java @@ -37,8 +37,6 @@ * Returns 0 if the denominator is zero. * NOTE: {@code math.DivNoNan} supports broadcasting. More about broadcasting * here - * - * @param data type for {@code z} output */ @OpMetadata( opType = DivNoNan.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erf.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erf.java index 1e2046e2892..ef607d7778b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erf.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erf.java @@ -35,8 +35,6 @@ /** * Computes the Gauss error function of {@code x} element-wise. In statistics, for non-negative values of $x$, the error function has the following interpretation: for a random variable $Y$ that is normally distributed with mean 0 and variance $1/\sqrt{2}$, $erf(x)$ is the probability that $Y$ falls in the range $[−x, x]$. - * - * @param data type for {@code y} output */ @OpMetadata( opType = Erf.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erfc.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erfc.java index b8d11327b94..25fdbcd648c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erfc.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Erfc.java @@ -35,8 +35,6 @@ /** * Computes the complementary error function of {@code x} element-wise. 
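As a reference point for the DivNoNan behaviour described above (zero output wherever the denominator is zero), a hedged sketch against the eager Java Ops; names and values are illustrative only:

    import org.tensorflow.op.Ops;
    import org.tensorflow.op.core.Constant;
    import org.tensorflow.op.math.DivNoNan;
    import org.tensorflow.types.TFloat32;

    public class DivNoNanSketch {
      public static void main(String[] args) {
        Ops tf = Ops.create();
        Constant<TFloat32> x = tf.constant(new float[] {1f, 2f});
        Constant<TFloat32> y = tf.constant(new float[] {0f, 4f});
        DivNoNan<TFloat32> z = tf.math.divNoNan(x, y);  // 0 where y == 0, x / y elsewhere
        System.out.println(z.asTensor().getFloat(0));   // 0.0 rather than Inf/NaN
        System.out.println(z.asTensor().getFloat(1));   // 0.5
      }
    }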
- * - * @param data type for {@code y} output */ @OpMetadata( opType = Erfc.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Exp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Exp.java index 1a5c7456b51..fe1d6ed1515 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Exp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Exp.java @@ -56,8 +56,6 @@ * x = tf.constant(1 + 1j) * tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Exp.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Expm1.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Expm1.java index a6f8f64ab43..b9c80edf84b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Expm1.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Expm1.java @@ -47,8 +47,6 @@ * x = tf.constant(1 + 1j) * tf.math.expm1(x) ==> (0.46869393991588515+2.2873552871788423j) * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Expm1.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Floor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Floor.java index bb9dbc4aa32..27ed6af66ac 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Floor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Floor.java @@ -35,8 +35,6 @@ /** * Returns element-wise largest integer not greater than x. - * - * @param data type for {@code y} output */ @OpMetadata( opType = Floor.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorDiv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorDiv.java index 47887e1a4dd..61d57ac8c4f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorDiv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorDiv.java @@ -37,8 +37,6 @@ * Returns x // y element-wise. * NOTE: {@code math.FloorDiv} supports broadcasting. More about broadcasting * here - * - * @param data type for {@code z} output */ @OpMetadata( opType = FloorDiv.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorMod.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorMod.java index 58c90f87123..b41e5d112b2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorMod.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/FloorMod.java @@ -40,8 +40,6 @@ * {@code floor(x / y) * y + floormod(x, y) = x}, regardless of the signs of x and y. *
    NOTE: {@code math.FloorMod} supports broadcasting. More about broadcasting * here - * - * @param data type for {@code z} output */ @OpMetadata( opType = FloorMod.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igamma.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igamma.java index 4f116ba6e63..224c434af9f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igamma.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igamma.java @@ -42,8 +42,6 @@ *
    is the lower incomplete Gamma function. *
    Note, above {@code Q(a, x)} ({@code Igammac}) is the upper regularized complete * Gamma function. - * - * @param data type for {@code z} output */ @OpMetadata( opType = Igamma.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IgammaGradA.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IgammaGradA.java index f9e7aced432..a3c6c4f20ad 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IgammaGradA.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/IgammaGradA.java @@ -35,8 +35,6 @@ /** * Computes the gradient of {@code igamma(a, x)} wrt {@code a}. - * - * @param data type for {@code z} output */ @OpMetadata( opType = IgammaGradA.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igammac.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igammac.java index 1cc0549ad00..80f2545ce69 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igammac.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Igammac.java @@ -42,8 +42,6 @@ *
    is the upper incomplete Gamma function. *
    Note, above {@code P(a, x)} ({@code Igamma}) is the lower regularized complete * Gamma function. - * - * @param data type for {@code z} output */ @OpMetadata( opType = Igammac.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Imag.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Imag.java index fe04cd17336..509de2b8c7b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Imag.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Imag.java @@ -47,8 +47,6 @@ * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.imag(input) ==> [4.75, 5.75] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = Imag.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/InvertPermutation.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/InvertPermutation.java index 3035d46e60c..a466109898c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/InvertPermutation.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/InvertPermutation.java @@ -46,8 +46,6 @@ * # tensor `x` is [3, 4, 0, 2, 1] * invert_permutation(x) ==> [2, 4, 3, 0, 1] * - * - * @param data type for {@code y} output */ @OpMetadata( opType = InvertPermutation.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Lgamma.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Lgamma.java index d8c6b4889a2..4c5aea1de84 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Lgamma.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Lgamma.java @@ -42,8 +42,6 @@ * x = tf.constant([0, 0.5, 1, 4.5, -4, -5.6]) * tf.math.lgamma(x) ==> [inf, 0.5723649, 0., 2.4537368, inf, -4.6477685] * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Lgamma.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log.java index 32ee589536a..911ab61ff0c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log.java @@ -41,8 +41,6 @@ * x = tf.constant([0, 0.5, 1, 5]) * tf.math.log(x) ==> [-inf, -0.6931472, 0. 
, 1.609438] * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Log.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log1p.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log1p.java index f280d8b0062..05fe31ad376 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log1p.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Log1p.java @@ -41,8 +41,6 @@ * x = tf.constant([0, 0.5, 1, 5]) * tf.math.log1p(x) ==> [0., 0.4054651, 0.6931472, 1.7917595] * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Log1p.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Maximum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Maximum.java index c46c8c6e384..0c864b79f5e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Maximum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Maximum.java @@ -37,8 +37,6 @@ * Returns the max of x and y (i.e. x > y ? x : y) element-wise. * NOTE: {@code math.Maximum} supports broadcasting. More about broadcasting * here - * - * @param data type for {@code z} output */ @OpMetadata( opType = Maximum.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mean.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mean.java index 94de9fc5bd4..9018aa2bd6d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mean.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mean.java @@ -40,8 +40,6 @@ * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Mean.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Minimum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Minimum.java index 588bcb3328b..b516ee5c302 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Minimum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Minimum.java @@ -37,8 +37,6 @@ * Returns the min of x and y (i.e. x < y ? x : y) element-wise. * NOTE: {@code math.Minimum} supports broadcasting. More about broadcasting * here - * - * @param data type for {@code z} output */ @OpMetadata( opType = Minimum.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mod.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mod.java index d318de97c9c..60ccc32e855 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mod.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mod.java @@ -39,8 +39,6 @@ * {@code tf.truncatediv(x, y) * y + truncate_mod(x, y) = x}. *
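The axis/keep_dims semantics described for Mean above map directly onto the generated options class. A minimal sketch, assuming the eager Ops entry point (class name and values are illustrative):

    import org.tensorflow.op.Ops;
    import org.tensorflow.op.core.Constant;
    import org.tensorflow.op.math.Mean;
    import org.tensorflow.types.TFloat32;

    public class MeanSketch {
      public static void main(String[] args) {
        Ops tf = Ops.create();
        Constant<TFloat32> x = tf.constant(new float[][] {{1f, 2f}, {3f, 4f}});
        // reduce along axis 1 and keep the reduced dimension with length 1
        Mean<TFloat32> m = tf.math.mean(x, tf.constant(1), Mean.keepDims(true));
        System.out.println(m.asTensor().getFloat(0, 0)); // 1.5
        System.out.println(m.asTensor().getFloat(1, 0)); // 3.5
      }
    }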
    NOTE: {@code math.Mod} supports broadcasting. More about broadcasting * here - * - * @param data type for {@code z} output */ @OpMetadata( opType = Mod.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mul.java index d7466085ada..d18a48a6472 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Mul.java @@ -37,8 +37,6 @@ * Returns x * y element-wise. * NOTE: {@code math.Mul} supports broadcasting. More about broadcasting * here - * - * @param data type for {@code z} output */ @OpMetadata( opType = Mul.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/MulNoNan.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/MulNoNan.java index 85429b70ca1..7e85f94c31d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/MulNoNan.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/MulNoNan.java @@ -37,8 +37,6 @@ * Returns x * y element-wise. Returns zero if y is zero, even if x if infinite or NaN. * NOTE: {@code math.MulNoNan} supports broadcasting. More about broadcasting * here - * - * @param data type for {@code z} output */ @OpMetadata( opType = MulNoNan.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ndtri.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ndtri.java index 37d1ffb8fc9..2c9b4f4719f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ndtri.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Ndtri.java @@ -35,8 +35,6 @@ /** * The Ndtri operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = Ndtri.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Neg.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Neg.java index e0ec5783144..e11b274470a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Neg.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Neg.java @@ -36,8 +36,6 @@ /** * Computes numerical negative value element-wise. * I.e., \(y = -x\). - * - * @param data type for {@code y} output */ @OpMetadata( opType = Neg.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/NextAfter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/NextAfter.java index 45ff3a179ca..fef32810db3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/NextAfter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/NextAfter.java @@ -40,8 +40,6 @@ *
    {@literal @}compatibility(cpp)
    * Equivalent to C++ std::nextafter function. *
    {@literal @}end_compatibility - * - * @param data type for {@code output} output */ @OpMetadata( opType = NextAfter.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Polygamma.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Polygamma.java index b2fb442489b..f391fef2335 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Polygamma.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Polygamma.java @@ -39,8 +39,6 @@ *
    \(\psi^{(a)}(x) = \frac{d^a}{dx^a} \psi(x)\) *
    where \(\psi(x)\) is the digamma function. * The polygamma function is defined only for non-negative integer orders \a\. - * - * @param data type for {@code z} output */ @OpMetadata( opType = Polygamma.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Pow.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Pow.java index f50532e8d62..3a8f8acbb7a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Pow.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Pow.java @@ -42,8 +42,6 @@ * # tensor 'y' is [[8, 16], [2, 3]] * tf.pow(x, y) ==> [[256, 65536], [9, 27]] * - * - * @param data type for {@code z} output */ @OpMetadata( opType = Pow.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedAdd.java index ad59711dca9..cf02c4ad713 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedAdd.java @@ -37,8 +37,6 @@ /** * Returns x + y element-wise, working on quantized buffers. - * - * @param data type for {@code z} output */ @OpMetadata( opType = QuantizedAdd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedMul.java index 6b5c3d05579..b9f1e5b062c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/QuantizedMul.java @@ -37,8 +37,6 @@ /** * Returns x * y element-wise, working on quantized buffers. - * - * @param data type for {@code z} output */ @OpMetadata( opType = QuantizedMul.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Real.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Real.java index 6217269b474..c85e0d73861 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Real.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Real.java @@ -47,8 +47,6 @@ * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.real(input) ==> [-2.25, 3.25] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = Real.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RealDiv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RealDiv.java index c1aceba76d3..fb2e7e77d33 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RealDiv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RealDiv.java @@ -38,8 +38,6 @@ * If {@code x} and {@code y} are reals, this will return the floating-point division. *
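The Pow example quoted above translates one-to-one to the Java side; a short sketch, again assuming the eager Ops entry point (values mirror the snippet, the class name is illustrative):

    import org.tensorflow.op.Ops;
    import org.tensorflow.op.core.Constant;
    import org.tensorflow.op.math.Pow;
    import org.tensorflow.types.TInt32;

    public class PowSketch {
      public static void main(String[] args) {
        Ops tf = Ops.create();
        Constant<TInt32> x = tf.constant(new int[][] {{2, 2}, {3, 3}});
        Constant<TInt32> y = tf.constant(new int[][] {{8, 16}, {2, 3}});
        Pow<TInt32> z = tf.math.pow(x, y);              // element-wise x ** y
        System.out.println(z.asTensor().getInt(0, 1));  // 65536
      }
    }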
    NOTE: {@code Div} supports broadcasting. More about broadcasting * here - * - * @param data type for {@code z} output */ @OpMetadata( opType = RealDiv.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Reciprocal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Reciprocal.java index 97ae15f6015..c0e6b9c573a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Reciprocal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Reciprocal.java @@ -36,8 +36,6 @@ /** * Computes the reciprocal of x element-wise. * I.e., \(y = 1 / x\). - * - * @param data type for {@code y} output */ @OpMetadata( opType = Reciprocal.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ReciprocalGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ReciprocalGrad.java index 13b7b7592ab..9d1c672629f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ReciprocalGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/ReciprocalGrad.java @@ -37,8 +37,6 @@ * Computes the gradient for the inverse of {@code x} wrt its input. * Specifically, {@code grad = -dy * y*y}, where {@code y = 1/x}, and {@code dy} * is the corresponding input gradient. - * - * @param data type for {@code z} output */ @OpMetadata( opType = ReciprocalGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RequantizePerChannel.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RequantizePerChannel.java index c2a71d1d594..f6dcf220ade 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RequantizePerChannel.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RequantizePerChannel.java @@ -37,8 +37,6 @@ /** * Requantizes input with min and max values known per channel. - * - * @param data type for {@code output} output */ @OpMetadata( opType = RequantizePerChannel.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rint.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rint.java index 716bc8be07b..62a48d4ecd0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rint.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rint.java @@ -43,8 +43,6 @@ * rint(0.5000001) ==> 1.0 * rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.] * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Rint.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Round.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Round.java index d8a5aff3d2d..0e7441efeb1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Round.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Round.java @@ -37,8 +37,6 @@ * Rounds the values of a tensor to the nearest integer, element-wise. * Rounds half to even. Also known as bankers rounding. If you want to round * according to the current system rounding mode use std::cint. 
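The half-to-even ("bankers") rounding described for Round above is easy to check from Java; a hedged sketch using the eager Ops entry point (illustrative only):

    import org.tensorflow.op.Ops;
    import org.tensorflow.op.core.Constant;
    import org.tensorflow.op.math.Round;
    import org.tensorflow.types.TFloat32;

    public class RoundSketch {
      public static void main(String[] args) {
        Ops tf = Ops.create();
        Constant<TFloat32> x = tf.constant(new float[] {0.5f, 1.5f, 2.5f, -0.5f});
        Round<TFloat32> y = tf.math.round(x);           // rounds halves to the nearest even value
        System.out.println(y.asTensor().getFloat(1));   // 2.0
        System.out.println(y.asTensor().getFloat(2));   // 2.0, not 3.0
      }
    }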
- * - * @param data type for {@code y} output */ @OpMetadata( opType = Round.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rsqrt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rsqrt.java index 12ce75ef035..3d438f10f12 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rsqrt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Rsqrt.java @@ -36,8 +36,6 @@ /** * Computes reciprocal of square root of x element-wise. * I.e., \(y = 1 / \sqrt{x}\). - * - * @param data type for {@code y} output */ @OpMetadata( opType = Rsqrt.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RsqrtGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RsqrtGrad.java index f92da40a82b..90fc4892083 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RsqrtGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/RsqrtGrad.java @@ -37,8 +37,6 @@ * Computes the gradient for the rsqrt of {@code x} wrt its input. * Specifically, {@code grad = dy * -0.5 * y^3}, where {@code y = rsqrt(x)}, and {@code dy} * is the corresponding input gradient. - * - * @param data type for {@code z} output */ @OpMetadata( opType = RsqrtGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMax.java index 1939c7a4d3a..44ec468eaf4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMax.java @@ -73,8 +73,6 @@ * * * - * - * @param data type for {@code output} output */ @OpMetadata( opType = SegmentMax.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMean.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMean.java index 7d0e2af1606..2e69b2bb8b5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMean.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMean.java @@ -64,8 +64,6 @@ * * * - * - * @param data type for {@code output} output */ @OpMetadata( opType = SegmentMean.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMin.java index cb5a312d3ff..9dce52fceed 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentMin.java @@ -73,8 +73,6 @@ * * * - * - * @param data type for {@code output} output */ @OpMetadata( opType = SegmentMin.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentProd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentProd.java index 87738a1ac3a..77fd53d92a0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentProd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentProd.java @@ -66,8 +66,6 @@ * * * - * - * @param data type for {@code output} output */ @OpMetadata( opType = SegmentProd.OP_NAME, 
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentSum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentSum.java index 578d159e289..c47c3acd24f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentSum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentSum.java @@ -44,9 +44,6 @@ * that {@code segment_ids[j] == i}. *
    If the sum is empty for a given segment ID {@code i}, {@code output[i] = 0}. *
    Note that this op is currently only supported with jit_compile=True. - * - * - * @param data type for {@code output} output */ @OpMetadata( opType = SegmentSum.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sigmoid.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sigmoid.java index bd93a0303eb..8e71006a2c0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sigmoid.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sigmoid.java @@ -36,8 +36,6 @@ /** * Computes sigmoid of {@code x} element-wise. * Specifically, {@code y = 1 / (1 + exp(-x))}. - * - * @param data type for {@code y} output */ @OpMetadata( opType = Sigmoid.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SigmoidGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SigmoidGrad.java index 8f4b7cfe45c..a85b754cc61 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SigmoidGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SigmoidGrad.java @@ -37,8 +37,6 @@ * Computes the gradient of the sigmoid of {@code x} wrt its input. * Specifically, {@code grad = dy * y * (1 - y)}, where {@code y = sigmoid(x)}, and * {@code dy} is the corresponding input gradient. - * - * @param data type for {@code z} output */ @OpMetadata( opType = SigmoidGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sign.java index 15f5e07b597..ee9d2d65154 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sign.java @@ -46,8 +46,6 @@ * * * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Sign.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sin.java index 06269cb6278..1a13ada1838 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sin.java @@ -42,8 +42,6 @@ * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10, float("inf")]) * tf.math.sin(x) ==> [nan -0.4121185 -0.47942555 0.84147096 0.9320391 -0.87329733 -0.54402107 nan] * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Sin.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sinh.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sinh.java index 9e1a692df76..b4af201ab99 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sinh.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sinh.java @@ -42,8 +42,6 @@ * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")]) * tf.math.sinh(x) ==> [-inf -4.0515420e+03 -5.2109528e-01 1.1752012e+00 1.5094614e+00 3.6268604e+00 1.1013232e+04 inf] * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Sinh.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SobolSample.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SobolSample.java index 95f33401f0b..5989ca78f57 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SobolSample.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SobolSample.java @@ -40,8 +40,6 @@ * Generates points from the Sobol sequence. * Creates a Sobol sequence with {@code num_results} samples. Each sample has dimension * {@code dim}. Skips the first {@code skip} samples. - * - * @param data type for {@code samples} output */ @OpMetadata( opType = SobolSample.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Softplus.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Softplus.java index aa80f8d0840..cdb0aea4f9f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Softplus.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Softplus.java @@ -35,8 +35,6 @@ /** * The Softplus operation - * - * @param data type for {@code activations} output */ @OpMetadata( opType = Softplus.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SoftplusGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SoftplusGrad.java index 5a8445dad45..3f2901810ce 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SoftplusGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SoftplusGrad.java @@ -35,8 +35,6 @@ /** * Computes softplus gradients for a softplus operation. - * - * @param data type for {@code backprops} output */ @OpMetadata( opType = SoftplusGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sqrt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sqrt.java index ac6cd68b529..8c6edfc6e89 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sqrt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sqrt.java @@ -36,8 +36,6 @@ /** * Computes square root of x element-wise. * I.e., \(y = \sqrt{x} = x^{1/2}\). - * - * @param data type for {@code y} output */ @OpMetadata( opType = Sqrt.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SqrtGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SqrtGrad.java index 451143c16e4..eed0209152b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SqrtGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SqrtGrad.java @@ -37,8 +37,6 @@ * Computes the gradient for the sqrt of {@code x} wrt its input. * Specifically, {@code grad = dy * 0.5 / y}, where {@code y = sqrt(x)}, and {@code dy} * is the corresponding input gradient. - * - * @param data type for {@code z} output */ @OpMetadata( opType = SqrtGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Square.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Square.java index d5811d17c2a..2952af307d2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Square.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Square.java @@ -36,8 +36,6 @@ /** * Computes square of x element-wise. * I.e., \(y = x * x = x^2\). 
- * - * @param data type for {@code y} output */ @OpMetadata( opType = Square.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SquaredDifference.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SquaredDifference.java index 2af6fe083e3..4d880a79baa 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SquaredDifference.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SquaredDifference.java @@ -37,8 +37,6 @@ * Returns conj(x - y)(x - y) element-wise. * NOTE: {@code math.SquaredDifference} supports broadcasting. More about broadcasting * here - * - * @param data type for {@code z} output */ @OpMetadata( opType = SquaredDifference.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sub.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sub.java index 6313555f9f1..b48b311d80e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sub.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Sub.java @@ -37,8 +37,6 @@ * Returns x - y element-wise. * NOTE: {@code math.Sub} supports broadcasting. More about broadcasting * here - * - * @param data type for {@code z} output */ @OpMetadata( opType = Sub.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tan.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tan.java index 566b7d2b03f..c1073f8a5bb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tan.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tan.java @@ -43,8 +43,6 @@ * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")]) * tf.math.tan(x) ==> [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 0.32097113 nan] * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Tan.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tanh.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tanh.java index ee24b4085df..706a8d90cd0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tanh.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tanh.java @@ -49,8 +49,6 @@ * * * - * - * @param data type for {@code y} output */ @OpMetadata( opType = Tanh.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TanhGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TanhGrad.java index c638f78b3fe..273adcf20a6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TanhGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TanhGrad.java @@ -37,8 +37,6 @@ * Computes the gradient for the tanh of {@code x} wrt its input. * Specifically, {@code grad = dy * (1 - y*y)}, where {@code y = tanh(x)}, and {@code dy} * is the corresponding input gradient. 
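For the broadcasting note attached to SquaredDifference above, a minimal sketch with the eager Ops entry point (names and values are illustrative, not part of this change):

    import org.tensorflow.op.Ops;
    import org.tensorflow.op.core.Constant;
    import org.tensorflow.op.math.SquaredDifference;
    import org.tensorflow.types.TFloat32;

    public class SquaredDifferenceSketch {
      public static void main(String[] args) {
        Ops tf = Ops.create();
        Constant<TFloat32> x = tf.constant(new float[] {1f, 4f, 7f});
        Constant<TFloat32> y = tf.constant(new float[] {2f});  // broadcast against x
        SquaredDifference<TFloat32> d = tf.math.squaredDifference(x, y);
        System.out.println(d.asTensor().getFloat(2));          // (7 - 2)^2 = 25.0
      }
    }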
- * - * @param data type for {@code z} output */ @OpMetadata( opType = TanhGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateDiv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateDiv.java index 377eb5848d8..7857bd6221b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateDiv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateDiv.java @@ -41,8 +41,6 @@ * Python Semantics. *
    NOTE: {@code math.TruncateDiv} supports broadcasting. More about broadcasting * here - * - * @param data type for {@code z} output */ @OpMetadata( opType = TruncateDiv.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateMod.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateMod.java index e80c75e5709..bd7a41fafd2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateMod.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/TruncateMod.java @@ -38,8 +38,6 @@ * the result here is consistent with a truncating divide. E.g. {@code truncate(x / y) * y + truncate_mod(x, y) = x}. *
    NOTE: {@code math.TruncateMod} supports broadcasting. More about broadcasting * here - * - * @param data type for {@code z} output */ @OpMetadata( opType = TruncateMod.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UniformQuantizedAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UniformQuantizedAdd.java index 535d432dcca..312c712b44e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UniformQuantizedAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UniformQuantizedAdd.java @@ -52,8 +52,6 @@ * i.e. For both operands {@code lhs} and {@code rhs}, * if {@code operand.quantization_axis} >= 0 and {@code output.quantization_axis} >= 0, * {@code operand.dims} - {@code operand.quantization_axis} must be equal to {@code output.dims} - {@code output.quantization_axis}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = UniformQuantizedAdd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMax.java index 50d32494e80..27888d7f1f5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMax.java @@ -67,8 +67,6 @@ * * * - * - * @param data type for {@code output} output */ @OpMetadata( opType = UnsortedSegmentMax.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMin.java index db83daaead7..af919665a56 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMin.java @@ -64,8 +64,6 @@ * result in safe but unspecified behavior, which may include ignoring * out-of-bound indices or outputting a tensor with a 0 stored in the first * dimension of its shape if {@code num_segments} is 0. - * - * @param data type for {@code output} output */ @OpMetadata( opType = UnsortedSegmentMin.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentProd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentProd.java index a36c653ef2a..fd3f76bc1e7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentProd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentProd.java @@ -64,8 +64,6 @@ * result in safe but unspecified behavior, which may include ignoring * out-of-bound indices or outputting a tensor with a 0 stored in the first * dimension of its shape if {@code num_segments} is 0. 
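The unsorted segment reductions described above take an explicit num_segments and accept segment ids in any order. A hedged sketch for UnsortedSegmentMax through the eager Ops entry point (illustrative values):

    import org.tensorflow.op.Ops;
    import org.tensorflow.op.core.Constant;
    import org.tensorflow.op.math.UnsortedSegmentMax;
    import org.tensorflow.types.TFloat32;
    import org.tensorflow.types.TInt32;

    public class UnsortedSegmentMaxSketch {
      public static void main(String[] args) {
        Ops tf = Ops.create();
        Constant<TFloat32> data = tf.constant(new float[] {1f, 2f, 3f, 4f});
        Constant<TInt32> segmentIds = tf.constant(new int[] {0, 1, 0, 1});  // need not be sorted
        UnsortedSegmentMax<TFloat32> max =
            tf.math.unsortedSegmentMax(data, segmentIds, tf.constant(2));
        System.out.println(max.asTensor().getFloat(0));  // max(1, 3) = 3.0
        System.out.println(max.asTensor().getFloat(1));  // max(2, 4) = 4.0
      }
    }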
- * - * @param data type for {@code output} output */ @OpMetadata( opType = UnsortedSegmentProd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentSum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentSum.java index 14c0bef2293..af4dd57e39f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentSum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentSum.java @@ -67,8 +67,6 @@ * * * - * - * @param data type for {@code output} output */ @OpMetadata( opType = UnsortedSegmentSum.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xdivy.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xdivy.java index 8be3546a9f0..0ba35ba8a83 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xdivy.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xdivy.java @@ -35,8 +35,6 @@ /** * Returns 0 if x == 0, and x / y otherwise, elementwise. - * - * @param data type for {@code z} output */ @OpMetadata( opType = Xdivy.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlog1py.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlog1py.java index b798c8ef598..c6e6184bed0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlog1py.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlog1py.java @@ -35,8 +35,6 @@ /** * Returns 0 if x == 0, and x * log1p(y) otherwise, elementwise. - * - * @param data type for {@code z} output */ @OpMetadata( opType = Xlog1py.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlogy.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlogy.java index b4ad543093f..e27ef9a210c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlogy.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Xlogy.java @@ -35,8 +35,6 @@ /** * Returns 0 if x == 0, and x * log(y) otherwise, elementwise. - * - * @param data type for {@code z} output */ @OpMetadata( opType = Xlogy.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Zeta.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Zeta.java index 887fb1af711..593507c4340 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Zeta.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Zeta.java @@ -37,8 +37,6 @@ * Compute the Hurwitz zeta function \(\zeta(x, q)\). * The Hurwitz zeta function is defined as: *
    \(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\) - * - * @param data type for {@code z} output */ @OpMetadata( opType = Zeta.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/erfinv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/erfinv.java index a4b68423646..a208c49973f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/erfinv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/erfinv.java @@ -35,8 +35,6 @@ /** * The Erfinv operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = erfinv.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ0.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ0.java index 6ef1d289c7d..839ca6179b3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ0.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ0.java @@ -35,8 +35,6 @@ /** * The BesselJ0 operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = BesselJ0.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ1.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ1.java index 5e7718f4144..6e125a29821 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ1.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselJ1.java @@ -35,8 +35,6 @@ /** * The BesselJ1 operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = BesselJ1.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0.java index 338b5759a10..8ec9f528212 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0.java @@ -35,8 +35,6 @@ /** * The BesselK0 operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = BesselK0.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0e.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0e.java index f2a01b68ba8..69d5995c59d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0e.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK0e.java @@ -35,8 +35,6 @@ /** * The BesselK0e operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = BesselK0e.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1.java index 8143c8107d5..f26b95a8c53 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1.java @@ -35,8 +35,6 @@ /** * The BesselK1 operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = 
BesselK1.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1e.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1e.java index 08ea2073dab..995eaccd9dd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1e.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselK1e.java @@ -35,8 +35,6 @@ /** * The BesselK1e operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = BesselK1e.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY0.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY0.java index c82e15022db..1beae63d61f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY0.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY0.java @@ -35,8 +35,6 @@ /** * The BesselY0 operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = BesselY0.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY1.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY1.java index 5b86f1987e3..3985dee42d0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY1.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/BesselY1.java @@ -35,8 +35,6 @@ /** * The BesselY1 operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = BesselY1.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Dawsn.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Dawsn.java index 045ffc0d94c..e34e0376249 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Dawsn.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Dawsn.java @@ -35,8 +35,6 @@ /** * The Dawsn operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = Dawsn.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Expint.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Expint.java index bcdff92cb07..9b61e0fcb90 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Expint.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Expint.java @@ -35,8 +35,6 @@ /** * The Expint operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = Expint.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelCos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelCos.java index 790daad9115..dffb6bda0f0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelCos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelCos.java @@ -35,8 +35,6 @@ /** * The FresnelCos operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = FresnelCos.OP_NAME, diff --git 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelSin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelSin.java index a148cb42bff..23e7e1d4bbd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelSin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/FresnelSin.java @@ -35,8 +35,6 @@ /** * The FresnelSin operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = FresnelSin.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Spence.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Spence.java index 7835a2fca79..0a012a3be6c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Spence.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/special/Spence.java @@ -35,8 +35,6 @@ /** * The Spence operation - * - * @param data type for {@code y} output */ @OpMetadata( opType = Spence.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool.java index aa583ae8174..3d6355679c8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool.java @@ -38,8 +38,6 @@ * Performs average pooling on the input. * Each entry in {@code output} is the mean of the corresponding size {@code ksize} * window in {@code value}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = AvgPool.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3d.java index b7b61a50351..5f5410d91d5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3d.java @@ -38,8 +38,6 @@ * Performs 3D average pooling on the input. * Each entry in {@code output} is the mean of the corresponding size {@code ksize} window in * {@code value}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = AvgPool3d.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3dGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3dGrad.java index 6acc17b69ae..4b41a0338b3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3dGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPool3dGrad.java @@ -37,8 +37,6 @@ /** * Computes gradients of average pooling function. - * - * @param data type for {@code output} output */ @OpMetadata( opType = AvgPool3dGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPoolGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPoolGrad.java index 74acc456c92..9a2c1511bba 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPoolGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/AvgPoolGrad.java @@ -37,8 +37,6 @@ /** * Computes gradients of the average pooling function. 
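The pooling ops touched just above average each ksize window of the input; a minimal AvgPool sketch, assuming the eager Ops entry point and NHWC layout (the shapes, window size and class name are illustrative, and the List<Long> form of ksize/strides is assumed from the generated NnOps signature):

    import java.util.Arrays;
    import org.tensorflow.op.Ops;
    import org.tensorflow.op.core.Constant;
    import org.tensorflow.op.nn.AvgPool;
    import org.tensorflow.types.TFloat32;

    public class AvgPoolSketch {
      public static void main(String[] args) {
        Ops tf = Ops.create();
        // one 4x4 single-channel image in NHWC layout
        Constant<TFloat32> value = tf.constant(new float[1][4][4][1]);
        AvgPool<TFloat32> pooled = tf.nn.avgPool(
            value,
            Arrays.asList(1L, 2L, 2L, 1L),  // ksize: 2x2 pooling window
            Arrays.asList(1L, 2L, 2L, 1L),  // strides: non-overlapping windows
            "VALID");
        System.out.println(pooled.asTensor().shape());  // [1, 2, 2, 1]
      }
    }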
- * - * @param data type for {@code output} output */ @OpMetadata( opType = AvgPoolGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalization.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalization.java index deaec7bdd3d..ef7ead8115e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalization.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalization.java @@ -36,8 +36,6 @@ /** * Batch normalization. * This op is deprecated. Prefer {@code tf.nn.batch_normalization}. - * - * @param data type for {@code result} output */ @OpMetadata( opType = BatchNormWithGlobalNormalization.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalizationGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalizationGrad.java index f75aebb0e4c..03e84d778c4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalizationGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BatchNormWithGlobalNormalizationGrad.java @@ -36,8 +36,6 @@ /** * Gradients for batch normalization. * This op is deprecated. See {@code tf.nn.batch_normalization}. - * - * @param data type for {@code dx} output */ @OpMetadata( opType = BatchNormWithGlobalNormalizationGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAdd.java index c228699e9cb..5f826546b07 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAdd.java @@ -37,8 +37,6 @@ * Adds {@code bias} to {@code value}. * This is a special case of {@code tf.add} where {@code bias} is restricted to be 1-D. * Broadcasting is supported, so {@code value} may have any number of dimensions. - * - * @param data type for {@code output} output */ @OpMetadata( opType = BiasAdd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAddGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAddGrad.java index 01c90a2fd49..33c2829c271 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAddGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BiasAddGrad.java @@ -38,8 +38,6 @@ * It accumulates all the values from out_backprop into the feature dimension. * For NHWC data format, the feature dimension is the last. For NCHW data format, * the feature dimension is the third-to-last. - * - * @param data type for {@code output} output */ @OpMetadata( opType = BiasAddGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTM.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTM.java index 3363a371d20..ef303c35efc 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTM.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTM.java @@ -56,8 +56,6 @@ * this op uses IFCO. 
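The BiasAdd chunk above restricts bias to a 1-D tensor that is broadcast over the last dimension of value. A short eager sketch under the same entry-point assumptions as above (constants chosen arbitrarily):

import org.tensorflow.EagerSession;
import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TFloat32;

public class BiasAddSketch {
  public static void main(String[] args) {
    try (EagerSession session = EagerSession.create()) {
      Ops tf = Ops.create(session);
      // value: 2x3 matrix; bias: length-3 vector broadcast across both rows.
      Operand<TFloat32> value = tf.constant(new float[][] {{1f, 2f, 3f}, {4f, 5f, 6f}});
      Operand<TFloat32> bias = tf.constant(new float[] {10f, 20f, 30f});
      Operand<TFloat32> out = tf.nn.biasAdd(value, bias);
      System.out.println(out.asTensor().getFloat(1, 2)); // 6 + 30 = 36.0
    }
  }
}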
So in order for the following snippet to be equivalent * all gate-related outputs should be reordered. * - * - * @param data type for {@code i} output */ @OpMetadata( opType = BlockLSTM.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTMGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTMGrad.java index 2684ae60017..85bc08f38b6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTMGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/BlockLSTMGrad.java @@ -37,8 +37,6 @@ /** * Computes the LSTM cell backward propagation for the entire time sequence. * This implementation is to be used in conjunction of BlockLSTMV2. - * - * @param data type for {@code x_grad} output */ @OpMetadata( opType = BlockLSTMGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv.java index 7e352a1ff76..096c8a3719f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv.java @@ -38,8 +38,6 @@ * Computes a N-D convolution given (N+1+batch_dims)-D {@code input} and (N+2)-D {@code filter} tensors. * General function for computing a N-D convolution. It is required that * {@code 1 <= N <= 3}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Conv.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2d.java index 9fef633fefd..6d7eb6e004e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2d.java @@ -56,8 +56,6 @@ * *

    Must have {@code strides[0] = strides[3] = 1}. For the most common case of the same * horizontal and vertices strides, {@code strides = [1, stride, stride, 1]}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Conv2d.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropFilter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropFilter.java index 9d09ebaa1df..2d5af50d5e6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropFilter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropFilter.java @@ -37,8 +37,6 @@ /** * Computes the gradients of convolution with respect to the filter. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Conv2dBackpropFilter.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropFilterV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropFilterV2.java index 901d2a50f72..1b8a95c8728 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropFilterV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropFilterV2.java @@ -35,8 +35,6 @@ /** * Computes the gradients of convolution with respect to the filter. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Conv2dBackpropFilterV2.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropInput.java index 9e44c7170cb..fc0f5f296e1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropInput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropInput.java @@ -37,8 +37,6 @@ /** * Computes the gradients of convolution with respect to the input. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Conv2dBackpropInput.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropInputV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropInputV2.java index 1fa123e14b2..04941640016 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropInputV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv2dBackpropInputV2.java @@ -35,8 +35,6 @@ /** * Computes the gradients of convolution with respect to the input. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Conv2dBackpropInputV2.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3d.java index 5d3d0925894..7de4f93716d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3d.java @@ -40,8 +40,6 @@ * two waveforms as a function of a time-lag applied to one of them. This * is also known as a sliding dot product or sliding inner-product. *

    Our Conv3D implements a form of cross-correlation. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Conv3d.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropFilter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropFilter.java index 2cc01b0dfe0..79970ac4d15 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropFilter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropFilter.java @@ -37,8 +37,6 @@ /** * Computes the gradients of 3-D convolution with respect to the filter. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Conv3dBackpropFilter.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropInput.java index 651f027ac42..d60306ab96d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropInput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Conv3dBackpropInput.java @@ -36,8 +36,6 @@ /** * Computes the gradients of 3-D convolution with respect to the input. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Conv3dBackpropInput.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcBeamSearchDecoder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcBeamSearchDecoder.java index 59cde61eb54..f270607bb50 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcBeamSearchDecoder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcBeamSearchDecoder.java @@ -43,8 +43,6 @@ * the first of these is emitted. That is, when the top path is "A B B B B", * "A B" is returned if merge_repeated = True but "A B B B B" is * returned if merge_repeated = False. - * - * @param data type for {@code log_probability} output */ @OpMetadata( opType = CtcBeamSearchDecoder.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcGreedyDecoder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcGreedyDecoder.java index de01c874c33..688f60ab28e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcGreedyDecoder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcGreedyDecoder.java @@ -45,8 +45,6 @@ *
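For the Conv2d and Conv3d chunks above, a minimal 2-D cross-correlation sketch with stride 1 and no padding (NHWC input; the shapes, filter values and VALID padding are illustrative assumptions):

import java.util.Arrays;
import org.tensorflow.EagerSession;
import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TFloat32;

public class Conv2dSketch {
  public static void main(String[] args) {
    try (EagerSession session = EagerSession.create()) {
      Ops tf = Ops.create(session);
      // Input: one 3x3 single-channel image; filter: 2x2, one input and one output channel.
      Operand<TFloat32> input = tf.reshape(
          tf.constant(new float[] {1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f, 9f}),
          tf.constant(new long[] {1, 3, 3, 1}));
      Operand<TFloat32> filter = tf.reshape(
          tf.constant(new float[] {1f, 0f, 0f, 1f}),
          tf.constant(new long[] {2, 2, 1, 1}));
      // strides[0] = strides[3] = 1 as required by the Conv2d javadoc above.
      Operand<TFloat32> out = tf.nn.conv2d(
          input, filter, Arrays.asList(1L, 1L, 1L, 1L), "VALID");
      System.out.println(out.shape()); // [1, 2, 2, 1]
    }
  }
}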

    Regardless of the value of merge_repeated, if the maximum index of a given * time and batch corresponds to the blank, index {@code (num_classes - 1)}, no new * element is emitted. - * - * @param data type for {@code log_probability} output */ @OpMetadata( opType = CtcGreedyDecoder.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcLoss.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcLoss.java index d2dd09549fa..8369dae6c75 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcLoss.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CtcLoss.java @@ -39,8 +39,6 @@ * Calculates the CTC Loss (log probability) for each batch entry. Also calculates * the gradient. This class performs the softmax operation for you, so inputs * should be e.g. linear projections of outputs by an LSTM. - * - * @param data type for {@code loss} output */ @OpMetadata( opType = CtcLoss.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNN.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNN.java index 0525df86f45..8845090aa6e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNN.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNN.java @@ -73,8 +73,6 @@ * major. * reserve_space: An opaque tensor that can be used in backprop calculation. It * is only produced if is_training is true. - * - * @param data type for {@code output} output */ @OpMetadata( opType = CudnnRNN.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNBackprop.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNBackprop.java index d76dd629918..a1e09f597ac 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNBackprop.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNBackprop.java @@ -83,8 +83,6 @@ * shape as input_c. * params_backprop: The backprop to the params buffer in the forward pass. Has the * same shape as params. - * - * @param data type for {@code input_backprop} output */ @OpMetadata( opType = CudnnRNNBackprop.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNCanonicalToParams.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNCanonicalToParams.java index a513cf67d66..0c38a68a23e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNCanonicalToParams.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNCanonicalToParams.java @@ -65,8 +65,6 @@ * seed2: the 2nd part of a seed to initialize dropout. * num_proj: The output dimensionality for the projection matrices. If None or 0, * no projection is performed. 
- * - * @param data type for {@code params} output */ @OpMetadata( opType = CudnnRNNCanonicalToParams.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNParamsToCanonical.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNParamsToCanonical.java index 6a1e55f34e2..b85a3568412 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNParamsToCanonical.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRNNParamsToCanonical.java @@ -65,8 +65,6 @@ * seed2: the 2nd part of a seed to initialize dropout. * num_proj: The output dimensionality for the projection matrices. If None or 0, * no projection is performed. - * - * @param data type for {@code weights} output */ @OpMetadata( opType = CudnnRNNParamsToCanonical.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRnnParamsSize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRnnParamsSize.java index 051c792e878..1dbc4d48ad8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRnnParamsSize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/CudnnRnnParamsSize.java @@ -57,8 +57,6 @@ * compatible across GPUs. Please use CudnnRNNParamsWeights and * CudnnRNNParamsBiases to save and restore them in a way that is compatible * across different runs. - * - * @param data type for {@code params_size} output */ @OpMetadata( opType = CudnnRnnParamsSize.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatDimMap.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatDimMap.java index 3376ad9ed6e..6e83cd0c867 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatDimMap.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatDimMap.java @@ -36,8 +36,6 @@ /** * Returns the dimension index in the destination data format given the one in * the source data format. - * - * @param data type for {@code y} output */ @OpMetadata( opType = DataFormatDimMap.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatVecPermute.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatVecPermute.java index e02890a40ce..f719f7cc7ce 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatVecPermute.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DataFormatVecPermute.java @@ -64,8 +64,6 @@ *

      * [1, 2]
      *
    - * - * @param data type for {@code y} output */ @OpMetadata( opType = DataFormatVecPermute.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthToSpace.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthToSpace.java index cceb78d27d1..2f1880cda02 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthToSpace.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthToSpace.java @@ -109,8 +109,6 @@ * [ [11], [12], [15], [16]]]] * * - * - * @param data type for {@code output} output */ @OpMetadata( opType = DepthToSpace.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNative.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNative.java index e3f7f02ac33..93a0b744513 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNative.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNative.java @@ -52,8 +52,6 @@ * *

    Must have {@code strides[0] = strides[3] = 1}. For the most common case of the same * horizontal and vertices strides, {@code strides = [1, stride, stride, 1]}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = DepthwiseConv2dNative.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropFilter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropFilter.java index 6c55468131b..66eb190debf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropFilter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropFilter.java @@ -37,8 +37,6 @@ /** * Computes the gradients of depthwise convolution with respect to the filter. - * - * @param data type for {@code output} output */ @OpMetadata( opType = DepthwiseConv2dNativeBackpropFilter.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropInput.java index 0f1a70bb566..287b29abba1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropInput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/DepthwiseConv2dNativeBackpropInput.java @@ -37,8 +37,6 @@ /** * Computes the gradients of depthwise convolution with respect to the input. - * - * @param data type for {@code output} output */ @OpMetadata( opType = DepthwiseConv2dNativeBackpropInput.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2d.java index f213e685ab6..019c786873c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2d.java @@ -57,8 +57,6 @@ * kernel size and contains all zeros. *

    Note on duality: The dilation of {@code input} by the {@code filter} is equal to the * negation of the erosion of {@code -input} by the reflected {@code filter}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Dilation2d.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2dBackpropFilter.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2dBackpropFilter.java index 93381ee22cf..cae841aee0d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2dBackpropFilter.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2dBackpropFilter.java @@ -36,8 +36,6 @@ /** * Computes the gradient of morphological 2-D dilation with respect to the filter. - * - * @param data type for {@code filter_backprop} output */ @OpMetadata( opType = Dilation2dBackpropFilter.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2dBackpropInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2dBackpropInput.java index 7747bc57c64..8204785ae02 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2dBackpropInput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Dilation2dBackpropInput.java @@ -36,8 +36,6 @@ /** * Computes the gradient of morphological 2-D dilation with respect to the input. - * - * @param data type for {@code in_backprop} output */ @OpMetadata( opType = Dilation2dBackpropInput.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Elu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Elu.java index 6119dd0dec2..253baee2601 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Elu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Elu.java @@ -55,8 +55,6 @@ * *

    See Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) * - * - * @param data type for {@code activations} output */ @OpMetadata( opType = Elu.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/EluGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/EluGrad.java index 2df99ce5c8f..4d32b6d365f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/EluGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/EluGrad.java @@ -35,8 +35,6 @@ /** * Computes gradients for the exponential linear (Elu) operation. - * - * @param data type for {@code backprops} output */ @OpMetadata( opType = EluGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPool.java index 04cfd0e3cd9..bb525aac295 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPool.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPool.java @@ -41,8 +41,6 @@ * region generation step. The only difference is that after pooling regions are * generated, a mean operation is performed instead of a max operation in each * pooling region. - * - * @param data type for {@code output} output */ @OpMetadata( opType = FractionalAvgPool.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPoolGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPoolGrad.java index 71b1e624c55..eee42886ab1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPoolGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalAvgPoolGrad.java @@ -41,8 +41,6 @@ * out_backprop to those indices that form the same pooling cell. Therefore, we * just need to know the shape of original input tensor, instead of the whole * tensor. - * - * @param data type for {@code output} output */ @OpMetadata( opType = FractionalAvgPoolGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPool.java index d4c2cb5cf15..08bcbd1a63d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPool.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPool.java @@ -63,8 +63,6 @@ * *
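The Elu and EluGrad chunks above cover the exponential linear unit, which is exp(x) - 1 for negative inputs and the identity otherwise. A quick eager check of that piecewise behaviour (same entry-point assumptions as the earlier sketches):

import org.tensorflow.EagerSession;
import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TFloat32;

public class EluSketch {
  public static void main(String[] args) {
    try (EagerSession session = EagerSession.create()) {
      Ops tf = Ops.create(session);
      Operand<TFloat32> features = tf.constant(new float[] {-1f, 0f, 2f});
      Operand<TFloat32> activations = tf.nn.elu(features);
      // exp(-1) - 1 for the negative entry; non-negative entries pass through unchanged.
      System.out.println(activations.asTensor().getFloat(0)); // ~ -0.632
      System.out.println(activations.asTensor().getFloat(2)); // 2.0
    }
  }
}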

    For more details on fractional max pooling, see this paper: * Benjamin Graham, Fractional Max-Pooling - * - * @param data type for {@code output} output */ @OpMetadata( opType = FractionalMaxPool.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPoolGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPoolGrad.java index 432d6bbfdb7..d44e062ccf7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPoolGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FractionalMaxPoolGrad.java @@ -36,8 +36,6 @@ /** * Computes gradient of the FractionalMaxPool function. - * - * @param data type for {@code output} output */ @OpMetadata( opType = FractionalMaxPoolGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNorm.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNorm.java index 41e62263399..f5cede8855e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNorm.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNorm.java @@ -37,10 +37,6 @@ * Batch normalization. * Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". * The size of 1D Tensors matches the dimension C of the 4D Tensors. - * - * @param data type for {@code y} output - * - * @param data type for {@code batch_mean} output */ @OpMetadata( opType = FusedBatchNorm.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNormGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNormGrad.java index efc751554d2..985249a19fe 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNormGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedBatchNormGrad.java @@ -38,10 +38,6 @@ * Gradient for batch normalization. * Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". * The size of 1D Tensors matches the dimension C of the 4D Tensors. - * - * @param data type for {@code x_backprop} output - * - * @param data type for {@code scale_backprop} output */ @OpMetadata( opType = FusedBatchNormGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedPadConv2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedPadConv2d.java index 1a11cf9c722..336419f92ad 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedPadConv2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedPadConv2d.java @@ -48,8 +48,6 @@ * Internally this op uses a single per-graph scratch buffer, which means that it * will block if multiple versions are being run in parallel. This is because this * operator is primarily an optimization to minimize memory usage. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = FusedPadConv2d.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedResizeAndPadConv2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedResizeAndPadConv2d.java index 69b33a7ffee..8491feba1d7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedResizeAndPadConv2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/FusedResizeAndPadConv2d.java @@ -47,8 +47,6 @@ * Internally this op uses a single per-graph scratch buffer, which means that it * will block if multiple versions are being run in parallel. This is because this * operator is primarily an optimization to minimize memory usage. - * - * @param data type for {@code output} output */ @OpMetadata( opType = FusedResizeAndPadConv2d.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/GRUBlockCell.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/GRUBlockCell.java index 413c9db45cf..0db7843bced 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/GRUBlockCell.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/GRUBlockCell.java @@ -73,8 +73,6 @@ * * h = (1-u) \circ c + u \circ h_prev * - * - * @param data type for {@code r} output */ @OpMetadata( opType = GRUBlockCell.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/GRUBlockCellGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/GRUBlockCellGrad.java index 108aa910427..7379a2790ba 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/GRUBlockCellGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/GRUBlockCellGrad.java @@ -108,8 +108,6 @@ * * d_b_c = sum of d_c_bar along axis = 0 * - * - * @param data type for {@code d_x} output */ @OpMetadata( opType = GRUBlockCellGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/InvGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/InvGrad.java index 37f66b92878..5f178f53e50 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/InvGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/InvGrad.java @@ -37,8 +37,6 @@ * Computes the gradient for the inverse of {@code x} wrt its input. * Specifically, {@code grad = -dy * y*y}, where {@code y = 1/x}, and {@code dy} * is the corresponding input gradient. - * - * @param data type for {@code z} output */ @OpMetadata( opType = InvGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/IsotonicRegression.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/IsotonicRegression.java index 8936770d8b7..ecd511253e8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/IsotonicRegression.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/IsotonicRegression.java @@ -38,8 +38,6 @@ /** * Solves a batch of isotonic regression problems. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = IsotonicRegression.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/L2Loss.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/L2Loss.java index e3b179e440c..9cc952c05cb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/L2Loss.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/L2Loss.java @@ -39,8 +39,6 @@ *

      * output = sum(t ** 2) / 2
      *
    - * - * @param data type for {@code output} output */ @OpMetadata( opType = L2Loss.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCell.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCell.java index 12d4402e70f..5b1e38d3fbe 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCell.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCell.java @@ -57,8 +57,6 @@ * co = tanh(cs) * h = co .* o * - * - * @param data type for {@code i} output */ @OpMetadata( opType = LSTMBlockCell.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCellGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCellGrad.java index e22e2241718..931e4bf2381 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCellGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LSTMBlockCellGrad.java @@ -36,8 +36,6 @@ /** * Computes the LSTM cell backward propagation for 1 timestep. * This implementation is to be used in conjunction of LSTMBlockCell. - * - * @param data type for {@code cs_prev_grad} output */ @OpMetadata( opType = LSTMBlockCellGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LeakyRelu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LeakyRelu.java index 022b81f82da..a0f088f9a03 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LeakyRelu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LeakyRelu.java @@ -35,8 +35,6 @@ /** * Computes rectified linear: {@code max(features, features * alpha)}. - * - * @param data type for {@code activations} output */ @OpMetadata( opType = LeakyRelu.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalization.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalization.java index f0bb2b5017b..17c1e5c0d04 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalization.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalization.java @@ -46,8 +46,6 @@ * *
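The L2Loss chunk above documents output = sum(t ** 2) / 2, i.e. half the squared Euclidean norm without the square root. A one-line eager check of that arithmetic (same assumptions as the earlier sketches):

import org.tensorflow.EagerSession;
import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TFloat32;

public class L2LossSketch {
  public static void main(String[] args) {
    try (EagerSession session = EagerSession.create()) {
      Ops tf = Ops.create(session);
      Operand<TFloat32> t = tf.constant(new float[] {1f, 2f, 3f});
      // sum(t ** 2) / 2 = (1 + 4 + 9) / 2 = 7.0
      System.out.println(tf.nn.l2Loss(t).asTensor().getFloat()); // 7.0
    }
  }
}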

    For details, see Krizhevsky et al., ImageNet classification with deep * convolutional neural networks (NIPS 2012) . - * - * @param data type for {@code output} output */ @OpMetadata( opType = LocalResponseNormalization.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalizationGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalizationGrad.java index 041837b7871..c0b795094aa 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalizationGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LocalResponseNormalizationGrad.java @@ -35,8 +35,6 @@ /** * Gradients for Local Response Normalization. - * - * @param data type for {@code output} output */ @OpMetadata( opType = LocalResponseNormalizationGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LogSoftmax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LogSoftmax.java index 1f9ee440140..1e19b56c19f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LogSoftmax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/LogSoftmax.java @@ -39,8 +39,6 @@ *

      * logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
      *
    - * - * @param data type for {@code logsoftmax} output */ @OpMetadata( opType = LogSoftmax.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool.java index 427b3c92bb2..75b432b8ba3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool.java @@ -36,8 +36,6 @@ /** * Performs max pooling on the input. - * - * @param data type for {@code output} output */ @OpMetadata( opType = MaxPool.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3d.java index d9cace3d967..d701189d5e1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3d.java @@ -36,8 +36,6 @@ /** * Performs 3D max pooling on the input. - * - * @param data type for {@code output} output */ @OpMetadata( opType = MaxPool3d.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGrad.java index 6ac95b8a978..932399be80b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGrad.java @@ -36,8 +36,6 @@ /** * Computes gradients of 3D max pooling function. - * - * @param data type for {@code output} output */ @OpMetadata( opType = MaxPool3dGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGradGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGradGrad.java index 5efa05dec89..74dbc598b35 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGradGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPool3dGradGrad.java @@ -36,8 +36,6 @@ /** * Computes second-order gradients of the maxpooling function. - * - * @param data type for {@code output} output */ @OpMetadata( opType = MaxPool3dGradGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGrad.java index 214b0b0d31c..a329757270c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGrad.java @@ -36,8 +36,6 @@ /** * Computes gradients of the maxpooling function. - * - * @param data type for {@code output} output */ @OpMetadata( opType = MaxPoolGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradGrad.java index a33ba6642b8..0b0f0f616b7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradGrad.java @@ -36,8 +36,6 @@ /** * Computes second-order gradients of the maxpooling function. 
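The LogSoftmax chunk above computes logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))) row by row. A small eager sketch (logit values are arbitrary):

import org.tensorflow.EagerSession;
import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.types.TFloat32;

public class LogSoftmaxSketch {
  public static void main(String[] args) {
    try (EagerSession session = EagerSession.create()) {
      Ops tf = Ops.create(session);
      // 2-D logits: each row is normalized independently.
      Operand<TFloat32> logits = tf.constant(new float[][] {{1f, 2f, 3f}, {0f, 0f, 0f}});
      Operand<TFloat32> logProbs = tf.nn.logSoftmax(logits);
      // The second row is uniform over 3 classes, so every entry is log(1/3).
      System.out.println(logProbs.asTensor().getFloat(1, 0)); // ~ -1.0986
    }
  }
}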
- * - * @param data type for {@code output} output */ @OpMetadata( opType = MaxPoolGradGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradGradWithArgmax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradGradWithArgmax.java index 35f1ffeb6dd..9dedc6014b8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradGradWithArgmax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradGradWithArgmax.java @@ -36,8 +36,6 @@ /** * Computes second-order gradients of the maxpooling function. - * - * @param data type for {@code output} output */ @OpMetadata( opType = MaxPoolGradGradWithArgmax.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradWithArgmax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradWithArgmax.java index 0edd2ca5adc..60d7e7de94c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradWithArgmax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolGradWithArgmax.java @@ -36,8 +36,6 @@ /** * Computes gradients of the maxpooling function. - * - * @param data type for {@code output} output */ @OpMetadata( opType = MaxPoolGradWithArgmax.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolWithArgmax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolWithArgmax.java index bcfba861e1e..bd19af1b703 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolWithArgmax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/MaxPoolWithArgmax.java @@ -46,10 +46,6 @@ * even if padding is involved and the mathematically correct answer is outside * (either negative or too large). This is a bug, but fixing it is difficult to do * in a safe backwards compatible way, especially due to flattening. - * - * @param data type for {@code output} output - * - * @param data type for {@code argmax} output */ @OpMetadata( opType = MaxPoolWithArgmax.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/NthElement.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/NthElement.java index 383dbfc3b22..57754316380 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/NthElement.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/NthElement.java @@ -43,8 +43,6 @@ *
      * values.shape = input.shape[:-1]
      *
    - * - * @param data type for {@code values} output */ @OpMetadata( opType = NthElement.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedAvgPool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedAvgPool.java index 2e27d649947..8987fcd7d55 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedAvgPool.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedAvgPool.java @@ -37,8 +37,6 @@ /** * Produces the average pool of the input tensor for quantized types. - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedAvgPool.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBatchNormWithGlobalNormalization.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBatchNormWithGlobalNormalization.java index 0b9e3b27b55..7f22995509c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBatchNormWithGlobalNormalization.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBatchNormWithGlobalNormalization.java @@ -39,8 +39,6 @@ * Quantized Batch normalization. * This op is deprecated and will be removed in the future. Prefer * {@code tf.nn.batch_normalization}. - * - * @param data type for {@code result} output */ @OpMetadata( opType = QuantizedBatchNormWithGlobalNormalization.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBiasAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBiasAdd.java index c95300fa493..744eb1397eb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBiasAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedBiasAdd.java @@ -38,8 +38,6 @@ /** * Adds Tensor 'bias' to Tensor 'input' for Quantized types. * Broadcasts the values of bias on dimensions 0..N-2 of 'input'. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedBiasAdd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRelu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRelu.java index 4594e0401cc..9226b7b697e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRelu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRelu.java @@ -38,8 +38,6 @@ /** * The QuantizedConv2DAndRelu operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedConv2DAndRelu.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndReluAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndReluAndRequantize.java index 0104cbf9908..f02eba09012 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndReluAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndReluAndRequantize.java @@ -38,8 +38,6 @@ /** * The QuantizedConv2DAndReluAndRequantize operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedConv2DAndReluAndRequantize.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRequantize.java index 5fe5999adab..66344508160 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DAndRequantize.java @@ -38,8 +38,6 @@ /** * The QuantizedConv2DAndRequantize operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedConv2DAndRequantize.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DPerChannel.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DPerChannel.java index 134449aba91..bfd108c34d3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DPerChannel.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DPerChannel.java @@ -38,8 +38,6 @@ /** * Computes QuantizedConv2D per channel. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedConv2DPerChannel.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBias.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBias.java index 27f5343c6ff..fe5566ac7e9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBias.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBias.java @@ -38,8 +38,6 @@ /** * The QuantizedConv2DWithBias operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedConv2DWithBias.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRelu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRelu.java index 61c9bb31b45..ff7d157a846 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRelu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRelu.java @@ -38,8 +38,6 @@ /** * The QuantizedConv2DWithBiasAndRelu operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedConv2DWithBiasAndRelu.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndReluAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndReluAndRequantize.java index 081b8ac3863..b68080cc72c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndReluAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndReluAndRequantize.java @@ -38,8 +38,6 @@ /** * The QuantizedConv2DWithBiasAndReluAndRequantize operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedConv2DWithBiasAndReluAndRequantize.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRequantize.java index 21f4eef7826..5301017e666 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasAndRequantize.java @@ -38,8 +38,6 @@ /** * The QuantizedConv2DWithBiasAndRequantize operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedConv2DWithBiasAndRequantize.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.java index afdd8b87219..687e41485d4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.java @@ -38,8 +38,6 @@ /** * The QuantizedConv2DWithBiasSignedSumAndReluAndRequantize operation - * - * 
@param data type for {@code output} output */ @OpMetadata( opType = QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndRelu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndRelu.java index d92782f88bb..34ceb6e7898 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndRelu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndRelu.java @@ -38,8 +38,6 @@ /** * The QuantizedConv2DWithBiasSumAndRelu operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedConv2DWithBiasSumAndRelu.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndReluAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndReluAndRequantize.java index 0d9c4fab0f6..021873d6885 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndReluAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2DWithBiasSumAndReluAndRequantize.java @@ -38,8 +38,6 @@ /** * The QuantizedConv2DWithBiasSumAndReluAndRequantize operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedConv2DWithBiasSumAndReluAndRequantize.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2d.java index 88482fc869f..77d21ba9794 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedConv2d.java @@ -42,8 +42,6 @@ * number of the associated minimum, and the highest represents the maximum. * This means that you can only interpret the quantized output in the same way, by * taking the returned minimum and maximum values into account. - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedConv2d.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2D.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2D.java index 19c05799f1f..3281b31698b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2D.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2D.java @@ -38,8 +38,6 @@ /** * Computes quantized depthwise Conv2D. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedDepthwiseConv2D.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBias.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBias.java index 9414fd9e015..70314ace0b0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBias.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBias.java @@ -38,8 +38,6 @@ /** * Computes quantized depthwise Conv2D with Bias. - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedDepthwiseConv2DWithBias.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndRelu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndRelu.java index c8d6a30445b..76b0917f709 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndRelu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndRelu.java @@ -38,8 +38,6 @@ /** * Computes quantized depthwise Conv2D with Bias and Relu. - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedDepthwiseConv2DWithBiasAndRelu.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.java index b23311716d2..55dfdecdb39 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.java @@ -38,8 +38,6 @@ /** * Computes quantized depthwise Conv2D with Bias, Relu and Requantize. - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedInstanceNorm.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedInstanceNorm.java index 54bd27c1705..48aedde6806 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedInstanceNorm.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedInstanceNorm.java @@ -36,8 +36,6 @@ /** * Quantized Instance normalization. - * - * @param data type for {@code y} output */ @OpMetadata( opType = QuantizedInstanceNorm.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedMaxPool.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedMaxPool.java index b1323bb3b42..e57d4e945b4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedMaxPool.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedMaxPool.java @@ -37,8 +37,6 @@ /** * Produces the max pool of the input tensor for quantized types. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedMaxPool.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu.java index b80e07346d9..ad55085ab6f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu.java @@ -37,8 +37,6 @@ /** * Computes Quantized Rectified Linear: {@code max(features, 0)} - * - * @param data type for {@code activations} output */ @OpMetadata( opType = QuantizedRelu.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu6.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu6.java index d820e51188a..2b2f21a6b45 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu6.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedRelu6.java @@ -37,8 +37,6 @@ /** * Computes Quantized Rectified Linear 6: {@code min(max(features, 0), 6)} - * - * @param data type for {@code activations} output */ @OpMetadata( opType = QuantizedRelu6.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedReluX.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedReluX.java index 577df61b8dd..41daae389b6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedReluX.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/QuantizedReluX.java @@ -37,8 +37,6 @@ /** * Computes Quantized Rectified Linear X: {@code min(max(features, 0), max_value)} - * - * @param data type for {@code activations} output */ @OpMetadata( opType = QuantizedReluX.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu.java index 218fee4f3d2..126eb0c4c56 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu.java @@ -45,8 +45,6 @@ * * * - * - * @param data type for {@code activations} output */ @OpMetadata( opType = Relu.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6.java index 19de03d7f8e..5500229b21c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6.java @@ -35,8 +35,6 @@ /** * Computes rectified linear 6: {@code min(max(features, 0), 6)}. - * - * @param data type for {@code activations} output */ @OpMetadata( opType = Relu6.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6Grad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6Grad.java index 48ec9cb7037..9af8b816d87 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6Grad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Relu6Grad.java @@ -35,8 +35,6 @@ /** * Computes rectified linear 6 gradients for a Relu6 operation. 
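The Relu-family ops above are elementwise clamps: Relu is max(features, 0), Relu6 additionally caps the result at 6, and the quantized ReluX variant caps it at a caller-supplied max_value. A short eager-mode sketch through the Ops API; the setup is the usual EagerSession/Ops pattern, and accessor details may differ slightly between tensorflow-java versions:

import org.tensorflow.EagerSession;
import org.tensorflow.op.Ops;

public final class ReluSketch {
  public static void main(String[] args) {
    try (EagerSession session = EagerSession.create()) {
      Ops tf = Ops.create(session);
      var features = tf.constant(new float[] {-3f, 0.5f, 8f});
      var relu = tf.nn.relu(features);    // max(features, 0)         -> [0.0, 0.5, 8.0]
      var relu6 = tf.nn.relu6(features);  // min(max(features, 0), 6) -> [0.0, 0.5, 6.0]
      System.out.println(relu.asTensor().getFloat(2));   // 8.0
      System.out.println(relu6.asTensor().getFloat(2));  // 6.0
    }
  }
}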
- * - * @param data type for {@code backprops} output */ @OpMetadata( opType = Relu6Grad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/ReluGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/ReluGrad.java index 5e7103853f3..b15132dd583 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/ReluGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/ReluGrad.java @@ -35,8 +35,6 @@ /** * Computes rectified linear gradients for a Relu operation. - * - * @param data type for {@code backprops} output */ @OpMetadata( opType = ReluGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Selu.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Selu.java index d382a2f5a75..33d504105ec 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Selu.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Selu.java @@ -40,8 +40,6 @@ * {@code initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')}. * For correct dropout, use {@code tf.contrib.nn.alpha_dropout}. *
    See Self-Normalizing Neural Networks - * - * @param data type for {@code activations} output */ @OpMetadata( opType = Selu.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SeluGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SeluGrad.java index 7a2e0656275..bd2d2203f69 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SeluGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SeluGrad.java @@ -35,8 +35,6 @@ /** * Computes gradients for the scaled exponential linear (Selu) operation. - * - * @param data type for {@code backprops} output */ @OpMetadata( opType = SeluGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softmax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softmax.java index 36ef20f21fd..dd6b9ecb2b5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softmax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softmax.java @@ -39,8 +39,6 @@ *
      * $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$
      * 
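The formula above fully specifies Softmax for a 2-D batch of logits. As a reference, a direct plain-Java transcription (numerically naive; a real implementation subtracts the row maximum before exponentiating to avoid overflow):

// softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))
static float[][] softmax(float[][] logits) {
  float[][] out = new float[logits.length][];
  for (int i = 0; i < logits.length; i++) {
    out[i] = new float[logits[i].length];
    float sum = 0f;
    for (int j = 0; j < logits[i].length; j++) {
      out[i][j] = (float) Math.exp(logits[i][j]);
      sum += out[i][j];
    }
    for (int j = 0; j < out[i].length; j++) {
      out[i][j] /= sum;  // each row now sums to 1
    }
  }
  return out;
}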
    - * - * @param data type for {@code softmax} output */ @OpMetadata( opType = Softmax.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java index 9a17188c048..a7836f24051 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftmaxCrossEntropyWithLogits.java @@ -36,8 +36,6 @@ /** * Computes softmax cross entropy cost and gradients to backpropagate. * Inputs are the logits, not probabilities. - * - * @param data type for {@code loss} output */ @OpMetadata( opType = SoftmaxCrossEntropyWithLogits.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softsign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softsign.java index 1345a1ffd11..1144c4c21be 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softsign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/Softsign.java @@ -35,8 +35,6 @@ /** * Computes softsign: {@code features / (abs(features) + 1)}. - * - * @param data type for {@code activations} output */ @OpMetadata( opType = Softsign.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftsignGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftsignGrad.java index b16c933ffe0..3ebe407b08e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftsignGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SoftsignGrad.java @@ -35,8 +35,6 @@ /** * Computes softsign gradients for a softsign operation. - * - * @param data type for {@code backprops} output */ @OpMetadata( opType = SoftsignGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToBatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToBatch.java index 050a12e7f98..e35f65ee574 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToBatch.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToBatch.java @@ -100,8 +100,6 @@ * *
    Among others, this operation is useful for reducing atrous convolution into * regular convolution. - * - * @param data type for {@code output} output */ @OpMetadata( opType = SpaceToBatch.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToDepth.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToDepth.java index 18449c4627c..aaaddf55663 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToDepth.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SpaceToDepth.java @@ -103,8 +103,6 @@ * [[9, 10, 11, 12], * [13, 14, 15, 16]]]] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = SpaceToDepth.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java index 043587de9b5..1b7c99a694e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/SparseSoftmaxCrossEntropyWithLogits.java @@ -40,8 +40,6 @@ * of features. This label is considered to have probability 1.0 for the * given row. *
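For SparseSoftmaxCrossEntropyWithLogits, described just above and below, each row carries a single integer class label that is treated as a one-hot target with probability 1.0, and the inputs are logits rather than probabilities. The per-row loss therefore reduces to the negative log of the softmax probability assigned to that label; a small plain-Java sketch, reusing a softmax helper like the one shown earlier:

// loss[i] = -log(softmax(logits[i])[labels[i]])
static float[] sparseSoftmaxCrossEntropy(float[][] logits, int[] labels) {
  float[][] probs = softmax(logits);  // see the softmax sketch above
  float[] loss = new float[logits.length];
  for (int i = 0; i < logits.length; i++) {
    loss[i] = (float) -Math.log(probs[i][labels[i]]);
  }
  return loss;
}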
    Inputs are the logits, not probabilities. - * - * @param data type for {@code loss} output */ @OpMetadata( opType = SparseSoftmaxCrossEntropyWithLogits.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/TopK.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/TopK.java index b752c40666b..189f0434054 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/TopK.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/TopK.java @@ -46,10 +46,6 @@ * values.shape = indices.shape = input.shape[:-1] + [k] * *
    If two elements are equal, the lower-index element appears first. - * - * @param data type for {@code values} output - * - * @param data type for {@code indices} output */ @OpMetadata( opType = TopK.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/UniformQuantizedConvolution.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/UniformQuantizedConvolution.java index 9b4715c3a21..124c2b062f0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/UniformQuantizedConvolution.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/UniformQuantizedConvolution.java @@ -55,8 +55,6 @@ * *
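For the TopK doc above: values and indices both come out with shape input.shape[:-1] + [k], and when two elements compare equal the lower-index one is kept first. A plain-Java sketch of that selection for a single row; sorting by descending value and breaking ties by index reproduces the documented order (names are illustrative):

import java.util.Comparator;
import java.util.stream.IntStream;

// Indices of the k largest entries of one row, ties resolved in favour of
// the lower index, matching the TopK tie-breaking rule.
static int[] topKIndices(float[] row, int k) {
  return IntStream.range(0, row.length)
      .boxed()
      .sorted(Comparator.<Integer>comparingDouble(i -> -row[i])  // descending value
          .thenComparing(Comparator.naturalOrder()))             // then lower index first
      .limit(k)
      .mapToInt(Integer::intValue)
      .toArray();
}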
    {@code output} is also quantized, using the same formula. * If {@code rhs} is per-tensor quantized, {@code output} must be also per-tensor quantized. - * - * @param data type for {@code output} output */ @OpMetadata( opType = UniformQuantizedConvolution.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/UniformQuantizedConvolutionHybrid.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/UniformQuantizedConvolutionHybrid.java index 02b51c0dfe4..8510272759e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/UniformQuantizedConvolutionHybrid.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/nn/UniformQuantizedConvolutionHybrid.java @@ -55,8 +55,6 @@ * *
    {@code rhs} must be quantized Tensor, where its data value is quantized using the formula: * quantized_data = clip(original_data / scale + zero_point, quantization_min_val, quantization_max_val). - * - * @param data type for {@code output} output */ @OpMetadata( opType = UniformQuantizedConvolutionHybrid.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Dequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Dequantize.java index 743b6c81d93..a062ee1db29 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Dequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Dequantize.java @@ -80,8 +80,6 @@ * : std::max(min_range / min_expected_T, * max_range / max_expected_T); * - * - * @param data type for {@code output} output */ @OpMetadata( opType = Dequantize.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Quantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Quantize.java index a6a5df07a8a..ed34d301ec7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Quantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Quantize.java @@ -128,8 +128,6 @@ *
    Ensures the minimum quantization range is at least this value. * The legacy default value for this is 0.01, but it is strongly suggested to * set it to 0 for new uses. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Quantize.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantize.java index b6552257828..eeb9f05536c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantize.java @@ -38,8 +38,6 @@ * Quantizes then dequantizes a tensor. * This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a * tensor, so its value can change during training. - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizeAndDequantize.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV3.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV3.java index a715ecdb8e5..e1de6cd2ab7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV3.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV3.java @@ -38,8 +38,6 @@ * Quantizes then dequantizes a tensor. * This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a * tensor, so its value can change during training. - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizeAndDequantizeV3.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4.java index 75b47a7f0f9..7de2e59c64b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4.java @@ -37,8 +37,6 @@ * Quantizes then dequantizes a tensor. * This is almost identical to QuantizeAndDequantizeV2, except that it returns a * gradient of 1 for inputs that are within the quantization range, or 0 otherwise. - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizeAndDequantizeV4.OP_NAME, @@ -114,7 +112,7 @@ public static QuantizeAndDequantizeV4 create(Scope scope, * Sets the signedInput option. * * @param signedInput Whether the quantization is signed or unsigned. (actually this parameter should - * have been called {@code signed_output}</b>) + * have been called {@code signed_output}) * @return this Options instance. */ public static Options signedInput(Boolean signedInput) { @@ -218,7 +216,7 @@ private Options() { * Sets the signedInput option. * * @param signedInput Whether the quantization is signed or unsigned. (actually this parameter should - * have been called {@code signed_output}</b>) + * have been called {@code signed_output}) * @return this Options instance. 
*/ public Options signedInput(Boolean signedInput) { @@ -317,7 +315,7 @@ public static class Inputs extends RawOpInputs{@code signed_output}</b>) + * have been called {@code signed_output}) */ public final boolean signedInput; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4Grad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4Grad.java index d2d9d9e6035..65cf77c43ca 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4Grad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeAndDequantizeV4Grad.java @@ -37,8 +37,6 @@ * Returns the gradient of {@code QuantizeAndDequantizeV4}. * Returns a gradient of 1 for inputs that are within the quantization range, * or 0 otherwise. - * - * @param data type for {@code input_backprop} output */ @OpMetadata( opType = QuantizeAndDequantizeV4Grad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeDownAndShrinkRange.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeDownAndShrinkRange.java index d8aee82efb2..77aaa257758 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeDownAndShrinkRange.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizeDownAndShrinkRange.java @@ -56,8 +56,6 @@ * input values that only uses a small fraction of the possible range. By feeding * that output into this operator, we can reduce it from 32 bits down to 8 with * minimal loss of accuracy. - * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizeDownAndShrinkRange.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedConcat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedConcat.java index cae65990d35..a52e49b8080 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedConcat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedConcat.java @@ -38,8 +38,6 @@ /** * Concatenates quantized tensors along one dimension. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = QuantizedConcat.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndDequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndDequantize.java index 69827ccd019..c03a82caf5c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndDequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndDequantize.java @@ -37,8 +37,6 @@ /** * The QuantizedMatMulWithBiasAndDequantize operation - * - * @param data type for {@code out} output */ @OpMetadata( opType = QuantizedMatMulWithBiasAndDequantize.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndRequantize.java index cd48b07ac48..b848d068a15 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/QuantizedMatMulWithBiasAndRequantize.java @@ -37,8 +37,6 @@ /** * The QuantizedMatMulWithBiasAndRequantize operation - * - * @param data type for {@code out} output */ @OpMetadata( opType = QuantizedMatMulWithBiasAndRequantize.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Requantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Requantize.java index 48bfa78ab74..0ebd2ce0e3a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Requantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/Requantize.java @@ -43,8 +43,6 @@ * interpretation of the {@code input} data. For example, if {@code input_min} is -1.0f and * {@code input_max} is 1.0f, and we are dealing with {@code quint16} quantized data, then a 0 * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Requantize.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformDequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformDequantize.java index 97dad1321da..8f5d44bf663 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformDequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformDequantize.java @@ -40,8 +40,6 @@ * Perform dequantization on the quantized Tensor {@code input}. * Given quantized {@code input} which was quantized using {@code scales} and {@code zero_points}, performs dequantization using the formula: * dequantized_data = (quantized_data - zero_point) * scale. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = UniformDequantize.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformQuantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformQuantize.java index 43fed90b7cc..390ceb83d8a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformQuantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformQuantize.java @@ -40,8 +40,6 @@ * Perform quantization on Tensor {@code input}. * Given {@code input}, {@code scales} and {@code zero_points}, performs quantization using the formula: * quantized_data = floor(input_data * (1.0f / scale) + 0.5f) + zero_point - * - * @param data type for {@code output} output */ @OpMetadata( opType = UniformQuantize.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformQuantizedDot.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformQuantizedDot.java index 16768a99b22..eff33c22ce7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformQuantizedDot.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformQuantizedDot.java @@ -44,8 +44,6 @@ * quantized_data = clip(original_data / scale + zero_point, quantization_min_val, quantization_max_val). * {@code output} is also quantized, using the same formula. * If {@code rhs} is per-tensor quantized, {@code output} must be also per-tensor quantized. - * - * @param data type for {@code output} output */ @OpMetadata( opType = UniformQuantizedDot.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformQuantizedDotHybrid.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformQuantizedDotHybrid.java index ed8c67f9a53..1f30f7a1a4c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformQuantizedDotHybrid.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformQuantizedDotHybrid.java @@ -43,8 +43,6 @@ * {@code lhs} and {@code rhs} must be 2D Tensors and the lhs.dim_size(1) must match rhs.dim_size(0). * {@code rhs} must be quantized Tensor, where its data value is quantized using the formula: * quantized_data = clip(original_data / scale + zero_point, quantization_min_val, quantization_max_val). - * - * @param data type for {@code output} output */ @OpMetadata( opType = UniformQuantizedDotHybrid.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformRequantize.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformRequantize.java index 85f81e8f202..eb4c511b567 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformRequantize.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/quantization/UniformRequantize.java @@ -52,8 +52,6 @@ *
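The UniformQuantize and UniformDequantize hunks above quote their formulas directly, so a round trip is easy to check by hand. A plain-Java sketch combining them; the clipping step follows the clip(...) form quoted for the uniform quantized dot ops:

// quantized_data = floor(input * (1.0f / scale) + 0.5f) + zero_point,
// clipped to [quantizationMinVal, quantizationMaxVal].
static int uniformQuantize(float x, float scale, int zeroPoint,
                           int quantizationMinVal, int quantizationMaxVal) {
  int q = (int) Math.floor(x * (1.0f / scale) + 0.5f) + zeroPoint;
  return Math.max(quantizationMinVal, Math.min(quantizationMaxVal, q));
}

// dequantized_data = (quantized_data - zero_point) * scale
static float uniformDequantize(int q, float scale, int zeroPoint) {
  return (q - zeroPoint) * scale;
}

// Round trip: uniformDequantize(uniformQuantize(0.30f, 0.1f, 0, -128, 127), 0.1f, 0)
// gives 0.3f back, up to the quantization step of one scale unit.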
  • per-axis -> per-axis where input_quantization_axis equals output_quantization_axis. * i.e. At least one among input_quantization_axis and output_quantization_axis must be -1, or two must be equal.
  • * - * - * @param data type for {@code output} output */ @OpMetadata( opType = UniformRequantize.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedBincount.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedBincount.java index 2607b8e0fcf..0aadded3990 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedBincount.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedBincount.java @@ -42,8 +42,6 @@ * the value in {@code weights} at each index where the corresponding value in {@code arr} is * {@code i}. *
    Values in {@code arr} outside of the range [0, size) are ignored. - * - * @param data type for {@code output} output */ @OpMetadata( opType = RaggedBincount.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCountSparseOutput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCountSparseOutput.java index 1e654d1665b..720919e6873 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCountSparseOutput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCountSparseOutput.java @@ -37,8 +37,6 @@ /** * Performs sparse-output bin counting for a ragged tensor input. * Counts the number of times each value occurs in the input. - * - * @param data type for {@code output_values} output */ @OpMetadata( opType = RaggedCountSparseOutput.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCross.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCross.java index 1d5cc361a5f..3b356804b4f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCross.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedCross.java @@ -39,10 +39,6 @@ /** * Generates a feature cross from a list of tensors, and returns it as a * RaggedTensor. See {@code tf.ragged.cross} for more details. - * - * @param data type for {@code output_values} output - * - * @param data type for {@code output_row_splits} output */ @OpMetadata( opType = RaggedCross.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedFillEmptyRows.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedFillEmptyRows.java index 5f1b9cf66ec..d8414fd1ae3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedFillEmptyRows.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedFillEmptyRows.java @@ -37,8 +37,6 @@ /** * The RaggedFillEmptyRows operation - * - * @param data type for {@code output_values} output */ @OpMetadata( opType = RaggedFillEmptyRows.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedFillEmptyRowsGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedFillEmptyRowsGrad.java index 9ea15d1320a..314e4a689af 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedFillEmptyRowsGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedFillEmptyRowsGrad.java @@ -36,8 +36,6 @@ /** * The RaggedFillEmptyRowsGrad operation - * - * @param data type for {@code d_values} output */ @OpMetadata( opType = RaggedFillEmptyRowsGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedGather.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedGather.java index 059c102f6ed..3c71b9987c4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedGather.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedGather.java @@ -56,10 +56,6 @@ * *
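The RaggedBincount description above reduces to a simple counting rule: add weights[j] to output[arr[j]] for every j, and ignore entries of arr outside [0, size). A dense, non-ragged plain-Java sketch of that rule:

// output[i] = sum of weights[j] over all j with arr[j] == i;
// values of arr outside [0, size) are ignored, as documented.
static float[] weightedBincount(int[] arr, float[] weights, int size) {
  float[] output = new float[size];
  for (int j = 0; j < arr.length; j++) {
    int bin = arr[j];
    if (bin >= 0 && bin < size) {
      output[bin] += weights[j];
    }
  }
  return output;
}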
    (Note: This c++ op is used to implement the higher-level python * {@code tf.ragged.gather} op, which also supports ragged indices.) - * - * @param data type for {@code output_nested_splits} output - * - * @param data type for {@code output_dense_values} output */ @OpMetadata( opType = RaggedGather.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedRange.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedRange.java index 52d8d2d66b9..39a6487398e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedRange.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedRange.java @@ -50,10 +50,6 @@ *
    The input tensors {@code starts}, {@code limits}, and {@code deltas} may be scalars or vectors. * The vector inputs must all have the same size. Scalar inputs are broadcast * to match the size of the vector inputs. - * - * @param data type for {@code rt_nested_splits} output - * - * @param data type for {@code rt_dense_values} output */ @OpMetadata( opType = RaggedRange.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorFromVariant.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorFromVariant.java index 9223acdcd39..5e9e6cae9a7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorFromVariant.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorFromVariant.java @@ -50,10 +50,6 @@ * values of the decoded {@code RaggedTensor}. If {@code input_ragged_rank} is -1, then it is * inferred as {@code output_ragged_rank} - {@code rank(encoded_ragged)}. See * {@code RaggedTensorToVariant} for the corresponding encoding logic. - * - * @param data type for {@code output_nested_splits} output - * - * @param data type for {@code output_dense_values} output */ @OpMetadata( opType = RaggedTensorFromVariant.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToSparse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToSparse.java index 510cab39924..e765d995332 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToSparse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToSparse.java @@ -41,8 +41,6 @@ * input=ragged.from_nested_row_splits(rt_dense_values, rt_nested_splits) * output=SparseTensor(indices=sparse_indices, values=sparse_values, * dense_shape=sparse_dense_shape) - * - * @param data type for {@code sparse_values} output */ @OpMetadata( opType = RaggedTensorToSparse.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToTensor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToTensor.java index 127c85e9f72..1bbb93a9327 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToTensor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToTensor.java @@ -54,8 +54,6 @@ *
  • "FIRST_DIM_SIZE": if value_rowids is used for the first dimension, then it * is preceded by "FIRST_DIM_SIZE".
  • * - * - * @param data type for {@code result} output */ @OpMetadata( opType = RaggedTensorToTensor.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToVariantGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToVariantGradient.java index d8e57336a0e..ca254cd1cf5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToVariantGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/ragged/RaggedTensorToVariantGradient.java @@ -42,8 +42,6 @@ * op, given the variant-encoded ragged gradients of the outputs, along with * the outer row-splits and the shape of the dense-values that were provided as * inputs to the RaggedTensorToVariant op. - * - * @param data type for {@code dense_values_grad} output */ @OpMetadata( opType = RaggedTensorToVariantGradient.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/Multinomial.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/Multinomial.java index 6412651e6ac..a213609fca6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/Multinomial.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/Multinomial.java @@ -38,8 +38,6 @@ /** * Draws samples from a multinomial distribution. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Multinomial.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/NonDeterministicInts.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/NonDeterministicInts.java index 6008cd03718..83f81ee6c51 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/NonDeterministicInts.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/NonDeterministicInts.java @@ -38,8 +38,6 @@ /** * Non-deterministically generates some integers. * This op may use some OS-provided source of non-determinism (e.g. an RNG), so each execution will give different results. - * - * @param data type for {@code output} output */ @OpMetadata( opType = NonDeterministicInts.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/ParameterizedTruncatedNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/ParameterizedTruncatedNormal.java index e2a12f2a3c9..4bc87b4da51 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/ParameterizedTruncatedNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/ParameterizedTruncatedNormal.java @@ -37,8 +37,6 @@ * Outputs random values from a normal distribution. The parameters may each be a * scalar which applies to the entire output, or a vector of length shape[0] which * stores the parameters for each batch. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = ParameterizedTruncatedNormal.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGamma.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGamma.java index 5558b534e66..cc1a0ab9ba6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGamma.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGamma.java @@ -38,8 +38,6 @@ * This op uses the algorithm by Marsaglia et al. to acquire samples via * transformation-rejection from pairs of uniform and normal random variables. * See http://dl.acm.org/citation.cfm?id=358414 - * - * @param data type for {@code output} output */ @OpMetadata( opType = RandomGamma.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGammaGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGammaGrad.java index 7baaab08ee4..4ab62242717 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGammaGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomGammaGrad.java @@ -35,8 +35,6 @@ /** * Computes the derivative of a Gamma random sample w.r.t. {@code alpha}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = RandomGammaGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomPoisson.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomPoisson.java index d26081bd288..3e5fc40fc2f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomPoisson.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomPoisson.java @@ -45,8 +45,6 @@ * random variables. * See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer * Programming, Volume 2. Addison Wesley - * - * @param data type for {@code output} output */ @OpMetadata( opType = RandomPoisson.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomShuffle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomShuffle.java index 8c52e218fc8..517900e7df1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomShuffle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomShuffle.java @@ -43,8 +43,6 @@ * [3, 4], ==> [1, 2], * [5, 6]] [3, 4]] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = RandomShuffle.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomStandardNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomStandardNormal.java index 3addc74b9bb..322fe10883c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomStandardNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomStandardNormal.java @@ -37,8 +37,6 @@ /** * Outputs random values from a normal distribution. * The generated values will have mean 0 and standard deviation 1. 
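RandomStandardNormal above draws values with mean 0 and standard deviation 1 in the requested shape. A short eager sketch through the Ops API; the shape/dtype/options calling pattern and the seed option helper are assumed to follow the usual generated form, so check RandomOps in your version:

import org.tensorflow.EagerSession;
import org.tensorflow.op.Ops;
import org.tensorflow.op.random.RandomStandardNormal;
import org.tensorflow.types.TFloat32;

public final class NormalSketch {
  public static void main(String[] args) {
    try (EagerSession session = EagerSession.create()) {
      Ops tf = Ops.create(session);
      // A 2x3 tensor of draws with mean 0 and standard deviation 1.
      RandomStandardNormal<TFloat32> noise = tf.random.randomStandardNormal(
          tf.constant(new int[] {2, 3}), TFloat32.class,
          RandomStandardNormal.seed(42L));  // op-level seed attribute (assumed helper)
      System.out.println(noise.asTensor().getFloat(0, 0));
    }
  }
}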
- * - * @param data type for {@code output} output */ @OpMetadata( opType = RandomStandardNormal.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniform.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniform.java index 74487b121aa..5940994392c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniform.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniform.java @@ -38,8 +38,6 @@ * Outputs random values from a uniform distribution. * The generated values follow a uniform distribution in the range {@code [0, 1)}. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. - * - * @param data type for {@code output} output */ @OpMetadata( opType = RandomUniform.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniformInt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniformInt.java index 243fd44c671..6eba6a6c8b8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniformInt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/RandomUniformInt.java @@ -41,8 +41,6 @@ *
    The random integers are slightly biased unless {@code maxval - minval} is an exact * power of two. The bias is small for values of {@code maxval - minval} significantly * smaller than the range of the output (either {@code 2^32} or {@code 2^64}). - * - * @param data type for {@code output} output */ @OpMetadata( opType = RandomUniformInt.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulRandomBinomial.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulRandomBinomial.java index fc03e7feddb..67bc6bf1167 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulRandomBinomial.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulRandomBinomial.java @@ -38,8 +38,6 @@ /** * The StatefulRandomBinomial operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatefulRandomBinomial.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulStandardNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulStandardNormal.java index 8330a9f4b49..ff905308114 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulStandardNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulStandardNormal.java @@ -39,8 +39,6 @@ /** * Outputs random values from a normal distribution. * The generated values will have mean 0 and standard deviation 1. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatefulStandardNormal.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulTruncatedNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulTruncatedNormal.java index e623baabf5c..409dff36de6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulTruncatedNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulTruncatedNormal.java @@ -41,8 +41,6 @@ * The generated values follow a normal distribution with mean 0 and standard * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatefulTruncatedNormal.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniform.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniform.java index a0e85b0458f..65f86463b06 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniform.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniform.java @@ -40,8 +40,6 @@ * Outputs random values from a uniform distribution. * The generated values follow a uniform distribution in the range {@code [0, 1)}. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. 
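The bias warning above for the integer-uniform ops is the usual modulo-bias argument: folding a uniform w-bit integer into a range whose size does not divide 2^w makes some residues occur one extra time. A tiny plain-Java illustration with an exaggerated 3-bit source:

// Map a uniform value in [0, 8) into [0, 3) by taking a remainder.
// 8 is not a multiple of 3, so residues 0 and 1 each occur three times
// while residue 2 occurs only twice: the slight bias the docs mention.
static void moduloBiasDemo() {
  int[] counts = new int[3];
  for (int source = 0; source < 8; source++) {  // exhaustive 3-bit "RNG"
    counts[source % 3]++;
  }
  System.out.println(java.util.Arrays.toString(counts));  // [3, 3, 2]
}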
- * - * @param data type for {@code output} output */ @OpMetadata( opType = StatefulUniform.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformFullInt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformFullInt.java index a43b26418ea..80f425ff575 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformFullInt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformFullInt.java @@ -38,8 +38,6 @@ /** * Outputs random integers from a uniform distribution. * The generated values are uniform integers covering the whole range of {@code dtype}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatefulUniformFullInt.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformInt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformInt.java index 154f3bd2841..d2854aea992 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformInt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatefulUniformInt.java @@ -42,8 +42,6 @@ *
    The random integers are slightly biased unless {@code maxval - minval} is an exact * power of two. The bias is small for values of {@code maxval - minval} significantly * smaller than the range of the output (either {@code 2^32} or {@code 2^64}). - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatefulUniformInt.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessMultinomial.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessMultinomial.java index 1c306047fd5..45a902b2da8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessMultinomial.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessMultinomial.java @@ -38,8 +38,6 @@ /** * Draws samples from a multinomial distribution. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessMultinomial.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessParameterizedTruncatedNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessParameterizedTruncatedNormal.java index b10e961aab2..64f85682701 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessParameterizedTruncatedNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessParameterizedTruncatedNormal.java @@ -35,8 +35,6 @@ /** * The StatelessParameterizedTruncatedNormal operation - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessParameterizedTruncatedNormal.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomBinomial.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomBinomial.java index 71a3cb24cf9..ebd295592eb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomBinomial.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomBinomial.java @@ -39,8 +39,6 @@ * Outputs deterministic pseudorandom random numbers from a binomial distribution. * Outputs random values from a binomial distribution. *
    The outputs are a deterministic function of {@code shape}, {@code seed}, {@code counts}, and {@code probs}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessRandomBinomial.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGamma.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGamma.java index e57dfcf90f6..69bd0d03ddd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGamma.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomGamma.java @@ -39,8 +39,6 @@ * Outputs deterministic pseudorandom random numbers from a gamma distribution. * Outputs random values from a gamma distribution. *
    The outputs are a deterministic function of the inputs. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessRandomGamma.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormal.java index 7081e980beb..bf0fa718d0e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormal.java @@ -39,8 +39,6 @@ * Outputs deterministic pseudorandom values from a normal distribution. * The generated values will have mean 0 and standard deviation 1. *
    The outputs are a deterministic function of {@code shape} and {@code seed}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessRandomNormal.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormalV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormalV2.java index b1e9dcb4439..ef4f9aafee6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormalV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomNormalV2.java @@ -41,8 +41,6 @@ * Outputs deterministic pseudorandom values from a normal distribution. * The generated values will have mean 0 and standard deviation 1. *
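The stateless ops above all repeat the same contract: the output is a pure function of the inputs (shape plus seed, or key/counter/alg for the V2 forms), so equal inputs give equal tensors. A short eager sketch with StatelessRandomNormal; the two-argument float32 accessor is assumed, other overloads take an explicit dtype class:

import org.tensorflow.EagerSession;
import org.tensorflow.op.Ops;

public final class StatelessSketch {
  public static void main(String[] args) {
    try (EagerSession session = EagerSession.create()) {
      Ops tf = Ops.create(session);
      var shape = tf.constant(new int[] {4});
      var seed = tf.constant(new int[] {7, 11});  // shape-[2] seed tensor
      var a = tf.random.statelessRandomNormal(shape, seed);
      var b = tf.random.statelessRandomNormal(shape, seed);
      // Same shape and seed, therefore identical values in both tensors.
      System.out.println(a.asTensor().getFloat(0) == b.asTensor().getFloat(0));  // true
    }
  }
}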
    The outputs are a deterministic function of {@code shape}, {@code key}, {@code counter} and {@code alg}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessRandomNormalV2.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomPoisson.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomPoisson.java index 3a55731c32d..c617e49f652 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomPoisson.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomPoisson.java @@ -38,8 +38,6 @@ * Outputs deterministic pseudorandom random numbers from a Poisson distribution. * Outputs random values from a Poisson distribution. *
    The outputs are a deterministic function of {@code shape}, {@code seed}, and {@code lam}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessRandomPoisson.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniform.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniform.java index 6e18ceffb6f..86c24f1e171 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniform.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniform.java @@ -40,8 +40,6 @@ * The generated values follow a uniform distribution in the range {@code [0, 1)}. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. *
    The outputs are a deterministic function of {@code shape} and {@code seed}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessRandomUniform.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullInt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullInt.java index ef2bf5e7884..41e703d9ddf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullInt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullInt.java @@ -38,8 +38,6 @@ * Outputs deterministic pseudorandom random integers from a uniform distribution. * The generated values are uniform integers covering the whole range of {@code dtype}. *
    The outputs are a deterministic function of {@code shape} and {@code seed}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessRandomUniformFullInt.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullIntV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullIntV2.java index 50fb67d6fe1..7a910d86feb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullIntV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformFullIntV2.java @@ -40,8 +40,6 @@ * Outputs deterministic pseudorandom random integers from a uniform distribution. * The generated values are uniform integers covering the whole range of {@code dtype}. *
    The outputs are a deterministic function of {@code shape}, {@code key}, {@code counter} and {@code alg}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessRandomUniformFullIntV2.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformInt.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformInt.java index 8bce8bc129e..5c792f75e51 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformInt.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformInt.java @@ -37,8 +37,6 @@ * Outputs deterministic pseudorandom random integers from a uniform distribution. * The generated values follow a uniform distribution in the range {@code [minval, maxval)}. *
    The outputs are a deterministic function of {@code shape}, {@code seed}, {@code minval}, and {@code maxval}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessRandomUniformInt.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformIntV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformIntV2.java index aa3e3d0de83..ae538d14050 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformIntV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformIntV2.java @@ -39,8 +39,6 @@ * Outputs deterministic pseudorandom random integers from a uniform distribution. * The generated values follow a uniform distribution in the range {@code [minval, maxval)}. *
    The outputs are a deterministic function of {@code shape}, {@code key}, {@code counter}, {@code alg}, {@code minval} and {@code maxval}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessRandomUniformIntV2.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformV2.java index 8b0e106cb95..86bb5202639 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessRandomUniformV2.java @@ -42,8 +42,6 @@ * The generated values follow a uniform distribution in the range {@code [0, 1)}. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. *
    The outputs are a deterministic function of {@code shape}, {@code key}, {@code counter} and {@code alg}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessRandomUniformV2.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormal.java index 2ddedee0436..83c4ebdab9c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormal.java @@ -41,8 +41,6 @@ * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. *

    The outputs are a deterministic function of {@code shape} and {@code seed}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessTruncatedNormal.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormalV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormalV2.java index 6505cd06561..ae8b00ae1df 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormalV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/StatelessTruncatedNormalV2.java @@ -43,8 +43,6 @@ * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. *

    The outputs are a deterministic function of {@code shape}, {@code key}, {@code counter} and {@code alg}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessTruncatedNormalV2.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/TruncatedNormal.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/TruncatedNormal.java index ee3e12c25e3..36fbe8a2a05 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/TruncatedNormal.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/TruncatedNormal.java @@ -39,8 +39,6 @@ * The generated values follow a normal distribution with mean 0 and standard * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. - * - * @param data type for {@code output} output */ @OpMetadata( opType = TruncatedNormal.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/experimental/StatelessShuffle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/experimental/StatelessShuffle.java index 5100d0ef8c6..dc17294084b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/experimental/StatelessShuffle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/random/experimental/StatelessShuffle.java @@ -45,8 +45,6 @@ * [5, 6]] [3, 4]] * *

    The outputs are a deterministic function of {@code value}, {@code key}, {@code counter} and {@code alg}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = StatelessShuffle.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft.java index 42ef1e6bdf9..220c72d1723 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft.java @@ -37,8 +37,6 @@ * Fast Fourier transform. * Computes the 1-dimensional discrete Fourier transform over the inner-most * dimension of {@code input}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Fft.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft2d.java index 118d2db63e0..4f78086027b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft2d.java @@ -37,8 +37,6 @@ * 2D fast Fourier transform. * Computes the 2-dimensional discrete Fourier transform over the inner-most * 2 dimensions of {@code input}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Fft2d.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft3d.java index 6195de0eae8..7f5478e228a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Fft3d.java @@ -37,8 +37,6 @@ * 3D fast Fourier transform. * Computes the 3-dimensional discrete Fourier transform over the inner-most 3 * dimensions of {@code input}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Fft3d.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/FftNd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/FftNd.java index b7f4268150c..8f530229379 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/FftNd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/FftNd.java @@ -44,8 +44,6 @@ * is not given, the default shape(input) is used. *
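For reference, the stateless random ops above (StatelessRandomUniform*, StatelessRandomUniformInt*, StatelessTruncatedNormal*, StatelessShuffle) all share one contract: the result is a pure function of the listed inputs (shape or value, key, counter, alg, or seed) with no hidden generator state. The sketch below only illustrates that counter-based idea in plain Java; it uses a SplitMix64-style mixer chosen for brevity, not TensorFlow's actual Philox/ThreeFry generators, and the class and method names are illustrative.

import java.util.Arrays;

/** Toy counter-based generator: each value is mix(key, counter + i); there is no mutable state. */
public final class StatelessUniformSketch {

  // SplitMix64-style finalizer; a stand-in mixer, not TensorFlow's Philox/ThreeFry.
  private static long mix(long x) {
    x += 0x9E3779B97F4A7C15L;
    x = (x ^ (x >>> 30)) * 0xBF58476D1CE4E5B9L;
    x = (x ^ (x >>> 27)) * 0x94D049BB133111EBL;
    return x ^ (x >>> 31);
  }

  /** Deterministic uniform doubles in [0, 1): a pure function of (key, counter, n). */
  static double[] statelessUniform(long key, long counter, int n) {
    double[] out = new double[n];
    for (int i = 0; i < n; i++) {
      long bits = mix(key ^ mix(counter + i));
      out[i] = (bits >>> 11) * 0x1.0p-53; // top 53 bits mapped into [0, 1)
    }
    return out;
  }

  public static void main(String[] args) {
    double[] a = statelessUniform(42L, 7L, 4);
    double[] b = statelessUniform(42L, 7L, 4);
    double[] c = statelessUniform(42L, 8L, 4);
    System.out.println(Arrays.equals(a, b)); // true: same key/counter, same stream
    System.out.println(Arrays.equals(a, c)); // false: bumping the counter changes the stream
  }
}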

    Axes mean the dimensions to perform the transform on. Default is to perform on * all axes. - * - * @param data type for {@code output} output */ @OpMetadata( opType = FftNd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft.java index 3a313a6f23e..6b1f6fa6d8c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft.java @@ -37,8 +37,6 @@ * Inverse fast Fourier transform. * Computes the inverse 1-dimensional discrete Fourier transform over the * inner-most dimension of {@code input}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Ifft.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft2d.java index ad0902bf3a1..2c4c19b2ead 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft2d.java @@ -37,8 +37,6 @@ * Inverse 2D fast Fourier transform. * Computes the inverse 2-dimensional discrete Fourier transform over the * inner-most 2 dimensions of {@code input}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Ifft2d.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft3d.java index 82251ed232c..efcb06fafcd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Ifft3d.java @@ -37,8 +37,6 @@ * Inverse 3D fast Fourier transform. * Computes the inverse 3-dimensional discrete Fourier transform over the * inner-most 3 dimensions of {@code input}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Ifft3d.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/IfftNd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/IfftNd.java index 82855d2bab4..181e3756015 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/IfftNd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/IfftNd.java @@ -44,8 +44,6 @@ * is not given, the default shape(input) is used. *

    Axes mean the dimensions to perform the transform on. Default is to perform on * all axes. - * - * @param data type for {@code output} output */ @OpMetadata( opType = IfftNd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft.java index ecf2703b6e8..50f6daef0a0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft.java @@ -50,8 +50,6 @@ *

    Along the axis {@code signal.Irfft} is computed on, if {@code fft_length / 2 + 1} is smaller * than the corresponding dimension of {@code input}, the dimension is cropped. If it is * larger, the dimension is padded with zeros. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Irfft.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft2d.java index 8a448fd2a52..01214bfec41 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft2d.java @@ -51,8 +51,6 @@ * {@code fft_length / 2 + 1} for the inner-most dimension) is smaller than the * corresponding dimension of {@code input}, the dimension is cropped. If it is larger, * the dimension is padded with zeros. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Irfft2d.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft3d.java index a336791cb83..c83389668b4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Irfft3d.java @@ -51,8 +51,6 @@ * {@code fft_length / 2 + 1} for the inner-most dimension) is smaller than the * corresponding dimension of {@code input}, the dimension is cropped. If it is larger, * the dimension is padded with zeros. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Irfft3d.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/IrfftNd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/IrfftNd.java index 93006aea156..5e83c9f4dc3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/IrfftNd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/IrfftNd.java @@ -48,8 +48,6 @@ * is not given, the default shape(input) is used. *

    Axes mean the dimensions to perform the transform on. Default is to perform on * all axes. - * - * @param data type for {@code output} output */ @OpMetadata( opType = IrfftNd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft.java index f5c14f6eec7..c4d7b74e39a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft.java @@ -46,8 +46,6 @@ *

    Along the axis {@code signal.Rfft} is computed on, if {@code fft_length} is smaller than the * corresponding dimension of {@code input}, the dimension is cropped. If it is larger, * the dimension is padded with zeros. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Rfft.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft2d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft2d.java index 6587b7378c1..314d16f4eec 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft2d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft2d.java @@ -47,8 +47,6 @@ *

    Along each axis {@code signal.Rfft2d} is computed on, if {@code fft_length} is smaller than the * corresponding dimension of {@code input}, the dimension is cropped. If it is larger, * the dimension is padded with zeros. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Rfft2d.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft3d.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft3d.java index 35746c0f93b..282c4b7386e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft3d.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/Rfft3d.java @@ -47,8 +47,6 @@ *

    Along each axis {@code signal.Rfft3d} is computed on, if {@code fft_length} is smaller than the * corresponding dimension of {@code input}, the dimension is cropped. If it is larger, * the dimension is padded with zeros. - * - * @param data type for {@code output} output */ @OpMetadata( opType = Rfft3d.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/RfftNd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/RfftNd.java index 85e48957ee4..17bf1368600 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/RfftNd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/signal/RfftNd.java @@ -47,8 +47,6 @@ * is not given, the default shape(input) is used. *

    Axes mean the dimensions to perform the transform on. Default is to perform on * all axes. - * - * @param data type for {@code output} output */ @OpMetadata( opType = RfftNd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseCountSparseOutput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseCountSparseOutput.java index 49d78c0517c..5cf78a2a0a6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseCountSparseOutput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseCountSparseOutput.java @@ -37,8 +37,6 @@ /** * Performs sparse-output bin counting for a tf.tensor input. * Counts the number of times each value occurs in the input. - * - * @param data type for {@code output_values} output */ @OpMetadata( opType = DenseCountSparseOutput.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToDenseSetOperation.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToDenseSetOperation.java index 2ea6aa671d1..546adba1a9d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToDenseSetOperation.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToDenseSetOperation.java @@ -42,8 +42,6 @@ * has rank {@code n} and the same 1st {@code n-1} dimensions as {@code set1} and {@code set2}. The {@code nth} * dimension contains the result of {@code set_operation} applied to the corresponding * {@code [0...n-1]} dimension of {@code set}. - * - * @param data type for {@code result_values} output */ @OpMetadata( opType = DenseToDenseSetOperation.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToSparseSetOperation.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToSparseSetOperation.java index bb75893bfd4..1b8cbcaee50 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToSparseSetOperation.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DenseToSparseSetOperation.java @@ -48,8 +48,6 @@ * has rank {@code n} and the same 1st {@code n-1} dimensions as {@code set1} and {@code set2}. The {@code nth} * dimension contains the result of {@code set_operation} applied to the corresponding * {@code [0...n-1]} dimension of {@code set}. 
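The Fft/Rfft/Irfft docs above all repeat the same per-axis rule: if the axis is longer than the requested fft_length (or fft_length / 2 + 1 for the inner-most IRFFT axis) it is cropped, and if shorter it is zero-padded. A self-contained sketch of that crop-or-pad step on a 1-D buffer, plain array code rather than the generated op classes:

import java.util.Arrays;

/** Illustrates the crop-or-pad rule the RFFT/IRFFT docs describe for a single axis. */
public final class CropOrPadSketch {

  /** Returns a copy of `signal` cropped or zero-padded to exactly `fftLength` samples. */
  static float[] cropOrPad(float[] signal, int fftLength) {
    float[] out = new float[fftLength];            // zero-initialized => implicit zero padding
    int copy = Math.min(signal.length, fftLength); // cropping when the signal is too long
    System.arraycopy(signal, 0, out, 0, copy);
    return out;
  }

  public static void main(String[] args) {
    float[] x = {1f, 2f, 3f, 4f, 5f};
    System.out.println(Arrays.toString(cropOrPad(x, 3))); // [1.0, 2.0, 3.0]                (cropped)
    System.out.println(Arrays.toString(cropOrPad(x, 8))); // [1.0, 2.0, 3.0, 4.0, 5.0, 0.0, 0.0, 0.0] (padded)
  }
}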
- * - * @param data type for {@code result_values} output */ @OpMetadata( opType = DenseToSparseSetOperation.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DeserializeSparse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DeserializeSparse.java index 697249eca81..ba0c51f9a1e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DeserializeSparse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/DeserializeSparse.java @@ -76,8 +76,6 @@ * values = [1, 2, 3, 4, 5] * shape = [2 50] * - * - * @param data type for {@code sparse_values} output */ @OpMetadata( opType = DeserializeSparse.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAccumulatorTakeGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAccumulatorTakeGradient.java index aeb639d2d6e..fb8a868349d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAccumulatorTakeGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAccumulatorTakeGradient.java @@ -45,8 +45,6 @@ * average of the accumulated gradients. Also automatically increments * the recorded global_step in the accumulator by 1, and resets the * aggregate to 0. - * - * @param data type for {@code values} output */ @OpMetadata( opType = SparseAccumulatorTakeGradient.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAdd.java index 1591773a20c..88ef61b78a1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAdd.java @@ -48,8 +48,6 @@ * {@code thresh == 0} (default) means everything is kept and actual thresholding happens * only for a positive value. *

    In the following shapes, {@code nnz} is the count after taking {@code thresh} into account. - * - * @param data type for {@code sum_values} output */ @OpMetadata( opType = SparseAdd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAddGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAddGrad.java index 7d6c0923f4f..8a844c96eff 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAddGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseAddGrad.java @@ -40,8 +40,6 @@ * as {@code SparseTensor} objects. This op takes in the upstream gradient w.r.t. * non-empty values of the sum, and outputs the gradients w.r.t. the non-empty * values of A and B. - * - * @param data type for {@code a_val_grad} output */ @OpMetadata( opType = SparseAddGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseBincount.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseBincount.java index b7414e4ab54..9eca1295d45 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseBincount.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseBincount.java @@ -42,8 +42,6 @@ * the value in {@code weights} at each index where the corresponding value in {@code arr} is * {@code i}. *

    Values in {@code arr} outside of the range [0, size) are ignored. - * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseBincount.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseConcat.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseConcat.java index 6d53b3a723b..016f010647b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseConcat.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseConcat.java @@ -74,8 +74,6 @@ * [ a] concat [ d e ] = [ a d e ] * [b c ] [ ] [b c ] * - * - * @param data type for {@code output_values} output */ @OpMetadata( opType = SparseConcat.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCountSparseOutput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCountSparseOutput.java index c3983444bd3..4c59b4e2774 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCountSparseOutput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseCountSparseOutput.java @@ -37,8 +37,6 @@ /** * Performs sparse-output bin counting for a sparse tensor input. * Counts the number of times each value occurs in the input. - * - * @param data type for {@code output_values} output */ @OpMetadata( opType = SparseCountSparseOutput.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseAdd.java index 261d292d3b0..10ac8721d98 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseAdd.java @@ -43,8 +43,6 @@ *
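SparseBincount, like the DenseCountSparseOutput/SparseCountSparseOutput ops earlier in this diff, accumulates either 1 or the matching entry of weights into bin i for every occurrence of i in arr, and silently skips values outside [0, size). A minimal plain-Java sketch of that counting rule (the names are illustrative, not the generated API):

import java.util.Arrays;

/** Weighted bincount as described in the SparseBincount docs: out-of-range values are ignored. */
public final class BincountSketch {

  static double[] bincount(int[] arr, double[] weights, int size) {
    double[] out = new double[size];
    for (int idx = 0; idx < arr.length; idx++) {
      int value = arr[idx];
      if (value < 0 || value >= size) {
        continue; // values outside [0, size) are ignored
      }
      // add the weight (or 1 when no weights are given) into bin `value`
      out[value] += (weights.length == 0) ? 1.0 : weights[idx];
    }
    return out;
  }

  public static void main(String[] args) {
    int[] arr = {1, 1, 2, 7, -1};            // 7 and -1 fall outside [0, 4) and are dropped
    double[] w = {0.5, 0.5, 2.0, 9.9, 9.9};
    System.out.println(Arrays.toString(bincount(arr, w, 4)));             // [0.0, 1.0, 2.0, 0.0]
    System.out.println(Arrays.toString(bincount(arr, new double[0], 4))); // [0.0, 2.0, 1.0, 0.0]
  }
}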

    By these rules, the result is a logical SparseTensor with exactly the same * indices and shape, but possibly with different non-zero values. The output of * this Op is the resultant non-zero values. - * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseDenseCwiseAdd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseDiv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseDiv.java index e0b56d6827c..724997892b1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseDiv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseDiv.java @@ -38,8 +38,6 @@ * Component-wise divides a SparseTensor by a dense Tensor. * Limitation: this Op only broadcasts the dense side to the sparse side, but not * the other direction. - * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseDenseCwiseDiv.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseMul.java index 3fb7a03c683..fe8386f0838 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseDenseCwiseMul.java @@ -41,8 +41,6 @@ * contents of the dense tensor (even if it's +/-INF and that INF*0 == NaN). *

    Limitation: this Op only broadcasts the dense side to the sparse side, but not * the other direction. - * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseDenseCwiseMul.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRows.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRows.java index 989fda03492..ef0d2f85afa 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRows.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRows.java @@ -71,8 +71,6 @@ *

      * reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :]
      * 
    - * - * @param data type for {@code output_values} output */ @OpMetadata( opType = SparseFillEmptyRows.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRowsGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRowsGrad.java index 21d4e2f099f..3b1c80bb5b1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRowsGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseFillEmptyRowsGrad.java @@ -43,8 +43,6 @@ *

    d_values[j] = grad_values[reverse_index_map[j]]; d_default_value = sum_{k : 0 .. N_full - 1} (grad_values[k] * 1{k not in reverse_index_map}) - * - * @param data type for {@code d_values} output */ @OpMetadata( opType = SparseFillEmptyRowsGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMax.java index 1e48a53ea82..256695f0acd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMax.java @@ -47,8 +47,6 @@ *
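The two terms above are separate assignments: d_values[j] gathers the upstream gradient through reverse_index_map, while d_default_value sums the grad_values entries whose index never appears in reverse_index_map (the rows that had been filled with the default value). A stand-alone Java sketch of that computation, with illustrative inputs:

import java.util.Arrays;

/** Stand-alone illustration of the SparseFillEmptyRowsGrad formulas. */
public final class FillEmptyRowsGradSketch {

  static void gradients(long[] reverseIndexMap, float[] gradValues) {
    int n = reverseIndexMap.length;   // number of original (non-filled) values
    int nFull = gradValues.length;    // number of values after empty rows were filled
    float[] dValues = new float[n];
    boolean[] used = new boolean[nFull];

    // d_values[j] = grad_values[reverse_index_map[j]]
    for (int j = 0; j < n; j++) {
      int k = (int) reverseIndexMap[j];
      dValues[j] = gradValues[k];
      used[k] = true;
    }

    // d_default_value = sum of grad_values[k] over every k not present in reverse_index_map
    float dDefault = 0f;
    for (int k = 0; k < nFull; k++) {
      if (!used[k]) {
        dDefault += gradValues[k];
      }
    }

    System.out.println("d_values = " + Arrays.toString(dValues));
    System.out.println("d_default_value = " + dDefault);
  }

  public static void main(String[] args) {
    // 3 original values mapped into a filled tensor of 5 values; slots 2 and 4 were filled rows.
    gradients(new long[] {0, 1, 3}, new float[] {10f, 20f, 30f, 40f, 50f});
    // d_values = [10.0, 20.0, 40.0], d_default_value = 30.0 + 50.0 = 80.0
  }
}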

    If {@code reduction_axes} has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. - * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseReduceMax.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMaxSparse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMaxSparse.java index 8f337f0c19e..b0a65daea67 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMaxSparse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceMaxSparse.java @@ -47,8 +47,6 @@ *

    If {@code reduction_axes} has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. - * - * @param data type for {@code output_values} output */ @OpMetadata( opType = SparseReduceMaxSparse.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSum.java index 26e0ecbfc45..3589487bece 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSum.java @@ -47,8 +47,6 @@ *

    If {@code reduction_axes} has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. - * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseReduceSum.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSumSparse.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSumSparse.java index bb434694ccf..ef58eac0af1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSumSparse.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReduceSumSparse.java @@ -47,8 +47,6 @@ *

    If {@code reduction_axes} has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. - * - * @param data type for {@code output_values} output */ @OpMetadata( opType = SparseReduceSumSparse.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReorder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReorder.java index 9e963285d77..4e2883435f9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReorder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseReorder.java @@ -42,8 +42,6 @@ *
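All four sparse reductions above accept negative entries in reduction_axes, interpreted with Python-style indexing, so an axis of -1 always means the inner-most dimension. A one-method sketch of that normalization (the bounds check is an assumption about how an out-of-range axis would be rejected, not taken from the op):

/** Normalizes a possibly negative reduction axis the way Python indexing does. */
public final class ReductionAxisSketch {

  static int normalizeAxis(int axis, int rank) {
    if (axis < -rank || axis >= rank) {
      throw new IllegalArgumentException("axis " + axis + " out of range for rank " + rank);
    }
    return axis < 0 ? axis + rank : axis; // e.g. axis -1 on a rank-3 tensor -> 2
  }

  public static void main(String[] args) {
    System.out.println(normalizeAxis(-1, 3)); // 2 (the inner-most dimension)
    System.out.println(normalizeAxis(1, 3));  // 1 (unchanged)
  }
}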

    Reordering does not affect the shape of the SparseTensor. *

    If the tensor has rank {@code R} and {@code N} non-empty values, {@code input_indices} has * shape {@code [N, R]}, input_values has length {@code N}, and input_shape has length {@code R}. - * - * @param data type for {@code output_values} output */ @OpMetadata( opType = SparseReorder.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMean.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMean.java index c1899b2fbf6..4703ba10fca 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMean.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMean.java @@ -38,8 +38,6 @@ * See {@code tf.sparse.segment_sum} for usage examples. *

    Like {@code SegmentMean}, but {@code segment_ids} can have rank less than {@code data}'s first * dimension, selecting a subset of dimension 0, specified by {@code indices}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseSegmentMean.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanGrad.java index 50f29512a23..9da8038eee9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanGrad.java @@ -39,10 +39,6 @@ * Returns tensor "output" with same shape as grad, except for dimension 0 whose * value is the number of unique indexes in "indices". Also returns vector * "sorted_unique_indices" containing the corresponding indexes from "indices". - * - * @param data type for {@code output} output - * - * @param data type for {@code sorted_unique_indices} output */ @OpMetadata( opType = SparseSegmentMeanGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanWithNumSegments.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanWithNumSegments.java index d1c0e07c099..99cf33231a5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanWithNumSegments.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentMeanWithNumSegments.java @@ -40,8 +40,6 @@ *
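The sparse segment ops above use two levels of addressing: indices first selects rows of data (a subset of dimension 0), and segment_ids then assigns each selected row to an output segment. A plain-array Java sketch of the mean case, under the usual assumption that segment_ids is sorted and non-negative; the names are illustrative:

import java.util.Arrays;

/** Plain-array sketch of SparseSegmentMean: rows are gathered via `indices`, then averaged per segment. */
public final class SparseSegmentMeanSketch {

  static float[][] sparseSegmentMean(float[][] data, int[] indices, int[] segmentIds) {
    int cols = data[0].length;
    int numSegments = segmentIds[segmentIds.length - 1] + 1; // last id + 1, since ids are sorted
    float[][] out = new float[numSegments][cols];
    int[] counts = new int[numSegments];

    for (int i = 0; i < indices.length; i++) {
      int seg = segmentIds[i];
      float[] row = data[indices[i]];   // gather step: a subset of dimension 0
      for (int c = 0; c < cols; c++) {
        out[seg][c] += row[c];          // accumulate per segment
      }
      counts[seg]++;
    }
    for (int s = 0; s < numSegments; s++) {
      for (int c = 0; c < cols; c++) {
        if (counts[s] > 0) {
          out[s][c] /= counts[s];       // mean instead of sum
        }
      }
    }
    return out;
  }

  public static void main(String[] args) {
    float[][] data = {{1, 2}, {3, 4}, {5, 6}};
    // rows 0 and 2 go to segment 0, row 1 goes to segment 1
    float[][] mean = sparseSegmentMean(data, new int[] {0, 2, 1}, new int[] {0, 0, 1});
    System.out.println(Arrays.deepToString(mean)); // [[3.0, 4.0], [3.0, 4.0]]
  }
}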

    Read * the section on segmentation * for an explanation of segments. - * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseSegmentMeanWithNumSegments.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtN.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtN.java index ee0dc4238fc..5e299d7d124 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtN.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtN.java @@ -37,8 +37,6 @@ * Computes the sum along sparse segments of a tensor divided by the sqrt of N. * N is the size of the segment being reduced. *

    See {@code tf.sparse.segment_sum} for usage examples. - * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseSegmentSqrtN.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNGrad.java index 075cbacbcfb..b458c7daff9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNGrad.java @@ -39,10 +39,6 @@ * Returns tensor "output" with same shape as grad, except for dimension 0 whose * value is the number of unique indexes in "indices". Also returns vector * "sorted_unique_indices" containing the corresponding indexes from "indices". - * - * @param data type for {@code output} output - * - * @param data type for {@code sorted_unique_indices} output */ @OpMetadata( opType = SparseSegmentSqrtNGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNWithNumSegments.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNWithNumSegments.java index 84ccc501312..146dd696d6e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNWithNumSegments.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSqrtNWithNumSegments.java @@ -41,8 +41,6 @@ *

    Read * the section on segmentation * for an explanation of segments. - * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseSegmentSqrtNWithNumSegments.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSum.java index cf2ce2c9851..2f28386d05c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSum.java @@ -61,8 +61,6 @@ * # Which is equivalent to: * tf.segment_sum(c, tf.constant([0, 0, 1])) * - * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseSegmentSum.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSumGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSumGrad.java index 71b8f92448e..1372d6f7089 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSumGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSumGrad.java @@ -39,10 +39,6 @@ * Returns tensor "output" with same shape as grad, except for dimension 0 whose * value is the number of unique indexes in "indices". Also returns vector * "sorted_unique_indices" containing the corresponding indexes from "indices". - * - * @param data type for {@code output} output - * - * @param data type for {@code sorted_unique_indices} output */ @OpMetadata( opType = SparseSegmentSumGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSumWithNumSegments.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSumWithNumSegments.java index 4c44377244d..88b577afec1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSumWithNumSegments.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSegmentSumWithNumSegments.java @@ -59,8 +59,6 @@ * # [-1 -2 -3 -4] * # [ 0 0 0 0]] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseSegmentSumWithNumSegments.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSlice.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSlice.java index 58c794dfb2f..a3718f1a7e0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSlice.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSlice.java @@ -52,8 +52,6 @@ * [ d e ] * [ ] * - * - * @param data type for {@code output_values} output */ @OpMetadata( opType = SparseSlice.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSliceGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSliceGrad.java index 4cfa41a7e45..969ef935dc7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSliceGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSliceGrad.java @@ -39,8 +39,6 @@ * This op takes in the upstream gradient w.r.t. 
non-empty values of * the sliced {@code SparseTensor}, and outputs the gradients w.r.t. * the non-empty values of input {@code SparseTensor}. - * - * @param data type for {@code val_grad} output */ @OpMetadata( opType = SparseSliceGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSoftmax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSoftmax.java index be61533da26..43cd85b5a9f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSoftmax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSoftmax.java @@ -48,8 +48,6 @@ * (3) Renormalizes the remaining elements. *

    Hence, the {@code SparseTensor} result has exactly the same non-zero indices and * shape. - * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseSoftmax.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMaximum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMaximum.java index 22a1d407274..80b44623ca8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMaximum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMaximum.java @@ -37,8 +37,6 @@ /** * Returns the element-wise max of two SparseTensors. * Assumes the two SparseTensors have the same shape, i.e., no broadcasting. - * - * @param data type for {@code output_values} output */ @OpMetadata( opType = SparseSparseMaximum.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMinimum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMinimum.java index 8dd8978c627..ecbc022d09d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMinimum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSparseMinimum.java @@ -37,8 +37,6 @@ /** * Returns the element-wise min of two SparseTensors. * Assumes the two SparseTensors have the same shape, i.e., no broadcasting. - * - * @param data type for {@code output_values} output */ @OpMetadata( opType = SparseSparseMinimum.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSplit.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSplit.java index a09e9ff9d38..da66d34d134 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSplit.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseSplit.java @@ -55,8 +55,6 @@ * [ d e ] * [ ] * - * - * @param data type for {@code output_values} output */ @OpMetadata( opType = SparseSplit.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseAdd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseAdd.java index c153cf68776..7f73769030b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseAdd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseAdd.java @@ -37,8 +37,6 @@ /** * Adds up a {@code SparseTensor} and a dense {@code Tensor}, producing a dense {@code Tensor}. * This Op does not require {@code a_indices} be sorted in standard lexicographic order. 
- * - * @param data type for {@code output} output */ @OpMetadata( opType = SparseTensorDenseAdd.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseMatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseMatMul.java index 346c9297596..0425354268c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseMatMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseTensorDenseMatMul.java @@ -45,8 +45,6 @@ * if adjoint_a == true: * A should be sorted in order of increasing dimension 1 (i.e., "column major" * order instead of "row major" order). - * - * @param data type for {@code product} output */ @OpMetadata( opType = SparseTensorDenseMatMul.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToDense.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToDense.java index 95c8f189d48..448a7c4ec83 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToDense.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToDense.java @@ -52,8 +52,6 @@ *

    Indices should be sorted in lexicographic order, and indices must not * contain any repeats. If {@code validate_indices} is true, these properties * are checked during execution. - * - * @param data type for {@code dense} output */ @OpMetadata( opType = SparseToDense.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToSparseSetOperation.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToSparseSetOperation.java index 8a71016a669..e658f88abb7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToSparseSetOperation.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/SparseToSparseSetOperation.java @@ -54,8 +54,6 @@ * has rank {@code n} and the same 1st {@code n-1} dimensions as {@code set1} and {@code set2}. The {@code nth} * dimension contains the result of {@code set_operation} applied to the corresponding * {@code [0...n-1]} dimension of {@code set}. - * - * @param data type for {@code result_values} output */ @OpMetadata( opType = SparseToSparseSetOperation.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/TakeManySparseFromTensorsMap.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/TakeManySparseFromTensorsMap.java index e72ec904466..2c6293f402d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/TakeManySparseFromTensorsMap.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/sparse/TakeManySparseFromTensorsMap.java @@ -77,8 +77,6 @@ * values = [1, 2, 3, 4, 5] * shape = [2 50] * - * - * @param data type for {@code sparse_values} output */ @OpMetadata( opType = TakeManySparseFromTensorsMap.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringNGrams.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringNGrams.java index 6f5739989d0..c04fa6cd987 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringNGrams.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/StringNGrams.java @@ -40,8 +40,6 @@ * This op accepts a ragged tensor with 1 ragged dimension containing only * strings and outputs a ragged tensor with 1 ragged dimension containing ngrams * of that string, joined along the innermost axis. 
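SparseToDense, described above, scatters sparse_values into a tensor of shape output_shape that starts out filled with default_value; the sorted, repeat-free indices requirement is what validate_indices checks at run time. A minimal 2-D sketch in plain Java (illustrative names, and no validation beyond the scatter itself):

import java.util.Arrays;

/** 2-D sketch of sparse-to-dense: fill with a default, then scatter values at the given indices. */
public final class SparseToDenseSketch {

  static int[][] sparseToDense(int[][] indices, int[] shape, int[] values, int defaultValue) {
    int[][] dense = new int[shape[0]][shape[1]];
    for (int[] row : dense) {
      Arrays.fill(row, defaultValue);                    // start from the default value everywhere
    }
    for (int i = 0; i < indices.length; i++) {
      dense[indices[i][0]][indices[i][1]] = values[i];   // scatter; repeated indices would silently overwrite
    }
    return dense;
  }

  public static void main(String[] args) {
    int[][] indices = {{0, 0}, {1, 2}};                  // lexicographically sorted, no repeats
    int[][] dense = sparseToDense(indices, new int[] {2, 3}, new int[] {7, 9}, -1);
    System.out.println(Arrays.deepToString(dense));      // [[7, -1, -1], [-1, -1, 9]]
  }
}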
- * - * @param data type for {@code ngrams_splits} output */ @OpMetadata( opType = StringNGrams.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToNumber.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToNumber.java index e4564334bf1..74e4816ed43 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToNumber.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/ToNumber.java @@ -50,8 +50,6 @@ * * * - * - * @param data type for {@code output} output */ @OpMetadata( opType = ToNumber.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecode.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecode.java index bffb35e17e0..40624c66adf 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecode.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecode.java @@ -52,8 +52,6 @@ *

  • {@code row_splits[i+1] - row_splits[i]} is the number of characters in the {@code i}th * string (in row-major order).
  • * - * - * @param data type for {@code row_splits} output */ @OpMetadata( opType = UnicodeDecode.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecodeWithOffsets.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecodeWithOffsets.java index 690789b6843..5989e8e7106 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecodeWithOffsets.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/strings/UnicodeDecodeWithOffsets.java @@ -56,8 +56,6 @@ *
  • {@code row_splits[i+1] - row_splits[i]} is the number of characters in the {@code i}th * string (in row-major order).
  • * - * - * @param data type for {@code row_splits} output */ @OpMetadata( opType = UnicodeDecodeWithOffsets.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/AllToAll.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/AllToAll.java index dfe6664886c..3bd1592cbc7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/AllToAll.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/AllToAll.java @@ -49,8 +49,6 @@ * split_count=2 *

    replica 0's output: {@code [[A], [C]]} * replica 1's output: {@code [[B], [D]]} - * - * @param data type for {@code output} output */ @OpMetadata( opType = AllToAll.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CrossReplicaSum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CrossReplicaSum.java index c56e985eafb..15e942cac31 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CrossReplicaSum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/CrossReplicaSum.java @@ -41,8 +41,6 @@ * Passing group_assignment={@code [[0,2,4,6],[1,3,5,7]]} sets {@code A, C, E, G} as group 0, * and {@code B, D, F, H} as group 1. Thus we get the outputs: * {@code [A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H]}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = CrossReplicaSum.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedDequeue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedDequeue.java index 20e200e26af..2f2d689a23a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedDequeue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/InfeedDequeue.java @@ -37,8 +37,6 @@ /** * A placeholder op for a value that will be fed into the computation. - * - * @param data type for {@code output} output */ @OpMetadata( opType = InfeedDequeue.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeue.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeue.java index 27a9edc8214..f2043c5047c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeue.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeue.java @@ -38,8 +38,6 @@ /** * Retrieves a single tensor from the computation outfeed. * This operation will block indefinitely until data is available. - * - * @param data type for {@code output} output */ @OpMetadata( opType = OutfeedDequeue.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueV2.java index 481f916e86a..dc0d6a3649a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/OutfeedDequeueV2.java @@ -40,8 +40,6 @@ * Retrieves a single tensor from the computation outfeed. Device ordinal is a * tensor allowing dynamic outfeed. * This operation will block indefinitely until data is available. - * - * @param data type for {@code output} output */ @OpMetadata( opType = OutfeedDequeueV2.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedInput.java index be69029e573..89d11541c1b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedInput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedInput.java @@ -37,8 +37,6 @@ /** * An op that groups a list of partitioned inputs together. Supports ND sharding. 
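The group_assignment example above is a grouped sum in which every replica of a group receives the same total, e.g. A+C+E+G for group {0,2,4,6}. A plain-Java sketch of that reduction over per-replica scalars, independent of any TPU runtime:

import java.util.Arrays;

/** Sketch of a cross-replica sum: every replica in a group ends up with the group's total. */
public final class CrossReplicaSumSketch {

  static float[] crossReplicaSum(float[] perReplica, int[][] groupAssignment) {
    float[] out = new float[perReplica.length];
    for (int[] group : groupAssignment) {
      float total = 0f;
      for (int replica : group) {
        total += perReplica[replica];   // reduce within the group
      }
      for (int replica : group) {
        out[replica] = total;           // broadcast the group total back to every member
      }
    }
    return out;
  }

  public static void main(String[] args) {
    float[] x = {1, 2, 3, 4, 5, 6, 7, 8};        // replicas A..H
    int[][] groups = {{0, 2, 4, 6}, {1, 3, 5, 7}};
    System.out.println(Arrays.toString(crossReplicaSum(x, groups)));
    // [16.0, 20.0, 16.0, 20.0, 16.0, 20.0, 16.0, 20.0]  (A+C+E+G = 16, B+D+F+H = 20)
  }
}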
- * - * @param data type for {@code output} output */ @OpMetadata( opType = PartitionedInput.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedOutput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedOutput.java index a49b96f066d..b69bdea9a7b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedOutput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/PartitionedOutput.java @@ -38,8 +38,6 @@ /** * An op that demultiplexes a tensor to be sharded by XLA to a list of partitioned * outputs outside the XLA computation. Supports ND sharding. - * - * @param data type for {@code output} output */ @OpMetadata( opType = PartitionedOutput.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedInput.java index 37c057fc375..5f5ae14be0e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedInput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedInput.java @@ -46,8 +46,6 @@ * %computation = "tf.Computation"(%replicated_input) * *

    The above computation has a replicated input of two replicas. - * - * @param data type for {@code output} output */ @OpMetadata( opType = ReplicatedInput.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedOutput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedOutput.java index fcc447fb932..6daab9ae1a2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedOutput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/ReplicatedOutput.java @@ -45,8 +45,6 @@ * %replicated_output:2 = "tf.TPUReplicatedOutput"(%computation) * *

    The above computation has a replicated output of two replicas. - * - * @param data type for {@code outputs} output */ @OpMetadata( opType = ReplicatedOutput.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/SplitDedupData.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/SplitDedupData.java index ad72b480077..8e8d4537dff 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/SplitDedupData.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/SplitDedupData.java @@ -41,10 +41,6 @@ * Deduplication data is an XLA tuple, which consists of integer and floating point * values. This op is to split these values into two groups for two types, and * construct each group as one tensor to return. - * - * @param data type for {@code integer_tensor} output - * - * @param data type for {@code float_tensor} output */ @OpMetadata( opType = SplitDedupData.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedInput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedInput.java index 1816bb842df..80ac7e3ea03 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedInput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedInput.java @@ -47,8 +47,6 @@ * *

    The above computation has a replicated input of two replicas. * - * @param data type for {@code output} output - * * @deprecated use {@link org.tensorflow.op.tpu.ReplicatedInput} instead */ @OpMetadata( diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedOutput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedOutput.java index ea53c36f109..dcc1b12b2b8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedOutput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/TPUReplicatedOutput.java @@ -46,8 +46,6 @@ * *

    The above computation has a replicated output of two replicas. * - * @param data type for {@code outputs} output - * * @deprecated use {@link org.tensorflow.op.tpu.ReplicatedOutput} instead */ @OpMetadata( diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorTakeGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorTakeGradient.java index a2d152ab93e..e7c94866732 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorTakeGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/AccumulatorTakeGradient.java @@ -43,8 +43,6 @@ * aggregated more than num_required gradients, it returns the average of * the accumulated gradients. Also automatically increments the recorded * global_step in the accumulator by 1, and resets the aggregate to 0. - * - * @param data type for {@code average} output */ @OpMetadata( opType = AccumulatorTakeGradient.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdaMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdaMax.java index 5a6b4fa2871..0bdb47444ad 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdaMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdaMax.java @@ -38,8 +38,6 @@ * m_t <- beta1 * m_{t-1} + (1 - beta1) * g * v_t <- max(beta2 * v_{t-1}, abs(g)) * variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon) - * - * @param data type for {@code out} output */ @OpMetadata( opType = ApplyAdaMax.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdadelta.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdadelta.java index be5bdc297ea..7d53245fe2a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdadelta.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdadelta.java @@ -39,8 +39,6 @@ * update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; * update_accum = rho() * update_accum + (1 - rho()) * update.square(); * var -= update; - * - * @param data type for {@code out} output */ @OpMetadata( opType = ApplyAdadelta.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagrad.java index 9a717cb0daf..0d243bfce4b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagrad.java @@ -37,8 +37,6 @@ * Update '*var' according to the adagrad scheme. 
* accum += grad * grad * var -= lr * grad * (1 / sqrt(accum)) - * - * @param data type for {@code out} output */ @OpMetadata( opType = ApplyAdagrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradDa.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradDa.java index b1577260bf8..a2769eae2e7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradDa.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradDa.java @@ -36,8 +36,6 @@ /** * Update '*var' according to the proximal adagrad scheme. - * - * @param data type for {@code out} output */ @OpMetadata( opType = ApplyAdagradDa.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradV2.java index 6766d80538e..22d0edd340e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdagradV2.java @@ -37,8 +37,6 @@ * Update '*var' according to the adagrad scheme. * accum += grad * grad * var -= lr * grad * (1 / sqrt(accum)) - * - * @param data type for {@code out} output */ @OpMetadata( opType = ApplyAdagradV2.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdam.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdam.java index 91dbb1d72f6..8dbd525dc98 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdam.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAdam.java @@ -39,8 +39,6 @@ * $$m_t := \beta_1 \cdot m{t-1} + (1 - \beta_1) \cdot g$$ * $$v_t := \beta_2 \cdot v_{t-1} + (1 - \beta_2) \cdot g^2$$ * $$\text{var} := \begin{cases} \text{var} - (m_t \beta_1 + g \cdot (1 - \beta_1))\cdot\text{lr}_t/(\sqrt{v_t} + \epsilon), &\text{if use_nesterov}\\ \text{var} - m_t \cdot \text{lr}_t /(\sqrt{v_t} + \epsilon), &\text{otherwise} \end{cases}$$ - * - * @param data type for {@code out} output */ @OpMetadata( opType = ApplyAdam.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAddSign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAddSign.java index 434802b1590..69127231eb1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAddSign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyAddSign.java @@ -38,8 +38,6 @@ * m_t <- beta1 * m_{t-1} + (1 - beta1) * g * update <- (alpha + sign_decay * sign(g) *sign(m)) * g * variable <- variable - lr_t * update - * - * @param data type for {@code out} output */ @OpMetadata( opType = ApplyAddSign.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyCenteredRmsProp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyCenteredRmsProp.java index 46f9975e74a..f7801bf277e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyCenteredRmsProp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyCenteredRmsProp.java @@ -49,8 +49,6 @@ * ms <- rho * ms_{t-1} + (1-rho) * grad * grad * mom <- momentum * 
mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) * var <- var - mom - * - * @param data type for {@code out} output */ @OpMetadata( opType = ApplyCenteredRmsProp.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyFtrl.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyFtrl.java index c14505600ef..cd010677d47 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyFtrl.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyFtrl.java @@ -42,8 +42,6 @@ * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new - * - * @param data type for {@code out} output */ @OpMetadata( opType = ApplyFtrl.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyGradientDescent.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyGradientDescent.java index f7c93955d6b..5ebb7b31330 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyGradientDescent.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyGradientDescent.java @@ -35,8 +35,6 @@ /** * Update '*var' by subtracting 'alpha' * 'delta' from it. - * - * @param data type for {@code out} output */ @OpMetadata( opType = ApplyGradientDescent.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyMomentum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyMomentum.java index fc82fa94853..1aa402b6783 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyMomentum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyMomentum.java @@ -38,8 +38,6 @@ * Set use_nesterov = True if you want to use Nesterov momentum. *
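Note on the ApplyAdam hunk above: it quotes the moment updates and the two update cases, but the bias-corrected step size lr_t sits above the hunk boundary. The plain-Java sketch below restates the quoted rule for clarity; lr_t follows the usual Adam bias correction and is an assumption here, and nothing in this sketch is the generated ApplyAdam kernel.

// Plain-Java sketch of the Adam rule quoted in the ApplyAdam hunk above. Illustration only.
final class AdamSketch {
  static void adamStep(float[] variable, float[] m, float[] v, float[] grad, int t,
                       float lr, float beta1, float beta2, float epsilon, boolean useNesterov) {
    // lr_t = lr * sqrt(1 - beta2^t) / (1 - beta1^t) -- standard bias correction
    // (assumption: the defining line is cut off above the quoted hunk).
    float lrT = (float) (lr * Math.sqrt(1 - Math.pow(beta2, t)) / (1 - Math.pow(beta1, t)));
    for (int i = 0; i < variable.length; i++) {
      m[i] = beta1 * m[i] + (1 - beta1) * grad[i];                 // m_t
      v[i] = beta2 * v[i] + (1 - beta2) * grad[i] * grad[i];       // v_t
      float denom = (float) Math.sqrt(v[i]) + epsilon;
      variable[i] -= useNesterov
          ? (m[i] * beta1 + grad[i] * (1 - beta1)) * lrT / denom   // use_nesterov case
          : m[i] * lrT / denom;                                    // otherwise
    }
  }
}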

    accum = accum * momentum + grad * var -= lr * accum - * - * @param data type for {@code out} output */ @OpMetadata( opType = ApplyMomentum.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyPowerSign.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyPowerSign.java index dad41ae5e50..f298f853be2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyPowerSign.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyPowerSign.java @@ -38,8 +38,6 @@ * m_t <- beta1 * m_{t-1} + (1 - beta1) * g * update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g * variable <- variable - lr_t * update - * - * @param data type for {@code out} output */ @OpMetadata( opType = ApplyPowerSign.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyProximalAdagrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyProximalAdagrad.java index 8f2c0b1d0b2..a095146963b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyProximalAdagrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyProximalAdagrad.java @@ -38,8 +38,6 @@ * accum += grad * grad * prox_v = var - lr * grad * (1 / sqrt(accum)) * var = sign(prox_v)/(1+lrl2) * max{|prox_v|-lrl1,0} - * - * @param data type for {@code out} output */ @OpMetadata( opType = ApplyProximalAdagrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyProximalGradientDescent.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyProximalGradientDescent.java index 488faf4d559..ffd6ee70e68 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyProximalGradientDescent.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyProximalGradientDescent.java @@ -37,8 +37,6 @@ * Update '*var' as FOBOS algorithm with fixed learning rate. * prox_v = var - alpha * delta * var = sign(prox_v)/(1+alphal2) * max{|prox_v|-alphal1,0} - * - * @param data type for {@code out} output */ @OpMetadata( opType = ApplyProximalGradientDescent.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyRmsProp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyRmsProp.java index 539fa33e176..fcfeb5b895a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyRmsProp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ApplyRmsProp.java @@ -43,8 +43,6 @@ *
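Note on the ApplyMomentum hunk above: it quotes the plain rule (accum = accum * momentum + grad; var -= lr * accum) and mentions use_nesterov without spelling that branch out. A minimal sketch follows; the Nesterov branch is filled in from the usual formulation and should be read as an assumption, not as the generated op.

// Sketch of the momentum rule quoted above; not the generated ApplyMomentum op.
final class MomentumSketch {
  static void momentumStep(float[] variable, float[] accum, float[] grad,
                           float lr, float momentum, boolean useNesterov) {
    for (int i = 0; i < variable.length; i++) {
      accum[i] = accum[i] * momentum + grad[i];
      if (useNesterov) {
        // Assumed Nesterov variant: step against the gradient plus the decayed velocity.
        variable[i] -= lr * grad[i] + lr * momentum * accum[i];
      } else {
        variable[i] -= lr * accum[i];
      }
    }
  }
}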

    ms <- rho * ms_{t-1} + (1-rho) * grad * grad * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) * var <- var - mom - * - * @param data type for {@code out} output */ @OpMetadata( opType = ApplyRmsProp.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/BatchMatMul.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/BatchMatMul.java index 14fdcd8d781..17560573705 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/BatchMatMul.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/BatchMatMul.java @@ -56,8 +56,6 @@ *
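Note on the ApplyCenteredRmsProp and ApplyRmsProp hunks above: they differ only in the denominator (ms - mg*mg + epsilon versus ms + epsilon), so one sketch covers both. The running-mean update for mg is not visible in the quoted hunk and follows the usual centered formulation, so treat that line as an assumption.

// Sketch of the (centered) RMSProp updates quoted above. Illustration only.
final class RmsPropSketch {
  static void rmsPropStep(float[] variable, float[] ms, float[] mom, float[] mg, float[] grad,
                          float lr, float rho, float momentum, float eps, boolean centered) {
    for (int i = 0; i < variable.length; i++) {
      ms[i] = rho * ms[i] + (1 - rho) * grad[i] * grad[i];
      float denom = ms[i] + eps;
      if (centered) {
        // Running mean of gradients; this line is not shown in the hunk above (assumption).
        mg[i] = rho * mg[i] + (1 - rho) * grad[i];
        denom = ms[i] - mg[i] * mg[i] + eps;
      }
      mom[i] = momentum * mom[i] + lr * grad[i] / (float) Math.sqrt(denom);
      variable[i] -= mom[i];
    }
  }
}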

    NOTE: {@code train.BatchMatMul} supports broadcasting in the batch dimensions. More * about broadcasting * here . - * - * @param data type for {@code output} output */ @OpMetadata( opType = BatchMatMul.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/PreventGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/PreventGradient.java index a7181e6cb0b..c98b11d0050 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/PreventGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/PreventGradient.java @@ -41,8 +41,6 @@ * because no gradient must ever be registered for this function. This * op exists to prevent subtle bugs from silently returning unimplemented * gradients in some corner cases. - * - * @param data type for {@code output} output */ @OpMetadata( opType = PreventGradient.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorTakeGradient.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorTakeGradient.java index 4b7a918f597..843ecae89f1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorTakeGradient.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/ResourceAccumulatorTakeGradient.java @@ -42,8 +42,6 @@ * aggregated more than num_required gradients, it returns the average of * the accumulated gradients. Also automatically increments the recorded * global_step in the accumulator by 1, and resets the aggregate to 0. - * - * @param data type for {@code average} output */ @OpMetadata( opType = ResourceAccumulatorTakeGradient.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/RestoreSlice.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/RestoreSlice.java index b0faba2454c..a33a34b3179 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/RestoreSlice.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/RestoreSlice.java @@ -42,8 +42,6 @@ * larger tensor and the slice that the restored tensor covers. *
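Note on the BatchMatMul hunk above: it says the batch dimensions broadcast. The helper below shows what that means for shapes alone, using NumPy-style pairwise broadcasting from the right; it is a generic illustration, not part of the generated API.

import java.util.Arrays;

// Sketch of broadcasting over the batch dimensions of a batched matmul.
final class BatchShapeSketch {
  // Broadcasts two batch shapes; each aligned dimension pair must match or contain a 1.
  static int[] broadcastBatchShape(int[] a, int[] b) {
    int n = Math.max(a.length, b.length);
    int[] out = new int[n];
    for (int i = 0; i < n; i++) {
      int da = i < n - a.length ? 1 : a[i - (n - a.length)];
      int db = i < n - b.length ? 1 : b[i - (n - b.length)];
      if (da != db && da != 1 && db != 1) {
        throw new IllegalArgumentException("incompatible batch dims: " + da + " vs " + db);
      }
      out[i] = Math.max(da, db);
    }
    return out;
  }

  public static void main(String[] args) {
    // Batch shapes [2, 1] and [5] broadcast to [2, 5]: ten independent matrix products.
    System.out.println(Arrays.toString(broadcastBatchShape(new int[] {2, 1}, new int[] {5})));
  }
}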

    The {@code shape_and_slice} input has the same format as the * elements of the {@code shapes_and_slices} input of the {@code SaveSlices} op. - * - * @param data type for {@code tensor} output */ @OpMetadata( opType = RestoreSlice.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdadelta.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdadelta.java index c68618fecc1..8b12e83f51f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdadelta.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdadelta.java @@ -36,8 +36,6 @@ /** * var: Should be from a Variable(). - * - * @param data type for {@code out} output */ @OpMetadata( opType = SparseApplyAdadelta.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdagrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdagrad.java index a75507dde54..fbda4c582a0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdagrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdagrad.java @@ -39,8 +39,6 @@ * That is for rows we have grad for, we update var and accum as follows: * $$accum += grad * grad$$ * $$var -= lr * grad * (1 / sqrt(accum))$$ - * - * @param data type for {@code out} output */ @OpMetadata( opType = SparseApplyAdagrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdagradDa.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdagradDa.java index cdd24328bb6..33cdae176f0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdagradDa.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyAdagradDa.java @@ -37,8 +37,6 @@ /** * Update entries in '*var' and '*accum' according to the proximal adagrad scheme. - * - * @param data type for {@code out} output */ @OpMetadata( opType = SparseApplyAdagradDa.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyCenteredRmsProp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyCenteredRmsProp.java index 731ecdd88a7..cfbf01b8044 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyCenteredRmsProp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyCenteredRmsProp.java @@ -49,8 +49,6 @@ *
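Note on the SparseApply* hunks above: they all share one pattern, namely that only the rows named by indices are touched, and each such row then receives the dense rule. Using the SparseApplyAdagrad formulas quoted above, the sketch below shows that pattern; the array shapes are hypothetical and this is not the generated kernel.

// Sketch of the sparse Adagrad update quoted above: only rows listed in indices change.
final class SparseAdagradSketch {
  // variable and accum are [rows][cols]; grad is [indices.length][cols].
  static void sparseAdagradStep(float[][] variable, float[][] accum, float[][] grad,
                                int[] indices, float lr) {
    for (int k = 0; k < indices.length; k++) {
      int row = indices[k];                        // row of variable/accum paired with grad[k]
      for (int j = 0; j < variable[row].length; j++) {
        accum[row][j] += grad[k][j] * grad[k][j];
        variable[row][j] -= lr * grad[k][j] / (float) Math.sqrt(accum[row][j]);
      }
    }
  }
}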

    $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ * $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ * $$var <- var - mom$$ - * - * @param data type for {@code out} output */ @OpMetadata( opType = SparseApplyCenteredRmsProp.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyFtrl.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyFtrl.java index 8c609b198bd..72cce364480 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyFtrl.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyFtrl.java @@ -44,8 +44,6 @@ * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new - * - * @param data type for {@code out} output */ @OpMetadata( opType = SparseApplyFtrl.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyMomentum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyMomentum.java index 2e22789fd23..d2ae83d8c17 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyMomentum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyMomentum.java @@ -40,8 +40,6 @@ *
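Note on the ApplyFtrl and SparseApplyFtrl hunks above: they quote only the tail of the FTRL-proximal rule (quadratic, the thresholded var update, and accum = accum_new); the accum_new and linear lines sit above the hunk boundary and are filled in here from the usual FTRL-proximal formulation, so treat those two lines as assumptions. Dense sketch:

// Sketch of the FTRL-proximal update; illustration only, not the generated kernel.
final class FtrlSketch {
  static void ftrlStep(float[] variable, float[] accum, float[] linear, float[] grad,
                       float lr, float l1, float l2, float lrPower) {
    for (int i = 0; i < variable.length; i++) {
      float accumNew = accum[i] + grad[i] * grad[i];   // assumption: standard FTRL head
      float sigma = (float) ((Math.pow(accumNew, -lrPower) - Math.pow(accum[i], -lrPower)) / lr);
      linear[i] += grad[i] - sigma * variable[i];      // assumption: standard FTRL head
      // Tail quoted in the hunks above:
      float quadratic = (float) (1.0 / (Math.pow(accumNew, lrPower) * lr)) + 2 * l2;
      variable[i] = Math.abs(linear[i]) > l1
          ? (Math.signum(linear[i]) * l1 - linear[i]) / quadratic
          : 0.0f;
      accum[i] = accumNew;
    }
  }
}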

    That is for rows we have grad for, we update var and accum as follows: *

    $$accum = accum * momentum + grad$$ * $$var -= lr * accum$$ - * - * @param data type for {@code out} output */ @OpMetadata( opType = SparseApplyMomentum.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyProximalAdagrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyProximalAdagrad.java index 68ca59089e1..70b28897f24 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyProximalAdagrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyProximalAdagrad.java @@ -41,8 +41,6 @@ * $$prox_v = var$$ * $$prox_v -= lr * grad * (1 / sqrt(accum))$$ * $$var = sign(prox_v)/(1+lrl2) * max{|prox_v|-lrl1,0}$$ - * - * @param data type for {@code out} output */ @OpMetadata( opType = SparseApplyProximalAdagrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyProximalGradientDescent.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyProximalGradientDescent.java index 08b098f80ca..3da972089e7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyProximalGradientDescent.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyProximalGradientDescent.java @@ -39,8 +39,6 @@ * That is for rows we have grad for, we update var as follows: * $$prox_v = var - alpha * grad$$ * $$var = sign(prox_v)/(1+alphal2) * max{|prox_v|-alphal1,0}$$ - * - * @param data type for {@code out} output */ @OpMetadata( opType = SparseApplyProximalGradientDescent.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyRmsProp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyRmsProp.java index a648dc04b08..3c642ebcf81 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyRmsProp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/SparseApplyRmsProp.java @@ -44,8 +44,6 @@ *
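Note on the proximal hunks above (ApplyProximalAdagrad, SparseApplyProximalAdagrad, SparseApplyProximalGradientDescent): they all end with the same soft-threshold step, sign(prox_v)/(1 + lr*l2) * max(|prox_v| - lr*l1, 0). A dense sketch of the Adagrad flavour, illustration only:

// Sketch of the proximal-Adagrad rule quoted above: an Adagrad step followed by
// L1 soft-thresholding and L2 shrinkage.
final class ProximalAdagradSketch {
  static void proximalAdagradStep(float[] variable, float[] accum, float[] grad,
                                  float lr, float l1, float l2) {
    for (int i = 0; i < variable.length; i++) {
      accum[i] += grad[i] * grad[i];
      float proxV = variable[i] - lr * grad[i] / (float) Math.sqrt(accum[i]);
      // sign(prox_v) / (1 + lr*l2) * max(|prox_v| - lr*l1, 0)
      variable[i] = Math.signum(proxV) / (1 + lr * l2) * Math.max(Math.abs(proxV) - lr * l1, 0f);
    }
  }
}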

    $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ * $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ * $$var <- var - mom$$ - * - * @param data type for {@code out} output */ @OpMetadata( opType = SparseApplyRmsProp.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/TileGrad.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/TileGrad.java index bdc5c23fc46..9e1b7e0fbb4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/TileGrad.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/train/TileGrad.java @@ -39,8 +39,6 @@ * Since {@code Tile} takes an input and repeats the input {@code multiples} times * along each dimension, {@code train.TileGrad} takes in {@code multiples} and aggregates * each repeated tile of {@code input} into {@code output}. - * - * @param data type for {@code output} output */ @OpMetadata( opType = TileGrad.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ConcatND.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ConcatND.java index 7e55c95e679..1967df6a719 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ConcatND.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ConcatND.java @@ -66,8 +66,6 @@ * [4, 5, 6], * [8, 9, 10]] * - * - * @param data type for {@code output} output */ @OpMetadata( opType = ConcatND.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ReadVariableSplitND.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ReadVariableSplitND.java index 666103dd273..91571db9698 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ReadVariableSplitND.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/ReadVariableSplitND.java @@ -67,8 +67,6 @@ * [[8, 0], * [0, 0]] * - * - * @param data type for {@code outputs} output */ @OpMetadata( opType = ReadVariableSplitND.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SplitND.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SplitND.java index 6bf5656f68c..dd6b61d2e81 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SplitND.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/SplitND.java @@ -66,8 +66,6 @@ * [[8, 0], * [0, 0]] * - * - * @param data type for {@code outputs} output */ @OpMetadata( opType = SplitND.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaRecvFromHost.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaRecvFromHost.java index c8d5507a673..b05f7199f7a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaRecvFromHost.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/XlaRecvFromHost.java @@ -41,8 +41,6 @@ * Toutput: element type for output. * shape: shape for output. * key: A unique identifier for this region used to match up host transfers. - * - * @param data type for {@code output} output */ @OpMetadata( opType = XlaRecvFromHost.OP_NAME,
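Note on the xla ConcatND/ReadVariableSplitND/SplitND hunks above: they show tensors being cut into (or reassembled from) a grid of equally sized slices, with zeros where a slice extends past the input, which is why the last block in the quoted example is mostly zeros. Below is a 2-D sketch of that slicing-with-padding idea; the real ops take an explicit split grid and optional paddings, whereas here the pad is simply whatever the rounded-up tile size leaves uncovered, so read it as an illustration rather than the XLA implementation.

// 2-D sketch of the SplitND idea: cut a matrix into a grid of equal tiles,
// zero-filling positions that fall outside the input.
final class SplitGridSketch {
  // Returns tiles in row-major grid order; each tile is tileRows x tileCols.
  static int[][][] splitIntoTiles(int[][] input, int gridRows, int gridCols) {
    int tileRows = (input.length + gridRows - 1) / gridRows;       // ceil division
    int tileCols = (input[0].length + gridCols - 1) / gridCols;
    int[][][] tiles = new int[gridRows * gridCols][tileRows][tileCols];
    for (int gr = 0; gr < gridRows; gr++) {
      for (int gc = 0; gc < gridCols; gc++) {
        for (int r = 0; r < tileRows; r++) {
          for (int c = 0; c < tileCols; c++) {
            int srcR = gr * tileRows + r;
            int srcC = gc * tileCols + c;
            // Positions past the input stay 0, i.e. the padding seen in the examples above.
            if (srcR < input.length && srcC < input[0].length) {
              tiles[gr * gridCols + gc][r][c] = input[srcR][srcC];
            }
          }
        }
      }
    }
    return tiles;
  }
}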