diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java index 84736ada6a5..007ee9d0d42 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java @@ -345,10 +345,10 @@ public final class Ops { public final SignalOps signal; - public final TrainOps train; - public final QuantizationOps quantization; + public final TrainOps train; + private final Scope scope; private Ops(Scope scope) { @@ -370,8 +370,8 @@ private Ops(Scope scope) { math = new MathOps(this); audio = new AudioOps(this); signal = new SignalOps(this); - train = new TrainOps(this); quantization = new QuantizationOps(this); + train = new TrainOps(this); } /** diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Glorot.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Glorot.java index 290e4e80b57..894bd073758 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Glorot.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Glorot.java @@ -62,7 +62,6 @@ * VarianceScaling.Distribution#TRUNCATED_NORMAL} for the distribution parameter. *
For a GlorotUniform equivalent initializer, use {@link VarianceScaling.Distribution#UNIFORM} * for the distribution parameter. - *
* * @paramFor an HeUniform equivalent initializer, use {@link VarianceScaling.Distribution#UNIFORM} * for the distribution parameter. - *
* * @param[0, 1]. When > 0, label values are smoothed, meaning the
- * confidence on label values are relaxed. e.g. labelSmoothing=0.2 means that we will use a
- * value of 0.1 for label 0 and 0.9 for label 1
+ * @param labelSmoothing Float in [0, 1]. When > 0, label values are
+ * smoothed, meaning the confidence on label values is relaxed. e.g. labelSmoothing=0.2
+ * means that we will use a value of 0.1 for label 0 and
+ * 0.9 for label 1
*/
public CategoricalCrossentropy(Ops tf, boolean fromLogits, float labelSmoothing) {
this(tf, null, fromLogits, labelSmoothing, REDUCTION_DEFAULT, DEFAULT_AXIS);
}
/**
- * Creates a categorical cross entropy Loss using a Loss Reduction of {@link Loss#REDUCTION_DEFAULT},
- * and a channel axis of {@link #DEFAULT_AXIS}
+ * Creates a categorical cross entropy Loss using a Loss Reduction of {@link
+ * Loss#REDUCTION_DEFAULT}, and a channel axis of {@link #DEFAULT_AXIS}
*
* @param tf the TensorFlow Ops
* @param name the name of this loss
* @param fromLogits Whether to interpret predictions as a tensor of logit values
- * @param labelSmoothing Float in [0, 1]. When > 0, label values are smoothed, meaning the
- * confidence on label values are relaxed. e.g. labelSmoothing=0.2 means that we will use a
- * value of 0.1 for label 0 and 0.9 for label 1
+ * @param labelSmoothing Float in [0, 1]. When > 0, label values are
+ * smoothed, meaning the confidence on label values is relaxed. e.g. labelSmoothing=0.2
+ * means that we will use a value of 0.1 for label 0 and
+ * 0.9 for label 1
*/
public CategoricalCrossentropy(Ops tf, String name, boolean fromLogits, float labelSmoothing) {
this(tf, name, fromLogits, labelSmoothing, REDUCTION_DEFAULT, DEFAULT_AXIS);
@@ -183,9 +185,10 @@ public CategoricalCrossentropy(Ops tf, String name, boolean fromLogits, float la
*
* @param tf the TensorFlow Ops
* @param fromLogits Whether to interpret predictions as a tensor of logit values
- * @param labelSmoothing Float in [0, 1]. When > 0, label values are smoothed, meaning the
- * confidence on label values are relaxed. e.g. x=0.2 means that we will use a
- * value of 0.1 for label 0 and 0.9 for label 1
+ * @param labelSmoothing Float in [0, 1]. When > 0, label values are
+ * smoothed, meaning the confidence on label values is relaxed. e.g. labelSmoothing=0.2 means
+ * that we will use a value of 0.1 for label 0 and 0.9
+ * for label 1
* @param reduction Type of Reduction to apply to loss.
*/
public CategoricalCrossentropy(
@@ -199,13 +202,14 @@ public CategoricalCrossentropy(
* @param tf the TensorFlow Ops
* @param name the name of this loss
* @param fromLogits Whether to interpret predictions as a tensor of logit values
- * @param labelSmoothing Float in [0, 1]. When > 0, label values are smoothed, meaning the
- * confidence on label values are relaxed. e.g. labelSmoothing=0.2 means that we will use a
- * value of 0.1 for label 0 and 0.9 for label 1
+ * @param labelSmoothing Float in [0, 1]. When > 0, label values are
+ * smoothed, meaning the confidence on label values is relaxed. e.g. labelSmoothing=0.2
+ * means that we will use a value of 0.1 for label 0 and
+ * 0.9 for label 1
* @param reduction Type of Reduction to apply to loss.
* @param axis The channels axis. axis=-1 corresponds to data format "Channels Last"
- * and axis=1 corresponds to data format "Channels First".
- * {@link Losses#CHANNELS_LAST} and {@link Losses#CHANNELS_FIRST}
+ * and axis=1 corresponds to data format "Channels First". {@link
+ * Losses#CHANNELS_LAST} and {@link Losses#CHANNELS_FIRST}
* @throws IllegalArgumentException if labelSmoothing is not in the inclusive range of 0. - 1.
*/
public CategoricalCrossentropy(
@@ -242,13 +246,12 @@ public CategoricalCrossentropy(
* predictions is scaled by the corresponding value of SampleWeights. (Note on dN-1: all loss
* functions reduce by 1 dimension, usually axis=-1.)
* @param loss = maximum(neg - pos + 1, 0) where neg=maximum((1-labels)*predictions)
* and pos=sum(labels*predictions)
*
- *
labels values are expected to be 0 or 1.
labels values are expected to be 0 or 1.
*
*
Standalone usage:
*
@@ -99,8 +99,8 @@ public CategoricalHinge(Ops tf, String name, Reduction reduction) {
/** {@inheritDoc} */
@Override
- public Note that it is a number between Note that it is a number between Standalone usage:
*
@@ -106,7 +107,7 @@ public Hinge(Ops tf, String name, Reduction reduction) {
* label values are not in the set [-1., 0., 1.].
*
* @param labels the truth values or labels, must be either -1, 0, or 1. Values are expected to be
- * -1 or 1. If binary (0 or 1) labels are provided they will be converted to -1 or 1.
+ * -1 or 1. If binary (0 or 1) labels are provided they will be converted to -1 or 1.
* @param predictions the predictions, values must be in the range [0. to 1.] inclusive.
* @param sampleWeights Optional sampleWeights acts as a coefficient for the loss. If a scalar is
* provided, then the loss is simply scaled by the given value. If sampleWeights is a tensor
@@ -116,21 +117,19 @@ public Hinge(Ops tf, String name, Reduction reduction) {
* predictions is scaled by the corresponding value of SampleWeights. (Note on dN-1: all loss
* functions reduce by 1 dimension, usually axis=-1.)
* @param -1 and 1. When it is a negative number between -1 and 0, 0
- * indicates orthogonality and values closer to -1indicate greater similarity. The values closer to
- * 1 indicate greater dissimilarity. This makes it usable as a loss function in a setting where you
- * try to maximize the proximity between predictions and targets. If either labels or predictions is
- * a zero vector, cosine similarity will be 0 regardless of the proximity between predictions and
- * targets.
+ * -1 and 1. When it is a negative
+ * number between -1 and 0, 0 indicates orthogonality and
+ * values closer to -1 indicate greater similarity. The values closer to 1
+ * indicate greater dissimilarity. This makes it usable as a loss function in a setting where you
+ * try to maximize the proximity between predictions and targets. If either labels or
+ * predictions is a zero vector, cosine similarity will be 0 regardless of
+ * the proximity between predictions and targets.
*
* loss = -sum(l2Norm(labels) * l2Norm(predictions))
*
@@ -71,7 +72,7 @@ public class CosineSimilarity extends Loss {
public static final int DEFAULT_AXIS = -1;
public static final Reduction DEFAULT_REDUCTION = Reduction.AUTO;
- private final int axis;
+ private final int[] axis;
/**
* Creates a Cosine Similarity Loss using {@link Class#getSimpleName()} as the loss name, an axis
@@ -107,6 +108,17 @@ public CosineSimilarity(Ops tf, int axis) {
this(tf, null, axis, DEFAULT_REDUCTION);
}
+ /**
+ * Creates a Cosine Similarity Loss using {@link Class#getSimpleName()} as the loss name, and a
+ * Loss Reduction of {@link #DEFAULT_REDUCTION}
+ *
+ * @param tf the TensorFlow Ops
+ * @param axis The dimension along which the cosine similarity is computed.
+ */
+ public CosineSimilarity(Ops tf, int[] axis) {
+
+ this(tf, null, axis, DEFAULT_REDUCTION);
+ }
/**
* Creates a Cosine Similarity Loss using a Loss Reduction of {@link #DEFAULT_REDUCTION}
@@ -120,6 +132,18 @@ public CosineSimilarity(Ops tf, String name, int axis) {
this(tf, name, axis, DEFAULT_REDUCTION);
}
+ /**
+ * Creates a Cosine Similarity Loss using a Loss Reduction of {@link #DEFAULT_REDUCTION}
+ *
+ * @param tf the TensorFlow Ops
+ * @param name the name of the loss
+ * @param axis The dimension along which the cosine similarity is computed.
+ */
+ public CosineSimilarity(Ops tf, String name, int[] axis) {
+
+ this(tf, name, axis, DEFAULT_REDUCTION);
+ }
+
/**
* Creates a Cosine Similarity Loss using {@link Class#getSimpleName()} as the loss name and an
* axis of {@link #DEFAULT_AXIS}
@@ -153,6 +177,18 @@ public CosineSimilarity(Ops tf, String name, Reduction reduction) {
*/
public CosineSimilarity(Ops tf, int axis, Reduction reduction) {
+ this(tf, null, new int[] {axis}, reduction);
+ }
+
+ /**
+ * Creates a Cosine Similarity Loss using {@link Class#getSimpleName()} as the loss name
+ *
+ * @param tf the TensorFlow Ops
+ * @param axis The dimension along which the cosine similarity is computed.
+ * @param reduction Type of Reduction to apply to the loss.
+ */
+ public CosineSimilarity(Ops tf, int[] axis, Reduction reduction) {
+
this(tf, null, axis, reduction);
}
@@ -165,15 +201,28 @@ public CosineSimilarity(Ops tf, int axis, Reduction reduction) {
* @param reduction Type of Reduction to apply to the loss.
*/
public CosineSimilarity(Ops tf, String name, int axis, Reduction reduction) {
+ this(tf, name, new int[] {axis}, reduction);
+ }
+
+ /**
+ * Creates a Cosine Similarity Loss
+ *
+ * @param tf the TensorFlow Ops
+ * @param name the name of the loss
+ * @param axis The dimension along which the cosine similarity is computed.
+ * @param reduction Type of Reduction to apply to the loss.
+ */
+ public CosineSimilarity(Ops tf, String name, int[] axis, Reduction reduction) {
super(tf, name, reduction);
this.axis = axis;
}
/** {@inheritDoc} */
@Override
- public loss = maximum(1 - labels * predictions, 0)loss = maximum(1 - labels * predictions, 0).
*
- * labels values are expected to be -1 or 1.
- * If binary (0 or 1) labels are provided, they will be converted to -1 or 1.labels values are expected to be -1 or 1. If binary (0 or 1) labels are provided,
+ * they will be converted to -1 or 1.
*
* [0, 1]. When > 0, label values are smoothed, meaning the
- * confidence on label values are relaxed. e.g. labelSmoothing=0.2 means that we will use a
- * value of 0.1 for label 0 and 0.9 for label 1
+ * @param labelSmoothing Float in [0, 1]. When > 0, label values are
+ * smoothed, meaning the confidence on label values is relaxed. e.g. labelSmoothing=0.2
+ * means that we will use a value of 0.1 for label 0 and
+ * 0.9 for label 1
* @param axis the
* @param