diff --git a/benchmark/src/main/java/io/prometheus/benchmark/SummaryBenchmark.java b/benchmark/src/main/java/io/prometheus/benchmark/SummaryBenchmark.java index 3a77dc5ec..5f192e2f4 100644 --- a/benchmark/src/main/java/io/prometheus/benchmark/SummaryBenchmark.java +++ b/benchmark/src/main/java/io/prometheus/benchmark/SummaryBenchmark.java @@ -23,9 +23,14 @@ public class SummaryBenchmark { io.prometheus.client.metrics.Summary prometheusSummary; io.prometheus.client.metrics.Summary.Child prometheusSummaryChild; + io.prometheus.client.Summary prometheusSimpleSummary; io.prometheus.client.Summary.Child prometheusSimpleSummaryChild; io.prometheus.client.Summary prometheusSimpleSummaryNoLabels; + io.prometheus.client.Summary prometheusSimpleSummaryQuantiles; + io.prometheus.client.Summary.Child prometheusSimpleSummaryQuantilesChild; + io.prometheus.client.Summary prometheusSimpleSummaryQuantilesNoLabels; + io.prometheus.client.Histogram prometheusSimpleHistogram; io.prometheus.client.Histogram.Child prometheusSimpleHistogramChild; io.prometheus.client.Histogram prometheusSimpleHistogramNoLabels; @@ -49,10 +54,25 @@ public void setup() { .help("some description..") .create(); + prometheusSimpleSummaryQuantiles = io.prometheus.client.Summary.build() + .name("name") + .help("some description..") + .labelNames("some", "group") + .quantile(0.5).quantile(0.9).quantile(0.95).quantile(0.99) + .create(); + prometheusSimpleSummaryQuantilesChild = prometheusSimpleSummaryQuantiles.labels("test", "group"); + + prometheusSimpleSummaryQuantilesNoLabels = io.prometheus.client.Summary.build() + .name("name") + .help("some description..") + .quantile(0.5).quantile(0.9).quantile(0.95).quantile(0.99) + .create(); + prometheusSimpleHistogram = io.prometheus.client.Histogram.build() .name("name") .help("some description..") - .labelNames("some", "group").create(); + .labelNames("some", "group") + .create(); prometheusSimpleHistogramChild = prometheusSimpleHistogram.labels("test", "group"); prometheusSimpleHistogramNoLabels = io.prometheus.client.Histogram.build() @@ -99,6 +119,27 @@ public void prometheusSimpleSummaryNoLabelsBenchmark() { prometheusSimpleSummaryNoLabels.observe(1); } + @Benchmark + @BenchmarkMode({Mode.AverageTime}) + @OutputTimeUnit(TimeUnit.NANOSECONDS) + public void prometheusSimpleSummaryQuantilesBenchmark() { + prometheusSimpleSummaryQuantiles.labels("test", "group").observe(1); + } + + @Benchmark + @BenchmarkMode({Mode.AverageTime}) + @OutputTimeUnit(TimeUnit.NANOSECONDS) + public void prometheusSimpleSummaryQuantilesChildBenchmark() { + prometheusSimpleSummaryQuantilesChild.observe(1); + } + + @Benchmark + @BenchmarkMode({Mode.AverageTime}) + @OutputTimeUnit(TimeUnit.NANOSECONDS) + public void prometheusSimpleSummaryQuantilesNoLabelsBenchmark() { + prometheusSimpleSummaryQuantilesNoLabels.observe(1); + } + @Benchmark @BenchmarkMode({Mode.AverageTime}) @OutputTimeUnit(TimeUnit.NANOSECONDS) diff --git a/simpleclient/src/main/java/io/prometheus/client/CKMSQuantiles.java b/simpleclient/src/main/java/io/prometheus/client/CKMSQuantiles.java deleted file mode 100644 index 1ffb65382..000000000 --- a/simpleclient/src/main/java/io/prometheus/client/CKMSQuantiles.java +++ /dev/null @@ -1,293 +0,0 @@ -package io.prometheus.client; - -// Copied from https://raw.githubusercontent.com/Netflix/ocelli/master/ocelli-core/src/main/java/netflix/ocelli/stats/CKMSQuantiles.java -// Revision d0357b8bf5c17a173ce94d6b26823775b3f999f6 from Jan 21, 2015. 
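// Editor's aside, not part of the patch: a minimal sketch of how the quantile-enabled
// Summary exercised by the new benchmarks above might be used in application code.
// The builder calls mirror the benchmark diff; the metric name and the
// startTimer()/observeDuration() usage are assumptions based on the existing
// simpleclient Summary API.
import io.prometheus.client.Summary;

class SummaryQuantilesUsageSketch {
    // Track request latency at the 50th/90th/95th/99th percentiles, as in the benchmark setup.
    static final Summary requestLatency = Summary.build()
            .name("request_latency_seconds")
            .help("Request latency in seconds.")
            .quantile(0.5).quantile(0.9).quantile(0.95).quantile(0.99)
            .register();

    void handleRequest() {
        Summary.Timer timer = requestLatency.startTimer();
        try {
            // ... handle the request ...
        } finally {
            timer.observeDuration(); // records elapsed seconds as one observation
        }
    }
}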
-// -// This is the original code except for the following modifications: -// -// - Changed the type of the observed values from int to double. -// - Removed the Quantiles interface and corresponding @Override annotations. -// - Changed the package name. -// - Make get() return NaN when no sample was observed. -// - Make class package private - -/* - Copyright 2012 Andrew Wang (andrew@umbrant.com) - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - */ - -import java.util.Arrays; -import java.util.LinkedList; -import java.util.ListIterator; - -/** - * Implementation of the Cormode, Korn, Muthukrishnan, and Srivastava algorithm - * for streaming calculation of targeted high-percentile epsilon-approximate - * quantiles. - * - * This is a generalization of the earlier work by Greenwald and Khanna (GK), - * which essentially allows different error bounds on the targeted quantiles, - * which allows for far more efficient calculation of high-percentiles. - * - * - * See: Cormode, Korn, Muthukrishnan, and Srivastava - * "Effective Computation of Biased Quantiles over Data Streams" in ICDE 2005 - * - * Greenwald and Khanna, - * "Space-efficient online computation of quantile summaries" in SIGMOD 2001 - * - */ -class CKMSQuantiles { - /** - * Total number of items in stream. - */ - private int count = 0; - - /** - * Used for tracking incremental compression. - */ - private int compressIdx = 0; - - /** - * Current list of sampled items, maintained in sorted order with error - * bounds. - */ - protected LinkedList sample; - - /** - * Buffers incoming items to be inserted in batch. - */ - private double[] buffer = new double[500]; - - private int bufferCount = 0; - - /** - * Array of Quantiles that we care about, along with desired error. - */ - private final Quantile quantiles[]; - - public CKMSQuantiles(Quantile[] quantiles) { - this.quantiles = quantiles; - this.sample = new LinkedList(); - } - - /** - * Add a new value from the stream. - * - * @param value - */ - public void insert(double value) { - buffer[bufferCount] = value; - bufferCount++; - - if (bufferCount == buffer.length) { - insertBatch(); - compress(); - } - } - - /** - * Get the estimated value at the specified quantile. - * - * @param q - * Queried quantile, e.g. 0.50 or 0.99. - * @return Estimated value at that quantile. - */ - public double get(double q) { - // clear the buffer - insertBatch(); - compress(); - - if (sample.size() == 0) { - return Double.NaN; - } - - int rankMin = 0; - int desired = (int) (q * count); - - ListIterator it = sample.listIterator(); - Item prev, cur; - cur = it.next(); - while (it.hasNext()) { - prev = cur; - cur = it.next(); - - rankMin += prev.g; - - if (rankMin + cur.g + cur.delta > desired - + (allowableError(desired) / 2)) { - return prev.value; - } - } - - // edge case of wanting max value - return sample.getLast().value; - } - - /** - * Specifies the allowable error for this rank, depending on which quantiles - * are being targeted. - * - * This is the f(r_i, n) function from the CKMS paper. 
It's basically how - * wide the range of this rank can be. - * - * @param rank - * the index in the list of samples - */ - private double allowableError(int rank) { - // NOTE: according to CKMS, this should be count, not size, but this - // leads - // to error larger than the error bounds. Leaving it like this is - // essentially a HACK, and blows up memory, but does "work". - // int size = count; - int size = sample.size(); - double minError = size + 1; - - for (Quantile q : quantiles) { - double error; - if (rank <= q.quantile * size) { - error = q.u * (size - rank); - } else { - error = q.v * rank; - } - if (error < minError) { - minError = error; - } - } - - return minError; - } - - private boolean insertBatch() { - if (bufferCount == 0) { - return false; - } - - Arrays.sort(buffer, 0, bufferCount); - - // Base case: no samples - int start = 0; - if (sample.size() == 0) { - Item newItem = new Item(buffer[0], 1, 0); - sample.add(newItem); - start++; - count++; - } - - ListIterator it = sample.listIterator(); - Item item = it.next(); - - for (int i = start; i < bufferCount; i++) { - double v = buffer[i]; - while (it.nextIndex() < sample.size() && item.value < v) { - item = it.next(); - } - - // If we found that bigger item, back up so we insert ourselves - // before it - if (item.value > v) { - it.previous(); - } - - // We use different indexes for the edge comparisons, because of the - // above - // if statement that adjusts the iterator - int delta; - if (it.previousIndex() == 0 || it.nextIndex() == sample.size()) { - delta = 0; - } - else { - delta = ((int) Math.floor(allowableError(it.nextIndex()))) - 1; - } - - Item newItem = new Item(v, 1, delta); - it.add(newItem); - count++; - item = newItem; - } - - bufferCount = 0; - return true; - } - - /** - * Try to remove extraneous items from the set of sampled items. This checks - * if an item is unnecessary based on the desired error bounds, and merges - * it with the adjacent item if it is. - */ - private void compress() { - if (sample.size() < 2) { - return; - } - - ListIterator it = sample.listIterator(); - int removed = 0; - - Item prev = null; - Item next = it.next(); - - while (it.hasNext()) { - prev = next; - next = it.next(); - - if (prev.g + next.g + next.delta <= allowableError(it.previousIndex())) { - next.g += prev.g; - // Remove prev. it.remove() kills the last thing returned. 
- it.previous(); - it.previous(); - it.remove(); - // it.next() is now equal to next, skip it back forward again - it.next(); - removed++; - } - } - } - - private class Item { - public final double value; - public int g; - public final int delta; - - public Item(double value, int lower_delta, int delta) { - this.value = value; - this.g = lower_delta; - this.delta = delta; - } - - @Override - public String toString() { - return String.format("I{val=%.3f, g=%d, del=%d}", value, g, delta); - } - } - - public static class Quantile { - public final double quantile; - public final double error; - public final double u; - public final double v; - - public Quantile(double quantile, double error) { - this.quantile = quantile; - this.error = error; - u = 2.0 * error / (1.0 - quantile); - v = 2.0 * error / quantile; - } - - @Override - public String toString() { - return String.format("Q{q=%.3f, eps=%.3f}", quantile, error); - } - } - -} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/AbstractHistogram.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/AbstractHistogram.java new file mode 100644 index 000000000..a28a52f16 --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/AbstractHistogram.java @@ -0,0 +1,2499 @@ +/* + * Written by Gil Tene of Azul Systems, and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram; + +import java.io.*; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.nio.ByteBuffer; +import java.util.Iterator; +import java.util.Locale; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicLongFieldUpdater; +import java.util.zip.DataFormatException; +import java.util.zip.Deflater; +import java.util.zip.Inflater; + +import static java.nio.ByteOrder.BIG_ENDIAN; + +/** + * This non-public AbstractHistogramBase super-class separation is meant to bunch "cold" fields + * separately from "hot" fields, in an attempt to force the JVM to place the (hot) fields + * commonly used in the value recording code paths close together. + * Subclass boundaries tend to be strongly control memory layout decisions in most practical + * JVM implementations, making this an effective method for control filed grouping layout. + */ + +abstract class AbstractHistogramBase extends EncodableHistogram { + static AtomicLong constructionIdentityCount = new AtomicLong(0); + + // "Cold" accessed fields. Not used in the recording code path: + long identity; + volatile boolean autoResize = false; + + long highestTrackableValue; + long lowestDiscernibleValue; + int numberOfSignificantValueDigits; + + int bucketCount; + /** + * Power-of-two length of linearly scaled array slots in the counts array. Long enough to hold the first sequence of + * entries that must be distinguished by a single unit (determined by configured precision). 
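+ * (Editor's worked example, an addition to the vendored javadoc: with 3 significant digits the histogram must resolve single units up to 2 * 10^3 = 2000, so subBucketCount is rounded up to the next power of two, 2048.)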
+ */ + int subBucketCount; + int countsArrayLength; + int wordSizeInBytes; + + long startTimeStampMsec = Long.MAX_VALUE; + long endTimeStampMsec = 0; + String tag = null; + + double integerToDoubleValueConversionRatio = 1.0; + double doubleToIntegerValueConversionRatio = 1.0; + + PercentileIterator percentileIterator; + RecordedValuesIterator recordedValuesIterator; + + ByteBuffer intermediateUncompressedByteBuffer = null; + byte[] intermediateUncompressedByteArray = null; + + double getIntegerToDoubleValueConversionRatio() { + return integerToDoubleValueConversionRatio; + } + + double getDoubleToIntegerValueConversionRatio() { + return doubleToIntegerValueConversionRatio; + } + + void nonConcurrentSetIntegerToDoubleValueConversionRatio(double integerToDoubleValueConversionRatio) { + this.integerToDoubleValueConversionRatio = integerToDoubleValueConversionRatio; + this.doubleToIntegerValueConversionRatio = 1.0/integerToDoubleValueConversionRatio; + } + + abstract void setIntegerToDoubleValueConversionRatio(double integerToDoubleValueConversionRatio); +} + +/** + *
<h3>An abstract base class for integer values High Dynamic Range (HDR) Histograms</h3> + * <p>
+ * AbstractHistogram supports the recording and analyzing of sampled data value counts across a configurable integer value + * range with configurable value precision within the range. Value precision is expressed as the number of significant + * digits in the value recording, and provides control over value quantization behavior across the value range and the + * subsequent value resolution at any given level. + * <p>
+ * For example, a Histogram could be configured to track the counts of observed integer values between 0 and + * 3,600,000,000 while maintaining a value precision of 3 significant digits across that range. Value quantization + * within the range will thus be no larger than 1/1,000th (or 0.1%) of any value. This example Histogram could + * be used to track and analyze the counts of observed response times ranging between 1 microsecond and 1 hour + * in magnitude, while maintaining a value resolution of 1 microsecond up to 1 millisecond, a resolution of + * 1 millisecond (or better) up to one second, and a resolution of 1 second (or better) up to 1,000 seconds. At its + * maximum tracked value (1 hour), it would still maintain a resolution of 3.6 seconds (or better). + * <p>
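+ * <pre>
+ * // Editor's illustration of the example above; assumes the concrete Histogram
+ * // subclass vendored alongside this class:
+ * Histogram histogram = new Histogram(3600000000L, 3);
+ * histogram.recordValue(42); // counted with at most 0.1% value quantization error
+ * </pre>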
+ * See package description for {@link org.HdrHistogram} for details. + * + */ +public abstract class AbstractHistogram extends AbstractHistogramBase implements ValueRecorder, Serializable { + + // "Hot" accessed fields (used in the the value recording code path) are bunched here, such + // that they will have a good chance of ending up in the same cache line as the totalCounts and + // counts array reference fields that subclass implementations will typically add. + + /** + * Number of leading zeros in the largest value that can fit in bucket 0. + */ + int leadingZeroCountBase; + int subBucketHalfCountMagnitude; + + /** + * Largest k such that 2^k <= lowestDiscernibleValue + */ + int unitMagnitude; + int subBucketHalfCount; + /** + * Biggest value that can fit in bucket 0 + */ + long subBucketMask; + /** + * Lowest unitMagnitude bits are set + */ + long unitMagnitudeMask; + volatile long maxValue = 0; + volatile long minNonZeroValue = Long.MAX_VALUE; + + private static final AtomicLongFieldUpdater maxValueUpdater = + AtomicLongFieldUpdater.newUpdater(AbstractHistogram.class, "maxValue"); + private static final AtomicLongFieldUpdater minNonZeroValueUpdater = + AtomicLongFieldUpdater.newUpdater(AbstractHistogram.class, "minNonZeroValue"); + + // Sub-classes will typically add a totalCount field and a counts array field, which will likely be laid out + // right around here due to the subclass layout rules in most practical JVM implementations. + + // ######## ### ###### ## ## ### ###### ######## + // ## ## ## ## ## ## ## ## ## ## ## ## ## + // ## ## ## ## ## ## ## ## ## ## ## + // ######## ## ## ## ##### ## ## ## #### ###### + // ## ######### ## ## ## ######### ## ## ## + // ## ## ## ## ## ## ## ## ## ## ## ## + // ## ## ## ###### ## ## ## ## ###### ######## + // + // ### ######## ###### ######## ######## ### ###### ######## + // ## ## ## ## ## ## ## ## ## ## ## ## ## ## + // ## ## ## ## ## ## ## ## ## ## ## ## + // ## ## ######## ###### ## ######## ## ## ## ## + // ######### ## ## ## ## ## ## ######### ## ## + // ## ## ## ## ## ## ## ## ## ## ## ## ## ## + // ## ## ######## ###### ## ## ## ## ## ###### ## + // + // Abstract, counts-type dependent methods to be provided by subclass implementations: + // + + abstract long getCountAtIndex(int index); + + abstract long getCountAtNormalizedIndex(int index); + + abstract void incrementCountAtIndex(int index); + + abstract void addToCountAtIndex(int index, long value); + + abstract void setCountAtIndex(int index, long value); + + abstract void setCountAtNormalizedIndex(int index, long value); + + abstract int getNormalizingIndexOffset(); + + abstract void setNormalizingIndexOffset(int normalizingIndexOffset); + + abstract void shiftNormalizingIndexByOffset(int offsetToAdd, boolean lowestHalfBucketPopulated, + double newIntegerToDoubleValueConversionRatio); + + abstract void setTotalCount(long totalCount); + + abstract void incrementTotalCount(); + + abstract void addToTotalCount(long value); + + abstract void clearCounts(); + + abstract int _getEstimatedFootprintInBytes(); + + abstract void resize(long newHighestTrackableValue); + + /** + * Get the total count of all recorded values in the histogram + * @return the total count of all recorded values in the histogram + */ + abstract public long getTotalCount(); + + /** + * Set internally tracked maxValue to new value if new value is greater than current one. + * May be overridden by subclasses for synchronization or atomicity purposes. 
+ * @param value new maxValue to set + */ + private void updatedMaxValue(final long value) { + final long internalValue = value | unitMagnitudeMask; // Max unit-equivalent value + long sampledMaxValue; + while (internalValue > (sampledMaxValue = maxValue)) { + maxValueUpdater.compareAndSet(this, sampledMaxValue, internalValue); + } + } + + private void resetMaxValue(final long maxValue) { + this.maxValue = maxValue | unitMagnitudeMask; // Max unit-equivalent value + } + + /** + * Set internally tracked minNonZeroValue to new value if new value is smaller than current one. + * May be overridden by subclasses for synchronization or atomicity purposes. + * @param value new minNonZeroValue to set + */ + private void updateMinNonZeroValue(final long value) { + if (value <= unitMagnitudeMask) { + return; // Unit-equivalent to 0. + } + final long internalValue = value & ~unitMagnitudeMask; // Min unit-equivalent value + long sampledMinNonZeroValue; + while (internalValue < (sampledMinNonZeroValue = minNonZeroValue)) { + minNonZeroValueUpdater.compareAndSet(this, sampledMinNonZeroValue, internalValue); + } + } + + private void resetMinNonZeroValue(final long minNonZeroValue) { + final long internalValue = minNonZeroValue & ~unitMagnitudeMask; // Min unit-equivalent value + this.minNonZeroValue = (minNonZeroValue == Long.MAX_VALUE) ? + minNonZeroValue : internalValue; + } + + // ###### ####### ## ## ###### ######## ######## ## ## ###### ######## #### ####### ## ## + // ## ## ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## + // ## ## ## #### ## ## ## ## ## ## ## ## ## ## ## ## #### ## + // ## ## ## ## ## ## ###### ## ######## ## ## ## ## ## ## ## ## ## ## + // ## ## ## ## #### ## ## ## ## ## ## ## ## ## ## ## ## #### + // ## ## ## ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### + // ###### ####### ## ## ###### ## ## ## ####### ###### ## #### ####### ## ## + // + // Construction: + // + + /** + * Construct an auto-resizing histogram with a lowest discernible value of 1 and an auto-adjusting + * highestTrackableValue. Can auto-resize up to track values up to (Long.MAX_VALUE / 2). + * + * @param numberOfSignificantValueDigits The number of significant decimal digits to which the histogram will + * maintain value resolution and separation. Must be a non-negative + * integer between 0 and 5. + */ + protected AbstractHistogram(final int numberOfSignificantValueDigits) { + this(1, 2, numberOfSignificantValueDigits); + autoResize = true; + } + + /** + * Construct a histogram given the Lowest and Highest values to be tracked and a number of significant + * decimal digits. Providing a lowestDiscernibleValue is useful is situations where the units used + * for the histogram's values are much smaller that the minimal accuracy required. E.g. when tracking + * time values stated in nanosecond units, where the minimal accuracy required is a microsecond, the + * proper value for lowestDiscernibleValue would be 1000. + * + * @param lowestDiscernibleValue The lowest value that can be discerned (distinguished from 0) by the histogram. + * Must be a positive integer that is {@literal >=} 1. May be internally rounded + * down to nearest power of 2. + * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive + * integer that is {@literal >=} (2 * lowestDiscernibleValue). + * @param numberOfSignificantValueDigits The number of significant decimal digits to which the histogram will + * maintain value resolution and separation. 
Must be a non-negative + * integer between 0 and 5. + */ + protected AbstractHistogram(final long lowestDiscernibleValue, final long highestTrackableValue, + final int numberOfSignificantValueDigits) { + // Verify argument validity + if (lowestDiscernibleValue < 1) { + throw new IllegalArgumentException("lowestDiscernibleValue must be >= 1"); + } + if (lowestDiscernibleValue > Long.MAX_VALUE / 2) { + // prevent subsequent multiplication by 2 for highestTrackableValue check from overflowing + throw new IllegalArgumentException("lowestDiscernibleValue must be <= Long.MAX_VALUE / 2"); + } + if (highestTrackableValue < 2L * lowestDiscernibleValue) { + throw new IllegalArgumentException("highestTrackableValue must be >= 2 * lowestDiscernibleValue"); + } + if ((numberOfSignificantValueDigits < 0) || (numberOfSignificantValueDigits > 5)) { + throw new IllegalArgumentException("numberOfSignificantValueDigits must be between 0 and 5"); + } + identity = constructionIdentityCount.getAndIncrement(); + + init(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits, 1.0, 0); + } + + /** + * Construct a histogram with the same range settings as a given source histogram, + * duplicating the source's start/end timestamps (but NOT it's contents) + * @param source The source histogram to duplicate + */ + protected AbstractHistogram(final AbstractHistogram source) { + this(source.getLowestDiscernibleValue(), source.getHighestTrackableValue(), + source.getNumberOfSignificantValueDigits()); + this.setStartTimeStamp(source.getStartTimeStamp()); + this.setEndTimeStamp(source.getEndTimeStamp()); + this.autoResize = source.autoResize; + } + + private void init(final long lowestDiscernibleValue, + final long highestTrackableValue, + final int numberOfSignificantValueDigits, + final double integerToDoubleValueConversionRatio, + final int normalizingIndexOffset) { + this.lowestDiscernibleValue = lowestDiscernibleValue; + this.highestTrackableValue = highestTrackableValue; + this.numberOfSignificantValueDigits = numberOfSignificantValueDigits; + this.integerToDoubleValueConversionRatio = integerToDoubleValueConversionRatio; + if (normalizingIndexOffset != 0) { + setNormalizingIndexOffset(normalizingIndexOffset); + } + + /* + * Given a 3 decimal point accuracy, the expectation is obviously for "+/- 1 unit at 1000". It also means that + * it's "ok to be +/- 2 units at 2000". The "tricky" thing is that it is NOT ok to be +/- 2 units at 1999. Only + * starting at 2000. So internally, we need to maintain single unit resolution to 2x 10^decimalPoints. + */ + final long largestValueWithSingleUnitResolution = 2 * (long) Math.pow(10, numberOfSignificantValueDigits); + + unitMagnitude = (int) (Math.log(lowestDiscernibleValue)/Math.log(2)); + unitMagnitudeMask = (1 << unitMagnitude) - 1; + + // We need to maintain power-of-two subBucketCount (for clean direct indexing) that is large enough to + // provide unit resolution to at least largestValueWithSingleUnitResolution. 
So figure out + // largestValueWithSingleUnitResolution's nearest power-of-two (rounded up), and use that: + int subBucketCountMagnitude = (int) Math.ceil(Math.log(largestValueWithSingleUnitResolution)/Math.log(2)); + subBucketHalfCountMagnitude = subBucketCountMagnitude - 1; + subBucketCount = 1 << subBucketCountMagnitude; + subBucketHalfCount = subBucketCount / 2; + subBucketMask = ((long)subBucketCount - 1) << unitMagnitude; + + if (subBucketCountMagnitude + unitMagnitude > 62) { + // subBucketCount entries can't be represented, with unitMagnitude applied, in a positive long. + // Technically it still sort of works if their sum is 63: you can represent all but the last number + // in the shifted subBucketCount. However, the utility of such a histogram vs ones whose magnitude here + // fits in 62 bits is debatable, and it makes it harder to work through the logic. + // Sums larger than 64 are totally broken as leadingZeroCountBase would go negative. + throw new IllegalArgumentException("Cannot represent numberOfSignificantValueDigits worth of values " + + "beyond lowestDiscernibleValue"); + } + + // determine exponent range needed to support the trackable value with no overflow: + establishSize(highestTrackableValue); + + // Establish leadingZeroCountBase, used in getBucketIndex() fast path: + // subtract the bits that would be used by the largest value in bucket 0. + leadingZeroCountBase = 64 - unitMagnitude - subBucketCountMagnitude; + + percentileIterator = new PercentileIterator(this, 1); + recordedValuesIterator = new RecordedValuesIterator(this); + } + + /** + * The buckets (each of which has subBucketCount sub-buckets, here assumed to be 2048 as an example) overlap: + * + *
<pre>
+     * The 0'th bucket covers from 0...2047 in multiples of 1, using all 2048 sub-buckets
+     * The 1'th bucket covers from 2048..4095 in multiples of 2, using only the top 1024 sub-buckets
+     * The 2'th bucket covers from 4096..8191 in multiples of 4, using only the top 1024 sub-buckets
+     * ...
+     * </pre>
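+ * (Editor's worked example: with 2048 sub-buckets, the value 4100 falls in bucket 2 and is recorded at a precision of 4 units, so values 4100..4103 all share a single count.)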
+ * + * Bucket 0 is "special" here. It is the only one that has 2048 entries. All the rest have 1024 entries (because + * their bottom half overlaps with and is already covered by the all of the previous buckets put together). In other + * words, the k'th bucket could represent 0 * 2^k to 2048 * 2^k in 2048 buckets with 2^k precision, but the midpoint + * of 1024 * 2^k = 2048 * 2^(k-1) = the k-1'th bucket's end, so we would use the previous bucket for those lower + * values as it has better precision. + */ + final void establishSize(long newHighestTrackableValue) { + // establish counts array length: + countsArrayLength = determineArrayLengthNeeded(newHighestTrackableValue); + // establish exponent range needed to support the trackable value with no overflow: + bucketCount = getBucketsNeededToCoverValue(newHighestTrackableValue); + // establish the new highest trackable value: + highestTrackableValue = newHighestTrackableValue; + } + + final int determineArrayLengthNeeded(long highestTrackableValue) { + if (highestTrackableValue < 2L * lowestDiscernibleValue) { + throw new IllegalArgumentException("highestTrackableValue (" + highestTrackableValue + + ") cannot be < (2 * lowestDiscernibleValue)"); + } + //determine counts array length needed: + int countsArrayLength = getLengthForNumberOfBuckets(getBucketsNeededToCoverValue(highestTrackableValue)); + return countsArrayLength; + } + + // ### ## ## ######## ####### + // ## ## ## ## ## ## ## + // ## ## ## ## ## ## ## + // ## ## ## ## ## ## ## + // ######### ## ## ## ## ## + // ## ## ## ## ## ## ## + // ## ## ####### ## ####### + // + // ######## ######## ###### #### ######## #### ## ## ###### + // ## ## ## ## ## ## ## ## ### ## ## ## + // ## ## ## ## ## ## ## #### ## ## + // ######## ###### ###### ## ## ## ## ## ## ## #### + // ## ## ## ## ## ## ## ## #### ## ## + // ## ## ## ## ## ## ## ## ## ### ## ## + // ## ## ######## ###### #### ######## #### ## ## ###### + // + // Auto-resizing control: + // + + /** + * Indicate whether or not the histogram is set to auto-resize and auto-adjust it's + * highestTrackableValue + * @return autoResize setting + */ + public boolean isAutoResize() { + return autoResize; + } + + /** + * Indicate whether or not the histogram is capable of supporting auto-resize functionality. + * Note that this is an indication that enabling auto-resize by calling setAutoResize() is allowed, + * and NOT that the histogram will actually auto-resize. Use isAutoResize() to determine if + * the histogram is in auto-resize mode. 
+ * @return autoResize setting + */ + public boolean supportsAutoResize() { return true; } + + /** + * Control whether or not the histogram can auto-resize and auto-adjust it's + * highestTrackableValue + * @param autoResize autoResize setting + */ + public void setAutoResize(boolean autoResize) { + this.autoResize = autoResize; + } + + // ## ## ### ## ## ## ######## + // ## ## ## ## ## ## ## ## + // ## ## ## ## ## ## ## ## + // ## ## ## ## ## ## ## ###### + // ## ## ######### ## ## ## ## + // ## ## ## ## ## ## ## ## + // ### ## ## ######## ####### ######## + // + // ######## ######## ###### ####### ######## ######## #### ## ## ###### + // ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## + // ## ## ## ## ## ## ## ## ## ## ## #### ## ## + // ######## ###### ## ## ## ######## ## ## ## ## ## ## ## #### + // ## ## ## ## ## ## ## ## ## ## ## ## #### ## ## + // ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## + // ## ## ######## ###### ####### ## ## ######## #### ## ## ###### + // + // Value recording support: + // + + + /** + * Record a value in the histogram + * + * @param value The value to be recorded + * @throws ArrayIndexOutOfBoundsException (may throw) if value is exceeds highestTrackableValue + */ + @Override + public void recordValue(final long value) throws ArrayIndexOutOfBoundsException { + recordSingleValue(value); + } + + /** + * Record a value in the histogram (adding to the value's current count) + * + * @param value The value to be recorded + * @param count The number of occurrences of this value to record + * @throws ArrayIndexOutOfBoundsException (may throw) if value is exceeds highestTrackableValue + */ + @Override + public void recordValueWithCount(final long value, final long count) throws ArrayIndexOutOfBoundsException { + recordCountAtValue(count, value); + } + + /** + * Record a value in the histogram. + *
<p> + * To compensate for the loss of sampled values when a recorded value is larger than the expected + * interval between value samples, Histogram will auto-generate an additional series of decreasingly-smaller + * (down to the expectedIntervalBetweenValueSamples) value records. + * <p> + * Note: This is an at-recording correction method, as opposed to the post-recording correction method provided + * by {@link #copyCorrectedForCoordinatedOmission(long)}. + * The two methods are mutually exclusive, and only one of the two should be used on a given data set to correct + * for the same coordinated omission issue. + * <p>
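+ * <pre>
+ * // Editor's illustration (assumed expected interval of 100 time units):
+ * histogram.recordValueWithExpectedInterval(1000, 100);
+ * // records 1000, plus auto-generated values 900, 800, ..., 100 (one count each)
+ * </pre>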
+ * See notes in the description of the Histogram calls for an illustration of why this corrective behavior is + * important. + * + * @param value The value to record + * @param expectedIntervalBetweenValueSamples If expectedIntervalBetweenValueSamples is larger than 0, add + * auto-generated value records as appropriate if value is larger + * than expectedIntervalBetweenValueSamples + * @throws ArrayIndexOutOfBoundsException (may throw) if value is exceeds highestTrackableValue + */ + @Override + public void recordValueWithExpectedInterval(final long value, final long expectedIntervalBetweenValueSamples) + throws ArrayIndexOutOfBoundsException { + recordSingleValueWithExpectedInterval(value, expectedIntervalBetweenValueSamples); + } + + // Package-internal support for converting and recording double values into integer histograms: + void recordConvertedDoubleValue(final double value) { + long integerValue = (long) (value * doubleToIntegerValueConversionRatio); + recordValue(integerValue); + } + + public void recordConvertedDoubleValueWithCount(final double value, final long count) throws ArrayIndexOutOfBoundsException { + long integerValue = (long) (value * doubleToIntegerValueConversionRatio); + recordCountAtValue(count, integerValue); + } + + /** + * @deprecated + * + * Record a value in the histogram. This deprecated method has identical behavior to + * recordValueWithExpectedInterval(). It was renamed to avoid ambiguity. + * + * @param value The value to record + * @param expectedIntervalBetweenValueSamples If expectedIntervalBetweenValueSamples is larger than 0, add + * auto-generated value records as appropriate if value is larger + * than expectedIntervalBetweenValueSamples + * @throws ArrayIndexOutOfBoundsException (may throw) if value is exceeds highestTrackableValue + */ + public void recordValue(final long value, final long expectedIntervalBetweenValueSamples) + throws ArrayIndexOutOfBoundsException { + recordValueWithExpectedInterval(value, expectedIntervalBetweenValueSamples); + } + + void updateMinAndMax(final long value) { + if (value > maxValue) { + updatedMaxValue(value); + } + if ((value < minNonZeroValue) && (value != 0)) { + updateMinNonZeroValue(value); + } + } + + private void recordCountAtValue(final long count, final long value) + throws ArrayIndexOutOfBoundsException { + int countsIndex = countsArrayIndex(value); + try { + addToCountAtIndex(countsIndex, count); + } catch (IndexOutOfBoundsException ex) { + handleRecordException(count, value, ex); + } + updateMinAndMax(value); + addToTotalCount(count); + } + + private void recordSingleValue(final long value) throws ArrayIndexOutOfBoundsException { + int countsIndex = countsArrayIndex(value); + try { + incrementCountAtIndex(countsIndex); + } catch (IndexOutOfBoundsException ex) { + handleRecordException(1, value, ex); + } + updateMinAndMax(value); + incrementTotalCount(); + } + + private void handleRecordException(final long count, final long value, Exception ex) { + if (!autoResize) { + throw new ArrayIndexOutOfBoundsException("value " + value + " outside of histogram covered range. 
Caused by: " + ex); + } + resize(value); + int countsIndex = countsArrayIndex(value); + addToCountAtIndex(countsIndex, count); + this.highestTrackableValue = highestEquivalentValue(valueFromIndex(countsArrayLength - 1)); + } + + private void recordValueWithCountAndExpectedInterval(final long value, final long count, + final long expectedIntervalBetweenValueSamples) + throws ArrayIndexOutOfBoundsException { + recordCountAtValue(count, value); + if (expectedIntervalBetweenValueSamples <= 0) + return; + for (long missingValue = value - expectedIntervalBetweenValueSamples; + missingValue >= expectedIntervalBetweenValueSamples; + missingValue -= expectedIntervalBetweenValueSamples) { + recordCountAtValue(count, missingValue); + } + } + + private void recordSingleValueWithExpectedInterval(final long value, + final long expectedIntervalBetweenValueSamples) + throws ArrayIndexOutOfBoundsException { + recordSingleValue(value); + if (expectedIntervalBetweenValueSamples <= 0) + return; + for (long missingValue = value - expectedIntervalBetweenValueSamples; + missingValue >= expectedIntervalBetweenValueSamples; + missingValue -= expectedIntervalBetweenValueSamples) { + recordSingleValue(missingValue); + } + } + + // ###### ## ######## ### ######## #### ## ## ###### + // ## ## ## ## ## ## ## ## ## ### ## ## ## + // ## ## ## ## ## ## ## ## #### ## ## + // ## ## ###### ## ## ######## ## ## ## ## ## #### + // ## ## ## ######### ## ## ## ## #### ## ## + // ## ## ## ## ## ## ## ## ## ## ### ## ## + // ###### ######## ######## ## ## ## ## #### ## ## ###### + // + // Clearing support: + // + + + /** + * Reset the contents and stats of this histogram + */ + @Override + public void reset() { + clearCounts(); + resetMaxValue(0); + resetMinNonZeroValue(Long.MAX_VALUE); + setNormalizingIndexOffset(0); + startTimeStampMsec = Long.MAX_VALUE; + endTimeStampMsec = 0; + tag = null; + } + + // ###### ####### ######## ## ## + // ## ## ## ## ## ## ## ## + // ## ## ## ## ## #### + // ## ## ## ######## ## + // ## ## ## ## ## + // ## ## ## ## ## ## + // ###### ####### ## ## + // + // Copy support: + // + + + /** + * Create a copy of this histogram, complete with data and everything. + * + * @return A distinct copy of this histogram. + */ + abstract public AbstractHistogram copy(); + + /** + * Get a copy of this histogram, corrected for coordinated omission. + *
<p> + * To compensate for the loss of sampled values when a recorded value is larger than the expected + * interval between value samples, the new histogram will include an auto-generated additional series of + * decreasingly-smaller (down to the expectedIntervalBetweenValueSamples) value records for each count found + * in the current histogram that is larger than the expectedIntervalBetweenValueSamples. + * + * Note: This is a post-correction method, as opposed to the at-recording correction method provided + * by {@link #recordValueWithExpectedInterval(long, long) recordValueWithExpectedInterval}. The two + * methods are mutually exclusive, and only one of the two should be used on a given data set to correct + * for the same coordinated omission issue. + * <p>
+ * See notes in the description of the Histogram calls for an illustration of why this corrective behavior is + * important. + * + * @param expectedIntervalBetweenValueSamples If expectedIntervalBetweenValueSamples is larger than 0, add + * auto-generated value records as appropriate if value is larger + * than expectedIntervalBetweenValueSamples + * @return a copy of this histogram, corrected for coordinated omission. + */ + abstract public AbstractHistogram copyCorrectedForCoordinatedOmission(long expectedIntervalBetweenValueSamples); + + /** + * Copy this histogram into the target histogram, overwriting it's contents. + * + * @param targetHistogram the histogram to copy into + */ + public void copyInto(final AbstractHistogram targetHistogram) { + targetHistogram.reset(); + targetHistogram.add(this); + targetHistogram.setStartTimeStamp(this.startTimeStampMsec); + targetHistogram.setEndTimeStamp(this.endTimeStampMsec); + } + + /** + * Copy this histogram, corrected for coordinated omission, into the target histogram, overwriting it's contents. + * (see {@link #copyCorrectedForCoordinatedOmission} for more detailed explanation about how correction is applied) + * + * @param targetHistogram the histogram to copy into + * @param expectedIntervalBetweenValueSamples If expectedIntervalBetweenValueSamples is larger than 0, add + * auto-generated value records as appropriate if value is larger + * than expectedIntervalBetweenValueSamples + */ + public void copyIntoCorrectedForCoordinatedOmission(final AbstractHistogram targetHistogram, + final long expectedIntervalBetweenValueSamples) { + targetHistogram.reset(); + targetHistogram.addWhileCorrectingForCoordinatedOmission(this, expectedIntervalBetweenValueSamples); + targetHistogram.setStartTimeStamp(this.startTimeStampMsec); + targetHistogram.setEndTimeStamp(this.endTimeStampMsec); + } + + // ### ######## ######## + // ## ## ## ## ## ## + // ## ## ## ## ## ## + // ## ## ## ## ## ## + // ######### ## ## ## ## + // ## ## ## ## ## ## + // ## ## ######## ######## + // + // Add support: + // + + + /** + * Add the contents of another histogram to this one. + *
<p>
+ * As part of adding the contents, the start/end timestamp range of this histogram will be + * extended to include the start/end timestamp range of the other histogram. + * + * @param otherHistogram The other histogram. + * @throws ArrayIndexOutOfBoundsException (may throw) if values in fromHistogram's are + * higher than highestTrackableValue. + */ + public void add(final AbstractHistogram otherHistogram) throws ArrayIndexOutOfBoundsException { + long highestRecordableValue = highestEquivalentValue(valueFromIndex(countsArrayLength - 1)); + if (highestRecordableValue < otherHistogram.getMaxValue()) { + if (!isAutoResize()) { + throw new ArrayIndexOutOfBoundsException( + "The other histogram includes values that do not fit in this histogram's range."); + } + resize(otherHistogram.getMaxValue()); + } + if ((bucketCount == otherHistogram.bucketCount) && + (subBucketCount == otherHistogram.subBucketCount) && + (unitMagnitude == otherHistogram.unitMagnitude) && + (getNormalizingIndexOffset() == otherHistogram.getNormalizingIndexOffset()) && + !(otherHistogram instanceof ConcurrentHistogram) ) { + // Counts arrays are of the same length and meaning, so we can just iterate and add directly: + long observedOtherTotalCount = 0; + for (int i = 0; i < otherHistogram.countsArrayLength; i++) { + long otherCount = otherHistogram.getCountAtIndex(i); + if (otherCount > 0) { + addToCountAtIndex(i, otherCount); + observedOtherTotalCount += otherCount; + } + } + setTotalCount(getTotalCount() + observedOtherTotalCount); + updatedMaxValue(Math.max(getMaxValue(), otherHistogram.getMaxValue())); + updateMinNonZeroValue(Math.min(getMinNonZeroValue(), otherHistogram.getMinNonZeroValue())); + } else { + // Arrays are not a direct match (or the other could change on the fly in some valid way), + // so we can't just stream through and add them. Instead, go through the array and add each + // non-zero value found at it's proper value: + + // Do max value first, to avoid max value updates on each iteration: + int otherMaxIndex = otherHistogram.countsArrayIndex(otherHistogram.getMaxValue()); + long otherCount = otherHistogram.getCountAtIndex(otherMaxIndex); + recordValueWithCount(otherHistogram.valueFromIndex(otherMaxIndex), otherCount); + + // Record the remaining values, up to but not including the max value: + for (int i = 0; i < otherMaxIndex; i++) { + otherCount = otherHistogram.getCountAtIndex(i); + if (otherCount > 0) { + recordValueWithCount(otherHistogram.valueFromIndex(i), otherCount); + } + } + } + setStartTimeStamp(Math.min(startTimeStampMsec, otherHistogram.startTimeStampMsec)); + setEndTimeStamp(Math.max(endTimeStampMsec, otherHistogram.endTimeStampMsec)); + } + + /** + * Subtract the contents of another histogram from this one. + *
<p>
+ * The start/end timestamps of this histogram will remain unchanged. + * + * @param otherHistogram The other histogram. + * @throws ArrayIndexOutOfBoundsException (may throw) if values in otherHistogram's are higher than highestTrackableValue. + * + */ + public void subtract(final AbstractHistogram otherHistogram) + throws ArrayIndexOutOfBoundsException, IllegalArgumentException { + if (highestEquivalentValue(otherHistogram.getMaxValue()) > + highestEquivalentValue(valueFromIndex(this.countsArrayLength - 1))) { + throw new IllegalArgumentException( + "The other histogram includes values that do not fit in this histogram's range."); + } + for (int i = 0; i < otherHistogram.countsArrayLength; i++) { + long otherCount = otherHistogram.getCountAtIndex(i); + if (otherCount > 0) { + long otherValue = otherHistogram.valueFromIndex(i); + if (getCountAtValue(otherValue) < otherCount) { + throw new IllegalArgumentException("otherHistogram count (" + otherCount + ") at value " + + otherValue + " is larger than this one's (" + getCountAtValue(otherValue) + ")"); + } + recordValueWithCount(otherValue, -otherCount); + } + } + // With subtraction, the max and minNonZero values could have changed: + if ((getCountAtValue(getMaxValue()) <= 0) || getCountAtValue(getMinNonZeroValue()) <= 0) { + establishInternalTackingValues(); + } + } + + /** + * Add the contents of another histogram to this one, while correcting the incoming data for coordinated omission. + *
<p> + * To compensate for the loss of sampled values when a recorded value is larger than the expected + * interval between value samples, the values added will include an auto-generated additional series of + * decreasingly-smaller (down to the expectedIntervalBetweenValueSamples) value records for each count found + * in the current histogram that is larger than the expectedIntervalBetweenValueSamples. + * + * Note: This is a post-recording correction method, as opposed to the at-recording correction method provided + * by {@link #recordValueWithExpectedInterval(long, long) recordValueWithExpectedInterval}. The two + * methods are mutually exclusive, and only one of the two should be used on a given data set to correct + * for the same coordinated omission issue. + * <p>
+ * See notes in the description of the Histogram calls for an illustration of why this corrective behavior is + * important. + * + * @param otherHistogram The other histogram. highestTrackableValue and largestValueWithSingleUnitResolution must match. + * @param expectedIntervalBetweenValueSamples If expectedIntervalBetweenValueSamples is larger than 0, add + * auto-generated value records as appropriate if value is larger + * than expectedIntervalBetweenValueSamples + * @throws ArrayIndexOutOfBoundsException (may throw) if values exceed highestTrackableValue + */ + public void addWhileCorrectingForCoordinatedOmission(final AbstractHistogram otherHistogram, + final long expectedIntervalBetweenValueSamples) { + final AbstractHistogram toHistogram = this; + + for (HistogramIterationValue v : otherHistogram.recordedValues()) { + toHistogram.recordValueWithCountAndExpectedInterval(v.getValueIteratedTo(), + v.getCountAtValueIteratedTo(), expectedIntervalBetweenValueSamples); + } + } + + // ###### ## ## #### ######## ######## #### ## ## ###### + // ## ## ## ## ## ## ## ## ### ## ## ## + // ## ## ## ## ## ## ## #### ## ## + // ###### ######### ## ###### ## ## ## ## ## ## #### + // ## ## ## ## ## ## ## ## #### ## ## + // ## ## ## ## ## ## ## ## ## ### ## ## + // ###### ## ## #### ## ## #### ## ## ###### + // + // + // Shifting support: + // + + + /** + * Shift recorded values to the left (the equivalent of a << shift operation on all recorded values). The + * configured integer value range limits and value precision setting will remain unchanged. + * + * An {@link ArrayIndexOutOfBoundsException} will be thrown if any recorded values may be lost + * as a result of the attempted operation, reflecting an "overflow" conditions. Expect such an overflow + * exception if the operation would cause the current maxValue to be scaled to a value that is outside + * of the covered value range. 
+ * + * @param numberOfBinaryOrdersOfMagnitude The number of binary orders of magnitude to shift by + */ + public void shiftValuesLeft(final int numberOfBinaryOrdersOfMagnitude) { + shiftValuesLeft(numberOfBinaryOrdersOfMagnitude, integerToDoubleValueConversionRatio); + } + + void shiftValuesLeft(final int numberOfBinaryOrdersOfMagnitude, final double newIntegerToDoubleValueConversionRatio) { + if (numberOfBinaryOrdersOfMagnitude < 0) { + throw new IllegalArgumentException("Cannot shift by a negative number of magnitudes"); + } + if (numberOfBinaryOrdersOfMagnitude == 0) { + return; + } + if (getTotalCount() == getCountAtIndex(0)) { + // (no need to shift any values if all recorded values are at the 0 value level:) + return; + } + + final int shiftAmount = numberOfBinaryOrdersOfMagnitude << subBucketHalfCountMagnitude; + int maxValueIndex = countsArrayIndex(getMaxValue()); + // indicate overflow if maxValue is in the range being wrapped: + if (maxValueIndex >= (countsArrayLength - shiftAmount)) { + throw new ArrayIndexOutOfBoundsException( + "Operation would overflow, would discard recorded value counts"); + } + + long maxValueBeforeShift = maxValueUpdater.getAndSet(this, 0); + long minNonZeroValueBeforeShift = minNonZeroValueUpdater.getAndSet(this, Long.MAX_VALUE); + + boolean lowestHalfBucketPopulated = (minNonZeroValueBeforeShift < (subBucketHalfCount << unitMagnitude)); + + // Perform the shift: + shiftNormalizingIndexByOffset(shiftAmount, lowestHalfBucketPopulated, newIntegerToDoubleValueConversionRatio); + + // adjust min, max: + updateMinAndMax(maxValueBeforeShift << numberOfBinaryOrdersOfMagnitude); + if (minNonZeroValueBeforeShift < Long.MAX_VALUE) { + updateMinAndMax(minNonZeroValueBeforeShift << numberOfBinaryOrdersOfMagnitude); + } + } + + void nonConcurrentNormalizingIndexShift(int shiftAmount, boolean lowestHalfBucketPopulated) { + + // Save and clear the 0 value count: + long zeroValueCount = getCountAtIndex(0); + setCountAtIndex(0, 0); + int preShiftZeroIndex = normalizeIndex(0, getNormalizingIndexOffset(), countsArrayLength); + + setNormalizingIndexOffset(getNormalizingIndexOffset() + shiftAmount); + + // Deal with lower half bucket if needed: + if (lowestHalfBucketPopulated) { + if (shiftAmount <= 0) { + // Shifts with lowest half bucket populated can only be to the left. + // Any right shift logic calling this should have already verified that + // the lowest half bucket is not populated. + throw new ArrayIndexOutOfBoundsException( + "Attempt to right-shift with already-recorded value counts that would underflow and lose precision"); + } + shiftLowestHalfBucketContentsLeft(shiftAmount, preShiftZeroIndex); + } + + // Restore the 0 value count: + setCountAtIndex(0, zeroValueCount); + } + + private void shiftLowestHalfBucketContentsLeft(int shiftAmount, int preShiftZeroIndex) { + final int numberOfBinaryOrdersOfMagnitude = shiftAmount >> subBucketHalfCountMagnitude; + + // The lowest half-bucket (not including the 0 value) is special: unlike all other half + // buckets, the lowest half bucket values cannot be scaled by simply changing the + // normalizing offset. Instead, they must be individually re-recorded at the new + // scale, and cleared from the current one. + // + // We know that all half buckets "below" the current lowest one are full of 0s, because + // we would have overflowed otherwise. So we need to shift the values in the current + // lowest half bucket into that range (including the current lowest half bucket itself). 
+ // Iterating up from the lowermost non-zero "from slot" and copying values to the newly + // scaled "to slot" (and then zeroing the "from slot"), will work in a single pass, + // because the scale "to slot" index will always be a lower index than its or any + // preceding non-scaled "from slot" index: + // + // (Note that we specifically avoid slot 0, as it is directly handled in the outer case) + + for (int fromIndex = 1; fromIndex < subBucketHalfCount; fromIndex++) { + long toValue = valueFromIndex(fromIndex) << numberOfBinaryOrdersOfMagnitude; + int toIndex = countsArrayIndex(toValue); + long countAtFromIndex = getCountAtNormalizedIndex(fromIndex + preShiftZeroIndex); + setCountAtIndex(toIndex, countAtFromIndex); + setCountAtNormalizedIndex(fromIndex + preShiftZeroIndex, 0); + } + + // Note that the above loop only creates O(N) work for histograms that have values in + // the lowest half-bucket (excluding the 0 value). Histograms that never have values + // there (e.g. all integer value histograms used as internal storage in DoubleHistograms) + // will never loop, and their shifts will remain O(1). + } + + /** + * Shift recorded values to the right (the equivalent of a >> shift operation on all recorded values). The + * configured integer value range limits and value precision setting will remain unchanged. + *
<p> + * Shift right operations that do not underflow are reversible with a shift left operation with no loss of + * information. An {@link ArrayIndexOutOfBoundsException} reflecting an "underflow" condition will be thrown + * if any recorded values may lose representation accuracy as a result of the attempted shift operation. + * <p>
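+ * <pre>
+ * // Editor's illustration: when no underflow occurs, an equal left shift
+ * // restores the original values:
+ * histogram.shiftValuesRight(1); // halve all recorded values
+ * histogram.shiftValuesLeft(1);  // double them back
+ * </pre>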
+ * For a shift of a single order of magnitude, expect such an underflow exception if any recorded non-zero + * values up to [numberOfSignificantValueDigits (rounded up to nearest power of 2) multiplied by + * (2 ^ numberOfBinaryOrdersOfMagnitude) currently exist in the histogram. + * + * @param numberOfBinaryOrdersOfMagnitude The number of binary orders of magnitude to shift by + */ + public void shiftValuesRight(final int numberOfBinaryOrdersOfMagnitude) { + shiftValuesRight(numberOfBinaryOrdersOfMagnitude, integerToDoubleValueConversionRatio); + } + + void shiftValuesRight(final int numberOfBinaryOrdersOfMagnitude, final double newIntegerToDoubleValueConversionRatio) { + if (numberOfBinaryOrdersOfMagnitude < 0) { + throw new IllegalArgumentException("Cannot shift by a negative number of magnitudes"); + } + if (numberOfBinaryOrdersOfMagnitude == 0) { + return; + } + if (getTotalCount() == getCountAtIndex(0)) { + // (no need to shift any values if all recorded values are at the 0 value level:) + return; + } + + final int shiftAmount = subBucketHalfCount * numberOfBinaryOrdersOfMagnitude; + + // indicate underflow if minValue is in the range being shifted from: + int minNonZeroValueIndex = countsArrayIndex(getMinNonZeroValue()); + + // Any shifting into the bottom-most half bucket would represents a loss of accuracy, + // and a non-reversible operation. Therefore any non-0 value that falls in an + // index below (shiftAmount + subBucketHalfCount) would represent an underflow: + // + // The fact that the top and bottom halves of the first bucket use the same scale + // means any shift into the bottom half is invalid. The shift requires that each + // successive subBucketCount be encoded with a scale 2x the previous one, as that + // is how the powers of 2 are applied. + // In particular, if the shift amount is such that it would shift something from + // the top half of the first bucket to the bottom half, that's all stored with the + // same unit, so half of a larger odd value couldn't be restored to its proper + // value by a subsequent left shift because we would need the bottom half to be + // encoded in half-units. + // Furthermore, shifts from anywhere (in the top half of the first bucket or + // beyond) will be incorrectly encoded if they end up in the bottom half. If + // subBucketHalfCount is, say, 1024, and the shift is by 1, the value for 1600 + // would become 576, which is certainly not 1600/2. With a shift of 2 and a + // value of 2112 (index 2048 + 32), the resulting value is 32, not 525. For + // comparison, with shift 2 and value 4096 (index 2048 + 1024 = 3072), 3072 - 2048 = 1024. + // That's the first entry in the top half of bucket 0, which encodes simply + // 1024 = 4096 / 4. Thus, any non-0 value that falls in an index below + // (shiftAmount + subBucketHalfCount) would represent an underflow. 
+ // + + if (minNonZeroValueIndex < shiftAmount + subBucketHalfCount) { + throw new ArrayIndexOutOfBoundsException( + "Operation would underflow and lose precision of already recorded value counts"); + } + + // perform shift: + + long maxValueBeforeShift = maxValueUpdater.getAndSet(this, 0); + long minNonZeroValueBeforeShift = minNonZeroValueUpdater.getAndSet(this, Long.MAX_VALUE); + + // move normalizingIndexOffset + shiftNormalizingIndexByOffset(-shiftAmount, false, newIntegerToDoubleValueConversionRatio); + + // adjust min, max: + updateMinAndMax(maxValueBeforeShift >> numberOfBinaryOrdersOfMagnitude); + if (minNonZeroValueBeforeShift < Long.MAX_VALUE) { + updateMinAndMax(minNonZeroValueBeforeShift >> numberOfBinaryOrdersOfMagnitude); + } + } + + // ###### ####### ## ## ######## ### ######## #### ###### ####### ## ## + // ## ## ## ## ### ### ## ## ## ## ## ## ## ## ## ## ## ### ## + // ## ## ## #### #### ## ## ## ## ## ## ## ## ## ## #### ## + // ## ## ## ## ### ## ######## ## ## ######## ## ###### ## ## ## ## ## + // ## ## ## ## ## ## ######### ## ## ## ## ## ## ## #### + // ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### + // ###### ####### ## ## ## ## ## ## ## #### ###### ####### ## ## + // + // Comparison support: + // + + + /** + * Determine if this histogram is equivalent to another. + * + * @param other the other histogram to compare to + * @return True if this histogram are equivalent with the other. + */ + public boolean equals(final Object other){ + if ( this == other ) { + return true; + } + if ( !(other instanceof AbstractHistogram) ) { + return false; + } + AbstractHistogram that = (AbstractHistogram)other; + if ((lowestDiscernibleValue != that.lowestDiscernibleValue) || + (numberOfSignificantValueDigits != that.numberOfSignificantValueDigits) || + (integerToDoubleValueConversionRatio != that.integerToDoubleValueConversionRatio)) { + return false; + } + if (getTotalCount() != that.getTotalCount()) { + return false; + } + if (getMaxValue() != that.getMaxValue()) { + return false; + } + if (getMinNonZeroValue() != that.getMinNonZeroValue()) { + return false; + } + // 2 histograms may be equal but have different underlying array sizes. This can happen for instance due to + // resizing. + if (countsArrayLength == that.countsArrayLength) { + for (int i = 0; i < countsArrayLength; i++) { + if (getCountAtIndex(i) != that.getCountAtIndex(i)) { + return false; + } + } + } + else + { + // Comparing the values is valid here because we have already confirmed the histograms have the same total + // count. It would not be correct otherwise. 
+ for (HistogramIterationValue value : this.recordedValues()) { + long countAtValueIteratedTo = value.getCountAtValueIteratedTo(); + long valueIteratedTo = value.getValueIteratedTo(); + if (that.getCountAtValue(valueIteratedTo) != countAtValueIteratedTo) { + return false; + } + } + } + return true; + } + + @Override + public int hashCode() { + int h = 0; + h = oneAtATimeHashStep(h, unitMagnitude); + h = oneAtATimeHashStep(h, numberOfSignificantValueDigits); + h = oneAtATimeHashStep(h, (int) getTotalCount()); + h = oneAtATimeHashStep(h, (int) getMaxValue()); + h = oneAtATimeHashStep(h, (int) getMinNonZeroValue()); + h += (h << 3); + h ^= (h >> 11); + h += (h << 15); + return h; + } + + private int oneAtATimeHashStep(int h, final int v) { + h += v; + h += (h << 10); + h ^= (h >> 6); + return h; + } + + // ###### ######## ######## ## ## ###### ######## ## ## ######## ######## + // ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## + // ## ## ## ## ## ## ## ## ## ## ## ## ## + // ###### ## ######## ## ## ## ## ## ## ######## ###### + // ## ## ## ## ## ## ## ## ## ## ## ## ## + // ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## + // ###### ## ## ## ####### ###### ## ####### ## ## ######## + // + // ####### ## ## ######## ######## ## ## #### ## ## ###### + // ## ## ## ## ## ## ## ## ## ## ### ## ## ## + // ## ## ## ## ## ## ## #### ## #### ## ## + // ## ## ## ## ###### ######## ## ## ## ## ## ## #### + // ## ## ## ## ## ## ## ## ## ## ## #### ## ## + // ## ## ## ## ## ## ## ## ## ## ### ## ## + // ##### ## ####### ######## ## ## ## #### ## ## ###### + // + // Histogram structure querying support: + // + + /** + * get the configured lowestDiscernibleValue + * @return lowestDiscernibleValue + */ + public long getLowestDiscernibleValue() { + return lowestDiscernibleValue; + } + + /** + * get the configured highestTrackableValue + * @return highestTrackableValue + */ + public long getHighestTrackableValue() { + return highestTrackableValue; + } + + /** + * get the configured numberOfSignificantValueDigits + * @return numberOfSignificantValueDigits + */ + public int getNumberOfSignificantValueDigits() { + return numberOfSignificantValueDigits; + } + + /** + * Get the size (in value units) of the range of values that are equivalent to the given value within the + * histogram's resolution. Where "equivalent" means that value samples recorded for any two + * equivalent values are counted in a common total count. + * + * @param value The given value + * @return The size of the range of values equivalent to the given value. + */ + public long sizeOfEquivalentValueRange(final long value) { + final int bucketIndex = getBucketIndex(value); + long distanceToNextValue = 1L << (unitMagnitude + bucketIndex); + return distanceToNextValue; + } + + /** + * Get the lowest value that is equivalent to the given value within the histogram's resolution. + * Where "equivalent" means that value samples recorded for any two + * equivalent values are counted in a common total count. + * + * @param value The given value + * @return The lowest value that is equivalent to the given value within the histogram's resolution. + */ + public long lowestEquivalentValue(final long value) { + final int bucketIndex = getBucketIndex(value); + final int subBucketIndex = getSubBucketIndex(value, bucketIndex); + long thisValueBaseLevel = valueFromIndex(bucketIndex, subBucketIndex); + return thisValueBaseLevel; + } + + /** + * Get the highest value that is equivalent to the given value within the histogram's resolution. 
+     * Where "equivalent" means that value samples recorded for any two
+     * equivalent values are counted in a common total count.
+     *
+     * @param value The given value
+     * @return The highest value that is equivalent to the given value within the histogram's resolution.
+     */
+    public long highestEquivalentValue(final long value) {
+        return nextNonEquivalentValue(value) - 1;
+    }
+
+    /**
+     * Get a value that lies in the middle (rounded up) of the range of values equivalent to the given value.
+     * Where "equivalent" means that value samples recorded for any two
+     * equivalent values are counted in a common total count.
+     *
+     * @param value The given value
+     * @return The value that lies in the middle (rounded up) of the range of values equivalent to the given value.
+     */
+    public long medianEquivalentValue(final long value) {
+        return (lowestEquivalentValue(value) + (sizeOfEquivalentValueRange(value) >> 1));
+    }
+
+    /**
+     * Get the next value that is not equivalent to the given value within the histogram's resolution.
+     * Where "equivalent" means that value samples recorded for any two
+     * equivalent values are counted in a common total count.
+     *
+     * @param value The given value
+     * @return The next value that is not equivalent to the given value within the histogram's resolution.
+     */
+    public long nextNonEquivalentValue(final long value) {
+        return lowestEquivalentValue(value) + sizeOfEquivalentValueRange(value);
+    }
+
+    /**
+     * Determine if two values are equivalent within the histogram's resolution.
+     * Where "equivalent" means that value samples recorded for any two
+     * equivalent values are counted in a common total count.
+     *
+     * @param value1 first value to compare
+     * @param value2 second value to compare
+     * @return True if values are equivalent within the histogram's resolution.
+     */
+    public boolean valuesAreEquivalent(final long value1, final long value2) {
+        return (lowestEquivalentValue(value1) == lowestEquivalentValue(value2));
+    }
+
+    /**
+     * Provide a (conservatively high) estimate of the Histogram's total footprint in bytes
+     *
+     * @return a (conservatively high) estimate of the Histogram's total footprint in bytes
+     */
+    public int getEstimatedFootprintInBytes() {
+        return _getEstimatedFootprintInBytes();
+    }
+
+    // ######## #### ## ## ######## ###### ######## ### ## ## ########
+    // ## ## ### ### ## ## ## ## ## ## ### ### ## ##
+    // ## ## #### #### ## ## ## ## ## #### #### ## ##
+    // ## ## ## ### ## ###### ###### ## ## ## ## ### ## ########
+    // ## ## ## ## ## ## ## ######### ## ## ##
+    // ## ## ## ## ## ## ## ## ## ## ## ## ##
+    // ## #### ## ## ######## ###### ## ## ## ## ## ##
+    //
+    // #### ######## ### ######
+    // ## ## ## ## ## ## ##
+    // #### ## ## ## ##
+    // #### ## ## ## ## ####
+    // ## ## ## ## ######### ## ##
+    // ## ## ## ## ## ## ##
+    // #### ## ## ## ## ######
+    //
+    // Timestamp and tag support:
+    //
+
+    /**
+     * get the start time stamp [optionally] stored with this histogram
+     * @return the start time stamp [optionally] stored with this histogram
+     */
+    @Override
+    public long getStartTimeStamp() {
+        return startTimeStampMsec;
+    }
+
+    /**
+     * Set the start time stamp value associated with this histogram to a given value.
+     * @param timeStampMsec the value to set the time stamp to, [by convention] in msec since the epoch.
+ */ + @Override + public void setStartTimeStamp(final long timeStampMsec) { + this.startTimeStampMsec = timeStampMsec; + } + + /** + * get the end time stamp [optionally] stored with this histogram + * @return the end time stamp [optionally] stored with this histogram + */ + @Override + public long getEndTimeStamp() { + return endTimeStampMsec; + } + + /** + * Set the end time stamp value associated with this histogram to a given value. + * @param timeStampMsec the value to set the time stamp to, [by convention] in msec since the epoch. + */ + @Override + public void setEndTimeStamp(final long timeStampMsec) { + this.endTimeStampMsec = timeStampMsec; + } + + /** + * get the tag string [optionally] associated with this histogram + * @return tag string [optionally] associated with this histogram + */ + public String getTag() { + return tag; + } + + /** + * Set the tag string associated with this histogram + * @param tag the tag string to associate with this histogram + */ + public void setTag(String tag) { + this.tag = tag; + } + + // ######## ### ######## ### ### ###### ###### ######## ###### ###### + // ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## + // ## ## ## ## ## ## ## ## ## ## ## ## ## ## + // ## ## ## ## ## ## ## ## ## ## ## ###### ###### ###### + // ## ## ######### ## ######### ######### ## ## ## ## ## + // ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## + // ######## ## ## ## ## ## ## ## ###### ###### ######## ###### ###### + // + // Histogram Data access support: + // + + + /** + * Get the lowest recorded value level in the histogram. If the histogram has no recorded values, + * the value returned is undefined. + * + * @return the Min value recorded in the histogram + */ + public long getMinValue() { + if ((getCountAtIndex(0) > 0) || (getTotalCount() == 0)) { + return 0; + } + return getMinNonZeroValue(); + } + + /** + * Get the highest recorded value level in the histogram. If the histogram has no recorded values, + * the value returned is undefined. + * + * @return the Max value recorded in the histogram + */ + public long getMaxValue() { + return (maxValue == 0) ? 0 : highestEquivalentValue(maxValue); + } + + /** + * Get the lowest recorded non-zero value level in the histogram. If the histogram has no recorded values, + * the value returned is undefined. + * + * @return the lowest recorded non-zero value level in the histogram + */ + public long getMinNonZeroValue() { + return (minNonZeroValue == Long.MAX_VALUE) ? 
+ Long.MAX_VALUE : lowestEquivalentValue(minNonZeroValue); + } + + /** + * Get the highest recorded value level in the histogram as a double + * + * @return the Max value recorded in the histogram + */ + @Override + public double getMaxValueAsDouble() { + return getMaxValue(); + } + + /** + * Get the computed mean value of all recorded values in the histogram + * + * @return the mean value (in value units) of the histogram data + */ + public double getMean() { + if (getTotalCount() == 0) { + return 0.0; + } + recordedValuesIterator.reset(); + double totalValue = 0; + while (recordedValuesIterator.hasNext()) { + HistogramIterationValue iterationValue = recordedValuesIterator.next(); + totalValue += medianEquivalentValue(iterationValue.getValueIteratedTo()) + * (double) iterationValue.getCountAtValueIteratedTo(); + } + return (totalValue * 1.0) / getTotalCount(); + } + + /** + * Get the computed standard deviation of all recorded values in the histogram + * + * @return the standard deviation (in value units) of the histogram data + */ + public double getStdDeviation() { + if (getTotalCount() == 0) { + return 0.0; + } + final double mean = getMean(); + double geometric_deviation_total = 0.0; + recordedValuesIterator.reset(); + while (recordedValuesIterator.hasNext()) { + HistogramIterationValue iterationValue = recordedValuesIterator.next(); + double deviation = (medianEquivalentValue(iterationValue.getValueIteratedTo()) * 1.0) - mean; + geometric_deviation_total += (deviation * deviation) * iterationValue.getCountAddedInThisIterationStep(); + } + double std_deviation = Math.sqrt(geometric_deviation_total / getTotalCount()); + return std_deviation; + } + + /** + * Get the value at a given percentile. + * Returns the largest value that (100% - percentile) [+/- 1 ulp] of the overall recorded value entries + * in the histogram are either larger than or equivalent to. Returns 0 if no recorded values exist. + *
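+     * <p>
+     * Illustrative usage (editor's sketch, not part of the original source; assumes the
+     * accompanying {@code Histogram} subclass):
+     * <pre>{@code
+     * Histogram h = new Histogram(3600000000L, 3);
+     * h.recordValue(42);
+     * long p50 = h.getValueAtPercentile(50.0); // returns 42 here
+     * }</pre>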
+ * Note that two values are "equivalent" in this statement if + * {@link AbstractHistogram#valuesAreEquivalent} would return true. + * + * @param percentile The percentile for which to return the associated value + * @return The largest value that (100% - percentile) [+/- 1 ulp] of the overall recorded value entries + * in the histogram are either larger than or equivalent to. Returns 0 if no recorded values exist. + */ + public long getValueAtPercentile(final double percentile) { + // Truncate to 0..100%, and remove 1 ulp to avoid roundoff overruns into next bucket when we + // subsequently round up to the nearest integer: + double requestedPercentile = + Math.min(Math.max(Math.nextAfter(percentile, Double.NEGATIVE_INFINITY), 0.0D), 100.0D); + // derive the count at the requested percentile. We round up to nearest integer to ensure that the + // largest value that the requested percentile of overall recorded values is <= is actually included. + double fpCountAtPercentile = (requestedPercentile * getTotalCount()) / 100.0D; + long countAtPercentile = (long)(Math.ceil(fpCountAtPercentile)); // round up + + countAtPercentile = Math.max(countAtPercentile, 1); // Make sure we at least reach the first recorded entry + long totalToCurrentIndex = 0; + for (int i = 0; i < countsArrayLength; i++) { + totalToCurrentIndex += getCountAtIndex(i); + if (totalToCurrentIndex >= countAtPercentile) { + long valueAtIndex = valueFromIndex(i); + return (percentile == 0.0) ? + lowestEquivalentValue(valueAtIndex) : + highestEquivalentValue(valueAtIndex); + } + } + return 0; + } + + /** + * Get the percentile at a given value. + * The percentile returned is the percentile of values recorded in the histogram that are smaller + * than or equivalent to the given value. + *
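+     * <p>
+     * Illustrative property (editor's note, not part of the original source):
+     * {@code getPercentileAtOrBelowValue(getMaxValue())} returns 100.0 for any histogram
+     * that holds at least one recorded value.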
+ * Note that two values are "equivalent" in this statement if + * {@link AbstractHistogram#valuesAreEquivalent} would return true. + * + * @param value The value for which to return the associated percentile + * @return The percentile of values recorded in the histogram that are smaller than or equivalent + * to the given value. + */ + public double getPercentileAtOrBelowValue(final long value) { + if (getTotalCount() == 0) { + return 100.0; + } + final int targetIndex = Math.min(countsArrayIndex(value), (countsArrayLength - 1)); + long totalToCurrentIndex = 0; + for (int i = 0; i <= targetIndex; i++) { + totalToCurrentIndex += getCountAtIndex(i); + } + return (100.0 * totalToCurrentIndex) / getTotalCount(); + } + + /** + * Get the count of recorded values within a range of value levels (inclusive to within the histogram's resolution). + * + * @param lowValue The lower value bound on the range for which + * to provide the recorded count. Will be rounded down with + * {@link Histogram#lowestEquivalentValue lowestEquivalentValue}. + * @param highValue The higher value bound on the range for which to provide the recorded count. + * Will be rounded up with {@link Histogram#highestEquivalentValue highestEquivalentValue}. + * @return the total count of values recorded in the histogram within the value range that is + * {@literal >=} lowestEquivalentValue(lowValue) and {@literal <=} highestEquivalentValue(highValue) + */ + public long getCountBetweenValues(final long lowValue, final long highValue) throws ArrayIndexOutOfBoundsException { + final int lowIndex = Math.max(0, countsArrayIndex(lowValue)); + final int highIndex = Math.min(countsArrayIndex(highValue), (countsArrayLength - 1)); + long count = 0; + for (int i = lowIndex ; i <= highIndex; i++) { + count += getCountAtIndex(i); + } + return count; + } + + /** + * Get the count of recorded values at a specific value (to within the histogram resolution at the value level). + * + * @param value The value for which to provide the recorded count + * @return The total count of values recorded in the histogram within the value range that is + * {@literal >=} lowestEquivalentValue(value) and {@literal <=} highestEquivalentValue(value) + */ + public long getCountAtValue(final long value) throws ArrayIndexOutOfBoundsException { + final int index = Math.min(Math.max(0, countsArrayIndex(value)), (countsArrayLength - 1)); + return getCountAtIndex(index); + } + + // #### ######## ######## ######## ### ######## #### ####### ## ## + // ## ## ## ## ## ## ## ## ## ## ## ### ## + // ## ## ## ## ## ## ## ## ## ## ## #### ## + // ## ## ###### ######## ## ## ## ## ## ## ## ## ## + // ## ## ## ## ## ######### ## ## ## ## ## #### + // ## ## ## ## ## ## ## ## ## ## ## ## ### + // #### ## ######## ## ## ## ## ## #### ####### ## ## + // + // Iteration Support: + // + /** + * Provide a means of iterating through histogram values according to percentile levels. The iteration is + * performed in steps that start at 0% and reduce their distance to 100% according to the + * percentileTicksPerHalfDistance parameter, ultimately reaching 100% when all recorded histogram + * values are exhausted. + *
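+     * <p>
+     * Illustrative usage (editor's sketch, not part of the original source; {@code h} is a
+     * hypothetical histogram with recorded values):
+     * <pre>{@code
+     * for (HistogramIterationValue v : h.percentiles(5)) {
+     *     System.out.println(v.getPercentileLevelIteratedTo() + ": " + v.getValueIteratedTo());
+     * }
+     * }</pre>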
+ * @param percentileTicksPerHalfDistance The number of iteration steps per half-distance to 100%. + * @return An {@link Iterable}{@literal <}{@link HistogramIterationValue}{@literal >} + * through the histogram using a + * {@link PercentileIterator} + */ + public Percentiles percentiles(final int percentileTicksPerHalfDistance) { + return new Percentiles(this, percentileTicksPerHalfDistance); + } + + /** + * Provide a means of iterating through histogram values using linear steps. The iteration is + * performed in steps of valueUnitsPerBucket in size, terminating when all recorded histogram + * values are exhausted. + * + * @param valueUnitsPerBucket The size (in value units) of the linear buckets to use + * @return An {@link Iterable}{@literal <}{@link HistogramIterationValue}{@literal >} + * through the histogram using a + * {@link LinearIterator} + */ + public LinearBucketValues linearBucketValues(final long valueUnitsPerBucket) { + return new LinearBucketValues(this, valueUnitsPerBucket); + } + + /** + * Provide a means of iterating through histogram values at logarithmically increasing levels. The iteration is + * performed in steps that start at valueUnitsInFirstBucket and increase exponentially according to + * logBase, terminating when all recorded histogram values are exhausted. + * + * @param valueUnitsInFirstBucket The size (in value units) of the first bucket in the iteration + * @param logBase The multiplier by which bucket sizes will grow in each iteration step + * @return An {@link Iterable}{@literal <}{@link HistogramIterationValue}{@literal >} + * through the histogram using + * a {@link LogarithmicIterator} + */ + public LogarithmicBucketValues logarithmicBucketValues(final long valueUnitsInFirstBucket, final double logBase) { + return new LogarithmicBucketValues(this, valueUnitsInFirstBucket, logBase); + } + + /** + * Provide a means of iterating through all recorded histogram values using the finest granularity steps + * supported by the underlying representation. The iteration steps through all non-zero recorded value counts, + * and terminates when all recorded histogram values are exhausted. + * + * @return An {@link Iterable}{@literal <}{@link HistogramIterationValue}{@literal >} + * through the histogram using + * a {@link RecordedValuesIterator} + */ + public RecordedValues recordedValues() { + return new RecordedValues(this); + } + + /** + * Provide a means of iterating through all histogram values using the finest granularity steps supported by + * the underlying representation. The iteration steps through all possible unit value levels, regardless of + * whether or not there were recorded values for that value level, and terminates when all recorded histogram + * values are exhausted. 
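+     * <p>
+     * Illustrative usage (editor's sketch, not part of the original source; {@code h} is a
+     * hypothetical histogram):
+     * <pre>{@code
+     * for (HistogramIterationValue v : h.allValues()) {
+     *     System.out.println(v.getValueIteratedTo() + " x " + v.getCountAtValueIteratedTo());
+     * }
+     * }</pre>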
+     *
+     * @return An {@link Iterable}{@literal <}{@link HistogramIterationValue}{@literal >}
+     * through the histogram using
+     * an {@link AllValuesIterator}
+     */
+    public AllValues allValues() {
+        return new AllValues(this);
+    }
+
+    // Percentile iterator support:
+
+    /**
+     * An {@link Iterable}{@literal <}{@link HistogramIterationValue}{@literal >} through
+     * the histogram using a {@link PercentileIterator}
+     */
+    public class Percentiles implements Iterable<HistogramIterationValue> {
+        final AbstractHistogram histogram;
+        final int percentileTicksPerHalfDistance;
+
+        private Percentiles(final AbstractHistogram histogram, final int percentileTicksPerHalfDistance) {
+            this.histogram = histogram;
+            this.percentileTicksPerHalfDistance = percentileTicksPerHalfDistance;
+        }
+
+        /**
+         * @return A {@link PercentileIterator}{@literal <}{@link HistogramIterationValue}{@literal >}
+         */
+        @Override
+        public Iterator<HistogramIterationValue> iterator() {
+            return new PercentileIterator(histogram, percentileTicksPerHalfDistance);
+        }
+    }
+
+    // Linear iterator support:
+
+    /**
+     * An {@link Iterable}{@literal <}{@link HistogramIterationValue}{@literal >} through
+     * the histogram using a {@link LinearIterator}
+     */
+    public class LinearBucketValues implements Iterable<HistogramIterationValue> {
+        final AbstractHistogram histogram;
+        final long valueUnitsPerBucket;
+
+        private LinearBucketValues(final AbstractHistogram histogram, final long valueUnitsPerBucket) {
+            this.histogram = histogram;
+            this.valueUnitsPerBucket = valueUnitsPerBucket;
+        }
+
+        /**
+         * @return A {@link LinearIterator}{@literal <}{@link HistogramIterationValue}{@literal >}
+         */
+        public Iterator<HistogramIterationValue> iterator() {
+            return new LinearIterator(histogram, valueUnitsPerBucket);
+        }
+    }
+
+    // Logarithmic iterator support:
+
+    /**
+     * An {@link Iterable}{@literal <}{@link HistogramIterationValue}{@literal >} through
+     * the histogram using a {@link LogarithmicIterator}
+     */
+    public class LogarithmicBucketValues implements Iterable<HistogramIterationValue> {
+        final AbstractHistogram histogram;
+        final long valueUnitsInFirstBucket;
+        final double logBase;
+
+        private LogarithmicBucketValues(final AbstractHistogram histogram,
+                                        final long valueUnitsInFirstBucket, final double logBase) {
+            this.histogram = histogram;
+            this.valueUnitsInFirstBucket = valueUnitsInFirstBucket;
+            this.logBase = logBase;
+        }
+
+        /**
+         * @return A {@link LogarithmicIterator}{@literal <}{@link HistogramIterationValue}{@literal >}
+         */
+        public Iterator<HistogramIterationValue> iterator() {
+            return new LogarithmicIterator(histogram, valueUnitsInFirstBucket, logBase);
+        }
+    }
+
+    // Recorded value iterator support:
+
+    /**
+     * An {@link Iterable}{@literal <}{@link HistogramIterationValue}{@literal >} through
+     * the histogram using a {@link RecordedValuesIterator}
+     */
+    public class RecordedValues implements Iterable<HistogramIterationValue> {
+        final AbstractHistogram histogram;
+
+        private RecordedValues(final AbstractHistogram histogram) {
+            this.histogram = histogram;
+        }
+
+        /**
+         * @return A {@link RecordedValuesIterator}{@literal <}{@link HistogramIterationValue}{@literal >}
+         */
+        public Iterator<HistogramIterationValue> iterator() {
+            return new RecordedValuesIterator(histogram);
+        }
+    }
+
+    // AllValues iterator support:
+
+    /**
+     * An {@link Iterable}{@literal <}{@link HistogramIterationValue}{@literal >} through
+     * the histogram using an {@link AllValuesIterator}
+     */
+    public class AllValues implements Iterable<HistogramIterationValue> {
+        final AbstractHistogram histogram;
+
+        private AllValues(final AbstractHistogram histogram) {
+            this.histogram = histogram;
+        }
+
+        /**
+         * @return An {@link AllValuesIterator}{@literal <}{@link
HistogramIterationValue}{@literal >}
+         */
+        public Iterator<HistogramIterationValue> iterator() {
+            return new AllValuesIterator(histogram);
+        }
+    }
+
+    // ######## ######## ######## ###### ######## ## ## ######## #### ## ########
+    // ## ## ## ## ## ## ## ## ## ### ## ## ## ## ## ##
+    // ## ## ## ## ## ## ## ## ## #### ## ## ## ## ## ##
+    // ######## ###### ######## ## ###### ## ## ## ## ## ## ######
+    // ## ## ## ## ## ## ## #### ## ## ## ##
+    // ## ## ## ## ## ## ## ## ### ## ## ## ##
+    // ## ######## ## ## ###### ######## ## ## ## #### ######## ########
+    //
+    // ####### ## ## ######## ######## ## ## ########
+    // ## ## ## ## ## ## ## ## ## ## ##
+    // ## ## ## ## ## ## ## ## ## ##
+    // ## ## ## ## ## ######## ## ## ##
+    // ## ## ## ## ## ## ## ## ##
+    // ## ## ## ## ## ## ## ## ##
+    // ####### ####### ## ## ####### ##
+    //
+    // Textual percentile output support:
+    //
+
+    /**
+     * Produce textual representation of the value distribution of histogram data by percentile. The distribution is
+     * output with exponentially increasing resolution, with each exponentially decreasing half-distance containing
+     * five (5) percentile reporting tick points.
+     *
+     * @param printStream Stream into which the distribution will be output
+     *
+     * @param outputValueUnitScalingRatio The scaling factor by which to divide histogram recorded values units in
+     *                                    output
+     */
+    public void outputPercentileDistribution(final PrintStream printStream,
+                                             final Double outputValueUnitScalingRatio) {
+        outputPercentileDistribution(printStream, 5, outputValueUnitScalingRatio);
+    }
+
+    /**
+     * Produce textual representation of the value distribution of histogram data by percentile. The distribution is
+     * output with exponentially increasing resolution, with each exponentially decreasing half-distance containing
+     * percentileTicksPerHalfDistance percentile reporting tick points.
+     *
+     * @param printStream Stream into which the distribution will be output
+     *
+ * @param percentileTicksPerHalfDistance The number of reporting points per exponentially decreasing half-distance + *
+     * @param outputValueUnitScalingRatio The scaling factor by which to divide histogram recorded values units in
+     *                                    output
+     */
+    public void outputPercentileDistribution(final PrintStream printStream,
+                                             final int percentileTicksPerHalfDistance,
+                                             final Double outputValueUnitScalingRatio) {
+        outputPercentileDistribution(printStream, percentileTicksPerHalfDistance, outputValueUnitScalingRatio, false);
+    }
+
+    /**
+     * Produce textual representation of the value distribution of histogram data by percentile. The distribution is
+     * output with exponentially increasing resolution, with each exponentially decreasing half-distance containing
+     * percentileTicksPerHalfDistance percentile reporting tick points.
+     *
+     * @param printStream Stream into which the distribution will be output
+     *
+ * @param percentileTicksPerHalfDistance The number of reporting points per exponentially decreasing half-distance + *
+ * @param outputValueUnitScalingRatio The scaling factor by which to divide histogram recorded values units in + * output + * @param useCsvFormat Output in CSV format if true. Otherwise use plain text form. + */ + public void outputPercentileDistribution(final PrintStream printStream, + final int percentileTicksPerHalfDistance, + final Double outputValueUnitScalingRatio, + final boolean useCsvFormat) { + + if (useCsvFormat) { + printStream.format("\"Value\",\"Percentile\",\"TotalCount\",\"1/(1-Percentile)\"\n"); + } else { + printStream.format("%12s %14s %10s %14s\n\n", "Value", "Percentile", "TotalCount", "1/(1-Percentile)"); + } + + PercentileIterator iterator = percentileIterator; + iterator.reset(percentileTicksPerHalfDistance); + + String percentileFormatString; + String lastLinePercentileFormatString; + if (useCsvFormat) { + percentileFormatString = "%." + numberOfSignificantValueDigits + "f,%.12f,%d,%.2f\n"; + lastLinePercentileFormatString = "%." + numberOfSignificantValueDigits + "f,%.12f,%d,Infinity\n"; + } else { + percentileFormatString = "%12." + numberOfSignificantValueDigits + "f %2.12f %10d %14.2f\n"; + lastLinePercentileFormatString = "%12." + numberOfSignificantValueDigits + "f %2.12f %10d\n"; + } + + while (iterator.hasNext()) { + HistogramIterationValue iterationValue = iterator.next(); + if (iterationValue.getPercentileLevelIteratedTo() != 100.0D) { + printStream.format(Locale.US, percentileFormatString, + iterationValue.getValueIteratedTo() / outputValueUnitScalingRatio, + iterationValue.getPercentileLevelIteratedTo()/100.0D, + iterationValue.getTotalCountToThisValue(), + 1/(1.0D - (iterationValue.getPercentileLevelIteratedTo()/100.0D)) ); + } else { + printStream.format(Locale.US, lastLinePercentileFormatString, + iterationValue.getValueIteratedTo() / outputValueUnitScalingRatio, + iterationValue.getPercentileLevelIteratedTo()/100.0D, + iterationValue.getTotalCountToThisValue()); + } + } + + if (!useCsvFormat) { + // Calculate and output mean and std. deviation. + // Note: mean/std. deviation numbers are very often completely irrelevant when + // data is extremely non-normal in distribution (e.g. in cases of strong multi-modal + // response time distribution associated with GC pauses). However, reporting these numbers + // can be very useful for contrasting with the detailed percentile distribution + // reported by outputPercentileDistribution(). It is not at all surprising to find + // percentile distributions where results fall many tens or even hundreds of standard + // deviations away from the mean - such results simply indicate that the data sampled + // exhibits a very non-normal distribution, highlighting situations for which the std. + // deviation metric is a useless indicator. + // + + double mean = getMean() / outputValueUnitScalingRatio; + double std_deviation = getStdDeviation() / outputValueUnitScalingRatio; + printStream.format(Locale.US, + "#[Mean = %12." + numberOfSignificantValueDigits + "f, StdDeviation = %12." + + numberOfSignificantValueDigits +"f]\n", + mean, std_deviation); + printStream.format(Locale.US, + "#[Max = %12." 
+ numberOfSignificantValueDigits + "f, Total count = %12d]\n",
+                    getMaxValue() / outputValueUnitScalingRatio, getTotalCount());
+            printStream.format(Locale.US, "#[Buckets = %12d, SubBuckets = %12d]\n",
+                    bucketCount, subBucketCount);
+        }
+    }
+
+    // ###### ######## ######## #### ### ## #### ######## ### ######## #### ####### ## ##
+    // ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### ##
+    // ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## #### ##
+    // ###### ###### ######## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##
+    // ## ## ## ## ## ######### ## ## ## ######### ## ## ## ## ## ####
+    // ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ###
+    // ###### ######## ## ## #### ## ## ######## #### ######## ## ## ## #### ####### ## ##
+    //
+    // Serialization support:
+    //
+
+    private static final long serialVersionUID = 0x1c849302;
+
+    private void writeObject(final ObjectOutputStream o)
+            throws IOException
+    {
+        o.writeLong(lowestDiscernibleValue);
+        o.writeLong(highestTrackableValue);
+        o.writeInt(numberOfSignificantValueDigits);
+        o.writeInt(getNormalizingIndexOffset());
+        o.writeDouble(integerToDoubleValueConversionRatio);
+        o.writeLong(getTotalCount());
+        // Max Value is added to the serialized form because establishing max via scanning is "harder" during
+        // deserialization, as the counts array is not available at the subclass deserializing level, and we don't
+        // really want to have each subclass establish max on its own...
+        o.writeLong(maxValue);
+        o.writeLong(minNonZeroValue);
+        o.writeLong(startTimeStampMsec);
+        o.writeLong(endTimeStampMsec);
+        o.writeBoolean(autoResize);
+        o.writeInt(wordSizeInBytes);
+    }
+
+    private void readObject(final ObjectInputStream o)
+            throws IOException, ClassNotFoundException {
+        final long lowestDiscernibleValue = o.readLong();
+        final long highestTrackableValue = o.readLong();
+        final int numberOfSignificantValueDigits = o.readInt();
+        final int normalizingIndexOffset = o.readInt();
+        final double integerToDoubleValueConversionRatio = o.readDouble();
+        final long indicatedTotalCount = o.readLong();
+        final long indicatedMaxValue = o.readLong();
+        final long indicatedMinNonZeroValue = o.readLong();
+        final long indicatedStartTimeStampMsec = o.readLong();
+        final long indicatedEndTimeStampMsec = o.readLong();
+        final boolean indicatedAutoResize = o.readBoolean();
+        final int indicatedWordSizeInBytes = o.readInt();
+
+        init(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits,
+                integerToDoubleValueConversionRatio, normalizingIndexOffset);
+        // Set internalTrackingValues (can't establish them from array yet, because it's not yet read...)
+ setTotalCount(indicatedTotalCount); + maxValue = indicatedMaxValue; + minNonZeroValue = indicatedMinNonZeroValue; + startTimeStampMsec = indicatedStartTimeStampMsec; + endTimeStampMsec = indicatedEndTimeStampMsec; + autoResize = indicatedAutoResize; + wordSizeInBytes = indicatedWordSizeInBytes; + } + + // ######## ## ## ###### ####### ######## #### ## ## ###### + // ## ### ## ## ## ## ## ## ## ## ### ## ## ## + // ## #### ## ## ## ## ## ## ## #### ## ## + // ###### ## ## ## ## ## ## ## ## ## ## ## ## ## #### + // ## ## #### ## ## ## ## ## ## ## #### ## ## + // ## ## ### ## ## ## ## ## ## ## ## ### ## ## + // ######## ## ## ###### ####### ######## #### ## ## ###### + // + // #### ######## ######## ###### ####### ######## #### ## ## ###### + // ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## + // #### ## ## ## ## ## ## ## ## ## #### ## ## + // #### ## ## ###### ## ## ## ## ## ## ## ## ## ## #### + // ## ## ## ## ## ## ## ## ## ## ## ## ## #### ## ## + // ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## + // #### ## ######## ######## ###### ####### ######## #### ## ## ###### + // + // Encoding/Decoding support: + // + + /** + * Get the capacity needed to encode this histogram into a ByteBuffer + * @return the capacity needed to encode this histogram into a ByteBuffer + */ + @Override + public int getNeededByteBufferCapacity() { + return getNeededByteBufferCapacity(countsArrayLength); + } + + private static final int ENCODING_HEADER_SIZE = 40; + private static final int V0_ENCODING_HEADER_SIZE = 32; + + int getNeededByteBufferCapacity(final int relevantLength) { + return getNeededPayloadByteBufferCapacity(relevantLength) + ENCODING_HEADER_SIZE; + } + + int getNeededPayloadByteBufferCapacity(final int relevantLength) { + return (relevantLength * V2maxWordSizeInBytes); + } + + int getNeededV0PayloadByteBufferCapacity(final int relevantLength) { + return (relevantLength * wordSizeInBytes); + } + + private static final int V0EncodingCookieBase = 0x1c849308; + private static final int V0CompressedEncodingCookieBase = 0x1c849309; + + private static final int V1EncodingCookieBase = 0x1c849301; + private static final int V1CompressedEncodingCookieBase = 0x1c849302; + + private static final int V2EncodingCookieBase = 0x1c849303; + private static final int V2CompressedEncodingCookieBase = 0x1c849304; + + private static final int V2maxWordSizeInBytes = 9; // LEB128-64b9B + ZigZag require up to 9 bytes per word + + private static final int encodingCookieBase = V2EncodingCookieBase; + private static final int compressedEncodingCookieBase = V2CompressedEncodingCookieBase; + + private int getEncodingCookie() { + return encodingCookieBase | 0x10; // LSBit of wordSize byte indicates TLZE Encoding + } + + private int getCompressedEncodingCookie() { + return compressedEncodingCookieBase | 0x10; // LSBit of wordSize byte indicates TLZE Encoding + } + + private static int getCookieBase(final int cookie) { + return (cookie & ~0xf0); + } + + private static int getWordSizeInBytesFromCookie(final int cookie) { + if ((getCookieBase(cookie) == V2EncodingCookieBase) || + (getCookieBase(cookie) == V2CompressedEncodingCookieBase)) { + return V2maxWordSizeInBytes; + } + int sizeByte = (cookie & 0xf0) >> 4; + return sizeByte & 0xe; + } + + /** + * Encode this histogram into a ByteBuffer + * @param buffer The buffer to encode into + * @return The number of bytes written to the buffer + */ + synchronized public int encodeIntoByteBuffer(final ByteBuffer buffer) { + final long maxValue = getMaxValue(); + final int 
relevantLength = countsArrayIndex(maxValue) + 1;
+        if (buffer.capacity() < getNeededByteBufferCapacity(relevantLength)) {
+            throw new ArrayIndexOutOfBoundsException("buffer does not have capacity for " +
+                    getNeededByteBufferCapacity(relevantLength) + " bytes");
+        }
+        int initialPosition = buffer.position();
+        buffer.putInt(getEncodingCookie());
+        buffer.putInt(0); // Placeholder for payload length in bytes.
+        buffer.putInt(getNormalizingIndexOffset());
+        buffer.putInt(numberOfSignificantValueDigits);
+        buffer.putLong(lowestDiscernibleValue);
+        buffer.putLong(highestTrackableValue);
+        buffer.putDouble(getIntegerToDoubleValueConversionRatio());
+
+        int payloadStartPosition = buffer.position();
+        fillBufferFromCountsArray(buffer);
+        buffer.putInt(initialPosition + 4, buffer.position() - payloadStartPosition); // Record the payload length
+
+        return buffer.position() - initialPosition;
+    }
+
+    /**
+     * Encode this histogram in compressed form into a ByteBuffer
+     * @param targetBuffer The buffer to encode into
+     * @param compressionLevel Compression level (for java.util.zip.Deflater).
+     * @return The number of bytes written to the buffer
+     */
+    @Override
+    synchronized public int encodeIntoCompressedByteBuffer(
+            final ByteBuffer targetBuffer,
+            final int compressionLevel) {
+        int neededCapacity = getNeededByteBufferCapacity(countsArrayLength);
+        if (intermediateUncompressedByteBuffer == null || intermediateUncompressedByteBuffer.capacity() < neededCapacity) {
+            intermediateUncompressedByteBuffer = ByteBuffer.allocate(neededCapacity).order(BIG_ENDIAN);
+        }
+        intermediateUncompressedByteBuffer.clear();
+        int initialTargetPosition = targetBuffer.position();
+
+        final int uncompressedLength = encodeIntoByteBuffer(intermediateUncompressedByteBuffer);
+        targetBuffer.putInt(getCompressedEncodingCookie());
+
+        targetBuffer.putInt(0); // Placeholder for compressed contents length
+
+        Deflater compressor = new Deflater(compressionLevel);
+        compressor.setInput(intermediateUncompressedByteBuffer.array(), 0, uncompressedLength);
+        compressor.finish();
+
+        byte[] targetArray;
+
+        if (targetBuffer.hasArray()) {
+            targetArray = targetBuffer.array();
+        } else {
+            if (intermediateUncompressedByteArray == null ||
+                    intermediateUncompressedByteArray.length < targetBuffer.capacity()) {
+                intermediateUncompressedByteArray = new byte[targetBuffer.capacity()];
+            }
+            targetArray = intermediateUncompressedByteArray;
+        }
+
+        int compressedTargetOffset = initialTargetPosition + 8;
+        int compressedDataLength =
+                compressor.deflate(
+                        targetArray,
+                        compressedTargetOffset,
+                        targetArray.length - compressedTargetOffset
+                );
+        compressor.end();
+
+        if (!targetBuffer.hasArray()) {
+            targetBuffer.put(targetArray, compressedTargetOffset, compressedDataLength);
+        }
+
+        targetBuffer.putInt(initialTargetPosition + 4, compressedDataLength); // Record the compressed length
+        int bytesWritten = compressedDataLength + 8;
+        targetBuffer.position(initialTargetPosition + bytesWritten);
+        return bytesWritten;
+    }
+
+    /**
+     * Encode this histogram in compressed form into a ByteBuffer
+     * @param targetBuffer The buffer to encode into
+     * @return The number of bytes written to the buffer
+     */
+    public int encodeIntoCompressedByteBuffer(final ByteBuffer targetBuffer) {
+        return encodeIntoCompressedByteBuffer(targetBuffer, Deflater.DEFAULT_COMPRESSION);
+    }
+
+    private static final Class[] constructorArgsTypes = {Long.TYPE, Long.TYPE, Integer.TYPE};
+
+    static <T extends AbstractHistogram> T decodeFromByteBuffer(
+            final ByteBuffer buffer,
+            final Class<T> histogramClass,
final long minBarForHighestTrackableValue) {
+        try {
+            return decodeFromByteBuffer(buffer, histogramClass, minBarForHighestTrackableValue, null);
+        } catch (DataFormatException ex) {
+            throw new RuntimeException(ex);
+        }
+    }
+
+    private static <T extends AbstractHistogram> T decodeFromByteBuffer(
+            final ByteBuffer buffer,
+            final Class<T> histogramClass,
+            final long minBarForHighestTrackableValue,
+            final Inflater decompressor) throws DataFormatException {
+
+        final int cookie = buffer.getInt();
+        final int payloadLengthInBytes;
+        final int normalizingIndexOffset;
+        final int numberOfSignificantValueDigits;
+        final long lowestTrackableUnitValue;
+        long highestTrackableValue;
+        final double integerToDoubleValueConversionRatio;
+
+        if ((getCookieBase(cookie) == encodingCookieBase) ||
+                (getCookieBase(cookie) == V1EncodingCookieBase)) {
+            if (getCookieBase(cookie) == V2EncodingCookieBase) {
+                if (getWordSizeInBytesFromCookie(cookie) != V2maxWordSizeInBytes) {
+                    throw new IllegalArgumentException(
+                            "The buffer does not contain a Histogram (no valid cookie found)");
+                }
+            }
+            payloadLengthInBytes = buffer.getInt();
+            normalizingIndexOffset = buffer.getInt();
+            numberOfSignificantValueDigits = buffer.getInt();
+            lowestTrackableUnitValue = buffer.getLong();
+            highestTrackableValue = buffer.getLong();
+            integerToDoubleValueConversionRatio = buffer.getDouble();
+        } else if (getCookieBase(cookie) == V0EncodingCookieBase) {
+            numberOfSignificantValueDigits = buffer.getInt();
+            lowestTrackableUnitValue = buffer.getLong();
+            highestTrackableValue = buffer.getLong();
+            buffer.getLong(); // Discard totalCount field in V0 header.
+            payloadLengthInBytes = Integer.MAX_VALUE;
+            integerToDoubleValueConversionRatio = 1.0;
+            normalizingIndexOffset = 0;
+        } else {
+            throw new IllegalArgumentException("The buffer does not contain a Histogram (no valid cookie found)");
+        }
+        highestTrackableValue = Math.max(highestTrackableValue, minBarForHighestTrackableValue);
+
+        T histogram;
+
+        // Construct histogram:
+        try {
+            Constructor<T> constructor = histogramClass.getConstructor(constructorArgsTypes);
+            histogram = constructor.newInstance(lowestTrackableUnitValue, highestTrackableValue,
+                    numberOfSignificantValueDigits);
+            histogram.setIntegerToDoubleValueConversionRatio(integerToDoubleValueConversionRatio);
+            histogram.setNormalizingIndexOffset(normalizingIndexOffset);
+            try {
+                histogram.setAutoResize(true);
+            } catch (IllegalStateException ex) {
+                // Allow histogram to refuse auto-sizing setting
+            }
+        } catch (IllegalAccessException ex) {
+            throw new IllegalArgumentException(ex);
+        } catch (NoSuchMethodException ex) {
+            throw new IllegalArgumentException(ex);
+        } catch (InstantiationException ex) {
+            throw new IllegalArgumentException(ex);
+        } catch (InvocationTargetException ex) {
+            throw new IllegalArgumentException(ex);
+        }
+
+        ByteBuffer payLoadSourceBuffer;
+
+        final int expectedCapacity =
+                Math.min(
+                        histogram.getNeededV0PayloadByteBufferCapacity(histogram.countsArrayLength),
+                        payloadLengthInBytes
+                );
+
+        if (decompressor == null) {
+            // No compressed source buffer. Payload is in buffer, after header.
+            if (expectedCapacity > buffer.remaining()) {
+                throw new IllegalArgumentException("The buffer does not contain the full Histogram payload");
+            }
+            payLoadSourceBuffer = buffer;
+        } else {
+            // Compressed source buffer. Payload needs to be decoded from there.
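+            // (Editor's note: expectedCapacity -- computed above as the smaller of the indicated
+            // payload length and the histogram's own payload capacity -- bounds the allocation
+            // below, so a corrupt length field in the header cannot force an oversized buffer.)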
+            payLoadSourceBuffer = ByteBuffer.allocate(expectedCapacity).order(BIG_ENDIAN);
+            int decompressedByteCount = decompressor.inflate(payLoadSourceBuffer.array());
+            if ((payloadLengthInBytes != Integer.MAX_VALUE) && (decompressedByteCount < payloadLengthInBytes)) {
+                throw new IllegalArgumentException("The buffer does not contain the indicated payload amount");
+            }
+        }
+
+        int filledLength = ((AbstractHistogram) histogram).fillCountsArrayFromSourceBuffer(
+                payLoadSourceBuffer,
+                expectedCapacity,
+                getWordSizeInBytesFromCookie(cookie));
+
+        histogram.establishInternalTackingValues(filledLength);
+
+        return histogram;
+    }
+
+    private int fillCountsArrayFromSourceBuffer(ByteBuffer sourceBuffer, int lengthInBytes, int wordSizeInBytes) {
+        if ((wordSizeInBytes != 2) && (wordSizeInBytes != 4) &&
+                (wordSizeInBytes != 8) && (wordSizeInBytes != V2maxWordSizeInBytes)) {
+            throw new IllegalArgumentException("word size must be 2, 4, 8, or V2maxWordSizeInBytes (" +
+                    V2maxWordSizeInBytes + ") bytes");
+        }
+        final long maxAllowableCountInHistogram =
+                ((this.wordSizeInBytes == 2) ? Short.MAX_VALUE :
+                        ((this.wordSizeInBytes == 4) ? Integer.MAX_VALUE : Long.MAX_VALUE)
+                );
+
+        int dstIndex = 0;
+        int endPosition = sourceBuffer.position() + lengthInBytes;
+        while (sourceBuffer.position() < endPosition) {
+            long count;
+            int zerosCount = 0;
+            if (wordSizeInBytes == V2maxWordSizeInBytes) {
+                // V2 encoding format uses a long encoded in a ZigZag LEB128 format (up to V2maxWordSizeInBytes):
+                count = ZigZagEncoding.getLong(sourceBuffer);
+                if (count < 0) {
+                    long zc = -count;
+                    if (zc > Integer.MAX_VALUE) {
+                        throw new IllegalArgumentException(
+                                "An encoded zero count of > Integer.MAX_VALUE was encountered in the source");
+                    }
+                    zerosCount = (int) zc;
+                }
+            } else {
+                // decoding V1 and V0 encoding formats depends on indicated word size:
+                count =
+                        ((wordSizeInBytes == 2) ? sourceBuffer.getShort() :
+                                ((wordSizeInBytes == 4) ? sourceBuffer.getInt() :
+                                        sourceBuffer.getLong()
+                                )
+                        );
+            }
+            if (count > maxAllowableCountInHistogram) {
+                throw new IllegalArgumentException(
+                        "An encoded count (" + count +
+                                ") that does not fit in the Histogram's word size (" +
+                                this.wordSizeInBytes + " bytes) was encountered in the source");
+            }
+            if (zerosCount > 0) {
+                dstIndex += zerosCount; // No need to set zeros in array. Just skip them.
+            } else {
+                setCountAtIndex(dstIndex++, count);
+            }
+        }
+        return dstIndex; // this is the destination length
+    }
+
+    synchronized void fillBufferFromCountsArray(ByteBuffer buffer) {
+        final int countsLimit = countsArrayIndex(maxValue) + 1;
+        int srcIndex = 0;
+
+        while (srcIndex < countsLimit) {
+            // V2 encoding format uses a ZigZag LEB128-64b9B encoded long. Positive values are counts,
+            // while negative values indicate a run of repeated zero counts.
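+            // (Editor's illustrative example, not part of the original source: a counts run of
+            // [5, 0, 0, 0, 2] is emitted as the ZigZag-encoded longs 5, -3, 2 -- the -3 standing
+            // for three consecutive zero counts.)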
+            long count = getCountAtIndex(srcIndex++);
+            if (count < 0) {
+                throw new RuntimeException("Cannot encode histogram containing negative counts (" +
+                        count + ") at index " + srcIndex + ", corresponding to the value range [" +
+                        lowestEquivalentValue(valueFromIndex(srcIndex)) + "," +
+                        nextNonEquivalentValue(valueFromIndex(srcIndex)) + ")");
+            }
+            // Count trailing 0s (which follow this count):
+            long zerosCount = 0;
+            if (count == 0) {
+                zerosCount = 1;
+                while ((srcIndex < countsLimit) && (getCountAtIndex(srcIndex) == 0)) {
+                    zerosCount++;
+                    srcIndex++;
+                }
+            }
+            if (zerosCount > 1) {
+                ZigZagEncoding.putLong(buffer, -zerosCount);
+            } else {
+                ZigZagEncoding.putLong(buffer, count);
+            }
+        }
+    }
+
+    static <T extends AbstractHistogram> T decodeFromCompressedByteBuffer(
+            final ByteBuffer buffer,
+            final Class<T> histogramClass,
+            final long minBarForHighestTrackableValue)
+            throws DataFormatException {
+        int initialTargetPosition = buffer.position();
+        final int cookie = buffer.getInt();
+        final int headerSize;
+        if ((getCookieBase(cookie) == compressedEncodingCookieBase) ||
+                (getCookieBase(cookie) == V1CompressedEncodingCookieBase)) {
+            headerSize = ENCODING_HEADER_SIZE;
+        } else if (getCookieBase(cookie) == V0CompressedEncodingCookieBase) {
+            headerSize = V0_ENCODING_HEADER_SIZE;
+        } else {
+            throw new IllegalArgumentException("The buffer does not contain a compressed Histogram");
+        }
+
+        final int lengthOfCompressedContents = buffer.getInt();
+        final Inflater decompressor = new Inflater();
+
+        if (buffer.hasArray()) {
+            decompressor.setInput(buffer.array(), initialTargetPosition + 8, lengthOfCompressedContents);
+        } else {
+            byte[] compressedContents = new byte[lengthOfCompressedContents];
+            buffer.get(compressedContents);
+            decompressor.setInput(compressedContents);
+        }
+
+        final ByteBuffer headerBuffer = ByteBuffer.allocate(headerSize).order(BIG_ENDIAN);
+        decompressor.inflate(headerBuffer.array());
+        T histogram = decodeFromByteBuffer(
+                headerBuffer, histogramClass, minBarForHighestTrackableValue, decompressor);
+
+        decompressor.end();
+
+        return histogram;
+    }
+
+    // #### ## ## ######## ######## ######## ## ## ### ##
+    // ## ### ## ## ## ## ## ### ## ## ## ##
+    // ## #### ## ## ## ## ## #### ## ## ## ##
+    // ## ## ## ## ## ###### ######## ## ## ## ## ## ##
+    // ## ## #### ## ## ## ## ## #### ######### ##
+    // ## ## ### ## ## ## ## ## ### ## ## ##
+    // #### ## ## ## ######## ## ## ## ## ## ## ########
+    //
+    // ## ## ######## ## ######## ######## ######## ######
+    // ## ## ## ## ## ## ## ## ## ## ##
+    // ## ## ## ## ## ## ## ## ## ##
+    // ######### ###### ## ######## ###### ######## ######
+    // ## ## ## ## ## ## ## ## ##
+    // ## ## ## ## ## ## ## ## ## ##
+    // ## ## ######## ######## ## ######## ## ## ######
+    //
+    // Internal helper methods:
+    //
+
+    private String recordedValuesToString() {
+        String output = "";
+        try {
+            for (int i = 0; i < countsArrayLength; i++) {
+                if (getCountAtIndex(i) != 0) {
+                    output += String.format("[%d] : %d\n", i, getCountAtIndex(i));
+                }
+            }
+            return output;
+        } catch (Exception ex) {
+            output += "!!!
Exception thrown in value iteration...\n"; + } + return output; + } + + @Override + public String toString() { + String output = "AbstractHistogram:\n"; + output += super.toString(); + output += recordedValuesToString(); + return output; + } + + void establishInternalTackingValues() { + establishInternalTackingValues(countsArrayLength); + } + + void establishInternalTackingValues(final int lengthToCover) { + resetMaxValue(0); + resetMinNonZeroValue(Long.MAX_VALUE); + int maxIndex = -1; + int minNonZeroIndex = -1; + long observedTotalCount = 0; + for (int index = 0; index < lengthToCover; index++) { + long countAtIndex; + if ((countAtIndex = getCountAtIndex(index)) > 0) { + observedTotalCount += countAtIndex; + maxIndex = index; + if ((minNonZeroIndex == -1) && (index != 0)) { + minNonZeroIndex = index; + } + } + } + if (maxIndex >= 0) { + updatedMaxValue(highestEquivalentValue(valueFromIndex(maxIndex))); + } + if (minNonZeroIndex >= 0) { + updateMinNonZeroValue(valueFromIndex(minNonZeroIndex)); + } + setTotalCount(observedTotalCount); + } + + int getBucketsNeededToCoverValue(final long value) { + // Shift won't overflow because subBucketMagnitude + unitMagnitude <= 62. + // the k'th bucket can express from 0 * 2^k to subBucketCount * 2^k in units of 2^k + long smallestUntrackableValue = ((long) subBucketCount) << unitMagnitude; + + // always have at least 1 bucket + int bucketsNeeded = 1; + while (smallestUntrackableValue <= value) { + if (smallestUntrackableValue > (Long.MAX_VALUE / 2)) { + // next shift will overflow, meaning that bucket could represent values up to ones greater than + // Long.MAX_VALUE, so it's the last bucket + return bucketsNeeded + 1; + } + smallestUntrackableValue <<= 1; + bucketsNeeded++; + } + return bucketsNeeded; + } + + /** + * If we have N such that subBucketCount * 2^N > max value, we need storage for N+1 buckets, each with enough + * slots to hold the top half of the subBucketCount (the lower half is covered by previous buckets), and the +1 + * being used for the lower half of the 0'th bucket. Or, equivalently, we need 1 more bucket to capture the max + * value if we consider the sub-bucket length to be halved. + */ + int getLengthForNumberOfBuckets(final int numberOfBuckets) { + final int lengthNeeded = (numberOfBuckets + 1) * (subBucketHalfCount); + return lengthNeeded; + } + + int countsArrayIndex(final long value) { + if (value < 0) { + throw new ArrayIndexOutOfBoundsException("Histogram recorded value cannot be negative."); + } + final int bucketIndex = getBucketIndex(value); + final int subBucketIndex = getSubBucketIndex(value, bucketIndex); + return countsArrayIndex(bucketIndex, subBucketIndex); + } + + private int countsArrayIndex(final int bucketIndex, final int subBucketIndex) { + assert(subBucketIndex < subBucketCount); + assert(bucketIndex == 0 || (subBucketIndex >= subBucketHalfCount)); + // Calculate the index for the first entry that will be used in the bucket (halfway through subBucketCount). + // For bucketIndex 0, all subBucketCount entries may be used, but bucketBaseIndex is still set in the middle. + final int bucketBaseIndex = (bucketIndex + 1) << subBucketHalfCountMagnitude; + // Calculate the offset in the bucket. This subtraction will result in a positive value in all buckets except + // the 0th bucket (since a value in that bucket may be less than half the bucket's 0 to subBucketCount range). + // However, this works out since we give bucket 0 twice as much space. 
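+        // (Editor's illustrative example, not part of the original source: with
+        // subBucketHalfCountMagnitude = 10, bucketIndex 0 / subBucketIndex 3 gives
+        // bucketBaseIndex = 1024 and offsetInBucket = -1021, i.e. index 3; bucketIndex 1 /
+        // subBucketIndex 1024 gives bucketBaseIndex = 2048 and offsetInBucket = 0, i.e. index 2048.)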
+        final int offsetInBucket = subBucketIndex - subBucketHalfCount;
+        // The following is the equivalent of ((subBucketIndex - subBucketHalfCount) + bucketBaseIndex):
+        return bucketBaseIndex + offsetInBucket;
+    }
+
+    /**
+     * @return the lowest (and therefore highest precision) bucket index that can represent the value
+     */
+    int getBucketIndex(final long value) {
+        // Calculates the number of powers of two by which the value is greater than the biggest value that fits in
+        // bucket 0. This is the bucket index since each successive bucket can hold a value 2x greater.
+        // The mask maps small values to bucket 0.
+        return leadingZeroCountBase - Long.numberOfLeadingZeros(value | subBucketMask);
+    }
+
+    int getSubBucketIndex(final long value, final int bucketIndex) {
+        // For bucketIndex 0, this is just value, so it may be anywhere in 0 to subBucketCount.
+        // For other bucketIndex, this will always end up in the top half of subBucketCount: assume that for some bucket
+        // k > 0, this calculation will yield a value in the bottom half of 0 to subBucketCount. Then, because of how
+        // buckets overlap, it would have also been in the top half of bucket k-1, and therefore would have
+        // returned k-1 in getBucketIndex(). Since we would then shift it one fewer bits here, it would be twice as big,
+        // and therefore in the top half of subBucketCount.
+        return (int) (value >>> (bucketIndex + unitMagnitude));
+    }
+
+    /**
+     * @return The value `index - normalizingIndexOffset` modulo arrayLength (always non-negative)
+     */
+    int normalizeIndex(int index, int normalizingIndexOffset, int arrayLength) {
+        if (normalizingIndexOffset == 0) {
+            // Fastpath out of normalization. Keeps integer value histograms fast while allowing
+            // others (like DoubleHistogram) to use normalization at a cost...
+            return index;
+        }
+        if ((index > arrayLength) || (index < 0)) {
+            throw new ArrayIndexOutOfBoundsException("index out of covered value range");
+        }
+        int normalizedIndex = index - normalizingIndexOffset;
+        // The following is the same as an unsigned remainder operation, as long as no double wrapping happens
+        // (which shouldn't happen, as normalization is never supposed to wrap, since it would have overflowed
+        // or underflowed before it did). This (the + and - tests) seems to be faster than a % op with a
+        // correcting if < 0...:
+        if (normalizedIndex < 0) {
+            normalizedIndex += arrayLength;
+        } else if (normalizedIndex >= arrayLength) {
+            normalizedIndex -= arrayLength;
+        }
+        return normalizedIndex;
+    }
+
+    private long valueFromIndex(final int bucketIndex, final int subBucketIndex) {
+        return ((long) subBucketIndex) << (bucketIndex + unitMagnitude);
+    }
+
+    final long valueFromIndex(final int index) {
+        int bucketIndex = (index >> subBucketHalfCountMagnitude) - 1;
+        int subBucketIndex = (index & (subBucketHalfCount - 1)) + subBucketHalfCount;
+        if (bucketIndex < 0) {
+            subBucketIndex -= subBucketHalfCount;
+            bucketIndex = 0;
+        }
+        return valueFromIndex(bucketIndex, subBucketIndex);
+    }
+
+    static int numberOfSubBuckets(final int numberOfSignificantValueDigits) {
+        final long largestValueWithSingleUnitResolution = 2 * (long) Math.pow(10, numberOfSignificantValueDigits);
+
+        // We need to maintain power-of-two subBucketCount (for clean direct indexing) that is large enough to
+        // provide unit resolution to at least largestValueWithSingleUnitResolution.
So figure out
+        // largestValueWithSingleUnitResolution's nearest power-of-two (rounded up), and use that:
+        int subBucketCountMagnitude = (int) Math.ceil(Math.log(largestValueWithSingleUnitResolution) / Math.log(2));
+        int subBucketCount = (int) Math.pow(2, subBucketCountMagnitude);
+        return subBucketCount;
+    }
+}
diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/AbstractHistogramIterator.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/AbstractHistogramIterator.java
new file mode 100644
index 000000000..a09ed45ac
--- /dev/null
+++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/AbstractHistogramIterator.java
@@ -0,0 +1,149 @@
+/**
+ * Written by Gil Tene of Azul Systems, and released to the public domain,
+ * as explained at http://creativecommons.org/publicdomain/zero/1.0/
+ *
+ * @author Gil Tene
+ */
+
+package io.prometheus.client.HdrHistogram;
+
+import java.util.ConcurrentModificationException;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+/**
+ * Used for iterating through histogram values.
+ */
+abstract class AbstractHistogramIterator implements Iterator<HistogramIterationValue> {
+    AbstractHistogram histogram;
+    long arrayTotalCount;
+
+    int currentIndex;
+    long currentValueAtIndex;
+
+    long nextValueAtIndex;
+
+    long prevValueIteratedTo;
+    long totalCountToPrevIndex;
+
+    long totalCountToCurrentIndex;
+    long totalValueToCurrentIndex;
+
+    long countAtThisValue;
+
+    private boolean freshSubBucket;
+    final HistogramIterationValue currentIterationValue = new HistogramIterationValue();
+
+    private double integerToDoubleValueConversionRatio;
+
+    void resetIterator(final AbstractHistogram histogram) {
+        this.histogram = histogram;
+        this.arrayTotalCount = histogram.getTotalCount();
+        this.integerToDoubleValueConversionRatio = histogram.getIntegerToDoubleValueConversionRatio();
+        this.currentIndex = 0;
+        this.currentValueAtIndex = 0;
+        this.nextValueAtIndex = 1 << histogram.unitMagnitude;
+        this.prevValueIteratedTo = 0;
+        this.totalCountToPrevIndex = 0;
+        this.totalCountToCurrentIndex = 0;
+        this.totalValueToCurrentIndex = 0;
+        this.countAtThisValue = 0;
+        this.freshSubBucket = true;
+        currentIterationValue.reset();
+    }
+
+    /**
+     * Returns true if the iteration has more elements. (In other words, returns true if next would return an
+     * element rather than throwing an exception.)
+     *
+     * @return true if the iterator has more elements.
+     */
+    @Override
+    public boolean hasNext() {
+        if (histogram.getTotalCount() != arrayTotalCount) {
+            throw new ConcurrentModificationException();
+        }
+        return (totalCountToCurrentIndex < arrayTotalCount);
+    }
+
+    /**
+     * Returns the next element in the iteration.
+     *
+     * @return the {@link HistogramIterationValue} associated with the next element in the iteration.
+     */
+    @Override
+    public HistogramIterationValue next() {
+        // Move through the sub buckets and buckets until we hit the next reporting level:
+        while (!exhaustedSubBuckets()) {
+            countAtThisValue = histogram.getCountAtIndex(currentIndex);
+            if (freshSubBucket) { // Don't add unless we've incremented since last bucket...
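+                // (Editor's note: a single index may satisfy reachedIterationLevel() several times in
+                // a row -- e.g. successive percentile ticks landing on the same value -- so its count
+                // and value are folded into the running totals only once, guarded by freshSubBucket.)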
+ totalCountToCurrentIndex += countAtThisValue; + totalValueToCurrentIndex += countAtThisValue * histogram.highestEquivalentValue(currentValueAtIndex); + freshSubBucket = false; + } + if (reachedIterationLevel()) { + long valueIteratedTo = getValueIteratedTo(); + currentIterationValue.set(valueIteratedTo, prevValueIteratedTo, countAtThisValue, + (totalCountToCurrentIndex - totalCountToPrevIndex), totalCountToCurrentIndex, + totalValueToCurrentIndex, ((100.0 * totalCountToCurrentIndex) / arrayTotalCount), + getPercentileIteratedTo(), integerToDoubleValueConversionRatio); + prevValueIteratedTo = valueIteratedTo; + totalCountToPrevIndex = totalCountToCurrentIndex; + // move the next iteration level forward: + incrementIterationLevel(); + if (histogram.getTotalCount() != arrayTotalCount) { + throw new ConcurrentModificationException(); + } + return currentIterationValue; + } + incrementSubBucket(); + } + // Should not reach here. But possible for concurrent modification or overflowed histograms + // under certain conditions + if ((histogram.getTotalCount() != arrayTotalCount) || + (totalCountToCurrentIndex > arrayTotalCount)) { + throw new ConcurrentModificationException(); + } + throw new NoSuchElementException(); + } + + /** + * Not supported. Will throw an {@link UnsupportedOperationException}. + */ + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + + abstract void incrementIterationLevel(); + + /** + * @return true if the current position's data should be emitted by the iterator + */ + abstract boolean reachedIterationLevel(); + + double getPercentileIteratedTo() { + return (100.0 * (double) totalCountToCurrentIndex) / arrayTotalCount; + } + + double getPercentileIteratedFrom() { + return (100.0 * (double) totalCountToPrevIndex) / arrayTotalCount; + } + + long getValueIteratedTo() { + return histogram.highestEquivalentValue(currentValueAtIndex); + } + + private boolean exhaustedSubBuckets() { + return (currentIndex >= histogram.countsArrayLength); + } + + void incrementSubBucket() { + freshSubBucket = true; + // Take on the next index: + currentIndex++; + currentValueAtIndex = histogram.valueFromIndex(currentIndex); + // Figure out the value at the next index (used by some iterators): + nextValueAtIndex = histogram.valueFromIndex(currentIndex + 1); + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/AllValuesIterator.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/AllValuesIterator.java new file mode 100644 index 000000000..25f5c11a9 --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/AllValuesIterator.java @@ -0,0 +1,59 @@ +/** + * Written by Gil Tene of Azul Systems, and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram; + +import java.util.ConcurrentModificationException; +import java.util.Iterator; + +/** + * Used for iterating through histogram values using the finest granularity steps supported by the underlying + * representation. The iteration steps through all possible unit value levels, regardless of whether or not + * there were recorded values for that value level, and terminates when all recorded histogram values are exhausted. + */ + +public class AllValuesIterator extends AbstractHistogramIterator implements Iterator { + int visitedIndex; + + /** + * Reset iterator for re-use in a fresh iteration over the same histogram data set. 
+ */ + public void reset() { + reset(histogram); + } + + private void reset(final AbstractHistogram histogram) { + super.resetIterator(histogram); + visitedIndex = -1; + } + + /** + * @param histogram The histogram this iterator will operate on + */ + public AllValuesIterator(final AbstractHistogram histogram) { + reset(histogram); + } + + @Override + void incrementIterationLevel() { + visitedIndex = currentIndex; + } + + @Override + boolean reachedIterationLevel() { + return (visitedIndex != currentIndex); + } + + @Override + public boolean hasNext() { + if (histogram.getTotalCount() != arrayTotalCount) { + throw new ConcurrentModificationException(); + } + // Unlike other iterators AllValuesIterator is only done when we've exhausted the indices: + return (currentIndex < (histogram.countsArrayLength - 1)); + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/AtomicHistogram.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/AtomicHistogram.java new file mode 100644 index 000000000..729470546 --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/AtomicHistogram.java @@ -0,0 +1,249 @@ +/** + * Written by Gil Tene of Azul Systems, and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.nio.ByteBuffer; +import java.util.concurrent.atomic.AtomicLongArray; +import java.util.concurrent.atomic.AtomicLongFieldUpdater; +import java.util.zip.DataFormatException; + +/** + *
<h3>A High Dynamic Range (HDR) Histogram using atomic long count type</h3>
+ * An AtomicHistogram guarantees lossless recording of values into the histogram even when the + * histogram is updated by multiple threads. It is important to note though that this lossless + * recording capability is the only thread-safe behavior provided by AtomicHistogram, and that it + * is not otherwise synchronized. Specifically, AtomicHistogram does not support auto-resizing, + * does not support value shift operations, and provides no implicit synchronization + * that would prevent the contents of the histogram from changing during iterations, copies, or + * addition operations on the histogram. Callers wishing to make potentially concurrent, + * multi-threaded updates that would safely work in the presence of queries, copies, or additions + * of histogram objects should either take care to externally synchronize and/or order their access, + * use the {@link SynchronizedHistogram} variant, or (recommended) use the + * {@link Recorder} class, which is intended for this purpose. + *
<p>
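+ * A minimal recording sketch (illustrative only; the recorded value and its unit are assumptions):
+ * <pre>{@code
+ * AtomicHistogram histogram = new AtomicHistogram(3600000000000L, 3);
+ * histogram.recordValue(latencyNanos);              // lossless under concurrent recording
+ * long p99 = histogram.getValueAtPercentile(99.0);  // not synchronized against recorders
+ * }</pre>
+ * <p>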
+ * See package description for {@link org.HdrHistogram} for details. + */ + +public class AtomicHistogram extends Histogram { + + static final AtomicLongFieldUpdater totalCountUpdater = + AtomicLongFieldUpdater.newUpdater(AtomicHistogram.class, "totalCount"); + volatile long totalCount; + volatile AtomicLongArray counts; + + @Override + long getCountAtIndex(final int index) { + return counts.get(index); + } + + @Override + long getCountAtNormalizedIndex(final int index) { + return counts.get(index); + } + + @Override + void incrementCountAtIndex(final int index) { + counts.getAndIncrement(index); + } + + @Override + void addToCountAtIndex(final int index, final long value) { + counts.getAndAdd(index, value); + } + + @Override + void setCountAtIndex(int index, long value) { + counts.lazySet(index, value); + } + + @Override + void setCountAtNormalizedIndex(int index, long value) { + counts.lazySet(index, value); + } + + @Override + int getNormalizingIndexOffset() { + return 0; + } + + @Override + void setNormalizingIndexOffset(int normalizingIndexOffset) { + if (normalizingIndexOffset != 0) { + throw new IllegalStateException( + "AtomicHistogram does not support non-zero normalizing index settings." + + " Use ConcurrentHistogram Instead."); + } + } + + @Override + void shiftNormalizingIndexByOffset(int offsetToAdd, + boolean lowestHalfBucketPopulated, + double newIntegerToDoubleValueConversionRatio) { + throw new IllegalStateException( + "AtomicHistogram does not support Shifting operations." + + " Use ConcurrentHistogram Instead."); + } + + @Override + void resize(long newHighestTrackableValue) { + throw new IllegalStateException( + "AtomicHistogram does not support resizing operations." + + " Use ConcurrentHistogram Instead."); + } + + @Override + public void setAutoResize(boolean autoResize) { + throw new IllegalStateException( + "AtomicHistogram does not support AutoResize operation." + + " Use ConcurrentHistogram Instead."); + } + + @Override + public boolean supportsAutoResize() { return false; } + + @Override + void clearCounts() { + for (int i = 0; i < counts.length(); i++) { + counts.lazySet(i, 0); + } + totalCountUpdater.set(this, 0); + } + + @Override + public AtomicHistogram copy() { + AtomicHistogram copy = new AtomicHistogram(this); + copy.add(this); + return copy; + } + + @Override + public AtomicHistogram copyCorrectedForCoordinatedOmission(final long expectedIntervalBetweenValueSamples) { + AtomicHistogram toHistogram = new AtomicHistogram(this); + toHistogram.addWhileCorrectingForCoordinatedOmission(this, expectedIntervalBetweenValueSamples); + return toHistogram; + } + + @Override + public long getTotalCount() { + return totalCountUpdater.get(this); + } + + @Override + void setTotalCount(final long totalCount) { + totalCountUpdater.set(this, totalCount); + } + + @Override + void incrementTotalCount() { + totalCountUpdater.incrementAndGet(this); + } + + @Override + void addToTotalCount(final long value) { + totalCountUpdater.addAndGet(this, value); + } + + @Override + int _getEstimatedFootprintInBytes() { + return (512 + (8 * counts.length())); + } + + /** + * Construct a AtomicHistogram given the Highest value to be tracked and a number of significant decimal digits. + * The histogram will be constructed to implicitly track (distinguish from 0) values as low as 1. + * + * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive + * integer that is {@literal >=} 2. 
+ * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5. + */ + public AtomicHistogram(final long highestTrackableValue, final int numberOfSignificantValueDigits) { + this(1, highestTrackableValue, numberOfSignificantValueDigits); + } + + /** + * Construct a AtomicHistogram given the Lowest and Highest values to be tracked and a number of significant + * decimal digits. Providing a lowestDiscernibleValue is useful is situations where the units used + * for the histogram's values are much smaller that the minimal accuracy required. E.g. when tracking + * time values stated in nanosecond units, where the minimal accuracy required is a microsecond, the + * proper value for lowestDiscernibleValue would be 1000. + * + * @param lowestDiscernibleValue The lowest value that can be tracked (distinguished from 0) by the histogram. + * Must be a positive integer that is {@literal >=} 1. May be internally rounded + * down to nearest power of 2. + * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive + * integer that is {@literal >=} (2 * lowestDiscernibleValue). + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5. + */ + public AtomicHistogram(final long lowestDiscernibleValue, final long highestTrackableValue, + final int numberOfSignificantValueDigits) { + super(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits, false); + counts = new AtomicLongArray(countsArrayLength); + wordSizeInBytes = 8; + } + + /** + * Construct a histogram with the same range settings as a given source histogram, + * duplicating the source's start/end timestamps (but NOT it's contents) + * @param source The source histogram to duplicate + */ + public AtomicHistogram(final AbstractHistogram source) { + super(source, false); + counts = new AtomicLongArray(countsArrayLength); + wordSizeInBytes = 8; + } + + /** + * Construct a new histogram by decoding it from a ByteBuffer. + * @param buffer The buffer to decode from + * @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high + * @return The newly constructed histogram + */ + public static AtomicHistogram decodeFromByteBuffer(final ByteBuffer buffer, + final long minBarForHighestTrackableValue) { + return decodeFromByteBuffer(buffer, AtomicHistogram.class, minBarForHighestTrackableValue); + } + + /** + * Construct a new histogram by decoding it from a compressed form in a ByteBuffer. + * @param buffer The buffer to decode from + * @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high + * @return The newly constructed histogram + * @throws DataFormatException on error parsing/decompressing the buffer + */ + public static AtomicHistogram decodeFromCompressedByteBuffer(final ByteBuffer buffer, + final long minBarForHighestTrackableValue) + throws DataFormatException { + return decodeFromCompressedByteBuffer(buffer, AtomicHistogram.class, minBarForHighestTrackableValue); + } + + /** + * Construct a new AtomicHistogram by decoding it from a String containing a base64 encoded + * compressed histogram representation. 
+ * + * @param base64CompressedHistogramString A string containing a base64 encoding of a compressed histogram + * @return A AtomicHistogram decoded from the string + * @throws DataFormatException on error parsing/decompressing the input + */ + public static AtomicHistogram fromString(final String base64CompressedHistogramString) + throws DataFormatException { + return decodeFromCompressedByteBuffer( + ByteBuffer.wrap(Base64Helper.parseBase64Binary(base64CompressedHistogramString)), + 0); + } + + private void readObject(final ObjectInputStream o) + throws IOException, ClassNotFoundException { + o.defaultReadObject(); + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/Base64Helper.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/Base64Helper.java new file mode 100644 index 000000000..83417bc7d --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/Base64Helper.java @@ -0,0 +1,97 @@ +/** + * Written by Gil Tene of Azul Systems, and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram; + +import java.lang.reflect.Method; + +/** + * Base64Helper exists to bridge inconsistencies in Java SE support of Base64 encoding and decoding. + * Earlier Java SE platforms (up to and including Java SE 8) supported base64 encode/decode via the + * javax.xml.bind.DatatypeConverter class, which was deprecated and eventually removed in Java SE 9. + * Later Java SE platforms (Java SE 8 and later) support base64 encode/decode via the + * java.util.Base64 class (first introduced in Java SE 8, and not available on e.g. Java SE 6 or 7). + * + * This makes it "hard" to write a single piece of source code that deals with base64 encodings and + * will compile and run on e.g. Java SE 7 AND Java SE 9. And such common source is a common need for + * libraries. This class is intended to encapsulate this "hard"-ness and hide the ugly pretzel-twisting + * needed under the covers. + * + * Base64Helper provides a common API that works across Java SE 6..9 (and beyond hopefully), and + * uses late binding (Reflection) internally to avoid javac-compile-time dependencies on a specific + * Java SE version (e.g. beyond 7 or before 9). + * + */ +class Base64Helper { + + /** + * Converts an array of bytes into a Base64 string. 
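+ * A minimal round-trip sketch (illustrative only):
+ * <pre>{@code
+ * String encoded = Base64Helper.printBase64Binary(new byte[] {1, 2, 3});
+ * byte[] decoded = Base64Helper.parseBase64Binary(encoded);  // {1, 2, 3} again
+ * }</pre>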
+ * + * @param binaryArray A binary encoded input array + * @return a String containing the Base64 encoded equivalent of the binary input + */ + static String printBase64Binary(byte [] binaryArray) { + try { + return (String) encodeMethod.invoke(encoderObj, binaryArray); + } catch (Throwable e) { + throw new UnsupportedOperationException("Failed to use platform's base64 encode method"); + } + } + + /** + * Converts a Base64 encoded String to a byte array + * + * @param base64input A base64-encoded input String + * @return a byte array containing the binary representation equivalent of the Base64 encoded input + */ + static byte[] parseBase64Binary(String base64input) { + try { + return (byte []) decodeMethod.invoke(decoderObj, base64input); + } catch (Throwable e) { + throw new UnsupportedOperationException("Failed to use platform's base64 decode method"); + } + } + + + private static Method decodeMethod; + private static Method encodeMethod; + + // encoderObj and decoderObj are used in non-static method forms, and + // irrelevant for static method forms: + private static Object decoderObj; + private static Object encoderObj; + + static { + try { + Class javaUtilBase64Class = Class.forName("java.util.Base64"); + + Method getDecoderMethod = javaUtilBase64Class.getMethod("getDecoder"); + decoderObj = getDecoderMethod.invoke(null); + decodeMethod = decoderObj.getClass().getMethod("decode", String.class); + + Method getEncoderMethod = javaUtilBase64Class.getMethod("getEncoder"); + encoderObj = getEncoderMethod.invoke(null); + encodeMethod = encoderObj.getClass().getMethod("encodeToString", byte[].class); + } catch (Throwable e) { + decodeMethod = null; + encodeMethod = null; + } + + if (encodeMethod == null) { + decoderObj = null; + encoderObj = null; + try { + Class javaxXmlBindDatatypeConverterClass = Class.forName("javax.xml.bind.DatatypeConverter"); + decodeMethod = javaxXmlBindDatatypeConverterClass.getMethod("parseBase64Binary", String.class); + encodeMethod = javaxXmlBindDatatypeConverterClass.getMethod("printBase64Binary", byte[].class); + } catch (Throwable e) { + decodeMethod = null; + encodeMethod = null; + } + } + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/ConcurrentDoubleHistogram.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/ConcurrentDoubleHistogram.java new file mode 100644 index 000000000..2a5f77e90 --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/ConcurrentDoubleHistogram.java @@ -0,0 +1,155 @@ +/** + * Written by Gil Tene of Azul Systems, and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram; + +import java.nio.ByteBuffer; +import java.util.zip.DataFormatException; + +/** + *
<h3>A floating point values High Dynamic Range (HDR) Histogram that supports safe concurrent recording + * operations.</h3> + * <p>
+ * A {@link ConcurrentDoubleHistogram} is a variant of {@link DoubleHistogram} that guarantees + * lossless recording of values into the histogram even when the histogram is updated by multiple threads, and + * supports auto-resize and auto-ranging operations that may occur concurrently as a result of recording operations. + *
<p>
+ * It is important to note that concurrent recording, auto-sizing, and value shifting are the only thread-safe behaviors + * provided by {@link ConcurrentDoubleHistogram}, and that it is not otherwise synchronized. Specifically, {@link + * ConcurrentDoubleHistogram} provides no implicit synchronization that would prevent the contents of the histogram + * from changing during queries, iterations, copies, or addition operations on the histogram. Callers wishing to make + * potentially concurrent, multi-threaded updates that would safely work in the presence of queries, copies, or + * additions of histogram objects should either take care to externally synchronize and/or order their access, + * use the {@link SynchronizedDoubleHistogram} variant, or (recommended) use the {@link DoubleRecorder} + * or {@link SingleWriterDoubleRecorder} which are intended for this purpose. + *
<p>
+ * {@link ConcurrentDoubleHistogram} supports the recording and analysis of sampled data value counts across a + * configurable dynamic range of floating point (double) values, with configurable value precision within the range. + * Dynamic range is expressed as a ratio between the highest and lowest non-zero values trackable within the histogram + * at any given time. Value precision is expressed as the number of significant [decimal] digits in the value recording, + * and provides control over value quantization behavior across the value range and the subsequent value resolution at + * any given level. + *
<p>
+ * Auto-ranging: Unlike integer value based histograms, the specific value range tracked by a {@link + * ConcurrentDoubleHistogram} is not specified upfront. Only the dynamic range of values that the histogram can cover is + * (optionally) specified. E.g. when a {@link ConcurrentDoubleHistogram} is created to track a dynamic range of + * 3600000000000 (enough to track values from a nanosecond to an hour), values could be recorded into it in any + * consistent unit of time as long as the ratio between the highest and lowest non-zero values stays within the + * specified dynamic range, so recording in units of nanoseconds (1.0 thru 3600000000000.0), milliseconds (0.000001 + * thru 3600000.0), seconds (0.000000001 thru 3600.0), or hours (1/3.6E12 thru 1.0) will all work just as well. + *
<p>
+ * Auto-resizing: When constructed with no specified dynamic range (or when auto-resize is turned on with {@link + * ConcurrentDoubleHistogram#setAutoResize}) a {@link ConcurrentDoubleHistogram} will auto-resize its dynamic range to + * include recorded values as they are encountered. Note that recording calls that cause auto-resizing may take + * longer to execute, as resizing incurs allocation and copying of internal data structures. + *
<p>
+ * Attempts to record non-zero values that range outside of the specified dynamic range (or exceed the limits + * of the dynamic range when auto-resizing) may result in {@link ArrayIndexOutOfBoundsException} exceptions, either + * due to overflow or underflow conditions. These exceptions will only be thrown if recording the value would have + * resulted in discarding or losing the required value precision of values already recorded in the histogram. + *
<p>
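+ * A minimal recording sketch (illustrative only; the recorded values are assumptions):
+ * <pre>{@code
+ * ConcurrentDoubleHistogram histogram = new ConcurrentDoubleHistogram(3);
+ * histogram.recordValue(0.0375);  // any consistent unit works (e.g. seconds)
+ * double median = histogram.getValueAtPercentile(50.0);
+ * }</pre>
+ * <p>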
+ * See package description for {@link org.HdrHistogram} for details. + */ + +public class ConcurrentDoubleHistogram extends DoubleHistogram { + + /** + * Construct a new auto-resizing DoubleHistogram using a precision stated as a number of significant decimal + * digits. + * + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant decimal + * digits to which the histogram will maintain value resolution and + * separation. Must be a non-negative integer between 0 and 5. + */ + public ConcurrentDoubleHistogram(final int numberOfSignificantValueDigits) { + this(2, numberOfSignificantValueDigits); + setAutoResize(true); + } + + /** + * Construct a new DoubleHistogram with the specified dynamic range (provided in {@code highestToLowestValueRatio}) + * and using a precision stated as a number of significant decimal digits. + * + * @param highestToLowestValueRatio specifies the dynamic range to use + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant decimal + * digits to which the histogram will maintain value resolution and + * separation. Must be a non-negative integer between 0 and 5. + */ + public ConcurrentDoubleHistogram(final long highestToLowestValueRatio, final int numberOfSignificantValueDigits) { + this(highestToLowestValueRatio, numberOfSignificantValueDigits, ConcurrentHistogram.class); + } + + /** + * Construct a {@link ConcurrentDoubleHistogram} with the same range settings as a given source, + * duplicating the source's start/end timestamps (but NOT it's contents) + * @param source The source histogram to duplicate + */ + public ConcurrentDoubleHistogram(final DoubleHistogram source) { + super(source); + } + + ConcurrentDoubleHistogram(final long highestToLowestValueRatio, + final int numberOfSignificantValueDigits, + final Class internalCountsHistogramClass) { + super(highestToLowestValueRatio, numberOfSignificantValueDigits, internalCountsHistogramClass); + } + + ConcurrentDoubleHistogram(final long highestToLowestValueRatio, + final int numberOfSignificantValueDigits, + final Class internalCountsHistogramClass, + AbstractHistogram internalCountsHistogram) { + super( + highestToLowestValueRatio, + numberOfSignificantValueDigits, + internalCountsHistogramClass, + internalCountsHistogram + ); + } + + /** + * Construct a new ConcurrentDoubleHistogram by decoding it from a ByteBuffer. + * @param buffer The buffer to decode from + * @param minBarForHighestToLowestValueRatio Force highestTrackableValue to be set at least this high + * @return The newly constructed ConcurrentDoubleHistogram + */ + public static ConcurrentDoubleHistogram decodeFromByteBuffer( + final ByteBuffer buffer, + final long minBarForHighestToLowestValueRatio) { + try { + int cookie = buffer.getInt(); + if (!isNonCompressedDoubleHistogramCookie(cookie)) { + throw new IllegalArgumentException("The buffer does not contain a DoubleHistogram"); + } + ConcurrentDoubleHistogram histogram = constructHistogramFromBuffer(cookie, buffer, + ConcurrentDoubleHistogram.class, ConcurrentHistogram.class, + minBarForHighestToLowestValueRatio); + return histogram; + } catch (DataFormatException ex) { + throw new RuntimeException(ex); + } + } + + /** + * Construct a new ConcurrentDoubleHistogram by decoding it from a compressed form in a ByteBuffer. 
+ * @param buffer The buffer to decode from + * @param minBarForHighestToLowestValueRatio Force highestTrackableValue to be set at least this high + * @return The newly constructed ConcurrentDoubleHistogram + * @throws DataFormatException on error parsing/decompressing the buffer + */ + public static ConcurrentDoubleHistogram decodeFromCompressedByteBuffer( + final ByteBuffer buffer, + final long minBarForHighestToLowestValueRatio) throws DataFormatException { + int cookie = buffer.getInt(); + if (!isCompressedDoubleHistogramCookie(cookie)) { + throw new IllegalArgumentException("The buffer does not contain a compressed DoubleHistogram"); + } + ConcurrentDoubleHistogram histogram = constructHistogramFromBuffer(cookie, buffer, + ConcurrentDoubleHistogram.class, ConcurrentHistogram.class, + minBarForHighestToLowestValueRatio); + return histogram; + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/ConcurrentHistogram.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/ConcurrentHistogram.java new file mode 100644 index 000000000..d6dc055f5 --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/ConcurrentHistogram.java @@ -0,0 +1,691 @@ +/** + * Written by Gil Tene of Azul Systems, and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.nio.ByteBuffer; +import java.util.concurrent.atomic.AtomicLongArray; +import java.util.concurrent.atomic.AtomicLongFieldUpdater; +import java.util.zip.DataFormatException; + +/** + *
<h3>An integer values High Dynamic Range (HDR) Histogram that supports safe concurrent recording operations.</h3>
+ * A ConcurrentHistogram guarantees lossless recording of values into the histogram even when the + * histogram is updated by multiple threads, and supports auto-resize and shift operations that may + * result from or occur concurrently with other recording operations. + *
<p>
+ * It is important to note that concurrent recording, auto-sizing, and value shifting are the only thread-safe + * behaviors provided by {@link ConcurrentHistogram}, and that it is not otherwise synchronized. Specifically, {@link + * ConcurrentHistogram} provides no implicit synchronization that would prevent the contents of the histogram + * from changing during queries, iterations, copies, or addition operations on the histogram. Callers wishing to make + * potentially concurrent, multi-threaded updates that would safely work in the presence of queries, copies, or + * additions of histogram objects should either take care to externally synchronize and/or order their access, + * use the {@link SynchronizedHistogram} variant, or (recommended) use {@link Recorder} or + * {@link SingleWriterRecorder} which are intended for this purpose. + *
<p>
+ * Auto-resizing: When constructed with no specified value range (or when auto-resize is turned on with {@link + * Histogram#setAutoResize}) a {@link ConcurrentHistogram} will auto-resize its dynamic range to include recorded values as + * they are encountered. Note that recording calls that cause auto-resizing may take longer to execute, as resizing + * incurs allocation and copying of internal data structures. + *
<p>
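+ * A minimal recording sketch (illustrative only; the recorded value and its unit are assumptions):
+ * <pre>{@code
+ * ConcurrentHistogram histogram = new ConcurrentHistogram(3);  // auto-resizing
+ * histogram.recordValue(latencyNanos);  // lossless under concurrent recording
+ * long max = histogram.getMaxValue();   // reads are not synchronized against writers
+ * }</pre>
+ * <p>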
+ * See package description for {@link org.HdrHistogram} for details. + */ + +@SuppressWarnings("unused") +public class ConcurrentHistogram extends Histogram { + + static final AtomicLongFieldUpdater totalCountUpdater = + AtomicLongFieldUpdater.newUpdater(ConcurrentHistogram.class, "totalCount"); + volatile long totalCount; + + volatile ConcurrentArrayWithNormalizingOffset activeCounts; + volatile ConcurrentArrayWithNormalizingOffset inactiveCounts; + transient WriterReaderPhaser wrp = new WriterReaderPhaser(); + + @Override + void setIntegerToDoubleValueConversionRatio(final double integerToDoubleValueConversionRatio) { + try { + wrp.readerLock(); + + inactiveCounts.setDoubleToIntegerValueConversionRatio(1.0 / integerToDoubleValueConversionRatio); + + // switch active and inactive: + ConcurrentArrayWithNormalizingOffset tmp = activeCounts; + activeCounts = inactiveCounts; + inactiveCounts = tmp; + + wrp.flipPhase(); + + inactiveCounts.setDoubleToIntegerValueConversionRatio(1.0 / integerToDoubleValueConversionRatio); + + // switch active and inactive again: + tmp = activeCounts; + activeCounts = inactiveCounts; + inactiveCounts = tmp; + + wrp.flipPhase(); + + // At this point, both active and inactive have normalizingIndexOffset safely set, + // and the switch in each was done without any writers using the wrong value in flight. + + } finally { + wrp.readerUnlock(); + } + super.setIntegerToDoubleValueConversionRatio(integerToDoubleValueConversionRatio); + } + + @Override + long getCountAtIndex(final int index) { + try { + wrp.readerLock(); + assert (countsArrayLength == activeCounts.length()); + assert (countsArrayLength == inactiveCounts.length()); + long activeCount = activeCounts.get( + normalizeIndex(index, activeCounts.getNormalizingIndexOffset(), activeCounts.length())); + long inactiveCount = inactiveCounts.get( + normalizeIndex(index, inactiveCounts.getNormalizingIndexOffset(), inactiveCounts.length())); + return activeCount + inactiveCount; + } finally { + wrp.readerUnlock(); + } + } + + @Override + long getCountAtNormalizedIndex(final int index) { + try { + wrp.readerLock(); + assert (countsArrayLength == activeCounts.length()); + assert (countsArrayLength == inactiveCounts.length()); + long activeCount = activeCounts.get(index); + long inactiveCount = inactiveCounts.get(index); + return activeCount + inactiveCount; + } finally { + wrp.readerUnlock(); + } + } + + @Override + void incrementCountAtIndex(final int index) { + long criticalValue = wrp.writerCriticalSectionEnter(); + try { + activeCounts.atomicIncrement( + normalizeIndex(index, activeCounts.getNormalizingIndexOffset(), activeCounts.length())); + } finally { + wrp.writerCriticalSectionExit(criticalValue); + } + } + + @Override + void addToCountAtIndex(final int index, final long value) { + long criticalValue = wrp.writerCriticalSectionEnter(); + try { + activeCounts.atomicAdd( + normalizeIndex(index, activeCounts.getNormalizingIndexOffset(), activeCounts.length()), value); + } finally { + wrp.writerCriticalSectionExit(criticalValue); + } + } + + @Override + void setCountAtIndex(final int index, final long value) { + try { + wrp.readerLock(); + assert (countsArrayLength == activeCounts.length()); + assert (countsArrayLength == inactiveCounts.length()); + activeCounts.lazySet( + normalizeIndex(index, activeCounts.getNormalizingIndexOffset(), activeCounts.length()), value); + inactiveCounts.lazySet( + normalizeIndex(index, inactiveCounts.getNormalizingIndexOffset(), + inactiveCounts.length()), 0); + } finally { + 
wrp.readerUnlock(); + } + } + + @Override + void setCountAtNormalizedIndex(final int index, final long value) { + try { + wrp.readerLock(); + assert (countsArrayLength == activeCounts.length()); + assert (countsArrayLength == inactiveCounts.length()); + inactiveCounts.lazySet(index, value); + activeCounts.lazySet(index, 0); + } finally { + wrp.readerUnlock(); + } + } + + @Override + void recordConvertedDoubleValue(final double value) { + long criticalValue = wrp.writerCriticalSectionEnter(); + try { + long integerValue = (long) (value * activeCounts.getDoubleToIntegerValueConversionRatio()); + int index = countsArrayIndex(integerValue); + activeCounts.atomicIncrement( + normalizeIndex(index, activeCounts.getNormalizingIndexOffset(), activeCounts.length())); + updateMinAndMax(integerValue); + incrementTotalCount(); + } finally { + wrp.writerCriticalSectionExit(criticalValue); + } + } + + @Override + public void recordConvertedDoubleValueWithCount(final double value, final long count) + throws ArrayIndexOutOfBoundsException { + long criticalValue = wrp.writerCriticalSectionEnter(); + try { + long integerValue = (long) (value * activeCounts.getDoubleToIntegerValueConversionRatio()); + int index = countsArrayIndex(integerValue); + activeCounts.atomicAdd( + normalizeIndex(index, activeCounts.getNormalizingIndexOffset(), activeCounts.length()), count); + updateMinAndMax(integerValue); + addToTotalCount(count); + } finally { + wrp.writerCriticalSectionExit(criticalValue); + } + } + + @Override + int getNormalizingIndexOffset() { + return activeCounts.getNormalizingIndexOffset(); + } + + @Override + void setNormalizingIndexOffset(final int normalizingIndexOffset) { + setNormalizingIndexOffset(normalizingIndexOffset, 0, + false, getIntegerToDoubleValueConversionRatio()); + } + + private void setNormalizingIndexOffset( + final int newNormalizingIndexOffset, + final int shiftedAmount, + final boolean lowestHalfBucketPopulated, + final double newIntegerToDoubleValueConversionRatio) { + try { + wrp.readerLock(); + + assert (countsArrayLength == activeCounts.length()); + assert (countsArrayLength == inactiveCounts.length()); + + assert (activeCounts.getNormalizingIndexOffset() == inactiveCounts.getNormalizingIndexOffset()); + + if (newNormalizingIndexOffset == activeCounts.getNormalizingIndexOffset()) { + return; // Nothing to do. + } + + setNormalizingIndexOffsetForInactive(newNormalizingIndexOffset, shiftedAmount, + lowestHalfBucketPopulated, newIntegerToDoubleValueConversionRatio); + + // switch active and inactive: + ConcurrentArrayWithNormalizingOffset tmp = activeCounts; + activeCounts = inactiveCounts; + inactiveCounts = tmp; + + wrp.flipPhase(); + + setNormalizingIndexOffsetForInactive(newNormalizingIndexOffset, shiftedAmount, + lowestHalfBucketPopulated, newIntegerToDoubleValueConversionRatio); + + // switch active and inactive again: + tmp = activeCounts; + activeCounts = inactiveCounts; + inactiveCounts = tmp; + + wrp.flipPhase(); + + // At this point, both active and inactive have normalizingIndexOffset safely set, + // and the switch in each was done without any writers using the wrong value in flight. 
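+ // (Each flipPhase() blocks until every writer critical section that entered
+ // against the previously-active counts array has exited, so no recording can
+ // straddle the active/inactive swap.)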
+ + } finally { + wrp.readerUnlock(); + } + } + + private void setNormalizingIndexOffsetForInactive(final int newNormalizingIndexOffset, + final int shiftedAmount, + final boolean lowestHalfBucketPopulated, + final double newIntegerToDoubleValueConversionRatio) { + int zeroIndex; + long inactiveZeroValueCount; + + // Save and clear the inactive 0 value count: + zeroIndex = normalizeIndex(0, inactiveCounts.getNormalizingIndexOffset(), + inactiveCounts.length()); + inactiveZeroValueCount = inactiveCounts.get(zeroIndex); + inactiveCounts.lazySet(zeroIndex, 0); + + // Change the normalizingIndexOffset on the current inactiveCounts: + inactiveCounts.setNormalizingIndexOffset(newNormalizingIndexOffset); + + // Handle the inactive lowest half bucket: + if ((shiftedAmount > 0) && lowestHalfBucketPopulated) { + shiftLowestInactiveHalfBucketContentsLeft(shiftedAmount, zeroIndex); + } + + // Restore the inactive 0 value count: + zeroIndex = normalizeIndex(0, inactiveCounts.getNormalizingIndexOffset(), inactiveCounts.length()); + inactiveCounts.lazySet(zeroIndex, inactiveZeroValueCount); + + inactiveCounts.setDoubleToIntegerValueConversionRatio(1.0 / newIntegerToDoubleValueConversionRatio); + } + + private void shiftLowestInactiveHalfBucketContentsLeft(final int shiftAmount, final int preShiftZeroIndex) { + final int numberOfBinaryOrdersOfMagnitude = shiftAmount >> subBucketHalfCountMagnitude; + + // The lowest inactive half-bucket (not including the 0 value) is special: unlike all other half + // buckets, the lowest half bucket values cannot be scaled by simply changing the + // normalizing offset. Instead, they must be individually re-recorded at the new + // scale, and cleared from the current one. + // + // We know that all half buckets "below" the current lowest one are full of 0s, because + // we would have overflowed otherwise. So we need to shift the values in the current + // lowest half bucket into that range (including the current lowest half bucket itself). + // Iterating up from the lowermost non-zero "from slot" and copying values to the newly + // scaled "to slot" (and then zeroing the "from slot"), will work in a single pass, + // because the scale "to slot" index will always be a lower index than its or any + // preceding non-scaled "from slot" index: + // + // (Note that we specifically avoid slot 0, as it is directly handled in the outer case) + + for (int fromIndex = 1; fromIndex < subBucketHalfCount; fromIndex++) { + long toValue = valueFromIndex(fromIndex) << numberOfBinaryOrdersOfMagnitude; + int toIndex = countsArrayIndex(toValue); + int normalizedToIndex = + normalizeIndex(toIndex, inactiveCounts.getNormalizingIndexOffset(), inactiveCounts.length()); + long countAtFromIndex = inactiveCounts.get(fromIndex + preShiftZeroIndex); + inactiveCounts.lazySet(normalizedToIndex, countAtFromIndex); + inactiveCounts.lazySet(fromIndex + preShiftZeroIndex, 0); + } + + // Note that the above loop only creates O(N) work for histograms that have values in + // the lowest half-bucket (excluding the 0 value). Histograms that never have values + // there (e.g. all integer value histograms used as internal storage in DoubleHistograms) + // will never loop, and their shifts will remain O(1). 
+ } + + @Override + void shiftNormalizingIndexByOffset(final int offsetToAdd, + final boolean lowestHalfBucketPopulated, + final double newIntegerToDoubleValueConversionRatio) { + try { + wrp.readerLock(); + assert (countsArrayLength == activeCounts.length()); + assert (countsArrayLength == inactiveCounts.length()); + int newNormalizingIndexOffset = getNormalizingIndexOffset() + offsetToAdd; + setNormalizingIndexOffset(newNormalizingIndexOffset, + offsetToAdd, + lowestHalfBucketPopulated, + newIntegerToDoubleValueConversionRatio + ); + } finally { + wrp.readerUnlock(); + } + } + + ConcurrentArrayWithNormalizingOffset allocateArray(int length, int normalizingIndexOffset) { + return new AtomicLongArrayWithNormalizingOffset(length, normalizingIndexOffset); + } + + @Override + void resize(final long newHighestTrackableValue) { + try { + wrp.readerLock(); + + assert (countsArrayLength == activeCounts.length()); + assert (countsArrayLength == inactiveCounts.length()); + + int newArrayLength = determineArrayLengthNeeded(newHighestTrackableValue); + int countsDelta = newArrayLength - countsArrayLength; + + if (countsDelta <= 0) { + // This resize need was already covered by a concurrent resize op. + return; + } + + // Allocate both counts arrays here, so if one allocation fails, neither will "take": + ConcurrentArrayWithNormalizingOffset newInactiveCounts1 = + allocateArray(newArrayLength, inactiveCounts.getNormalizingIndexOffset()); + ConcurrentArrayWithNormalizingOffset newInactiveCounts2 = + allocateArray(newArrayLength, activeCounts.getNormalizingIndexOffset()); + + + // Resize the current inactiveCounts: + ConcurrentArrayWithNormalizingOffset oldInactiveCounts = inactiveCounts; + inactiveCounts = newInactiveCounts1; + + // Copy inactive contents to newly sized inactiveCounts: + copyInactiveCountsContentsOnResize(oldInactiveCounts, countsDelta); + + // switch active and inactive: + ConcurrentArrayWithNormalizingOffset tmp = activeCounts; + activeCounts = inactiveCounts; + inactiveCounts = tmp; + + wrp.flipPhase(); + + // Resize the newly inactiveCounts: + oldInactiveCounts = inactiveCounts; + inactiveCounts = newInactiveCounts2; + + // Copy inactive contents to newly sized inactiveCounts: + copyInactiveCountsContentsOnResize(oldInactiveCounts, countsDelta); + + // switch active and inactive again: + tmp = activeCounts; + activeCounts = inactiveCounts; + inactiveCounts = tmp; + + wrp.flipPhase(); + + // At this point, both active and inactive have been safely resized, + // and the switch in each was done without any writers modifying it in flight. + + // We resized things. 
We can now make the histogram establish size accordingly for future recordings: + establishSize(newHighestTrackableValue); + + assert (countsArrayLength == activeCounts.length()); + assert (countsArrayLength == inactiveCounts.length()); + + } finally { + wrp.readerUnlock(); + } + } + + void copyInactiveCountsContentsOnResize( + ConcurrentArrayWithNormalizingOffset oldInactiveCounts, int countsDelta) { + int oldNormalizedZeroIndex = + normalizeIndex(0, + oldInactiveCounts.getNormalizingIndexOffset(), + oldInactiveCounts.length()); + + if (oldNormalizedZeroIndex == 0) { + // Copy old inactive contents to (current) newly sized inactiveCounts, in place: + for (int i = 0; i < oldInactiveCounts.length(); i++) { + inactiveCounts.lazySet(i, oldInactiveCounts.get(i)); + } + } else { + // We need to shift the stuff from the zero index and up to the end of the array: + + // Copy everything up to the oldNormalizedZeroIndex in place: + for (int fromIndex = 0; fromIndex < oldNormalizedZeroIndex; fromIndex++) { + inactiveCounts.lazySet(fromIndex, oldInactiveCounts.get(fromIndex)); + } + + // Copy everything from the oldNormalizedZeroIndex to the end with an index delta shift: + for (int fromIndex = oldNormalizedZeroIndex; fromIndex < oldInactiveCounts.length(); fromIndex++) { + int toIndex = fromIndex + countsDelta; + inactiveCounts.lazySet(toIndex, oldInactiveCounts.get(fromIndex)); + } + } + } + + @Override + public void setAutoResize(final boolean autoResize) { + this.autoResize = true; + } + + @Override + void clearCounts() { + try { + wrp.readerLock(); + assert (countsArrayLength == activeCounts.length()); + assert (countsArrayLength == inactiveCounts.length()); + for (int i = 0; i < activeCounts.length(); i++) { + activeCounts.lazySet(i, 0); + inactiveCounts.lazySet(i, 0); + } + totalCountUpdater.set(this, 0); + } finally { + wrp.readerUnlock(); + } + } + + @Override + public ConcurrentHistogram copy() { + ConcurrentHistogram copy = new ConcurrentHistogram(this); + copy.add(this); + return copy; + } + + @Override + public ConcurrentHistogram copyCorrectedForCoordinatedOmission(final long expectedIntervalBetweenValueSamples) { + ConcurrentHistogram toHistogram = new ConcurrentHistogram(this); + toHistogram.addWhileCorrectingForCoordinatedOmission(this, expectedIntervalBetweenValueSamples); + return toHistogram; + } + + @Override + public long getTotalCount() { + return totalCountUpdater.get(this); + } + + @Override + void setTotalCount(final long totalCount) { + totalCountUpdater.set(this, totalCount); + } + + @Override + void incrementTotalCount() { + totalCountUpdater.incrementAndGet(this); + } + + @Override + void addToTotalCount(final long value) { + totalCountUpdater.addAndGet(this, value); + } + + @Override + int _getEstimatedFootprintInBytes() { + return (512 + (2 * 8 * activeCounts.length())); + } + + /** + * Construct an auto-resizing ConcurrentHistogram with a lowest discernible value of 1 and an auto-adjusting + * highestTrackableValue. Can auto-resize up to track values up to (Long.MAX_VALUE / 2). + * + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5. 
+ */ + public ConcurrentHistogram(final int numberOfSignificantValueDigits) { + this(1, 2, numberOfSignificantValueDigits); + setAutoResize(true); + } + + /** + * Construct a ConcurrentHistogram given the Highest value to be tracked and a number of significant decimal + * digits. The histogram will be constructed to implicitly track (distinguish from 0) values as low as 1. + * + * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive + * integer that is {@literal >=} 2. + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5. + */ + public ConcurrentHistogram(final long highestTrackableValue, final int numberOfSignificantValueDigits) { + this(1, highestTrackableValue, numberOfSignificantValueDigits); + } + + /** + * Construct a ConcurrentHistogram given the Lowest and Highest values to be tracked and a number of significant + * decimal digits. Providing a lowestDiscernibleValue is useful is situations where the units used + * for the histogram's values are much smaller that the minimal accuracy required. E.g. when tracking + * time values stated in nanosecond units, where the minimal accuracy required is a microsecond, the + * proper value for lowestDiscernibleValue would be 1000. + * + * @param lowestDiscernibleValue The lowest value that can be tracked (distinguished from 0) by the histogram. + * Must be a positive integer that is {@literal >=} 1. May be internally rounded + * down to nearest power of 2. + * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive + * integer that is {@literal >=} (2 * lowestDiscernibleValue). + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5. + */ + public ConcurrentHistogram(final long lowestDiscernibleValue, final long highestTrackableValue, + final int numberOfSignificantValueDigits) { + this(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits, + true); + } + + /** + * Construct a histogram with the same range settings as a given source histogram, + * duplicating the source's start/end timestamps (but NOT it's contents) + * @param source The source histogram to duplicate + */ + public ConcurrentHistogram(final AbstractHistogram source) { + this(source, true); + } + + ConcurrentHistogram(final AbstractHistogram source, boolean allocateCountsArray) { + super(source,false); + if (allocateCountsArray) { + activeCounts = new AtomicLongArrayWithNormalizingOffset(countsArrayLength, 0); + inactiveCounts = new AtomicLongArrayWithNormalizingOffset(countsArrayLength, 0); + } + wordSizeInBytes = 8; + } + + ConcurrentHistogram(final long lowestDiscernibleValue, final long highestTrackableValue, + final int numberOfSignificantValueDigits, boolean allocateCountsArray) { + super(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits, + false); + if (allocateCountsArray) { + activeCounts = new AtomicLongArrayWithNormalizingOffset(countsArrayLength, 0); + inactiveCounts = new AtomicLongArrayWithNormalizingOffset(countsArrayLength, 0); + } + wordSizeInBytes = 8; + } + + /** + * Construct a new histogram by decoding it from a ByteBuffer. 
+ * @param buffer The buffer to decode from + * @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high + * @return The newly constructed histogram + */ + public static ConcurrentHistogram decodeFromByteBuffer(final ByteBuffer buffer, + final long minBarForHighestTrackableValue) { + return decodeFromByteBuffer(buffer, ConcurrentHistogram.class, minBarForHighestTrackableValue); + } + + /** + * Construct a new histogram by decoding it from a compressed form in a ByteBuffer. + * @param buffer The buffer to decode from + * @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high + * @return The newly constructed histogram + * @throws DataFormatException on error parsing/decompressing the buffer + */ + public static ConcurrentHistogram decodeFromCompressedByteBuffer(final ByteBuffer buffer, + final long minBarForHighestTrackableValue) + throws DataFormatException { + return decodeFromCompressedByteBuffer(buffer, ConcurrentHistogram.class, minBarForHighestTrackableValue); + } + + /** + * Construct a new ConcurrentHistogram by decoding it from a String containing a base64 encoded + * compressed histogram representation. + * + * @param base64CompressedHistogramString A string containing a base64 encoding of a compressed histogram + * @return A ConcurrentHistogram decoded from the string + * @throws DataFormatException on error parsing/decompressing the input + */ + public static ConcurrentHistogram fromString(final String base64CompressedHistogramString) + throws DataFormatException { + return decodeFromCompressedByteBuffer( + ByteBuffer.wrap(Base64Helper.parseBase64Binary(base64CompressedHistogramString)), + 0); + } + + private void readObject(final ObjectInputStream o) + throws IOException, ClassNotFoundException { + o.defaultReadObject(); + wrp = new WriterReaderPhaser(); + } + + @Override + synchronized void fillBufferFromCountsArray(final ByteBuffer buffer) { + try { + wrp.readerLock(); + super.fillBufferFromCountsArray(buffer); + } finally { + wrp.readerUnlock(); + } + } + + interface ConcurrentArrayWithNormalizingOffset { + + int getNormalizingIndexOffset(); + + void setNormalizingIndexOffset(int normalizingIndexOffset); + + double getDoubleToIntegerValueConversionRatio(); + + void setDoubleToIntegerValueConversionRatio(double doubleToIntegerValueConversionRatio); + + int getEstimatedFootprintInBytes(); + + long get(int index); + + void atomicIncrement(int index); + + void atomicAdd(int index, long valueToAdd); + + void lazySet(int index, long newValue); + + int length(); + } + + static class AtomicLongArrayWithNormalizingOffset extends AtomicLongArray + implements ConcurrentArrayWithNormalizingOffset { + private int normalizingIndexOffset; + private double doubleToIntegerValueConversionRatio; + + AtomicLongArrayWithNormalizingOffset(int length, int normalizingIndexOffset) { + super(length); + this.normalizingIndexOffset = normalizingIndexOffset; + } + + @Override + public int getNormalizingIndexOffset() { + return normalizingIndexOffset; + } + + @Override + public void setNormalizingIndexOffset(int normalizingIndexOffset) { + this.normalizingIndexOffset = normalizingIndexOffset; + } + + @Override + public double getDoubleToIntegerValueConversionRatio() { + return doubleToIntegerValueConversionRatio; + } + + @Override + public void setDoubleToIntegerValueConversionRatio(double doubleToIntegerValueConversionRatio) { + this.doubleToIntegerValueConversionRatio = doubleToIntegerValueConversionRatio; + } + + @Override + 
public int getEstimatedFootprintInBytes() { + return 256 + (8 * this.length()); + } + + @Override + public void atomicIncrement(int index) { + incrementAndGet(index); + } + + @Override + public void atomicAdd(int index, long valueToAdd) { + addAndGet(index, valueToAdd); + } + + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/DoubleAllValuesIterator.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/DoubleAllValuesIterator.java new file mode 100644 index 000000000..e9ffe31bc --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/DoubleAllValuesIterator.java @@ -0,0 +1,53 @@ +/** + * Written by Gil Tene of Azul Systems, and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram; + +import java.util.Iterator; + +/** + * Used for iterating through {@link DoubleHistogram} values using the finest granularity steps supported by the + * underlying representation. The iteration steps through all possible unit value levels, regardless of whether or not + * there were recorded values for that value level, and terminates when all recorded histogram values are exhausted. + */ +public class DoubleAllValuesIterator implements Iterator { + private final AllValuesIterator integerAllValuesIterator; + private final DoubleHistogramIterationValue iterationValue; + DoubleHistogram histogram; + + /** + * Reset iterator for re-use in a fresh iteration over the same histogram data set. + */ + public void reset() { + integerAllValuesIterator.reset(); + } + + /** + * @param histogram The histogram this iterator will operate on + */ + public DoubleAllValuesIterator(final DoubleHistogram histogram) { + this.histogram = histogram; + integerAllValuesIterator = new AllValuesIterator(histogram.integerValuesHistogram); + iterationValue = new DoubleHistogramIterationValue(integerAllValuesIterator.currentIterationValue); + } + + @Override + public boolean hasNext() { + return integerAllValuesIterator.hasNext(); + } + + @Override + public DoubleHistogramIterationValue next() { + integerAllValuesIterator.next(); + return iterationValue; + } + + @Override + public void remove() { + integerAllValuesIterator.remove(); + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/DoubleHistogram.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/DoubleHistogram.java new file mode 100644 index 000000000..ef78e4f8f --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/DoubleHistogram.java @@ -0,0 +1,1731 @@ +/* + * Written by Gil Tene of Azul Systems, and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram; + +import java.io.*; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.nio.ByteBuffer; +import java.util.Iterator; +import java.util.zip.DataFormatException; +import java.util.zip.Deflater; + +/** + *
<h3>A floating point values High Dynamic Range (HDR) Histogram</h3> + * <p>
+ * It is important to note that {@link DoubleHistogram} is not thread-safe, and does not support safe concurrent + * recording by multiple threads. If concurrent operation is required, consider using + * {@link ConcurrentDoubleHistogram}, {@link SynchronizedDoubleHistogram}, + * or (recommended) {@link DoubleRecorder} or {@link SingleWriterDoubleRecorder} which are intended for this purpose. + *
<p>
+ * {@link DoubleHistogram} supports the recording and analysis of sampled data value counts across a + * configurable dynamic range of floating point (double) values, with configurable value precision within the range. + * Dynamic range is expressed as a ratio between the highest and lowest non-zero values trackable within the histogram + * at any given time. Value precision is expressed as the number of significant [decimal] digits in the value recording, + * and provides control over value quantization behavior across the value range and the subsequent value resolution at + * any given level. + *
<p>
+ * Auto-ranging: Unlike integer value based histograms, the specific value range tracked by a {@link + * DoubleHistogram} is not specified upfront. Only the dynamic range of values that the histogram can cover is + * (optionally) specified. E.g. when a {@link DoubleHistogram} is created to track a dynamic range of + * 3600000000000 (enough to track values from a nanosecond to an hour), values could be recorded into it in any + * consistent unit of time as long as the ratio between the highest and lowest non-zero values stays within the + * specified dynamic range, so recording in units of nanoseconds (1.0 thru 3600000000000.0), milliseconds (0.000001 + * thru 3600000.0), seconds (0.000000001 thru 3600.0), or hours (1/3.6E12 thru 1.0) will all work just as well. + *

+ * Auto-resizing: When constructed with no specified dynamic range (or when auto-resize is turned on with {@link + * DoubleHistogram#setAutoResize}) a {@link DoubleHistogram} will auto-resize its dynamic range to + * include recorded values as they are encountered. Note that recording calls that cause auto-resizing may take + * longer to execute, as resizing incurs allocation and copying of internal data structures. + *
+ * Attempts to record non-zero values that range outside of the specified dynamic range (or exceed the limits of + * the dynamic range when auto-resizing) may result in {@link ArrayIndexOutOfBoundsException} exceptions, either + * due to overflow or underflow conditions. These exceptions will only be thrown if recording the value would have + * resulted in discarding or losing the required value precision of values already recorded in the histogram. + *
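For orientation, a minimal usage sketch (not part of the patch; the class name and the recorded values are illustrative, while the constructor and methods are the ones added in this file):

import io.prometheus.client.HdrHistogram.DoubleHistogram;

public class DoubleHistogramSketch {
    public static void main(String[] args) {
        // The single-argument constructor creates an auto-resizing histogram
        // with 3 significant decimal digits of value precision.
        DoubleHistogram histogram = new DoubleHistogram(3);

        // Units are the caller's choice; the histogram auto-ranges around the
        // first values observed, and only the highest-to-lowest ratio of
        // recorded values must stay within the dynamic range.
        for (int i = 1; i <= 10000; i++) {
            histogram.recordValue(i * 0.0001);
        }

        System.out.println("median = " + histogram.getValueAtPercentile(50.0));
        System.out.println("p99    = " + histogram.getValueAtPercentile(99.0));
        System.out.println("max    = " + histogram.getMaxValue());
    }
}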
+ * See package description for {@link org.HdrHistogram} for details. + */ +public class DoubleHistogram extends EncodableHistogram implements DoubleValueRecorder, Serializable { + private static final double highestAllowedValueEver; // A value that will keep us from multiplying into infinity. + + private long configuredHighestToLowestValueRatio; + + private volatile double currentLowestValueInAutoRange; + private volatile double currentHighestValueLimitInAutoRange; + + AbstractHistogram integerValuesHistogram; + +// volatile double doubleToIntegerValueConversionRatio; +// volatile double integerToDoubleValueConversionRatio; + + private boolean autoResize = false; + + /** + * Construct a new auto-resizing DoubleHistogram using a precision stated as a number + * of significant decimal digits. + * + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5. + */ + public DoubleHistogram(final int numberOfSignificantValueDigits) { + this(2, numberOfSignificantValueDigits, Histogram.class, null); + setAutoResize(true); + } + + /** + * Construct a new auto-resizing DoubleHistogram using a precision stated as a number + * of significant decimal digits. + * + * The {@link DoubleHistogram} will use the specified AbstractHistogram subclass + * for tracking internal counts (e.g. {@link Histogram}, + * {@link ConcurrentHistogram}, {@link SynchronizedHistogram}, + * {@link IntCountsHistogram}, {@link ShortCountsHistogram}). + * + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5. + * @param internalCountsHistogramClass The class to use for internal counts tracking + */ + public DoubleHistogram(final int numberOfSignificantValueDigits, + final Class internalCountsHistogramClass) { + this(2, numberOfSignificantValueDigits, internalCountsHistogramClass, null); + setAutoResize(true); + } + + /** + * Construct a new DoubleHistogram with the specified dynamic range (provided in + * {@code highestToLowestValueRatio}) and using a precision stated as a number of significant + * decimal digits. + * + * @param highestToLowestValueRatio specifies the dynamic range to use + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5. + */ + public DoubleHistogram(final long highestToLowestValueRatio, final int numberOfSignificantValueDigits) { + this(highestToLowestValueRatio, numberOfSignificantValueDigits, Histogram.class); + } + + /** + * Construct a new DoubleHistogram with the specified dynamic range (provided in + * {@code highestToLowestValueRatio}) and using a precision stated as a number of significant + * decimal digits. + * + * The {@link DoubleHistogram} will use the specified AbstractHistogram subclass + * for tracking internal counts (e.g. {@link Histogram}, + * {@link ConcurrentHistogram}, {@link SynchronizedHistogram}, + * {@link IntCountsHistogram}, {@link ShortCountsHistogram}). + * + * @param highestToLowestValueRatio specifies the dynamic range to use. + * @param numberOfSignificantValueDigits Specifies the precision to use. 
This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5. + * @param internalCountsHistogramClass The class to use for internal counts tracking + */ + protected DoubleHistogram(final long highestToLowestValueRatio, + final int numberOfSignificantValueDigits, + final Class internalCountsHistogramClass) { + this(highestToLowestValueRatio, numberOfSignificantValueDigits, internalCountsHistogramClass, null); + } + + DoubleHistogram(final long highestToLowestValueRatio, + final int numberOfSignificantValueDigits, + final Class internalCountsHistogramClass, + AbstractHistogram internalCountsHistogram) { + this( + highestToLowestValueRatio, + numberOfSignificantValueDigits, + internalCountsHistogramClass, + internalCountsHistogram, + false + ); + } + + private DoubleHistogram(final long highestToLowestValueRatio, + final int numberOfSignificantValueDigits, + final Class internalCountsHistogramClass, + AbstractHistogram internalCountsHistogram, + boolean mimicInternalModel) { + try { + if (highestToLowestValueRatio < 2) { + throw new IllegalArgumentException("highestToLowestValueRatio must be >= 2"); + } + + if ((highestToLowestValueRatio * Math.pow(10.0, numberOfSignificantValueDigits)) >= (1L << 61)) { + throw new IllegalArgumentException( + "highestToLowestValueRatio * (10^numberOfSignificantValueDigits) must be < (1L << 61)"); + } + if (internalCountsHistogramClass == AtomicHistogram.class) { + throw new IllegalArgumentException( + "AtomicHistogram cannot be used as an internal counts histogram (does not support shifting)." + + " Use ConcurrentHistogram instead."); + } + + long integerValueRange = deriveIntegerValueRange(highestToLowestValueRatio, numberOfSignificantValueDigits); + + final AbstractHistogram valuesHistogram; + double initialLowestValueInAutoRange; + + if (internalCountsHistogram == null) { + // Create the internal counts histogram: + Constructor histogramConstructor = + internalCountsHistogramClass.getConstructor(long.class, long.class, int.class); + + valuesHistogram = + histogramConstructor.newInstance( + 1L, + (integerValueRange - 1), + numberOfSignificantValueDigits + ); + + // We want the auto-ranging to tend towards using a value range that will result in using the + // lower tracked value ranges and leave the higher end empty unless the range is actually used. + // This is most easily done by making early recordings force-shift the lower value limit to + // accommodate them (forcing a force-shift for the higher values would achieve the opposite). 
+ // We will therefore start with a very high value range, and let the recordings autoAdjust + // downwards from there: + initialLowestValueInAutoRange = Math.pow(2.0, 800); + } else if (mimicInternalModel) { + Constructor histogramConstructor = + internalCountsHistogramClass.getConstructor(AbstractHistogram.class); + + valuesHistogram = histogramConstructor.newInstance(internalCountsHistogram); + + initialLowestValueInAutoRange = Math.pow(2.0, 800); + } else { + // Verify that the histogram we got matches: + if ((internalCountsHistogram.getLowestDiscernibleValue() != 1) || + (internalCountsHistogram.getHighestTrackableValue() != integerValueRange - 1) || + internalCountsHistogram.getNumberOfSignificantValueDigits() != numberOfSignificantValueDigits) { + throw new IllegalStateException("integer values histogram does not match stated parameters."); + } + valuesHistogram = internalCountsHistogram; + // Derive initialLowestValueInAutoRange from valuesHistogram's integerToDoubleValueConversionRatio: + initialLowestValueInAutoRange = + internalCountsHistogram.getIntegerToDoubleValueConversionRatio() * + internalCountsHistogram.subBucketHalfCount; + } + + // Set our double tracking range and internal histogram: + init(highestToLowestValueRatio, initialLowestValueInAutoRange, valuesHistogram); + + } catch (NoSuchMethodException ex) { + throw new IllegalArgumentException(ex); + } catch (IllegalAccessException ex) { + throw new IllegalArgumentException(ex); + } catch (InstantiationException ex) { + throw new IllegalArgumentException(ex); + } catch (InvocationTargetException ex) { + throw new IllegalArgumentException(ex); + } + } + + /** + * Construct a {@link DoubleHistogram} with the same range settings as a given source, + * duplicating the source's start/end timestamps (but NOT it's contents) + * @param source The source histogram to duplicate + */ + public DoubleHistogram(final DoubleHistogram source) { + this(source.configuredHighestToLowestValueRatio, + source.getNumberOfSignificantValueDigits(), + source.integerValuesHistogram.getClass(), + source.integerValuesHistogram, + true); + this.autoResize = source.autoResize; + setTrackableValueRange(source.currentLowestValueInAutoRange, source.currentHighestValueLimitInAutoRange); + } + + private void init(final long configuredHighestToLowestValueRatio, final double lowestTrackableUnitValue, + final AbstractHistogram integerValuesHistogram) { + this.configuredHighestToLowestValueRatio = configuredHighestToLowestValueRatio; + this.integerValuesHistogram = integerValuesHistogram; + long internalHighestToLowestValueRatio = + deriveInternalHighestToLowestValueRatio(configuredHighestToLowestValueRatio); + setTrackableValueRange(lowestTrackableUnitValue, lowestTrackableUnitValue * internalHighestToLowestValueRatio); + } + + private void setTrackableValueRange(final double lowestValueInAutoRange, final double highestValueInAutoRange) { + this.currentLowestValueInAutoRange = lowestValueInAutoRange; + this.currentHighestValueLimitInAutoRange = highestValueInAutoRange; + double integerToDoubleValueConversionRatio = lowestValueInAutoRange / getLowestTrackingIntegerValue(); + integerValuesHistogram.setIntegerToDoubleValueConversionRatio(integerToDoubleValueConversionRatio); + } + + double getDoubleToIntegerValueConversionRatio() { + return integerValuesHistogram.getDoubleToIntegerValueConversionRatio(); + } + + // + // + // Auto-resizing control: + // + // + + public boolean isAutoResize() { + return autoResize; + } + + public void setAutoResize(boolean 
autoResize) { + this.autoResize = autoResize; + } + + // + // + // + // Value recording support: + // + // + // + + /** + * Record a value in the histogram + * + * @param value The value to be recorded + * @throws ArrayIndexOutOfBoundsException (may throw) if value cannot be covered by the histogram's range + */ + @Override + public void recordValue(final double value) throws ArrayIndexOutOfBoundsException { + recordSingleValue(value); + } + + /** + * Record a value in the histogram (adding to the value's current count) + * + * @param value The value to be recorded + * @param count The number of occurrences of this value to record + * @throws ArrayIndexOutOfBoundsException (may throw) if value cannot be covered by the histogram's range + */ + @Override + public void recordValueWithCount(final double value, final long count) throws ArrayIndexOutOfBoundsException { + recordCountAtValue(count, value); + } + + /** + * Record a value in the histogram. + *
+ * To compensate for the loss of sampled values when a recorded value is larger than the expected + * interval between value samples, Histogram will auto-generate an additional series of decreasingly-smaller + * (down to the expectedIntervalBetweenValueSamples) value records. + *
+ * Note: This is an at-recording correction method, as opposed to the post-recording correction method provided + * by {@link #copyCorrectedForCoordinatedOmission(double)}. + * The use cases for these two methods are mutually exclusive, and only one of the two should be used on + * a given data set to correct for the same coordinated omission issue. + *
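To make the two correction styles concrete, a hedged sketch (the histogram names and the 10 ms interval are illustrative):

double expectedInterval = 10.0; // e.g. one sample expected every 10 ms

// At-recording correction: a 500 ms observation also back-fills synthetic
// samples at 490, 480, ... down to the expected interval.
DoubleHistogram atRecording = new DoubleHistogram(3);
atRecording.recordValueWithExpectedInterval(500.0, expectedInterval);

// Post-recording correction: record raw values, then derive a corrected copy.
// Use one style or the other on a given data set, never both.
DoubleHistogram raw = new DoubleHistogram(3);
raw.recordValue(500.0);
DoubleHistogram corrected = raw.copyCorrectedForCoordinatedOmission(expectedInterval);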
+ * See notes in the description of the Histogram calls for an illustration of why this corrective behavior is + * important. + * + * @param value The value to record + * @param expectedIntervalBetweenValueSamples If expectedIntervalBetweenValueSamples is larger than 0, add + * auto-generated value records as appropriate if value is larger + * than expectedIntervalBetweenValueSamples + * @throws ArrayIndexOutOfBoundsException (may throw) if value cannot be covered by the histogram's range + */ + @Override + public void recordValueWithExpectedInterval(final double value, final double expectedIntervalBetweenValueSamples) + throws ArrayIndexOutOfBoundsException { + recordValueWithCountAndExpectedInterval(value, 1, expectedIntervalBetweenValueSamples); + } + + private void recordCountAtValue(final long count, final double value) throws ArrayIndexOutOfBoundsException { + int throwCount = 0; + while (true) { + if ((value < currentLowestValueInAutoRange) || (value >= currentHighestValueLimitInAutoRange)) { + // Zero is valid and needs no auto-ranging, but also rare enough that we should deal + // with it on the slow path... + autoAdjustRangeForValue(value); + } + try { + integerValuesHistogram.recordConvertedDoubleValueWithCount(value, count); + return; + } catch (ArrayIndexOutOfBoundsException ex) { + // A race that would pass the auto-range check above and would still take an AIOOB + // can only occur due to a value that would have been valid becoming invalid due + // to a concurrent adjustment operation. Such adjustment operations can happen no + // more than 64 times in the entire lifetime of the Histogram, which makes it safe + // to retry with no fear of live-locking. + if (++throwCount > 64) { + // For the retry check to not detect an out of range attempt after 64 retries + // should be theoretically impossible, and would indicate a bug. + throw new ArrayIndexOutOfBoundsException( + "BUG: Unexpected non-transient AIOOB Exception caused by:\n" + ex); + } + } + } + } + + private void recordSingleValue(final double value) throws ArrayIndexOutOfBoundsException { + int throwCount = 0; + while (true) { + if ((value < currentLowestValueInAutoRange) || (value >= currentHighestValueLimitInAutoRange)) { + // Zero is valid and needs no auto-ranging, but also rare enough that we should deal + // with it on the slow path... + autoAdjustRangeForValue(value); + } + try { + integerValuesHistogram.recordConvertedDoubleValue(value); + return; + } catch (ArrayIndexOutOfBoundsException ex) { + // A race that would pass the auto-range check above and would still take an AIOOB + // can only occur due to a value that would have been valid becoming invalid due + // to a concurrent adjustment operation. Such adjustment operations can happen no + // more than 64 times in the entire lifetime of the Histogram, which makes it safe + // to retry with no fear of live-locking. + if (++throwCount > 64) { + // For the retry check to not detect an out of range attempt after 64 retries + // should be theoretically impossible, and would indicate a bug. 
+ throw new ArrayIndexOutOfBoundsException( + "BUG: Unexpected non-transient AIOOB Exception caused by:\n" + ex); + } + } + } + } + + private void recordValueWithCountAndExpectedInterval(final double value, final long count, + final double expectedIntervalBetweenValueSamples) + throws ArrayIndexOutOfBoundsException { + recordCountAtValue(count, value); + if (expectedIntervalBetweenValueSamples <= 0) + return; + for (double missingValue = value - expectedIntervalBetweenValueSamples; + missingValue >= expectedIntervalBetweenValueSamples; + missingValue -= expectedIntervalBetweenValueSamples) { + recordCountAtValue(count, missingValue); + } + } + + // + // + // + // Shift and auto-ranging support: + // + // + // + + private void autoAdjustRangeForValue(final double value) { + // Zero is always valid, and doesn't need auto-range adjustment: + if (value == 0.0) { + return; + } + autoAdjustRangeForValueSlowPath(value); + } + + private synchronized void autoAdjustRangeForValueSlowPath(final double value) { + try { + if (value < currentLowestValueInAutoRange) { + if (value < 0.0) { + throw new ArrayIndexOutOfBoundsException("Negative values cannot be recorded"); + } + do { + int shiftAmount = + findCappedContainingBinaryOrderOfMagnitude( + Math.ceil(currentLowestValueInAutoRange / value) - 1.0); + shiftCoveredRangeToTheRight(shiftAmount); + } + while (value < currentLowestValueInAutoRange); + } else if (value >= currentHighestValueLimitInAutoRange) { + if (value > highestAllowedValueEver) { + throw new ArrayIndexOutOfBoundsException( + "Values above " + highestAllowedValueEver + " cannot be recorded"); + } + do { + // If value is an exact whole multiple of currentHighestValueLimitInAutoRange, it "belongs" with + // the next level up, as it crosses the limit. With floating point values, the simplest way to + // make this shift on exact multiple values happen (but not for any just-smaller-than-exact-multiple + // values) is to use a value that is 1 ulp bigger in computing the ratio for the shift amount: + int shiftAmount = + findCappedContainingBinaryOrderOfMagnitude( + Math.ceil((value + Math.ulp(value)) / currentHighestValueLimitInAutoRange) - 1.0); + shiftCoveredRangeToTheLeft(shiftAmount); + } + while (value >= currentHighestValueLimitInAutoRange); + } + } catch (ArrayIndexOutOfBoundsException ex) { + throw new ArrayIndexOutOfBoundsException("The value " + value + + " is out of bounds for histogram, current covered range [" + + currentLowestValueInAutoRange + ", " + currentHighestValueLimitInAutoRange + + ") cannot be extended any further.\n"+ + "Caused by: " + ex); + } + } + + private void shiftCoveredRangeToTheRight(final int numberOfBinaryOrdersOfMagnitude) { + // We are going to adjust the tracked range by effectively shifting it to the right + // (in the integer shift sense). + // + // To counter the right shift of the value multipliers, we need to left shift the internal + // representation such that the newly shifted integer values will continue to return the + // same double values. + + // Initially, new range is the same as current range, to make sure we correctly recover + // from a shift failure if one happens: + double newLowestValueInAutoRange = currentLowestValueInAutoRange; + double newHighestValueLimitInAutoRange = currentHighestValueLimitInAutoRange; + + try { + double shiftMultiplier = 1.0 / (1L << numberOfBinaryOrdersOfMagnitude); + + // First, temporarily change the highest value in auto-range without changing conversion ratios. 
+ // This is done to force new values higher than the new expected highest value to attempt an + // adjustment (which is synchronized and will wait behind this one). This ensures that we will + // not end up with any concurrently recorded values that would need to be discarded if the shift + // fails. If this shift succeeds, the pending adjustment attempt will end up doing nothing. + currentHighestValueLimitInAutoRange *= shiftMultiplier; + + double newIntegerToDoubleValueConversionRatio = + getIntegerToDoubleValueConversionRatio() * shiftMultiplier; + + // First shift the values, to give the shift a chance to fail: + + // Shift integer histogram left, increasing the recorded integer values for current recordings + // by a factor of (1 << numberOfBinaryOrdersOfMagnitude): + + // (no need to shift any values if all recorded values are at the 0 value level:) + if (getTotalCount() > integerValuesHistogram.getCountAtIndex(0)) { + // Apply the shift: + try { + integerValuesHistogram.shiftValuesLeft(numberOfBinaryOrdersOfMagnitude, + newIntegerToDoubleValueConversionRatio); + } catch (ArrayIndexOutOfBoundsException ex) { + // Failed to shift, try to expand size instead: + handleShiftValuesException(numberOfBinaryOrdersOfMagnitude, ex); + // First expand the highest limit to reflect successful size expansion: + newHighestValueLimitInAutoRange /= shiftMultiplier; + // Successfully expanded histogram range by numberOfBinaryOrdersOfMagnitude, but not + // by shifting (shifting failed because there was not room to shift left into). Instead, + // we grew the max value without changing the value mapping. Since we were trying to + // shift values left to begin with, trying to shift the left again will work (we now + // have room to shift into): + integerValuesHistogram.shiftValuesLeft(numberOfBinaryOrdersOfMagnitude, + newIntegerToDoubleValueConversionRatio); + } + } + // Shift (or resize) was successful. Adjust new range to reflect: + newLowestValueInAutoRange *= shiftMultiplier; + newHighestValueLimitInAutoRange *= shiftMultiplier; + } finally { + // Set the new range to either the successfully changed one, or the original one: + setTrackableValueRange(newLowestValueInAutoRange, newHighestValueLimitInAutoRange); + } + } + + private void shiftCoveredRangeToTheLeft(final int numberOfBinaryOrdersOfMagnitude) { + // We are going to adjust the tracked range by effectively shifting it to the right + // (in the integer shift sense). + // + // To counter the left shift of the value multipliers, we need to right shift the internal + // representation such that the newly shifted integer values will continue to return the + // same double values. + + // Initially, new range is the same as current range, to make sure we correctly recover + // from a shift failure if one happens: + double newLowestValueInAutoRange = currentLowestValueInAutoRange; + double newHighestValueLimitInAutoRange = currentHighestValueLimitInAutoRange; + + try { + double shiftMultiplier = 1.0 * (1L << numberOfBinaryOrdersOfMagnitude); + + double newIntegerToDoubleValueConversionRatio = + getIntegerToDoubleValueConversionRatio() * shiftMultiplier; + + // First, temporarily change the lowest value in auto-range without changing conversion ratios. + // This is done to force new values lower than the new expected lowest value to attempt an + // adjustment (which is synchronized and will wait behind this one). This ensures that we will + // not end up with any concurrently recorded values that would need to be discarded if the shift + // fails. 
If this shift succeeds, the pending adjustment attempt will end up doing nothing. + currentLowestValueInAutoRange *= shiftMultiplier; + + // First shift the values, to give the shift a chance to fail: + + // Shift integer histogram right, decreasing the recorded integer values for current recordings + // by a factor of (1 << numberOfBinaryOrdersOfMagnitude): + + // (no need to shift any values if all recorded values are at the 0 value level:) + if (getTotalCount() > integerValuesHistogram.getCountAtIndex(0)) { + // Apply the shift: + try { + integerValuesHistogram.shiftValuesRight(numberOfBinaryOrdersOfMagnitude, + newIntegerToDoubleValueConversionRatio); + // Shift was successful. Adjust new range to reflect: + newLowestValueInAutoRange *= shiftMultiplier; + newHighestValueLimitInAutoRange *= shiftMultiplier; + } catch (ArrayIndexOutOfBoundsException ex) { + // Failed to shift, try to expand size instead: + handleShiftValuesException(numberOfBinaryOrdersOfMagnitude, ex); + // Successfully expanded histogram range by numberOfBinaryOrdersOfMagnitude, but not + // by shifting (shifting failed because there was not room to shift right into). Instead, + // we grew the max value without changing the value mapping. Since we were trying to + // shift values right to begin with to make room for a larger value than we had had + // been able to fit before, no shift is needed, as the value should now fit. So rather + // than shifting and adjusting both lowest and highest limits, we'll end up just + // expanding newHighestValueLimitInAutoRange to indicate the newly expanded range. + // We therefore reverse-scale the newLowestValueInAutoRange before letting the later + // code scale both up: + newLowestValueInAutoRange /= shiftMultiplier; + } + } + // Shift (or resize) was successful. 
Adjust new range to reflect: + newLowestValueInAutoRange *= shiftMultiplier; + newHighestValueLimitInAutoRange *= shiftMultiplier; + } finally { + // Set the new range to either the successfully changed one, or the original one: + setTrackableValueRange(newLowestValueInAutoRange, newHighestValueLimitInAutoRange); + } + } + + private void handleShiftValuesException(final int numberOfBinaryOrdersOfMagnitude, Exception ex) { + if (!autoResize) { + throw new ArrayIndexOutOfBoundsException("Value outside of histogram covered range.\nCaused by: " + ex); + } + + long highestTrackableValue = integerValuesHistogram.getHighestTrackableValue(); + int currentContainingOrderOfMagnitude = findContainingBinaryOrderOfMagnitude(highestTrackableValue); + int newContainingOrderOfMagnitude = numberOfBinaryOrdersOfMagnitude + currentContainingOrderOfMagnitude; + if (newContainingOrderOfMagnitude > 63) { + throw new ArrayIndexOutOfBoundsException( + "Cannot resize histogram covered range beyond (1L << 63) / (1L << " + + (integerValuesHistogram.subBucketHalfCountMagnitude) + ") - 1.\n" + + "Caused by: " + ex); + } + long newHighestTrackableValue = (1L << newContainingOrderOfMagnitude) - 1; + integerValuesHistogram.resize(newHighestTrackableValue); + integerValuesHistogram.highestTrackableValue = newHighestTrackableValue; + configuredHighestToLowestValueRatio <<= numberOfBinaryOrdersOfMagnitude; + } + + // + // + // + // Clearing support: + // + // + // + + /** + * Reset the contents and stats of this histogram + */ + @Override + public void reset() { + integerValuesHistogram.reset(); + double initialLowestValueInAutoRange = Math.pow(2.0, 800); + init(configuredHighestToLowestValueRatio, initialLowestValueInAutoRange, integerValuesHistogram); + } + + // + // + // + // Copy support: + // + // + // + + /** + * Create a copy of this histogram, complete with data and everything. + * + * @return A distinct copy of this histogram. + */ + public DoubleHistogram copy() { + final DoubleHistogram targetHistogram = + new DoubleHistogram(configuredHighestToLowestValueRatio, getNumberOfSignificantValueDigits()); + targetHistogram.setTrackableValueRange(currentLowestValueInAutoRange, currentHighestValueLimitInAutoRange); + integerValuesHistogram.copyInto(targetHistogram.integerValuesHistogram); + return targetHistogram; + } + + /** + * Get a copy of this histogram, corrected for coordinated omission. + *
+ * To compensate for the loss of sampled values when a recorded value is larger than the expected + * interval between value samples, the new histogram will include an auto-generated additional series of + * decreasingly-smaller (down to the expectedIntervalBetweenValueSamples) value records for each count found + * in the current histogram that is larger than the expectedIntervalBetweenValueSamples. + * + * Note: This is a post-recording correction method, as opposed to the at-recording correction method provided + * by {@link #recordValueWithExpectedInterval(double, double) recordValueWithExpectedInterval}. The two + * methods are mutually exclusive, and only one of the two should be used on a given data set to correct + * for the same coordinated omission issue. + *
+ * See notes in the description of the Histogram calls for an illustration of why this corrective behavior is + * important. + * + * @param expectedIntervalBetweenValueSamples If expectedIntervalBetweenValueSamples is larger than 0, add + * auto-generated value records as appropriate if value is larger + * than expectedIntervalBetweenValueSamples + * @return a copy of this histogram, corrected for coordinated omission. + */ + public DoubleHistogram copyCorrectedForCoordinatedOmission(final double expectedIntervalBetweenValueSamples) { + final DoubleHistogram targetHistogram = + new DoubleHistogram(configuredHighestToLowestValueRatio, getNumberOfSignificantValueDigits()); + targetHistogram.setTrackableValueRange(currentLowestValueInAutoRange, currentHighestValueLimitInAutoRange); + targetHistogram.addWhileCorrectingForCoordinatedOmission(this, expectedIntervalBetweenValueSamples); + return targetHistogram; + } + + /** + * Copy this histogram into the target histogram, overwriting it's contents. + * + * @param targetHistogram the histogram to copy into + */ + public void copyInto(final DoubleHistogram targetHistogram) { + targetHistogram.reset(); + targetHistogram.add(this); + targetHistogram.setStartTimeStamp(integerValuesHistogram.startTimeStampMsec); + targetHistogram.setEndTimeStamp(integerValuesHistogram.endTimeStampMsec); + } + + /** + * Copy this histogram, corrected for coordinated omission, into the target histogram, overwriting it's contents. + * (see {@link #copyCorrectedForCoordinatedOmission} for more detailed explanation about how correction is applied) + * + * @param targetHistogram the histogram to copy into + * @param expectedIntervalBetweenValueSamples If expectedIntervalBetweenValueSamples is larger than 0, add + * auto-generated value records as appropriate if value is larger + * than expectedIntervalBetweenValueSamples + */ + public void copyIntoCorrectedForCoordinatedOmission(final DoubleHistogram targetHistogram, + final double expectedIntervalBetweenValueSamples) { + targetHistogram.reset(); + targetHistogram.addWhileCorrectingForCoordinatedOmission(this, expectedIntervalBetweenValueSamples); + targetHistogram.setStartTimeStamp(integerValuesHistogram.startTimeStampMsec); + targetHistogram.setEndTimeStamp(integerValuesHistogram.endTimeStampMsec); + } + + // + // + // + // Add support: + // + // + // + + /** + * Add the contents of another histogram to this one. + * + * @param fromHistogram The other histogram. + * @throws ArrayIndexOutOfBoundsException (may throw) if values in fromHistogram's cannot be + * covered by this histogram's range + */ + public void add(final DoubleHistogram fromHistogram) throws ArrayIndexOutOfBoundsException { + int arrayLength = fromHistogram.integerValuesHistogram.countsArrayLength; + AbstractHistogram fromIntegerHistogram = fromHistogram.integerValuesHistogram; + for (int i = 0; i < arrayLength; i++) { + long count = fromIntegerHistogram.getCountAtIndex(i); + if (count > 0) { + recordValueWithCount( + fromIntegerHistogram.valueFromIndex(i) * + fromHistogram.getIntegerToDoubleValueConversionRatio(), + count); + } + } + } + + /** + * Add the contents of another histogram to this one, while correcting the incoming data for coordinated omission. + *
+ * To compensate for the loss of sampled values when a recorded value is larger than the expected + * interval between value samples, the values added will include an auto-generated additional series of + * decreasingly-smaller (down to the expectedIntervalBetweenValueSamples) value records for each count found + * in the current histogram that is larger than the expectedIntervalBetweenValueSamples. + * + * Note: This is a post-recording correction method, as opposed to the at-recording correction method provided + * by {@link #recordValueWithExpectedInterval(double, double) recordValueWithExpectedInterval}. The two + * methods are mutually exclusive, and only one of the two should be used on a given data set to correct + * for the same coordinated omission issue. + *
+ * See notes in the description of the Histogram calls for an illustration of why this corrective behavior is + * important. + * + * @param fromHistogram Other histogram. highestToLowestValueRatio and numberOfSignificantValueDigits must match. + * @param expectedIntervalBetweenValueSamples If expectedIntervalBetweenValueSamples is larger than 0, add + * auto-generated value records as appropriate if value is larger + * than expectedIntervalBetweenValueSamples + * @throws ArrayIndexOutOfBoundsException (may throw) if values exceed highestTrackableValue + */ + public void addWhileCorrectingForCoordinatedOmission(final DoubleHistogram fromHistogram, + final double expectedIntervalBetweenValueSamples) { + final DoubleHistogram toHistogram = this; + + for (HistogramIterationValue v : fromHistogram.integerValuesHistogram.recordedValues()) { + toHistogram.recordValueWithCountAndExpectedInterval( + v.getValueIteratedTo() * getIntegerToDoubleValueConversionRatio(), + v.getCountAtValueIteratedTo(), expectedIntervalBetweenValueSamples); + } + } + + /** + * Subtract the contents of another histogram from this one. + * + * @param otherHistogram The other histogram. + * @throws ArrayIndexOutOfBoundsException (may throw) if values in fromHistogram's cannot be + * covered by this histogram's range + */ + public void subtract(final DoubleHistogram otherHistogram) { + int arrayLength = otherHistogram.integerValuesHistogram.countsArrayLength; + AbstractHistogram otherIntegerHistogram = otherHistogram.integerValuesHistogram; + for (int i = 0; i < arrayLength; i++) { + long otherCount = otherIntegerHistogram.getCountAtIndex(i); + if (otherCount > 0) { + double otherValue = otherIntegerHistogram.valueFromIndex(i) * + otherHistogram.getIntegerToDoubleValueConversionRatio(); + if (getCountAtValue(otherValue) < otherCount) { + throw new IllegalArgumentException("otherHistogram count (" + otherCount + ") at value " + + otherValue + " is larger than this one's (" + getCountAtValue(otherValue) + ")"); + } + recordValueWithCount(otherValue, -otherCount); + } + } + } + + // + // + // + // Comparison support: + // + // + // + + /** + * Determine if this histogram is equivalent to another. + * + * @param other the other histogram to compare to + * @return True if this histogram are equivalent with the other. 
+ */ + public boolean equals(final Object other){ + if ( this == other ) { + return true; + } + if ( !(other instanceof DoubleHistogram) ) { + return false; + } + DoubleHistogram that = (DoubleHistogram) other; + return integerValuesHistogram.equals(that.integerValuesHistogram); + } + + @Override + public int hashCode() { + return integerValuesHistogram.hashCode(); + } + + // + // + // + // Histogram structure querying support: + // + // + // + + /** + * Get the total count of all recorded values in the histogram + * @return the total count of all recorded values in the histogram + */ + public long getTotalCount() { + return integerValuesHistogram.getTotalCount(); + } + + /** + * get the current lowest (non zero) trackable value the automatically determined range + * (keep in mind that this can change because it is auto ranging) + * @return current lowest trackable value the automatically determined range + */ + double getCurrentLowestTrackableNonZeroValue() { + return currentLowestValueInAutoRange; + } + + /** + * get the current highest trackable value in the automatically determined range + * (keep in mind that this can change because it is auto ranging) + * @return current highest trackable value in the automatically determined range + */ + double getCurrentHighestTrackableValue() { + return currentHighestValueLimitInAutoRange; + } + + /** + * Get the current conversion ratio from interval integer value representation to double units. + * (keep in mind that this can change because it is auto ranging). This ratio can be useful + * for converting integer values found in iteration, although the preferred form for accessing + * iteration values would be to use the + * {@link HistogramIterationValue#getDoubleValueIteratedTo() getDoubleValueIteratedTo()} + * and + * {@link HistogramIterationValue#getDoubleValueIteratedFrom() getDoubleValueIteratedFrom()} + * accessors to {@link HistogramIterationValue} iterated values. + * + * @return the current conversion ratio from interval integer value representation to double units. + */ + public double getIntegerToDoubleValueConversionRatio() { + return integerValuesHistogram.integerToDoubleValueConversionRatio; + } + + /** + * get the configured numberOfSignificantValueDigits + * @return numberOfSignificantValueDigits + */ + public int getNumberOfSignificantValueDigits() { + return integerValuesHistogram.numberOfSignificantValueDigits; + } + + /** + * get the Dynamic range of the histogram: the configured ratio between the highest trackable value and the + * lowest trackable non zero value at any given time. + * @return the dynamic range of the histogram, expressed as the ratio between the highest trackable value + * and the lowest trackable non zero value at any given time. + */ + public long getHighestToLowestValueRatio() { + return configuredHighestToLowestValueRatio; + } + + /** + * Get the size (in value units) of the range of values that are equivalent to the given value within the + * histogram's resolution. Where "equivalent" means that value samples recorded for any two + * equivalent values are counted in a common total count. + * + * @param value The given value + * @return The lowest value that is equivalent to the given value within the histogram's resolution. 
+ */ + public double sizeOfEquivalentValueRange(final double value) { + return integerValuesHistogram.sizeOfEquivalentValueRange((long)(value * getDoubleToIntegerValueConversionRatio())) * + getIntegerToDoubleValueConversionRatio(); + } + + /** + * Get the lowest value that is equivalent to the given value within the histogram's resolution. + * Where "equivalent" means that value samples recorded for any two + * equivalent values are counted in a common total count. + * + * @param value The given value + * @return The lowest value that is equivalent to the given value within the histogram's resolution. + */ + public double lowestEquivalentValue(final double value) { + return integerValuesHistogram.lowestEquivalentValue((long)(value * getDoubleToIntegerValueConversionRatio())) * + getIntegerToDoubleValueConversionRatio(); + } + + /** + * Get the highest value that is equivalent to the given value within the histogram's resolution. + * Where "equivalent" means that value samples recorded for any two + * equivalent values are counted in a common total count. + * + * @param value The given value + * @return The highest value that is equivalent to the given value within the histogram's resolution. + */ + public double highestEquivalentValue(final double value) { + double nextNonEquivalentValue = nextNonEquivalentValue(value); + // Theoretically, nextNonEquivalentValue - ulp(nextNonEquivalentValue) == nextNonEquivalentValue + // is possible (if the ulp size switches right at nextNonEquivalentValue), so drop by 2 ulps and + // increment back up to closest within-ulp value. + double highestEquivalentValue = nextNonEquivalentValue - (2 * Math.ulp(nextNonEquivalentValue)); + while (highestEquivalentValue + Math.ulp(highestEquivalentValue) < nextNonEquivalentValue) { + highestEquivalentValue += Math.ulp(highestEquivalentValue); + } + + return highestEquivalentValue; + } + + /** + * Get a value that lies in the middle (rounded up) of the range of values equivalent the given value. + * Where "equivalent" means that value samples recorded for any two + * equivalent values are counted in a common total count. + * + * @param value The given value + * @return The value lies in the middle (rounded up) of the range of values equivalent the given value. + */ + public double medianEquivalentValue(final double value) { + return integerValuesHistogram.medianEquivalentValue((long)(value * getDoubleToIntegerValueConversionRatio())) * + getIntegerToDoubleValueConversionRatio(); + } + + /** + * Get the next value that is not equivalent to the given value within the histogram's resolution. + * Where "equivalent" means that value samples recorded for any two + * equivalent values are counted in a common total count. + * + * @param value The given value + * @return The next value that is not equivalent to the given value within the histogram's resolution. + */ + public double nextNonEquivalentValue(final double value) { + return integerValuesHistogram.nextNonEquivalentValue((long)(value * getDoubleToIntegerValueConversionRatio())) * + getIntegerToDoubleValueConversionRatio(); } + + /** + * Determine if two values are equivalent with the histogram's resolution. + * Where "equivalent" means that value samples recorded for any two + * equivalent values are counted in a common total count. + * + * @param value1 first value to compare + * @param value2 second value to compare + * @return True if values are equivalent to within the histogram's resolution. 
+ */ + public boolean valuesAreEquivalent(final double value1, final double value2) { + return (lowestEquivalentValue(value1) == lowestEquivalentValue(value2)); + } + + /** + * Provide a (conservatively high) estimate of the Histogram's total footprint in bytes + * + * @return a (conservatively high) estimate of the Histogram's total footprint in bytes + */ + public int getEstimatedFootprintInBytes() { + return integerValuesHistogram._getEstimatedFootprintInBytes(); + } + + // + // + // + // Timestamp and tag support: + // + // + // + + /** + * get the start time stamp [optionally] stored with this histogram + * @return the start time stamp [optionally] stored with this histogram + */ + public long getStartTimeStamp() { + return integerValuesHistogram.getStartTimeStamp(); + } + + /** + * Set the start time stamp value associated with this histogram to a given value. + * @param timeStampMsec the value to set the time stamp to, [by convention] in msec since the epoch. + */ + public void setStartTimeStamp(final long timeStampMsec) { + integerValuesHistogram.setStartTimeStamp(timeStampMsec); + } + + /** + * get the end time stamp [optionally] stored with this histogram + * @return the end time stamp [optionally] stored with this histogram + */ + public long getEndTimeStamp() { + return integerValuesHistogram.getEndTimeStamp(); + } + + /** + * Set the end time stamp value associated with this histogram to a given value. + * @param timeStampMsec the value to set the time stamp to, [by convention] in msec since the epoch. + */ + public void setEndTimeStamp(final long timeStampMsec) { + integerValuesHistogram.setEndTimeStamp(timeStampMsec); + } + + /** + * get the tag string [optionally] associated with this histogram + * @return tag string [optionally] associated with this histogram + */ + public String getTag() { + return integerValuesHistogram.getTag(); + } + + /** + * Set the tag string associated with this histogram + * @param tag the tag string to associate with this histogram + */ + public void setTag(String tag) { + integerValuesHistogram.setTag(tag); + } + + // + // + // + // Histogram Data access support: + // + // + // + + /** + * Get the lowest recorded value level in the histogram + * + * @return the Min value recorded in the histogram + */ + public double getMinValue() { + return integerValuesHistogram.getMinValue() * getIntegerToDoubleValueConversionRatio(); + } + + /** + * Get the highest recorded value level in the histogram + * + * @return the Max value recorded in the histogram + */ + public double getMaxValue() { + return integerValuesHistogram.getMaxValue() * getIntegerToDoubleValueConversionRatio(); + } + + /** + * Get the lowest recorded non-zero value level in the histogram + * + * @return the lowest recorded non-zero value level in the histogram + */ + public double getMinNonZeroValue() { + return integerValuesHistogram.getMinNonZeroValue() * getIntegerToDoubleValueConversionRatio(); + } + + /** + * Get the highest recorded value level in the histogram as a double + * + * @return the highest recorded value level in the histogram as a double + */ + @Override + public double getMaxValueAsDouble() { + return getMaxValue(); + } + + /** + * Get the computed mean value of all recorded values in the histogram + * + * @return the mean value (in value units) of the histogram data + */ + public double getMean() { + return integerValuesHistogram.getMean() * getIntegerToDoubleValueConversionRatio(); + } + + /** + * Get the computed standard deviation of all recorded values in the 
histogram + * + * @return the standard deviation (in value units) of the histogram data + */ + public double getStdDeviation() { + return integerValuesHistogram.getStdDeviation() * getIntegerToDoubleValueConversionRatio(); + } + + /** + * Get the value at a given percentile. + * When the percentile is > 0.0, the value returned is the value that the given + * percentage of the overall recorded value entries in the histogram are either smaller than + * or equivalent to. When the percentile is 0.0, the value returned is the value that all value + * entries in the histogram are either larger than or equivalent to. + *
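Both query directions, sketched against the histogram built in the earlier example:

// Forward: the value at a given percentile.
double p95 = histogram.getValueAtPercentile(95.0);

// Inverse: the percentile of recorded values at or below a given value.
double percentile = histogram.getPercentileAtOrBelowValue(p95); // roughly 95.0

// "Equivalent" here means two values map to the same quantized bucket
// at the configured precision:
boolean same = histogram.valuesAreEquivalent(p95, histogram.highestEquivalentValue(p95));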
+ * Note that two values are "equivalent" in this statement if + * {@link DoubleHistogram#valuesAreEquivalent} would return true. + * + * @param percentile The percentile for which to return the associated value + * @return The value that the given percentage of the overall recorded value entries in the + * histogram are either smaller than or equivalent to. When the percentile is 0.0, returns the + * value that all value entries in the histogram are either larger than or equivalent to. + */ + public double getValueAtPercentile(final double percentile) { + return integerValuesHistogram.getValueAtPercentile(percentile) * getIntegerToDoubleValueConversionRatio(); + } + + /** + * Get the percentile at a given value. + * The percentile returned is the percentile of values recorded in the histogram that are smaller + * than or equivalent to the given value. + *
+ * Note that two values are "equivalent" in this statement if + * {@link DoubleHistogram#valuesAreEquivalent} would return true. + * + * @param value The value for which to return the associated percentile + * @return The percentile of values recorded in the histogram that are smaller than or equivalent + * to the given value. + */ + public double getPercentileAtOrBelowValue(final double value) { + return integerValuesHistogram.getPercentileAtOrBelowValue((long)(value * getDoubleToIntegerValueConversionRatio())); + } + + /** + * Get the count of recorded values within a range of value levels (inclusive to within the histogram's resolution). + * + * @param lowValue The lower value bound on the range for which + * to provide the recorded count. Will be rounded down with + * {@link DoubleHistogram#lowestEquivalentValue lowestEquivalentValue}. + * @param highValue The higher value bound on the range for which to provide the recorded count. + * Will be rounded up with {@link DoubleHistogram#highestEquivalentValue highestEquivalentValue}. + * @return the total count of values recorded in the histogram within the value range that is + * {@literal >=} lowestEquivalentValue(lowValue) and {@literal <=} highestEquivalentValue(highValue) + */ + public double getCountBetweenValues(final double lowValue, final double highValue) + throws ArrayIndexOutOfBoundsException { + return integerValuesHistogram.getCountBetweenValues( + (long)(lowValue * getDoubleToIntegerValueConversionRatio()), + (long)(highValue * getDoubleToIntegerValueConversionRatio()) + ); + } + + /** + * Get the count of recorded values at a specific value (to within the histogram resolution at the value level). + * + * @param value The value for which to provide the recorded count + * @return The total count of values recorded in the histogram within the value range that is + * {@literal >=} lowestEquivalentValue(value) and {@literal <=} highestEquivalentValue(value) + */ + public long getCountAtValue(final double value) throws ArrayIndexOutOfBoundsException { + return integerValuesHistogram.getCountAtValue((long)(value * getDoubleToIntegerValueConversionRatio())); + } + + /** + * Provide a means of iterating through histogram values according to percentile levels. The iteration is + * performed in steps that start at 0% and reduce their distance to 100% according to the + * percentileTicksPerHalfDistance parameter, ultimately reaching 100% when all recorded histogram + * values are exhausted. + *
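As a sketch of the iteration API (the accessor names on DoubleHistogramIterationValue are assumed to mirror upstream HdrHistogram):

// Percentile steps, 5 ticks per half-distance to 100%:
for (DoubleHistogramIterationValue v : histogram.percentiles(5)) {
    System.out.println(v.getPercentileLevelIteratedTo() + "% <= " + v.getValueIteratedTo());
}

// Linear buckets 1.0 value units wide:
for (DoubleHistogramIterationValue v : histogram.linearBucketValues(1.0)) {
    System.out.println(v.getValueIteratedTo() + ": " + v.getCountAddedInThisIterationStep());
}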
+ * @param percentileTicksPerHalfDistance The number of iteration steps per half-distance to 100%. + * @return An {@link Iterable}{@literal <}{@link DoubleHistogramIterationValue}{@literal >} + * through the histogram using a + * {@link DoublePercentileIterator} + */ + public Percentiles percentiles(final int percentileTicksPerHalfDistance) { + return new Percentiles(this, percentileTicksPerHalfDistance); + } + + /** + * Provide a means of iterating through histogram values using linear steps. The iteration is + * performed in steps of valueUnitsPerBucket in size, terminating when all recorded histogram + * values are exhausted. + * + * @param valueUnitsPerBucket The size (in value units) of the linear buckets to use + * @return An {@link Iterable}{@literal <}{@link DoubleHistogramIterationValue}{@literal >} + * through the histogram using a + * {@link DoubleLinearIterator} + */ + public LinearBucketValues linearBucketValues(final double valueUnitsPerBucket) { + return new LinearBucketValues(this, valueUnitsPerBucket); + } + + /** + * Provide a means of iterating through histogram values at logarithmically increasing levels. The iteration is + * performed in steps that start at valueUnitsInFirstBucket and increase exponentially according to + * logBase, terminating when all recorded histogram values are exhausted. + * + * @param valueUnitsInFirstBucket The size (in value units) of the first bucket in the iteration + * @param logBase The multiplier by which bucket sizes will grow in each iteration step + * @return An {@link Iterable}{@literal <}{@link DoubleHistogramIterationValue}{@literal >} + * through the histogram using + * a {@link DoubleLogarithmicIterator} + */ + public LogarithmicBucketValues logarithmicBucketValues(final double valueUnitsInFirstBucket, + final double logBase) { + return new LogarithmicBucketValues(this, valueUnitsInFirstBucket, logBase); + } + + /** + * Provide a means of iterating through all recorded histogram values using the finest granularity steps + * supported by the underlying representation. The iteration steps through all non-zero recorded value counts, + * and terminates when all recorded histogram values are exhausted. + * + * @return An {@link Iterable}{@literal <}{@link DoubleHistogramIterationValue}{@literal >} + * through the histogram using + * a {@link DoubleRecordedValuesIterator} + */ + public RecordedValues recordedValues() { + return new RecordedValues(this); + } + + /** + * Provide a means of iterating through all histogram values using the finest granularity steps supported by + * the underlying representation. The iteration steps through all possible unit value levels, regardless of + * whether or not there were recorded values for that value level, and terminates when all recorded histogram + * values are exhausted. 
+ * + * @return An {@link Iterable}{@literal <}{@link DoubleHistogramIterationValue}{@literal >} + * through the histogram using a {@link DoubleAllValuesIterator} + */ + public AllValues allValues() { + return new AllValues(this); + } + + + // Percentile iterator support: + + /** + * An {@link Iterable}{@literal <}{@link DoubleHistogramIterationValue}{@literal >} through + * the histogram using a {@link DoublePercentileIterator} + */ + public class Percentiles implements Iterable { + final DoubleHistogram histogram; + final int percentileTicksPerHalfDistance; + + private Percentiles(final DoubleHistogram histogram, final int percentileTicksPerHalfDistance) { + this.histogram = histogram; + this.percentileTicksPerHalfDistance = percentileTicksPerHalfDistance; + } + + /** + * @return A {@link DoublePercentileIterator}{@literal <}{@link DoubleHistogramIterationValue}{@literal >} + */ + public Iterator iterator() { + return new DoublePercentileIterator(histogram, percentileTicksPerHalfDistance); + } + } + + // Linear iterator support: + + /** + * An {@link Iterable}{@literal <}{@link DoubleHistogramIterationValue}{@literal >} through + * the histogram using a {@link DoubleLinearIterator} + */ + public class LinearBucketValues implements Iterable { + final DoubleHistogram histogram; + final double valueUnitsPerBucket; + + private LinearBucketValues(final DoubleHistogram histogram, final double valueUnitsPerBucket) { + this.histogram = histogram; + this.valueUnitsPerBucket = valueUnitsPerBucket; + } + + /** + * @return A {@link DoubleLinearIterator}{@literal <}{@link DoubleHistogramIterationValue}{@literal >} + */ + public Iterator iterator() { + return new DoubleLinearIterator(histogram, valueUnitsPerBucket); + } + } + + // Logarithmic iterator support: + + /** + * An {@link Iterable}{@literal <}{@link DoubleHistogramIterationValue}{@literal >} through + * the histogram using a {@link DoubleLogarithmicIterator} + */ + public class LogarithmicBucketValues implements Iterable { + final DoubleHistogram histogram; + final double valueUnitsInFirstBucket; + final double logBase; + + private LogarithmicBucketValues(final DoubleHistogram histogram, + final double valueUnitsInFirstBucket, final double logBase) { + this.histogram = histogram; + this.valueUnitsInFirstBucket = valueUnitsInFirstBucket; + this.logBase = logBase; + } + + /** + * @return A {@link DoubleLogarithmicIterator}{@literal <}{@link DoubleHistogramIterationValue}{@literal >} + */ + public Iterator iterator() { + return new DoubleLogarithmicIterator(histogram, valueUnitsInFirstBucket, logBase); + } + } + + // Recorded value iterator support: + + /** + * An {@link Iterable}{@literal <}{@link DoubleHistogramIterationValue}{@literal >} through + * the histogram using a {@link DoubleRecordedValuesIterator} + */ + public class RecordedValues implements Iterable { + final DoubleHistogram histogram; + + private RecordedValues(final DoubleHistogram histogram) { + this.histogram = histogram; + } + + /** + * @return A {@link DoubleRecordedValuesIterator}{@literal <}{@link HistogramIterationValue}{@literal >} + */ + public Iterator iterator() { + return new DoubleRecordedValuesIterator(histogram); + } + } + + // AllValues iterator support: + + /** + * An {@link Iterable}{@literal <}{@link DoubleHistogramIterationValue}{@literal >} through + * the histogram using a {@link DoubleAllValuesIterator} + */ + public class AllValues implements Iterable { + final DoubleHistogram histogram; + + private AllValues(final DoubleHistogram histogram) { + 
this.histogram = histogram; + } + + /** + * @return A {@link DoubleAllValuesIterator}{@literal <}{@link HistogramIterationValue}{@literal >} + */ + public Iterator iterator() { + return new DoubleAllValuesIterator(histogram); + } + } + + + + /** + * Produce textual representation of the value distribution of histogram data by percentile. The distribution is + * output with exponentially increasing resolution, with each exponentially decreasing half-distance containing + * five (5) percentile reporting tick points. + * + * @param printStream Stream into which the distribution will be output + *
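For example (the 1000.0 scaling ratio is illustrative):

// Print the distribution to stdout, dividing recorded values by 1000.0 on
// output (e.g. reporting values recorded in microseconds as milliseconds):
histogram.outputPercentileDistribution(System.out, 1000.0);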
+ * @param outputValueUnitScalingRatio The scaling factor by which to divide histogram recorded values units in + * output + */ + public void outputPercentileDistribution(final PrintStream printStream, + final Double outputValueUnitScalingRatio) { + outputPercentileDistribution(printStream, 5, outputValueUnitScalingRatio); + } + + // + // + // + // Textual percentile output support: + // + // + // + + /** + * Produce textual representation of the value distribution of histogram data by percentile. The distribution is + * output with exponentially increasing resolution, with each exponentially decreasing half-distance containing + * dumpTicksPerHalf percentile reporting tick points. + * + * @param printStream Stream into which the distribution will be output + *
+ * @param percentileTicksPerHalfDistance The number of reporting points per exponentially decreasing half-distance + *
+ * @param outputValueUnitScalingRatio The scaling factor by which to divide histogram recorded values units in + * output + */ + public void outputPercentileDistribution(final PrintStream printStream, + final int percentileTicksPerHalfDistance, + final Double outputValueUnitScalingRatio) { + outputPercentileDistribution(printStream, percentileTicksPerHalfDistance, outputValueUnitScalingRatio, false); + } + + /** + * Produce textual representation of the value distribution of histogram data by percentile. The distribution is + * output with exponentially increasing resolution, with each exponentially decreasing half-distance containing + * dumpTicksPerHalf percentile reporting tick points. + * + * @param printStream Stream into which the distribution will be output + *
+ * @param percentileTicksPerHalfDistance The number of reporting points per exponentially decreasing half-distance + *
+ * @param outputValueUnitScalingRatio The scaling factor by which to divide histogram recorded values units in + * output + * @param useCsvFormat Output in CSV format if true. Otherwise use plain text form. + */ + public void outputPercentileDistribution(final PrintStream printStream, + final int percentileTicksPerHalfDistance, + final Double outputValueUnitScalingRatio, + final boolean useCsvFormat) { + integerValuesHistogram.outputPercentileDistribution(printStream, + percentileTicksPerHalfDistance, + outputValueUnitScalingRatio / getIntegerToDoubleValueConversionRatio(), + useCsvFormat); + } + + // + // + // + // Serialization support: + // + // + // + + private static final long serialVersionUID = 42L; + + private void writeObject(final ObjectOutputStream o) + throws IOException + { + o.writeLong(configuredHighestToLowestValueRatio); + o.writeDouble(currentLowestValueInAutoRange); + o.writeObject(integerValuesHistogram); + } + + private void readObject(final ObjectInputStream o) + throws IOException, ClassNotFoundException { + final long configuredHighestToLowestValueRatio = o.readLong(); + final double lowestValueInAutoRange = o.readDouble(); + AbstractHistogram integerValuesHistogram = (AbstractHistogram) o.readObject(); + init(configuredHighestToLowestValueRatio, lowestValueInAutoRange, integerValuesHistogram); + } + + // + // + // + // Encoding/Decoding support: + // + // + // + + /** + * Get the capacity needed to encode this histogram into a ByteBuffer + * @return the capacity needed to encode this histogram into a ByteBuffer + */ + @Override + public int getNeededByteBufferCapacity() { + return integerValuesHistogram.getNeededByteBufferCapacity(); + } + + private int getNeededByteBufferCapacity(final int relevantLength) { + return integerValuesHistogram.getNeededByteBufferCapacity(relevantLength); + } + + private static final int DHIST_encodingCookie = 0x0c72124e; + private static final int DHIST_compressedEncodingCookie = 0x0c72124f; + + static boolean isDoubleHistogramCookie(int cookie) { + return isCompressedDoubleHistogramCookie(cookie) || isNonCompressedDoubleHistogramCookie(cookie); + } + + static boolean isCompressedDoubleHistogramCookie(int cookie) { + return (cookie == DHIST_compressedEncodingCookie); + } + + static boolean isNonCompressedDoubleHistogramCookie(int cookie) { + return (cookie == DHIST_encodingCookie); + } + + /** + * Encode this histogram into a ByteBuffer + * @param buffer The buffer to encode into + * @return The number of bytes written to the buffer + */ + synchronized public int encodeIntoByteBuffer(final ByteBuffer buffer) { + long maxValue = integerValuesHistogram.getMaxValue(); + int relevantLength = integerValuesHistogram.getLengthForNumberOfBuckets( + integerValuesHistogram.getBucketsNeededToCoverValue(maxValue)); + if (buffer.capacity() < getNeededByteBufferCapacity(relevantLength)) { + throw new ArrayIndexOutOfBoundsException("buffer does not have capacity for " + + getNeededByteBufferCapacity(relevantLength) + " bytes"); + } + buffer.putInt(DHIST_encodingCookie); + buffer.putInt(getNumberOfSignificantValueDigits()); + buffer.putLong(configuredHighestToLowestValueRatio); + return integerValuesHistogram.encodeIntoByteBuffer(buffer) + 16; + } + + /** + * Encode this histogram in compressed form into a byte array + * @param targetBuffer The buffer to encode into + * @param compressionLevel Compression level (for java.util.zip.Deflater). 
+ * @return The number of bytes written to the buffer + */ + @Override + synchronized public int encodeIntoCompressedByteBuffer( + final ByteBuffer targetBuffer, + final int compressionLevel) { + targetBuffer.putInt(DHIST_compressedEncodingCookie); + targetBuffer.putInt(getNumberOfSignificantValueDigits()); + targetBuffer.putLong(configuredHighestToLowestValueRatio); + return integerValuesHistogram.encodeIntoCompressedByteBuffer(targetBuffer, compressionLevel) + 16; + } + + /** + * Encode this histogram in compressed form into a byte array + * @param targetBuffer The buffer to encode into + * @return The number of bytes written to the array + */ + public int encodeIntoCompressedByteBuffer(final ByteBuffer targetBuffer) { + return encodeIntoCompressedByteBuffer(targetBuffer, Deflater.DEFAULT_COMPRESSION); + } + + private static final Class[] constructorArgTypes = {long.class, int.class, Class.class, AbstractHistogram.class}; + + static T constructHistogramFromBuffer( + int cookie, + final ByteBuffer buffer, + final Class doubleHistogramClass, + final Class histogramClass, + final long minBarForHighestToLowestValueRatio) throws DataFormatException { + int numberOfSignificantValueDigits = buffer.getInt(); + long configuredHighestToLowestValueRatio = buffer.getLong(); + final AbstractHistogram valuesHistogram; + if (isNonCompressedDoubleHistogramCookie(cookie)) { + valuesHistogram = + AbstractHistogram.decodeFromByteBuffer(buffer, histogramClass, minBarForHighestToLowestValueRatio); + } else if (isCompressedDoubleHistogramCookie(cookie)) { + valuesHistogram = + AbstractHistogram.decodeFromCompressedByteBuffer(buffer, histogramClass, minBarForHighestToLowestValueRatio); + } else { + throw new IllegalArgumentException("The buffer does not contain a DoubleHistogram"); + } + + try { + Constructor doubleHistogramConstructor = + doubleHistogramClass.getDeclaredConstructor(constructorArgTypes); + + T histogram = + doubleHistogramConstructor.newInstance( + configuredHighestToLowestValueRatio, + numberOfSignificantValueDigits, + histogramClass, + valuesHistogram + ); + histogram.setAutoResize(true); + return histogram; + } catch (NoSuchMethodException ex) { + throw new IllegalStateException("Unable to construct DoubleHistogram of type " + doubleHistogramClass); + } catch (InstantiationException ex) { + throw new IllegalStateException("Unable to construct DoubleHistogram of type " + doubleHistogramClass); + } catch (IllegalAccessException ex) { + throw new IllegalStateException("Unable to construct DoubleHistogram of type " + doubleHistogramClass); + } catch (InvocationTargetException ex) { + throw new IllegalStateException("Unable to construct DoubleHistogram of type " + doubleHistogramClass); + } + } + + /** + * Construct a new DoubleHistogram by decoding it from a ByteBuffer. + * @param buffer The buffer to decode from + * @param minBarForHighestToLowestValueRatio Force highestTrackableValue to be set at least this high + * @return The newly constructed DoubleHistogram + */ + public static DoubleHistogram decodeFromByteBuffer( + final ByteBuffer buffer, + final long minBarForHighestToLowestValueRatio) { + return decodeFromByteBuffer(buffer, Histogram.class, minBarForHighestToLowestValueRatio); + } + + /** + * Construct a new DoubleHistogram by decoding it from a ByteBuffer, using a + * specified AbstractHistogram subclass for tracking internal counts (e.g. {@link Histogram}, + * {@link ConcurrentHistogram}, {@link SynchronizedHistogram}, + * {@link IntCountsHistogram}, {@link ShortCountsHistogram}). 
+ * + * @param buffer The buffer to decode from + * @param internalCountsHistogramClass The class to use for internal counts tracking + * @param minBarForHighestToLowestValueRatio Force highestTrackableValue to be set at least this high + * @return The newly constructed DoubleHistogram + */ + public static DoubleHistogram decodeFromByteBuffer( + final ByteBuffer buffer, + final Class internalCountsHistogramClass, + long minBarForHighestToLowestValueRatio) { + try { + int cookie = buffer.getInt(); + if (!isNonCompressedDoubleHistogramCookie(cookie)) { + throw new IllegalArgumentException("The buffer does not contain a DoubleHistogram"); + } + DoubleHistogram histogram = constructHistogramFromBuffer(cookie, buffer, + DoubleHistogram.class, internalCountsHistogramClass, + minBarForHighestToLowestValueRatio); + return histogram; + } catch (DataFormatException ex) { + throw new RuntimeException(ex); + } + } + + /** + * Construct a new DoubleHistogram by decoding it from a compressed form in a ByteBuffer. + * @param buffer The buffer to decode from + * @param minBarForHighestToLowestValueRatio Force highestTrackableValue to be set at least this high + * @return The newly constructed DoubleHistogram + * @throws DataFormatException on error parsing/decompressing the buffer + */ + public static DoubleHistogram decodeFromCompressedByteBuffer( + final ByteBuffer buffer, + final long minBarForHighestToLowestValueRatio) throws DataFormatException { + return decodeFromCompressedByteBuffer(buffer, Histogram.class, minBarForHighestToLowestValueRatio); + } + + /** + * Construct a new DoubleHistogram by decoding it from a compressed form in a ByteBuffer, using a + * specified AbstractHistogram subclass for tracking internal counts (e.g. {@link Histogram}, + * {@link AtomicHistogram}, {@link SynchronizedHistogram}, + * {@link IntCountsHistogram}, {@link ShortCountsHistogram}). + * + * @param buffer The buffer to decode from + * @param internalCountsHistogramClass The class to use for internal counts tracking + * @param minBarForHighestToLowestValueRatio Force highestTrackableValue to be set at least this high + * @return The newly constructed DoubleHistogram + * @throws DataFormatException on error parsing/decompressing the buffer + */ + public static DoubleHistogram decodeFromCompressedByteBuffer( + final ByteBuffer buffer, + Class internalCountsHistogramClass, + long minBarForHighestToLowestValueRatio) throws DataFormatException { + int cookie = buffer.getInt(); + if (!isCompressedDoubleHistogramCookie(cookie)) { + throw new IllegalArgumentException("The buffer does not contain a compressed DoubleHistogram"); + } + DoubleHistogram histogram = constructHistogramFromBuffer(cookie, buffer, + DoubleHistogram.class, internalCountsHistogramClass, + minBarForHighestToLowestValueRatio); + return histogram; + } + + /** + * Construct a new DoubleHistogram by decoding it from a String containing a base64 encoded + * compressed histogram representation. 
+ * + * @param base64CompressedHistogramString A string containing a base64 encoding of a compressed histogram + * @return A DoubleHistogram decoded from the string + * @throws DataFormatException on error parsing/decompressing the input + */ + public static DoubleHistogram fromString(final String base64CompressedHistogramString) + throws DataFormatException { + return decodeFromCompressedByteBuffer( + ByteBuffer.wrap(Base64Helper.parseBase64Binary(base64CompressedHistogramString)), + 0); + } + + // + // + // + // Internal helper methods: + // + // + // + + private long deriveInternalHighestToLowestValueRatio(final long externalHighestToLowestValueRatio) { + // Internal dynamic range needs to be 1 order of magnitude larger than the containing order of magnitude. + // e.g. the dynamic range that covers [0.9, 2.1) is 2.33x, which on it's own would require 4x range to + // cover the contained order of magnitude. But (if 1.0 was a bucket boundary, for example, the range + // will actually need to cover [0.5..1.0) [1.0..2.0) [2.0..4.0), mapping to an 8x internal dynamic range. + long internalHighestToLowestValueRatio = + 1L << (findContainingBinaryOrderOfMagnitude(externalHighestToLowestValueRatio) + 1); + return internalHighestToLowestValueRatio; + } + + private long deriveIntegerValueRange(final long externalHighestToLowestValueRatio, + final int numberOfSignificantValueDigits) { + long internalHighestToLowestValueRatio = + deriveInternalHighestToLowestValueRatio(externalHighestToLowestValueRatio); + + // We cannot use the bottom half of bucket 0 in an integer values histogram to represent double + // values, because the required precision does not exist there. We therefore need the integer + // range to be bigger, such that the entire double value range can fit in the upper halves of + // all buckets. Compute the integer value range that will achieve this: + + long lowestTackingIntegerValue = AbstractHistogram.numberOfSubBuckets(numberOfSignificantValueDigits) / 2; + long integerValueRange = lowestTackingIntegerValue * internalHighestToLowestValueRatio; + + return integerValueRange; + } + + private long getLowestTrackingIntegerValue() { + return integerValuesHistogram.subBucketHalfCount; + } + + private static int findContainingBinaryOrderOfMagnitude(final long longNumber) { + int pow2ceiling = 64 - Long.numberOfLeadingZeros(longNumber); // smallest power of 2 containing value + return pow2ceiling; + } + + private static int findContainingBinaryOrderOfMagnitude(final double doubleNumber) { + long longNumber = (long) Math.ceil(doubleNumber); + return findContainingBinaryOrderOfMagnitude(longNumber); + } + + private int findCappedContainingBinaryOrderOfMagnitude(final double doubleNumber) { + if (doubleNumber > configuredHighestToLowestValueRatio) { + return (int) (Math.log(configuredHighestToLowestValueRatio)/Math.log(2)); + } + if (doubleNumber > Math.pow(2.0, 50)) { + return 50; + } + return findContainingBinaryOrderOfMagnitude(doubleNumber); + } + + static { + // We don't want to allow the histogram to shift and expand into value ranges that could equate + // to infinity (e.g. 1024.0 * (Double.MAX_VALUE / 1024.0) == Infinity). 
+ // So let's make sure the highestAllowedValueEver cap is a couple of binary orders of magnitude away from MAX_VALUE:
+
+ // Choose a highestAllowedValueEver that is a nice power of 2 multiple of 1.0 :
+ double value = 1.0;
+ while (value < Double.MAX_VALUE / 4.0) {
+ value *= 2;
+ }
+ highestAllowedValueEver = value;
+ }
+}
diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/DoubleHistogramIterationValue.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/DoubleHistogramIterationValue.java
new file mode 100644
index 000000000..ae0500297
--- /dev/null
+++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/DoubleHistogramIterationValue.java
@@ -0,0 +1,96 @@
+/**
+ * Written by Gil Tene of Azul Systems, and released to the public domain,
+ * as explained at http://creativecommons.org/publicdomain/zero/1.0/
+ *
+ * @author Gil Tene
+ */
+
+package io.prometheus.client.HdrHistogram;
+
+/**
+ * Represents a value point iterated through in a {@link DoubleHistogram}, with associated stats.

+ * <ul>
+ * <li><b><code>valueIteratedTo</code></b> :<br> The actual value level that was iterated to by the iterator</li>
+ * <li><b><code>prevValueIteratedTo</code></b> :<br> The actual value level that was iterated from by the iterator</li>
+ * <li><b><code>countAtValueIteratedTo</code></b> :<br> The count of recorded values in the histogram that
+ * exactly match this [lowestEquivalentValue(valueIteratedTo)...highestEquivalentValue(valueIteratedTo)] value
+ * range.</li>
+ * <li><b><code>countAddedInThisIterationStep</code></b> :<br> The count of recorded values in the histogram that
+ * were added to the totalCountToThisValue (below) as a result of this iteration step. Since multiple iteration
+ * steps may occur with overlapping equivalent value ranges, the count may be lower than the count found at
+ * the value (e.g. multiple linear steps or percentile levels can occur within a single equivalent value range)</li>
+ * <li><b><code>totalCountToThisValue</code></b> :<br> The total count of all recorded values in the histogram at
+ * values equal or smaller than valueIteratedTo.</li>
+ * <li><b><code>totalValueToThisValue</code></b> :<br> The sum of all recorded values in the histogram at values
+ * equal or smaller than valueIteratedTo.</li>
+ * <li><b><code>percentile</code></b> :<br> The percentile of recorded values in the histogram at values equal
+ * or smaller than valueIteratedTo.</li>
+ * <li><b><code>percentileLevelIteratedTo</code></b> :<br> The percentile level that the iterator returning this
+ * HistogramIterationValue had iterated to. Generally, percentileLevelIteratedTo will be equal to or smaller than
+ * percentile, but the same value point can contain multiple iteration levels for some iterators. E.g. a
+ * PercentileIterator can stop multiple times in the exact same value point (if the count at that value covers a
+ * range of multiple percentiles in the requested percentile iteration points).</li>
+ * </ul>
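+ * <p>
+ * A minimal sketch of reading these stats (an illustrative example, assuming a populated
+ * {@link DoubleHistogram} named <code>histogram</code>):
+ * <pre><code>
+ * for (DoubleHistogramIterationValue v : histogram.recordedValues()) {
+ *     System.out.println(v.getValueIteratedTo() + " : " + v.getCountAtValueIteratedTo()
+ *             + " (total so far: " + v.getTotalCountToThisValue() + ")");
+ * }
+ * </code></pre>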
+ */ + +public class DoubleHistogramIterationValue { + private final HistogramIterationValue integerHistogramIterationValue; + + void reset() { + integerHistogramIterationValue.reset(); + } + + DoubleHistogramIterationValue(HistogramIterationValue integerHistogramIterationValue) { + this.integerHistogramIterationValue = integerHistogramIterationValue; + } + + public String toString() { + return "valueIteratedTo:" + getValueIteratedTo() + + ", prevValueIteratedTo:" + getValueIteratedFrom() + + ", countAtValueIteratedTo:" + getCountAtValueIteratedTo() + + ", countAddedInThisIterationStep:" + getCountAddedInThisIterationStep() + + ", totalCountToThisValue:" + getTotalCountToThisValue() + + ", totalValueToThisValue:" + getTotalValueToThisValue() + + ", percentile:" + getPercentile() + + ", percentileLevelIteratedTo:" + getPercentileLevelIteratedTo(); + } + + public double getValueIteratedTo() { + return integerHistogramIterationValue.getValueIteratedTo() * + integerHistogramIterationValue.getIntegerToDoubleValueConversionRatio(); + } + + public double getValueIteratedFrom() { + return integerHistogramIterationValue.getValueIteratedFrom() * + integerHistogramIterationValue.getIntegerToDoubleValueConversionRatio(); + } + + public long getCountAtValueIteratedTo() { + return integerHistogramIterationValue.getCountAtValueIteratedTo(); + } + + public long getCountAddedInThisIterationStep() { + return integerHistogramIterationValue.getCountAddedInThisIterationStep(); + } + + public long getTotalCountToThisValue() { + return integerHistogramIterationValue.getTotalCountToThisValue(); + } + + public double getTotalValueToThisValue() { + return integerHistogramIterationValue.getTotalValueToThisValue() * + integerHistogramIterationValue.getIntegerToDoubleValueConversionRatio(); + } + + public double getPercentile() { + return integerHistogramIterationValue.getPercentile(); + } + + public double getPercentileLevelIteratedTo() { + return integerHistogramIterationValue.getPercentileLevelIteratedTo(); + } + + public HistogramIterationValue getIntegerHistogramIterationValue() { + return integerHistogramIterationValue; + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/DoubleLinearIterator.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/DoubleLinearIterator.java new file mode 100644 index 000000000..e9127ad98 --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/DoubleLinearIterator.java @@ -0,0 +1,59 @@ +/** + * Written by Gil Tene of Azul Systems, and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram; + +import java.util.Iterator; + +/** + * Used for iterating through {@link DoubleHistogram} values in linear steps. The iteration is + * performed in steps of valueUnitsPerBucket in size, terminating when all recorded histogram + * values are exhausted. Note that each iteration "bucket" includes values up to and including + * the next bucket boundary value. + */ +public class DoubleLinearIterator implements Iterator { + private final LinearIterator integerLinearIterator; + private final DoubleHistogramIterationValue iterationValue; + DoubleHistogram histogram; + + /** + * Reset iterator for re-use in a fresh iteration over the same histogram data set. + * @param valueUnitsPerBucket The size (in value units) of each bucket iteration. 
+ */
+ public void reset(final double valueUnitsPerBucket) {
+ integerLinearIterator.reset((long) (valueUnitsPerBucket * histogram.getDoubleToIntegerValueConversionRatio()));
+ }
+
+ /**
+ * @param histogram The histogram this iterator will operate on
+ * @param valueUnitsPerBucket The size (in value units) of each bucket iteration.
+ */
+ public DoubleLinearIterator(final DoubleHistogram histogram, final double valueUnitsPerBucket) {
+ this.histogram = histogram;
+ integerLinearIterator = new LinearIterator(
+ histogram.integerValuesHistogram,
+ (long) (valueUnitsPerBucket * histogram.getDoubleToIntegerValueConversionRatio())
+ );
+ iterationValue = new DoubleHistogramIterationValue(integerLinearIterator.currentIterationValue);
+ }
+
+ @Override
+ public boolean hasNext() {
+ return integerLinearIterator.hasNext();
+ }
+
+ @Override
+ public DoubleHistogramIterationValue next() {
+ integerLinearIterator.next();
+ return iterationValue;
+ }
+
+ @Override
+ public void remove() {
+ integerLinearIterator.remove();
+ }
+}
diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/DoubleLogarithmicIterator.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/DoubleLogarithmicIterator.java
new file mode 100644
index 000000000..5998fedc0
--- /dev/null
+++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/DoubleLogarithmicIterator.java
@@ -0,0 +1,66 @@
+/**
+ * Written by Gil Tene of Azul Systems, and released to the public domain,
+ * as explained at http://creativecommons.org/publicdomain/zero/1.0/
+ *
+ * @author Gil Tene
+ */
+
+package io.prometheus.client.HdrHistogram;
+
+import java.util.Iterator;
+
+/**
+ * Used for iterating through {@link DoubleHistogram} values in logarithmically increasing levels. The
+ * iteration is performed in steps that start at valueUnitsInFirstBucket and increase exponentially according to
+ * logBase, terminating when all recorded histogram values are exhausted. Note that each iteration "bucket"
+ * includes values up to and including the next bucket boundary value.
+ */
+public class DoubleLogarithmicIterator implements Iterator<DoubleHistogramIterationValue> {
+ private final LogarithmicIterator integerLogarithmicIterator;
+ private final DoubleHistogramIterationValue iterationValue;
+ DoubleHistogram histogram;
+
+ /**
+ * Reset iterator for re-use in a fresh iteration over the same histogram data set.
+ * @param valueUnitsInFirstBucket the size (in value units) of the first value bucket step
+ * @param logBase the multiplier by which the bucket size is expanded in each iteration step.
+ */
+ public void reset(final double valueUnitsInFirstBucket, final double logBase) {
+ integerLogarithmicIterator.reset(
+ (long) (valueUnitsInFirstBucket * histogram.getDoubleToIntegerValueConversionRatio()),
+ logBase
+ );
+ }
+
+ /**
+ * @param histogram The histogram this iterator will operate on
+ * @param valueUnitsInFirstBucket the size (in value units) of the first value bucket step
+ * @param logBase the multiplier by which the bucket size is expanded in each iteration step.
+ */
+ public DoubleLogarithmicIterator(final DoubleHistogram histogram, final double valueUnitsInFirstBucket,
+ final double logBase) {
+ this.histogram = histogram;
+ integerLogarithmicIterator = new LogarithmicIterator(
+ histogram.integerValuesHistogram,
+ (long) (valueUnitsInFirstBucket * histogram.getDoubleToIntegerValueConversionRatio()),
+ logBase
+ );
+ iterationValue = new DoubleHistogramIterationValue(integerLogarithmicIterator.currentIterationValue);
+ }
+
+ @Override
+ public boolean hasNext() {
+ return integerLogarithmicIterator.hasNext();
+ }
+
+ @Override
+ public DoubleHistogramIterationValue next() {
+ integerLogarithmicIterator.next();
+ return iterationValue;
+ }
+
+ @Override
+ public void remove() {
+ integerLogarithmicIterator.remove();
+ }
+}
diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/DoublePercentileIterator.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/DoublePercentileIterator.java
new file mode 100644
index 000000000..d14270cf4
--- /dev/null
+++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/DoublePercentileIterator.java
@@ -0,0 +1,60 @@
+/**
+ * Written by Gil Tene of Azul Systems, and released to the public domain,
+ * as explained at http://creativecommons.org/publicdomain/zero/1.0/
+ *
+ * @author Gil Tene
+ */
+
+package io.prometheus.client.HdrHistogram;
+
+import java.util.Iterator;
+
+/**
+ * Used for iterating through {@link DoubleHistogram} values according to percentile levels. The iteration is
+ * performed in steps that start at 0% and reduce their distance to 100% according to the
+ * percentileTicksPerHalfDistance parameter, ultimately reaching 100% when all recorded histogram
+ * values are exhausted.
+ */
+public class DoublePercentileIterator implements Iterator<DoubleHistogramIterationValue> {
+ private final PercentileIterator integerPercentileIterator;
+ private final DoubleHistogramIterationValue iterationValue;
+ DoubleHistogram histogram;
+
+ /**
+ * Reset iterator for re-use in a fresh iteration over the same histogram data set.
+ *
+ * @param percentileTicksPerHalfDistance The number of iteration steps per half-distance to 100%.
+ */
+ public void reset(final int percentileTicksPerHalfDistance) {
+ integerPercentileIterator.reset(percentileTicksPerHalfDistance);
+ }
+
+ /**
+ * @param histogram The histogram this iterator will operate on
+ * @param percentileTicksPerHalfDistance The number of iteration steps per half-distance to 100%.
+ */
+ public DoublePercentileIterator(final DoubleHistogram histogram, final int percentileTicksPerHalfDistance) {
+ this.histogram = histogram;
+ integerPercentileIterator = new PercentileIterator(
+ histogram.integerValuesHistogram,
+ percentileTicksPerHalfDistance
+ );
+ iterationValue = new DoubleHistogramIterationValue(integerPercentileIterator.currentIterationValue);
+ }
+
+ @Override
+ public boolean hasNext() {
+ return integerPercentileIterator.hasNext();
+ }
+
+ @Override
+ public DoubleHistogramIterationValue next() {
+ integerPercentileIterator.next();
+ return iterationValue;
+ }
+
+ @Override
+ public void remove() {
+ integerPercentileIterator.remove();
+ }
+}
diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/DoubleRecordedValuesIterator.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/DoubleRecordedValuesIterator.java
new file mode 100644
index 000000000..9282461bd
--- /dev/null
+++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/DoubleRecordedValuesIterator.java
@@ -0,0 +1,54 @@
+/**
+ * Written by Gil Tene of Azul Systems, and released to the public domain,
+ * as explained at http://creativecommons.org/publicdomain/zero/1.0/
+ *
+ * @author Gil Tene
+ */
+
+package io.prometheus.client.HdrHistogram;
+
+import java.util.Iterator;
+
+/**
+ * Used for iterating through {@link DoubleHistogram} values using the finest granularity steps supported by
+ * the underlying representation. The iteration steps through all possible unit value levels, regardless of whether
+ * or not there were recorded values for that value level, and terminates when all recorded histogram values are
+ * exhausted.
+ */
+public class DoubleRecordedValuesIterator implements Iterator<DoubleHistogramIterationValue> {
+ private final RecordedValuesIterator integerRecordedValuesIterator;
+ private final DoubleHistogramIterationValue iterationValue;
+ DoubleHistogram histogram;
+
+ /**
+ * Reset iterator for re-use in a fresh iteration over the same histogram data set.
+ */ + public void reset() { + integerRecordedValuesIterator.reset(); + } + + /** + * @param histogram The histogram this iterator will operate on + */ + public DoubleRecordedValuesIterator(final DoubleHistogram histogram) { + this.histogram = histogram; + integerRecordedValuesIterator = new RecordedValuesIterator(histogram.integerValuesHistogram); + iterationValue = new DoubleHistogramIterationValue(integerRecordedValuesIterator.currentIterationValue); + } + + @Override + public boolean hasNext() { + return integerRecordedValuesIterator.hasNext(); + } + + @Override + public DoubleHistogramIterationValue next() { + integerRecordedValuesIterator.next(); + return iterationValue; + } + + @Override + public void remove() { + integerRecordedValuesIterator.remove(); + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/DoubleRecorder.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/DoubleRecorder.java new file mode 100644 index 000000000..f92656cb2 --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/DoubleRecorder.java @@ -0,0 +1,362 @@ +/** + * Written by Gil Tene of Azul Systems, and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram; + +import java.util.concurrent.atomic.AtomicLong; + +/** + * Records floating point (double) values, and provides stable + * interval {@link DoubleHistogram} samples from live recorded data without interrupting or stalling active recording + * of values. Each interval histogram provided contains all value counts accumulated since the + * previous interval histogram was taken. + *

+ * This pattern is commonly used in logging interval histogram information while recording is ongoing. + *

+ * {@link DoubleRecorder} supports concurrent + * {@link DoubleRecorder#recordValue} or + * {@link DoubleRecorder#recordValueWithExpectedInterval} calls. + * Recording calls are wait-free on architectures that support atomic increment operations, and + * are lock-free on architectures that do not. + *

+ * A common pattern for using a {@link DoubleRecorder} looks like this: + *
+ * <br><pre><code>
+ * DoubleRecorder recorder = new DoubleRecorder(2); // Two decimal point accuracy
+ * DoubleHistogram intervalHistogram = null;
+ * ...
+ * [start of some loop construct that periodically wants to grab an interval histogram]
+ *   ...
+ *   // Get interval histogram, recycling previous interval histogram:
+ *   intervalHistogram = recorder.getIntervalHistogram(intervalHistogram);
+ *   histogramLogWriter.outputIntervalHistogram(intervalHistogram);
+ *   ...
+ * [end of loop construct]
+ * </code></pre>
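+ * <p>
+ * The recycling shown above is an optimization rather than a requirement; a sketch of the simpler,
+ * allocating form (at the cost of a fresh histogram per interval):
+ * <pre><code>
+ * DoubleHistogram fresh = recorder.getIntervalHistogram(); // allocates a new histogram each call
+ * </code></pre>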
+ */ + +public class DoubleRecorder implements DoubleValueRecorder { + private static AtomicLong instanceIdSequencer = new AtomicLong(1); + private final long instanceId = instanceIdSequencer.getAndIncrement(); + + private final WriterReaderPhaser recordingPhaser = new WriterReaderPhaser(); + + private volatile ConcurrentDoubleHistogram activeHistogram; + private ConcurrentDoubleHistogram inactiveHistogram; + + /** + * Construct an auto-resizing {@link DoubleRecorder} using a precision stated as a number + * of significant decimal digits. + *

+ * Depending on the value of the packed parameter, {@link DoubleRecorder} can be configured to
+ * track value counts in a packed internal representation, optimized for the typical case in which
+ * recorded values are sparse in the value range and tend to be incremented in small unit counts. This
+ * packed representation tends to require significantly smaller amounts of storage when compared to
+ * unpacked representations, but can incur additional recording cost due to resizing and repacking
+ * operations that may occur as previously unrecorded values are encountered.
+ *
+ * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
+ * decimal digits to which the histogram will maintain value resolution
+ * and separation. Must be a non-negative integer between 0 and 5.
+ * @param packed Specifies whether the recorder uses a packed internal representation or not.
+ */
+ public DoubleRecorder(final int numberOfSignificantValueDigits, boolean packed) {
+ activeHistogram = packed ?
+ new PackedInternalConcurrentDoubleHistogram(instanceId, numberOfSignificantValueDigits) :
+ new InternalConcurrentDoubleHistogram(instanceId, numberOfSignificantValueDigits);
+ inactiveHistogram = null;
+ activeHistogram.setStartTimeStamp(System.currentTimeMillis());
+ }
+
+ /**
+ * Construct an auto-resizing {@link DoubleRecorder} using a precision stated as a number
+ * of significant decimal digits.
+ *
+ * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
+ * decimal digits to which the histogram will maintain value resolution
+ * and separation. Must be a non-negative integer between 0 and 5.
+ */
+ public DoubleRecorder(final int numberOfSignificantValueDigits) {
+ this(numberOfSignificantValueDigits, false);
+ }
+
+ /**
+ * Construct a {@link DoubleRecorder} given a dynamic range of values to cover and a number of significant
+ * decimal digits.
+ *
+ * @param highestToLowestValueRatio specifies the dynamic range to use (as a ratio)
+ * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
+ * decimal digits to which the histogram will maintain value resolution
+ * and separation. Must be a non-negative integer between 0 and 5.
+ */
+ public DoubleRecorder(final long highestToLowestValueRatio,
+ final int numberOfSignificantValueDigits) {
+ activeHistogram = new InternalConcurrentDoubleHistogram(
+ instanceId, highestToLowestValueRatio, numberOfSignificantValueDigits);
+ inactiveHistogram = null;
+ activeHistogram.setStartTimeStamp(System.currentTimeMillis());
+ }
+
+ /**
+ * Record a value
+ * @param value the value to record
+ * @throws ArrayIndexOutOfBoundsException (may throw) if value exceeds highestTrackableValue
+ */
+ @Override
+ public void recordValue(final double value) {
+ long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter();
+ try {
+ activeHistogram.recordValue(value);
+ } finally {
+ recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter);
+ }
+ }
+
+ /**
+ * Record a value in the histogram (adding to the value's current count)
+ *
+ * @param value The value to be recorded
+ * @param count The number of occurrences of this value to record
+ * @throws ArrayIndexOutOfBoundsException (may throw) if value exceeds highestTrackableValue
+ */
+ @Override
+ public void recordValueWithCount(final double value, final long count) throws ArrayIndexOutOfBoundsException {
+ long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter();
+ try {
+ activeHistogram.recordValueWithCount(value, count);
+ } finally {
+ recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter);
+ }
+ }
+
+ /**
+ * Record a value
+ *

+ * To compensate for the loss of sampled values when a recorded value is larger than the expected + * interval between value samples, Histogram will auto-generate an additional series of decreasingly-smaller + * (down to the expectedIntervalBetweenValueSamples) value records. + *
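+ * <p>
+ * An illustrative sketch: with an expected interval of 10 ms, an observed value of 100 ms is recorded
+ * along with auto-generated values at 90, 80, ... down to 10 ms:
+ * <pre><code>
+ * recorder.recordValueWithExpectedInterval(100.0, 10.0); // records 100, 90, 80, ..., 10
+ * </code></pre>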

+ * See related notes {@link DoubleHistogram#recordValueWithExpectedInterval(double, double)}
+ * for more explanations about coordinated omission and expected interval correction.
+ *
+ * @param value The value to record
+ * @param expectedIntervalBetweenValueSamples If expectedIntervalBetweenValueSamples is larger than 0, add
+ * auto-generated value records as appropriate if value is larger
+ * than expectedIntervalBetweenValueSamples
+ * @throws ArrayIndexOutOfBoundsException (may throw) if value exceeds highestTrackableValue
+ */
+ @Override
+ public void recordValueWithExpectedInterval(final double value, final double expectedIntervalBetweenValueSamples)
+ throws ArrayIndexOutOfBoundsException {
+ long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter();
+ try {
+ activeHistogram.recordValueWithExpectedInterval(value, expectedIntervalBetweenValueSamples);
+ } finally {
+ recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter);
+ }
+ }
+
+ /**
+ * Get a new instance of an interval histogram, which will include a stable, consistent view of all value
+ * counts accumulated since the last interval histogram was taken.
+ *

+ * Calling {@code getIntervalHistogram()} will reset + * the value counts, and start accumulating value counts for the next interval. + * + * @return a histogram containing the value counts accumulated since the last interval histogram was taken. + */ + public synchronized DoubleHistogram getIntervalHistogram() { + return getIntervalHistogram(null); + } + + /** + * Get an interval histogram, which will include a stable, consistent view of all value counts + * accumulated since the last interval histogram was taken. + *

+ * {@code getIntervalHistogram(histogramToRecycle)}
+ * accepts a previously returned interval histogram that can be recycled internally to avoid allocation
+ * and content copying operations, and is therefore significantly more efficient for repeated use than
+ * {@link DoubleRecorder#getIntervalHistogram()} and
+ * {@link DoubleRecorder#getIntervalHistogramInto getIntervalHistogramInto()}. The provided
+ * {@code histogramToRecycle} must either be null or an interval histogram returned by a previous call to
+ * {@code getIntervalHistogram(histogramToRecycle)} or {@link DoubleRecorder#getIntervalHistogram()}.
+ *

+ * NOTE: The caller is responsible for not recycling the same returned interval histogram more than once. If + * the same interval histogram instance is recycled more than once, behavior is undefined. + *

+ * Calling {@code getIntervalHistogram(histogramToRecycle)} will reset the value counts, and start + * accumulating value counts for the next interval + * + * @param histogramToRecycle a previously returned interval histogram (from this instance of + * {@link DoubleRecorder}) that may be recycled to avoid allocation and + * copy operations. + * @return a histogram containing the value counts accumulated since the last interval histogram was taken. + */ + public synchronized DoubleHistogram getIntervalHistogram(DoubleHistogram histogramToRecycle) { + return getIntervalHistogram(histogramToRecycle, true); + } + + /** + * Get an interval histogram, which will include a stable, consistent view of all value counts + * accumulated since the last interval histogram was taken. + *

+ * {@link DoubleRecorder#getIntervalHistogram(DoubleHistogram histogramToRecycle)
+ * getIntervalHistogram(histogramToRecycle)}
+ * accepts a previously returned interval histogram that can be recycled internally to avoid allocation
+ * and content copying operations, and is therefore significantly more efficient for repeated use than
+ * {@link DoubleRecorder#getIntervalHistogram()} and
+ * {@link DoubleRecorder#getIntervalHistogramInto getIntervalHistogramInto()}. The provided
+ * {@code histogramToRecycle} must either be null or an interval histogram returned by a previous call to
+ * {@link DoubleRecorder#getIntervalHistogram(DoubleHistogram histogramToRecycle)
+ * getIntervalHistogram(histogramToRecycle)} or
+ * {@link DoubleRecorder#getIntervalHistogram()}.
+ *

+ * NOTE: The caller is responsible for not recycling the same returned interval histogram more than once. If + * the same interval histogram instance is recycled more than once, behavior is undefined. + *

+ * Calling {@link DoubleRecorder#getIntervalHistogram(DoubleHistogram histogramToRecycle)
+ * getIntervalHistogram(histogramToRecycle)} will reset the value counts, and start accumulating value
+ * counts for the next interval.
+ *
+ * @param histogramToRecycle a previously returned interval histogram that may be recycled to avoid allocation and
+ * copy operations.
+ * @param enforceContainingInstance if true, will only allow recycling of histograms previously returned from this
+ * instance of {@link DoubleRecorder}. If false, will allow recycling histograms
+ * previously returned by other instances of {@link DoubleRecorder}.
+ * @return a histogram containing the value counts accumulated since the last interval histogram was taken.
+ */
+ public synchronized DoubleHistogram getIntervalHistogram(DoubleHistogram histogramToRecycle,
+ boolean enforceContainingInstance) {
+ // Verify that replacement histogram can validly be used as an inactive histogram replacement:
+ validateFitAsReplacementHistogram(histogramToRecycle, enforceContainingInstance);
+ inactiveHistogram = (ConcurrentDoubleHistogram) histogramToRecycle;
+ performIntervalSample();
+ DoubleHistogram sampledHistogram = inactiveHistogram;
+ inactiveHistogram = null; // Once we expose the sample, we can't reuse it internally until it is recycled
+ return sampledHistogram;
+ }
+
+ /**
+ * Place a copy of the value counts accumulated since the last interval histogram
+ * was taken into {@code targetHistogram}.
+ *
+ * Calling {@code getIntervalHistogramInto(targetHistogram)} will reset
+ * the value counts, and start accumulating value counts for the next interval.
+ *
+ * @param targetHistogram the histogram into which the interval histogram's data should be copied
+ */
+ public synchronized void getIntervalHistogramInto(DoubleHistogram targetHistogram) {
+ performIntervalSample();
+ inactiveHistogram.copyInto(targetHistogram);
+ }
+
+ /**
+ * Reset any value counts accumulated thus far.
+ */
+ @Override
+ public synchronized void reset() {
+ // the currently inactive histogram is reset each time we flip.
So flipping twice resets both: + performIntervalSample(); + performIntervalSample(); + } + + private void performIntervalSample() { + try { + recordingPhaser.readerLock(); + + // Make sure we have an inactive version to flip in: + if (inactiveHistogram == null) { + if (activeHistogram instanceof InternalConcurrentDoubleHistogram) { + inactiveHistogram = new InternalConcurrentDoubleHistogram( + (InternalConcurrentDoubleHistogram) activeHistogram); + } else if (activeHistogram instanceof PackedInternalConcurrentDoubleHistogram) { + inactiveHistogram = new PackedInternalConcurrentDoubleHistogram( + instanceId, activeHistogram.getNumberOfSignificantValueDigits()); + } else { + throw new IllegalStateException("Unexpected internal histogram type for activeHistogram"); + } + } + + inactiveHistogram.reset(); + + // Swap active and inactive histograms: + final ConcurrentDoubleHistogram tempHistogram = inactiveHistogram; + inactiveHistogram = activeHistogram; + activeHistogram = tempHistogram; + + // Mark end time of previous interval and start time of new one: + long now = System.currentTimeMillis(); + activeHistogram.setStartTimeStamp(now); + inactiveHistogram.setEndTimeStamp(now); + + // Make sure we are not in the middle of recording a value on the previously active histogram: + + // Flip phase to make sure no recordings that were in flight pre-flip are still active: + recordingPhaser.flipPhase(500000L /* yield in 0.5 msec units if needed */); + } finally { + recordingPhaser.readerUnlock(); + } + } + + private static class InternalConcurrentDoubleHistogram extends ConcurrentDoubleHistogram { + private final long containingInstanceId; + + private InternalConcurrentDoubleHistogram(long id, int numberOfSignificantValueDigits) { + super(numberOfSignificantValueDigits); + this.containingInstanceId = id; + } + + private InternalConcurrentDoubleHistogram(long id, + long highestToLowestValueRatio, + int numberOfSignificantValueDigits) { + super(highestToLowestValueRatio, numberOfSignificantValueDigits); + this.containingInstanceId = id; + } + + private InternalConcurrentDoubleHistogram(InternalConcurrentDoubleHistogram source) { + super(source); + this.containingInstanceId = source.containingInstanceId; + } + } + + private static class PackedInternalConcurrentDoubleHistogram extends PackedConcurrentDoubleHistogram { + private final long containingInstanceId; + + private PackedInternalConcurrentDoubleHistogram(long id, int numberOfSignificantValueDigits) { + super(numberOfSignificantValueDigits); + this.containingInstanceId = id; + } + } + + private void validateFitAsReplacementHistogram(DoubleHistogram replacementHistogram, + boolean enforceContainingInstance) { + boolean bad = true; + if (replacementHistogram == null) { + bad = false; + } else if ((replacementHistogram instanceof InternalConcurrentDoubleHistogram) + && + ((!enforceContainingInstance) || + (((InternalConcurrentDoubleHistogram) replacementHistogram).containingInstanceId == + ((InternalConcurrentDoubleHistogram) activeHistogram).containingInstanceId) + )) { + bad = false; + } else if ((replacementHistogram instanceof PackedInternalConcurrentDoubleHistogram) + && + ((!enforceContainingInstance) || + (((PackedInternalConcurrentDoubleHistogram) replacementHistogram).containingInstanceId == + ((PackedInternalConcurrentDoubleHistogram) activeHistogram).containingInstanceId) + )) { + bad = false; + } + + if (bad) { + throw new IllegalArgumentException("replacement histogram must have been obtained via a previous" + + " getIntervalHistogram() 
call from this " + this.getClass().getName() +" instance"); + } + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/DoubleValueRecorder.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/DoubleValueRecorder.java new file mode 100644 index 000000000..d731a3a8e --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/DoubleValueRecorder.java @@ -0,0 +1,47 @@ +package io.prometheus.client.HdrHistogram; + +public interface DoubleValueRecorder { + + /** + * Record a value + * + * @param value The value to be recorded + * @throws ArrayIndexOutOfBoundsException (may throw) if value cannot be covered by the histogram's range + */ + void recordValue(double value) throws ArrayIndexOutOfBoundsException; + + /** + * Record a value (adding to the value's current count) + * + * @param value The value to be recorded + * @param count The number of occurrences of this value to record + * @throws ArrayIndexOutOfBoundsException (may throw) if value cannot be covered by the histogram's range + */ + void recordValueWithCount(double value, long count) throws ArrayIndexOutOfBoundsException; + + /** + * Record a value. + *

+ * To compensate for the loss of sampled values when a recorded value is larger than the expected + * interval between value samples, will auto-generate an additional series of decreasingly-smaller + * (down to the expectedIntervalBetweenValueSamples) value records. + *

+ * Note: This is an at-recording correction method, as opposed to the post-recording correction method provided
+ * by {@link DoubleHistogram#copyCorrectedForCoordinatedOmission(double)}.
+ * The two methods are mutually exclusive, and only one of the two should be used on a given data set to correct
+ * for the same coordinated omission issue.
+ *
+ * @param value The value to record
+ * @param expectedIntervalBetweenValueSamples If expectedIntervalBetweenValueSamples is larger than 0, add
+ * auto-generated value records as appropriate if value is larger
+ * than expectedIntervalBetweenValueSamples
+ * @throws ArrayIndexOutOfBoundsException (may throw) if value cannot be covered by the histogram's range
+ */
+ void recordValueWithExpectedInterval(double value, double expectedIntervalBetweenValueSamples)
+ throws ArrayIndexOutOfBoundsException;
+
+ /**
+ * Reset the contents and collected stats
+ */
+ void reset();
+}
diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/EncodableHistogram.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/EncodableHistogram.java
new file mode 100644
index 000000000..43b95a103
--- /dev/null
+++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/EncodableHistogram.java
@@ -0,0 +1,62 @@
+/**
+ * Written by Gil Tene of Azul Systems, and released to the public domain,
+ * as explained at http://creativecommons.org/publicdomain/zero/1.0/
+ *
+ * @author Gil Tene
+ */
+
+package io.prometheus.client.HdrHistogram;
+
+import java.nio.ByteBuffer;
+import java.util.zip.DataFormatException;
+
+/**
+ * A base class for all encodable (and decodable) histogram classes. Log readers and writers
+ * will generally use this base class to provide common log processing across the integer value
+ * based AbstractHistogram subclasses and the double value based DoubleHistogram class.
+ */
+public abstract class EncodableHistogram {
+
+ public abstract int getNeededByteBufferCapacity();
+
+ public abstract int encodeIntoCompressedByteBuffer(final ByteBuffer targetBuffer, int compressionLevel);
+
+ public abstract long getStartTimeStamp();
+
+ public abstract void setStartTimeStamp(long startTimeStamp);
+
+ public abstract long getEndTimeStamp();
+
+ public abstract void setEndTimeStamp(long endTimestamp);
+
+ public abstract String getTag();
+
+ public abstract void setTag(String tag);
+
+ public abstract double getMaxValueAsDouble();
+
+ /**
+ * Decode an {@link EncodableHistogram} from a compressed byte buffer. Will return either a
+ * {@link Histogram} or {@link DoubleHistogram} depending
+ * on the format found in the supplied buffer.
+ *
+ * @param buffer The input buffer to decode from.
+ * @param minBarForHighestTrackableValue A lower bound either on the highestTrackableValue of
+ * the created Histogram, or on the HighestToLowestValueRatio
+ * of the created DoubleHistogram.
+ * @return The decoded {@link Histogram} or {@link DoubleHistogram}
+ * @throws DataFormatException on errors in decoding the buffer compression.
+ */
+ static EncodableHistogram decodeFromCompressedByteBuffer(
+ ByteBuffer buffer,
+ final long minBarForHighestTrackableValue) throws DataFormatException {
+ // Peek in buffer to see the cookie:
+ int cookie = buffer.getInt(buffer.position());
+ if (DoubleHistogram.isDoubleHistogramCookie(cookie)) {
+ return DoubleHistogram.decodeFromCompressedByteBuffer(buffer, minBarForHighestTrackableValue);
+ } else {
+ return Histogram.decodeFromCompressedByteBuffer(buffer, minBarForHighestTrackableValue);
+ }
+ }
+}
diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/Histogram.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/Histogram.java
new file mode 100644
index 000000000..6f2c38638
--- /dev/null
+++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/Histogram.java
@@ -0,0 +1,282 @@
+/**
+ * Written by Gil Tene of Azul Systems, and released to the public domain,
+ * as explained at http://creativecommons.org/publicdomain/zero/1.0/
+ *
+ * @author Gil Tene
+ */
+
+package io.prometheus.client.HdrHistogram;
+
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.zip.DataFormatException;
+
+/**

+ * <h3>A High Dynamic Range (HDR) Histogram</h3>
+ * <p>
+ * {@link Histogram} supports the recording and analyzing of sampled data value counts across a configurable
+ * integer value range with configurable value precision within the range. Value precision is expressed as the
+ * number of significant digits in the value recording, and provides control over value quantization behavior
+ * across the value range and the subsequent value resolution at any given level.
+ * <p>

+ * For example, a Histogram could be configured to track the counts of observed integer values between 0 and + * 3,600,000,000 while maintaining a value precision of 3 significant digits across that range. Value quantization + * within the range will thus be no larger than 1/1,000th (or 0.1%) of any value. This example Histogram could + * be used to track and analyze the counts of observed response times ranging between 1 microsecond and 1 hour + * in magnitude, while maintaining a value resolution of 1 microsecond up to 1 millisecond, a resolution of + * 1 millisecond (or better) up to one second, and a resolution of 1 second (or better) up to 1,000 seconds. At its + * maximum tracked value (1 hour), it would still maintain a resolution of 3.6 seconds (or better). + *
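+ * <p>
+ * A sketch of that example configuration (values in microseconds; the variable name is illustrative only):
+ * <pre><code>
+ * Histogram histogram = new Histogram(3600000000L, 3); // 1 usec to 1 hour, 3 significant digits
+ * histogram.recordValue(responseTimeInMicroseconds);
+ * </code></pre>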

+ * Histogram tracks value counts in long fields. Smaller field types are available in the + * {@link IntCountsHistogram} and {@link ShortCountsHistogram} implementations of + * {@link AbstractHistogram}. + *

+ * Auto-resizing: When constructed with no specified value range (or when auto-resize is turned on with {@link
+ * Histogram#setAutoResize}) a {@link Histogram} will auto-resize its dynamic range to include recorded values as
+ * they are encountered. Note that recording calls that cause auto-resizing may take longer to execute, as resizing
+ * incurs allocation and copying of internal data structures.
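+ * <p>
+ * A minimal auto-resizing sketch (hypothetical values):
+ * <pre><code>
+ * Histogram h = new Histogram(3); // auto-resizing, 3 significant decimal digits
+ * h.recordValue(42);
+ * h.recordValue(7200000000L); // beyond the current range; triggers an auto-resize
+ * </code></pre>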

+ * See package description for {@link org.HdrHistogram} for details. + */ + +public class Histogram extends AbstractHistogram { + long totalCount; + long[] counts; + int normalizingIndexOffset; + + @Override + long getCountAtIndex(final int index) { + return counts[normalizeIndex(index, normalizingIndexOffset, countsArrayLength)]; + } + + @Override + long getCountAtNormalizedIndex(final int index) { + return counts[index]; + } + + @Override + void incrementCountAtIndex(final int index) { + counts[normalizeIndex(index, normalizingIndexOffset, countsArrayLength)]++; + } + + @Override + void addToCountAtIndex(final int index, final long value) { + counts[normalizeIndex(index, normalizingIndexOffset, countsArrayLength)] += value; + } + + @Override + void setCountAtIndex(int index, long value) { + counts[normalizeIndex(index, normalizingIndexOffset, countsArrayLength)] = value; + } + + @Override + void setCountAtNormalizedIndex(int index, long value) { + counts[index] = value; + } + + @Override + int getNormalizingIndexOffset() { + return normalizingIndexOffset; + } + + @Override + void setNormalizingIndexOffset(int normalizingIndexOffset) { + this.normalizingIndexOffset = normalizingIndexOffset; + } + + @Override + void setIntegerToDoubleValueConversionRatio(double integerToDoubleValueConversionRatio) { + nonConcurrentSetIntegerToDoubleValueConversionRatio(integerToDoubleValueConversionRatio); + } + + @Override + void shiftNormalizingIndexByOffset(int offsetToAdd, + boolean lowestHalfBucketPopulated, + double newIntegerToDoubleValueConversionRatio) { + nonConcurrentNormalizingIndexShift(offsetToAdd, lowestHalfBucketPopulated); + } + + @Override + void clearCounts() { + Arrays.fill(counts, 0); + totalCount = 0; + } + + @Override + public Histogram copy() { + Histogram copy = new Histogram(this); + copy.add(this); + return copy; + } + + @Override + public Histogram copyCorrectedForCoordinatedOmission(final long expectedIntervalBetweenValueSamples) { + Histogram copy = new Histogram(this); + copy.addWhileCorrectingForCoordinatedOmission(this, expectedIntervalBetweenValueSamples); + return copy; + } + + @Override + public long getTotalCount() { + return totalCount; + } + + @Override + void setTotalCount(final long totalCount) { + this.totalCount = totalCount; + } + + @Override + void incrementTotalCount() { + totalCount++; + } + + @Override + void addToTotalCount(final long value) { + totalCount += value; + } + + @Override + int _getEstimatedFootprintInBytes() { + return (512 + (8 * counts.length)); + } + + @Override + void resize(long newHighestTrackableValue) { + int oldNormalizedZeroIndex = normalizeIndex(0, normalizingIndexOffset, countsArrayLength); + + establishSize(newHighestTrackableValue); + + int countsDelta = countsArrayLength - counts.length; + + counts = Arrays.copyOf(counts, countsArrayLength); + + if (oldNormalizedZeroIndex != 0) { + // We need to shift the stuff from the zero index and up to the end of the array: + int newNormalizedZeroIndex = oldNormalizedZeroIndex + countsDelta; + int lengthToCopy = (countsArrayLength - countsDelta) - oldNormalizedZeroIndex; + System.arraycopy(counts, oldNormalizedZeroIndex, counts, newNormalizedZeroIndex, lengthToCopy); + Arrays.fill(counts, oldNormalizedZeroIndex, newNormalizedZeroIndex, 0); + } + } + + /** + * Construct an auto-resizing histogram with a lowest discernible value of 1 and an auto-adjusting + * highestTrackableValue. Can auto-resize up to track values up to (Long.MAX_VALUE / 2). 
+ *
+ * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
+ * decimal digits to which the histogram will maintain value resolution
+ * and separation. Must be a non-negative integer between 0 and 5.
+ */
+ public Histogram(final int numberOfSignificantValueDigits) {
+ this(1, 2, numberOfSignificantValueDigits);
+ setAutoResize(true);
+ }
+
+ /**
+ * Construct a Histogram given the Highest value to be tracked and a number of significant decimal digits. The
+ * histogram will be constructed to implicitly track (distinguish from 0) values as low as 1.
+ *
+ * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive
+ * integer that is {@literal >=} 2.
+ * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
+ * decimal digits to which the histogram will maintain value resolution
+ * and separation. Must be a non-negative integer between 0 and 5.
+ */
+ public Histogram(final long highestTrackableValue, final int numberOfSignificantValueDigits) {
+ this(1, highestTrackableValue, numberOfSignificantValueDigits);
+ }
+
+ /**
+ * Construct a Histogram given the Lowest and Highest values to be tracked and a number of significant
+ * decimal digits. Providing a lowestDiscernibleValue is useful in situations where the units used
+ * for the histogram's values are much smaller than the minimal accuracy required. E.g. when tracking
+ * time values stated in nanosecond units, where the minimal accuracy required is a microsecond, the
+ * proper value for lowestDiscernibleValue would be 1000.
+ *
+ * @param lowestDiscernibleValue The lowest value that can be discerned (distinguished from 0) by the
+ * histogram. Must be a positive integer that is {@literal >=} 1. May be
+ * internally rounded down to nearest power of 2.
+ * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive
+ * integer that is {@literal >=} (2 * lowestDiscernibleValue).
+ * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
+ * decimal digits to which the histogram will maintain value resolution
+ * and separation. Must be a non-negative integer between 0 and 5.
+ */
+ public Histogram(final long lowestDiscernibleValue, final long highestTrackableValue,
+ final int numberOfSignificantValueDigits) {
+ this(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits, true);
+ }
+
+ /**
+ * Construct a histogram with the same range settings as a given source histogram,
+ * duplicating the source's start/end timestamps (but NOT its contents)
+ * @param source The source histogram to duplicate
+ */
+ public Histogram(final AbstractHistogram source) {
+ this(source, true);
+ }
+
+ Histogram(final AbstractHistogram source, boolean allocateCountsArray) {
+ super(source);
+ if (allocateCountsArray) {
+ counts = new long[countsArrayLength];
+ }
+ wordSizeInBytes = 8;
+ }
+
+ Histogram(final long lowestDiscernibleValue, final long highestTrackableValue,
+ final int numberOfSignificantValueDigits, boolean allocateCountsArray) {
+ super(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits);
+ if (allocateCountsArray) {
+ counts = new long[countsArrayLength];
+ }
+ wordSizeInBytes = 8;
+ }
+
+ /**
+ * Construct a new histogram by decoding it from a ByteBuffer.
+ * @param buffer The buffer to decode from + * @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high + * @return The newly constructed histogram + */ + public static Histogram decodeFromByteBuffer(final ByteBuffer buffer, + final long minBarForHighestTrackableValue) { + return decodeFromByteBuffer(buffer, Histogram.class, minBarForHighestTrackableValue); + } + + /** + * Construct a new histogram by decoding it from a compressed form in a ByteBuffer. + * @param buffer The buffer to decode from + * @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high + * @return The newly constructed histogram + * @throws DataFormatException on error parsing/decompressing the buffer + */ + public static Histogram decodeFromCompressedByteBuffer(final ByteBuffer buffer, + final long minBarForHighestTrackableValue) + throws DataFormatException { + return decodeFromCompressedByteBuffer(buffer, Histogram.class, minBarForHighestTrackableValue); + } + + private void readObject(final ObjectInputStream o) + throws IOException, ClassNotFoundException { + o.defaultReadObject(); + } + + /** + * Construct a new Histogram by decoding it from a String containing a base64 encoded + * compressed histogram representation. + * + * @param base64CompressedHistogramString A string containing a base64 encoding of a compressed histogram + * @return A Histogram decoded from the string + * @throws DataFormatException on error parsing/decompressing the input + */ + public static Histogram fromString(final String base64CompressedHistogramString) + throws DataFormatException { + return decodeFromCompressedByteBuffer( + ByteBuffer.wrap(Base64Helper.parseBase64Binary(base64CompressedHistogramString)), + 0); + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/HistogramIterationValue.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/HistogramIterationValue.java new file mode 100644 index 000000000..03a65e4f6 --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/HistogramIterationValue.java @@ -0,0 +1,128 @@ +/** + * Written by Gil Tene of Azul Systems, and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram; + +/** + * Represents a value point iterated through in a Histogram, with associated stats. + *
<ul>
+ * <li><b><code>valueIteratedTo</code></b> :<br> The actual value level that was iterated to by the iterator</li>
+ * <li><b><code>prevValueIteratedTo</code></b> :<br> The actual value level that was iterated from by the iterator</li>
+ * <li><b><code>countAtValueIteratedTo</code></b> :<br> The count of recorded values in the histogram that
+ * exactly match this [lowestEquivalentValue(valueIteratedTo)...highestEquivalentValue(valueIteratedTo)] value
+ * range.</li>
+ * <li><b><code>countAddedInThisIterationStep</code></b> :<br> The count of recorded values in the histogram that
+ * were added to the totalCountToThisValue (below) as a result of this iteration step. Since multiple iteration
+ * steps may occur with overlapping equivalent value ranges, the count may be lower than the count found at
+ * the value (e.g. multiple linear steps or percentile levels can occur within a single equivalent value range)</li>
+ * <li><b><code>totalCountToThisValue</code></b> :<br> The total count of all recorded values in the histogram at
+ * values equal or smaller than valueIteratedTo.</li>
+ * <li><b><code>totalValueToThisValue</code></b> :<br> The sum of all recorded values in the histogram at values
+ * equal or smaller than valueIteratedTo.</li>
+ * <li><b><code>percentile</code></b> :<br> The percentile of recorded values in the histogram at values equal
+ * or smaller than valueIteratedTo.</li>
+ * <li><b><code>percentileLevelIteratedTo</code></b> :<br> The percentile level that the iterator returning this
+ * HistogramIterationValue had iterated to. Generally, percentileLevelIteratedTo will be equal to or smaller than
+ * percentile, but the same value point can contain multiple iteration levels for some iterators. E.g. a
+ * PercentileIterator can stop multiple times in the exact same value point (if the count at that value covers a
+ * range of multiple percentiles in the requested percentile iteration points).</li>
+ * </ul>
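+ * <p>
+ * For orientation (an illustrative sketch, assuming the vendored AbstractHistogram keeps
+ * upstream's percentiles(int) iteration facility): these are the stats a loop such as the
+ * following would observe at each iteration step:
+ * <pre>
+ * for (HistogramIterationValue v : histogram.percentiles(5)) {
+ *     System.out.println(v.getPercentileLevelIteratedTo() + " " + v.getValueIteratedTo());
+ * }
+ * </pre>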
+ */ + +public class HistogramIterationValue { + private long valueIteratedTo; + private long valueIteratedFrom; + private long countAtValueIteratedTo; + private long countAddedInThisIterationStep; + private long totalCountToThisValue; + private long totalValueToThisValue; + private double percentile; + private double percentileLevelIteratedTo; + private double integerToDoubleValueConversionRatio; + + // Set is all-or-nothing to avoid the potential for accidental omission of some values... + void set(final long valueIteratedTo, final long valueIteratedFrom, final long countAtValueIteratedTo, + final long countInThisIterationStep, final long totalCountToThisValue, final long totalValueToThisValue, + final double percentile, final double percentileLevelIteratedTo, double integerToDoubleValueConversionRatio) { + this.valueIteratedTo = valueIteratedTo; + this.valueIteratedFrom = valueIteratedFrom; + this.countAtValueIteratedTo = countAtValueIteratedTo; + this.countAddedInThisIterationStep = countInThisIterationStep; + this.totalCountToThisValue = totalCountToThisValue; + this.totalValueToThisValue = totalValueToThisValue; + this.percentile = percentile; + this.percentileLevelIteratedTo = percentileLevelIteratedTo; + this.integerToDoubleValueConversionRatio = integerToDoubleValueConversionRatio; + } + + void reset() { + this.valueIteratedTo = 0; + this.valueIteratedFrom = 0; + this.countAtValueIteratedTo = 0; + this.countAddedInThisIterationStep = 0; + this.totalCountToThisValue = 0; + this.totalValueToThisValue = 0; + this.percentile = 0.0; + this.percentileLevelIteratedTo = 0.0; + } + + HistogramIterationValue() { + } + + public String toString() { + return "valueIteratedTo:" + valueIteratedTo + + ", prevValueIteratedTo:" + valueIteratedFrom + + ", countAtValueIteratedTo:" + countAtValueIteratedTo + + ", countAddedInThisIterationStep:" + countAddedInThisIterationStep + + ", totalCountToThisValue:" + totalCountToThisValue + + ", totalValueToThisValue:" + totalValueToThisValue + + ", percentile:" + percentile + + ", percentileLevelIteratedTo:" + percentileLevelIteratedTo; + } + + public long getValueIteratedTo() { + return valueIteratedTo; + } + + public double getDoubleValueIteratedTo() { + return valueIteratedTo * integerToDoubleValueConversionRatio; + } + + public long getValueIteratedFrom() { + return valueIteratedFrom; + } + + public double getDoubleValueIteratedFrom() { + return valueIteratedFrom * integerToDoubleValueConversionRatio; + } + + public long getCountAtValueIteratedTo() { + return countAtValueIteratedTo; + } + + public long getCountAddedInThisIterationStep() { + return countAddedInThisIterationStep; + } + + public long getTotalCountToThisValue() { + return totalCountToThisValue; + } + + public long getTotalValueToThisValue() { + return totalValueToThisValue; + } + + public double getPercentile() { + return percentile; + } + + public double getPercentileLevelIteratedTo() { + return percentileLevelIteratedTo; + } + + public double getIntegerToDoubleValueConversionRatio() { return integerToDoubleValueConversionRatio; } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/HistogramLogProcessor.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/HistogramLogProcessor.java new file mode 100644 index 000000000..e0d652e36 --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/HistogramLogProcessor.java @@ -0,0 +1,534 @@ +/** + * Written by Gil Tene of Azul Systems, and released to the public domain, + * as explained at 
http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram; + +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.PrintStream; +import java.util.*; + +/** + * {@link HistogramLogProcessor} will process an input log and + * [can] generate two separate log files from a single histogram log file: a + * sequential interval log file and a histogram percentile distribution log file. + *
<p>
+ * The sequential interval log file logs a single stats summary line for + * each reporting interval. + *
<p>
+ * The histogram percentile distribution log file includes a detailed percentile
+ * and fine grained distribution of the entire log file range processed.
+ * <p>
+ * HistogramLogProcessor will process an input log file when provided with
+ * the {@code -i <filename>} option. When no -i option is provided, standard input
+ * will be processed.
+ * <p>
+ * When provided with an output file name {@code <logfile>} with the -o option
+ * (e.g. "-o mylog"), HistogramLogProcessor will produce both output files
+ * under the names {@code <logfile>} and {@code <logfile>.hgrm} (e.g. mylog and mylog.hgrm).
+ * <p>
+ * When not provided with an output file name, HistogramLogProcessor will + * produce [only] the histogram percentile distribution log output to + * standard output. + *
<p>
+ * By default, HistogramLogProcessor only processes hlog file lines
+ * with no tag specified [aka "default tagged" lines]. An optional -tag
+ * parameter can be used to process lines of a [single] specific tag. The
+ * -listtags option can be used to list all the tags found in the input file.
+ * <p>
+ * HistogramLogProcessor accepts optional -start and -end time range + * parameters. When provided, the output will only reflect the portion + * of the input log with timestamps that fall within the provided start + * and end time range parameters. + *
<p>
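+ * For example, a plausible invocation (file names here are purely illustrative) might be:
+ * <pre>
+ * java io.prometheus.client.HdrHistogram.HistogramLogProcessor -i mylog.hlog -o mylog -start 10.0 -end 100.0
+ * </pre>
+ * <p>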
+ * HistogramLogProcessor also accepts an optional -csv parameter, which
+ * will cause the output formatting (of both output file forms) to use
+ * a CSV file format.
+ */
+public class HistogramLogProcessor extends Thread {
+
+    private final HistogramLogProcessorConfiguration config;
+
+    private HistogramLogReader logReader;
+
+    private static class HistogramLogProcessorConfiguration {
+        boolean verbose = false;
+        String outputFileName = null;
+        String inputFileName = null;
+        String tag = null;
+
+        double rangeStartTimeSec = 0.0;
+        double rangeEndTimeSec = Double.MAX_VALUE;
+
+        boolean logFormatCsv = false;
+        boolean listTags = false;
+        boolean allTags = false;
+
+        boolean movingWindow = false;
+        double movingWindowPercentileToReport = 99.0;
+        long movingWindowLengthInMsec = 60000; // 1 minute
+
+        int percentilesOutputTicksPerHalf = 5;
+        Double outputValueUnitRatio = 1000000.0; // default to msec units for output.
+
+        double expectedIntervalForCoordinatedOmissionCorrection = 0.0;
+
+        String errorMessage = "";
+
+        HistogramLogProcessorConfiguration(final String[] args) {
+            boolean askedForHelp = false;
+            try {
+                for (int i = 0; i < args.length; ++i) {
+                    if (args[i].equals("-csv")) {
+                        logFormatCsv = true;
+                    } else if (args[i].equals("-v")) {
+                        verbose = true;
+                    } else if (args[i].equals("-listtags")) {
+                        listTags = true;
+                    } else if (args[i].equals("-alltags")) {
+                        allTags = true;
+                    } else if (args[i].equals("-i")) {
+                        inputFileName = args[++i]; // lgtm [java/index-out-of-bounds]
+                    } else if (args[i].equals("-tag")) {
+                        tag = args[++i]; // lgtm [java/index-out-of-bounds]
+                    } else if (args[i].equals("-mwp")) {
+                        movingWindowPercentileToReport = Double.parseDouble(args[++i]); // lgtm [java/index-out-of-bounds]
+                        movingWindow = true;
+                    } else if (args[i].equals("-mwpl")) {
+                        movingWindowLengthInMsec = Long.parseLong(args[++i]); // lgtm [java/index-out-of-bounds]
+                        movingWindow = true;
+                    } else if (args[i].equals("-start")) {
+                        rangeStartTimeSec = Double.parseDouble(args[++i]); // lgtm [java/index-out-of-bounds]
+                    } else if (args[i].equals("-end")) {
+                        rangeEndTimeSec = Double.parseDouble(args[++i]); // lgtm [java/index-out-of-bounds]
+                    } else if (args[i].equals("-o")) {
+                        outputFileName = args[++i]; // lgtm [java/index-out-of-bounds]
+                    } else if (args[i].equals("-percentilesOutputTicksPerHalf")) {
+                        percentilesOutputTicksPerHalf = Integer.parseInt(args[++i]); // lgtm [java/index-out-of-bounds]
+                    } else if (args[i].equals("-outputValueUnitRatio")) {
+                        outputValueUnitRatio = Double.parseDouble(args[++i]); // lgtm [java/index-out-of-bounds]
+                    } else if (args[i].equals("-correctLogWithKnownCoordinatedOmission")) {
+                        expectedIntervalForCoordinatedOmissionCorrection =
+                                Double.parseDouble(args[++i]); // lgtm [java/index-out-of-bounds]
+                    } else if (args[i].equals("-h")) {
+                        askedForHelp = true;
+                        throw new Exception("Help: " + args[i]);
+                    } else {
+                        throw new Exception("Invalid args: " + args[i]);
+                    }
+                }
+            } catch (Exception e) {
+                errorMessage = "Error: Histogram Log Processor launched with the following args:\n";
+
+                for (String arg : args) {
+                    errorMessage += arg + " ";
+                }
+                if (!askedForHelp) {
+                    errorMessage += "\nWhich was parsed as an error, indicated by the following exception:\n" + e;
+                    System.err.println(errorMessage);
+                }
+
+                final String validArgs =
+                        "\"[-csv] [-v] [-i inputFileName] [-o outputFileName] [-tag tag] " +
+                        "[-start rangeStartTimeSec] [-end rangeEndTimeSec] " +
+                        "[-outputValueUnitRatio r] [-correctLogWithKnownCoordinatedOmission i] [-listtags]";
+
+                System.err.println("valid arguments = " + validArgs);
+
+                System.err.println(
+                        " [-h]                                         help\n" +
+                        " [-v]                                         Provide verbose error output\n" +
+                        " [-csv]                                       Use CSV format for output log files\n" +
+                        " [-i logFileName]                             File name of Histogram Log to process (default is standard input)\n" +
+                        " [-o outputFileName]                          File name to output to (default is standard output)\n" +
+                        " [-tag tag]                                   The tag (default no tag) of the histogram lines to be processed\n" +
+                        " [-start rangeStartTimeSec]                   The start time for the range in the file, in seconds (default 0.0)\n" +
+                        " [-end rangeEndTimeSec]                       The end time for the range in the file, in seconds (default is infinite)\n" +
+                        " [-outputValueUnitRatio r]                    The scaling factor by which to divide histogram recorded values units\n" +
+                        "                                              in output. [default = 1000000.0 (1 msec in nsec)]\n" +
+                        " [-correctLogWithKnownCoordinatedOmission i]  When the supplied expected interval i is larger than 0, performs coordinated\n" +
+                        "                                              omission correction on the input log's interval histograms by adding\n" +
+                        "                                              missing values as appropriate based on the supplied expected interval\n" +
+                        "                                              value i (in whatever units the log histograms were recorded with). This\n" +
+                        "                                              feature should only be used when the input log is known to have been\n" +
+                        "                                              recorded with coordinated omissions, and when an expected interval is known.\n" +
+                        " [-listtags]                                  list all tags found on histogram lines in the input file."
+                );
+                System.exit(1);
+            }
+        }
+    }
+
+    private void outputTimeRange(final PrintStream log, final String title) {
+        log.format(Locale.US, "#[%s between %.3f and", title, config.rangeStartTimeSec);
+        if (config.rangeEndTimeSec < Double.MAX_VALUE) {
+            log.format(" %.3f", config.rangeEndTimeSec);
+        } else {
+            log.format(" %s", "<MAX>");
+        }
+        log.format(" seconds (relative to StartTime)]\n");
+    }
+
+    private void outputStartTime(final PrintStream log, final Double startTime) {
+        log.format(Locale.US, "#[StartTime: %.3f (seconds since epoch), %s]\n",
+                startTime, (new Date((long) (startTime * 1000))).toString());
+    }
+
+    EncodableHistogram copyCorrectedForCoordinatedOmission(final EncodableHistogram inputHistogram) {
+        EncodableHistogram histogram = inputHistogram;
+        if (histogram instanceof DoubleHistogram) {
+            if (config.expectedIntervalForCoordinatedOmissionCorrection > 0.0) {
+                histogram = ((DoubleHistogram) histogram).copyCorrectedForCoordinatedOmission(
+                        config.expectedIntervalForCoordinatedOmissionCorrection);
+            }
+        } else if (histogram instanceof Histogram) {
+            long expectedInterval = (long) config.expectedIntervalForCoordinatedOmissionCorrection;
+            if (expectedInterval > 0) {
+                histogram = ((Histogram) histogram).copyCorrectedForCoordinatedOmission(expectedInterval);
+            }
+        }
+        return histogram;
+    }
+
+    private int lineNumber = 0;
+
+    private EncodableHistogram getIntervalHistogram() {
+        EncodableHistogram histogram = null;
+        try {
+            histogram = logReader.nextIntervalHistogram(config.rangeStartTimeSec, config.rangeEndTimeSec);
+            if (config.expectedIntervalForCoordinatedOmissionCorrection > 0.0) {
+                // Apply Coordinated Omission correction to log histograms when arguments indicate that
+                // such correction is desired, and an expected interval is provided.
+                histogram = copyCorrectedForCoordinatedOmission(histogram);
+            }
+        } catch (RuntimeException ex) {
+            System.err.println("Log file parsing error at line number " + lineNumber +
+                    ": line appears to be malformed.");
+            if (config.verbose) {
+                throw ex;
+            } else {
+                System.exit(1);
+            }
+        }
+        lineNumber++;
+        return histogram;
+    }
+
+    private EncodableHistogram getIntervalHistogram(String tag) {
+        EncodableHistogram histogram;
+        if (tag == null) {
+            do {
+                histogram = getIntervalHistogram();
+            } while ((histogram != null) && histogram.getTag() != null);
+        } else {
+            do {
+                histogram = getIntervalHistogram();
+            } while ((histogram != null) && !tag.equals(histogram.getTag()));
+        }
+        return histogram;
+    }
+
+    /**
+ * Run the log processor with the currently provided arguments.
+ */
+    @Override
+    public void run() {
+        PrintStream timeIntervalLog = null;
+        PrintStream movingWindowLog = null;
+        PrintStream histogramPercentileLog = System.out;
+        double firstStartTime = 0.0;
+        boolean timeIntervalLogLegendWritten = false;
+        boolean movingWindowLogLegendWritten = false;
+
+        Queue<EncodableHistogram> movingWindowQueue = new LinkedList<EncodableHistogram>();
+
+        if (config.listTags) {
+            Set<String> tags = new TreeSet<String>();
+            EncodableHistogram histogram;
+            boolean nullTagFound = false;
+            while ((histogram = getIntervalHistogram()) != null) {
+                String tag = histogram.getTag();
+                if (tag != null) {
+                    tags.add(histogram.getTag());
+                } else {
+                    nullTagFound = true;
+                }
+            }
+            System.out.println("Tags found in input file:");
+            if (nullTagFound) {
+                System.out.println("[NO TAG (default)]");
+            }
+            for (String tag : tags) {
+                System.out.println(tag);
+            }
+            // listtags does nothing other than list tags:
+            return;
+        }
+
+        final String logFormat;
+        final String movingWindowLogFormat;
+        if (config.logFormatCsv) {
+            logFormat = "%.3f,%d,%.3f,%.3f,%.3f,%d,%.3f,%.3f,%.3f,%.3f,%.3f,%.3f\n";
+            movingWindowLogFormat = "%.3f,%d,%.3f,%.3f\n";
+        } else {
+            logFormat = "%4.3f: I:%d ( %7.3f %7.3f %7.3f ) T:%d ( %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f )\n";
+            movingWindowLogFormat = "%4.3f: I:%d P:%7.3f M:%7.3f\n";
+        }
+
+        try {
+            if (config.outputFileName != null) {
+                try {
+                    timeIntervalLog = new PrintStream(new FileOutputStream(config.outputFileName), false);
+                    outputTimeRange(timeIntervalLog, "Interval percentile log");
+                } catch (FileNotFoundException ex) {
+                    System.err.println("Failed to open output file " + config.outputFileName);
+                }
+                String hgrmOutputFileName = config.outputFileName + ".hgrm";
+                try {
+                    histogramPercentileLog = new PrintStream(new FileOutputStream(hgrmOutputFileName), false);
+                    outputTimeRange(histogramPercentileLog, "Overall percentile distribution");
+                } catch (FileNotFoundException ex) {
+                    System.err.println("Failed to open percentiles histogram output file " + hgrmOutputFileName);
+                }
+                if (config.movingWindow) {
+                    String movingWindowOutputFileName = config.outputFileName + ".mwp";
+                    try {
+                        movingWindowLog = new PrintStream(new FileOutputStream(movingWindowOutputFileName), false);
+                        outputTimeRange(movingWindowLog, "Moving window log for " +
+                                config.movingWindowPercentileToReport + " percentile");
+                    } catch (FileNotFoundException ex) {
+                        System.err.println("Failed to open moving window output file " + movingWindowOutputFileName);
+                    }
+                }
+            }
+
+            EncodableHistogram intervalHistogram = getIntervalHistogram(config.tag);
+            boolean logUsesDoubleHistograms = (intervalHistogram instanceof DoubleHistogram);
+
+            Histogram accumulatedRegularHistogram = logUsesDoubleHistograms ?
+ new Histogram(3) : + ((Histogram) intervalHistogram).copy(); + accumulatedRegularHistogram.reset(); + accumulatedRegularHistogram.setAutoResize(true); + + DoubleHistogram accumulatedDoubleHistogram = logUsesDoubleHistograms ? + ((DoubleHistogram) intervalHistogram).copy() : + new DoubleHistogram(3); + accumulatedDoubleHistogram.reset(); + accumulatedDoubleHistogram.setAutoResize(true); + + + EncodableHistogram movingWindowSumHistogram = logUsesDoubleHistograms ? + new DoubleHistogram(3) : + new Histogram(3); + + + while (intervalHistogram != null) { + + // handle accumulated histogram: + if (intervalHistogram instanceof DoubleHistogram) { + if (!logUsesDoubleHistograms) { + throw new IllegalStateException("Encountered a DoubleHistogram line in a log of Histograms."); + } + accumulatedDoubleHistogram.add((DoubleHistogram) intervalHistogram); + } else { + if (logUsesDoubleHistograms) { + throw new IllegalStateException("Encountered a Histogram line in a log of DoubleHistograms."); + } + accumulatedRegularHistogram.add((Histogram) intervalHistogram); + } + + long windowCutOffTimeStamp = intervalHistogram.getEndTimeStamp() - config.movingWindowLengthInMsec; + // handle moving window: + if (config.movingWindow) { + // Add the current interval histogram to the moving window sums: + if ((movingWindowSumHistogram instanceof DoubleHistogram) && + (intervalHistogram instanceof DoubleHistogram)){ + ((DoubleHistogram) movingWindowSumHistogram).add((DoubleHistogram) intervalHistogram); + } else if ((movingWindowSumHistogram instanceof Histogram) && + (intervalHistogram instanceof Histogram)){ + ((Histogram) movingWindowSumHistogram).add((Histogram) intervalHistogram); + } + // Remove previous, now-out-of-window interval histograms from moving window: + EncodableHistogram head; + while (((head = movingWindowQueue.peek()) != null) && + (head.getEndTimeStamp() <= windowCutOffTimeStamp)) { + EncodableHistogram prevHist = movingWindowQueue.remove(); + if (movingWindowSumHistogram instanceof DoubleHistogram) { + if (prevHist != null) { + ((DoubleHistogram) movingWindowSumHistogram).subtract((DoubleHistogram) prevHist); + } + } else if (movingWindowSumHistogram instanceof Histogram) { + if (prevHist != null) { + ((Histogram) movingWindowSumHistogram).subtract((Histogram) prevHist); + } + } + } + // Add interval histogram to moving window previous intervals memory: + movingWindowQueue.add(intervalHistogram); + } + + if ((firstStartTime == 0.0) && (logReader.getStartTimeSec() != 0.0)) { + firstStartTime = logReader.getStartTimeSec(); + + outputStartTime(histogramPercentileLog, firstStartTime); + + if (timeIntervalLog != null) { + outputStartTime(timeIntervalLog, firstStartTime); + } + } + + if (timeIntervalLog != null) { + if (!timeIntervalLogLegendWritten) { + timeIntervalLogLegendWritten = true; + if (config.logFormatCsv) { + timeIntervalLog.println("\"Timestamp\",\"Int_Count\",\"Int_50%\",\"Int_90%\",\"Int_Max\",\"Total_Count\"," + + "\"Total_50%\",\"Total_90%\",\"Total_99%\",\"Total_99.9%\",\"Total_99.99%\",\"Total_Max\""); + } else { + timeIntervalLog.println("Time: IntervalPercentiles:count ( 50% 90% Max ) TotalPercentiles:count ( 50% 90% 99% 99.9% 99.99% Max )"); + } + } + + if (logUsesDoubleHistograms) { + timeIntervalLog.format(Locale.US, logFormat, + ((intervalHistogram.getEndTimeStamp() / 1000.0) - logReader.getStartTimeSec()), + // values recorded during the last reporting interval + ((DoubleHistogram) intervalHistogram).getTotalCount(), + ((DoubleHistogram) 
intervalHistogram).getValueAtPercentile(50.0) / config.outputValueUnitRatio, + ((DoubleHistogram) intervalHistogram).getValueAtPercentile(90.0) / config.outputValueUnitRatio, + ((DoubleHistogram) intervalHistogram).getMaxValue() / config.outputValueUnitRatio, + // values recorded from the beginning until now + accumulatedDoubleHistogram.getTotalCount(), + accumulatedDoubleHistogram.getValueAtPercentile(50.0) / config.outputValueUnitRatio, + accumulatedDoubleHistogram.getValueAtPercentile(90.0) / config.outputValueUnitRatio, + accumulatedDoubleHistogram.getValueAtPercentile(99.0) / config.outputValueUnitRatio, + accumulatedDoubleHistogram.getValueAtPercentile(99.9) / config.outputValueUnitRatio, + accumulatedDoubleHistogram.getValueAtPercentile(99.99) / config.outputValueUnitRatio, + accumulatedDoubleHistogram.getMaxValue() / config.outputValueUnitRatio + ); + } else { + timeIntervalLog.format(Locale.US, logFormat, + ((intervalHistogram.getEndTimeStamp() / 1000.0) - logReader.getStartTimeSec()), + // values recorded during the last reporting interval + ((Histogram) intervalHistogram).getTotalCount(), + ((Histogram) intervalHistogram).getValueAtPercentile(50.0) / config.outputValueUnitRatio, + ((Histogram) intervalHistogram).getValueAtPercentile(90.0) / config.outputValueUnitRatio, + ((Histogram) intervalHistogram).getMaxValue() / config.outputValueUnitRatio, + // values recorded from the beginning until now + accumulatedRegularHistogram.getTotalCount(), + accumulatedRegularHistogram.getValueAtPercentile(50.0) / config.outputValueUnitRatio, + accumulatedRegularHistogram.getValueAtPercentile(90.0) / config.outputValueUnitRatio, + accumulatedRegularHistogram.getValueAtPercentile(99.0) / config.outputValueUnitRatio, + accumulatedRegularHistogram.getValueAtPercentile(99.9) / config.outputValueUnitRatio, + accumulatedRegularHistogram.getValueAtPercentile(99.99) / config.outputValueUnitRatio, + accumulatedRegularHistogram.getMaxValue() / config.outputValueUnitRatio + ); + } + } + + if (movingWindowLog != null) { + if (!movingWindowLogLegendWritten) { + movingWindowLogLegendWritten = true; + if (config.logFormatCsv) { + movingWindowLog.println("\"Timestamp\",\"Window_Count\",\"" + + config.movingWindowPercentileToReport +"%'ile\",\"Max\""); + } else { + movingWindowLog.println("Time: WindowCount " + config.movingWindowPercentileToReport + "%'ile Max"); + } + } + if (intervalHistogram instanceof DoubleHistogram) { + movingWindowLog.format(Locale.US, movingWindowLogFormat, + ((intervalHistogram.getEndTimeStamp() / 1000.0) - logReader.getStartTimeSec()), + // values recorded during the last reporting interval + ((DoubleHistogram) movingWindowSumHistogram).getTotalCount(), + ((DoubleHistogram) movingWindowSumHistogram).getValueAtPercentile(config.movingWindowPercentileToReport) / config.outputValueUnitRatio, + ((DoubleHistogram) movingWindowSumHistogram).getMaxValue() / config.outputValueUnitRatio + ); + } else { + movingWindowLog.format(Locale.US, movingWindowLogFormat, + ((intervalHistogram.getEndTimeStamp() / 1000.0) - logReader.getStartTimeSec()), + // values recorded during the last reporting interval + ((Histogram) movingWindowSumHistogram).getTotalCount(), + ((Histogram) movingWindowSumHistogram).getValueAtPercentile(config.movingWindowPercentileToReport) / config.outputValueUnitRatio, + ((Histogram) movingWindowSumHistogram).getMaxValue() / config.outputValueUnitRatio + ); + } + + } + + intervalHistogram = getIntervalHistogram(config.tag); + } + + if (logUsesDoubleHistograms) { + 
accumulatedDoubleHistogram.outputPercentileDistribution(histogramPercentileLog, + config.percentilesOutputTicksPerHalf, config.outputValueUnitRatio, config.logFormatCsv); + } else { + accumulatedRegularHistogram.outputPercentileDistribution(histogramPercentileLog, + config.percentilesOutputTicksPerHalf, config.outputValueUnitRatio, config.logFormatCsv); + } + } finally { + if (timeIntervalLog != null) { + timeIntervalLog.close(); + } + if (movingWindowLog != null) { + movingWindowLog.close(); + } + if (histogramPercentileLog != System.out) { + histogramPercentileLog.close(); + } + } + } + + /** + * Construct a {@link HistogramLogProcessor} with the given arguments + * (provided in command line style). + *
<pre>
+     * [-h]                                                        help
+     * [-csv]                                                      Use CSV format for output log files
+     * [-i logFileName]                                            File name of Histogram Log to process (default is standard input)
+     * [-o outputFileName]                                         File name to output to (default is standard output)
+     *                                                             (will replace occurrences of %pid and %date with appropriate information)
+     * [-tag tag]                                                  The tag (default no tag) of the histogram lines to be processed
+     * [-start rangeStartTimeSec]                                  The start time for the range in the file, in seconds (default 0.0)
+     * [-end rangeEndTimeSec]                                      The end time for the range in the file, in seconds (default is infinite)
+     * [-correctLogWithKnownCoordinatedOmission expectedInterval]  When the supplied expected interval i is larger than 0, performs coordinated
+     *                                                             omission correction on the input log's interval histograms by adding
+     *                                                             missing values as appropriate based on the supplied expected interval
+     *                                                             value i (in whatever units the log histograms were recorded with). This
+     *                                                             feature should only be used when the input log is known to have been
+     *                                                             recorded with coordinated omissions, and when an expected interval is known.
+     * [-outputValueUnitRatio r]                                   The scaling factor by which to divide histogram recorded values units
+     *                                                             in output. [default = 1000000.0 (1 msec in nsec)]
+     * </pre>
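+     * <p>
+     * For example (an illustrative programmatic launch; exception handling omitted):
+     * <pre>
+     * HistogramLogProcessor processor =
+     *         new HistogramLogProcessor(new String[] {"-i", "mylog.hlog", "-o", "mylog"});
+     * processor.start();
+     * </pre>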
+ * @param args command line arguments + * @throws FileNotFoundException if specified input file is not found + */ + public HistogramLogProcessor(final String[] args) throws FileNotFoundException { + this.setName("HistogramLogProcessor"); + config = new HistogramLogProcessorConfiguration(args); + if (config.inputFileName != null) { + logReader = new HistogramLogReader(config.inputFileName); + } else { + logReader = new HistogramLogReader(System.in); + } + } + + /** + * main() method. + * + * @param args command line arguments + */ + public static void main(final String[] args) { + final HistogramLogProcessor processor; + try { + processor = new HistogramLogProcessor(args); + processor.start(); + } catch (FileNotFoundException ex) { + System.err.println("failed to open input file."); + } + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/HistogramLogReader.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/HistogramLogReader.java new file mode 100644 index 000000000..7e9bb8e12 --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/HistogramLogReader.java @@ -0,0 +1,310 @@ +/** + * Written by Gil Tene of Azul Systems, and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram; + +import java.io.Closeable; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.InputStream; +import java.util.zip.DataFormatException; + +/** + * A histogram log reader. + *
<p>
+ * Histogram logs are used to capture full fidelity, per-time-interval + * histograms of a recorded value. + *
<p>
+ * For example, a histogram log can be used to capture high fidelity + * reaction-time logs for some measured system or subsystem component. + * Such a log would capture a full reaction time histogram for each + * logged interval, and could be used to later reconstruct a full + * HdrHistogram of the measured reaction time behavior for any arbitrary + * time range within the log, by adding [only] the relevant interval + * histograms. + *
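<p>
+ * A minimal reading loop might look like this (an illustrative sketch; assumes a log
+ * file named "mylog.hlog", with FileNotFoundException handling omitted):
+ * <pre>
+ * HistogramLogReader reader = new HistogramLogReader("mylog.hlog");
+ * EncodableHistogram interval;
+ * while ((interval = reader.nextIntervalHistogram()) != null) {
+ *     System.out.println(interval.getStartTimeStamp() + " " + interval.getMaxValueAsDouble());
+ * }
+ * reader.close();
+ * </pre>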
<p>
+ * <h3>Histogram log format:</h3>
+ * A histogram log file consists of text lines. Lines beginning with + * the "#" character are optional and treated as comments. Lines + * containing the legend (starting with "Timestamp") are also optional + * and ignored in parsing the histogram log. All other lines must + * be valid interval description lines. Text fields are delimited by + * commas, spaces. + *
<p>
+ * A valid interval description line contains an optional Tag=tagString + * text field, followed by an interval description. + *
<p>
+ * A valid interval description must contain exactly four text fields: + *
<ul>
+ * <li>StartTimestamp: The first field must contain a number parse-able as a Double value,
+ * representing the start timestamp of the interval in seconds.</li>
+ * <li>intervalLength: The second field must contain a number parse-able as a Double value,
+ * representing the length of the interval in seconds.</li>
+ * <li>Interval_Max: The third field must contain a number parse-able as a Double value,
+ * which generally represents the maximum value of the interval histogram.</li>
+ * <li>Interval_Compressed_Histogram: The fourth field must contain a text field
+ * parse-able as a Base64 text representation of a compressed HdrHistogram.</li>
+ * </ul>
+ * The log file may contain an optional indication of a starting time. Starting time
+ * is indicated using a special comment starting with "#[StartTime: " and followed
+ * by a number parse-able as a double, representing the start time (in seconds)
+ * that may be added to timestamps in the file to determine an absolute
+ * timestamp (e.g. since the epoch) for each interval.
+ */
+public class HistogramLogReader implements Closeable {
+
+    private final HistogramLogScanner scanner;
+    private final HistogramLogScanner.EventHandler handler = new HistogramLogScanner.EventHandler() {
+        @Override
+        public boolean onComment(String comment)
+        {
+            return false;
+        }
+
+        @Override
+        public boolean onBaseTime(double secondsSinceEpoch)
+        {
+            baseTimeSec = secondsSinceEpoch; // base time represented as seconds since epoch
+            observedBaseTime = true;
+            return false;
+        }
+
+        @Override
+        public boolean onStartTime(double secondsSinceEpoch)
+        {
+            startTimeSec = secondsSinceEpoch; // start time represented as seconds since epoch
+            observedStartTime = true;
+            return false;
+        }
+
+        @Override
+        public boolean onHistogram(String tag, double timestamp, double length,
+                                   HistogramLogScanner.EncodableHistogramSupplier lazyReader) {
+            final double logTimeStampInSec = timestamp; // Timestamp is expected to be in seconds
+
+            if (!observedStartTime) {
+                // No explicit start time noted. Use 1st observed time:
+                startTimeSec = logTimeStampInSec;
+                observedStartTime = true;
+            }
+            if (!observedBaseTime) {
+                // No explicit base time noted. Deduce from 1st observed time (compared to start time):
+                if (logTimeStampInSec < startTimeSec - (365 * 24 * 3600.0)) {
+                    // Criteria Note: if log timestamp is more than a year in the past (compared to
+                    // StartTime), we assume that timestamps in the log are not absolute
+                    baseTimeSec = startTimeSec;
+                } else {
+                    // Timestamps are absolute
+                    baseTimeSec = 0.0;
+                }
+                observedBaseTime = true;
+            }
+
+            final double absoluteStartTimeStampSec = logTimeStampInSec + baseTimeSec;
+            final double offsetStartTimeStampSec = absoluteStartTimeStampSec - startTimeSec;
+
+            final double intervalLengthSec = length; // Timestamp length is expected to be in seconds
+            final double absoluteEndTimeStampSec = absoluteStartTimeStampSec + intervalLengthSec;
+
+            final double startTimeStampToCheckRangeOn = absolute ? absoluteStartTimeStampSec : offsetStartTimeStampSec;
+
+            if (startTimeStampToCheckRangeOn < rangeStartTimeSec) {
+                // keep on trucking
+                return false;
+            }
+
+            if (startTimeStampToCheckRangeOn > rangeEndTimeSec) {
+                // after limit we stop on each line
+                return true;
+            }
+            EncodableHistogram histogram;
+            try {
+                histogram = lazyReader.read();
+            } catch (DataFormatException e) {
+                // stop after exception
+                return true;
+            }
+
+            histogram.setStartTimeStamp((long) (absoluteStartTimeStampSec * 1000.0));
+            histogram.setEndTimeStamp((long) (absoluteEndTimeStampSec * 1000.0));
+            histogram.setTag(tag);
+            nextHistogram = histogram;
+            return true;
+        }
+
+        @Override
+        public boolean onException(Throwable t) {
+
+            // We ignore NoSuchElementException, but stop processing.
+            // Next call to nextIntervalHistogram may return null.
+            if (t instanceof java.util.NoSuchElementException) {
+                return true;
+            }
+            // rethrow
+            if (t instanceof RuntimeException) {
+                throw (RuntimeException) t;
+            } else {
+                throw new RuntimeException(t);
+            }
+        }
+    };
+
+    private double startTimeSec = 0.0;
+    private boolean observedStartTime = false;
+    private double baseTimeSec = 0.0;
+    private boolean observedBaseTime = false;
+
+    // scanner handling state
+    private boolean absolute;
+    private double rangeStartTimeSec;
+    private double rangeEndTimeSec;
+    private EncodableHistogram nextHistogram;
+
+    /**
+ * Constructs a new HistogramLogReader that produces intervals read from the specified file name.
+ * @param inputFileName The name of the file to read from
+ * @throws FileNotFoundException when unable to find inputFileName
+ */
+    public HistogramLogReader(final String inputFileName) throws FileNotFoundException {
+        scanner = new HistogramLogScanner(new File(inputFileName));
+    }
+
+    /**
+ * Constructs a new HistogramLogReader that produces intervals read from the specified InputStream.
+ * @param inputStream The InputStream to read from
+ */
+    public HistogramLogReader(final InputStream inputStream) {
+        scanner = new HistogramLogScanner(inputStream);
+    }
+
+    /**
+ * Constructs a new HistogramLogReader that produces intervals read from the specified file.
+ * @param inputFile The File to read from
+ * @throws FileNotFoundException when unable to find inputFile
+ */
+    public HistogramLogReader(final File inputFile) throws FileNotFoundException {
+        scanner = new HistogramLogScanner(inputFile);
+    }
+
+    /**
+ * get the latest start time found in the file so far (or 0.0),
+ * per the log file format explained above. Assuming the "#[StartTime:" comment
+ * line precedes the actual intervals recorded in the file, getStartTimeSec() can
+ * be safely used after each interval is read to determine the offset of that
+ * interval's timestamp from the epoch.
+ * @return latest Start Time found in the file (or 0.0 if none found)
+ */
+    public double getStartTimeSec() {
+        return startTimeSec;
+    }
+
+    /**
+ * Read the next interval histogram from the log, if interval falls within a time range.
+ * <p>
+ * Returns a histogram object if an interval line was found with an + * associated start timestamp value that falls between startTimeSec and + * endTimeSec, or null if no such interval line is found. Note that + * the range is assumed to be in seconds relative to the actual + * timestamp value found in each interval line in the log, and not + * in absolute time. + *
<p>
+ * Timestamps are assumed to appear in order in the log file, and as such + * this method will return a null upon encountering a timestamp larger than + * rangeEndTimeSec. + *
<p>
+ * The histogram returned will have its timestamp set to the absolute
+ * timestamp calculated from adding the interval's indicated timestamp
+ * value to the latest [optional] start time found in the log.
+ * <p>
+ * Upon encountering any unexpected format errors in reading the next + * interval from the file, this method will return a null. Use {@link #hasNext} to determine + * whether or not additional intervals may be available for reading in the log input. + * + * @param startTimeSec The (non-absolute time) start of the expected + * time range, in seconds. + * @param endTimeSec The (non-absolute time) end of the expected time + * range, in seconds. + * @return a histogram, or a null if no appropriate interval found + */ + public EncodableHistogram nextIntervalHistogram(final double startTimeSec, + final double endTimeSec) { + return nextIntervalHistogram(startTimeSec, endTimeSec, false); + } + + /** + * Read the next interval histogram from the log, if interval falls within an absolute time range + *
<p>
+ * Returns a histogram object if an interval line was found with an + * associated absolute start timestamp value that falls between + * absoluteStartTimeSec and absoluteEndTimeSec, or null if no such + * interval line is found. + *
<p>
+ * Timestamps are assumed to appear in order in the log file, and as such + * this method will return a null upon encountering a timestamp larger than + * rangeEndTimeSec. + *
<p>
+ * The histogram returned will have its timestamp set to the absolute
+ * timestamp calculated from adding the interval's indicated timestamp
+ * value to the latest [optional] start time found in the log.
+ * <p>
+ * Absolute timestamps are calculated by adding the timestamp found + * with the recorded interval to the [latest, optional] start time + * found in the log. The start time is indicated in the log with + * a "#[StartTime: " followed by the start time in seconds. + *
<p>
+ * Upon encountering any unexpected format errors in reading the next + * interval from the file, this method will return a null. Use {@link #hasNext} to determine + * whether or not additional intervals may be available for reading in the log input. + * + * @param absoluteStartTimeSec The (absolute time) start of the expected + * time range, in seconds. + * @param absoluteEndTimeSec The (absolute time) end of the expected + * time range, in seconds. + * @return A histogram, or a null if no appropriate interval found + */ + public EncodableHistogram nextAbsoluteIntervalHistogram(final double absoluteStartTimeSec, + final double absoluteEndTimeSec) { + return nextIntervalHistogram(absoluteStartTimeSec, absoluteEndTimeSec, true); + } + + + /** + * Read the next interval histogram from the log. Returns a Histogram object if + * an interval line was found, or null if not. + *
<p>
+ *
Upon encountering any unexpected format errors in reading the next interval + * from the input, this method will return a null. Use {@link #hasNext} to determine + * whether or not additional intervals may be available for reading in the log input. + * @return a DecodedInterval, or a null if no appropriately formatted interval was found + */ + public EncodableHistogram nextIntervalHistogram() { + return nextIntervalHistogram(0.0, Long.MAX_VALUE * 1.0, true); + } + + private EncodableHistogram nextIntervalHistogram(final double rangeStartTimeSec, + final double rangeEndTimeSec, boolean absolute) { + this.rangeStartTimeSec = rangeStartTimeSec; + this.rangeEndTimeSec = rangeEndTimeSec; + this.absolute = absolute; + scanner.process(handler); + EncodableHistogram histogram = this.nextHistogram; + nextHistogram = null; + return histogram; + } + + /** + * Indicates whether or not additional intervals may exist in the log + * @return true if additional intervals may exist in the log + */ + public boolean hasNext() { + return scanner.hasNextLine(); + } + + @Override + public void close() + { + scanner.close(); + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/HistogramLogScanner.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/HistogramLogScanner.java new file mode 100644 index 000000000..6bcc7f3d1 --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/HistogramLogScanner.java @@ -0,0 +1,201 @@ +/** + * Written by Gil Tene of Azul Systems, and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram; + +import java.io.Closeable; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.util.Locale; +import java.util.Scanner; +import java.util.zip.DataFormatException; + +public class HistogramLogScanner implements Closeable { + + // can't use lambdas, and anyway we need to let the handler take the exception + public interface EncodableHistogramSupplier { + EncodableHistogram read() throws DataFormatException; + } + + /** + * Handles log events, return true to stop processing. + */ + public interface EventHandler { + boolean onComment(String comment); + boolean onBaseTime(double secondsSinceEpoch); + boolean onStartTime(double secondsSinceEpoch); + + /** + * A lazy reader is provided to allow fast skipping of bulk of work where tag or timestamp are to be used as + * a basis for filtering the {@link EncodableHistogram} anyway. The reader is to be called only once. + * + * @param tag histogram tag or null if none exist + * @param timestamp logged timestamp + * @param length logged interval length + * @param lazyReader to be called if the histogram needs to be deserialized, given the tag/timestamp etc. + * @return true to stop processing, false to continue. 
+ */ + boolean onHistogram(String tag, double timestamp, double length, EncodableHistogramSupplier lazyReader); + boolean onException(Throwable t); + } + + private static class LazyHistogramReader implements EncodableHistogramSupplier { + + private final Scanner scanner; + private boolean gotIt = true; + + private LazyHistogramReader(Scanner scanner) + { + this.scanner = scanner; + } + + private void allowGet() + { + gotIt = false; + } + + @Override + public EncodableHistogram read() throws DataFormatException + { + // prevent double calls to this method + if (gotIt) { + throw new IllegalStateException(); + } + gotIt = true; + + final String compressedPayloadString = scanner.next(); + final ByteBuffer buffer = ByteBuffer.wrap(Base64Helper.parseBase64Binary(compressedPayloadString)); + + EncodableHistogram histogram = EncodableHistogram.decodeFromCompressedByteBuffer(buffer, 0); + + return histogram; + } + } + + private final LazyHistogramReader lazyReader; + protected final Scanner scanner; + + /** + * Constructs a new HistogramLogReader that produces intervals read from the specified file name. + * @param inputFileName The name of the file to read from + * @throws FileNotFoundException when unable to find inputFileName + */ + public HistogramLogScanner(final String inputFileName) throws FileNotFoundException { + this(new Scanner(new File(inputFileName))); + } + + /** + * Constructs a new HistogramLogReader that produces intervals read from the specified InputStream. Note that + * log readers constructed through this constructor do not assume ownership of stream and will not close it on + * {@link #close()}. + * + * @param inputStream The InputStream to read from + */ + public HistogramLogScanner(final InputStream inputStream) { + this(new Scanner(inputStream)); + } + + /** + * Constructs a new HistogramLogReader that produces intervals read from the specified file. + * @param inputFile The File to read from + * @throws FileNotFoundException when unable to find inputFile + */ + public HistogramLogScanner(final File inputFile) throws FileNotFoundException { + this(new Scanner(inputFile)); + } + + private HistogramLogScanner(Scanner scanner) + { + this.scanner = scanner; + this.lazyReader = new LazyHistogramReader(scanner); + initScanner(); + } + + private void initScanner() { + scanner.useLocale(Locale.US); + scanner.useDelimiter("[ ,\\r\\n]"); + } + + /** + * Close underlying scanner. + */ + @Override + public void close() + { + scanner.close(); + } + + public void process(EventHandler handler) { + while (scanner.hasNextLine()) { + try { + if (scanner.hasNext("\\#.*")) { + // comment line. 
+                // Look for explicit start time or base time notes in comments:
+                if (scanner.hasNext("#\\[StartTime:")) {
+                    scanner.next("#\\[StartTime:");
+                    if (scanner.hasNextDouble()) {
+                        double startTimeSec = scanner.nextDouble(); // start time represented as seconds since epoch
+                        if (handler.onStartTime(startTimeSec)) {
+                            return;
+                        }
+                    }
+                } else if (scanner.hasNext("#\\[BaseTime:")) {
+                    scanner.next("#\\[BaseTime:");
+                    if (scanner.hasNextDouble()) {
+                        double baseTimeSec = scanner.nextDouble(); // base time represented as seconds since epoch
+                        if (handler.onBaseTime(baseTimeSec))
+                        {
+                            return;
+                        }
+                    }
+                } else if (handler.onComment(scanner.next("\\#.*"))) {
+                    return;
+                }
+                continue;
+            }
+
+            if (scanner.hasNext("\"StartTimestamp\".*")) {
+                // Legend line
+                continue;
+            }
+
+            String tagString = null;
+            if (scanner.hasNext("Tag\\=.*")) {
+                tagString = scanner.next("Tag\\=.*").substring(4);
+            }
+
+            // Decode: startTimestamp, intervalLength, maxTime, histogramPayload
+            final double logTimeStampInSec = scanner.nextDouble(); // Timestamp is expected to be in seconds
+            final double intervalLengthSec = scanner.nextDouble(); // Timestamp length is expected to be in seconds
+            scanner.nextDouble(); // Skip maxTime field, as max time can be deduced from the histogram.
+
+            lazyReader.allowGet();
+            if (handler.onHistogram(tagString, logTimeStampInSec, intervalLengthSec, lazyReader)) {
+                return;
+            }
+
+        } catch (Throwable ex) {
+            if (handler.onException(ex)) {
+                return;
+            }
+        } finally {
+            scanner.nextLine(); // Move to next line.
+        }
+    }
+}
+
+    /**
+ * Indicates whether or not additional intervals may exist in the log
+ *
+ * @return true if additional intervals may exist in the log
+ */
+    public boolean hasNextLine() {
+        return scanner.hasNextLine();
+    }
+}
diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/HistogramLogWriter.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/HistogramLogWriter.java
new file mode 100644
index 000000000..f4ef8af5e
--- /dev/null
+++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/HistogramLogWriter.java
@@ -0,0 +1,244 @@
+package io.prometheus.client.HdrHistogram;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.OutputStream;
+import java.io.PrintStream;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.Date;
+import java.util.Locale;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.zip.Deflater;
+
+import static java.nio.ByteOrder.BIG_ENDIAN;
+
+/**
+ * A histogram log writer.
+ * <p>
+ * Histogram logs are used to capture full fidelity, per-time-interval
+ * histograms of a recorded value.
+ * <p>
+ * For example, a histogram log can be used to capture high fidelity + * reaction-time logs for some measured system or subsystem component. + * Such a log would capture a full reaction time histogram for each + * logged interval, and could be used to later reconstruct a full + * HdrHistogram of the measured reaction time behavior for any arbitrary + * time range within the log, by adding [only] the relevant interval + * histograms. + *
<p>
+ * This log writer will produce histogram logs that adhere to the
+ * histogram log format (see {@link HistogramLogReader} for log format
+ * details). Optional comments, start time, legend, and format version
+ * can be logged.
+ * <p>
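+ * A typical writing sequence, following the comment / version / start time / legend
+ * convention described below, might look like this (an illustrative sketch; the
+ * "intervals" collection is hypothetical and exception handling is omitted):
+ * <pre>
+ * HistogramLogWriter writer = new HistogramLogWriter("mylog.hlog");
+ * writer.outputComment("[Logged with MyApp 1.0]");
+ * writer.outputLogFormatVersion();
+ * writer.outputStartTime(System.currentTimeMillis());
+ * writer.outputLegend();
+ * for (Histogram interval : intervals) {
+ *     writer.outputIntervalHistogram(interval);
+ * }
+ * writer.close();
+ * </pre>
+ * <p>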
+ * By convention, it is typical for the logging application + * to use a comment to indicate the logging application at the head + * of the log, followed by the log format version, a start time, + * and a legend (in that order). + * + */ +public class HistogramLogWriter { + private static final String HISTOGRAM_LOG_FORMAT_VERSION = "1.3"; + + private static Pattern containsDelimiterPattern = Pattern.compile(".[, \\r\\n]."); + private Matcher containsDelimiterMatcher = containsDelimiterPattern.matcher(""); + + private final PrintStream log; + + private ByteBuffer targetBuffer; + + private long baseTime = 0; + + /** + * Constructs a new HistogramLogWriter around a newly created file with the specified file name. + * @param outputFileName The name of the file to create + * @throws FileNotFoundException when unable to open outputFileName + */ + public HistogramLogWriter(final String outputFileName) throws FileNotFoundException { + log = new PrintStream(outputFileName); + } + + /** + * Constructs a new HistogramLogWriter that will write into the specified file. + * @param outputFile The File to write to + * @throws FileNotFoundException when unable to open outputFile + */ + public HistogramLogWriter(final File outputFile) throws FileNotFoundException { + log = new PrintStream(outputFile); + } + + /** + * Constructs a new HistogramLogWriter that will write into the specified output stream. + * @param outputStream The OutputStream to write to + */ + public HistogramLogWriter(final OutputStream outputStream) { + log = new PrintStream(outputStream); + } + + /** + * Constructs a new HistogramLogWriter that will write into the specified print stream. + * @param printStream The PrintStream to write to + */ + public HistogramLogWriter(final PrintStream printStream) { + log = printStream; + } + + /** + * Closes the file or output stream for this log writer. + */ + public void close() { + log.close(); + } + + /** + * Output an interval histogram, with the given timestamp information and the [optional] tag + * associated with the histogram, using a configurable maxValueUnitRatio. (note that the + * specified timestamp information will be used, and the timestamp information in the actual + * histogram will be ignored). + * The max value reported with the interval line will be scaled by the given maxValueUnitRatio. + * @param startTimeStampSec The start timestamp to log with the interval histogram, in seconds. + * @param endTimeStampSec The end timestamp to log with the interval histogram, in seconds. + * @param histogram The interval histogram to log. + * @param maxValueUnitRatio The ratio by which to divide the histogram's max value when reporting on it. 
+ */
+    public synchronized void outputIntervalHistogram(final double startTimeStampSec,
+                                                     final double endTimeStampSec,
+                                                     final EncodableHistogram histogram,
+                                                     final double maxValueUnitRatio) {
+        if ((targetBuffer == null) || targetBuffer.capacity() < histogram.getNeededByteBufferCapacity()) {
+            targetBuffer = ByteBuffer.allocate(histogram.getNeededByteBufferCapacity()).order(BIG_ENDIAN);
+        }
+        targetBuffer.clear();
+
+        int compressedLength = histogram.encodeIntoCompressedByteBuffer(targetBuffer, Deflater.BEST_COMPRESSION);
+        byte[] compressedArray = Arrays.copyOf(targetBuffer.array(), compressedLength);
+
+        String tag = histogram.getTag();
+        if (tag == null) {
+            log.format(Locale.US, "%.3f,%.3f,%.3f,%s\n",
+                    startTimeStampSec,
+                    endTimeStampSec - startTimeStampSec,
+                    histogram.getMaxValueAsDouble() / maxValueUnitRatio,
+                    Base64Helper.printBase64Binary(compressedArray)
+            );
+        } else {
+            containsDelimiterMatcher.reset(tag);
+            if (containsDelimiterMatcher.matches()) {
+                throw new IllegalArgumentException("Tag string cannot contain commas, spaces, or line breaks");
+            }
+            log.format(Locale.US, "Tag=%s,%.3f,%.3f,%.3f,%s\n",
+                    tag,
+                    startTimeStampSec,
+                    endTimeStampSec - startTimeStampSec,
+                    histogram.getMaxValueAsDouble() / maxValueUnitRatio,
+                    Base64Helper.printBase64Binary(compressedArray)
+            );
+        }
+    }
+
+    /**
+ * Output an interval histogram, with the given timestamp information, and the [optional] tag
+ * associated with the histogram. (note that the specified timestamp information will be used,
+ * and the timestamp information in the actual histogram will be ignored).
+ * The max value in the histogram will be reported scaled down by a default maxValueUnitRatio of
+ * 1,000,000 (which is the msec : nsec ratio). Caller should use the direct form specifying
+ * maxValueUnitRatio if some other ratio is needed for the max value output.
+ * @param startTimeStampSec The start timestamp to log with the interval histogram, in seconds.
+ * @param endTimeStampSec The end timestamp to log with the interval histogram, in seconds.
+ * @param histogram The interval histogram to log.
+ */
+    public void outputIntervalHistogram(final double startTimeStampSec,
+                                        final double endTimeStampSec,
+                                        final EncodableHistogram histogram) {
+        outputIntervalHistogram(startTimeStampSec, endTimeStampSec, histogram, 1000000.0);
+    }
+
+    /**
+ * Output an interval histogram, using the start/end timestamp indicated in the histogram,
+ * and the [optional] tag associated with the histogram.
+ * The histogram start and end timestamps are assumed to be in msec units. Logging will be
+ * in seconds, relative to a base time (if set via {@link HistogramLogWriter#setBaseTime}).
+ * The default base time is 0.
+ * <p>
+ * By convention, histogram start/end times are generally stamped with absolute times in msec
+ * since the epoch. For logging with absolute time stamps, the base time would remain zero. For
+ * logging with relative time stamps (time since a start point), the base time should be set
+ * with {@link HistogramLogWriter#setBaseTime}.
+ * <p>
<p>
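+ * For example (illustrative numbers): with the default base time of 0, a histogram whose
+ * getStartTimeStamp() is 1500000000123 msec would be logged with a start time of
+ * 1500000000.123 seconds.
+ * <p>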
+ * The max value in the histogram will be reported scaled down by a default maxValueUnitRatio of + * 1,000,000 (which is the msec : nsec ratio). Caller should use the direct form specifying + * maxValueUnitRatio if some other ratio is needed for the max value output. + * @param histogram The interval histogram to log. + */ + public void outputIntervalHistogram(final EncodableHistogram histogram) { + outputIntervalHistogram((histogram.getStartTimeStamp() - baseTime)/1000.0, + (histogram.getEndTimeStamp() - baseTime)/1000.0, + histogram); + } + + /** + * Log a start time in the log. + * @param startTimeMsec time (in milliseconds) since the absolute start time (the epoch) + */ + public void outputStartTime(final long startTimeMsec) { + log.format(Locale.US, "#[StartTime: %.3f (seconds since epoch), %s]\n", + startTimeMsec / 1000.0, + (new Date(startTimeMsec)).toString()); + } + + + /** + * Log a base time in the log. + * @param baseTimeMsec time (in milliseconds) since the absolute start time (the epoch) + */ + public void outputBaseTime(final long baseTimeMsec) { + log.format(Locale.US, "#[BaseTime: %.3f (seconds since epoch)]\n", + baseTimeMsec/1000.0); + } + + /** + * Log a comment to the log. + * Comments will be preceded with with the '#' character. + * @param comment the comment string. + */ + public void outputComment(final String comment) { + log.format("#%s\n", comment); + } + + /** + * Output a legend line to the log. + */ + public void outputLegend() { + log.println("\"StartTimestamp\",\"Interval_Length\",\"Interval_Max\",\"Interval_Compressed_Histogram\""); + } + + /** + * Output a log format version to the log. + */ + public void outputLogFormatVersion() { + outputComment("[Histogram log format version " + HISTOGRAM_LOG_FORMAT_VERSION +"]"); + } + + /** + * Set a base time to subtract from supplied histogram start/end timestamps when + * logging based on histogram timestamps. + * Base time is expected to be in msec since the epoch, as histogram start/end times + * are typically stamped with absolute times in msec since the epoch. + * @param baseTimeMsec base time to calculate timestamp deltas from + */ + public void setBaseTime(long baseTimeMsec) { + this.baseTime = baseTimeMsec; + } + + /** + * return the current base time offset (see {@link HistogramLogWriter#setBaseTime}). + * @return the current base time + */ + public long getBaseTime() { + return baseTime; + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/IntCountsHistogram.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/IntCountsHistogram.java new file mode 100644 index 000000000..a64146cc7 --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/IntCountsHistogram.java @@ -0,0 +1,264 @@ +/** + * Written by Gil Tene of Azul Systems, and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.zip.DataFormatException; + +/** + *
 A High Dynamic Range (HDR) Histogram using an int count type. + *
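 + * A brief usage sketch, for illustration only ({@code recordValue} is inherited from {@link AbstractHistogram}; values are hypothetical):
 + * <pre>{@code
 + * IntCountsHistogram histogram = new IntCountsHistogram(3600000000L, 3);
 + * histogram.recordValue(42);
 + * // Counts are kept as ints; pushing a count past Integer.MAX_VALUE throws IllegalStateException.
 + * }</pre>
 + *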
+ * See package description for {@link org.HdrHistogram} for details. + */ + +public class IntCountsHistogram extends AbstractHistogram { + long totalCount; + int[] counts; + int normalizingIndexOffset; + + @Override + long getCountAtIndex(final int index) { + return counts[normalizeIndex(index, normalizingIndexOffset, countsArrayLength)]; + } + + @Override + long getCountAtNormalizedIndex(final int index) { + return counts[index]; + } + + @Override + void incrementCountAtIndex(final int index) { + int normalizedIndex = normalizeIndex(index, normalizingIndexOffset, countsArrayLength); + int currentCount = counts[normalizedIndex]; + int newCount = currentCount + 1; + if (newCount < 0) { + throw new IllegalStateException("would overflow integer count"); + } + counts[normalizedIndex] = newCount; + } + + @Override + void addToCountAtIndex(final int index, final long value) { + int normalizedIndex = normalizeIndex(index, normalizingIndexOffset, countsArrayLength); + + long currentCount = counts[normalizedIndex]; + long newCount = (currentCount + value); + if ((newCount < Integer.MIN_VALUE) || (newCount > Integer.MAX_VALUE)) { + throw new IllegalStateException("would overflow integer count"); + } + counts[normalizedIndex] = (int) newCount; + } + + @Override + void setCountAtIndex(int index, long value) { + setCountAtNormalizedIndex(normalizeIndex(index, normalizingIndexOffset, countsArrayLength), value); + } + + @Override + void setCountAtNormalizedIndex(int index, long value) { + if ((value < 0) || (value > Integer.MAX_VALUE)) { + throw new IllegalStateException("would overflow integer count"); + } + counts[index] = (int) value; + } + + @Override + int getNormalizingIndexOffset() { + return normalizingIndexOffset; + } + + @Override + void setNormalizingIndexOffset(int normalizingIndexOffset) { + this.normalizingIndexOffset = normalizingIndexOffset; + } + + + @Override + void setIntegerToDoubleValueConversionRatio(double integerToDoubleValueConversionRatio) { + nonConcurrentSetIntegerToDoubleValueConversionRatio(integerToDoubleValueConversionRatio); + } + + @Override + void shiftNormalizingIndexByOffset(int offsetToAdd, + boolean lowestHalfBucketPopulated, + double newIntegerToDoubleValueConversionRatio) { + nonConcurrentNormalizingIndexShift(offsetToAdd, lowestHalfBucketPopulated); + } + + @Override + void clearCounts() { + Arrays.fill(counts, 0); + totalCount = 0; + } + + @Override + public IntCountsHistogram copy() { + IntCountsHistogram copy = new IntCountsHistogram(this); + copy.add(this); + return copy; + } + + @Override + public IntCountsHistogram copyCorrectedForCoordinatedOmission(final long expectedIntervalBetweenValueSamples) { + IntCountsHistogram toHistogram = new IntCountsHistogram(this); + toHistogram.addWhileCorrectingForCoordinatedOmission(this, expectedIntervalBetweenValueSamples); + return toHistogram; + } + + @Override + public long getTotalCount() { + return totalCount; + } + + @Override + void setTotalCount(final long totalCount) { + this.totalCount = totalCount; + } + + @Override + void incrementTotalCount() { + totalCount++; + } + + @Override + void addToTotalCount(long value) { + totalCount += value; + } + + @Override + int _getEstimatedFootprintInBytes() { + return (512 + (4 * counts.length)); + } + + @Override + void resize(long newHighestTrackableValue) { + int oldNormalizedZeroIndex = normalizeIndex(0, normalizingIndexOffset, countsArrayLength); + + establishSize(newHighestTrackableValue); + + int countsDelta = countsArrayLength - counts.length; + + counts = 
 Arrays.copyOf(counts, countsArrayLength); + + if (oldNormalizedZeroIndex != 0) { + // We need to shift the stuff from the zero index and up to the end of the array: + int newNormalizedZeroIndex = oldNormalizedZeroIndex + countsDelta; + int lengthToCopy = (countsArrayLength - countsDelta) - oldNormalizedZeroIndex; + System.arraycopy(counts, oldNormalizedZeroIndex, counts, newNormalizedZeroIndex, lengthToCopy); + Arrays.fill(counts, oldNormalizedZeroIndex, newNormalizedZeroIndex, 0); + } + } + + /** + * Construct an auto-resizing IntCountsHistogram with a lowest discernible value of 1 and an auto-adjusting + * highestTrackableValue. Can auto-resize up to track values up to (Long.MAX_VALUE / 2). + * + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5. + */ + public IntCountsHistogram(final int numberOfSignificantValueDigits) { + this(1, 2, numberOfSignificantValueDigits); + setAutoResize(true); + } + + /** + * Construct an IntCountsHistogram given the Highest value to be tracked and a number of significant decimal digits. The + * histogram will be constructed to implicitly track (distinguish from 0) values as low as 1. + * + * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive + * integer that is {@literal >=} 2. + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5. + */ + public IntCountsHistogram(final long highestTrackableValue, final int numberOfSignificantValueDigits) { + this(1, highestTrackableValue, numberOfSignificantValueDigits); + } + + /** + * Construct an IntCountsHistogram given the Lowest and Highest values to be tracked and a number of significant + * decimal digits. Providing a lowestDiscernibleValue is useful in situations where the units used + * for the histogram's values are much smaller than the minimal accuracy required. E.g. when tracking + * time values stated in nanosecond units, where the minimal accuracy required is a microsecond, the + * proper value for lowestDiscernibleValue would be 1000. + * + * @param lowestDiscernibleValue The lowest value that can be tracked (distinguished from 0) by the histogram. + * Must be a positive integer that is {@literal >=} 1. May be internally rounded + * down to nearest power of 2. + * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive + * integer that is {@literal >=} (2 * lowestDiscernibleValue). + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5.
+ */ + public IntCountsHistogram(final long lowestDiscernibleValue, final long highestTrackableValue, + final int numberOfSignificantValueDigits) { + super(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits); + counts = new int[countsArrayLength]; + wordSizeInBytes = 4; + } + + /** + * Construct a histogram with the same range settings as a given source histogram, + * duplicating the source's start/end timestamps (but NOT it's contents) + * @param source The source histogram to duplicate + */ + public IntCountsHistogram(final AbstractHistogram source) { + super(source); + counts = new int[countsArrayLength]; + wordSizeInBytes = 4; + } + + /** + * Construct a new histogram by decoding it from a ByteBuffer. + * @param buffer The buffer to decode from + * @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high + * @return The newly constructed histogram + */ + public static IntCountsHistogram decodeFromByteBuffer(final ByteBuffer buffer, + final long minBarForHighestTrackableValue) { + return decodeFromByteBuffer(buffer, IntCountsHistogram.class, minBarForHighestTrackableValue); + } + + /** + * Construct a new histogram by decoding it from a compressed form in a ByteBuffer. + * @param buffer The buffer to decode from + * @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high + * @return The newly constructed histogram + * @throws DataFormatException on error parsing/decompressing the buffer + */ + public static IntCountsHistogram decodeFromCompressedByteBuffer(final ByteBuffer buffer, + final long minBarForHighestTrackableValue) + throws DataFormatException { + return decodeFromCompressedByteBuffer(buffer, IntCountsHistogram.class, minBarForHighestTrackableValue); + } + + /** + * Construct a new IntCountsHistogram by decoding it from a String containing a base64 encoded + * compressed histogram representation. + * + * @param base64CompressedHistogramString A string containing a base64 encoding of a compressed histogram + * @return A IntCountsHistogram decoded from the string + * @throws DataFormatException on error parsing/decompressing the input + */ + public static IntCountsHistogram fromString(final String base64CompressedHistogramString) + throws DataFormatException { + return decodeFromCompressedByteBuffer( + ByteBuffer.wrap(Base64Helper.parseBase64Binary(base64CompressedHistogramString)), + 0); + } + + private void readObject(final ObjectInputStream o) + throws IOException, ClassNotFoundException { + o.defaultReadObject(); + } +} \ No newline at end of file diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/LinearIterator.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/LinearIterator.java new file mode 100644 index 000000000..e777e95c1 --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/LinearIterator.java @@ -0,0 +1,77 @@ +/** + * Written by Gil Tene of Azul Systems, and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram; + +import java.util.Iterator; + +/** + * Used for iterating through histogram values in linear steps. The iteration is + * performed in steps of valueUnitsPerBucket in size, terminating when all recorded histogram + * values are exhausted. Note that each iteration "bucket" includes values up to and including + * the next bucket boundary value. 
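 + * An illustrative sketch (hedged: assumes the usual {@code linearBucketValues} convenience method from upstream HdrHistogram is present in this vendored AbstractHistogram):
 + * <pre>{@code
 + * // Print counts in 1000-unit-wide buckets
 + * for (HistogramIterationValue v : histogram.linearBucketValues(1000)) {
 + *     System.out.println(v.getValueIteratedTo() + " " + v.getCountAddedInThisIterationStep());
 + * }
 + * }</pre>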
 */ +public class LinearIterator extends AbstractHistogramIterator implements Iterator { + private long valueUnitsPerBucket; + private long currentStepHighestValueReportingLevel; + private long currentStepLowestValueReportingLevel; + + /** + * Reset iterator for re-use in a fresh iteration over the same histogram data set. + * @param valueUnitsPerBucket The size (in value units) of each bucket iteration. + */ + public void reset(final long valueUnitsPerBucket) { + reset(histogram, valueUnitsPerBucket); + } + + private void reset(final AbstractHistogram histogram, final long valueUnitsPerBucket) { + super.resetIterator(histogram); + this.valueUnitsPerBucket = valueUnitsPerBucket; + this.currentStepHighestValueReportingLevel = valueUnitsPerBucket - 1; + this.currentStepLowestValueReportingLevel = histogram.lowestEquivalentValue(currentStepHighestValueReportingLevel); + } + + /** + * @param histogram The histogram this iterator will operate on + * @param valueUnitsPerBucket The size (in value units) of each bucket iteration. + */ + public LinearIterator(final AbstractHistogram histogram, final long valueUnitsPerBucket) { + reset(histogram, valueUnitsPerBucket); + } + + @Override + public boolean hasNext() { + if (super.hasNext()) { + return true; + } + // If the next iteration will not move to the next sub bucket index (which is empty if + // we reached this point), then we are not yet done iterating (we want to iterate + // until we are no longer on a value that has a count, rather than until we first reach + // the last value that has a count. The difference is subtle but important)... + // When this is called, we're about to begin the "next" iteration, so + // currentStepHighestValueReportingLevel has already been incremented, and we use it + // without incrementing its value. + return (currentStepHighestValueReportingLevel < nextValueAtIndex); + } + + @Override + void incrementIterationLevel() { + currentStepHighestValueReportingLevel += valueUnitsPerBucket; + currentStepLowestValueReportingLevel = histogram.lowestEquivalentValue(currentStepHighestValueReportingLevel); + } + + @Override + long getValueIteratedTo() { + return currentStepHighestValueReportingLevel; + } + + @Override + boolean reachedIterationLevel() { + return ((currentValueAtIndex >= currentStepLowestValueReportingLevel) || + (currentIndex >= histogram.countsArrayLength - 1)) ; + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/LogarithmicIterator.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/LogarithmicIterator.java new file mode 100644 index 000000000..5a1443e06 --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/LogarithmicIterator.java @@ -0,0 +1,81 @@ +/** + * Written by Gil Tene of Azul Systems, and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram; + +import java.util.Iterator; + +/** + * Used for iterating through histogram values in logarithmically increasing levels. The iteration is + * performed in steps that start at valueUnitsInFirstBucket and increase exponentially according to + * logBase, terminating when all recorded histogram values are exhausted. Note that each iteration "bucket" + * includes values up to and including the next bucket boundary value.
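 + * An illustrative sketch (hedged: assumes the usual {@code logarithmicBucketValues} convenience method from upstream HdrHistogram is present in this vendored copy):
 + * <pre>{@code
 + * // Buckets start 1000 units wide and double each step (logBase 2.0)
 + * for (HistogramIterationValue v : histogram.logarithmicBucketValues(1000, 2.0)) {
 + *     System.out.println(v.getValueIteratedTo() + " " + v.getCountAddedInThisIterationStep());
 + * }
 + * }</pre>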
 */ +public class LogarithmicIterator extends AbstractHistogramIterator implements Iterator { + long valueUnitsInFirstBucket; + double logBase; + double nextValueReportingLevel; + long currentStepHighestValueReportingLevel; + long currentStepLowestValueReportingLevel; + + /** + * Reset iterator for re-use in a fresh iteration over the same histogram data set. + * @param valueUnitsInFirstBucket the size (in value units) of the first value bucket step + * @param logBase the multiplier by which the bucket size is expanded in each iteration step. + */ + public void reset(final long valueUnitsInFirstBucket, final double logBase) { + reset(histogram, valueUnitsInFirstBucket, logBase); + } + + private void reset(final AbstractHistogram histogram, final long valueUnitsInFirstBucket, final double logBase) { + super.resetIterator(histogram); + this.logBase = logBase; + this.valueUnitsInFirstBucket = valueUnitsInFirstBucket; + nextValueReportingLevel = valueUnitsInFirstBucket; + this.currentStepHighestValueReportingLevel = ((long) nextValueReportingLevel) - 1; + this.currentStepLowestValueReportingLevel = histogram.lowestEquivalentValue(currentStepHighestValueReportingLevel); + } + + /** + * @param histogram The histogram this iterator will operate on + * @param valueUnitsInFirstBucket the size (in value units) of the first value bucket step + * @param logBase the multiplier by which the bucket size is expanded in each iteration step. + */ + public LogarithmicIterator(final AbstractHistogram histogram, final long valueUnitsInFirstBucket, final double logBase) { + reset(histogram, valueUnitsInFirstBucket, logBase); + } + + @Override + public boolean hasNext() { + if (super.hasNext()) { + return true; + } + // If the next iteration will not move to the next sub bucket index (which is empty if + // we reached this point), then we are not yet done iterating (we want to iterate + // until we are no longer on a value that has a count, rather than until we first reach + // the last value that has a count. The difference is subtle but important)... + return (histogram.lowestEquivalentValue((long) nextValueReportingLevel) < nextValueAtIndex); + } + + @Override + void incrementIterationLevel() { + nextValueReportingLevel *= logBase; + this.currentStepHighestValueReportingLevel = ((long)nextValueReportingLevel) - 1; + currentStepLowestValueReportingLevel = histogram.lowestEquivalentValue(currentStepHighestValueReportingLevel); + } + + @Override + long getValueIteratedTo() { + return currentStepHighestValueReportingLevel; + } + + @Override + boolean reachedIterationLevel() { + return ((currentValueAtIndex >= currentStepLowestValueReportingLevel) || + (currentIndex >= histogram.countsArrayLength - 1)) ; + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/PackedConcurrentDoubleHistogram.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/PackedConcurrentDoubleHistogram.java new file mode 100644 index 000000000..7b31a59c0 --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/PackedConcurrentDoubleHistogram.java @@ -0,0 +1,160 @@ +/** + * Written by Gil Tene of Azul Systems, and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram; + +import java.nio.ByteBuffer; +import java.util.zip.DataFormatException; + +/** + *
 A floating point values High Dynamic Range (HDR) Histogram that uses a packed internal representation and + * supports safe concurrent recording operations. + *
+ * A {@link PackedConcurrentDoubleHistogram} is a variant of {@link DoubleHistogram} that guarantees + * lossless recording of values into the histogram even when the histogram is updated by multiple threads, and + * supports auto-resize and auto-ranging operations that may occur concurrently as a result of recording operations. + *
 + * {@link PackedConcurrentDoubleHistogram} tracks value counts in a packed internal representation optimized + * for the typical case in which recorded histogram values are sparse in the value range and tend to be incremented in small unit counts. + * This packed representation tends to require significantly smaller amounts of storage when compared to unpacked + * representations, but can incur additional recording cost due to resizing and repacking operations that may + * occur as previously unrecorded values are encountered. + *
 + * It is important to note that concurrent recording, auto-sizing, and value shifting are the only thread-safe behaviors + * provided by {@link PackedConcurrentDoubleHistogram}, and that it is not otherwise synchronized. Specifically, {@link + * PackedConcurrentDoubleHistogram} provides no implicit synchronization that would prevent the contents of the histogram + * from changing during queries, iterations, copies, or addition operations on the histogram. Callers wishing to make + * potentially concurrent, multi-threaded updates that would safely work in the presence of queries, copies, or + * additions of histogram objects should either take care to externally synchronize and/or order their access, + * or use the {@link DoubleRecorder} or {@link SingleWriterDoubleRecorder} which are intended for this purpose. + *
 + * {@link PackedConcurrentDoubleHistogram} supports the recording and analyzing of sampled data value counts across a + * configurable dynamic range of floating point (double) values, with configurable value precision within the range. + * Dynamic range is expressed as a ratio between the highest and lowest non-zero values trackable within the histogram + * at any given time. Value precision is expressed as the number of significant [decimal] digits in the value recording, + * and provides control over value quantization behavior across the value range and the subsequent value resolution at + * any given level. + *
 + * Auto-ranging: Unlike integer value based histograms, the specific value range tracked by a {@link + * PackedConcurrentDoubleHistogram} is not specified upfront. Only the dynamic range of values that the histogram can cover is + * (optionally) specified. E.g. When a {@link PackedConcurrentDoubleHistogram} is created to track a dynamic range of + * 3600000000000 (enough to track values from a nanosecond to an hour), values could be recorded into it in any + * consistent unit of time as long as the ratio between the highest and lowest non-zero values stays within the + * specified dynamic range, so recording in units of nanoseconds (1.0 thru 3600000000000.0), milliseconds (0.000001 + * thru 3600000.0), seconds (0.000000001 thru 3600.0), hours (1/3.6E12 thru 1.0) will all work just as well. + *
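 + * For example, an illustrative sketch with hypothetical values ({@code recordValue} comes from {@link DoubleHistogram}):
 + * <pre>{@code
 + * PackedConcurrentDoubleHistogram h = new PackedConcurrentDoubleHistogram(3600000000000L, 3);
 + * h.recordValue(0.000250); // 250 usec, expressed in seconds
 + * h.recordValue(1.5);      // 1.5 seconds; same unit, well within the dynamic range
 + * }</pre>
 + *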
+ * Auto-resizing: When constructed with no specified dynamic range (or when auto-resize is turned on with {@link + * PackedConcurrentDoubleHistogram#setAutoResize}) a {@link PackedConcurrentDoubleHistogram} will auto-resize its dynamic range to + * include recorded values as they are encountered. Note that recording calls that cause auto-resizing may take + * longer to execute, as resizing incurs allocation and copying of internal data structures. + *
 + * Attempts to record non-zero values that range outside of the specified dynamic range (or exceed the limits + * of dynamic range when auto-resizing) may result in {@link ArrayIndexOutOfBoundsException} exceptions, either + * due to overflow or underflow conditions. These exceptions will only be thrown if recording the value would have + * resulted in discarding or losing the required value precision of values already recorded in the histogram. + *
+ * See package description for {@link org.HdrHistogram} for details. + */ + +public class PackedConcurrentDoubleHistogram extends ConcurrentDoubleHistogram { + + /** + * Construct a new auto-resizing DoubleHistogram using a precision stated as a number of significant decimal + * digits. + * + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant decimal + * digits to which the histogram will maintain value resolution and + * separation. Must be a non-negative integer between 0 and 5. + */ + public PackedConcurrentDoubleHistogram(final int numberOfSignificantValueDigits) { + this(2, numberOfSignificantValueDigits); + setAutoResize(true); + } + + /** + * Construct a new DoubleHistogram with the specified dynamic range (provided in {@code highestToLowestValueRatio}) + * and using a precision stated as a number of significant decimal digits. + * + * @param highestToLowestValueRatio specifies the dynamic range to use + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant decimal + * digits to which the histogram will maintain value resolution and + * separation. Must be a non-negative integer between 0 and 5. + */ + public PackedConcurrentDoubleHistogram(final long highestToLowestValueRatio, final int numberOfSignificantValueDigits) { + this(highestToLowestValueRatio, numberOfSignificantValueDigits, PackedConcurrentHistogram.class); + } + + /** + * Construct a {@link PackedConcurrentDoubleHistogram} with the same range settings as a given source, + * duplicating the source's start/end timestamps (but NOT it's contents) + * @param source The source histogram to duplicate + */ + public PackedConcurrentDoubleHistogram(final DoubleHistogram source) { + super(source); + } + + PackedConcurrentDoubleHistogram(final long highestToLowestValueRatio, + final int numberOfSignificantValueDigits, + final Class internalCountsHistogramClass) { + super(highestToLowestValueRatio, numberOfSignificantValueDigits, internalCountsHistogramClass); + } + + PackedConcurrentDoubleHistogram(final long highestToLowestValueRatio, + final int numberOfSignificantValueDigits, + final Class internalCountsHistogramClass, + AbstractHistogram internalCountsHistogram) { + super( + highestToLowestValueRatio, + numberOfSignificantValueDigits, + internalCountsHistogramClass, + internalCountsHistogram + ); + } + + /** + * Construct a new ConcurrentDoubleHistogram by decoding it from a ByteBuffer. + * @param buffer The buffer to decode from + * @param minBarForHighestToLowestValueRatio Force highestTrackableValue to be set at least this high + * @return The newly constructed ConcurrentDoubleHistogram + */ + public static PackedConcurrentDoubleHistogram decodeFromByteBuffer( + final ByteBuffer buffer, + final long minBarForHighestToLowestValueRatio) { + try { + int cookie = buffer.getInt(); + if (!isNonCompressedDoubleHistogramCookie(cookie)) { + throw new IllegalArgumentException("The buffer does not contain a DoubleHistogram"); + } + PackedConcurrentDoubleHistogram histogram = constructHistogramFromBuffer(cookie, buffer, + PackedConcurrentDoubleHistogram.class, PackedConcurrentHistogram.class, + minBarForHighestToLowestValueRatio); + return histogram; + } catch (DataFormatException ex) { + throw new RuntimeException(ex); + } + } + + /** + * Construct a new ConcurrentDoubleHistogram by decoding it from a compressed form in a ByteBuffer. 
+ * @param buffer The buffer to decode from + * @param minBarForHighestToLowestValueRatio Force highestTrackableValue to be set at least this high + * @return The newly constructed ConcurrentDoubleHistogram + * @throws DataFormatException on error parsing/decompressing the buffer + */ + public static PackedConcurrentDoubleHistogram decodeFromCompressedByteBuffer( + final ByteBuffer buffer, + final long minBarForHighestToLowestValueRatio) throws DataFormatException { + int cookie = buffer.getInt(); + if (!isCompressedDoubleHistogramCookie(cookie)) { + throw new IllegalArgumentException("The buffer does not contain a compressed DoubleHistogram"); + } + PackedConcurrentDoubleHistogram histogram = constructHistogramFromBuffer(cookie, buffer, + PackedConcurrentDoubleHistogram.class, PackedConcurrentHistogram.class, + minBarForHighestToLowestValueRatio); + return histogram; + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/PackedConcurrentHistogram.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/PackedConcurrentHistogram.java new file mode 100644 index 000000000..9b0383bfa --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/PackedConcurrentHistogram.java @@ -0,0 +1,309 @@ +/** + * Written by Gil Tene of Azul Systems, and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram; + +import io.prometheus.client.HdrHistogram.packedarray.ConcurrentPackedLongArray; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.Serializable; +import java.nio.ByteBuffer; +import java.util.zip.DataFormatException; + +/** + *
 An integer values High Dynamic Range (HDR) Histogram that uses a packed internal representation + * and supports safe concurrent recording operations. + *
+ * A {@link PackedConcurrentHistogram} guarantees lossless recording of values into the histogram even when the + * histogram is updated by multiple threads, and supports auto-resize and shift operations that may + * result from or occur concurrently with other recording operations. + *
 + * {@link PackedConcurrentHistogram} tracks value counts in a packed internal representation optimized + * for the typical case in which recorded histogram values are sparse in the value range and tend to be incremented in small unit counts. + * This packed representation tends to require significantly smaller amounts of storage when compared to unpacked + * representations, but can incur additional recording cost due to resizing and repacking operations that may + * occur as previously unrecorded values are encountered. + *
 + * It is important to note that concurrent recording, auto-sizing, and value shifting are the only thread-safe + * behaviors provided by {@link PackedConcurrentHistogram}, and that it is not otherwise synchronized. Specifically, + * {@link PackedConcurrentHistogram} provides no implicit synchronization that would prevent the contents of the + * histogram from changing during queries, iterations, copies, or addition operations on the histogram. Callers + * wishing to make potentially concurrent, multi-threaded updates that would safely work in the presence of + * queries, copies, or additions of histogram objects should either take care to externally synchronize and/or + * order their access, or use {@link Recorder} or {@link SingleWriterRecorder} which are intended for + * this purpose. + *
 + * Auto-resizing: When constructed with no specified value range (or when auto-resize is turned on with {@link + * Histogram#setAutoResize}) a {@link PackedConcurrentHistogram} will auto-resize its dynamic range to include recorded + * values as they are encountered. Note that recording calls that cause auto-resizing may take longer to execute, as + * resizing incurs allocation and copying of internal data structures. + *
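 + * A minimal recording sketch (illustrative only; {@code recordValue} is inherited from {@link AbstractHistogram} and is safe to call from multiple threads concurrently):
 + * <pre>{@code
 + * PackedConcurrentHistogram h = new PackedConcurrentHistogram(3); // auto-resizing, 3 significant digits
 + * h.recordValue(42);
 + * }</pre>
 + *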
+ * See package description for {@link org.HdrHistogram} for details. + */ + +public class PackedConcurrentHistogram extends ConcurrentHistogram { + + @Override + ConcurrentArrayWithNormalizingOffset allocateArray(int length, int normalizingIndexOffset) { + return new ConcurrentPackedArrayWithNormalizingOffset(length, normalizingIndexOffset); + } + + @Override + void clearCounts() { + try { + wrp.readerLock(); + assert (countsArrayLength == activeCounts.length()); + assert (countsArrayLength == inactiveCounts.length()); + for (int i = 0; i < activeCounts.length(); i++) { + activeCounts.lazySet(i, 0); + inactiveCounts.lazySet(i, 0); + } + totalCountUpdater.set(this, 0); + } finally { + wrp.readerUnlock(); + } + } + + @Override + public PackedConcurrentHistogram copy() { + PackedConcurrentHistogram copy = new PackedConcurrentHistogram(this); + copy.add(this); + return copy; + } + + @Override + public PackedConcurrentHistogram copyCorrectedForCoordinatedOmission(final long expectedIntervalBetweenValueSamples) { + PackedConcurrentHistogram toHistogram = new PackedConcurrentHistogram(this); + toHistogram.addWhileCorrectingForCoordinatedOmission(this, expectedIntervalBetweenValueSamples); + return toHistogram; + } + + @Override + public long getTotalCount() { + return totalCountUpdater.get(this); + } + + @Override + void setTotalCount(final long totalCount) { + totalCountUpdater.set(this, totalCount); + } + + @Override + void incrementTotalCount() { + totalCountUpdater.incrementAndGet(this); + } + + @Override + void addToTotalCount(final long value) { + totalCountUpdater.addAndGet(this, value); + } + + + @Override + int _getEstimatedFootprintInBytes() { + try { + wrp.readerLock(); + return 128 + activeCounts.getEstimatedFootprintInBytes() + inactiveCounts.getEstimatedFootprintInBytes(); + } finally { + wrp.readerUnlock(); + } + } + + /** + * Construct an auto-resizing ConcurrentHistogram with a lowest discernible value of 1 and an auto-adjusting + * highestTrackableValue. Can auto-resize up to track values up to (Long.MAX_VALUE / 2). + * + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5. + */ + public PackedConcurrentHistogram(final int numberOfSignificantValueDigits) { + this(1, 2, numberOfSignificantValueDigits); + setAutoResize(true); + } + + /** + * Construct a ConcurrentHistogram given the Highest value to be tracked and a number of significant decimal + * digits. The histogram will be constructed to implicitly track (distinguish from 0) values as low as 1. + * + * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive + * integer that is {@literal >=} 2. + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5. + */ + public PackedConcurrentHistogram(final long highestTrackableValue, final int numberOfSignificantValueDigits) { + this(1, highestTrackableValue, numberOfSignificantValueDigits); + } + + /** + * Construct a ConcurrentHistogram given the Lowest and Highest values to be tracked and a number of significant + * decimal digits. 
 Providing a lowestDiscernibleValue is useful in situations where the units used + * for the histogram's values are much smaller than the minimal accuracy required. E.g. when tracking + * time values stated in nanosecond units, where the minimal accuracy required is a microsecond, the + * proper value for lowestDiscernibleValue would be 1000. + * + * @param lowestDiscernibleValue The lowest value that can be tracked (distinguished from 0) by the histogram. + * Must be a positive integer that is {@literal >=} 1. May be internally rounded + * down to nearest power of 2. + * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive + * integer that is {@literal >=} (2 * lowestDiscernibleValue). + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5. + */ + public PackedConcurrentHistogram(final long lowestDiscernibleValue, final long highestTrackableValue, + final int numberOfSignificantValueDigits) { + this(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits, + true); + } + + /** + * Construct a histogram with the same range settings as a given source histogram, + * duplicating the source's start/end timestamps (but NOT its contents) + * @param source The source histogram to duplicate + */ + public PackedConcurrentHistogram(final AbstractHistogram source) { + this(source, true); + } + + + PackedConcurrentHistogram(final AbstractHistogram source, boolean allocateCountsArray) { + super(source,false); + if (allocateCountsArray) { + activeCounts = new ConcurrentPackedArrayWithNormalizingOffset(countsArrayLength, 0); + inactiveCounts = new ConcurrentPackedArrayWithNormalizingOffset(countsArrayLength, 0); + } + wordSizeInBytes = 8; + } + + PackedConcurrentHistogram(final long lowestDiscernibleValue, final long highestTrackableValue, + final int numberOfSignificantValueDigits, boolean allocateCountsArray) { + super(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits, + false); + if (allocateCountsArray) { + activeCounts = new ConcurrentPackedArrayWithNormalizingOffset(countsArrayLength, 0); + inactiveCounts = new ConcurrentPackedArrayWithNormalizingOffset(countsArrayLength, 0); + } + wordSizeInBytes = 8; + } + + /** + * Construct a new histogram by decoding it from a ByteBuffer. + * @param buffer The buffer to decode from + * @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high + * @return The newly constructed histogram + */ + public static PackedConcurrentHistogram decodeFromByteBuffer(final ByteBuffer buffer, + final long minBarForHighestTrackableValue) { + return decodeFromByteBuffer(buffer, PackedConcurrentHistogram.class, minBarForHighestTrackableValue); + } + + /** + * Construct a new histogram by decoding it from a compressed form in a ByteBuffer.
+ * @param buffer The buffer to decode from + * @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high + * @return The newly constructed histogram + * @throws DataFormatException on error parsing/decompressing the buffer + */ + public static PackedConcurrentHistogram decodeFromCompressedByteBuffer(final ByteBuffer buffer, + final long minBarForHighestTrackableValue) + throws DataFormatException { + return decodeFromCompressedByteBuffer(buffer, PackedConcurrentHistogram.class, minBarForHighestTrackableValue); + } + + /** + * Construct a new ConcurrentHistogram by decoding it from a String containing a base64 encoded + * compressed histogram representation. + * + * @param base64CompressedHistogramString A string containing a base64 encoding of a compressed histogram + * @return A ConcurrentHistogram decoded from the string + * @throws DataFormatException on error parsing/decompressing the input + */ + public static PackedConcurrentHistogram fromString(final String base64CompressedHistogramString) + throws DataFormatException { + return decodeFromCompressedByteBuffer( + ByteBuffer.wrap(Base64Helper.parseBase64Binary(base64CompressedHistogramString)), + 0); + } + + private void readObject(final ObjectInputStream o) + throws IOException, ClassNotFoundException { + o.defaultReadObject(); + wrp = new WriterReaderPhaser(); + } + + @Override + synchronized void fillBufferFromCountsArray(final ByteBuffer buffer) { + try { + wrp.readerLock(); + super.fillBufferFromCountsArray(buffer); + } finally { + wrp.readerUnlock(); + } + } + + static class ConcurrentPackedArrayWithNormalizingOffset + implements ConcurrentArrayWithNormalizingOffset, Serializable { + + private ConcurrentPackedLongArray packedCounts; + + private int normalizingIndexOffset; + private double doubleToIntegerValueConversionRatio; + + ConcurrentPackedArrayWithNormalizingOffset(int length, int normalizingIndexOffset) { + packedCounts = new ConcurrentPackedLongArray(length); + this.normalizingIndexOffset = normalizingIndexOffset; + } + + public int getNormalizingIndexOffset() { + return normalizingIndexOffset; + } + + public void setNormalizingIndexOffset(int normalizingIndexOffset) { + this.normalizingIndexOffset = normalizingIndexOffset; + } + + public double getDoubleToIntegerValueConversionRatio() { + return doubleToIntegerValueConversionRatio; + } + + public void setDoubleToIntegerValueConversionRatio(double doubleToIntegerValueConversionRatio) { + this.doubleToIntegerValueConversionRatio = doubleToIntegerValueConversionRatio; + } + + @Override + public long get(int index) { + return packedCounts.get(index); + } + + @Override + public void atomicIncrement(int index) { + packedCounts.increment(index); + } + + @Override + public void atomicAdd(int index, long valueToAdd) { + packedCounts.add(index, valueToAdd); + } + + @Override + public void lazySet(int index, long newValue) { + packedCounts.set(index, newValue); + } + + @Override + public int length() { + return packedCounts.length(); + } + + @Override + public int getEstimatedFootprintInBytes() { + return 128 + (8 * packedCounts.getPhysicalLength()); + } + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/PackedDoubleHistogram.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/PackedDoubleHistogram.java new file mode 100644 index 000000000..780f64351 --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/PackedDoubleHistogram.java @@ -0,0 +1,152 @@ +/** + * Written by Gil 
Tene of Azul Systems, and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram; + +import java.nio.ByteBuffer; +import java.util.zip.DataFormatException; + +/** + *
 A floating point values High Dynamic Range (HDR) Histogram that uses a packed internal representation. + *
+ * It is important to note that {@link PackedDoubleHistogram} is not thread-safe, and does not support safe concurrent + * recording by multiple threads. If concurrent operation is required, consider using + * {@link PackedConcurrentDoubleHistogram}, or (recommended) {@link DoubleRecorder} or + * {@link SingleWriterDoubleRecorder} which are intended for this purpose. + *
 + * {@link PackedDoubleHistogram} tracks value counts in a packed internal representation optimized + * for the typical case in which recorded histogram values are sparse in the value range and tend to be incremented in small unit counts. + * This packed representation tends to require significantly smaller amounts of storage when compared to unpacked + * representations, but can incur additional recording cost due to resizing and repacking operations that may + * occur as previously unrecorded values are encountered. + *
 + * {@link PackedDoubleHistogram} supports the recording and analyzing of sampled data value counts across a + * configurable dynamic range of floating point (double) values, with configurable value precision within the range. + * Dynamic range is expressed as a ratio between the highest and lowest non-zero values trackable within the histogram + * at any given time. Value precision is expressed as the number of significant [decimal] digits in the value recording, + * and provides control over value quantization behavior across the value range and the subsequent value resolution at + * any given level. + *
 + * Auto-ranging: Unlike integer value based histograms, the specific value range tracked by a {@link + * PackedDoubleHistogram} is not specified upfront. Only the dynamic range of values that the histogram can cover is + * (optionally) specified. E.g. When a {@link PackedDoubleHistogram} is created to track a dynamic range of + * 3600000000000 (enough to track values from a nanosecond to an hour), values could be recorded into it in any + * consistent unit of time as long as the ratio between the highest and lowest non-zero values stays within the + * specified dynamic range, so recording in units of nanoseconds (1.0 thru 3600000000000.0), milliseconds (0.000001 + * thru 3600000.0), seconds (0.000000001 thru 3600.0), hours (1/3.6E12 thru 1.0) will all work just as well. + *
 + * Auto-resizing: When constructed with no specified dynamic range (or when auto-resize is turned on with {@link + * DoubleHistogram#setAutoResize}) a {@link PackedDoubleHistogram} will auto-resize its dynamic range to + * include recorded values as they are encountered. Note that recording calls that cause auto-resizing may take + * longer to execute, as resizing incurs allocation and copying of internal data structures. + *
 + * Attempts to record non-zero values that range outside of the specified dynamic range (or exceed the limits + * of dynamic range when auto-resizing) may result in {@link ArrayIndexOutOfBoundsException} exceptions, either + * due to overflow or underflow conditions. These exceptions will only be thrown if recording the value would have + * resulted in discarding or losing the required value precision of values already recorded in the histogram. + *
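 + * A minimal single-threaded sketch (illustrative values only):
 + * <pre>{@code
 + * PackedDoubleHistogram h = new PackedDoubleHistogram(2); // auto-resizing, 2 significant digits
 + * h.recordValue(3.14159);
 + * double median = h.getValueAtPercentile(50.0);
 + * }</pre>
 + *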
+ * See package description for {@link org.HdrHistogram} for details. + */ + +public class PackedDoubleHistogram extends DoubleHistogram { + + /** + * Construct a new auto-resizing DoubleHistogram using a precision stated as a number of significant decimal + * digits. + * + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant decimal + * digits to which the histogram will maintain value resolution and + * separation. Must be a non-negative integer between 0 and 5. + */ + public PackedDoubleHistogram(final int numberOfSignificantValueDigits) { + this(2, numberOfSignificantValueDigits); + setAutoResize(true); + } + + /** + * Construct a new DoubleHistogram with the specified dynamic range (provided in {@code highestToLowestValueRatio}) + * and using a precision stated as a number of significant decimal digits. + * + * @param highestToLowestValueRatio specifies the dynamic range to use + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant decimal + * digits to which the histogram will maintain value resolution and + * separation. Must be a non-negative integer between 0 and 5. + */ + public PackedDoubleHistogram(final long highestToLowestValueRatio, final int numberOfSignificantValueDigits) { + this(highestToLowestValueRatio, numberOfSignificantValueDigits, PackedHistogram.class); + } + + /** + * Construct a {@link PackedDoubleHistogram} with the same range settings as a given source, + * duplicating the source's start/end timestamps (but NOT it's contents) + * @param source The source histogram to duplicate + */ + public PackedDoubleHistogram(final DoubleHistogram source) { + super(source); + } + + PackedDoubleHistogram(final long highestToLowestValueRatio, + final int numberOfSignificantValueDigits, + final Class internalCountsHistogramClass) { + super(highestToLowestValueRatio, numberOfSignificantValueDigits, internalCountsHistogramClass); + } + + PackedDoubleHistogram(final long highestToLowestValueRatio, + final int numberOfSignificantValueDigits, + final Class internalCountsHistogramClass, + AbstractHistogram internalCountsHistogram) { + super( + highestToLowestValueRatio, + numberOfSignificantValueDigits, + internalCountsHistogramClass, + internalCountsHistogram + ); + } + + /** + * Construct a new ConcurrentDoubleHistogram by decoding it from a ByteBuffer. + * @param buffer The buffer to decode from + * @param minBarForHighestToLowestValueRatio Force highestTrackableValue to be set at least this high + * @return The newly constructed ConcurrentDoubleHistogram + */ + public static PackedDoubleHistogram decodeFromByteBuffer( + final ByteBuffer buffer, + final long minBarForHighestToLowestValueRatio) { + try { + int cookie = buffer.getInt(); + if (!isNonCompressedDoubleHistogramCookie(cookie)) { + throw new IllegalArgumentException("The buffer does not contain a DoubleHistogram"); + } + PackedDoubleHistogram histogram = constructHistogramFromBuffer(cookie, buffer, + PackedDoubleHistogram.class, PackedHistogram.class, + minBarForHighestToLowestValueRatio); + return histogram; + } catch (DataFormatException ex) { + throw new RuntimeException(ex); + } + } + + /** + * Construct a new ConcurrentDoubleHistogram by decoding it from a compressed form in a ByteBuffer. 
+ * @param buffer The buffer to decode from + * @param minBarForHighestToLowestValueRatio Force highestTrackableValue to be set at least this high + * @return The newly constructed ConcurrentDoubleHistogram + * @throws DataFormatException on error parsing/decompressing the buffer + */ + public static PackedDoubleHistogram decodeFromCompressedByteBuffer( + final ByteBuffer buffer, + final long minBarForHighestToLowestValueRatio) throws DataFormatException { + int cookie = buffer.getInt(); + if (!isCompressedDoubleHistogramCookie(cookie)) { + throw new IllegalArgumentException("The buffer does not contain a compressed DoubleHistogram"); + } + PackedDoubleHistogram histogram = constructHistogramFromBuffer(cookie, buffer, + PackedDoubleHistogram.class, PackedHistogram.class, + minBarForHighestToLowestValueRatio); + return histogram; + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/PackedHistogram.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/PackedHistogram.java new file mode 100644 index 000000000..58b08dee5 --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/PackedHistogram.java @@ -0,0 +1,235 @@ +/** + * Written by Gil Tene of Azul Systems, and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram; + +import io.prometheus.client.HdrHistogram.packedarray.PackedLongArray; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.nio.ByteBuffer; +import java.util.zip.DataFormatException; + +/** + *
 A High Dynamic Range (HDR) Histogram that uses a packed internal representation. + *
 + * {@link PackedHistogram} supports the recording and analyzing of sampled data value counts across a configurable + * integer value range with configurable value precision within the range. Value precision is expressed as the + * number of significant digits in the value recording, and provides control over value quantization behavior + * across the value range and the subsequent value resolution at any given level. + *
 + * {@link PackedHistogram} tracks value counts in a packed internal representation optimized + * for the typical case in which recorded histogram values are sparse in the value range and tend to be incremented in small unit counts. + * This packed representation tends to require significantly smaller amounts of storage when compared to unpacked + * representations, but can incur additional recording cost due to resizing and repacking operations that may + * occur as previously unrecorded values are encountered. + *
+ * For example, a {@link PackedHistogram} could be configured to track the counts of observed integer values between 0 and + * 3,600,000,000,000 while maintaining a value precision of 3 significant digits across that range. Value quantization + * within the range will thus be no larger than 1/1,000th (or 0.1%) of any value. This example Histogram could + * be used to track and analyze the counts of observed response times ranging between 1 nanosecond and 1 hour + * in magnitude, while maintaining a value resolution of 1 microsecond up to 1 millisecond, a resolution of + * 1 millisecond (or better) up to one second, and a resolution of 1 second (or better) up to 1,000 seconds. At its + * maximum tracked value (1 hour), it would still maintain a resolution of 3.6 seconds (or better). + *
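 + * The configuration described above, as an illustrative sketch (value units are hypothetical):
 + * <pre>{@code
 + * PackedHistogram h = new PackedHistogram(3600000000000L, 3); // 1 nsec .. 1 hour, 3 significant digits
 + * h.recordValue(5000000L); // e.g. a 5 msec response time, in nsec units
 + * }</pre>
 + *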
 + * Auto-resizing: When constructed with no specified value range (or when auto-resize is turned on with {@link + * Histogram#setAutoResize}) a {@link PackedHistogram} will auto-resize its dynamic range to include recorded values as + * they are encountered. Note that recording calls that cause auto-resizing may take longer to execute, as resizing + * incurs allocation and copying of internal data structures. + *
+ * See package description for {@link org.HdrHistogram} for details. + */ + +public class PackedHistogram extends Histogram { + + private PackedLongArray packedCounts; + + @Override + long getCountAtIndex(final int index) { + return getCountAtNormalizedIndex(normalizeIndex(index, normalizingIndexOffset, countsArrayLength)); + } + + @Override + long getCountAtNormalizedIndex(final int index) { + long count = packedCounts.get(index); + return count; + } + + @Override + void incrementCountAtIndex(final int index) { + packedCounts.increment(normalizeIndex(index, normalizingIndexOffset, countsArrayLength)); + } + + @Override + void addToCountAtIndex(final int index, final long value) { + packedCounts.add(normalizeIndex(index, normalizingIndexOffset, countsArrayLength), value); + } + + @Override + void setCountAtIndex(int index, long value) { + setCountAtNormalizedIndex(normalizeIndex(index, normalizingIndexOffset, countsArrayLength), value); + } + + @Override + void setCountAtNormalizedIndex(int index, long value) { + packedCounts.set(index, value); + } + + @Override + void clearCounts() { + packedCounts.clear(); + packedCounts.setVirtualLength(countsArrayLength); + totalCount = 0; + } + + @Override + public PackedHistogram copy() { + PackedHistogram copy = new PackedHistogram(this); + copy.add(this); + return copy; + } + + @Override + public PackedHistogram copyCorrectedForCoordinatedOmission(final long expectedIntervalBetweenValueSamples) { + PackedHistogram toHistogram = new PackedHistogram(this); + toHistogram.addWhileCorrectingForCoordinatedOmission(this, expectedIntervalBetweenValueSamples); + return toHistogram; + } + + @Override + void resize(long newHighestTrackableValue) { + int oldNormalizedZeroIndex = normalizeIndex(0, normalizingIndexOffset, countsArrayLength); + int oldCountsArrayLength = countsArrayLength; + + establishSize(newHighestTrackableValue); + + if (oldNormalizedZeroIndex != 0) { + // We need to shift the stuff from the zero index and up to the end of the array: + + // When things are shifted in a packed array its not simple to identify the region shifted, + // so re-record everything from the old normalized indexes to the new normalized indexes: + + PackedLongArray newPackedCounts = new PackedLongArray(countsArrayLength, packedCounts.getPhysicalLength()); + // Copy everything up to the oldNormalizedZeroIndex in place: + for (int fromIndex = 0; fromIndex < oldNormalizedZeroIndex; fromIndex++) { + long value = packedCounts.get(fromIndex); + if (value != 0) { + newPackedCounts.set(fromIndex, value); + } + } + // Copy everything from the oldNormalizedZeroIndex to the end with an index delta shift: + int countsDelta = countsArrayLength - oldCountsArrayLength; + + for (int fromIndex = oldNormalizedZeroIndex; fromIndex < oldCountsArrayLength; fromIndex++) { + long value = packedCounts.get(fromIndex); + if (value != 0) { + int toIndex = fromIndex + countsDelta; + newPackedCounts.set(toIndex, value); + } + } + // All unrecorded values are implicitly zero in the packed array + + packedCounts = newPackedCounts; + } else { + packedCounts.setVirtualLength(countsArrayLength); + } + } + + @Override + int _getEstimatedFootprintInBytes() { + return 192 + (8 * packedCounts.getPhysicalLength()); + } + + /** + * Construct an auto-resizing PackedHistogram with a lowest discernible value of 1 and an auto-adjusting + * highestTrackableValue. Can auto-resize up to track values up to (Long.MAX_VALUE / 2). + * + * @param numberOfSignificantValueDigits Specifies the precision to use. 
 This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5. + */ + public PackedHistogram(final int numberOfSignificantValueDigits) { + this(1, 2, numberOfSignificantValueDigits); + setAutoResize(true); + } + + /** + * Construct a PackedHistogram given the Highest value to be tracked and a number of significant decimal digits. The + * histogram will be constructed to implicitly track (distinguish from 0) values as low as 1. + * + * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive + * integer that is {@literal >=} 2. + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5. + */ + public PackedHistogram(final long highestTrackableValue, final int numberOfSignificantValueDigits) { + this(1, highestTrackableValue, numberOfSignificantValueDigits); + } + + /** + * Construct a PackedHistogram given the Lowest and Highest values to be tracked and a number of significant + * decimal digits. Providing a lowestDiscernibleValue is useful in situations where the units used + * for the histogram's values are much smaller than the minimal accuracy required. E.g. when tracking + * time values stated in nanosecond units, where the minimal accuracy required is a microsecond, the + * proper value for lowestDiscernibleValue would be 1000. + * + * @param lowestDiscernibleValue The lowest value that can be tracked (distinguished from 0) by the histogram. + * Must be a positive integer that is {@literal >=} 1. May be internally rounded + * down to nearest power of 2. + * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive + * integer that is {@literal >=} (2 * lowestDiscernibleValue). + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5. + */ + public PackedHistogram(final long lowestDiscernibleValue, final long highestTrackableValue, final int numberOfSignificantValueDigits) { + super(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits, false); + packedCounts = new PackedLongArray(countsArrayLength); + wordSizeInBytes = 8; + } + + /** + * Construct a PackedHistogram with the same range settings as a given source histogram, + * duplicating the source's start/end timestamps (but NOT its contents) + * @param source The source histogram to duplicate + */ + public PackedHistogram(final AbstractHistogram source) { + super(source, false); + packedCounts = new PackedLongArray(countsArrayLength); + wordSizeInBytes = 8; + } + + /** + * Construct a new histogram by decoding it from a ByteBuffer.
+ * @param buffer The buffer to decode from + * @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high + * @return The newly constructed histogram + */ + public static PackedHistogram decodeFromByteBuffer(final ByteBuffer buffer, + final long minBarForHighestTrackableValue) { + return (PackedHistogram) decodeFromByteBuffer(buffer, PackedHistogram.class, + minBarForHighestTrackableValue); + } + + /** + * Construct a new histogram by decoding it from a compressed form in a ByteBuffer. + * @param buffer The buffer to decode from + * @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high + * @return The newly constructed histogram + * @throws DataFormatException on error parsing/decompressing the buffer + */ + public static PackedHistogram decodeFromCompressedByteBuffer(final ByteBuffer buffer, + final long minBarForHighestTrackableValue) throws DataFormatException { + return decodeFromCompressedByteBuffer(buffer, PackedHistogram.class, minBarForHighestTrackableValue); + } + + private void readObject(final ObjectInputStream o) + throws IOException, ClassNotFoundException { + o.defaultReadObject(); + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/PercentileIterator.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/PercentileIterator.java new file mode 100644 index 000000000..779aed7d6 --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/PercentileIterator.java @@ -0,0 +1,102 @@ +/** + * Written by Gil Tene of Azul Systems, and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram; + +import java.util.Iterator; + +/** + * Used for iterating through histogram values according to percentile levels. The iteration is + * performed in steps that start at 0% and reduce their distance to 100% according to the + * percentileTicksPerHalfDistance parameter, ultimately reaching 100% when all recorded histogram + * values are exhausted. +*/ +public class PercentileIterator extends AbstractHistogramIterator implements Iterator { + int percentileTicksPerHalfDistance; + double percentileLevelToIterateTo; + double percentileLevelToIterateFrom; + boolean reachedLastRecordedValue; + + /** + * Reset iterator for re-use in a fresh iteration over the same histogram data set. + * + * @param percentileTicksPerHalfDistance The number of iteration steps per half-distance to 100%. + */ + public void reset(final int percentileTicksPerHalfDistance) { + reset(histogram, percentileTicksPerHalfDistance); + } + + private void reset(final AbstractHistogram histogram, final int percentileTicksPerHalfDistance) { + super.resetIterator(histogram); + this.percentileTicksPerHalfDistance = percentileTicksPerHalfDistance; + this.percentileLevelToIterateTo = 0.0; + this.percentileLevelToIterateFrom = 0.0; + this.reachedLastRecordedValue = false; + } + + /** + * @param histogram The histogram this iterator will operate on + * @param percentileTicksPerHalfDistance The number of equal-sized iteration steps per half-distance to 100%. 
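 + * An illustrative sketch (hedged: assumes the usual {@code percentiles} convenience method from upstream HdrHistogram is present in this vendored copy):
 + * <pre>{@code
 + * // Ticks shrink as the iteration approaches 100%
 + * for (HistogramIterationValue v : histogram.percentiles(5)) {
 + *     System.out.println(v.getPercentileLevelIteratedTo() + " -> " + v.getValueIteratedTo());
 + * }
 + * }</pre>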
+ */ + public PercentileIterator(final AbstractHistogram histogram, final int percentileTicksPerHalfDistance) { + reset(histogram, percentileTicksPerHalfDistance); + } + + @Override + public boolean hasNext() { + if (super.hasNext()) + return true; + // We want one additional last step to 100% + if (!reachedLastRecordedValue && (arrayTotalCount > 0)) { + percentileLevelToIterateTo = 100.0; + reachedLastRecordedValue = true; + return true; + } + return false; + } + + @Override + void incrementIterationLevel() { + percentileLevelToIterateFrom = percentileLevelToIterateTo; + + // The choice to maintain fixed-sized "ticks" in each half-distance to 100% [starting + // from 0%], as opposed to a "tick" size that varies with each interval, was made to + // make the steps easily comprehensible and readable to humans. The resulting percentile + // steps are much easier to browse through in a percentile distribution output, for example. + // + // We calculate the number of equal-sized "ticks" that the 0-100 range will be divided + // by at the current scale. The scale is determined by the percentile level we are + // iterating to. The following math determines the tick size for the current scale, + // and maintains a fixed tick size for the remaining "half the distance to 100%" + // [from either 0% or from the previous half-distance]. When that half-distance is + // crossed, the scale changes and the tick size is effectively cut in half. + + long percentileReportingTicks = + percentileTicksPerHalfDistance * + (long) Math.pow(2, + (long) (Math.log(100.0 / (100.0 - (percentileLevelToIterateTo))) / Math.log(2)) + 1); + percentileLevelToIterateTo += 100.0 / percentileReportingTicks; + } + + @Override + boolean reachedIterationLevel() { + if (countAtThisValue == 0) + return false; + double currentPercentile = (100.0 * (double) totalCountToCurrentIndex) / arrayTotalCount; + return (currentPercentile >= percentileLevelToIterateTo); + } + + @Override + double getPercentileIteratedTo() { + return percentileLevelToIterateTo; + } + + @Override + double getPercentileIteratedFrom() { + return percentileLevelToIterateFrom; + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/RecordedValuesIterator.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/RecordedValuesIterator.java new file mode 100644 index 000000000..f0ea08ac0 --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/RecordedValuesIterator.java @@ -0,0 +1,50 @@ +/** + * Written by Gil Tene of Azul Systems, and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram; + +import java.util.Iterator; + +/** + * Used for iterating through all recorded histogram values using the finest granularity steps supported by the + * underlying representation. The iteration steps through all non-zero recorded value counts, and terminates when + * all recorded histogram values are exhausted. + */ + +public class RecordedValuesIterator extends AbstractHistogramIterator implements Iterator { + int visitedIndex; + + /** + * Reset iterator for re-use in a fresh iteration over the same histogram data set.
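Looking back at the tick arithmetic in PercentileIterator.incrementIterationLevel() above: with percentileTicksPerHalfDistance = 5, the iterator takes 10% steps from 0%, 5% steps once past 50%, 2.5% steps past 75%, and so on, halving the step each time the remaining distance to 100% halves. A small standalone sketch that evaluates the same expression (the chosen percentile levels are illustrative):

class PercentileTickSketch {
    public static void main(String[] args) {
        int percentileTicksPerHalfDistance = 5;
        for (double p : new double[] {0.0, 50.0, 75.0, 87.5}) {
            // Same formula as incrementIterationLevel() above:
            long ticks = percentileTicksPerHalfDistance *
                    (long) Math.pow(2, (long) (Math.log(100.0 / (100.0 - p)) / Math.log(2)) + 1);
            System.out.println(p + "% -> " + ticks + " ticks, step " + (100.0 / ticks) + "%");
        }
        // Prints 10 ticks (10% steps), then 20 (5%), 40 (2.5%), 80 (1.25%).
    }
}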
+ */ + public void reset() { + reset(histogram); + } + + private void reset(final AbstractHistogram histogram) { + super.resetIterator(histogram); + visitedIndex = -1; + } + + /** + * @param histogram The histogram this iterator will operate on + */ + public RecordedValuesIterator(final AbstractHistogram histogram) { + reset(histogram); + } + + @Override + void incrementIterationLevel() { + visitedIndex = currentIndex; + } + + @Override + boolean reachedIterationLevel() { + long currentCount = histogram.getCountAtIndex(currentIndex); + return (currentCount != 0) && (visitedIndex != currentIndex); + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/Recorder.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/Recorder.java new file mode 100644 index 000000000..4005fd95f --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/Recorder.java @@ -0,0 +1,408 @@ +/** + * Written by Gil Tene of Azul Systems, and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram; + +import java.util.concurrent.atomic.AtomicLong; + +/** + * Records integer values, and provides stable interval {@link Histogram} samples from + * live recorded data without interrupting or stalling active recording of values. Each interval + * histogram provided contains all value counts accumulated since the previous interval histogram + * was taken. + *

+ * This pattern is commonly used in logging interval histogram information while recording is ongoing. + *

+ * {@link Recorder} supports concurrent + * {@link Recorder#recordValue} or + * {@link Recorder#recordValueWithExpectedInterval} calls. + * Recording calls are wait-free on architectures that support atomic increment operations, and + * are lock-free on architectures that do not. + *

+ * A common pattern for using a {@link Recorder} looks like this: + *


+ * Recorder recorder = new Recorder(2); // Two decimal point accuracy
+ * Histogram intervalHistogram = null;
+ * ...
+ * [start of some loop construct that periodically wants to grab an interval histogram]
+ *   ...
+ *   // Get interval histogram, recycling previous interval histogram:
+ *   intervalHistogram = recorder.getIntervalHistogram(intervalHistogram);
+ *   histogramLogWriter.outputIntervalHistogram(intervalHistogram);
+ *   ...
+ * [end of loop construct]
+ * 
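A runnable elaboration of the loop sketched above, with one thread recording while another samples intervals. Recorder and Histogram come from this diff; the executor setup, dummy values, and System.out reporting (standing in for the histogramLogWriter, which is not part of this hunk) are illustrative only:

import io.prometheus.client.HdrHistogram.Histogram;
import io.prometheus.client.HdrHistogram.Recorder;
import java.util.Random;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

class RecorderLoopSketch {
    public static void main(String[] args) throws InterruptedException {
        final Recorder recorder = new Recorder(2); // two significant digits
        final Random random = new Random();
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);

        // Recording may proceed concurrently with interval sampling:
        pool.scheduleAtFixedRate(new Runnable() {
            public void run() {
                recorder.recordValue(random.nextInt(1000000));
            }
        }, 0, 1, TimeUnit.MILLISECONDS);

        Histogram intervalHistogram = null;
        for (int i = 0; i < 5; i++) {
            Thread.sleep(1000);
            // Recycle the previously returned interval histogram:
            intervalHistogram = recorder.getIntervalHistogram(intervalHistogram);
            System.out.println("count=" + intervalHistogram.getTotalCount()
                    + " max=" + intervalHistogram.getMaxValue());
        }
        pool.shutdownNow();
    }
}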
+ * + */ + +public class Recorder implements ValueRecorder { + private static AtomicLong instanceIdSequencer = new AtomicLong(1); + private final long instanceId = instanceIdSequencer.getAndIncrement(); + + private final WriterReaderPhaser recordingPhaser = new WriterReaderPhaser(); + + private volatile Histogram activeHistogram; + private Histogram inactiveHistogram; + + /** + * Construct an auto-resizing {@link Recorder} with a lowest discernible value of + * 1 and an auto-adjusting highestTrackableValue. Can auto-resize to track values up to (Long.MAX_VALUE / 2). + *

+ * Depending on the value of the packed parameter {@link Recorder} can be configured to + * track value counts in a packed internal representation optimized for the typical histogram case, where recorded values are + * sparse in the value range and tend to be incremented in small unit counts. This packed representation tends + * to require significantly smaller amounts of storage when compared to unpacked representations, but can incur + * additional recording cost due to resizing and repacking operations that may + * occur as previously unrecorded values are encountered. + * + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5. + * @param packed Specifies whether the recorder will use a packed internal representation or not. + */ + public Recorder(final int numberOfSignificantValueDigits, boolean packed) { + activeHistogram = packed ? + new InternalPackedConcurrentHistogram(instanceId, numberOfSignificantValueDigits) : + new InternalConcurrentHistogram(instanceId, numberOfSignificantValueDigits); + inactiveHistogram = null; + activeHistogram.setStartTimeStamp(System.currentTimeMillis()); + } + + /** + * Construct an auto-resizing {@link Recorder} with a lowest discernible value of + * 1 and an auto-adjusting highestTrackableValue. Can auto-resize to track values up to (Long.MAX_VALUE / 2). + * + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5. + */ + public Recorder(final int numberOfSignificantValueDigits) { + this(numberOfSignificantValueDigits, false); + } + + /** + * Construct a {@link Recorder} given the highest value to be tracked and a number of significant + * decimal digits. The histogram will be constructed to implicitly track (distinguish from 0) values as low as 1. + * + * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive + * integer that is {@literal >=} 2. + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5. + */ + public Recorder(final long highestTrackableValue, + final int numberOfSignificantValueDigits) { + this(1, highestTrackableValue, numberOfSignificantValueDigits); + } + + /** + * Construct a {@link Recorder} given the Lowest and highest values to be tracked and a number + * of significant decimal digits. Providing a lowestDiscernibleValue is useful in situations where the units used + * for the histogram's values are much smaller than the minimal accuracy required. E.g. when tracking + * time values stated in nanosecond units, where the minimal accuracy required is a microsecond, the + * proper value for lowestDiscernibleValue would be 1000. + * + * @param lowestDiscernibleValue The lowest value that can be tracked (distinguished from 0) by the histogram. + * Must be a positive integer that is {@literal >=} 1. May be internally rounded + * down to nearest power of 2. + * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive + * integer that is {@literal >=} (2 * lowestDiscernibleValue).
+ * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5. + */ + public Recorder(final long lowestDiscernibleValue, + final long highestTrackableValue, + final int numberOfSignificantValueDigits) { + activeHistogram = new InternalAtomicHistogram( + instanceId, lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits); + inactiveHistogram = null; + activeHistogram.setStartTimeStamp(System.currentTimeMillis()); + } + + /** + * Record a value + * @param value the value to record + * @throws ArrayIndexOutOfBoundsException (may throw) if value exceeds highestTrackableValue + */ + @Override + public void recordValue(final long value) throws ArrayIndexOutOfBoundsException { + long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter(); + try { + activeHistogram.recordValue(value); + } finally { + recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter); + } + } + + /** + * Record a value in the histogram (adding to the value's current count) + * + * @param value The value to be recorded + * @param count The number of occurrences of this value to record + * @throws ArrayIndexOutOfBoundsException (may throw) if value exceeds highestTrackableValue + */ + @Override + public void recordValueWithCount(final long value, final long count) throws ArrayIndexOutOfBoundsException { + long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter(); + try { + activeHistogram.recordValueWithCount(value, count); + } finally { + recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter); + } + } + + /** + * Record a value + *

+ * To compensate for the loss of sampled values when a recorded value is larger than the expected + * interval between value samples, Histogram will auto-generate an additional series of decreasingly-smaller + * (down to the expectedIntervalBetweenValueSamples) value records. + *
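Concretely: with an expected interval of 10 ms between samples, a single stalled observation of 50 ms also yields auto-generated records of 40, 30, 20, and 10 ms, so the stall is not under-counted in the percentiles. A minimal sketch (nanosecond units assumed; the cross-reference below gives the full semantics):

import io.prometheus.client.HdrHistogram.Recorder;

class CoordinatedOmissionSketch {
    public static void main(String[] args) {
        Recorder recorder = new Recorder(2);
        long expectedIntervalNs = 10000000L; // one sample expected every 10 ms
        // A 50 ms stall: records 50 ms, then 40, 30, 20 and 10 ms on its behalf.
        recorder.recordValueWithExpectedInterval(50000000L, expectedIntervalNs);
        System.out.println(recorder.getIntervalHistogram().getTotalCount()); // prints 5
    }
}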

+ * See related notes {@link AbstractHistogram#recordValueWithExpectedInterval(long, long)} + * for more explanations about coordinated omission and expected interval correction. + * + * @param value The value to record + * @param expectedIntervalBetweenValueSamples If expectedIntervalBetweenValueSamples is larger than 0, add + * auto-generated value records as appropriate if value is larger + * than expectedIntervalBetweenValueSamples + * @throws ArrayIndexOutOfBoundsException (may throw) if value exceeds highestTrackableValue + */ + @Override + public void recordValueWithExpectedInterval(final long value, final long expectedIntervalBetweenValueSamples) + throws ArrayIndexOutOfBoundsException { + long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter(); + try { + activeHistogram.recordValueWithExpectedInterval(value, expectedIntervalBetweenValueSamples); + } finally { + recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter); + } + } + + /** + * Get a new instance of an interval histogram, which will include a stable, consistent view of all value + * counts accumulated since the last interval histogram was taken. + *

+ * Calling {@code getIntervalHistogram()} will reset + * the value counts, and start accumulating value counts for the next interval. + * + * @return a histogram containing the value counts accumulated since the last interval histogram was taken. + */ + public synchronized Histogram getIntervalHistogram() { + return getIntervalHistogram(null); + } + + /** + * Get an interval histogram, which will include a stable, consistent view of all value counts + * accumulated since the last interval histogram was taken. + *

+ * {@code getIntervalHistogram(histogramToRecycle)} + * accepts a previously returned interval histogram that can be recycled internally to avoid allocation + * and content copying operations, and is therefore significantly more efficient for repeated use than + * {@link Recorder#getIntervalHistogram()} and + * {@link Recorder#getIntervalHistogramInto getIntervalHistogramInto()}. The provided + * {@code histogramToRecycle} must + * either be null or an interval histogram returned by a previous call to + * {@code getIntervalHistogram(histogramToRecycle)} or + * {@link Recorder#getIntervalHistogram()}. + *

+ * NOTE: The caller is responsible for not recycling the same returned interval histogram more than once. If + * the same interval histogram instance is recycled more than once, behavior is undefined. + *

+ * Calling {@code getIntervalHistogram(histogramToRecycle)} will reset the value counts, and start + * accumulating value counts for the next interval + * + * @param histogramToRecycle a previously returned interval histogram (from this instance of + * {@link Recorder}) that may be recycled to avoid allocation and + * copy operations. + * @return a histogram containing the value counts accumulated since the last interval histogram was taken. + */ + public synchronized Histogram getIntervalHistogram(Histogram histogramToRecycle) { + return getIntervalHistogram(histogramToRecycle, true); + } + + /** + * Get an interval histogram, which will include a stable, consistent view of all value counts + * accumulated since the last interval histogram was taken. + *

+ * {@link Recorder#getIntervalHistogram(Histogram histogramToRecycle) + * getIntervalHistogram(histogramToRecycle)} + * accepts a previously returned interval histogram that can be recycled internally to avoid allocation + * and content copying operations, and is therefore significantly more efficient for repeated use than + * {@link Recorder#getIntervalHistogram()} and + * {@link Recorder#getIntervalHistogramInto getIntervalHistogramInto()}. The provided + * {@code histogramToRecycle} must + * either be null or an interval histogram returned by a previous call to + * {@link Recorder#getIntervalHistogram(Histogram histogramToRecycle) + * getIntervalHistogram(histogramToRecycle)} or + * {@link Recorder#getIntervalHistogram()}. + *

+ * NOTE: The caller is responsible for not recycling the same returned interval histogram more than once. If + * the same interval histogram instance is recycled more than once, behavior is undefined. + *

+ * Calling {@link Recorder#getIntervalHistogram(Histogram histogramToRecycle) + * getIntervalHistogram(histogramToRecycle)} will reset the value counts, and start accumulating value + * counts for the next interval. + * + * @param histogramToRecycle a previously returned interval histogram that may be recycled to avoid allocation and + * copy operations. + * @param enforceContainingInstance if true, will only allow recycling of histograms previously returned from this + * instance of {@link Recorder}. If false, will allow recycling histograms + * previously returned by other instances of {@link Recorder}. + * @return a histogram containing the value counts accumulated since the last interval histogram was taken. + */ + public synchronized Histogram getIntervalHistogram(Histogram histogramToRecycle, + boolean enforceContainingInstance) { + // Verify that replacement histogram can validly be used as an inactive histogram replacement: + validateFitAsReplacementHistogram(histogramToRecycle, enforceContainingInstance); + inactiveHistogram = histogramToRecycle; + performIntervalSample(); + Histogram sampledHistogram = inactiveHistogram; + inactiveHistogram = null; // Once we expose the sample, we can't reuse it internally until it is recycled + return sampledHistogram; + } + + /** + * Place a copy of the value counts accumulated (since the last interval histogram + * was taken) into {@code targetHistogram}. + * + * Calling {@code getIntervalHistogramInto(targetHistogram)} will reset + * the value counts, and start accumulating value counts for the next interval. + * + * @param targetHistogram the histogram into which the interval histogram's data should be copied + */ + public synchronized void getIntervalHistogramInto(Histogram targetHistogram) { + performIntervalSample(); + inactiveHistogram.copyInto(targetHistogram); + } + + /** + * Reset any value counts accumulated thus far. + */ + @Override + public synchronized void reset() { + // the currently inactive histogram is reset each time we flip.
So flipping twice resets both: + performIntervalSample(); + performIntervalSample(); + } + + private void performIntervalSample() { + try { + recordingPhaser.readerLock(); + + // Make sure we have an inactive version to flip in: + if (inactiveHistogram == null) { + if (activeHistogram instanceof InternalAtomicHistogram) { + inactiveHistogram = new InternalAtomicHistogram( + instanceId, + activeHistogram.getLowestDiscernibleValue(), + activeHistogram.getHighestTrackableValue(), + activeHistogram.getNumberOfSignificantValueDigits()); + } else if (activeHistogram instanceof InternalConcurrentHistogram) { + inactiveHistogram = new InternalConcurrentHistogram( + instanceId, + activeHistogram.getNumberOfSignificantValueDigits()); + } else if (activeHistogram instanceof InternalPackedConcurrentHistogram) { + inactiveHistogram = new InternalPackedConcurrentHistogram( + instanceId, + activeHistogram.getNumberOfSignificantValueDigits()); + } else { + throw new IllegalStateException("Unexpected internal histogram type for activeHistogram"); + } + } + + inactiveHistogram.reset(); + + // Swap active and inactive histograms: + final Histogram tempHistogram = inactiveHistogram; + inactiveHistogram = activeHistogram; + activeHistogram = tempHistogram; + + // Mark end time of previous interval and start time of new one: + long now = System.currentTimeMillis(); + activeHistogram.setStartTimeStamp(now); + inactiveHistogram.setEndTimeStamp(now); + + // Make sure we are not in the middle of recording a value on the previously active histogram: + + // Flip phase to make sure no recordings that were in flight pre-flip are still active: + recordingPhaser.flipPhase(500000L /* yield in 0.5 msec units if needed */); + } finally { + recordingPhaser.readerUnlock(); + } + } + + private static class InternalAtomicHistogram extends AtomicHistogram { + private final long containingInstanceId; + + private InternalAtomicHistogram(long id, + long lowestDiscernibleValue, + long highestTrackableValue, + int numberOfSignificantValueDigits) { + super(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits); + this.containingInstanceId = id; + } + } + + private static class InternalConcurrentHistogram extends ConcurrentHistogram { + private final long containingInstanceId; + + private InternalConcurrentHistogram(long id, int numberOfSignificantValueDigits) { + super(numberOfSignificantValueDigits); + this.containingInstanceId = id; + } + } + + private static class InternalPackedConcurrentHistogram extends PackedConcurrentHistogram { + private final long containingInstanceId; + + private InternalPackedConcurrentHistogram(long id, int numberOfSignificantValueDigits) { + super(numberOfSignificantValueDigits); + this.containingInstanceId = id; + } + } + + private void validateFitAsReplacementHistogram(Histogram replacementHistogram, + boolean enforceContainingInstance) { + boolean bad = true; + if (replacementHistogram == null) { + bad = false; + } else if (replacementHistogram instanceof InternalAtomicHistogram) { + if ((activeHistogram instanceof InternalAtomicHistogram) + && + ((!enforceContainingInstance) || + (((InternalAtomicHistogram)replacementHistogram).containingInstanceId == + ((InternalAtomicHistogram)activeHistogram).containingInstanceId) + )) { + bad = false; + } + } else if (replacementHistogram instanceof InternalConcurrentHistogram) { + if ((activeHistogram instanceof InternalConcurrentHistogram) + && + ((!enforceContainingInstance) || + 
(((InternalConcurrentHistogram)replacementHistogram).containingInstanceId == + ((InternalConcurrentHistogram)activeHistogram).containingInstanceId) + )) { + bad = false; + } + } else if (replacementHistogram instanceof InternalPackedConcurrentHistogram) { + if ((activeHistogram instanceof InternalPackedConcurrentHistogram) + && + ((!enforceContainingInstance) || + (((InternalPackedConcurrentHistogram)replacementHistogram).containingInstanceId == + ((InternalPackedConcurrentHistogram)activeHistogram).containingInstanceId) + )) { + bad = false; + } + } + if (bad) { + throw new IllegalArgumentException("replacement histogram must have been obtained via a previous" + + " getIntervalHistogram() call from this " + this.getClass().getName() + + (enforceContainingInstance ? " instance" : " class")); + } + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/ShortCountsHistogram.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/ShortCountsHistogram.java new file mode 100644 index 000000000..f6dba64fc --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/ShortCountsHistogram.java @@ -0,0 +1,261 @@ +/** + * Written by Gil Tene of Azul Systems, and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.zip.DataFormatException; + +/** + *

<h3>A High Dynamic Range (HDR) Histogram using a {@code short} count type</h3>
+ * <p>
+ * See package description for {@link org.HdrHistogram} for details. + */ + +public class ShortCountsHistogram extends AbstractHistogram { + long totalCount; + short[] counts; + int normalizingIndexOffset; + + @Override + long getCountAtIndex(final int index) { + return counts[normalizeIndex(index, normalizingIndexOffset, countsArrayLength)]; + } + + @Override + long getCountAtNormalizedIndex(final int index) { + return counts[index]; + } + + @Override + void incrementCountAtIndex(final int index) { + int normalizedIndex = normalizeIndex(index, normalizingIndexOffset, countsArrayLength); + short currentCount = counts[normalizedIndex]; + short newCount = (short) (currentCount + 1); + if (newCount < 0) { + throw new IllegalStateException("would overflow short integer count"); + } + counts[normalizedIndex] = newCount; + } + + @Override + void addToCountAtIndex(final int index, final long value) { + int normalizedIndex = normalizeIndex(index, normalizingIndexOffset, countsArrayLength); + long currentCount = counts[normalizedIndex]; + long newCount = (currentCount + value); + if ((newCount < Short.MIN_VALUE) || (newCount > Short.MAX_VALUE)) { + throw new IllegalStateException("would overflow short integer count"); + } + counts[normalizedIndex] = (short) newCount; + } + + @Override + void setCountAtIndex(int index, long value) { + setCountAtNormalizedIndex(normalizeIndex(index, normalizingIndexOffset, countsArrayLength), value); + } + + @Override + void setCountAtNormalizedIndex(int index, long value) { + if ((value < 0) || (value > Short.MAX_VALUE)) { + throw new IllegalStateException("would overflow short integer count"); + } + counts[index] = (short) value; + } + + @Override + int getNormalizingIndexOffset() { + return normalizingIndexOffset; + } + + @Override + void setNormalizingIndexOffset(int normalizingIndexOffset) { + this.normalizingIndexOffset = normalizingIndexOffset; + } + + @Override + void setIntegerToDoubleValueConversionRatio(double integerToDoubleValueConversionRatio) { + nonConcurrentSetIntegerToDoubleValueConversionRatio(integerToDoubleValueConversionRatio); + } + + @Override + void shiftNormalizingIndexByOffset(int offsetToAdd, + boolean lowestHalfBucketPopulated, + double newIntegerToDoubleValueConversionRatio) { + nonConcurrentNormalizingIndexShift(offsetToAdd, lowestHalfBucketPopulated); + } + + @Override + void clearCounts() { + Arrays.fill(counts, (short) 0); + totalCount = 0; + } + + @Override public ShortCountsHistogram copy() { + ShortCountsHistogram copy = new ShortCountsHistogram(this); + copy.add(this); + return copy; + } + + @Override + public ShortCountsHistogram copyCorrectedForCoordinatedOmission(final long expectedIntervalBetweenValueSamples) { + ShortCountsHistogram toHistogram = new ShortCountsHistogram(this); + toHistogram.addWhileCorrectingForCoordinatedOmission(this, expectedIntervalBetweenValueSamples); + return toHistogram; + } + + @Override + public long getTotalCount() { + return totalCount; + } + + @Override + void setTotalCount(final long totalCount) { + this.totalCount = totalCount; + } + + @Override + void incrementTotalCount() { + totalCount++; + } + + @Override + void addToTotalCount(long value) { + totalCount += value; + } + + @Override + int _getEstimatedFootprintInBytes() { + return (512 + (2 * counts.length)); + } + + @Override + void resize(long newHighestTrackableValue) { + int oldNormalizedZeroIndex = normalizeIndex(0, normalizingIndexOffset, countsArrayLength); + + establishSize(newHighestTrackableValue); + + int countsDelta = 
countsArrayLength - counts.length; + + counts = Arrays.copyOf(counts, countsArrayLength); + + if (oldNormalizedZeroIndex != 0) { + // We need to shift the stuff from the zero index and up to the end of the array: + int newNormalizedZeroIndex = oldNormalizedZeroIndex + countsDelta; + int lengthToCopy = (countsArrayLength - countsDelta) - oldNormalizedZeroIndex; + System.arraycopy(counts, oldNormalizedZeroIndex, counts, newNormalizedZeroIndex, lengthToCopy); + Arrays.fill(counts, oldNormalizedZeroIndex, newNormalizedZeroIndex, (short) 0); + } + } + + /** + * Construct an auto-resizing ShortCountsHistogram with a lowest discernible value of 1 and an auto-adjusting + * highestTrackableValue. Can auto-resize to track values up to (Long.MAX_VALUE / 2). + * + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5. + */ + public ShortCountsHistogram(final int numberOfSignificantValueDigits) { + this(1, 2, numberOfSignificantValueDigits); + setAutoResize(true); + } + + /** + * Construct a ShortCountsHistogram given the Highest value to be tracked and a number of significant decimal + * digits. The histogram will be constructed to implicitly track (distinguish from 0) values as low as 1. + * + * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive + * integer that is {@literal >=} 2. + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5. + */ + public ShortCountsHistogram(final long highestTrackableValue, final int numberOfSignificantValueDigits) { + this(1, highestTrackableValue, numberOfSignificantValueDigits); + } + + /** + * Construct a ShortCountsHistogram given the Lowest and Highest values to be tracked and a number of significant + * decimal digits. Providing a lowestDiscernibleValue is useful in situations where the units used + * for the histogram's values are much smaller than the minimal accuracy required. E.g. when tracking + * time values stated in nanosecond units, where the minimal accuracy required is a microsecond, the + * proper value for lowestDiscernibleValue would be 1000. + * + * @param lowestDiscernibleValue The lowest value that can be tracked (distinguished from 0) by the histogram. + * Must be a positive integer that is {@literal >=} 1. May be internally rounded + * down to nearest power of 2. + * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive + * integer that is {@literal >=} (2 * lowestDiscernibleValue). + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5.
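The cost of the 2-byte-per-bucket footprint above is a per-bucket cap of Short.MAX_VALUE (32767): the incrementCountAtIndex and addToCountAtIndex overrides throw IllegalStateException rather than silently wrapping. A minimal sketch; recordValue and recordValueWithCount are inherited from the vendored AbstractHistogram, which this hunk does not show:

import io.prometheus.client.HdrHistogram.ShortCountsHistogram;

class ShortCountsOverflowSketch {
    public static void main(String[] args) {
        ShortCountsHistogram histogram = new ShortCountsHistogram(3); // auto-resizing
        histogram.recordValueWithCount(1000L, 32767L); // fills one bucket to Short.MAX_VALUE
        try {
            histogram.recordValue(1000L); // one more count would overflow the short bucket
        } catch (IllegalStateException expected) {
            System.out.println(expected.getMessage()); // "would overflow short integer count"
        }
    }
}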
+ */ + public ShortCountsHistogram(final long lowestDiscernibleValue, final long highestTrackableValue, + final int numberOfSignificantValueDigits) { + super(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits); + counts = new short[countsArrayLength]; + wordSizeInBytes = 2; + } + + /** + * Construct a histogram with the same range settings as a given source histogram, + * duplicating the source's start/end timestamps (but NOT its contents) + * @param source The source histogram to duplicate + */ + public ShortCountsHistogram(final AbstractHistogram source) { + super(source); + counts = new short[countsArrayLength]; + wordSizeInBytes = 2; + } + + /** + * Construct a new histogram by decoding it from a ByteBuffer. + * @param buffer The buffer to decode from + * @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high + * @return The newly constructed histogram + */ + public static ShortCountsHistogram decodeFromByteBuffer(final ByteBuffer buffer, + final long minBarForHighestTrackableValue) { + return decodeFromByteBuffer(buffer, ShortCountsHistogram.class, minBarForHighestTrackableValue); + } + + /** + * Construct a new histogram by decoding it from a compressed form in a ByteBuffer. + * @param buffer The buffer to decode from + * @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high + * @return The newly constructed histogram + * @throws DataFormatException on error parsing/decompressing the buffer + */ + public static ShortCountsHistogram decodeFromCompressedByteBuffer(final ByteBuffer buffer, + final long minBarForHighestTrackableValue) + throws DataFormatException { + return decodeFromCompressedByteBuffer(buffer, ShortCountsHistogram.class, minBarForHighestTrackableValue); + } + + /** + * Construct a new ShortCountsHistogram by decoding it from a String containing a base64 encoded + * compressed histogram representation. + * + * @param base64CompressedHistogramString A string containing a base64 encoding of a compressed histogram + * @return A ShortCountsHistogram decoded from the string + * @throws DataFormatException on error parsing/decompressing the input + */ + public static ShortCountsHistogram fromString(final String base64CompressedHistogramString) + throws DataFormatException { + return decodeFromCompressedByteBuffer( + ByteBuffer.wrap(Base64Helper.parseBase64Binary(base64CompressedHistogramString)), + 0); + } + + private void readObject(final ObjectInputStream o) + throws IOException, ClassNotFoundException { + o.defaultReadObject(); + } +} \ No newline at end of file diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/SingleWriterDoubleRecorder.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/SingleWriterDoubleRecorder.java new file mode 100644 index 000000000..82377e93d --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/SingleWriterDoubleRecorder.java @@ -0,0 +1,360 @@ +/** + * Written by Gil Tene of Azul Systems, and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram; + +import java.util.concurrent.atomic.AtomicLong; + +/** + * Records floating point values, and provides stable interval {@link DoubleHistogram} samples from live recorded data + * without interrupting or stalling active recording of values.
Each interval histogram provided contains all + * value counts accumulated since the previous interval histogram was taken. + *

+ * This pattern is commonly used in logging interval histogram information while recording is ongoing. + *

+ * {@link SingleWriterDoubleRecorder} expects only a single thread (the "single writer") to + * call {@link SingleWriterDoubleRecorder#recordValue} or + * {@link SingleWriterDoubleRecorder#recordValueWithExpectedInterval} at any point in time. + * It DOES NOT support concurrent recording calls. + * Recording calls are wait-free on architectures that support atomic increment operations, and + * are lock-free on architectures that do not. + *

+ * A common pattern for using a {@link SingleWriterDoubleRecorder} looks like this: + *


+ * SingleWriterDoubleRecorder recorder = new SingleWriterDoubleRecorder(2); // Two decimal point accuracy
+ * DoubleHistogram intervalHistogram = null;
+ * ...
+ * [start of some loop construct that periodically wants to grab an interval histogram]
+ *   ...
+ *   // Get interval histogram, recycling previous interval histogram:
+ *   intervalHistogram = recorder.getIntervalHistogram(intervalHistogram);
+ *   histogramLogWriter.outputIntervalHistogram(intervalHistogram);
+ *   ...
+ * [end of loop construct]
+ * 
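Compared with the concurrent Recorder earlier in this diff, the single-writer variants omit per-record synchronization guarantees and push that burden to the caller: all record* calls must come from one thread (or be serialized externally), while interval sampling may still happen on another thread. A hedged sketch; DoubleHistogram#getValueAtPercentile comes from the vendored histogram classes, not this hunk, and the values are illustrative:

import io.prometheus.client.HdrHistogram.DoubleHistogram;
import io.prometheus.client.HdrHistogram.SingleWriterDoubleRecorder;

class SingleWriterSketch {
    public static void main(String[] args) {
        SingleWriterDoubleRecorder recorder = new SingleWriterDoubleRecorder(2);
        // OK: a single dedicated thread records values...
        for (int i = 1; i <= 1000; i++) {
            recorder.recordValue(i * 0.5); // e.g. response times in milliseconds
        }
        // ...while a different thread may still take interval samples safely:
        DoubleHistogram interval = recorder.getIntervalHistogram();
        System.out.println("p95 = " + interval.getValueAtPercentile(95.0) + " ms");
        // NOT OK: recordValue from two threads at once; use Recorder for that.
    }
}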
+ */ + +public class SingleWriterDoubleRecorder implements DoubleValueRecorder { + private static AtomicLong instanceIdSequencer = new AtomicLong(1); + private final long instanceId = instanceIdSequencer.getAndIncrement(); + + private final WriterReaderPhaser recordingPhaser = new WriterReaderPhaser(); + + private volatile DoubleHistogram activeHistogram; + private DoubleHistogram inactiveHistogram; + + /** + * Construct an auto-resizing {@link SingleWriterDoubleRecorder} using a precision stated as a + * number of significant decimal digits. + *

+ * Depending on the value of the packed parameter {@link SingleWriterDoubleRecorder} can + * be configured to track value counts in a packed internal representation optimized for the typical histogram case, where + * recorded values are sparse in the value range and tend to be incremented in small unit counts. This packed + * representation tends to require significantly smaller amounts of storage when compared to unpacked + * representations, but can incur additional recording cost due to resizing and repacking operations that may + * occur as previously unrecorded values are encountered. + * + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5. + * @param packed Specifies whether the recorder will use a packed internal representation or not. + */ + public SingleWriterDoubleRecorder(final int numberOfSignificantValueDigits, final boolean packed) { + activeHistogram = packed ? + new PackedInternalDoubleHistogram(instanceId, numberOfSignificantValueDigits) : + new InternalDoubleHistogram(instanceId, numberOfSignificantValueDigits); + inactiveHistogram = null; + activeHistogram.setStartTimeStamp(System.currentTimeMillis()); + } + + /** + * Construct an auto-resizing {@link SingleWriterDoubleRecorder} using a precision stated as a + * number of significant decimal digits. + * + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5. + */ + public SingleWriterDoubleRecorder(final int numberOfSignificantValueDigits) { + this(numberOfSignificantValueDigits, false); + } + + /** + * Construct a {@link SingleWriterDoubleRecorder} given the dynamic range of values to cover and a number + * of significant decimal digits. + * + * @param highestToLowestValueRatio specifies the dynamic range to use (as a ratio) + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5.
+ */ + public SingleWriterDoubleRecorder(final long highestToLowestValueRatio, + final int numberOfSignificantValueDigits) { + activeHistogram = new InternalDoubleHistogram( + instanceId, highestToLowestValueRatio, numberOfSignificantValueDigits); + inactiveHistogram = null; + activeHistogram.setStartTimeStamp(System.currentTimeMillis()); + } + + /** + * Record a value + * @param value the value to record + * @throws ArrayIndexOutOfBoundsException (may throw) if value exceeds highestTrackableValue + */ + public void recordValue(final double value) { + long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter(); + try { + activeHistogram.recordValue(value); + } finally { + recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter); + } + } + + /** + * Record a value in the histogram (adding to the value's current count) + * + * @param value The value to be recorded + * @param count The number of occurrences of this value to record + * @throws ArrayIndexOutOfBoundsException (may throw) if value exceeds highestTrackableValue + */ + public void recordValueWithCount(final double value, final long count) throws ArrayIndexOutOfBoundsException { + long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter(); + try { + activeHistogram.recordValueWithCount(value, count); + } finally { + recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter); + } + } + + /** + * Record a value + *

+ * To compensate for the loss of sampled values when a recorded value is larger than the expected + * interval between value samples, Histogram will auto-generate an additional series of decreasingly-smaller + * (down to the expectedIntervalBetweenValueSamples) value records. + *

+ * See related notes {@link DoubleHistogram#recordValueWithExpectedInterval(double, double)} + * for more explanations about coordinated omission and expected interval correction. + * + * @param value The value to record + * @param expectedIntervalBetweenValueSamples If expectedIntervalBetweenValueSamples is larger than 0, add + * auto-generated value records as appropriate if value is larger + * than expectedIntervalBetweenValueSamples + * @throws ArrayIndexOutOfBoundsException (may throw) if value exceeds highestTrackableValue + */ + public void recordValueWithExpectedInterval(final double value, final double expectedIntervalBetweenValueSamples) + throws ArrayIndexOutOfBoundsException { + long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter(); + try { + activeHistogram.recordValueWithExpectedInterval(value, expectedIntervalBetweenValueSamples); + } finally { + recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter); + } + } + + /** + * Get a new instance of an interval histogram, which will include a stable, consistent view of all value + * counts accumulated since the last interval histogram was taken. + *

+ * Calling {@code getIntervalHistogram()} will reset + * the value counts, and start accumulating value counts for the next interval. + * + * @return a histogram containing the value counts accumulated since the last interval histogram was taken. + */ + public synchronized DoubleHistogram getIntervalHistogram() { + return getIntervalHistogram(null); + } + + /** + * Get an interval histogram, which will include a stable, consistent view of all value counts + * accumulated since the last interval histogram was taken. + *

+ * {@code getIntervalHistogram(histogramToRecycle)} + * accepts a previously returned interval histogram that can be recycled internally to avoid allocation + * and content copying operations, and is therefore significantly more efficient for repeated use than + * {@link SingleWriterDoubleRecorder#getIntervalHistogram()} and + * {@link SingleWriterDoubleRecorder#getIntervalHistogramInto getIntervalHistogramInto()}. The + * provided {@code histogramToRecycle} must + * either be null or an interval histogram returned by a previous call to + * {@code getIntervalHistogram(histogramToRecycle)} or + * {@link SingleWriterDoubleRecorder#getIntervalHistogram()}. + *

+ * NOTE: The caller is responsible for not recycling the same returned interval histogram more than once. If + * the same interval histogram instance is recycled more than once, behavior is undefined. + *

+ * Calling + * {@code getIntervalHistogram(histogramToRecycle)} will reset the value counts, and start accumulating value + * counts for the next interval + * + * @param histogramToRecycle a previously returned interval histogram (from this instance of + * {@link SingleWriterDoubleRecorder}) that may be recycled to avoid allocation and + * copy operations. + * @return a histogram containing the value counts accumulated since the last interval histogram was taken. + */ + public synchronized DoubleHistogram getIntervalHistogram(DoubleHistogram histogramToRecycle) { + return getIntervalHistogram(histogramToRecycle, true); + } + + /** + * Get an interval histogram, which will include a stable, consistent view of all value counts + * accumulated since the last interval histogram was taken. + *

+ * {@link SingleWriterDoubleRecorder#getIntervalHistogram(DoubleHistogram histogramToRecycle) + * getIntervalHistogram(histogramToRecycle)} + * accepts a previously returned interval histogram that can be recycled internally to avoid allocation + * and content copying operations, and is therefore significantly more efficient for repeated use than + * {@link SingleWriterDoubleRecorder#getIntervalHistogram()} and + * {@link SingleWriterDoubleRecorder#getIntervalHistogramInto getIntervalHistogramInto()}. The + * provided {@code histogramToRecycle} must + * either be null or an interval histogram returned by a previous call to + * {@link SingleWriterDoubleRecorder#getIntervalHistogram(DoubleHistogram histogramToRecycle) + * getIntervalHistogram(histogramToRecycle)} or + * {@link SingleWriterDoubleRecorder#getIntervalHistogram()}. + *

+ * NOTE: The caller is responsible for not recycling the same returned interval histogram more than once. If + * the same interval histogram instance is recycled more than once, behavior is undefined. + *

+ * Calling + * {@link SingleWriterDoubleRecorder#getIntervalHistogram(DoubleHistogram histogramToRecycle) + * getIntervalHistogram(histogramToRecycle)} will reset the value counts, and start accumulating value + * counts for the next interval. + * + * @param histogramToRecycle a previously returned interval histogram that may be recycled to avoid allocation and + * copy operations. + * @param enforceContainingInstance if true, will only allow recycling of histograms previously returned from this + * instance of {@link SingleWriterDoubleRecorder}. If false, will allow recycling histograms + * previously returned by other instances of {@link SingleWriterDoubleRecorder}. + * @return a histogram containing the value counts accumulated since the last interval histogram was taken. + */ + public synchronized DoubleHistogram getIntervalHistogram(DoubleHistogram histogramToRecycle, + boolean enforceContainingInstance) { + // Verify that replacement histogram can validly be used as an inactive histogram replacement: + validateFitAsReplacementHistogram(histogramToRecycle, enforceContainingInstance); + inactiveHistogram = histogramToRecycle; + performIntervalSample(); + DoubleHistogram sampledHistogram = inactiveHistogram; + inactiveHistogram = null; // Once we expose the sample, we can't reuse it internally until it is recycled + return sampledHistogram; + } + + /** + * Place a copy of the value counts accumulated (since the last interval histogram + * was taken) into {@code targetHistogram}. + * + * Calling {@code getIntervalHistogramInto(targetHistogram)} will + * reset the value counts, and start accumulating value counts for the next interval. + * + * @param targetHistogram the histogram into which the interval histogram's data should be copied + */ + public synchronized void getIntervalHistogramInto(DoubleHistogram targetHistogram) { + performIntervalSample(); + inactiveHistogram.copyInto(targetHistogram); + } + + /** + * Reset any value counts accumulated thus far. + */ + public synchronized void reset() { + // the currently inactive histogram is reset each time we flip.
So flipping twice resets both: + performIntervalSample(); + performIntervalSample(); + } + + private void performIntervalSample() { + try { + recordingPhaser.readerLock(); + + // Make sure we have an inactive version to flip in: + if (inactiveHistogram == null) { + if (activeHistogram instanceof InternalDoubleHistogram) { + inactiveHistogram = new InternalDoubleHistogram((InternalDoubleHistogram) activeHistogram); + } else if (activeHistogram instanceof PackedInternalDoubleHistogram) { + inactiveHistogram = new PackedInternalDoubleHistogram( + instanceId, activeHistogram.getNumberOfSignificantValueDigits()); + } else { + throw new IllegalStateException("Unexpected internal histogram type for activeHistogram"); + } + } + + inactiveHistogram.reset(); + + // Swap active and inactive histograms: + final DoubleHistogram tempHistogram = inactiveHistogram; + inactiveHistogram = activeHistogram; + activeHistogram = tempHistogram; + + // Mark end time of previous interval and start time of new one: + long now = System.currentTimeMillis(); + activeHistogram.setStartTimeStamp(now); + inactiveHistogram.setEndTimeStamp(now); + + // Make sure we are not in the middle of recording a value on the previously active histogram: + + // Flip phase to make sure no recordings that were in flight pre-flip are still active: + recordingPhaser.flipPhase(500000L /* yield in 0.5 msec units if needed */); + } finally { + recordingPhaser.readerUnlock(); + } + } + + private class InternalDoubleHistogram extends DoubleHistogram { + private final long containingInstanceId; + + private InternalDoubleHistogram(long id, int numberOfSignificantValueDigits) { + super(numberOfSignificantValueDigits); + this.containingInstanceId = id; + } + + private InternalDoubleHistogram(long id, + long highestToLowestValueRatio, + int numberOfSignificantValueDigits) { + super(highestToLowestValueRatio, numberOfSignificantValueDigits); + this.containingInstanceId = id; + } + + private InternalDoubleHistogram(InternalDoubleHistogram source) { + super(source); + this.containingInstanceId = source.containingInstanceId; + } + } + + private class PackedInternalDoubleHistogram extends PackedDoubleHistogram { + private final long containingInstanceId; + + private PackedInternalDoubleHistogram(long id, int numberOfSignificantValueDigits) { + super(numberOfSignificantValueDigits); + this.containingInstanceId = id; + } + } + + private void validateFitAsReplacementHistogram(DoubleHistogram replacementHistogram, + boolean enforceContainingInstance) { + boolean bad = true; + if (replacementHistogram == null) { + bad = false; + } else if ((replacementHistogram instanceof InternalDoubleHistogram) + && + ((!enforceContainingInstance) || + (((InternalDoubleHistogram) replacementHistogram).containingInstanceId == + ((InternalDoubleHistogram)activeHistogram).containingInstanceId) + )) { + bad = false; + } else if ((replacementHistogram instanceof PackedInternalDoubleHistogram) + && + ((!enforceContainingInstance) || + (((PackedInternalDoubleHistogram) replacementHistogram).containingInstanceId == + ((PackedInternalDoubleHistogram)activeHistogram).containingInstanceId) + )) { + bad = false; + } + + if (bad) { + throw new IllegalArgumentException("replacement histogram must have been obtained via a previous " + + "getIntervalHistogram() call from this " + this.getClass().getName() +" instance"); + } + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/SingleWriterRecorder.java 
b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/SingleWriterRecorder.java new file mode 100644 index 000000000..3bef21acd --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/SingleWriterRecorder.java @@ -0,0 +1,389 @@ +/** + * Written by Gil Tene of Azul Systems, and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram; + +import java.util.concurrent.atomic.AtomicLong; + +/** + * Records integer values, and provides stable interval {@link Histogram} samples from + * live recorded data without interrupting or stalling active recording of values. Each interval + * histogram provided contains all value counts accumulated since the previous interval histogram + * was taken. + *

+ * This pattern is commonly used in logging interval histogram information while recording is ongoing. + *

+ * {@link SingleWriterRecorder} expects only a single thread (the "single writer") to + * call {@link SingleWriterRecorder#recordValue} or + * {@link SingleWriterRecorder#recordValueWithExpectedInterval} at any point in time. + * It DOES NOT safely support concurrent recording calls. + * Recording calls are wait-free on architectures that support atomic increment operations, and + * are lock-free on architectures that do not. + *

+ * A common pattern for using a {@link SingleWriterRecorder} looks like this: + *


+ * SingleWriterRecorder recorder = new SingleWriterRecorder(2); // Two decimal point accuracy
+ * Histogram intervalHistogram = null;
+ * ...
+ * [start of some loop construct that periodically wants to grab an interval histogram]
+ *   ...
+ *   // Get interval histogram, recycling previous interval histogram:
+ *   intervalHistogram = recorder.getIntervalHistogram(intervalHistogram);
+ *   histogramLogWriter.outputIntervalHistogram(intervalHistogram);
+ *   ...
+ * [end of loop construct]
+ * 
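One recycling nuance shared by all three recorders in this diff: a histogram passed back for recycling must come from a previous getIntervalHistogram() call on a compatible recorder, by default from the same instance; validateFitAsReplacementHistogram otherwise throws IllegalArgumentException, and recycling the same returned histogram twice is undefined. A sketch of the correct shape:

import io.prometheus.client.HdrHistogram.Histogram;
import io.prometheus.client.HdrHistogram.SingleWriterRecorder;

class RecycleSketch {
    public static void main(String[] args) {
        SingleWriterRecorder recorder = new SingleWriterRecorder(2);
        Histogram recycled = null;
        for (int i = 0; i < 3; i++) {
            recorder.recordValue(100 + i);
            // Safe: null on the first pass, afterwards always the histogram
            // this same recorder instance returned on the previous pass.
            recycled = recorder.getIntervalHistogram(recycled);
            System.out.println("interval count = " + recycled.getTotalCount());
        }
    }
}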
+ */ + +public class SingleWriterRecorder implements ValueRecorder { + private static AtomicLong instanceIdSequencer = new AtomicLong(1); + private final long instanceId = instanceIdSequencer.getAndIncrement(); + + private final WriterReaderPhaser recordingPhaser = new WriterReaderPhaser(); + + private volatile Histogram activeHistogram; + private Histogram inactiveHistogram; + + /** + * Construct an auto-resizing {@link SingleWriterRecorder} with a lowest discernible value of + * 1 and an auto-adjusting highestTrackableValue. Can auto-resize to track values up to (Long.MAX_VALUE / 2). + *

+ * Depending on the value of the packed parameter {@link SingleWriterRecorder} can be configured to + * track value counts in a packed internal representation optimized for the typical histogram case, where recorded values are + * sparse in the value range and tend to be incremented in small unit counts. This packed representation tends + * to require significantly smaller amounts of storage when compared to unpacked representations, but can incur + * additional recording cost due to resizing and repacking operations that may + * occur as previously unrecorded values are encountered. + * + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5. + * @param packed Specifies whether the recorder will use a packed internal representation or not. + */ + public SingleWriterRecorder(final int numberOfSignificantValueDigits, final boolean packed) { + activeHistogram = packed ? + new PackedInternalHistogram(instanceId, numberOfSignificantValueDigits) : + new InternalHistogram(instanceId, numberOfSignificantValueDigits); + inactiveHistogram = null; + activeHistogram.setStartTimeStamp(System.currentTimeMillis()); + } + + /** + * Construct an auto-resizing {@link SingleWriterRecorder} with a lowest discernible value of + * 1 and an auto-adjusting highestTrackableValue. Can auto-resize to track values up to (Long.MAX_VALUE / 2). + * + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5. + */ + public SingleWriterRecorder(final int numberOfSignificantValueDigits) { + this(numberOfSignificantValueDigits, false); + } + + /** + * Construct a {@link SingleWriterRecorder} given the highest value to be tracked and a number + * of significant decimal digits. The histogram will be constructed to implicitly track (distinguish from 0) + * values as low as 1. + * + * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive + * integer that is {@literal >=} 2. + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5. + */ + public SingleWriterRecorder(final long highestTrackableValue, + final int numberOfSignificantValueDigits) { + this(1, highestTrackableValue, numberOfSignificantValueDigits); + } + + /** + * Construct a {@link SingleWriterRecorder} given the Lowest and highest values to be tracked + * and a number of significant decimal digits. Providing a lowestDiscernibleValue is useful in situations where + * the units used for the histogram's values are much smaller than the minimal accuracy required. E.g. when + * tracking time values stated in nanosecond units, where the minimal accuracy required is a microsecond, the + * proper value for lowestDiscernibleValue would be 1000. + * + * @param lowestDiscernibleValue The lowest value that can be tracked (distinguished from 0) by the histogram. + * Must be a positive integer that is {@literal >=} 1. May be internally rounded + * down to nearest power of 2. + * @param highestTrackableValue The highest value to be tracked by the histogram. 
Must be a positive + * integer that is {@literal >=} (2 * lowestDiscernibleValue). + * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant + * decimal digits to which the histogram will maintain value resolution + * and separation. Must be a non-negative integer between 0 and 5. + */ + public SingleWriterRecorder(final long lowestDiscernibleValue, + final long highestTrackableValue, + final int numberOfSignificantValueDigits) { + activeHistogram = new InternalHistogram( + instanceId, lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits); + inactiveHistogram = null; + activeHistogram.setStartTimeStamp(System.currentTimeMillis()); + } + + /** + * Record a value + * @param value the value to record + * @throws ArrayIndexOutOfBoundsException (may throw) if value exceeds highestTrackableValue + */ + @Override + public void recordValue(final long value) { + long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter(); + try { + activeHistogram.recordValue(value); + } finally { + recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter); + } + } + + /** + * Record a value in the histogram (adding to the value's current count) + * + * @param value The value to be recorded + * @param count The number of occurrences of this value to record + * @throws ArrayIndexOutOfBoundsException (may throw) if value exceeds highestTrackableValue + */ + @Override + public void recordValueWithCount(final long value, final long count) throws ArrayIndexOutOfBoundsException { + long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter(); + try { + activeHistogram.recordValueWithCount(value, count); + } finally { + recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter); + } + } + + /** + * Record a value + *

+ * To compensate for the loss of sampled values when a recorded value is larger than the expected + * interval between value samples, Histogram will auto-generate an additional series of decreasingly-smaller + * (down to the expectedIntervalBetweenValueSamples) value records. + *
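+     * For example (illustrative numbers only): with an expectedIntervalBetweenValueSamples of 1000,
+     * recording a single value of 5000 would also auto-generate value records for 4000, 3000, 2000,
+     * and 1000, standing in for the interval samples that the long recorded value prevented from being taken.
+     *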

+     * See related notes {@link AbstractHistogram#recordValueWithExpectedInterval(long, long)}
+     * for more explanations about coordinated omission and expected interval correction.
+     *
+     * @param value The value to record
+     * @param expectedIntervalBetweenValueSamples If expectedIntervalBetweenValueSamples is larger than 0, add
+     *                                            auto-generated value records as appropriate if value is larger
+     *                                            than expectedIntervalBetweenValueSamples
+     * @throws ArrayIndexOutOfBoundsException (may throw) if value exceeds highestTrackableValue
+     */
+    @Override
+    public void recordValueWithExpectedInterval(final long value, final long expectedIntervalBetweenValueSamples)
+            throws ArrayIndexOutOfBoundsException {
+        long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter();
+        try {
+            activeHistogram.recordValueWithExpectedInterval(value, expectedIntervalBetweenValueSamples);
+        } finally {
+            recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter);
+        }
+    }
+
+    /**
+     * Get a new instance of an interval histogram, which will include a stable, consistent view of all value
+     * counts accumulated since the last interval histogram was taken.
+     *

+ * Calling {@code getIntervalHistogram()} will reset + * the value counts, and start accumulating value counts for the next interval. + * + * @return a histogram containing the value counts accumulated since the last interval histogram was taken. + */ + public synchronized Histogram getIntervalHistogram() { + return getIntervalHistogram(null); + } + + /** + * Get an interval histogram, which will include a stable, consistent view of all value counts + * accumulated since the last interval histogram was taken. + *

+     * {@code getIntervalHistogram(histogramToRecycle)}
+     * accepts a previously returned interval histogram that can be recycled internally to avoid allocation
+     * and content copying operations, and is therefore significantly more efficient for repeated use than
+     * {@link SingleWriterRecorder#getIntervalHistogram()} and
+     * {@link SingleWriterRecorder#getIntervalHistogramInto getIntervalHistogramInto()}. The provided
+     * {@code histogramToRecycle} must either be null or an interval histogram returned by a previous call to
+     * {@code getIntervalHistogram(histogramToRecycle)} or
+     * {@link SingleWriterRecorder#getIntervalHistogram()}.
+     *

+ * NOTE: The caller is responsible for not recycling the same returned interval histogram more than once. If + * the same interval histogram instance is recycled more than once, behavior is undefined. + *
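+     * A sketch of the intended recycling loop (the {@code recorder}, {@code running}, and {@code report}
+     * names here are illustrative, not part of this API):
+     *
+     *         SingleWriterRecorder recorder = new SingleWriterRecorder(2);
+     *         Histogram intervalHistogram = null;
+     *         while (running) {
+     *             // Returns the latest interval sample; the histogram passed in is recycled:
+     *             intervalHistogram = recorder.getIntervalHistogram(intervalHistogram);
+     *             report(intervalHistogram);
+     *         }
+     *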

+     * Calling {@code getIntervalHistogram(histogramToRecycle)} will reset the value counts, and start
+     * accumulating value counts for the next interval.
+     *
+     * @param histogramToRecycle a previously returned interval histogram (from this instance of
+     *                           {@link SingleWriterRecorder}) that may be recycled to avoid allocation and
+     *                           copy operations.
+     * @return a histogram containing the value counts accumulated since the last interval histogram was taken.
+     */
+    public synchronized Histogram getIntervalHistogram(Histogram histogramToRecycle) {
+        return getIntervalHistogram(histogramToRecycle, true);
+    }
+
+    /**
+     * Get an interval histogram, which will include a stable, consistent view of all value counts
+     * accumulated since the last interval histogram was taken.
+     *

+     * {@link SingleWriterRecorder#getIntervalHistogram(Histogram histogramToRecycle)
+     * getIntervalHistogram(histogramToRecycle)}
+     * accepts a previously returned interval histogram that can be recycled internally to avoid allocation
+     * and content copying operations, and is therefore significantly more efficient for repeated use than
+     * {@link SingleWriterRecorder#getIntervalHistogram()} and
+     * {@link SingleWriterRecorder#getIntervalHistogramInto getIntervalHistogramInto()}. The provided
+     * {@code histogramToRecycle} must either be null or an interval histogram returned by a previous call to
+     * {@link SingleWriterRecorder#getIntervalHistogram(Histogram histogramToRecycle)
+     * getIntervalHistogram(histogramToRecycle)} or
+     * {@link SingleWriterRecorder#getIntervalHistogram()}.
+     *

+ * NOTE: The caller is responsible for not recycling the same returned interval histogram more than once. If + * the same interval histogram instance is recycled more than once, behavior is undefined. + *

+     * Calling {@link SingleWriterRecorder#getIntervalHistogram(Histogram histogramToRecycle)
+     * getIntervalHistogram(histogramToRecycle)} will reset the value counts, and start accumulating value
+     * counts for the next interval.
+     *
+     * @param histogramToRecycle a previously returned interval histogram that may be recycled to avoid allocation and
+     *                           copy operations.
+     * @param enforceContainingInstance if true, will only allow recycling of histograms previously returned from this
+     *                                  instance of {@link SingleWriterRecorder}. If false, will allow recycling histograms
+     *                                  previously returned by other instances of {@link SingleWriterRecorder}.
+     * @return a histogram containing the value counts accumulated since the last interval histogram was taken.
+     */
+    public synchronized Histogram getIntervalHistogram(Histogram histogramToRecycle,
+                                                       boolean enforceContainingInstance) {
+        // Verify that replacement histogram can validly be used as an inactive histogram replacement:
+        validateFitAsReplacementHistogram(histogramToRecycle, enforceContainingInstance);
+        inactiveHistogram = histogramToRecycle;
+        performIntervalSample();
+        Histogram sampledHistogram = inactiveHistogram;
+        inactiveHistogram = null; // Once we expose the sample, we can't reuse it internally until it is recycled
+        return sampledHistogram;
+    }
+
+    /**
+     * Place a copy of the value counts accumulated (since the last interval histogram
+     * was taken) into {@code targetHistogram}.
+     *
+     * Calling {@code getIntervalHistogramInto(targetHistogram)} will reset
+     * the value counts, and start accumulating value counts for the next interval.
+     *
+     * @param targetHistogram the histogram into which the interval histogram's data should be copied
+     */
+    public synchronized void getIntervalHistogramInto(Histogram targetHistogram) {
+        performIntervalSample();
+        inactiveHistogram.copyInto(targetHistogram);
+    }
+
+    /**
+     * Reset any value counts accumulated thus far.
+     */
+    @Override
+    public synchronized void reset() {
+        // the currently inactive histogram is reset each time we flip.
So flipping twice resets both: + performIntervalSample(); + performIntervalSample(); + } + + private void performIntervalSample() { + try { + recordingPhaser.readerLock(); + + // Make sure we have an inactive version to flip in: + if (inactiveHistogram == null) { + if (activeHistogram instanceof InternalHistogram) { + inactiveHistogram = new InternalHistogram((InternalHistogram) activeHistogram); + } else if (activeHistogram instanceof PackedInternalHistogram) { + inactiveHistogram = new PackedInternalHistogram( + instanceId, activeHistogram.getNumberOfSignificantValueDigits()); + } else { + throw new IllegalStateException("Unexpected internal histogram type for activeHistogram"); + } + } + + inactiveHistogram.reset(); + + // Swap active and inactive histograms: + final Histogram tempHistogram = inactiveHistogram; + inactiveHistogram = activeHistogram; + activeHistogram = tempHistogram; + + // Mark end time of previous interval and start time of new one: + long now = System.currentTimeMillis(); + activeHistogram.setStartTimeStamp(now); + inactiveHistogram.setEndTimeStamp(now); + + // Make sure we are not in the middle of recording a value on the previously active histogram: + + // Flip phase to make sure no recordings that were in flight pre-flip are still active: + recordingPhaser.flipPhase(500000L /* yield in 0.5 msec units if needed */); + } finally { + recordingPhaser.readerUnlock(); + } + } + + private static class InternalHistogram extends Histogram { + private final long containingInstanceId; + + private InternalHistogram(long id, int numberOfSignificantValueDigits) { + super(numberOfSignificantValueDigits); + this.containingInstanceId = id; + } + + private InternalHistogram(long id, + long lowestDiscernibleValue, + long highestTrackableValue, + int numberOfSignificantValueDigits) { + super(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits); + this.containingInstanceId = id; + } + + private InternalHistogram(InternalHistogram source) { + super(source); + this.containingInstanceId = source.containingInstanceId; + } + } + + private static class PackedInternalHistogram extends PackedHistogram { + private final long containingInstanceId; + + private PackedInternalHistogram(long id, int numberOfSignificantValueDigits) { + super(numberOfSignificantValueDigits); + this.containingInstanceId = id; + } + } + + private void validateFitAsReplacementHistogram(Histogram replacementHistogram, + boolean enforceContainingInstance) { + boolean bad = true; + if (replacementHistogram == null) { + bad = false; + } else if ((replacementHistogram instanceof InternalHistogram) + && + ((!enforceContainingInstance) || + (((InternalHistogram) replacementHistogram).containingInstanceId == + ((InternalHistogram) activeHistogram).containingInstanceId) + )) { + bad = false; + } else if ((replacementHistogram instanceof PackedInternalHistogram) + && + ((!enforceContainingInstance) || + (((PackedInternalHistogram) replacementHistogram).containingInstanceId == + ((PackedInternalHistogram) activeHistogram).containingInstanceId) + )) { + bad = false; + } + + if (bad) { + throw new IllegalArgumentException("replacement histogram must have been obtained via a previous " + + "getIntervalHistogram() call from this " + this.getClass().getName() + + (enforceContainingInstance ? 
" instance" : " class")); + } + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/SynchronizedDoubleHistogram.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/SynchronizedDoubleHistogram.java new file mode 100644 index 000000000..8a8397bb4 --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/SynchronizedDoubleHistogram.java @@ -0,0 +1,464 @@ +/** + * Written by Gil Tene of Azul Systems, and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram; + +import java.io.PrintStream; +import java.nio.ByteBuffer; + +/** + *

A floating point value High Dynamic Range (HDR) Histogram that is synchronized as a whole

+ *

+ * A {@link SynchronizedDoubleHistogram} is a variant of {@link DoubleHistogram} that is
+ * synchronized as a whole, such that queries, copying, and addition operations are atomic with respect to
+ * modification of the {@link SynchronizedDoubleHistogram}, and such that external accessors (e.g. iterations on the
+ * histogram data) that synchronize on the {@link SynchronizedDoubleHistogram} instance can safely assume that no
+ * modifications to the histogram data occur within their synchronized block.
+ *

+ * It is important to note that synchronization can result in blocking recording calls. If non-blocking recording
+ * operations are required, consider using {@link ConcurrentDoubleHistogram}, or (recommended)
+ * {@link DoubleRecorder} which were intended for concurrent operations.
+ *

+ * {@link SynchronizedDoubleHistogram} supports the recording and analyzing of sampled data value counts across a
+ * configurable dynamic range of floating point (double) values, with configurable value precision within the range.
+ * Dynamic range is expressed as a ratio between the highest and lowest non-zero values trackable within the histogram
+ * at any given time. Value precision is expressed as the number of significant [decimal] digits in the value recording,
+ * and provides control over value quantization behavior across the value range and the subsequent value resolution at
+ * any given level.
+ *

+ * Auto-ranging: Unlike integer value based histograms, the specific value range tracked by a {@link
+ * SynchronizedDoubleHistogram} is not specified upfront. Only the dynamic range of values that the histogram can
+ * cover is (optionally) specified. E.g. when a {@link SynchronizedDoubleHistogram} is created to track a dynamic range of
+ * 3600000000000 (enough to track values from a nanosecond to an hour), values could be recorded into it in any
+ * consistent unit of time as long as the ratio between the highest and lowest non-zero values stays within the
+ * specified dynamic range, so recording in units of nanoseconds (1.0 thru 3600000000000.0), milliseconds (0.000001
+ * thru 3600000.0), seconds (0.000000001 thru 3600.0), or hours (1/3.6E12 thru 1.0) will all work just as well.
+ *
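+ * As a small sketch of that flexibility (the values below are illustrative):
+ *
+ *         SynchronizedDoubleHistogram histogram = new SynchronizedDoubleHistogram(3600000000000L, 3);
+ *         histogram.recordValue(0.000025); // 25 usec, recorded in seconds
+ *         histogram.recordValue(1.5);      // 1.5 sec, same unit, well within the dynamic range
+ *         double median = histogram.getValueAtPercentile(50.0);
+ *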

+ * Auto-resizing: When constructed with no specified dynamic range (or when auto-resize is turned on with {@link + * SynchronizedDoubleHistogram#setAutoResize}) a {@link SynchronizedDoubleHistogram} will auto-resize its dynamic + * range to include recorded values as they are encountered. Note that recording calls that cause auto-resizing may + * take longer to execute, as resizing incurs allocation and copying of internal data structures. + *

+ * Attempts to record non-zero values that range outside of the specified dynamic range (or exceed the limits of
+ * dynamic range when auto-resizing) may result in {@link ArrayIndexOutOfBoundsException} exceptions, either
+ * due to overflow or underflow conditions. These exceptions will only be thrown if recording the value would have
+ * resulted in discarding or losing the required value precision of values already recorded in the histogram.
+ *

+ * See package description for {@link io.prometheus.client.HdrHistogram} for details.
+ */
+
+public class SynchronizedDoubleHistogram extends DoubleHistogram {
+
+    /**
+     * Construct a new auto-resizing SynchronizedDoubleHistogram using a precision stated as a number of significant
+     * decimal digits.
+     *
+     * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
+     *                                       decimal digits to which the histogram will maintain value resolution
+     *                                       and separation. Must be a non-negative integer between 0 and 5.
+     */
+    public SynchronizedDoubleHistogram(final int numberOfSignificantValueDigits) {
+        this(2, numberOfSignificantValueDigits);
+        setAutoResize(true);
+    }
+
+    /**
+     * Construct a new SynchronizedDoubleHistogram with the specified dynamic range (provided in
+     * {@code highestToLowestValueRatio}) and using a precision stated as a number of significant
+     * decimal digits.
+     *
+     * @param highestToLowestValueRatio specifies the dynamic range to use
+     * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
+     *                                       decimal digits to which the histogram will maintain value resolution
+     *                                       and separation. Must be a non-negative integer between 0 and 5.
+     */
+    public SynchronizedDoubleHistogram(final long highestToLowestValueRatio, final int numberOfSignificantValueDigits) {
+        super(highestToLowestValueRatio, numberOfSignificantValueDigits, SynchronizedHistogram.class);
+    }
+
+    /**
+     * Construct a {@link SynchronizedDoubleHistogram} with the same range settings as a given source,
+     * duplicating the source's start/end timestamps (but NOT its contents)
+     * @param source The source histogram to duplicate
+     */
+    public SynchronizedDoubleHistogram(final ConcurrentDoubleHistogram source) {
+        super(source);
+    }
+
+    @Override
+    public synchronized boolean isAutoResize() {
+        return super.isAutoResize();
+    }
+
+    @Override
+    public synchronized void setAutoResize(boolean autoResize) {
+        super.setAutoResize(autoResize);
+    }
+
+    @Override
+    public synchronized void recordValue(final double value) throws ArrayIndexOutOfBoundsException {
+        super.recordValue(value);
+    }
+
+    @Override
+    public synchronized void recordValueWithCount(final double value, final long count) throws ArrayIndexOutOfBoundsException {
+        super.recordValueWithCount(value, count);
+    }
+
+    @Override
+    public synchronized void recordValueWithExpectedInterval(final double value, final double expectedIntervalBetweenValueSamples)
+            throws ArrayIndexOutOfBoundsException {
+        super.recordValueWithExpectedInterval(value, expectedIntervalBetweenValueSamples);
+    }
+
+    @Override
+    public synchronized void reset() {
+        super.reset();
+    }
+
+    @Override
+    public synchronized DoubleHistogram copy() {
+        final DoubleHistogram targetHistogram =
+                new DoubleHistogram(this);
+        integerValuesHistogram.copyInto(targetHistogram.integerValuesHistogram);
+        return targetHistogram;
+    }
+
+    @Override
+    public synchronized DoubleHistogram copyCorrectedForCoordinatedOmission(final double expectedIntervalBetweenValueSamples) {
+        final DoubleHistogram targetHistogram =
+                new DoubleHistogram(this);
+        targetHistogram.addWhileCorrectingForCoordinatedOmission(this, expectedIntervalBetweenValueSamples);
+        return targetHistogram;
+    }
+
+    @Override
+    public synchronized void copyInto(final DoubleHistogram targetHistogram) {
+        // Synchronize copyInto(). Avoid deadlocks by synchronizing in order of construction identity count.
+ if (integerValuesHistogram.identity < targetHistogram.integerValuesHistogram.identity) { + synchronized (this) { + synchronized (targetHistogram) { + super.copyInto(targetHistogram); + } + } + } else { + synchronized (targetHistogram) { + synchronized (this) { + super.copyInto(targetHistogram); + } + } + } + } + + @Override + public synchronized void copyIntoCorrectedForCoordinatedOmission(final DoubleHistogram targetHistogram, + final double expectedIntervalBetweenValueSamples) { + // Synchronize copyIntoCorrectedForCoordinatedOmission(). Avoid deadlocks by synchronizing in order + // of construction identity count. + if (integerValuesHistogram.identity < targetHistogram.integerValuesHistogram.identity) { + synchronized (this) { + synchronized (targetHistogram) { + super.copyIntoCorrectedForCoordinatedOmission(targetHistogram, expectedIntervalBetweenValueSamples); + } + } + } else { + synchronized (targetHistogram) { + synchronized (this) { + super.copyIntoCorrectedForCoordinatedOmission(targetHistogram, expectedIntervalBetweenValueSamples); + } + } + } + } + + @Override + public synchronized void add(final DoubleHistogram fromHistogram) throws ArrayIndexOutOfBoundsException { + // Synchronize add(). Avoid deadlocks by synchronizing in order of construction identity count. + if (integerValuesHistogram.identity < fromHistogram.integerValuesHistogram.identity) { + synchronized (this) { + synchronized (fromHistogram) { + super.add(fromHistogram); + } + } + } else { + synchronized (fromHistogram) { + synchronized (this) { + super.add(fromHistogram); + } + } + } + } + + + @Override + public synchronized void subtract(final DoubleHistogram fromHistogram) { + // Synchronize subtract(). Avoid deadlocks by synchronizing in order of construction identity count. + if (integerValuesHistogram.identity < fromHistogram.integerValuesHistogram.identity) { + synchronized (this) { + synchronized (fromHistogram) { + super.subtract(fromHistogram); + } + } + } else { + synchronized (fromHistogram) { + synchronized (this) { + super.subtract(fromHistogram); + } + } + } + } + + @Override + public synchronized void addWhileCorrectingForCoordinatedOmission(final DoubleHistogram fromHistogram, + final double expectedIntervalBetweenValueSamples) { + // Synchronize addWhileCorrectingForCoordinatedOmission(). Avoid deadlocks by synchronizing in + // order of construction identity count. 
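+        // (Each histogram carries a unique, monotonically assigned construction identity; always
+        // locking the lower identity first gives every thread the same global lock order, so two
+        // threads cross-copying between the same pair of histograms cannot deadlock.)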
+ if (integerValuesHistogram.identity < fromHistogram.integerValuesHistogram.identity) { + synchronized (this) { + synchronized (fromHistogram) { + super.addWhileCorrectingForCoordinatedOmission(fromHistogram, expectedIntervalBetweenValueSamples); + } + } + } else { + synchronized (fromHistogram) { + synchronized (this) { + super.addWhileCorrectingForCoordinatedOmission(fromHistogram, expectedIntervalBetweenValueSamples); + } + } + } + } + + @Override + public synchronized boolean equals(final Object other) { + if ( this == other ) { + return true; + } + if (other instanceof DoubleHistogram) { + DoubleHistogram otherHistogram = (DoubleHistogram) other; + if (integerValuesHistogram.identity < otherHistogram.integerValuesHistogram.identity) { + synchronized (this) { + synchronized (otherHistogram) { + return super.equals(otherHistogram); + } + } + } else { + synchronized (otherHistogram) { + synchronized (this) { + return super.equals(otherHistogram); + } + } + } + } else { + synchronized (this) { + return super.equals(other); + } + } + } + + @Override + public synchronized int hashCode() { + return super.hashCode(); + } + + @Override + public synchronized long getTotalCount() { + return super.getTotalCount(); + } + + @Override + public synchronized double getIntegerToDoubleValueConversionRatio() { + return super.getIntegerToDoubleValueConversionRatio(); + } + + @Override + public synchronized int getNumberOfSignificantValueDigits() { + return super.getNumberOfSignificantValueDigits(); + } + + @Override + public synchronized long getHighestToLowestValueRatio() { + return super.getHighestToLowestValueRatio(); + } + + @Override + public synchronized double sizeOfEquivalentValueRange(final double value) { + return super.sizeOfEquivalentValueRange(value); + } + + @Override + public synchronized double lowestEquivalentValue(final double value) { + return super.lowestEquivalentValue(value); + } + + @Override + public synchronized double highestEquivalentValue(final double value) { + return super.highestEquivalentValue(value); + } + + @Override + public synchronized double medianEquivalentValue(final double value) { + return super.medianEquivalentValue(value); + } + + @Override + public synchronized double nextNonEquivalentValue(final double value) { + return super.nextNonEquivalentValue(value); + } + + @Override + public synchronized boolean valuesAreEquivalent(final double value1, final double value2) { + return super.valuesAreEquivalent(value1, value2); + } + + @Override + public synchronized int getEstimatedFootprintInBytes() { + return super.getEstimatedFootprintInBytes(); + } + + @Override + public synchronized long getStartTimeStamp() { + return super.getStartTimeStamp(); + } + + @Override + public synchronized void setStartTimeStamp(final long timeStampMsec) { + super.setStartTimeStamp(timeStampMsec); + } + + @Override + public synchronized long getEndTimeStamp() { + return super.getEndTimeStamp(); + } + + @Override + public synchronized void setEndTimeStamp(final long timeStampMsec) { + super.setEndTimeStamp(timeStampMsec); + } + + @Override + public synchronized double getMinValue() { + return super.getMinValue(); + } + + @Override + public synchronized double getMaxValue() { + return super.getMaxValue(); + } + + @Override + public synchronized double getMinNonZeroValue() { + return super.getMinNonZeroValue(); + } + + @Override + public synchronized double getMaxValueAsDouble() { + return super.getMaxValueAsDouble(); + } + + @Override + public synchronized double getMean() { + return 
super.getMean(); + } + + @Override + public synchronized double getStdDeviation() { + return super.getStdDeviation(); + } + + @Override + public synchronized double getValueAtPercentile(final double percentile) { + return super.getValueAtPercentile(percentile); + } + + @Override + public synchronized double getPercentileAtOrBelowValue(final double value) { + return super.getPercentileAtOrBelowValue(value); + } + + @Override + public synchronized double getCountBetweenValues(final double lowValue, final double highValue) + throws ArrayIndexOutOfBoundsException { + return super.getCountBetweenValues(lowValue, highValue); + } + + @Override + public synchronized long getCountAtValue(final double value) throws ArrayIndexOutOfBoundsException { + return super.getCountAtValue(value); + } + + @Override + public synchronized Percentiles percentiles(final int percentileTicksPerHalfDistance) { + return super.percentiles(percentileTicksPerHalfDistance); + } + + @Override + public synchronized LinearBucketValues linearBucketValues(final double valueUnitsPerBucket) { + return super.linearBucketValues(valueUnitsPerBucket); + } + + @Override + public synchronized LogarithmicBucketValues logarithmicBucketValues(final double valueUnitsInFirstBucket, + final double logBase) { + return super.logarithmicBucketValues(valueUnitsInFirstBucket, logBase); + } + + @Override + public synchronized RecordedValues recordedValues() { + return super.recordedValues(); + } + + @Override + public synchronized AllValues allValues() { + return super.allValues(); + } + + @Override + public synchronized void outputPercentileDistribution(final PrintStream printStream, + final Double outputValueUnitScalingRatio) { + super.outputPercentileDistribution(printStream, outputValueUnitScalingRatio); + } + + @Override + public synchronized void outputPercentileDistribution(final PrintStream printStream, + final int percentileTicksPerHalfDistance, + final Double outputValueUnitScalingRatio) { + super.outputPercentileDistribution(printStream, percentileTicksPerHalfDistance, outputValueUnitScalingRatio); + } + + @Override + public synchronized void outputPercentileDistribution(final PrintStream printStream, + final int percentileTicksPerHalfDistance, + final Double outputValueUnitScalingRatio, + final boolean useCsvFormat) { + super.outputPercentileDistribution( + printStream, + percentileTicksPerHalfDistance, + outputValueUnitScalingRatio, + useCsvFormat); + } + + @Override + public synchronized int getNeededByteBufferCapacity() { + return super.getNeededByteBufferCapacity(); + } + + @Override + public synchronized int encodeIntoByteBuffer(final ByteBuffer buffer) { + return super.encodeIntoByteBuffer(buffer); + } + + @Override + public synchronized int encodeIntoCompressedByteBuffer( + final ByteBuffer targetBuffer, + final int compressionLevel) { + return super.encodeIntoCompressedByteBuffer(targetBuffer, compressionLevel); + } + + @Override + public synchronized int encodeIntoCompressedByteBuffer(final ByteBuffer targetBuffer) { + return super.encodeIntoCompressedByteBuffer(targetBuffer); + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/SynchronizedHistogram.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/SynchronizedHistogram.java new file mode 100644 index 000000000..fcaa4cc3d --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/SynchronizedHistogram.java @@ -0,0 +1,527 @@ +/** + * Written by Gil Tene of Azul Systems, and released to the public domain, + * 
as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.PrintStream; +import java.nio.ByteBuffer; +import java.util.zip.DataFormatException; + +/** + *

An integer value High Dynamic Range (HDR) Histogram that is synchronized as a whole

+ *

+ * A {@link SynchronizedHistogram} is a variant of {@link Histogram} that is
+ * synchronized as a whole, such that queries, copying, and addition operations are atomic with respect to
+ * modification of the {@link SynchronizedHistogram}, and such that external accessors (e.g. iterations on the
+ * histogram data) that synchronize on the {@link SynchronizedHistogram} instance can safely assume that no
+ * modifications to the histogram data occur within their synchronized block.
+ *
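+ * For example, an external iteration can be made stable by synchronizing on the instance (a sketch;
+ * {@code histogram} here is assumed to be a {@link SynchronizedHistogram} shared with recording threads):
+ *
+ *         synchronized (histogram) {
+ *             for (HistogramIterationValue v : histogram.recordedValues()) {
+ *                 // no modifications to the histogram data can occur within this block
+ *             }
+ *         }
+ *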

+ * It is important to note that synchronization can result in blocking recording calls. If non-blocking recording
+ * operations are required, consider using {@link ConcurrentHistogram}, {@link AtomicHistogram}, or (recommended)
+ * {@link Recorder} or {@link SingleWriterRecorder} which were intended for concurrent operations.
+ *

+ * See package description for {@link io.prometheus.client.HdrHistogram} and {@link Histogram} for more details.
+ */
+
+
+public class SynchronizedHistogram extends Histogram {
+
+    /**
+     * Construct an auto-resizing SynchronizedHistogram with a lowest discernible value of 1 and an auto-adjusting
+     * highestTrackableValue. Can auto-resize to track values up to (Long.MAX_VALUE / 2).
+     *
+     * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
+     *                                       decimal digits to which the histogram will maintain value resolution
+     *                                       and separation. Must be a non-negative integer between 0 and 5.
+     */
+    public SynchronizedHistogram(final int numberOfSignificantValueDigits) {
+        this(1, 2, numberOfSignificantValueDigits);
+        setAutoResize(true);
+    }
+
+    /**
+     * Construct a SynchronizedHistogram given the highest value to be tracked and a number of significant decimal
+     * digits. The histogram will be constructed to implicitly track (distinguish from 0) values as low as 1.
+     *
+     * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive
+     *                              integer that is {@literal >=} 2.
+     * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
+     *                                       decimal digits to which the histogram will maintain value resolution
+     *                                       and separation. Must be a non-negative integer between 0 and 5.
+     */
+    public SynchronizedHistogram(final long highestTrackableValue, final int numberOfSignificantValueDigits) {
+        this(1, highestTrackableValue, numberOfSignificantValueDigits);
+    }
+
+    /**
+     * Construct a SynchronizedHistogram given the lowest and highest values to be tracked and a number of significant
+     * decimal digits. Providing a lowestDiscernibleValue is useful in situations where the units used
+     * for the histogram's values are much smaller than the minimal accuracy required. E.g. when tracking
+     * time values stated in nanosecond units, where the minimal accuracy required is a microsecond, the
+     * proper value for lowestDiscernibleValue would be 1000.
+     *
+     * @param lowestDiscernibleValue The lowest value that can be tracked (distinguished from 0) by the histogram.
+     *                               Must be a positive integer that is {@literal >=} 1. May be internally rounded
+     *                               down to nearest power of 2.
+     * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive
+     *                              integer that is {@literal >=} (2 * lowestDiscernibleValue).
+     * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
+     *                                       decimal digits to which the histogram will maintain value resolution
+     *                                       and separation. Must be a non-negative integer between 0 and 5.
+     */
+    public SynchronizedHistogram(final long lowestDiscernibleValue, final long highestTrackableValue, final int numberOfSignificantValueDigits) {
+        super(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits);
+    }
+
+    /**
+     * Construct a histogram with the same range settings as a given source histogram,
+     * duplicating the source's start/end timestamps (but NOT its contents)
+     * @param source The source histogram to duplicate
+     */
+    public SynchronizedHistogram(final AbstractHistogram source) {
+        super(source);
+    }
+
+    /**
+     * Construct a new histogram by decoding it from a ByteBuffer.
+ * @param buffer The buffer to decode from + * @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high + * @return The newly constructed histogram + */ + public static SynchronizedHistogram decodeFromByteBuffer(final ByteBuffer buffer, + final long minBarForHighestTrackableValue) { + return decodeFromByteBuffer(buffer, SynchronizedHistogram.class, minBarForHighestTrackableValue); + } + + /** + * Construct a new histogram by decoding it from a compressed form in a ByteBuffer. + * @param buffer The buffer to decode from + * @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high + * @return The newly constructed histogram + * @throws DataFormatException on error parsing/decompressing the buffer + */ + public static SynchronizedHistogram decodeFromCompressedByteBuffer(final ByteBuffer buffer, + final long minBarForHighestTrackableValue) throws DataFormatException { + return decodeFromCompressedByteBuffer(buffer, SynchronizedHistogram.class, minBarForHighestTrackableValue); + } + + /** + * Construct a new SynchronizedHistogram by decoding it from a String containing a base64 encoded + * compressed histogram representation. + * + * @param base64CompressedHistogramString A string containing a base64 encoding of a compressed histogram + * @return A SynchronizedHistogram decoded from the string + * @throws DataFormatException on error parsing/decompressing the input + */ + public static SynchronizedHistogram fromString(final String base64CompressedHistogramString) + throws DataFormatException { + return decodeFromCompressedByteBuffer( + ByteBuffer.wrap(Base64Helper.parseBase64Binary(base64CompressedHistogramString)), + 0); + } + + @Override + public synchronized long getTotalCount() { + return super.getTotalCount(); + } + + @Override + public synchronized boolean isAutoResize() { + return super.isAutoResize(); + } + + @Override + public synchronized void setAutoResize(boolean autoResize) { + super.setAutoResize(autoResize); + } + + @Override + public synchronized void recordValue(final long value) throws ArrayIndexOutOfBoundsException { + super.recordValue(value); + } + + @Override + public synchronized void recordValueWithCount(final long value, final long count) throws ArrayIndexOutOfBoundsException { + super.recordValueWithCount(value, count); + } + + @Override + public synchronized void recordValueWithExpectedInterval(final long value, final long expectedIntervalBetweenValueSamples) + throws ArrayIndexOutOfBoundsException { + super.recordValueWithExpectedInterval(value, expectedIntervalBetweenValueSamples); + } + + /** + * @deprecated + */ + @SuppressWarnings("deprecation") + @Override + public synchronized void recordValue(final long value, final long expectedIntervalBetweenValueSamples) + throws ArrayIndexOutOfBoundsException { + super.recordValue(value, expectedIntervalBetweenValueSamples); + } + + @Override + public synchronized void reset() { + super.reset(); + } + + @Override + public synchronized SynchronizedHistogram copy() { + SynchronizedHistogram toHistogram = new SynchronizedHistogram(this); + toHistogram.add(this); + return toHistogram; + } + + @Override + public synchronized SynchronizedHistogram copyCorrectedForCoordinatedOmission( + final long expectedIntervalBetweenValueSamples) { + SynchronizedHistogram toHistogram = new SynchronizedHistogram(this); + toHistogram.addWhileCorrectingForCoordinatedOmission(this, expectedIntervalBetweenValueSamples); + return toHistogram; + } + + + 
@SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter") + @Override + public void copyInto(final AbstractHistogram targetHistogram) { + // Synchronize copyInto(). Avoid deadlocks by synchronizing in order of construction identity count. + if (identity < targetHistogram.identity) { + synchronized (this) { + //noinspection SynchronizationOnLocalVariableOrMethodParameter + synchronized (targetHistogram) { + super.copyInto(targetHistogram); + } + } + } else { + synchronized (targetHistogram) { + synchronized (this) { + super.copyInto(targetHistogram); + } + } + } + } + + @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter") + @Override + public void copyIntoCorrectedForCoordinatedOmission(final AbstractHistogram targetHistogram, + final long expectedIntervalBetweenValueSamples) { + // Synchronize copyIntoCorrectedForCoordinatedOmission(). Avoid deadlocks by synchronizing in order + // of construction identity count. + if (identity < targetHistogram.identity) { + synchronized (this) { + synchronized (targetHistogram) { + super.copyIntoCorrectedForCoordinatedOmission(targetHistogram, expectedIntervalBetweenValueSamples); + } + } + } else { + synchronized (targetHistogram) { + synchronized (this) { + super.copyIntoCorrectedForCoordinatedOmission(targetHistogram, expectedIntervalBetweenValueSamples); + } + } + } + } + + @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter") + @Override + public void add(final AbstractHistogram otherHistogram) { + // Synchronize add(). Avoid deadlocks by synchronizing in order of construction identity count. + if (identity < otherHistogram.identity) { + synchronized (this) { + synchronized (otherHistogram) { + super.add(otherHistogram); + } + } + } else { + synchronized (otherHistogram) { + synchronized (this) { + super.add(otherHistogram); + } + } + } + } + + @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter") + @Override + public void subtract(final AbstractHistogram otherHistogram) + throws ArrayIndexOutOfBoundsException, IllegalArgumentException { + // Synchronize subtract(). Avoid deadlocks by synchronizing in order of construction identity count. + if (identity < otherHistogram.identity) { + synchronized (this) { + synchronized (otherHistogram) { + super.subtract(otherHistogram); + } + } + } else { + synchronized (otherHistogram) { + synchronized (this) { + super.subtract(otherHistogram); + } + } + } + } + + @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter") + @Override + public void addWhileCorrectingForCoordinatedOmission(final AbstractHistogram fromHistogram, + final long expectedIntervalBetweenValueSamples) { + // Synchronize addWhileCorrectingForCoordinatedOmission(). Avoid deadlocks by synchronizing in + // order of construction identity count. 
+ if (identity < fromHistogram.identity) { + synchronized (this) { + synchronized (fromHistogram) { + super.addWhileCorrectingForCoordinatedOmission(fromHistogram, expectedIntervalBetweenValueSamples); + } + } + } else { + synchronized (fromHistogram) { + synchronized (this) { + super.addWhileCorrectingForCoordinatedOmission(fromHistogram, expectedIntervalBetweenValueSamples); + } + } + } + } + @Override + public synchronized void shiftValuesLeft(final int numberOfBinaryOrdersOfMagnitude) { + super.shiftValuesLeft(numberOfBinaryOrdersOfMagnitude); + } + + @Override + public synchronized void shiftValuesRight(final int numberOfBinaryOrdersOfMagnitude) { + super.shiftValuesRight(numberOfBinaryOrdersOfMagnitude); + } + + @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter") + @Override + public boolean equals(final Object other){ + if ( this == other ) { + return true; + } + if (other instanceof AbstractHistogram) { + AbstractHistogram otherHistogram = (AbstractHistogram) other; + if (identity < otherHistogram.identity) { + synchronized (this) { + synchronized (otherHistogram) { + return super.equals(otherHistogram); + } + } + } else { + synchronized (otherHistogram) { + synchronized (this) { + return super.equals(otherHistogram); + } + } + } + } else { + synchronized (this) { + return super.equals(other); + } + } + } + + @Override + public synchronized int hashCode() { + return super.hashCode(); + } + + @Override + public synchronized long getLowestDiscernibleValue() { + return super.getLowestDiscernibleValue(); + } + + @Override + public synchronized long getHighestTrackableValue() { + return super.getHighestTrackableValue(); + } + + @Override + public synchronized int getNumberOfSignificantValueDigits() { + return super.getNumberOfSignificantValueDigits(); + } + + @Override + public synchronized long sizeOfEquivalentValueRange(final long value) { + return super.sizeOfEquivalentValueRange(value); + } + + @Override + public synchronized long lowestEquivalentValue(final long value) { + return super.lowestEquivalentValue(value); + } + + @Override + public synchronized long highestEquivalentValue(final long value) { + return super.highestEquivalentValue(value); + } + + @Override + public synchronized long medianEquivalentValue(final long value) { + return super.medianEquivalentValue(value); + } + + @Override + public synchronized long nextNonEquivalentValue(final long value) { + return super.nextNonEquivalentValue(value); + } + + @Override + public synchronized boolean valuesAreEquivalent(final long value1, final long value2) { + return super.valuesAreEquivalent(value1, value2); + } + + @Override + public synchronized int getEstimatedFootprintInBytes() { + return super.getEstimatedFootprintInBytes(); + } + + @Override + public synchronized long getStartTimeStamp() { + return super.getStartTimeStamp(); + } + + @Override + public synchronized void setStartTimeStamp(final long timeStampMsec) { + super.setStartTimeStamp(timeStampMsec); + } + + @Override + public synchronized long getEndTimeStamp() { + return super.getEndTimeStamp(); + } + + @Override + public synchronized void setEndTimeStamp(final long timeStampMsec) { + super.setEndTimeStamp(timeStampMsec); + } + + @Override + public synchronized long getMinValue() { + return super.getMinValue(); + } + + @Override + public synchronized long getMaxValue() { + return super.getMaxValue(); + } + + @Override + public synchronized long getMinNonZeroValue() { + return super.getMinNonZeroValue(); + } + + @Override + public synchronized 
double getMaxValueAsDouble() { + return super.getMaxValueAsDouble(); + } + + @Override + public synchronized double getMean() { + return super.getMean(); + } + + @Override + public synchronized double getStdDeviation() { + return super.getStdDeviation(); + } + + @Override + public synchronized long getValueAtPercentile(final double percentile) { + return super.getValueAtPercentile(percentile); + } + + @Override + public synchronized double getPercentileAtOrBelowValue(final long value) { + return super.getPercentileAtOrBelowValue(value); + } + + @Override + public synchronized long getCountBetweenValues(final long lowValue, final long highValue) throws ArrayIndexOutOfBoundsException { + return super.getCountBetweenValues(lowValue, highValue); + } + + @Override + public synchronized long getCountAtValue(final long value) throws ArrayIndexOutOfBoundsException { + return super.getCountAtValue(value); + } + + @Override + public synchronized Percentiles percentiles(final int percentileTicksPerHalfDistance) { + return super.percentiles(percentileTicksPerHalfDistance); + } + + @Override + public synchronized LinearBucketValues linearBucketValues(final long valueUnitsPerBucket) { + return super.linearBucketValues(valueUnitsPerBucket); + } + + @Override + public synchronized LogarithmicBucketValues logarithmicBucketValues(final long valueUnitsInFirstBucket, final double logBase) { + return super.logarithmicBucketValues(valueUnitsInFirstBucket, logBase); + } + + @Override + public synchronized RecordedValues recordedValues() { + return super.recordedValues(); + } + + @Override + public synchronized AllValues allValues() { + return super.allValues(); + } + + @Override + public synchronized void outputPercentileDistribution(final PrintStream printStream, + final Double outputValueUnitScalingRatio) { + super.outputPercentileDistribution(printStream, outputValueUnitScalingRatio); + } + + @Override + public synchronized void outputPercentileDistribution(final PrintStream printStream, + final int percentileTicksPerHalfDistance, + final Double outputValueUnitScalingRatio) { + super.outputPercentileDistribution(printStream, percentileTicksPerHalfDistance, outputValueUnitScalingRatio); + } + + @Override + public synchronized void outputPercentileDistribution(final PrintStream printStream, + final int percentileTicksPerHalfDistance, + final Double outputValueUnitScalingRatio, + final boolean useCsvFormat) { + super.outputPercentileDistribution(printStream, percentileTicksPerHalfDistance, outputValueUnitScalingRatio, useCsvFormat); + } + + @Override + public synchronized int getNeededByteBufferCapacity() { + return super.getNeededByteBufferCapacity(); + } + + + @Override + public synchronized int encodeIntoByteBuffer(final ByteBuffer buffer) { + return super.encodeIntoByteBuffer(buffer); + } + + @Override + public synchronized int encodeIntoCompressedByteBuffer( + final ByteBuffer targetBuffer, + final int compressionLevel) { + return super.encodeIntoCompressedByteBuffer(targetBuffer, compressionLevel); + } + + @Override + public synchronized int encodeIntoCompressedByteBuffer(final ByteBuffer targetBuffer) { + return super.encodeIntoCompressedByteBuffer(targetBuffer); + } + + private void readObject(final ObjectInputStream o) + throws IOException, ClassNotFoundException { + o.defaultReadObject(); + } +} \ No newline at end of file diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/ValueRecorder.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/ValueRecorder.java new 
file mode 100644 index 000000000..346ab7f32 --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/ValueRecorder.java @@ -0,0 +1,47 @@ +package io.prometheus.client.HdrHistogram; + +public interface ValueRecorder { + + /** + * Record a value + * + * @param value The value to be recorded + * @throws ArrayIndexOutOfBoundsException (may throw) if value cannot be covered by the histogram's range + */ + void recordValue(long value) throws ArrayIndexOutOfBoundsException; + + /** + * Record a value (adding to the value's current count) + * + * @param value The value to be recorded + * @param count The number of occurrences of this value to record + * @throws ArrayIndexOutOfBoundsException (may throw) if value cannot be covered by the histogram's range + */ + void recordValueWithCount(long value, long count) throws ArrayIndexOutOfBoundsException; + + /** + * Record a value. + *

+     * To compensate for the loss of sampled values when a recorded value is larger than the expected
+     * interval between value samples, the recorder will auto-generate an additional series of decreasingly-smaller
+     * (down to the expectedIntervalBetweenValueSamples) value records.
+     *

+     * Note: This is an at-recording correction method, as opposed to the post-recording correction method provided
+     * by {@link AbstractHistogram#copyCorrectedForCoordinatedOmission(long)}.
+     * The two methods are mutually exclusive, and only one of the two should be used on a given data set to correct
+     * for the same coordinated omission issue.
+     *
+     * @param value The value to record
+     * @param expectedIntervalBetweenValueSamples If expectedIntervalBetweenValueSamples is larger than 0, add
+     *                                            auto-generated value records as appropriate if value is larger
+     *                                            than expectedIntervalBetweenValueSamples
+     * @throws ArrayIndexOutOfBoundsException (may throw) if value cannot be covered by the histogram's range
+     */
+    void recordValueWithExpectedInterval(long value, long expectedIntervalBetweenValueSamples)
+            throws ArrayIndexOutOfBoundsException;
+
+    /**
+     * Reset the contents and collected stats
+     */
+    void reset();
+} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/WriterReaderPhaser.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/WriterReaderPhaser.java new file mode 100644 index 000000000..b5a9d26ed --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/WriterReaderPhaser.java @@ -0,0 +1,271 @@ +/**
+ * Written by Gil Tene of Azul Systems, and released to the public domain,
+ * as explained at http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package io.prometheus.client.HdrHistogram;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLongFieldUpdater;
+import java.util.concurrent.locks.ReentrantLock;
+
+/**
+ * {@link WriterReaderPhaser} provides an asymmetric means for
+ * synchronizing the execution of wait-free "writer" critical sections against
+ * a "reader phase flip" that needs to make sure no writer critical sections
+ * that were active at the beginning of the flip are still active after the
+ * flip is done. Multiple writers and multiple readers are supported.
+ *

+ * Using a {@link WriterReaderPhaser} for coordination, writers can continuously + * perform wait-free/lock-free updates to common data structures, while readers + * can get hold of atomic and inactive snapshots without stalling writers. + *

+ * While a {@link WriterReaderPhaser} can be useful in multiple scenarios, a + * specific and common use case is that of safely managing "double buffered" + * data stream access in which writers can proceed without being blocked, while + * readers gain access to stable and unchanging buffer samples. + * {@link WriterReaderPhaser} "writers" are wait free (on architectures that support + * wait free atomic increment operations), "readers" block for other + * "readers", and "readers" are only blocked by "writers" whose critical section + * was entered before the reader's + * {@link WriterReaderPhaser#flipPhase()} attempt. + *

Assumptions and Guarantees

+ *

+ * When used to protect an actively recording data structure, the assumptions on + * how readers and writers act are: + *

+ * 1. There are two sets of data structures ("active" and "inactive").
+ *
+ * 2. Writing is done to the perceived active version (as perceived by the
+ *    writer), and only within critical sections delineated by
+ *    {@link WriterReaderPhaser#writerCriticalSectionEnter} and
+ *    {@link WriterReaderPhaser#writerCriticalSectionExit writerCriticalSectionExit()}.
+ *
+ * 3. Only readers switch the perceived roles of the active and inactive data
+ *    structures. They do so only while under {@link WriterReaderPhaser#readerLock()}
+ *    protection and only before calling {@link WriterReaderPhaser#flipPhase()}.
+ *
+ * 4. Writers do not remain in their critical sections indefinitely.
+ *
+ * 5. Only writers perform {@link WriterReaderPhaser#writerCriticalSectionEnter}
+ *    and {@link WriterReaderPhaser#writerCriticalSectionExit writerCriticalSectionExit()}.
+ *
+ * 6. Readers do not hold onto readerLock indefinitely.
+ *
+ * 7. Only readers perform {@link WriterReaderPhaser#readerLock()} and
+ *    {@link WriterReaderPhaser#readerUnlock()}.
+ *
+ * 8. Only readers perform {@link WriterReaderPhaser#flipPhase()} operations,
+ *    and only while holding the readerLock.
+ *

+ * When the above assumptions are met, {@link WriterReaderPhaser} guarantees
+ * that the inactive data structures are not being modified by any writers while
+ * they are being read under readerLock() protection after a
+ * {@link WriterReaderPhaser#flipPhase()} operation.
+ *

+ * The following progress guarantees are provided to writers and readers that + * adhere to the above stated assumptions: + *

+ * 1. Writer operations
+ *    ({@link WriterReaderPhaser#writerCriticalSectionEnter writerCriticalSectionEnter}
+ *    and {@link WriterReaderPhaser#writerCriticalSectionExit writerCriticalSectionExit})
+ *    are wait free on architectures that
+ *    support wait-free atomic increment operations (they remain lock-free [but not
+ *    wait-free] on architectures that do not support wait-free atomic increment
+ *    operations).
+ *
+ * 2. {@link WriterReaderPhaser#flipPhase()} operations are guaranteed to
+ *    make forward progress, and will only be blocked by writers whose critical sections
+ *    were entered prior to the start of the reader's flipPhase operation, and have not
+ *    yet exited their critical sections.
+ *
+ * 3. {@link WriterReaderPhaser#readerLock()} only blocks for other
+ *    readers that are holding the readerLock.
+ * + *

Example use

+ * Imagine a simple use case where a histogram (which is basically a large set of + * rapidly updated counters) is being modified by writers, and a reader needs to gain + * access to stable interval samples of the histogram for reporting or other analysis + * purposes. + *

+ *         final WriterReaderPhaser recordingPhaser = new WriterReaderPhaser();
+ *
+ *         volatile Histogram activeHistogram;
+ *         Histogram inactiveHistogram;
+ *         ...
+ * 
+ * A writer may record values to the histogram:
+ *

+ *         // Wait-free recording:
+ *         long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter();
+ *         try {
+ *             activeHistogram.recordValue(value);
+ *         } finally {
+ *             recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter);
+ *         }
+ * 
+ * A reader gains access to a stable histogram of values recorded during an interval, + * and reports on it: + *

+ *         try {
+ *             recordingPhaser.readerLock();
+ *
+ *             inactiveHistogram.reset();
+ *
+ *             // Swap active and inactive histograms:
+ *             final Histogram tempHistogram = inactiveHistogram;
+ *             inactiveHistogram = activeHistogram;
+ *             activeHistogram = tempHistogram;
+ *
+ *             recordingPhaser.flipPhase();
+ *             // At this point, inactiveHistogram content is guaranteed to be stable
+ *
+ *             logHistogram(inactiveHistogram);
+ *
+ *         } finally {
+ *             recordingPhaser.readerUnlock();
+ *         }
+ * 
+ */
+/*
+ * High level design: There are even and odd epochs; the epoch flips for each
+ * reader. Any number of writers can be in the same epoch (odd or even), but
+ * after a completed phase flip no writers will still be in the old epoch
+ * (and therefore are known to not be updating or observing the old, inactive
+ * data structure). Writers can always proceed at full speed in what they
+ * perceive to be the current (odd or even) epoch. The epoch flip is fast (a
+ * single atomic op).
+ */
+
+public class WriterReaderPhaser {
+    private volatile long startEpoch = 0;
+    private volatile long evenEndEpoch = 0;
+    private volatile long oddEndEpoch = Long.MIN_VALUE;
+
+    private final ReentrantLock readerLock = new ReentrantLock();
+
+    private static final AtomicLongFieldUpdater<WriterReaderPhaser> startEpochUpdater =
+            AtomicLongFieldUpdater.newUpdater(WriterReaderPhaser.class, "startEpoch");
+    private static final AtomicLongFieldUpdater<WriterReaderPhaser> evenEndEpochUpdater =
+            AtomicLongFieldUpdater.newUpdater(WriterReaderPhaser.class, "evenEndEpoch");
+    private static final AtomicLongFieldUpdater<WriterReaderPhaser> oddEndEpochUpdater =
+            AtomicLongFieldUpdater.newUpdater(WriterReaderPhaser.class, "oddEndEpoch");
+
+    /**
+     * Indicate entry to a critical section containing a write operation.
+     *

+ * This call is wait-free on architectures that support wait free atomic increment operations, + * and is lock-free on architectures that do not. + *

+ * {@code writerCriticalSectionEnter()} must be matched with a subsequent + * {@link WriterReaderPhaser#writerCriticalSectionExit(long)} in order for CriticalSectionPhaser + * synchronization to function properly. + * + * @return an (opaque) value associated with the critical section entry, which MUST be provided + * to the matching {@link WriterReaderPhaser#writerCriticalSectionExit} call. + */ + public long writerCriticalSectionEnter() { + return startEpochUpdater.getAndIncrement(this); + } + + /** + * Indicate exit from a critical section containing a write operation. + *

+ * This call is wait-free on architectures that support wait free atomic increment operations, + * and is lock-free on architectures that do not. + *

+     * {@code writerCriticalSectionExit(long)} must be matched with a preceding
+     * {@link WriterReaderPhaser#writerCriticalSectionEnter()} call, and must be provided with the
+     * matching {@link WriterReaderPhaser#writerCriticalSectionEnter()} call's return value, in
+     * order for CriticalSectionPhaser synchronization to function properly.
+     *
+     * @param criticalValueAtEnter the (opaque) value returned from the matching
+     *                             {@link WriterReaderPhaser#writerCriticalSectionEnter()} call.
+     */
+    public void writerCriticalSectionExit(long criticalValueAtEnter) {
+        (criticalValueAtEnter < 0 ? oddEndEpochUpdater : evenEndEpochUpdater).getAndIncrement(this);
+    }
+
+    /**
+     * Enter a critical section containing a read operation (reentrant, mutually excludes against
+     * {@link WriterReaderPhaser#readerLock} calls by other threads).
+     *

+     * {@link WriterReaderPhaser#readerLock} DOES NOT provide synchronization
+     * against {@link WriterReaderPhaser#writerCriticalSectionEnter()} calls. Use {@link WriterReaderPhaser#flipPhase()}
+     * to synchronize reads against writers.
+     */
+    public void readerLock() {
+        readerLock.lock();
+    }
+
+    /**
+     * Exit from a critical section containing a read operation (relinquishes mutual exclusion against other
+     * {@link WriterReaderPhaser#readerLock} calls).
+     */
+    public void readerUnlock() {
+        readerLock.unlock();
+    }
+
+    /**
+     * Flip a phase in the {@link WriterReaderPhaser} instance. {@link WriterReaderPhaser#flipPhase()}
+     * can only be called while holding the {@link WriterReaderPhaser#readerLock() readerLock}.
+     * {@link WriterReaderPhaser#flipPhase()} will return only after all writer critical sections (protected by
+     * {@link WriterReaderPhaser#writerCriticalSectionEnter() writerCriticalSectionEnter} and
+     * {@link WriterReaderPhaser#writerCriticalSectionExit writerCriticalSectionExit}) that may have been
+     * in flight when the {@link WriterReaderPhaser#flipPhase()} call was made have completed.
+     *

+ * No actual writer critical section activity is required for {@link WriterReaderPhaser#flipPhase()} to + * succeed. + *
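+     * A typical use couples the flip with swapping an active and an inactive data
+     * structure. A minimal sketch (the {@code active}/{@code inactive} fields and the
+     * surrounding recorder are hypothetical, not part of this class):
+     * <pre>{@code
+     *   // Writer fast path:
+     *   long token = phaser.writerCriticalSectionEnter();
+     *   try {
+     *       active.incrementAndGet(index);       // update the currently active structure
+     *   } finally {
+     *       phaser.writerCriticalSectionExit(token);
+     *   }
+     *
+     *   // Reader, taking a stable snapshot:
+     *   phaser.readerLock();
+     *   try {
+     *       AtomicLongArray tmp = inactive;      // swap active and inactive structures
+     *       inactive = active;
+     *       active = tmp;
+     *       phaser.flipPhase();                  // wait out writers still in the old structure
+     *       // 'inactive' is now quiescent and safe to read
+     *   } finally {
+     *       phaser.readerUnlock();
+     *   }
+     * }</pre>
+     * <p>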

+     * However, {@link WriterReaderPhaser#flipPhase()} is lock-free with respect to calls to
+     * {@link WriterReaderPhaser#writerCriticalSectionEnter()} and
+     * {@link WriterReaderPhaser#writerCriticalSectionExit writerCriticalSectionExit()}. It may spin-wait
+     * or sleep-wait for active writer critical section code to complete.
+     *
+     * @param yieldTimeNsec The amount of time (in nanoseconds) to sleep in each yield if yield loop is needed.
+     */
+    public void flipPhase(long yieldTimeNsec) {
+        if (!readerLock.isHeldByCurrentThread()) {
+            throw new IllegalStateException("flipPhase() can only be called while holding the readerLock()");
+        }
+
+        // Read the volatile 'startEpoch' exactly once
+        boolean nextPhaseIsEven = (startEpoch < 0); // Current phase is odd...
+
+        // First, clear currently unused [next] phase end epoch (to proper initial value for phase):
+        long initialStartValue = nextPhaseIsEven ? 0 : Long.MIN_VALUE;
+        (nextPhaseIsEven ? evenEndEpochUpdater : oddEndEpochUpdater).lazySet(this, initialStartValue);
+
+        // Next, reset start value, indicating new phase, and retain value at flip:
+        long startValueAtFlip = startEpochUpdater.getAndSet(this, initialStartValue);
+
+        // Now, spin until previous phase end value catches up with start value at flip:
+        while ((nextPhaseIsEven ? oddEndEpoch : evenEndEpoch) != startValueAtFlip) {
+            if (yieldTimeNsec == 0) {
+                Thread.yield();
+            } else {
+                try {
+                    TimeUnit.NANOSECONDS.sleep(yieldTimeNsec);
+                } catch (InterruptedException ex) {
+                    // nothing to do here, we just woke up earlier than expected.
+                }
+            }
+        }
+    }
+
+    /**
+     * Flip a phase in the {@link WriterReaderPhaser} instance. {@code flipPhase()}
+     * can only be called while holding the {@link WriterReaderPhaser#readerLock() readerLock}.
+     * {@code flipPhase()} will return only after all writer critical sections (protected by
+     * {@link WriterReaderPhaser#writerCriticalSectionEnter() writerCriticalSectionEnter} and
+     * {@link WriterReaderPhaser#writerCriticalSectionExit writerCriticalSectionExit}) that may have been
+     * in flight when the {@code flipPhase()} call was made have completed.
+     * <p>
+     * No actual writer critical section activity is required for {@code flipPhase()} to
+     * succeed.
+     * <p>
+     * However, {@code flipPhase()} is lock-free with respect to calls to
+     * {@link WriterReaderPhaser#writerCriticalSectionEnter()} and
+     * {@link WriterReaderPhaser#writerCriticalSectionExit writerCriticalSectionExit()}. It may spin-wait
+     * or sleep-wait for active writer critical section code to complete.
+     */
+    public void flipPhase() {
+        flipPhase(0);
+    }
+}
diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/ZigZagEncoding.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/ZigZagEncoding.java
new file mode 100644
index 000000000..240e19895
--- /dev/null
+++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/ZigZagEncoding.java
@@ -0,0 +1,181 @@
+/**
+ * Written by Gil Tene of Azul Systems, and released to the public domain,
+ * as explained at http://creativecommons.org/publicdomain/zero/1.0/
+ *
+ * @author Gil Tene
+ */
+
+package io.prometheus.client.HdrHistogram;
+
+import java.nio.ByteBuffer;
+
+/**
+ * This class provides encoding and decoding methods for writing and reading
+ * ZigZag-encoded LEB128-64b9B-variant (Little Endian Base 128) values to/from a
+ * {@link ByteBuffer}. LEB128's variable length encoding provides for using a
+ * smaller number of bytes for smaller values, and the use of ZigZag encoding
+ * allows small (closer to zero) negative values to use fewer bytes. Details
+ * on both LEB128 and ZigZag can be readily found elsewhere.
+ *
+ * The LEB128-64b9B-variant encoding used here diverges from the "original"
+ * LEB128 as it extends to 64 bit values: In the original LEB128, a 64 bit
+ * value can take up to 10 bytes in the stream, where this variant's encoding
+ * of a 64 bit value will max out at 9 bytes.
+ *
+ * As such, this encoder/decoder should NOT be used for encoding or decoding
+ * "standard" LEB128 formats (e.g. Google Protocol Buffers).
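+ *
+ * A round-trip sketch (illustrative only; note that the class is package-private,
+ * so it is only reachable from within this package):
+ * <pre>{@code
+ *   ByteBuffer buffer = ByteBuffer.allocate(9);
+ *   ZigZagEncoding.putLong(buffer, -3L);          // zigzags to 5, encodes in one byte
+ *   buffer.flip();
+ *   long value = ZigZagEncoding.getLong(buffer);  // value == -3
+ * }</pre>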
+ */ +class ZigZagEncoding { + + /** + * Writes a long value to the given buffer in LEB128 ZigZag encoded format + * @param buffer the buffer to write to + * @param value the value to write to the buffer + */ + static void putLong(ByteBuffer buffer, long value) { + value = (value << 1) ^ (value >> 63); + if (value >>> 7 == 0) { + buffer.put((byte) value); + } else { + buffer.put((byte) ((value & 0x7F) | 0x80)); + if (value >>> 14 == 0) { + buffer.put((byte) (value >>> 7)); + } else { + buffer.put((byte) (value >>> 7 | 0x80)); + if (value >>> 21 == 0) { + buffer.put((byte) (value >>> 14)); + } else { + buffer.put((byte) (value >>> 14 | 0x80)); + if (value >>> 28 == 0) { + buffer.put((byte) (value >>> 21)); + } else { + buffer.put((byte) (value >>> 21 | 0x80)); + if (value >>> 35 == 0) { + buffer.put((byte) (value >>> 28)); + } else { + buffer.put((byte) (value >>> 28 | 0x80)); + if (value >>> 42 == 0) { + buffer.put((byte) (value >>> 35)); + } else { + buffer.put((byte) (value >>> 35 | 0x80)); + if (value >>> 49 == 0) { + buffer.put((byte) (value >>> 42)); + } else { + buffer.put((byte) (value >>> 42 | 0x80)); + if (value >>> 56 == 0) { + buffer.put((byte) (value >>> 49)); + } else { + buffer.put((byte) (value >>> 49 | 0x80)); + buffer.put((byte) (value >>> 56)); + } + } + } + } + } + } + } + } + } + + /** + * Writes an int value to the given buffer in LEB128-64b9B ZigZag encoded format + * @param buffer the buffer to write to + * @param value the value to write to the buffer + */ + static void putInt(ByteBuffer buffer, int value) { + value = (value << 1) ^ (value >> 31); + if (value >>> 7 == 0) { + buffer.put((byte) value); + } else { + buffer.put((byte) ((value & 0x7F) | 0x80)); + if (value >>> 14 == 0) { + buffer.put((byte) (value >>> 7)); + } else { + buffer.put((byte) (value >>> 7 | 0x80)); + if (value >>> 21 == 0) { + buffer.put((byte) (value >>> 14)); + } else { + buffer.put((byte) (value >>> 14 | 0x80)); + if (value >>> 28 == 0) { + buffer.put((byte) (value >>> 21)); + } else { + buffer.put((byte) (value >>> 21 | 0x80)); + buffer.put((byte) (value >>> 28)); + } + } + } + } + } + + /** + * Read an LEB128-64b9B ZigZag encoded long value from the given buffer + * @param buffer the buffer to read from + * @return the value read from the buffer + */ + static long getLong(ByteBuffer buffer) { + long v = buffer.get(); + long value = v & 0x7F; + if ((v & 0x80) != 0) { + v = buffer.get(); + value |= (v & 0x7F) << 7; + if ((v & 0x80) != 0) { + v = buffer.get(); + value |= (v & 0x7F) << 14; + if ((v & 0x80) != 0) { + v = buffer.get(); + value |= (v & 0x7F) << 21; + if ((v & 0x80) != 0) { + v = buffer.get(); + value |= (v & 0x7F) << 28; + if ((v & 0x80) != 0) { + v = buffer.get(); + value |= (v & 0x7F) << 35; + if ((v & 0x80) != 0) { + v = buffer.get(); + value |= (v & 0x7F) << 42; + if ((v & 0x80) != 0) { + v = buffer.get(); + value |= (v & 0x7F) << 49; + if ((v & 0x80) != 0) { + v = buffer.get(); + value |= v << 56; + } + } + } + } + } + } + } + } + value = (value >>> 1) ^ (-(value & 1)); + return value; + } + + /** + * Read an LEB128-64b9B ZigZag encoded int value from the given buffer + * @param buffer the buffer to read from + * @return the value read from the buffer + */ + static int getInt (ByteBuffer buffer) { + int v = buffer.get(); + int value = v & 0x7F; + if ((v & 0x80) != 0) { + v = buffer.get(); + value |= (v & 0x7F) << 7; + if ((v & 0x80) != 0) { + v = buffer.get(); + value |= (v & 0x7F) << 14; + if ((v & 0x80) != 0) { + v = buffer.get(); + value |= (v & 0x7F) << 21; + if ((v & 
0x80) != 0) { + v = buffer.get(); + value |= (v & 0x7F) << 28; + } + } + } + } + value = (value >>> 1) ^ (-(value & 1)); + return value; + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/AbstractPackedArrayContext.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/AbstractPackedArrayContext.java new file mode 100644 index 000000000..b7f6c3da9 --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/AbstractPackedArrayContext.java @@ -0,0 +1,1105 @@ +package io.prometheus.client.HdrHistogram.packedarray; + +import java.io.Serializable; +import java.util.Iterator; +import java.util.NoSuchElementException; + +/** + * A packed-value, sparse array context used for storing 64 bit signed values. + *

+ * An array context is optimised for tracking sparsely set (as in mostly zeros) values that tend to not make use of the + * full 64 bit value range even when they are non-zero. The array context's internal representation is such that the + * packed value at each virtual array index may be represented by 0-8 bytes of actual storage. + *
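+ * (For illustration: a value of 0x0203 at some virtual index occupies just two bytes
+ * of actual storage, one holding 0x03 for the least significant byte and one holding
+ * 0x02 for the next byte up; the six higher-order, all-zero bytes of the value are
+ * not stored at all.)
+ *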

+ * An array context encodes the packed values in 8 "set trees" with each set tree representing one byte of the
+ * packed value at the virtual index in question. The {@link #getPackedIndex(int, int, boolean)} method is used to
+ * look up the byte-index corresponding to the given (set tree) value byte of the given virtual index, and can be
+ * used to add entries to represent that byte as needed. As a successful {@link #getPackedIndex(int, int, boolean)}
+ * may require a resizing of the array, it can throw a {@link ResizeException} to indicate that the requested
+ * packed index cannot be found or added without a resize of the physical storage.
+ */
+abstract class AbstractPackedArrayContext implements Serializable {
+    /*
+     * The physical representation uses an insert-at-the-end mechanism for adding contents to the array. Any
+     * insertion will occur at the very end of the array, and any expansion of an element will move it to the end,
+     * leaving an empty slot behind.
+     *
+     * Terminology:
+     *
+     * long-word: a 64-bit-aligned 64 bit word
+     * short-word: a 16-bit-aligned 16 bit word
+     * byte: an 8-bit-aligned byte
+     *
+     * long-index: an index of a 64-bit-aligned word within the overall array (i.e. in multiples of 8 bytes)
+     * short-index: an index of a 16-bit aligned short within the overall array (i.e. in multiples of 2 bytes)
+     * byte-index: an index of an 8-bit aligned byte within the overall array (i.e. in multiples of 1 byte)
+     *
+     * The storage array stores long (64 bit) words. Lookups for the various sizes are done as such:
+     *
+     * long getAtLongIndex(int longIndex) { return array[longIndex]; }
+     * short getAtShortIndex(int shortIndex) { return (short) ((array[shortIndex >> 2] >> ((shortIndex & 0x3) << 4)) & 0xffff); }
+     * byte getAtByteIndex(int byteIndex) { return (byte) ((array[byteIndex >> 3] >> ((byteIndex & 0x7) << 3)) & 0xff); }
+     *
+     * [Therefore there is no dependence on byte endianness of the underlying architecture]
+     *
+     * Structure:
+     *
+     * The packed array captures values at virtual indexes in a collection of striped "set trees" (also called
+     * "sets"), with each set tree representing one byte of the value at the virtual index in question. As such,
+     * there are 8 sets in the array, each corresponding to a byte in the overall value being stored. Set 0
+     * contains the LSByte of the value, and Set 7 contains the MSByte of the value.
+     *
+     * The array contents are comprised of three types of entries:
+     *
+     * - The root indexes: A fixed size 8 short-words array of short indexes at the start of the array, containing
+     *   the short-index of the root entry of each of the 8 set trees.
+     *
+     * - Non-Leaf Entries: Variable sized, 2-18 short-words entries representing non-leaf entries in a set tree.
+     *   Non-Leaf entries comprise a 2 short-word header containing a packed slot indicators bitmask and the
+     *   (optional, non-zero) index of the previous version of the entry, followed by an array of 0-16 short-words.
+     *   The short-word found at a given slot in this array holds an index to an entry in the next level of
+     *   the set tree.
+     *
+     * - Leaf Entries: comprised of long-words. Each byte [0-7] in the long-word holds an actual value.
+     *   Specifically, the byte-index of that LeafEntry byte in the array is the byte-index for the given set's
+     *   byte value of a virtual index.
+     *
+     * If a given virtual index for a given set has no entry in a given set tree, the byte value for that set of
+     * that virtual index is interpreted as 0. If a given set tree does not have an entry for a given virtual
+     * index, it is safe to assume that no higher significance set tree has one either.
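+     *
+     * (A worked example of the lookup arithmetic sketched above, for illustration:
+     * short-index 5 lives in long word 5 >> 2 == 1, at bit offset (5 & 0x3) << 4 == 16;
+     * byte-index 13 lives in long word 13 >> 3 == 1, at bit offset (13 & 0x7) << 3 == 40.)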
+     *
+     * Non-leaf entries structure and mutation protocols:
+     *
+     * The structure of a Non-Leaf entry in the array can be roughly described in terms of this C-style struct:
+     *
+     * struct nonLeafEntry {
+     *     short packedSlotIndicators;
+     *     short previousVersionIndex;
+     *     short[] entrySlotsIndexes;
+     * }
+     *
+     * Non-leaf entries are 2-18 short-words in length, with the length determined by the number of bits set in
+     * the packedSlotIndicators short-word in the entry. The packed slot indicators short-word is a bit mask which
+     * represents the 16 possible next-level entries below the given entry, and has a bit set (to '1') for each slot
+     * that is actually populated with a next level entry. Each of the short-words in the entrySlots is
+     * associated with a specific active ('1') bit in the packedSlotIndicators short-word, and holds the index
+     * to the next level's entry associated with a given path in the tree. [Note: the values in entrySlotsIndexes[]
+     * are short-indexes if the next level is not a leaf level, and long-indexes if the next level is
+     * a leaf.]
+     *
+     * Summary of Non-leaf entry use and replacement protocol:
+     *
+     * - No value in any entrySlotsIndexes[] array is ever initialized to a zero value. Zero values in
+     *   entrySlotsIndexes[] can only appear through consolidation (see below). Once an entrySlotsIndexes[]
+     *   slot is observed to contain a zero, it cannot change to a non-zero value.
+     *
+     * - Zero values encountered in entrySlotsIndexes[] arrays are never followed. If a zero value is found
+     *   when looking for the index to a lower level entry during a tree walk, the tree walking operation is
+     *   restarted from the root.
+     *
+     * - A Non-Leaf entry with an active (non zero index) previous version is never followed or expanded.
+     *   Instead, any thread encountering a Non-leaf entry with an active previous version will consolidate
+     *   the previous version with the current one. The consolidation operation will clear (zero) the
+     *   previousVersionIndex, which will then allow the caller to continue with whatever use the thread was
+     *   attempting to make of the entry.
+     *
+     * - Expansion of entries: Since entries hold only enough storage to represent currently populated paths
+     *   below them in the set tree, any addition of entries at a lower level requires the expansion of the entry
+     *   to make room for a larger entrySlotsIndexes array. The expansion of an entry in order to add a new
+     *   next-level entry under it follows these steps:
+     *
+     *   - Allocate a new and larger entry structure (initializes all slots to -1)
+     *
+     *   - Populate the newly inserted slot with an index to a newly allocated next-level entry
+     *
+     *   - Link the newly expanded entry to the previous entry structure via the previousVersionIndex field
+     *
+     *   - Publish the newly expanded entry by [atomically] replacing the "pointer index" to the previous
+     *     entry (located at a higher level entry's slot, or in the root indexes) with a "pointer index" to
+     *     the newly expanded entry structure
+     *
+     *   A failure to atomically publish a newly expanded entry (e.g. if the "pointer index" being replaced
+     *   holds a value other than that in our not-yet-published previousVersionIndex) will restart the expansion
+     *   operation from the beginning.
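+     *
+     * (A worked example of the slot arithmetic above, for illustration: a
+     * packedSlotIndicators value of 0b0000000000100110 indicates populated next-level
+     * entries for index nibbles 1, 2 and 5, so the entry is 2 + 3 = 5 short-words
+     * long; the entry slot for nibble 5 is
+     * Integer.bitCount(0b0000000000100110 & ((1 << 5) - 1)) == 2.)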
+     *
+     * When first published, a newly-visible expanded entry is not immediately "usable" because it has an
+     * active, "not yet consolidated" previous version entry, and any user of the entry will first have to
+     * consolidate it. The expansion will follow publication of the expanded entry with a consolidation of
+     * the previous entry into the new one, clearing the previousVersionIndex field in the process, and
+     * enabling normal use of the expanded entry.
+     *
+     * - Concurrent consolidation: While expansion and consolidation are ongoing, other threads can be
+     *   concurrently walking the set trees. Per the protocol stated here, any tree walk encountering a Non-Leaf
+     *   entry with an active previous version will consolidate the entry before using it. Consolidation of a
+     *   given entry can occur concurrently by an expanding thread and by multiple walking threads.
+     *
+     * - Consolidation of a previous version entry into a current one is done by:
+     *
+     *   - For each non-zero index in the previous version entry, copy that index to the new associated
+     *     entry slot in the entry, and CAS a zero in the old entry slot. If the CAS fails, repeat (including
+     *     the zero check).
+     *
+     *   - Once all entry slots in the previous version entry have been consolidated and zeroed, zero
+     *     the index to the previous version entry.
+     */
+
+    private static final int PACKED_ARRAY_GROWTH_INCREMENT = 16;
+    private static final int PACKED_ARRAY_GROWTH_FRACTION_POW2 = 4;
+    private static final int SET_0_START_INDEX = 0;
+    private static final int NUMBER_OF_SETS = 8;
+    private static final int LEAF_LEVEL_SHIFT = 3;
+    private static final int NON_LEAF_ENTRY_HEADER_SIZE_IN_SHORTS = 2;
+    private static final int NON_LEAF_ENTRY_SLOT_INDICATORS_OFFSET = 0;
+    private static final int NON_LEAF_ENTRY_PREVIOUS_VERSION_OFFSET = 1;
+
+    static final int MINIMUM_INITIAL_PACKED_ARRAY_CAPACITY = 16;
+    static final int MAX_SUPPORTED_PACKED_COUNTS_ARRAY_LENGTH = (Short.MAX_VALUE / 4);
+
+    private final boolean isPacked;
+    private int physicalLength;
+    private int virtualLength = 0;
+    private int topLevelShift = Integer.MAX_VALUE; // Make it nonsensical until properly initialized.
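+
+    // (Worked example of the top level shift arithmetic, per
+    //  determineTopLevelShiftForVirtualLength() later in this class: for a virtual
+    //  length of 1000, ceil(log2(1000)) == 10, eightsSizeMagnitude == 7, rounded up
+    //  to a multiple of four == 8, so topLevelShift == (8 - 4) + 3 == 7; tree walks
+    //  then consume index nibbles at shifts 7 and 3 (the leaf level).)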
+ + AbstractPackedArrayContext(final int virtualLength, final int initialPhysicalLength) { + physicalLength = Math.max(initialPhysicalLength, MINIMUM_INITIAL_PACKED_ARRAY_CAPACITY); + isPacked = (physicalLength <= AbstractPackedArrayContext.MAX_SUPPORTED_PACKED_COUNTS_ARRAY_LENGTH); + if (!isPacked) { + physicalLength = virtualLength; + } + } + + void init(final int virtualLength) { + if (!isPacked()) { + // Deal with non-packed context init: + this.virtualLength = virtualLength; + return; + } + // room for the 8 shorts root indexes: + boolean success; + do { + success = casPopulatedShortLength(getPopulatedShortLength(), SET_0_START_INDEX + 8); + } while (!success); + + // Populate empty root entries, and point to them from the root indexes: + for (int i = 0; i < NUMBER_OF_SETS; i++) { + setAtShortIndex(SET_0_START_INDEX + i, (short) 0); + } + setVirtualLength(virtualLength); + } + + // + // ### ######## ###### ######## ######## ### ###### ######## ###### + // ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## + // ## ## ## ## ## ## ## ## ## ## ## ## ## + // ## ## ######## ###### ## ######## ## ## ## ## ###### + // ######### ## ## ## ## ## ## ######### ## ## ## + // ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## + // ## ## ######## ###### ## ## ## ## ## ###### ## ###### + // + + + abstract int length(); + + abstract int getPopulatedShortLength(); + + abstract boolean casPopulatedShortLength(int expectedPopulatedShortLength, int newPopulatedShortLength); + + abstract boolean casPopulatedLongLength(int expectedPopulatedShortLength, int newPopulatedShortLength); + + abstract long getAtLongIndex(int longIndex); + + abstract boolean casAtLongIndex(int longIndex, long expectedValue, long newValue); + + abstract void lazySetAtLongIndex(int longIndex, long newValue); + + abstract void clearContents(); + + abstract void resizeArray(int newLength); + + abstract long getAtUnpackedIndex(int index); + + abstract void setAtUnpackedIndex(int index, long newValue); + + abstract void lazySetAtUnpackedIndex(int index, long newValue); + + abstract long incrementAndGetAtUnpackedIndex(int index); + + abstract long addAndGetAtUnpackedIndex(int index, long valueToAdd); + + abstract String unpackedToString(); + + // + // ######## ######## #### ## ## #### ######## #### ## ## ######## ####### ######## ###### + // ## ## ## ## ## ### ### ## ## ## ## ## ## ## ## ## ## ## ## + // ## ## ## ## ## #### #### ## ## ## ## ## ## ## ## ## ## ## + // ######## ######## ## ## ### ## ## ## ## ## ## ###### ## ## ######## ###### + // ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## + // ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## + // ## ## ## #### ## ## #### ## #### ### ######## ####### ## ###### + // + + void setValuePart(final int longIndex, + final long valuePartAsLong, + final long valuePartMask, + final int valuePartShift) { + boolean success; + do { + long currentLongValue = getAtLongIndex(longIndex); + long newLongValue = (currentLongValue & ~valuePartMask) | (valuePartAsLong << valuePartShift); + success = casAtLongIndex(longIndex, currentLongValue, newLongValue); + } + while (!success); + } + + short getAtShortIndex(final int shortIndex) { + return (short) ((getAtLongIndex(shortIndex >> 2) >> ((shortIndex & 0x3) << 4)) & 0xffff); + } + + short getIndexAtShortIndex(final int shortIndex) { + return (short) ((getAtLongIndex(shortIndex >> 2) >> ((shortIndex & 0x3) << 4)) & 0x7fff); + } + + void setAtShortIndex(final int shortIndex, final short value) { + int longIndex = shortIndex >> 2; + int shortShift = (shortIndex & 
0x3) << 4; + long shortMask = ((long) 0xffff) << shortShift; + long shortValueAsLong = ((long) value) & 0xffff; + setValuePart(longIndex, shortValueAsLong, shortMask, shortShift); + } + + boolean casAtShortIndex(final int shortIndex, final short expectedValue, final short newValue) { + int longIndex = shortIndex >> 2; + int shortShift = (shortIndex & 0x3) << 4; + long shortMask = ~(((long) 0xffff) << shortShift); + long newShortValueAsLong = ((long) newValue) & 0xffff; + long expectedShortValueAsLong = ((long) expectedValue) & 0xffff; + boolean success; + do { + long currentLongValue = getAtLongIndex(longIndex); + long currentShortValueAsLong = (currentLongValue >> shortShift) & 0xffff; + if (currentShortValueAsLong != expectedShortValueAsLong) { + return false; + } + long newLongValue = (currentLongValue & shortMask) | (newShortValueAsLong << shortShift); + success = casAtLongIndex(longIndex, currentLongValue, newLongValue); + } + while (!success); + return true; + } + + byte getAtByteIndex(final int byteIndex) { + return (byte) ((getAtLongIndex(byteIndex >> 3) >> ((byteIndex & 0x7) << 3)) & 0xff); + } + + void setAtByteIndex(final int byteIndex, final byte value) { + int longIndex = byteIndex >> 3; + int byteShift = (byteIndex & 0x7) << 3; + long byteMask = ((long) 0xff) << byteShift; + long byteValueAsLong = ((long) value) & 0xff; + setValuePart(longIndex, byteValueAsLong, byteMask, byteShift); + } + + /** + * add a byte value to a current byte value in the array + * + * @param byteIndex index of byte value to add to + * @param valueToAdd byte value to add + * @return the afterAddValue. ((afterAddValue & 0x100) != 0) indicates a carry. + */ + long addAtByteIndex(final int byteIndex, final byte valueToAdd) { + int longIndex = byteIndex >> 3; + int byteShift = (byteIndex & 0x7) << 3; + long byteMask = ((long) 0xff) << byteShift; + boolean success; + long newValue; + do { + long currentLongValue = getAtLongIndex(longIndex); + long byteValueAsLong = (currentLongValue >> byteShift) & 0xff; + newValue = byteValueAsLong + (((long) valueToAdd) & 0xff); + long newByteValueAsLong = newValue & 0xff; + long newLongValue = (currentLongValue & ~byteMask) | (newByteValueAsLong << byteShift); + success = casAtLongIndex(longIndex, currentLongValue, newLongValue); + } + while (!success); + return newValue; + } + + // + // ######## ## ## ######## ######## ## ## ######## #### ######## ## ######## ###### + // ## ### ## ## ## ## ## ## ## ## ## ## ## ## ## ## + // ## #### ## ## ## ## #### ## ## ## ## ## ## ## + // ###### ## ## ## ## ######## ## ###### ## ###### ## ## ## ###### + // ## ## #### ## ## ## ## ## ## ## ## ## ## ## + // ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## + // ######## ## ## ## ## ## ## ## #### ######## ######## ######## ###### + // + + private int getPackedSlotIndicators(final int entryIndex) { + return ((int) getAtShortIndex(entryIndex + NON_LEAF_ENTRY_SLOT_INDICATORS_OFFSET)) & 0xffff; + } + + private void setPackedSlotIndicators(final int entryIndex, final short newPackedSlotIndicators) { + setAtShortIndex(entryIndex + NON_LEAF_ENTRY_SLOT_INDICATORS_OFFSET, newPackedSlotIndicators); + } + + private short getPreviousVersionIndex(final int entryIndex) { + return getAtShortIndex(entryIndex + NON_LEAF_ENTRY_PREVIOUS_VERSION_OFFSET); + } + + private void setPreviousVersionIndex(final int entryIndex, final short newPreviousVersionIndex) { + setAtShortIndex(entryIndex + NON_LEAF_ENTRY_PREVIOUS_VERSION_OFFSET, newPreviousVersionIndex); + } + + private short getIndexAtEntrySlot(final int 
entryIndex, final int slot) { + return getAtShortIndex(entryIndex + NON_LEAF_ENTRY_HEADER_SIZE_IN_SHORTS + slot); + } + + private void setIndexAtEntrySlot(final int entryIndex, final int slot, final short newIndexValue) { + setAtShortIndex(entryIndex + NON_LEAF_ENTRY_HEADER_SIZE_IN_SHORTS + slot, newIndexValue); + } + + private boolean casIndexAtEntrySlot(final int entryIndex, + final int slot, + final short expectedIndexValue, + final short newIndexValue) { + return casAtShortIndex(entryIndex + NON_LEAF_ENTRY_HEADER_SIZE_IN_SHORTS + slot, + expectedIndexValue, newIndexValue); + } + + private boolean casIndexAtEntrySlotIfNonZeroAndLessThan(final int entryIndex, + final int slot, + final short newIndexValue) { + boolean success; + do { + short existingIndexValue = getIndexAtEntrySlot(entryIndex, slot); + if (existingIndexValue == 0) return false; + if (newIndexValue <= existingIndexValue) return false; + success = casIndexAtEntrySlot(entryIndex, slot, existingIndexValue, newIndexValue); + } while (!success); + return true; + } + + // + // ######## ## ## ######## ######## ## ## ####### ######## ###### + // ## ### ## ## ## ## ## ## ## ## ## ## ## ## + // ## #### ## ## ## ## #### ## ## ## ## ## + // ###### ## ## ## ## ######## ## ## ## ######## ###### + // ## ## #### ## ## ## ## ## ## ## ## + // ## ## ### ## ## ## ## ## ## ## ## ## + // ######## ## ## ## ## ## ## ####### ## ###### + // + + private void expandArrayIfNeeded(final int entryLengthInLongs) throws ResizeException { + final int currentLength = length(); + if (length() < getPopulatedLongLength() + entryLengthInLongs) { + int growthIncrement = Math.max(entryLengthInLongs, PACKED_ARRAY_GROWTH_INCREMENT); + growthIncrement = Math.max(growthIncrement, getPopulatedLongLength() >> PACKED_ARRAY_GROWTH_FRACTION_POW2); + throw new ResizeException(currentLength + growthIncrement); + } + } + + private int newEntry(final int entryLengthInShorts) throws ResizeException { + // Add entry at the end of the array: + int newEntryIndex; + boolean success; + do { + newEntryIndex = getPopulatedShortLength(); + expandArrayIfNeeded((entryLengthInShorts >> 2) + 1); + success = casPopulatedShortLength(newEntryIndex, (newEntryIndex + entryLengthInShorts)); + } while (!success); + + for (int i = 0; i < entryLengthInShorts; i++) { + setAtShortIndex(newEntryIndex + i, (short) -1); // Poison value -1. 
Must be overridden before read.
+        }
+        return newEntryIndex;
+    }
+
+    private int newLeafEntry() throws ResizeException {
+        // Add entry at the end of the array:
+        int newEntryIndex;
+        boolean success;
+        do {
+            newEntryIndex = getPopulatedLongLength();
+            expandArrayIfNeeded(1);
+            success = casPopulatedLongLength(newEntryIndex, (newEntryIndex + 1));
+        } while (!success);
+
+        lazySetAtLongIndex(newEntryIndex, 0);
+        return newEntryIndex;
+    }
+
+    /**
+     * Consolidate entry with previous entry version if one exists.
+     *
+     * @param entryIndex The shortIndex of the entry to be consolidated
+     */
+    private void consolidateEntry(final int entryIndex) {
+        int previousVersionIndex = getPreviousVersionIndex(entryIndex);
+        if (previousVersionIndex == 0) return;
+        if (getPreviousVersionIndex(previousVersionIndex) != 0) {
+            throw new IllegalStateException("Encountered Previous Version Entry that is not itself consolidated.");
+        }
+
+        int previousVersionPackedSlotsIndicators = getPackedSlotIndicators(previousVersionIndex);
+        // Previous version exists, needs consolidation
+
+        int packedSlotsIndicators = getPackedSlotIndicators(entryIndex);
+        int insertedSlotMask = packedSlotsIndicators ^ previousVersionPackedSlotsIndicators; // only bit that differs
+        int slotsBelowBitNumber = packedSlotsIndicators & (insertedSlotMask - 1);
+        int insertedSlotIndex = Integer.bitCount(slotsBelowBitNumber);
+        int numberOfSlotsInEntry = Integer.bitCount(packedSlotsIndicators);
+
+        // Copy the entry slots from previous version, skipping the newly inserted slot in the target:
+        int sourceSlot = 0;
+        for (int targetSlot = 0; targetSlot < numberOfSlotsInEntry; targetSlot++) {
+            if (targetSlot != insertedSlotIndex) {
+                boolean success = true;
+                do {
+                    short indexAtSlot = getIndexAtEntrySlot(previousVersionIndex, sourceSlot);
+                    if (indexAtSlot != 0) {
+                        // Copy observed index at slot to current entry
+                        // (only copy value in if previous value is less than new one AND is non-zero)
+                        casIndexAtEntrySlotIfNonZeroAndLessThan(entryIndex, targetSlot, indexAtSlot);
+
+                        // CAS the previous version slot to 0.
+                        // (Succeeds only if the index in that slot has not changed. Retry if it did).
+                        success = casIndexAtEntrySlot(previousVersionIndex, sourceSlot, indexAtSlot, (short) 0);
+                    }
+                }
+                while (!success);
+                sourceSlot++;
+            }
+        }
+
+        setPreviousVersionIndex(entryIndex, (short) 0);
+    }
+
+    /**
+     * Expand entry as indicated.
+     *
+     * @param existingEntryIndex the index of the entry
+     * @param entryPointerIndex index to the slot pointing to the entry (needs to be fixed up)
+     * @param insertedSlotIndex relative [packed] index of slot being inserted into entry
+     * @param insertedSlotMask mask value of slot being inserted
+     * @param nextLevelIsLeaf the level below this one is a leaf level
+     * @return the updated index of the entry
+     * @throws RetryException if expansion fails due to concurrent conflict, and caller should try again.
+ */ + private int expandEntry(final int existingEntryIndex, + final int entryPointerIndex, + final int insertedSlotIndex, + final int insertedSlotMask, + final boolean nextLevelIsLeaf) throws RetryException, ResizeException { + int packedSlotIndicators = ((int) getAtShortIndex(existingEntryIndex)) & 0xffff; + packedSlotIndicators |= insertedSlotMask; + int numberOfSlotsInExpandedEntry = Integer.bitCount(packedSlotIndicators); + if (insertedSlotIndex >= numberOfSlotsInExpandedEntry) { + throw new IllegalStateException("inserted slot index is out of range given provided masks"); + } + int expandedEntryLength = numberOfSlotsInExpandedEntry + NON_LEAF_ENTRY_HEADER_SIZE_IN_SHORTS; + + // Create new next-level entry to refer to from slot at this level: + int indexOfNewNextLevelEntry = 0; + if (nextLevelIsLeaf) { + indexOfNewNextLevelEntry = newLeafEntry(); // Establish long-index to new leaf entry + } else { + // TODO: Optimize this by creating the whole sub-tree here, rather than a step that will immediately expand + // Create a new 1 word (empty, no slots set) entry for the next level: + indexOfNewNextLevelEntry = newEntry(NON_LEAF_ENTRY_HEADER_SIZE_IN_SHORTS); // Establish index to new entry + setPackedSlotIndicators(indexOfNewNextLevelEntry, (short) 0); + setPreviousVersionIndex(indexOfNewNextLevelEntry, (short) 0); + } + short insertedSlotValue = (short) indexOfNewNextLevelEntry; + + int expandedEntryIndex = newEntry(expandedEntryLength); + + // populate the packed indicators word: + setPackedSlotIndicators(expandedEntryIndex, (short) packedSlotIndicators); + setPreviousVersionIndex(expandedEntryIndex, (short) existingEntryIndex); + + // Populate the inserted slot with the index of the new next level entry: + setIndexAtEntrySlot(expandedEntryIndex, insertedSlotIndex, insertedSlotValue); + + // Copy of previous version entries is deferred to later consolidateEntry() call. + + // Set the pointer to the updated entry index. If CAS fails, discard by throwing retry exception. 
+        boolean success = casAtShortIndex(entryPointerIndex, (short) existingEntryIndex, (short) expandedEntryIndex);
+        if (!success) {
+            throw new RetryException();
+        }
+
+        // Expanded entry is published, now consolidate it:
+        consolidateEntry(expandedEntryIndex);
+
+        return expandedEntryIndex;
+    }
+
+    //
+    // ###### ######## ######## ## ## ### ## ## #### ## ## ######## ######## ## ##
+    // ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## ## ## ##
+    // ## ## ## ## ## ## ## ## ## ## #### ## ## ## ## ##
+    // ## #### ###### ## ## ## ## ## ## ## ## ## ## ## ## ## ###### ###
+    // ## ## ## ## ## ## ######### ## ## ## ## #### ## ## ## ## ##
+    // ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## ## ##
+    // ###### ######## ## ### ## ## ######## ## #### ## ## ######## ######## ## ##
+    //
+
+    private int getRootEntry(final int setNumber) {
+        try {
+            return getRootEntry(setNumber, false);
+        } catch (RetryException ex) {
+            throw new IllegalStateException("Should not encounter Resize or Retry exceptions on read-only read: ", ex);
+        } catch (ResizeException ex) {
+            throw new IllegalStateException("Should not encounter Resize or Retry exceptions on read-only read: ", ex);
+        }
+    }
+
+    private int getRootEntry(final int setNumber, boolean insertAsNeeded) throws RetryException, ResizeException {
+        int entryPointerIndex = SET_0_START_INDEX + setNumber;
+        int entryIndex = getIndexAtShortIndex(entryPointerIndex);
+
+        if (entryIndex == 0) {
+            if (!insertAsNeeded) {
+                return 0; // Index does not currently exist in packed array;
+            }
+
+            entryIndex = newEntry(NON_LEAF_ENTRY_HEADER_SIZE_IN_SHORTS);
+            // Create a new empty (no slots set) entry for the next level:
+            setPackedSlotIndicators(entryIndex, (short) 0);
+            setPreviousVersionIndex(entryIndex, (short) 0);
+
+            boolean success = casAtShortIndex(entryPointerIndex, (short) 0, (short) entryIndex);
+            if (!success) {
+                throw new RetryException();
+            }
+        }
+
+        if ((getTopLevelShift() != LEAF_LEVEL_SHIFT) && getPreviousVersionIndex(entryIndex) != 0) {
+            consolidateEntry(entryIndex);
+        }
+        return entryIndex;
+    }
+
+    /**
+     * Get the byte-index (into the packed array) corresponding to a given (set tree) value byte of a given
+     * virtual index. Inserts new set tree nodes as needed if indicated.
+     *
+     * @param setNumber The set tree number (0-7, 0 corresponding with the LSByte set tree)
+     * @param virtualIndex The virtual index into the PackedArray
+     * @param insertAsNeeded If true, will insert new set tree nodes as needed if they do not already exist
+     * @return the byte-index corresponding to the given (set tree) value byte of the given virtual index
+     */
+    int getPackedIndex(final int setNumber, final int virtualIndex, final boolean insertAsNeeded)
+            throws ResizeException {
+        int byteIndex = 0; // Must be overwritten to finish. Will retry until non-zero.
+        do {
+            try {
+                assert (setNumber >= 0 && setNumber < NUMBER_OF_SETS);
+                if (virtualIndex >= getVirtualLength()) {
+                    throw new ArrayIndexOutOfBoundsException(
+                            String.format("Attempting access at index %d, beyond virtualLength %d",
+                                    virtualIndex, getVirtualLength()));
+                }
+                int entryPointerIndex = SET_0_START_INDEX + setNumber;
+                int entryIndex = getRootEntry(setNumber, insertAsNeeded);
+                if (entryIndex == 0) {
+                    return -1; // Index does not currently exist in packed array;
+                }
+
+                // Work down the levels of non-leaf entries:
+                for (int indexShift = getTopLevelShift(); indexShift >= LEAF_LEVEL_SHIFT; indexShift -= 4) {
+                    boolean nextLevelIsLeaf = (indexShift == LEAF_LEVEL_SHIFT);
+                    // Target is a packedSlotIndicators entry
+                    int packedSlotIndicators = getPackedSlotIndicators(entryIndex);
+                    int slotBitNumber = (virtualIndex >>> indexShift) & 0xf;
+                    int slotMask = 1 << slotBitNumber;
+                    int slotsBelowBitNumber = packedSlotIndicators & (slotMask - 1);
+                    int slotNumber = Integer.bitCount(slotsBelowBitNumber);
+
+                    if ((packedSlotIndicators & slotMask) == 0) {
+                        // The entryIndex slot does not have the contents we want
+                        if (!insertAsNeeded) {
+                            return -1; // Index does not currently exist in packed array;
+                        }
+
+                        // Expand the entry, adding the index to new entry at the proper slot:
+                        entryIndex = expandEntry(entryIndex, entryPointerIndex, slotNumber, slotMask, nextLevelIsLeaf);
+                    }
+
+                    // Next level's entry pointer index is in the appropriate slot in the entries array in this entry:
+                    entryPointerIndex = entryIndex + NON_LEAF_ENTRY_HEADER_SIZE_IN_SHORTS + slotNumber;
+
+                    entryIndex = getIndexAtShortIndex(entryPointerIndex);
+                    if (entryIndex == 0) {
+                        throw new RetryException();
+                    }
+                    if ((!nextLevelIsLeaf) && getPreviousVersionIndex(entryIndex) != 0) {
+                        consolidateEntry(entryIndex);
+                    }
+
+                    // entryIndex either holds the long-index of a leaf entry, or the short-index of the next
+                    // level entry's packed slot indicators short-word.
+ } + + // entryIndex is the long-index of a leaf entry that contains the value byte for the given set + + byteIndex = (entryIndex << 3) + (virtualIndex & 0x7); // Determine byte index offset within leaf entry + + } catch (RetryException ignored) { + // Retry will happen automatically since byteIndex was not set to non-zero value; + } + } + while (byteIndex == 0); + + return byteIndex; + } + + private long contextLocalGetValueAtIndex(final int virtualIndex) { + long value = 0; + for (int byteNum = 0; byteNum < NUMBER_OF_SETS; byteNum++) { + int packedIndex = 0; + long byteValueAtPackedIndex; + do { + try { + packedIndex = getPackedIndex(byteNum, virtualIndex, false); + if (packedIndex < 0) { + return value; + } + byteValueAtPackedIndex = (((long) getAtByteIndex(packedIndex)) & 0xff) << (byteNum << 3); + } catch (ResizeException ex) { + throw new IllegalStateException("Should never encounter a resize exception without inserts"); + } + } while (packedIndex == 0); + + value += byteValueAtPackedIndex; + } + return value; + } + + // + // ## ## ######## ####### ######## ## ## ## ### ######## ######## + // ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## + // ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## + // ### ####### ######## ## ## ######## ## ## ## ## ## ## ###### + // ## ## ## ## ## ## ## ## ## ######### ## ## + // ## ## ## ## ## ## ## ## ## ## ## ## ## + // ## ## ## ####### ## ####### ######## ## ## ## ######## + // + + + void populateEquivalentEntriesWithZerosFromOther(final AbstractPackedArrayContext other) { + if (getVirtualLength() < other.getVirtualLength()) { + throw new IllegalStateException("Cannot populate array of smaller virtual length"); + } + for (int i = 0; i < NUMBER_OF_SETS; i++) { + int otherEntryIndex = other.getAtShortIndex(SET_0_START_INDEX + i); + if (otherEntryIndex == 0) continue; // No tree to duplicate + int entryIndexPointer = SET_0_START_INDEX + i; + for (int j = getTopLevelShift(); j > other.getTopLevelShift(); j -= 4) { + // for each inserted level: + + // Allocate entry in other: + int sizeOfEntry = NON_LEAF_ENTRY_HEADER_SIZE_IN_SHORTS + 1; + int newEntryIndex = 0; + do { + try { + newEntryIndex = newEntry(sizeOfEntry); + } catch (ResizeException ex) { + resizeArray(ex.getNewSize()); + } + } + while (newEntryIndex == 0); + + // Link new level in. + setAtShortIndex(entryIndexPointer, (short) newEntryIndex); + // Populate new level entry, use pointer to slot 0 as place to populate under: + setPackedSlotIndicators(newEntryIndex, (short) 0x1); // Slot 0 populated + setPreviousVersionIndex(newEntryIndex, (short) 0); // No previous version + entryIndexPointer = newEntryIndex + NON_LEAF_ENTRY_HEADER_SIZE_IN_SHORTS; // Where slot 0 index goes. 
+ } + copyEntriesAtLevelFromOther(other, otherEntryIndex, + entryIndexPointer, other.getTopLevelShift()); + } + } + + private void copyEntriesAtLevelFromOther(final AbstractPackedArrayContext other, + final int otherLevelEntryIndex, + final int levelEntryIndexPointer, + final int otherIndexShift) { + boolean nextLevelIsLeaf = (otherIndexShift == LEAF_LEVEL_SHIFT); + int packedSlotIndicators = other.getPackedSlotIndicators(otherLevelEntryIndex); + int numberOfSlots = Integer.bitCount(packedSlotIndicators); + int sizeOfEntry = NON_LEAF_ENTRY_HEADER_SIZE_IN_SHORTS + numberOfSlots; + + // Allocate entry: + int entryIndex = 0; + do { + try { + entryIndex = newEntry(sizeOfEntry); + } catch (ResizeException ex) { + resizeArray(ex.getNewSize()); + } + } + while (entryIndex == 0); + + setAtShortIndex(levelEntryIndexPointer, (short) entryIndex); + setAtShortIndex(entryIndex + NON_LEAF_ENTRY_SLOT_INDICATORS_OFFSET, (short) packedSlotIndicators); + setAtShortIndex(entryIndex + NON_LEAF_ENTRY_PREVIOUS_VERSION_OFFSET, (short) 0); + for (int i = 0; i < numberOfSlots; i++) { + if (nextLevelIsLeaf) { + // Make leaf in other: + int leafEntryIndex = 0; + do { + try { + leafEntryIndex = newLeafEntry(); + } catch (ResizeException ex) { + resizeArray(ex.getNewSize()); + } + } + while (leafEntryIndex == 0); + setIndexAtEntrySlot(entryIndex, i, (short) leafEntryIndex); + lazySetAtLongIndex(leafEntryIndex, 0); + } else { + int otherNextLevelEntryIndex = other.getIndexAtEntrySlot(otherLevelEntryIndex, i); + copyEntriesAtLevelFromOther(other, otherNextLevelEntryIndex, + (entryIndex + NON_LEAF_ENTRY_HEADER_SIZE_IN_SHORTS + i), + otherIndexShift - 4); + } + } + } + + // + // #### ######## ######## ######## ### ######## #### ####### ## ## + // ## ## ## ## ## ## ## ## ## ## ## ### ## + // ## ## ## ## ## ## ## ## ## ## ## #### ## + // ## ## ###### ######## ## ## ## ## ## ## ## ## ## + // ## ## ## ## ## ######### ## ## ## ## ## #### + // ## ## ## ## ## ## ## ## ## ## ## ## ### + // #### ## ######## ## ## ## ## ## #### ####### ## ## + // + + // Recorded Value iteration: + + private int seekToPopulatedVirtualIndexStartingAtLevel(final int startingVirtualIndex, + final int levelEntryIndex, + final int indexShift) throws RetryException { + int virtualIndex = startingVirtualIndex; + int firstVirtualIndexPastThisLevel = (((virtualIndex >>> indexShift) | 0xf) + 1) << indexShift; + boolean nextLevelIsLeaf = (indexShift == LEAF_LEVEL_SHIFT); + do { + // Target is a packedSlotIndicators entry + int packedSlotIndicators = getPackedSlotIndicators(levelEntryIndex); + int startingSlotBitNumber = (virtualIndex >>> indexShift) & 0xf; + int slotMask = 1 << startingSlotBitNumber; + int slotsAtAndAboveBitNumber = packedSlotIndicators & ~(slotMask - 1); + int nextActiveSlotBitNumber = Integer.numberOfTrailingZeros(slotsAtAndAboveBitNumber); + + + if (nextActiveSlotBitNumber > 15) { + // this level has no more set bits, pop back up a level. + int indexShiftAbove = indexShift + 4; + virtualIndex += 1 << indexShiftAbove; + virtualIndex &= ~((1 << indexShiftAbove) - 1); // Start at the beginning of the next slot a level above. + return -virtualIndex; // Negative value indicates a skip to a different index. + } + + // Drill into bit. + if (nextActiveSlotBitNumber != startingSlotBitNumber) { + virtualIndex += (nextActiveSlotBitNumber - startingSlotBitNumber) << indexShift; + virtualIndex &= ~((1 << indexShift) - 1); // Start at the beginning of the next slot of this level + } + + if (nextLevelIsLeaf) { + // There is recorded value here. 
No need to look. + return virtualIndex; + } + + // Next level is not a leaf. Drill into it: + + int nextSlotMask = 1 << nextActiveSlotBitNumber; + int slotsBelowNextBitNumber = packedSlotIndicators & (nextSlotMask - 1); + int nextSlotNumber = Integer.bitCount(slotsBelowNextBitNumber); + + if ((packedSlotIndicators & nextSlotMask) == 0) { + throw new IllegalStateException("Unexpected 0 at slot index"); + } + + int entryPointerIndex = levelEntryIndex + NON_LEAF_ENTRY_HEADER_SIZE_IN_SHORTS + nextSlotNumber; + int nextLevelEntryIndex = getIndexAtShortIndex(entryPointerIndex); + if (nextLevelEntryIndex == 0) { + throw new RetryException(); + } + if (getPreviousVersionIndex(nextLevelEntryIndex) != 0) { + consolidateEntry(nextLevelEntryIndex); + } + + virtualIndex = + seekToPopulatedVirtualIndexStartingAtLevel(virtualIndex, nextLevelEntryIndex, indexShift - 4); + if (virtualIndex < 0) { + virtualIndex = -virtualIndex; + } else { + return virtualIndex; + } + } while (virtualIndex < firstVirtualIndexPastThisLevel); + + return virtualIndex; + } + + private int findFirstPotentiallyPopulatedVirtualIndexStartingAt(final int startingVirtualIndex) { + int nextVirtualIndex = -1; + // Look for a populated virtual index in set 0: + boolean retry; + do { + retry = false; + try { + int entryIndex = getRootEntry(0); + if (entryIndex == 0) return getVirtualLength(); // Nothing under the root + nextVirtualIndex = + seekToPopulatedVirtualIndexStartingAtLevel(startingVirtualIndex, entryIndex, + getTopLevelShift()); + } catch (RetryException ex) { + retry = true; + } + } while (retry); + + // Don't drill to value if out of range: + if ((nextVirtualIndex < 0) || (nextVirtualIndex >= getVirtualLength())) { + return getVirtualLength(); + } + + return nextVirtualIndex; + } + + // Recorded values iteration: + + class NonZeroValuesIterator implements Iterator { + + int nextVirtualIndex = 0; + long nextValue; + + final IterationValue currentIterationValue = new IterationValue(); + + private void findFirstNonZeroValueVirtualIndexStartingAt(final int startingVirtualIndex) { + if (!isPacked()) { + // Look for non-zero value in unpacked context: + for (nextVirtualIndex = startingVirtualIndex; + nextVirtualIndex < getVirtualLength(); + nextVirtualIndex++) { + if ((nextValue = getAtUnpackedIndex(nextVirtualIndex)) != 0) { + return; + } + } + return; + } + // Context is packed: + nextVirtualIndex = startingVirtualIndex; + do { + nextVirtualIndex = findFirstPotentiallyPopulatedVirtualIndexStartingAt(nextVirtualIndex); + if (nextVirtualIndex >= getVirtualLength()) break; + if ((nextValue = contextLocalGetValueAtIndex(nextVirtualIndex)) != 0) break; + nextVirtualIndex++; + } while (true); + } + + @Override + public IterationValue next() { + if (!hasNext()) { + throw new NoSuchElementException(); + } + currentIterationValue.set(nextVirtualIndex, nextValue); + findFirstNonZeroValueVirtualIndexStartingAt(nextVirtualIndex + 1); + return currentIterationValue; + } + + @Override + public boolean hasNext() { + return ((nextVirtualIndex >= 0) && + (nextVirtualIndex < getVirtualLength())); + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + + NonZeroValuesIterator() { + findFirstNonZeroValueVirtualIndexStartingAt(0); + } + } + + /** + * An Iterator over all non-Zero values in the array + * + * @return an Iterator over all non-Zero values in the array + */ + Iterable nonZeroValues() { + return new Iterable() { + public Iterator iterator() { + return new NonZeroValuesIterator(); + } + }; + } + + // + 
// ###### #### ######## ######## #### ###### ## ## #### ######## ######## + // ## ## ## ## ## ## ## ## ## ## ## ## ## ## + // ## ## ## ## #### ## ## ## ## ## ## + // ###### ## ## ###### #### ###### ######### ## ###### ## + // ## ## ## ## ## ## ## ## ## ## ## ## ## + // ## ## ## ## ## ## ## ## ## ## ## ## ## ## + // ###### #### ######## ######## #### ## ###### ## ## #### ## ## + // + + boolean isPacked() { + return isPacked; + } + + int getPhysicalLength() { + return physicalLength; + } + + int getVirtualLength() { + return virtualLength; + } + + int determineTopLevelShiftForVirtualLength(final int virtualLength) { + int sizeMagnitude = (int) Math.ceil(Math.log(virtualLength) / Math.log(2)); + int eightsSizeMagnitude = sizeMagnitude - 3; + int multipleOfFourSizeMagnitude = (int) Math.ceil(eightsSizeMagnitude / 4.0) * 4; + multipleOfFourSizeMagnitude = Math.max(multipleOfFourSizeMagnitude, 8); + int topLevelShiftNeeded = (multipleOfFourSizeMagnitude - 4) + 3; + return topLevelShiftNeeded; + } + + void setVirtualLength(final int virtualLength) { + if (!isPacked()) { + throw new IllegalStateException("Should never be adjusting the virtual size of a non-packed context"); + } + int newTopLevelShift = determineTopLevelShiftForVirtualLength(virtualLength); + setTopLevelShift(newTopLevelShift); + this.virtualLength = virtualLength; + } + + int getTopLevelShift() { + return topLevelShift; + } + + private void setTopLevelShift(final int topLevelShift) { + this.topLevelShift = topLevelShift; + } + + int getPopulatedLongLength() { + return (getPopulatedShortLength() + 3) >> 2; // round up + } + + int getPopulatedByteLength() { + return getPopulatedShortLength() << 1; + } + + // + // ######## ####### ###### ######## ######## #### ## ## ###### + // ## ## ## ## ## ## ## ## ## ### ## ## ## + // ## ## ## ## ## ## ## ## #### ## ## + // ## ## ## ####### ###### ## ######## ## ## ## ## ## #### + // ## ## ## ## ## ## ## ## ## #### ## ## + // ## ## ## ## ## ## ## ## ## ## ### ## ## + // ## ####### ###### ## ## ## #### ## ## ###### + // + + private String nonLeafEntryToString(final int entryIndex, + final int indexShift, + final int indentLevel) { + String output = ""; + for (int i = 0; i < indentLevel; i++) { + output += " "; + } + try { + final int packedSlotIndicators = getPackedSlotIndicators(entryIndex); + output += String.format("slotIndicators: 0x%02x, prevVersionIndex: %3d: [ ", + packedSlotIndicators, + getPreviousVersionIndex(entryIndex)); + final int numberOfSlotsInEntry = Integer.bitCount(packedSlotIndicators); + for (int i = 0; i < numberOfSlotsInEntry; i++) { + output += String.format("%d", getIndexAtEntrySlot(entryIndex, i)); + if (i < numberOfSlotsInEntry - 1) { + output += ", "; + } + } + output += String.format(" ] (indexShift = %d)\n", indexShift); + final boolean nextLevelIsLeaf = (indexShift == LEAF_LEVEL_SHIFT); + for (int i = 0; i < numberOfSlotsInEntry; i++) { + final int nextLevelEntryIndex = getIndexAtEntrySlot(entryIndex, i); + if (nextLevelIsLeaf) { + output += leafEntryToString(nextLevelEntryIndex, indentLevel + 4); + } else { + output += nonLeafEntryToString(nextLevelEntryIndex, + indexShift - 4, indentLevel + 4); + } + } + } catch (Exception ex) { + output += String.format("Exception thrown at nonLeafEntry at index %d with indexShift %d\n", + entryIndex, indexShift); + } + return output; + } + + private String leafEntryToString(final int entryIndex, final int indentLevel) { + String output = ""; + for (int i = 0; i < indentLevel; i++) { + output += " "; + } + try { + output += "Leaf 
bytes : ";
+            for (int i = 56; i >= 0; i -= 8) {
+                output += String.format("0x%02x ", (getAtLongIndex(entryIndex) >>> i) & 0xff);
+            }
+            output += "\n";
+        } catch (Exception ex) {
+            output += String.format("Exception thrown at leafEntry at index %d\n", entryIndex);
+        }
+        return output;
+    }
+
+    private String recordedValuesToString() {
+        String output = "";
+        try {
+            for (IterationValue v : nonZeroValues()) {
+                output += String.format("[%d] : %d\n", v.getIndex(), v.getValue());
+            }
+            return output;
+        } catch (Exception ex) {
+            output += "!!! Exception thrown in value iteration...\n";
+        }
+        return output;
+    }
+
+    @Override
+    public String toString() {
+        String output = "PackedArrayContext:\n";
+        if (!isPacked()) {
+            return output + "Context is unpacked:\n" + unpackedToString();
+        }
+        for (int setNumber = 0; setNumber < NUMBER_OF_SETS; setNumber++) {
+            try {
+                int entryPointerIndex = SET_0_START_INDEX + setNumber;
+                int entryIndex = getIndexAtShortIndex(entryPointerIndex);
+                output += String.format("Set %d: root = %d \n", setNumber, entryIndex);
+                if (entryIndex == 0) continue;
+                output += nonLeafEntryToString(entryIndex, getTopLevelShift(), 4);
+            } catch (Exception ex) {
+                output += String.format("Exception thrown in set %d\n", setNumber);
+            }
+        }
+        output += recordedValuesToString();
+        return output;
+    }
+
+    private static class RetryException extends Exception {}
+}
diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/AbstractPackedLongArray.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/AbstractPackedLongArray.java
new file mode 100644
index 000000000..3e26f5dee
--- /dev/null
+++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/AbstractPackedLongArray.java
@@ -0,0 +1,406 @@
+package io.prometheus.client.HdrHistogram.packedarray;
+
+import java.io.Serializable;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+/**
+ * A packed array of signed 64 bit values that supports {@link #get get()}, {@link #set set()},
+ * {@link #add add()} and {@link #increment increment()} operations on the logical contents of the array.
+ */
+abstract class AbstractPackedLongArray implements Iterable<Long>, Serializable {
+    /**
+     * An {@link AbstractPackedLongArray} uses an {@link AbstractPackedArrayContext} to track
+     * the array's logical contents. Contexts may be switched when a context requires resizing
+     * to complete logical array operations (get, set, add, increment). Contexts are
+     * established and used within critical sections in order to facilitate concurrent
+     * implementors.
+     */
+
+    private static final int NUMBER_OF_SETS = 8;
+
+    private AbstractPackedArrayContext arrayContext;
+    private long startTimeStampMsec = Long.MAX_VALUE;
+    private long endTimeStampMsec = 0;
+
+    AbstractPackedArrayContext getArrayContext() {
+        return arrayContext;
+    }
+
+    void setArrayContext(AbstractPackedArrayContext newArrayContext) {
+        arrayContext = newArrayContext;
+    }
+
+    /**
+     * Get the start time stamp [optionally] stored with this array
+     * @return the start time stamp [optionally] stored with this array
+     */
+    public long getStartTimeStamp() {
+        return startTimeStampMsec;
+    }
+
+    /**
+     * Set the start time stamp value associated with this array to a given value.
+     * @param timeStampMsec the value to set the time stamp to, [by convention] in msec since the epoch.
+     */
+    public void setStartTimeStamp(final long timeStampMsec) {
+        this.startTimeStampMsec = timeStampMsec;
+    }
+
+    /**
+     * Get the end time stamp [optionally] stored with this array
+     * @return the end time stamp [optionally] stored with this array
+     */
+    public long getEndTimeStamp() {
+        return endTimeStampMsec;
+    }
+
+    /**
+     * Set the end time stamp value associated with this array to a given value.
+     * @param timeStampMsec the value to set the time stamp to, [by convention] in msec since the epoch.
+     */
+    public void setEndTimeStamp(final long timeStampMsec) {
+        this.endTimeStampMsec = timeStampMsec;
+    }
+
+    /**
+     * Set a new virtual length for the array.
+     * @param newVirtualArrayLength the new virtual length to apply to the array
+     */
+    abstract public void setVirtualLength(final int newVirtualArrayLength);
+
+    /**
+     * Create a copy of this array, complete with data and everything.
+     *
+     * @return A distinct copy of this array.
+     */
+    abstract public AbstractPackedLongArray copy();
+
+    abstract void resizeStorageArray(int newPhysicalLengthInLongs);
+
+    abstract void clearContents();
+
+    abstract long criticalSectionEnter();
+
+    abstract void criticalSectionExit(long criticalValueAtEnter);
+
+    @Override
+    public String toString() {
+        String output = "PackedArray:\n";
+        AbstractPackedArrayContext arrayContext = getArrayContext();
+        output += arrayContext.toString();
+        return output;
+    }
+
+    /**
+     * Get value at virtual index in the array
+     * @param index the virtual array index
+     * @return the array value at the virtual index given
+     */
+    public long get(final int index) {
+        long value = 0;
+        for (int byteNum = 0; byteNum < NUMBER_OF_SETS; byteNum++) {
+            int packedIndex = 0;
+            long byteValueAtPackedIndex = 0;
+            do {
+                int newArraySize = 0;
+                long criticalValue = criticalSectionEnter();
+                try {
+                    // Establish context within critical section
+                    AbstractPackedArrayContext arrayContext = getArrayContext();
+                    // Deal with unpacked context:
+                    if (!arrayContext.isPacked()) {
+                        return arrayContext.getAtUnpackedIndex(index);
+                    }
+                    // Context is packed:
+                    packedIndex = arrayContext.getPackedIndex(byteNum, index, false);
+                    if (packedIndex < 0) {
+                        return value;
+                    }
+                    byteValueAtPackedIndex =
+                            (((long) arrayContext.getAtByteIndex(packedIndex)) & 0xff) << (byteNum << 3);
+                } catch (ResizeException ex) {
+                    newArraySize = ex.getNewSize(); // Resize outside of critical section
+                } finally {
+                    criticalSectionExit(criticalValue);
+                    if (newArraySize != 0) {
+                        resizeStorageArray(newArraySize);
+                    }
+                }
+            } while (packedIndex == 0);
+
+            value += byteValueAtPackedIndex;
+        }
+        return value;
+    }
+
+    /**
+     * Increment value at a virtual index in the array
+     * @param index virtual index of value to increment
+     */
+    public void increment(final int index) {
+        add(index, 1);
+    }
+
+    /**
+     * Add to a value at a virtual index in the array
+     * @param index the virtual index of the value to be added to
+     * @param value the value to add
+     */
+    public void add(final int index, final long value) {
+        if (value == 0) {
+            return;
+        }
+        long remainingValueToAdd = value;
+
+        do {
+            try {
+                long byteMask = 0xff;
+                for (int byteNum = 0, byteShift = 0;
+                     byteNum < NUMBER_OF_SETS;
+                     byteNum++, byteShift += 8, byteMask <<= 8) {
+                    final long criticalValue = criticalSectionEnter();
+                    try {
+                        // Establish context within critical section
+                        AbstractPackedArrayContext arrayContext = getArrayContext();
+                        // Deal with unpacked context:
+                        if (!arrayContext.isPacked()) {
+                            arrayContext.addAndGetAtUnpackedIndex(index, remainingValueToAdd);
+                            return;
+                        }
+                        // Context is packed:
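+                        // (Worked carry example, for illustration: if the set-0 byte
+                        // already holds 0xff and 1 is being added, addAtByteIndex()
+                        // below returns 0x100; the 0x100 carry bit is shifted left by
+                        // byteShift and folded back into remainingValueToAdd, so the
+                        // next loop iteration adds 1 to the set-1 byte.)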
int packedIndex = arrayContext.getPackedIndex(byteNum, index, true); + + long amountToAddAtSet = remainingValueToAdd & byteMask; + byte byteToAdd = (byte) (amountToAddAtSet >> byteShift); + long afterAddByteValue = arrayContext.addAtByteIndex(packedIndex, byteToAdd); + + // Reduce remaining value to add by amount just added: + remainingValueToAdd -= amountToAddAtSet; + + // Account for carry: + long carryAmount = afterAddByteValue & 0x100; + remainingValueToAdd += carryAmount << byteShift; + + if (remainingValueToAdd == 0) { + return; // nothing to add to higher magnitudes + } + } finally { + criticalSectionExit(criticalValue); + + } + } + return; + } catch (ResizeException ex) { + resizeStorageArray(ex.getNewSize()); // Resize outside of critical section + } + } while (true); + } + + /** + * Set the value at a virtual index in the array + * @param index the virtual index of the value to set + * @param value the value to set + */ + public void set(final int index, final long value) { + int bytesAlreadySet = 0; + do { + long valueForNextLevels = value; + try { + for (int byteNum = 0; byteNum < NUMBER_OF_SETS; byteNum++) { + long criticalValue = criticalSectionEnter(); + try { + // Establish context within: critical section + AbstractPackedArrayContext arrayContext = getArrayContext(); + // Deal with unpacked context: + if (!arrayContext.isPacked()) { + arrayContext.setAtUnpackedIndex(index, value); + return; + } + // Context is packed: + if (valueForNextLevels == 0) { + // Special-case zeros to avoid inflating packed array for no reason + int packedIndex = arrayContext.getPackedIndex(byteNum, index, false); + if (packedIndex < 0) { + return; // no need to create entries for zero values if they don't already exist + } + } + // Make sure byte is populated: + int packedIndex = arrayContext.getPackedIndex(byteNum, index, true); + + // Determine value to write, and prepare for next levels + byte byteToWrite = (byte) (valueForNextLevels & 0xff); + valueForNextLevels >>= 8; + + if (byteNum < bytesAlreadySet) { + // We want to avoid writing to the same byte twice when not doing so for the + // entire 64 bit value atomically, as doing so opens a race with e.g. concurrent + // adders. So don't actually write the byte if it has been written before.
+ continue; + } + arrayContext.setAtByteIndex(packedIndex, byteToWrite); + bytesAlreadySet++; + } finally { + criticalSectionExit(criticalValue); + } + } + return; + } catch (ResizeException ex) { + resizeStorageArray(ex.getNewSize()); // Resize outside of critical section + } + } while (true); + } + + /** + * Add the contents of the other array to this one + * + * @param other the other array to add to this array + */ + public void add(final AbstractPackedLongArray other) { + for (IterationValue v : other.nonZeroValues()) { + add(v.getIndex(), v.getValue()); + } + } + + /** + * Clear the array contents + */ + public void clear() { + clearContents(); + } + + /** + * Get the current physical length (in longs) of the array's backing storage + * @return the current physical length (in longs) of the array's current backing storage + */ + public int getPhysicalLength() { + return getArrayContext().length(); + } + + /** + * Get the (virtual) length of the array + * @return the (virtual) length of the array + */ + public int length() { + return getArrayContext().getVirtualLength(); + } + + // Regular array iteration (iterates over all virtual indexes, zero-valued or not): + + class AllValuesIterator implements Iterator<Long> { + + int nextVirtualIndex = 0; + + @Override + public Long next() { + if (!hasNext()) { + throw new NoSuchElementException(); + } + return get(nextVirtualIndex++); + } + + @Override + public boolean hasNext() { + return ((nextVirtualIndex >= 0) && + (nextVirtualIndex < length())); + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + } + + /** + * An Iterator over all values in the array + * @return an Iterator over all values in the array + */ + public Iterator<Long> iterator() { + return new AllValuesIterator(); + } + + /** + * An Iterable over all non-zero values in the array + * @return an Iterable over all non-zero values in the array + */ + public Iterable<IterationValue> nonZeroValues() { + return getArrayContext().nonZeroValues(); + } + + /** + * Determine if this array is equivalent to another. + * + * @param other the other array to compare to + * @return true if this array is equivalent to the other + */ + @Override + public boolean equals(final Object other) { + if (this == other) { + return true; + } + if (!(other instanceof AbstractPackedLongArray)) { + return false; + } + AbstractPackedLongArray that = (AbstractPackedLongArray) other; + if (length() != that.length()) { + return false; + } + if (this.arrayContext.isPacked() || that.arrayContext.isPacked()) { + // If at least one of the arrays is packed, comparing only the + // non-zero values that exist in both arrays, using two passes, + // will likely be more efficient than a single all-index pass: + // - If both are packed, it will obviously be much faster. + // - If one is packed and the other is not, we would be visiting + // every index in the non-packed array, in one of the passes, + // but would still only visit the non-zero elements in the + // packed one.
+ for (IterationValue v : this.nonZeroValues()) { + if (that.get(v.getIndex()) != v.getValue()) { + return false; + } + } + for (IterationValue v : that.nonZeroValues()) { + if (this.get(v.getIndex()) != v.getValue()) { + return false; + } + } + } else { + for (int i = 0; i < this.length(); i++) { + if (this.get(i) != that.get(i)) { + return false; + } + } + } + return true; + } + + static final int NUMBER_OF_NON_ZEROS_TO_HASH = 8; + + @Override + public int hashCode() { + int h = 0; + h = oneAtATimeHashStep(h, length()); + int count = 0; + // Include the first NUMBER_OF_NON_ZEROS_TO_HASH non-zeros in the hash: + for (IterationValue v : nonZeroValues()) { + if (++count > NUMBER_OF_NON_ZEROS_TO_HASH) { + break; + } + h = oneAtATimeHashStep(h, (int) v.getIndex()); + h = oneAtATimeHashStep(h, (int) v.getValue()); + } + h += (h << 3); + h ^= (h >> 11); + h += (h << 15); + return h; + } + + private int oneAtATimeHashStep(final int incomingHash, final int v) { + int h = incomingHash; + h += v; + h += (h << 10); + h ^= (h >> 6); + return h; + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/ConcurrentPackedArrayContext.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/ConcurrentPackedArrayContext.java new file mode 100644 index 000000000..244555fd3 --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/ConcurrentPackedArrayContext.java @@ -0,0 +1,124 @@ +package io.prometheus.client.HdrHistogram.packedarray; + +import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; +import java.util.concurrent.atomic.AtomicLongArray; + + +class ConcurrentPackedArrayContext extends PackedArrayContext { + + ConcurrentPackedArrayContext(final int virtualLength, + final int initialPhysicalLength, + final boolean allocateArray) { + super(virtualLength, initialPhysicalLength, false); + if (allocateArray) { + array = new AtomicLongArray(getPhysicalLength()); + init(virtualLength); + } + } + + ConcurrentPackedArrayContext(final int virtualLength, + final int initialPhysicalLength) { + this(virtualLength, initialPhysicalLength, true); + } + + ConcurrentPackedArrayContext(final int newVirtualCountsArraySize, + final AbstractPackedArrayContext from, + final int arrayLength) { + this(newVirtualCountsArraySize, arrayLength); + if (isPacked()) { + populateEquivalentEntriesWithZerosFromOther(from); + } + } + + private AtomicLongArray array; + private volatile int populatedShortLength; + + private static final AtomicIntegerFieldUpdater populatedShortLengthUpdater = + AtomicIntegerFieldUpdater.newUpdater(ConcurrentPackedArrayContext.class, "populatedShortLength"); + + @Override + int length() { + return array.length(); + } + + @Override + int getPopulatedShortLength() { + return populatedShortLength; + } + + @Override + boolean casPopulatedShortLength(final int expectedPopulatedShortLength, final int newPopulatedShortLength) { + return populatedShortLengthUpdater.compareAndSet(this, expectedPopulatedShortLength, newPopulatedShortLength); + } + + @Override + boolean casPopulatedLongLength(final int expectedPopulatedLongLength, final int newPopulatedLongLength) { + int existingShortLength = getPopulatedShortLength(); + int existingLongLength = (existingShortLength + 3) >> 2; + if (existingLongLength != expectedPopulatedLongLength) return false; + return casPopulatedShortLength(existingShortLength, newPopulatedLongLength << 2); + } + + @Override + long getAtLongIndex(final int longIndex) { + return 
array.get(longIndex); + } + + @Override + boolean casAtLongIndex(final int longIndex, final long expectedValue, final long newValue) { + return array.compareAndSet(longIndex, expectedValue, newValue); + } + + @Override + void lazySetAtLongIndex(final int longIndex, final long newValue) { + array.lazySet(longIndex, newValue); + } + + @Override + void clearContents() { + for (int i = 0; i < array.length(); i++) { + array.lazySet(i, 0); + } + init(getVirtualLength()); + } + + @Override + void resizeArray(final int newLength) { + final AtomicLongArray newArray = new AtomicLongArray(newLength); + int copyLength = Math.min(array.length(), newLength); + for (int i = 0; i < copyLength; i++) { + newArray.lazySet(i, array.get(i)); + } + array = newArray; + } + + @Override + long getAtUnpackedIndex(final int index) { + return array.get(index); + } + + @Override + void setAtUnpackedIndex(final int index, final long newValue) { + array.set(index, newValue); + } + + @Override + void lazySetAtUnpackedIndex(final int index, final long newValue) { + array.lazySet(index, newValue); + } + + @Override + long incrementAndGetAtUnpackedIndex(final int index) { + return array.incrementAndGet(index); + } + + @Override + long addAndGetAtUnpackedIndex(final int index, final long valueToAdd) { + return array.addAndGet(index, valueToAdd); + } + + @Override + String unpackedToString() { + return array.toString(); + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/ConcurrentPackedLongArray.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/ConcurrentPackedLongArray.java new file mode 100644 index 000000000..6254ec4ed --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/ConcurrentPackedLongArray.java @@ -0,0 +1,169 @@ +package io.prometheus.client.HdrHistogram.packedarray; + +import io.prometheus.client.HdrHistogram.WriterReaderPhaser; + +import java.io.IOException; +import java.io.ObjectInputStream; + +/** + * A Packed array of signed 64 bit values that supports {@link #get get()}, {@link #set set()}, {@link #add add()} and + * {@link #increment increment()} operations on the logical contents of the array. + *

+ * {@link ConcurrentPackedLongArray} supports concurrent accumulation, with the {@link #add add()} and {@link #increment + * increment()} methods providing lossless atomic accumulation in the presence of multiple writers. However, it is + * important to note that {@link #add add()} and {@link #increment increment()} are the *only* safe concurrent + * operations, and that all other operations, including {@link #get get()}, {@link #set set()} and {@link #clear()} may + * produce "surprising" results if used on an array that is not at rest. + *

+ * While the {@link #add add()} and {@link #increment increment()} methods are not quite wait-free, they come "close" to + that behavior in the sense that a given thread will incur a total of no more than a capped fixed number (e.g. 74 in a + current implementation) of non-wait-free add or increment operations during the lifetime of an array, regardless of + the number of operations done. + *

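To make the concurrency contract above concrete, here is a minimal editorial sketch (not part of the diff; thread count, virtual length, and indexes are arbitrary illustrations). Writers only call increment() concurrently, and get() is called once the array is at rest:

    import io.prometheus.client.HdrHistogram.packedarray.ConcurrentPackedLongArray;

    public class ConcurrentPackedLongArraySketch {
        public static void main(String[] args) throws InterruptedException {
            final ConcurrentPackedLongArray array = new ConcurrentPackedLongArray(1024);
            Runnable writer = new Runnable() {
                @Override
                public void run() {
                    for (int i = 0; i < 100000; i++) {
                        array.increment(i % 1024); // lossless under concurrent writers
                    }
                }
            };
            Thread t1 = new Thread(writer);
            Thread t2 = new Thread(writer);
            t1.start();
            t2.start();
            t1.join();
            t2.join();
            // Both writers have finished, so the array is "at rest" and get() is safe:
            long total = 0;
            for (int i = 0; i < array.length(); i++) {
                total += array.get(i);
            }
            System.out.println(total); // expect 200000: no increments were lost
        }
    }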
+ */ +public class ConcurrentPackedLongArray extends PackedLongArray { + + public ConcurrentPackedLongArray(final int virtualLength) { + this(virtualLength, AbstractPackedArrayContext.MINIMUM_INITIAL_PACKED_ARRAY_CAPACITY); + } + + public ConcurrentPackedLongArray(final int virtualLength, final int initialPhysicalLength) { + super(); + setArrayContext(new ConcurrentPackedArrayContext(virtualLength, initialPhysicalLength)); + } + + transient WriterReaderPhaser wrp = new WriterReaderPhaser(); + + @Override + void resizeStorageArray(final int newPhysicalLengthInLongs) { + AbstractPackedArrayContext inactiveArrayContext; + try { + wrp.readerLock(); + + // Create a new array context, mimicking the structure of the currently active + // context, but without actually populating any values. + ConcurrentPackedArrayContext newArrayContext = + new ConcurrentPackedArrayContext( + getArrayContext().getVirtualLength(), + getArrayContext(), newPhysicalLengthInLongs + ); + + // Flip the current live array context and the newly created one: + inactiveArrayContext = getArrayContext(); + setArrayContext(newArrayContext); + + wrp.flipPhase(); + + // The now inactive array context is stable, and the new array context is active. + // We don't want to try to record values from the inactive into the new array context + // here (under the wrp reader lock) because we could deadlock if resizing is needed. + // Instead, value recording will be done after we release the read lock. + + } finally { + wrp.readerUnlock(); + } + + // Record all contents from the now inactive array to the new live one: + for (IterationValue v : inactiveArrayContext.nonZeroValues()) { + add(v.getIndex(), v.getValue()); + } + + // The inactive array contents are now fully committed into the newly resized live array. It can die in peace. + + } + + @Override + public void setVirtualLength(final int newVirtualArrayLength) { + if (newVirtualArrayLength < length()) { + throw new IllegalArgumentException( + "Cannot set virtual length, as requested length " + newVirtualArrayLength + + " is smaller than the current virtual length " + length()); + } + AbstractPackedArrayContext inactiveArrayContext; + try { + wrp.readerLock(); + AbstractPackedArrayContext currentArrayContext = getArrayContext(); + if (currentArrayContext.isPacked() && + (currentArrayContext.determineTopLevelShiftForVirtualLength(newVirtualArrayLength) == + currentArrayContext.getTopLevelShift())) { + // No changes to the array context contents are needed. Just change the virtual length. + currentArrayContext.setVirtualLength(newVirtualArrayLength); + return; + } + inactiveArrayContext = currentArrayContext; + setArrayContext( + new ConcurrentPackedArrayContext( + newVirtualArrayLength, + inactiveArrayContext, + inactiveArrayContext.length() + )); + + wrp.flipPhase(); + + // The now inactive array context is stable, and the new array context is active. + // We don't want to try to record values from the inactive into the new array context + // here (under the wrp reader lock) because we could deadlock if resizing is needed. + // Instead, value recording will be done after we release the read lock.
+ + } finally { + wrp.readerUnlock(); + } + + for (IterationValue v : inactiveArrayContext.nonZeroValues()) { + add(v.getIndex(), v.getValue()); + } + } + + @Override + public ConcurrentPackedLongArray copy() { + ConcurrentPackedLongArray copy = new ConcurrentPackedLongArray(this.length(), this.getPhysicalLength()); + copy.add(this); + return copy; + } + + @Override + void clearContents() { + try { + wrp.readerLock(); + getArrayContext().clearContents(); + } finally { + wrp.readerUnlock(); + } + } + + @Override + long criticalSectionEnter() { + return wrp.writerCriticalSectionEnter(); + } + + @Override + void criticalSectionExit(long criticalValueAtEnter) { + wrp.writerCriticalSectionExit(criticalValueAtEnter); + } + + @Override + public String toString() { + try { + wrp.readerLock(); + return super.toString(); + } finally { + wrp.readerUnlock(); + } + } + + @Override + public void clear() { + try { + wrp.readerLock(); + super.clear(); + } finally { + wrp.readerUnlock(); + } + } + + private void readObject(final ObjectInputStream o) + throws IOException, ClassNotFoundException { + o.defaultReadObject(); + wrp = new WriterReaderPhaser(); + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/IterationValue.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/IterationValue.java new file mode 100644 index 000000000..36c03ef9d --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/IterationValue.java @@ -0,0 +1,33 @@ +package io.prometheus.client.HdrHistogram.packedarray; + +/** + * An iteration value representing the index iterated to, and the value found at that index + */ +public class IterationValue { + IterationValue() { + } + + void set(final int index, final long value) { + this.index = index; + this.value = value; + } + + /** + * The index iterated to + * @return the index iterated to + */ + public int getIndex() { + return index; + } + + /** + * The value at the index iterated to + * @return the value at the index iterated to + */ + public long getValue() { + return value; + } + + private int index; + private long value; +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/PackedArrayContext.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/PackedArrayContext.java new file mode 100644 index 000000000..5ebcc09ce --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/PackedArrayContext.java @@ -0,0 +1,119 @@ +package io.prometheus.client.HdrHistogram.packedarray; + +import java.util.Arrays; + +/** + * A non-concurrent array context. No atomics used. 
+ */ +class PackedArrayContext extends AbstractPackedArrayContext { + + PackedArrayContext(final int virtualLength, + final int initialPhysicalLength, + final boolean allocateArray) { + super(virtualLength, initialPhysicalLength); + if (allocateArray) { + array = new long[getPhysicalLength()]; + init(virtualLength); + } + } + + PackedArrayContext(final int virtualLength, + final int initialPhysicalLength) { + this(virtualLength, initialPhysicalLength, true); + } + + PackedArrayContext(final int virtualLength, + final AbstractPackedArrayContext from, + final int newPhysicalArrayLength) { + this(virtualLength, newPhysicalArrayLength); + if (isPacked()) { + populateEquivalentEntriesWithZerosFromOther(from); + } + } + + private long[] array; + private int populatedShortLength = 0; + + @Override + int length() { + return array.length; + } + + @Override + int getPopulatedShortLength() { + return populatedShortLength; + } + + @Override + boolean casPopulatedShortLength(final int expectedPopulatedShortLength, final int newPopulatedShortLength) { + if (this.populatedShortLength != expectedPopulatedShortLength) return false; + this.populatedShortLength = newPopulatedShortLength; + return true; + } + + @Override + boolean casPopulatedLongLength(final int expectedPopulatedLongLength, final int newPopulatedLongLength) { + if (getPopulatedLongLength() != expectedPopulatedLongLength) return false; + return casPopulatedShortLength(populatedShortLength, newPopulatedLongLength << 2); + } + + @Override + long getAtLongIndex(final int longIndex) { + return array[longIndex]; + } + + @Override + boolean casAtLongIndex(final int longIndex, final long expectedValue, final long newValue) { + if (array[longIndex] != expectedValue) return false; + array[longIndex] = newValue; + return true; + } + + @Override + void lazySetAtLongIndex(final int longIndex, final long newValue) { + array[longIndex] = newValue; + } + + @Override + void clearContents() { + Arrays.fill(array, 0); + init(getVirtualLength()); + } + + @Override + void resizeArray(final int newLength) { + array = Arrays.copyOf(array, newLength); + } + + @Override + long getAtUnpackedIndex(final int index) { + return array[index]; + } + + @Override + void setAtUnpackedIndex(final int index, final long newValue) { + array[index] = newValue; + } + + @Override + void lazySetAtUnpackedIndex(final int index, final long newValue) { + array[index] = newValue; + } + + @Override + long incrementAndGetAtUnpackedIndex(final int index) { + array[index]++; + return array[index]; + } + + @Override + long addAndGetAtUnpackedIndex(final int index, final long valueToAdd) { + array[index] += valueToAdd; + return array[index]; + } + + @Override + String unpackedToString() { + return Arrays.toString(array); + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/PackedArrayRecorder.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/PackedArrayRecorder.java new file mode 100644 index 000000000..823871bd4 --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/PackedArrayRecorder.java @@ -0,0 +1,296 @@ +/** + * Written by Gil Tene of Azul Systems, and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram.packedarray; + +import io.prometheus.client.HdrHistogram.WriterReaderPhaser; + +import java.util.concurrent.atomic.AtomicLong; + +/** + * Records increments 
and adds of integer values at indexes of a logical array of 64 bit signed integer values, and + * provides stable interval {@link PackedLongArray} samples from live recorded data without interrupting or stalling + * active recording of values. Each interval array provided contains all values accumulated since the previous + * interval array was taken. + *

+ * This pattern is commonly used in logging interval accumulator information while recording is ongoing. + *

+ * {@link PackedArrayRecorder} supports fully concurrent + * {@link PackedArrayRecorder#increment(int)} and + * {@link PackedArrayRecorder#add(int, long)} calls. + * While the {@link #increment increment()} and {@link #add add()} methods are not quite wait-free, they + * come "close" to that behavior in the sense that a given thread will incur a total of no more than a capped + * fixed number (e.g. 74 in a current implementation) of non-wait-free add or increment operations during + * the lifetime of an interval array (including across recycling of that array across intervals within the + * same recorder), regardless of the number of operations done. + *

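Before the usage pattern shown below, a hedged illustration of the concurrency claim (sizes and thread counts are illustrative, not from the diff): any number of threads may call add()/increment() on the same recorder while a sampling thread takes interval snapshots:

    import io.prometheus.client.HdrHistogram.packedarray.PackedArrayRecorder;

    public class ConcurrentRecordingSketch {
        public static void main(String[] args) throws InterruptedException {
            final PackedArrayRecorder recorder = new PackedArrayRecorder(4096);
            Runnable worker = new Runnable() {
                @Override
                public void run() {
                    for (int i = 0; i < 50000; i++) {
                        recorder.add(i % 4096, 2);    // safe to call concurrently
                        recorder.increment(i % 4096); // also safe
                    }
                }
            };
            Thread a = new Thread(worker);
            Thread b = new Thread(worker);
            a.start();
            b.start();
            a.join();
            b.join();
            // A stable snapshot of everything accumulated so far:
            System.out.println(recorder.getIntervalArray().get(0));
        }
    }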
+ * A common pattern for using a {@link PackedArrayRecorder} looks like this: + *


+ * PackedArrayRecorder recorder = new PackedArrayRecorder(virtualLength); //
+ * PackedLongArray intervalArray = null;
+ * ...
+ * [start of some loop construct that periodically wants to grab an interval array]
+ *   ...
+ *   // Get interval array, recycling previous interval array:
+ *   intervalArray = recorder.getIntervalArray(intervalArray);
+ *   // Process the interval array, which is nice and stable here:
+ *   myLogWriter.logArrayContents(intervalArray);
+ *   ...
+ * [end of loop construct]
+ * 
+ * + */ + +public class PackedArrayRecorder { + private static final AtomicLong instanceIdSequencer = new AtomicLong(1); + private final long instanceId = instanceIdSequencer.getAndIncrement(); + + private final WriterReaderPhaser recordingPhaser = new WriterReaderPhaser(); + + private volatile PackedLongArray activeArray; + + /** + * Construct a {@link PackedArrayRecorder} with a given (virtual) array length. + * + * @param virtualLength The (virtual) array length + */ + public PackedArrayRecorder(final int virtualLength) { + activeArray = new InternalConcurrentPackedLongArray(instanceId, virtualLength); + activeArray.setStartTimeStamp(System.currentTimeMillis()); + } + + /** + * Construct a {@link PackedArrayRecorder} with a given (virtual) array length, starting with a given + * initial physical backing store length + * + * @param virtualLength The (virtual) array length + * @param initialPhysicalLength The initial physical backing store length + */ + public PackedArrayRecorder(final int virtualLength, final int initialPhysicalLength) { + activeArray = new InternalConcurrentPackedLongArray(instanceId, virtualLength, initialPhysicalLength); + activeArray.setStartTimeStamp(System.currentTimeMillis()); + } + + /** + * Returns the virtual length of the array represented by this recorder + * @return The virtual length of the array represented by this recorder + */ + public int length() { + return activeArray.length(); + } + + /** + * Change the (virtual) length of the array represented by this recorder + * @param newVirtualLength the new (virtual) length to use + */ + public void setVirtualLength(int newVirtualLength) { + try { + recordingPhaser.readerLock(); + // We don't care about concurrent modifications to the array, as setVirtualLength() in the + // ConcurrentPackedLongArray takes care of those. However, we must perform the change of virtual + // length under the recorder's readerLock protection to prevent mid-change observations: + activeArray.setVirtualLength(newVirtualLength); + } finally { + recordingPhaser.readerUnlock(); + } + } + + /** + * Increment a value at a given index in the array + * @param index the index of the value to be incremented + * @throws ArrayIndexOutOfBoundsException (may throw) if index exceeds length() + */ + public void increment(final int index) throws ArrayIndexOutOfBoundsException { + long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter(); + try { + activeArray.increment(index); + } finally { + recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter); + } + } + + /** + * Add to a value at a given index in the array + * @param index The index of value to add to + * @param valueToAdd The amount to add to the value at the given index + * @throws ArrayIndexOutOfBoundsException (may throw) if index exceeds length() + */ + public void add(final int index, final long valueToAdd) throws ArrayIndexOutOfBoundsException { + long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter(); + try { + activeArray.add(index, valueToAdd); + } finally { + recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter); + } + } + + /** + * Get an interval array, which will include a stable, consistent view of all values + * accumulated since the last interval array was taken. + *

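A compilable rendering of the recycling pattern from the class javadoc might look like the following sketch (the recording source, interval length, and printed index are stand-ins):

    import io.prometheus.client.HdrHistogram.packedarray.PackedArrayRecorder;
    import io.prometheus.client.HdrHistogram.packedarray.PackedLongArray;

    public class IntervalSamplingSketch {
        public static void main(String[] args) throws InterruptedException {
            PackedArrayRecorder recorder = new PackedArrayRecorder(1024);
            PackedLongArray intervalArray = null; // recycled across iterations

            for (int interval = 0; interval < 5; interval++) {
                recorder.increment(7); // stand-in for concurrent recording activity
                Thread.sleep(1000);
                // Swap in a fresh active array and get back a stable snapshot,
                // recycling the snapshot we processed last time around:
                intervalArray = recorder.getIntervalArray(intervalArray);
                System.out.println("index 7 this interval: " + intervalArray.get(7));
            }
        }
    }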
+ * Calling this method is equivalent to calling {@code getIntervalArray(null)}. It is generally recommended + * that the {@link PackedArrayRecorder#getIntervalArray(PackedLongArray arrayToRecycle) + * getIntervalArray(arrayToRecycle)} form be used for + regular interval array sampling, as that form accepts a previously returned interval array that can be + recycled internally to avoid allocation and content copying operations, and is therefore significantly + more efficient for repeated use than {@link PackedArrayRecorder#getIntervalArray()}. + *

+ * Calling {@link PackedArrayRecorder#getIntervalArray()} will reset the values at + * all indexes of the array tracked by the recorder, and start accumulating values for the next interval. + * + * @return an array containing the values accumulated since the last interval array was taken. + */ + public synchronized PackedLongArray getIntervalArray() { + return getIntervalArray(null); + } + + /** + * Get an interval array, which will include a stable, consistent view of all values + * accumulated since the last interval array was taken. + *

+ * {@link PackedArrayRecorder#getIntervalArray(PackedLongArray arrayToRecycle) + * getIntervalArray(arrayToRecycle)} + accepts a previously returned interval array that can be recycled internally to avoid allocation + and content copying operations, and is therefore significantly more efficient for repeated use than + {@link PackedArrayRecorder#getIntervalArray()}. The provided {@code arrayToRecycle} must + either be null or an interval array returned by a previous call to + {@link PackedArrayRecorder#getIntervalArray(PackedLongArray arrayToRecycle) + getIntervalArray(arrayToRecycle)} or + {@link PackedArrayRecorder#getIntervalArray()}. + *

+ * NOTE: The caller is responsible for not recycling the same returned interval array more than once. If + * the same interval array instance is recycled more than once, behavior is undefined. + *

+ * Calling {@link PackedArrayRecorder#getIntervalArray(PackedLongArray arrayToRecycle) + * getIntervalArray(arrayToRecycle)} will reset the values at all indexes of the array + * tracked by the recorder, and start accumulating values for the next interval. + * + * @param arrayToRecycle a previously returned interval array (from this instance of + * {@link PackedArrayRecorder}) that may be recycled to avoid allocation and + * copy operations. + * @return an array containing the values accumulated since the last interval array was taken. + */ + public synchronized PackedLongArray getIntervalArray(final PackedLongArray arrayToRecycle) { + return getIntervalArray(arrayToRecycle, true); + } + + /** + * Get an interval array, which will include a stable, consistent view of all values + * accumulated since the last interval array was taken. + *

+ * {@link PackedArrayRecorder#getIntervalArray(PackedLongArray arrayToRecycle) + * getIntervalArray(arrayToRecycle)} + accepts a previously returned interval array that can be recycled internally to avoid allocation + and content copying operations, and is therefore significantly more efficient for repeated use than + {@link PackedArrayRecorder#getIntervalArray()}. The provided {@code arrayToRecycle} must + either be null or an interval array returned by a previous call to + {@link PackedArrayRecorder#getIntervalArray(PackedLongArray arrayToRecycle) + getIntervalArray(arrayToRecycle)} or + {@link PackedArrayRecorder#getIntervalArray()}. + *

+ * NOTE: The caller is responsible for not recycling the same returned interval array more than once. If + * the same interval array instance is recycled more than once, behavior is undefined. + *

+ * Calling {@link PackedArrayRecorder#getIntervalArray(PackedLongArray arrayToRecycle) + * getIntervalArray(arrayToRecycle, enforceContainingInstance)} will reset the values at all indexes + * of the array tracked by the recorder, and start accumulating values for the next interval. + * + * @param arrayToRecycle a previously returned interval array that may be recycled to avoid allocation and + * copy operations. + * @param enforceContainingInstance if true, will only allow recycling of arrays previously returned from this + * instance of {@link PackedArrayRecorder}. If false, will allow recycling arrays + * previously returned by other instances of {@link PackedArrayRecorder}. + * @return an array containing the values accumulated since the last interval array was taken. + */ + public synchronized PackedLongArray getIntervalArray(final PackedLongArray arrayToRecycle, + final boolean enforceContainingInstance) { + // Verify that replacement array can validly be used as an inactive array replacement: + validateFitAsReplacementArray(arrayToRecycle, enforceContainingInstance); + PackedLongArray sampledArray = performIntervalSample(arrayToRecycle); + return sampledArray; + } + + /** + * Reset the array contents to all zeros. + */ + public synchronized void reset() { + // the currently active array is reset each time we flip: + performIntervalSample(null); + } + + private PackedLongArray performIntervalSample(final PackedLongArray arrayToRecycle) { + PackedLongArray inactiveArray = arrayToRecycle; + try { + recordingPhaser.readerLock(); + + // Make sure we have an inactive version to flip in: + if (inactiveArray == null) { + if (activeArray instanceof InternalConcurrentPackedLongArray) { + inactiveArray = new InternalConcurrentPackedLongArray(instanceId, activeArray.length()); + } else { + throw new IllegalStateException("Unexpected internal array type for activeArray"); + } + } else { + inactiveArray.clear(); + } + + // Swap active and inactive arrays: + final PackedLongArray tempArray = inactiveArray; + inactiveArray = activeArray; + activeArray = tempArray; + + // Mark end time of previous interval and start time of new one: + long now = System.currentTimeMillis(); + activeArray.setStartTimeStamp(now); + inactiveArray.setEndTimeStamp(now); + + // Make sure we are not in the middle of recording a value on the previously active array: + + // Flip phase to make sure no recordings that were in flight pre-flip are still active: + recordingPhaser.flipPhase(500000L /* yield in 0.5 msec units if needed */); + } finally { + recordingPhaser.readerUnlock(); + } + return inactiveArray; + } + + private static class InternalConcurrentPackedLongArray extends ConcurrentPackedLongArray { + private final long containingInstanceId; + + private InternalConcurrentPackedLongArray(final long id, int virtualLength, final int initialPhysicalLength) { + super(virtualLength, initialPhysicalLength); + this.containingInstanceId = id; + } + + private InternalConcurrentPackedLongArray(final long id, final int virtualLength) { + super(virtualLength); + this.containingInstanceId = id; + } + } + + private void validateFitAsReplacementArray(final PackedLongArray replacementArray, + final boolean enforceContainingInstance) { + boolean bad = true; + if (replacementArray == null) { + bad = false; + } else if (replacementArray instanceof InternalConcurrentPackedLongArray) { + if ((activeArray instanceof InternalConcurrentPackedLongArray) + && + ((!enforceContainingInstance) || +
(((InternalConcurrentPackedLongArray)replacementArray).containingInstanceId == + ((InternalConcurrentPackedLongArray) activeArray).containingInstanceId) + )) { + bad = false; + } + } + if (bad) { + throw new IllegalArgumentException("replacement array must have been obtained via a previous" + + " getIntervalArray() call from this " + this.getClass().getName() + + (enforceContainingInstance ? " instance" : " class")); + } + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/PackedArraySingleWriterRecorder.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/PackedArraySingleWriterRecorder.java new file mode 100644 index 000000000..a17f60d27 --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/PackedArraySingleWriterRecorder.java @@ -0,0 +1,297 @@ +/** + * Written by Gil Tene of Azul Systems, and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * @author Gil Tene + */ + +package io.prometheus.client.HdrHistogram.packedarray; + +import io.prometheus.client.HdrHistogram.WriterReaderPhaser; + +import java.util.concurrent.atomic.AtomicLong; + +/** + * Records increments and adds of integer values at indexes of a logical array of 64 bit signed integer values, and + * provides stable interval {@link PackedLongArray} samples from live recorded data without interrupting or stalling + * active recording of values. Each interval array provided contains all values accumulated since the previous + * interval array was taken. + *

+ * This pattern is commonly used in logging interval accumulator information while recording is ongoing. + *

+ * {@link PackedArraySingleWriterRecorder} expects only a single thread (the "single writer") to call + * {@link PackedArraySingleWriterRecorder#increment(int)} or + * {@link PackedArraySingleWriterRecorder#add(int, long)} at any point in time. + * It DOES NOT safely support concurrent increment or add calls. + * While the {@link #increment increment()} and {@link #add add()} methods are not quite wait-free, they + * come "close" to that behavior in the sense that a given thread will incur a total of no more than a capped + * fixed number (e.g. 74 in a current implementation) of non-wait-free add or increment operations during + * the lifetime of an interval array (including across recycling of that array across intervals within the + * same recorder), regardless of the number of operations done. + *

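For contrast with the fully concurrent recorder above, a minimal single-writer sketch (all recording happens from one thread; sizes are illustrative assumptions):

    import io.prometheus.client.HdrHistogram.packedarray.PackedArraySingleWriterRecorder;
    import io.prometheus.client.HdrHistogram.packedarray.PackedLongArray;

    public class SingleWriterSketch {
        public static void main(String[] args) {
            PackedArraySingleWriterRecorder recorder = new PackedArraySingleWriterRecorder(256);
            for (int i = 0; i < 1000; i++) {
                recorder.increment(i % 256); // only ever called from this one thread
            }
            PackedLongArray snapshot = recorder.getIntervalArray(); // stable interval view
            System.out.println(snapshot.get(0) + " observations at index 0");
        }
    }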
+ * A common pattern for using a {@link PackedArraySingleWriterRecorder} looks like this: + *


+ * PackedArraySingleWriterRecorder recorder = new PackedArraySingleWriterRecorder(virtualLength); //
+ * PackedLongArray intervalArray = null;
+ * ...
+ * [start of some loop construct that periodically wants to grab an interval array]
+ *   ...
+ *   // Get interval array, recycling previous interval array:
+ *   intervalArray = recorder.getIntervalArray(intervalArray);
+ *   // Process the interval array, which is nice and stable here:
+ *   myLogWriter.logArrayContents(intervalArray);
+ *   ...
+ * [end of loop construct]
+ * 
+ * + */ + +public class PackedArraySingleWriterRecorder { + private static final AtomicLong instanceIdSequencer = new AtomicLong(1); + private final long instanceId = instanceIdSequencer.getAndIncrement(); + + private final WriterReaderPhaser recordingPhaser = new WriterReaderPhaser(); + + private volatile PackedLongArray activeArray; + + /** + * Construct a {@link PackedArraySingleWriterRecorder} with a given (virtual) array length. + * + * @param virtualLength The (virtual) array length + */ + public PackedArraySingleWriterRecorder(final int virtualLength) { + activeArray = new InternalPackedLongArray(instanceId, virtualLength); + activeArray.setStartTimeStamp(System.currentTimeMillis()); + } + + /** + * Construct a {@link PackedArraySingleWriterRecorder} with a given (virtual) array length, starting with a given + * initial physical backing store length + * + * @param virtualLength The (virtual) array length + * @param initialPhysicalLength The initial physical backing store length + */ + public PackedArraySingleWriterRecorder(final int virtualLength, final int initialPhysicalLength) { + activeArray = new InternalPackedLongArray(instanceId, virtualLength, initialPhysicalLength); + activeArray.setStartTimeStamp(System.currentTimeMillis()); + } + + /** + * Returns the virtual length of the array represented by this recorder + * @return The virtual length of the array represented by this recorder + */ + public int length() { + return activeArray.length(); + } + + /** + * Change the (virtual) length of the array represented by this recorder + * @param newVirtualLength the new (virtual) length to use + */ + public void setVirtualLength(int newVirtualLength) { + try { + recordingPhaser.readerLock(); + // We don't care about concurrent modifications to the array, as setVirtualLength() in the + // ConcurrentPackedLongArray takes care of those. However, we must perform the change of virtual + // length under the recorder's readerLock protection to prevent mid-change observations: + activeArray.setVirtualLength(newVirtualLength); + } finally { + recordingPhaser.readerUnlock(); + } + } + + /** + * Increment a value at a given index in the array + * @param index the index of the value to be incremented + * @throws ArrayIndexOutOfBoundsException (may throw) if index exceeds length() + */ + public void increment(final int index) throws ArrayIndexOutOfBoundsException { + long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter(); + try { + activeArray.increment(index); + } finally { + recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter); + } + } + + /** + * Add to a value at a given index in the array + * @param index The index of value to add to + * @param valueToAdd The amount to add to the value at the given index + * @throws ArrayIndexOutOfBoundsException (may throw) if index exceeds length() + */ + public void add(final int index, final long valueToAdd) throws ArrayIndexOutOfBoundsException { + long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter(); + try { + activeArray.add(index, valueToAdd); + } finally { + recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter); + } + } + + /** + * Get an interval array, which will include a stable, consistent view of all values + * accumulated since the last interval array was taken. + *

+ * Calling this method is equivalent to calling {@code getIntervalArray(null)}. It is generally recommended + * that the {@link PackedArraySingleWriterRecorder#getIntervalArray(PackedLongArray arrayToRecycle) + * getIntervalArray(arrayToRecycle)} form be used for + regular interval array sampling, as that form accepts a previously returned interval array that can be + recycled internally to avoid allocation and content copying operations, and is therefore significantly + more efficient for repeated use than {@link PackedArraySingleWriterRecorder#getIntervalArray()}. + *

+ * Calling {@link PackedArraySingleWriterRecorder#getIntervalArray()} will reset the values at + * all indexes of the array tracked by the recorder, and start accumulating values for the next interval. + * + * @return an array containing the values accumulated since the last interval array was taken. + */ + public synchronized PackedLongArray getIntervalArray() { + return getIntervalArray(null); + } + + /** + * Get an interval array, which will include a stable, consistent view of all values + * accumulated since the last interval array was taken. + *

+ * {@link PackedArraySingleWriterRecorder#getIntervalArray(PackedLongArray arrayToRecycle) + * getIntervalArray(arrayToRecycle)} + accepts a previously returned interval array that can be recycled internally to avoid allocation + and content copying operations, and is therefore significantly more efficient for repeated use than + {@link PackedArraySingleWriterRecorder#getIntervalArray()}. The provided {@code arrayToRecycle} must + either be null or an interval array returned by a previous call to + {@link PackedArraySingleWriterRecorder#getIntervalArray(PackedLongArray arrayToRecycle) + getIntervalArray(arrayToRecycle)} or + {@link PackedArraySingleWriterRecorder#getIntervalArray()}. + *

+ * NOTE: The caller is responsible for not recycling the same returned interval array more than once. If + * the same interval array instance is recycled more than once, behavior is undefined. + *

+ * Calling {@link PackedArraySingleWriterRecorder#getIntervalArray(PackedLongArray arrayToRecycle) + * getIntervalArray(arrayToRecycle)} will reset the values at all indexes of the array + * tracked by the recorder, and start accumulating values for the next interval. + * + * @param arrayToRecycle a previously returned interval array (from this instance of + * {@link PackedArraySingleWriterRecorder}) that may be recycled to avoid allocation and + * copy operations. + * @return an array containing the values accumulated since the last interval array was taken. + */ + public synchronized PackedLongArray getIntervalArray(final PackedLongArray arrayToRecycle) { + return getIntervalArray(arrayToRecycle, true); + } + + /** + * Get an interval array, which will include a stable, consistent view of all values + * accumulated since the last interval array was taken. + *

+ * {@link PackedArraySingleWriterRecorder#getIntervalArray(PackedLongArray arrayToRecycle) + * getIntervalArray(arrayToRecycle)} + accepts a previously returned interval array that can be recycled internally to avoid allocation + and content copying operations, and is therefore significantly more efficient for repeated use than + {@link PackedArraySingleWriterRecorder#getIntervalArray()}. The provided {@code arrayToRecycle} must + either be null or an interval array returned by a previous call to + {@link PackedArraySingleWriterRecorder#getIntervalArray(PackedLongArray arrayToRecycle) + getIntervalArray(arrayToRecycle)} or + {@link PackedArraySingleWriterRecorder#getIntervalArray()}. + *

+ * NOTE: The caller is responsible for not recycling the same returned interval array more than once. If + * the same interval array instance is recycled more than once, behavior is undefined. + *

+ * Calling {@link PackedArraySingleWriterRecorder#getIntervalArray(PackedLongArray arrayToRecycle) + * getIntervalArray(arrayToRecycle, enforceContainingInstance)} will reset the values at all indexes + of the array tracked by the recorder, and start accumulating values for the next interval. + * + * @param arrayToRecycle a previously returned interval array that may be recycled to avoid allocation and + * copy operations. + * @param enforceContainingInstance if true, will only allow recycling of arrays previously returned from this + * instance of {@link PackedArraySingleWriterRecorder}. If false, will allow recycling arrays + * previously returned by other instances of {@link PackedArraySingleWriterRecorder}. + * @return an array containing the values accumulated since the last interval array was taken. + */ + public synchronized PackedLongArray getIntervalArray(final PackedLongArray arrayToRecycle, + final boolean enforceContainingInstance) { + // Verify that replacement array can validly be used as an inactive array replacement: + validateFitAsReplacementArray(arrayToRecycle, enforceContainingInstance); + PackedLongArray sampledArray = performIntervalSample(arrayToRecycle); + return sampledArray; + } + + /** + * Reset the array contents to all zeros. + */ + public synchronized void reset() { + // the currently active array is reset each time we flip: + performIntervalSample(null); + } + + private PackedLongArray performIntervalSample(final PackedLongArray arrayToRecycle) { + PackedLongArray inactiveArray = arrayToRecycle; + try { + recordingPhaser.readerLock(); + + // Make sure we have an inactive version to flip in: + if (inactiveArray == null) { + if (activeArray instanceof InternalPackedLongArray) { + inactiveArray = new InternalPackedLongArray(instanceId, activeArray.length()); + } else { + throw new IllegalStateException("Unexpected internal array type for activeArray"); + } + } else { + inactiveArray.clear(); + } + + // Swap active and inactive arrays: + final PackedLongArray tempArray = inactiveArray; + inactiveArray = activeArray; + activeArray = tempArray; + + // Mark end time of previous interval and start time of new one: + long now = System.currentTimeMillis(); + activeArray.setStartTimeStamp(now); + inactiveArray.setEndTimeStamp(now); + + // Make sure we are not in the middle of recording a value on the previously active array: + + // Flip phase to make sure no recordings that were in flight pre-flip are still active: + recordingPhaser.flipPhase(500000L /* yield in 0.5 msec units if needed */); + } finally { + recordingPhaser.readerUnlock(); + } + return inactiveArray; + } + + private static class InternalPackedLongArray extends PackedLongArray { + private final long containingInstanceId; + + private InternalPackedLongArray(final long id, int virtualLength, final int initialPhysicalLength) { + super(virtualLength, initialPhysicalLength); + this.containingInstanceId = id; + } + + private InternalPackedLongArray(final long id, final int virtualLength) { + super(virtualLength); + this.containingInstanceId = id; + } + } + + private void validateFitAsReplacementArray(final PackedLongArray replacementArray, + final boolean enforceContainingInstance) { + boolean bad = true; + if (replacementArray == null) { + bad = false; + } else if (replacementArray instanceof InternalPackedLongArray) { + if ((activeArray instanceof InternalPackedLongArray) + && + ((!enforceContainingInstance) || + (((InternalPackedLongArray)replacementArray).containingInstanceId == +
(((InternalPackedLongArray) activeArray).containingInstanceId) + )) { + bad = false; + } + } + if (bad) { + throw new IllegalArgumentException("replacement array must have been obtained via a previous" + + " getIntervalArray() call from this " + this.getClass().getName() + + (enforceContainingInstance ? " instance" : " class")); + } + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/PackedLongArray.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/PackedLongArray.java new file mode 100644 index 000000000..b3c0beca8 --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/PackedLongArray.java @@ -0,0 +1,73 @@ +package io.prometheus.client.HdrHistogram.packedarray; + +/** + * A Packed array of signed 64 bit values that supports {@link #get get()}, {@link #set set()}, + * {@link #add add()} and {@link #increment increment()} operations on the logical contents of the array. + */ +public class PackedLongArray extends AbstractPackedLongArray { + + PackedLongArray() {} + + public PackedLongArray(final int virtualLength) { + this(virtualLength, AbstractPackedArrayContext.MINIMUM_INITIAL_PACKED_ARRAY_CAPACITY); + } + + public PackedLongArray(final int virtualLength, final int initialPhysicalLength) { + setArrayContext(new PackedArrayContext(virtualLength, initialPhysicalLength)); + } + + @Override + void resizeStorageArray(final int newPhysicalLengthInLongs) { + AbstractPackedArrayContext oldArrayContext = getArrayContext(); + PackedArrayContext newArrayContext = + new PackedArrayContext(oldArrayContext.getVirtualLength(), oldArrayContext, newPhysicalLengthInLongs); + setArrayContext(newArrayContext); + for (IterationValue v : oldArrayContext.nonZeroValues()) { + set(v.getIndex(), v.getValue()); + } + } + + @Override + public void setVirtualLength(final int newVirtualArrayLength) { + if (newVirtualArrayLength < length()) { + throw new IllegalArgumentException( + "Cannot set virtual length, as requested length " + newVirtualArrayLength + + " is smaller than the current virtual length " + length()); + } + AbstractPackedArrayContext currentArrayContext = getArrayContext(); + if (currentArrayContext.isPacked() && + (currentArrayContext.determineTopLevelShiftForVirtualLength(newVirtualArrayLength) == + currentArrayContext.getTopLevelShift())) { + // No changes to the array context contents are needed. Just change the virtual length.
+ currentArrayContext.setVirtualLength(newVirtualArrayLength); + return; + } + AbstractPackedArrayContext oldArrayContext = currentArrayContext; + setArrayContext(new PackedArrayContext(newVirtualArrayLength, oldArrayContext, oldArrayContext.length())); + for (IterationValue v : oldArrayContext.nonZeroValues()) { + set(v.getIndex(), v.getValue()); + } + } + + @Override + public PackedLongArray copy() { + PackedLongArray copy = new PackedLongArray(this.length(), this.getPhysicalLength()); + copy.add(this); + return copy; + } + + @Override + void clearContents() { + getArrayContext().clearContents(); + } + + @Override + long criticalSectionEnter() { + return 0; + } + + @Override + void criticalSectionExit(final long criticalValueAtEnter) { + } +} + diff --git a/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/ResizeException.java b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/ResizeException.java new file mode 100644 index 000000000..b5870321c --- /dev/null +++ b/simpleclient/src/main/java/io/prometheus/client/HdrHistogram/packedarray/ResizeException.java @@ -0,0 +1,13 @@ +package io.prometheus.client.HdrHistogram.packedarray; + +class ResizeException extends Exception { + private final int newSize; + + ResizeException(final int newSize) { + this.newSize = newSize; + } + + int getNewSize() { + return newSize; + } +} diff --git a/simpleclient/src/main/java/io/prometheus/client/Summary.java b/simpleclient/src/main/java/io/prometheus/client/Summary.java index 4d79e558a..ceddbc0ef 100644 --- a/simpleclient/src/main/java/io/prometheus/client/Summary.java +++ b/simpleclient/src/main/java/io/prometheus/client/Summary.java @@ -1,7 +1,5 @@ package io.prometheus.client; -import io.prometheus.client.CKMSQuantiles.Quantile; - import java.io.Closeable; import java.util.ArrayList; import java.util.Collections; @@ -11,6 +9,8 @@ import java.util.TreeMap; import java.util.concurrent.Callable; import java.util.concurrent.TimeUnit; +import java.util.logging.Level; +import java.util.logging.Logger; /** * Summary metric, to track the size of events. @@ -21,6 +21,8 @@ *

  • Request size
• * * + * Note that negative measurements are not supported: a negative observation is silently ignored (it is not added to the count, sum, or quantiles). + * *

    * Example Summaries: *

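The surrounding example text is elided by the diff context. As a hedged illustration of the builder API introduced by this change (metric name, quantile choices, and precision are arbitrary; the no-labels convenience methods observe() and startTimer() on Summary are assumed from the existing simpleclient API):

    import io.prometheus.client.Summary;

    public class SummaryQuantilesExample {
        public static void main(String[] args) throws Exception {
            Summary latency = Summary.build()
                    .name("request_latency_seconds")
                    .help("Request latency in seconds.")
                    .quantile(0.5)                     // tracked by the underlying HdrHistogram
                    .quantile(0.99)
                    .numberOfSignificantValueDigits(3) // precision vs. memory trade-off
                    .create();

            latency.observe(0.42);
            latency.observe(-1.0); // negative observations are silently ignored

            Summary.Timer timer = latency.startTimer();
            Thread.sleep(10);      // stand-in for timed work
            timer.observeDuration();
        }
    }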
    @@ -74,19 +76,28 @@
      *   
• ageBuckets(int): Set the number of buckets used to implement the sliding time window. If your time window is 10 minutes, and you have ageBuckets=5, * buckets will be switched every 2 minutes. The value is a trade-off between resources (memory and CPU for maintaining the bucket) * and how smoothly the time window moves. Default value is 5.
  • numberOfSignificantValueDigits(int): Set the precision (significant decimal digits) of the underlying HdrHistogram. + * Default value is 2. See {@link io.prometheus.client.HdrHistogram.ConcurrentDoubleHistogram} + *
• highestToLowestValueRatio(long): Set the initial dynamic range (and memory usage) of the underlying HdrHistogram. + * Default value is 1000. See {@link io.prometheus.client.HdrHistogram.ConcurrentDoubleHistogram} * * * See https://prometheus.io/docs/practices/histograms/ for more info on quantiles. + * See http://hdrhistogram.org and https://github.com/HdrHistogram/HdrHistogram for more info on HdrHistogram. */ public class Summary extends SimpleCollector<Summary.Child> implements Counter.Describable { - final List<Quantile> quantiles; // Can be empty, but can never be null. - final long maxAgeSeconds; - final int ageBuckets; + private final List<Double> quantiles; // Can be empty, but can never be null. + private final long highestToLowestValueRatio; + private final int numberOfSignificantValueDigits; + private final long maxAgeSeconds; + private final int ageBuckets; Summary(Builder b) { super(b); - quantiles = Collections.unmodifiableList(new ArrayList<Quantile>(b.quantiles)); + this.quantiles = Collections.unmodifiableList(new ArrayList<Double>(b.quantiles)); + this.highestToLowestValueRatio = b.highestToLowestValueRatio; + this.numberOfSignificantValueDigits = b.numberOfSignificantValueDigits; this.maxAgeSeconds = b.maxAgeSeconds; this.ageBuckets = b.ageBuckets; initializeNoLabelsChild(); @@ -94,24 +105,46 @@ public class Summary extends SimpleCollector<Summary.Child> implements Counter.D public static class Builder extends SimpleCollector.Builder<Builder, Summary> { - private final List<Quantile> quantiles = new ArrayList<Quantile>(); + private final List<Double> quantiles = new ArrayList<Double>(); + private long highestToLowestValueRatio = 1000; + private int numberOfSignificantValueDigits = 2; private long maxAgeSeconds = TimeUnit.MINUTES.toSeconds(10); private int ageBuckets = 5; - public Builder quantile(double quantile, double error) { + public Builder quantile(double quantile) { if (quantile < 0.0 || quantile > 1.0) { throw new IllegalArgumentException("Quantile " + quantile + " invalid: Expected number between 0.0 and 1.0."); } - if (error < 0.0 || error > 1.0) { - throw new IllegalArgumentException("Error " + error + " invalid: Expected number between 0.0 and 1.0."); + quantiles.add(quantile); + return this; + } + + // Backwards compatibility with the CKMS-based API: maps the requested error bound to HdrHistogram precision (e.g. error 0.01 -> 2 significant value digits) + public Builder quantile(double quantile, double error) { + this.quantile(quantile); + this.numberOfSignificantValueDigits(Math.max(this.numberOfSignificantValueDigits, (int)-Math.log10(error))); + return this; + } + + public Builder highestToLowestValueRatio(long highestToLowestValueRatio) { + if (highestToLowestValueRatio < 2) { + throw new IllegalArgumentException("highestToLowestValueRatio cannot be " + highestToLowestValueRatio + " : Expected at least 2."); + } + this.highestToLowestValueRatio = highestToLowestValueRatio; + return this; + } + + public Builder numberOfSignificantValueDigits(int numberOfSignificantValueDigits) { + if (numberOfSignificantValueDigits < 0 || numberOfSignificantValueDigits > 5) { + throw new IllegalArgumentException("numberOfSignificantValueDigits cannot be " + numberOfSignificantValueDigits + " : Expected number between 0 and 5."); } - quantiles.add(new Quantile(quantile, error)); + this.numberOfSignificantValueDigits = numberOfSignificantValueDigits; return this; } public Builder maxAgeSeconds(long maxAgeSeconds) { if (maxAgeSeconds <= 0) { - throw new IllegalArgumentException("maxAgeSeconds cannot be " + maxAgeSeconds); + throw new IllegalArgumentException("maxAgeSeconds cannot be " + maxAgeSeconds + " : Expected a positive number."); } this.maxAgeSeconds = maxAgeSeconds; return this; @@ -119,7 +152,7 @@ public
Builder maxAgeSeconds(long maxAgeSeconds) { public Builder ageBuckets(int ageBuckets) { if (ageBuckets <= 0) { - throw new IllegalArgumentException("ageBuckets cannot be " + ageBuckets); + throw new IllegalArgumentException("ageBuckets cannot be " + ageBuckets + " : Expected a positive number."); } this.ageBuckets = ageBuckets; return this; @@ -135,20 +168,21 @@ public Summary create() { dontInitializeNoLabelsChild = true; return new Summary(this); } + } /** - * Return a Builder to allow configuration of a new Summary. Ensures required fields are provided. + * Return a Builder to allow configuration of a new Summary. Ensures required fields are provided. * - * @param name The name of the metric - * @param help The help string of the metric + * @param name The name of the metric + * @param help The help string of the metric */ public static Builder build(String name, String help) { return new Builder().name(name).help(help); } /** - * Return a Builder to allow configuration of a new Summary. + * Return a Builder to allow configuration of a new Summary. */ public static Builder build() { return new Builder(); @@ -156,26 +190,30 @@ public static Builder build() { @Override protected Child newChild() { - return new Child(quantiles, maxAgeSeconds, ageBuckets); + return new Child(quantiles, highestToLowestValueRatio, numberOfSignificantValueDigits, maxAgeSeconds, ageBuckets); } - /** * Represents an event being timed. */ public static class Timer implements Closeable { + private final Child child; private final long start; - private Timer(Child child, long start) { + + private Timer(Child child) { this.child = child; - this.start = start; + this.start = SimpleTimer.defaultTimeProvider.nanoTime(); } + /** * Observe the amount of time in seconds since {@link Child#startTimer} was called. + * * @return Measured duration in seconds since {@link Child#startTimer} was called. */ public double observeDuration() { - double elapsed = SimpleTimer.elapsedSecondsFromNanos(start, SimpleTimer.defaultTimeProvider.nanoTime()); + long end = SimpleTimer.defaultTimeProvider.nanoTime(); + double elapsed = SimpleTimer.elapsedSecondsFromNanos(start, end); child.observe(elapsed); return elapsed; } @@ -187,6 +225,7 @@ public double observeDuration() { public void close() { observeDuration(); } + } /** @@ -197,62 +236,26 @@ public void close() { */ public static class Child { - /** - * Executes runnable code (e.g. a Java 8 Lambda) and observes a duration of how long it took to run. - * - * @param timeable Code that is being timed - * @return Measured duration in seconds for timeable to complete. - */ - public double time(Runnable timeable) { - Timer timer = startTimer(); - - double elapsed; - try { - timeable.run(); - } finally { - elapsed = timer.observeDuration(); - } - return elapsed; - } - - /** - * Executes callable code (e.g. a Java 8 Lambda) and observes a duration of how long it took to run. - * - * @param timeable Code that is being timed - * @return Result returned by callable.
-     */
-    public <E> E time(Callable<E> timeable) {
-      Timer timer = startTimer();
-
-      try {
-        return timeable.call();
-      } catch (RuntimeException e) {
-        throw e;
-      } catch (Exception e) {
-        throw new RuntimeException(e);
-      } finally {
-        timer.observeDuration();
-      }
-    }
-
     public static class Value {
+
       public final double count;
       public final double sum;
       public final SortedMap<Double, Double> quantiles;
 
-      private Value(double count, double sum, List<Quantile> quantiles, TimeWindowQuantiles quantileValues) {
-        this.count = count;
-        this.sum = sum;
+      private Value(DoubleAdder count, DoubleAdder sum, List<Double> quantiles, TimeWindowQuantiles quantileValues) {
+        this.count = count.sum();
+        this.sum = sum.sum();
         this.quantiles = Collections.unmodifiableSortedMap(snapshot(quantiles, quantileValues));
       }
 
-      private SortedMap<Double, Double> snapshot(List<Quantile> quantiles, TimeWindowQuantiles quantileValues) {
+      private SortedMap<Double, Double> snapshot(List<Double> quantiles, TimeWindowQuantiles quantileValues) {
         SortedMap<Double, Double> result = new TreeMap<Double, Double>();
-        for (Quantile q : quantiles) {
-          result.put(q.quantile, quantileValues.get(q.quantile));
+        for (Double quantile : quantiles) {
+          result.put(quantile, quantileValues.get(quantile));
         }
         return result;
       }
+
     }
 
     // Having these separate leaves us open to races,
@@ -261,53 +264,98 @@ private SortedMap<Double, Double> snapshot(List<Double> quantiles, TimeWindowQuantiles quantileValues) {
     // This should be reevaluated in the future.
     private final DoubleAdder count = new DoubleAdder();
     private final DoubleAdder sum = new DoubleAdder();
-    private final List<Quantile> quantiles;
+    private final List<Double> quantiles;
    private final TimeWindowQuantiles quantileValues;
 
-    private Child(List<Quantile> quantiles, long maxAgeSeconds, int ageBuckets) {
+    private Child(List<Double> quantiles, long highestToLowestValueRatio, int numberOfSignificantValueDigits, long maxAgeSeconds, int ageBuckets) {
       this.quantiles = quantiles;
-      if (quantiles.size() > 0) {
-        quantileValues = new TimeWindowQuantiles(quantiles.toArray(new Quantile[]{}), maxAgeSeconds, ageBuckets);
-      } else {
-        quantileValues = null;
-      }
+      this.quantileValues = quantiles.isEmpty() ? null : new TimeWindowQuantiles(highestToLowestValueRatio, numberOfSignificantValueDigits, maxAgeSeconds, ageBuckets);
     }
 
     /**
      * Observe the given amount.
      */
     public void observe(double amt) {
+      if (amt < 0.0) {
+        return; // ignore negative measurements
+      }
+
       count.add(1);
       sum.add(amt);
       if (quantileValues != null) {
-        quantileValues.insert(amt);
+        try {
+          quantileValues.insert(amt);
+        } catch (Exception e) {
+          // handle possible rare exceptions from HdrHistogram
+          Logger.getLogger(Summary.class.getName())
+              .log(Level.WARNING, "Failed to record value: " + amt, e);
+        }
       }
     }
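
[Illustration, not part of the patch: with the new guard above, a negative observation becomes a no-op, where the old code would still bump _count and _sum; requestLatency is the hypothetical summary from the earlier sketch.]

    requestLatency.labels("GET").observe(-0.5); // silently dropped: count, sum and quantiles all unchanged
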
+
     /**
      * Start a timer to track a duration.
      * <p>
     * Call {@link Timer#observeDuration} at the end of what you want to measure the duration of.
      */
     public Timer startTimer() {
-      return new Timer(this, SimpleTimer.defaultTimeProvider.nanoTime());
+      return new Timer(this);
     }
+
+    /**
+     * Executes runnable code (e.g. a Java 8 Lambda) and observes a duration of how long it took to run.
+     *
+     * @param timeable Code that is being timed
+     * @return Measured duration in seconds for timeable to complete.
+     */
+    public double time(Runnable timeable) {
+      Timer timer = startTimer();
+      double elapsed;
+      try {
+        timeable.run();
+      } finally {
+        elapsed = timer.observeDuration();
+      }
+      return elapsed;
+    }
+
+    /**
+     * Executes callable code (e.g. a Java 8 Lambda) and observes a duration of how long it took to run.
+     *
+     * @param timeable Code that is being timed
+     * @return Result returned by callable.
+     */
+    public <E> E time(Callable<E> timeable) {
+      Timer timer = startTimer();
+      try {
+        return timeable.call();
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      } finally {
+        timer.observeDuration();
+      }
+    }
+
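[Illustration, not part of the patch: two equivalent ways to time a block with the Child API above, assuming Java 8; doWork() stands in for the code being measured.]

    double seconds = requestLatency.labels("GET").time(() -> doWork());

    Summary.Timer timer = requestLatency.labels("GET").startTimer();
    try {
      doWork();
    } finally {
      timer.observeDuration(); // records the elapsed seconds into the summary
    }
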
     /**
      * Get the value of the Summary.
      * <p>
     * Warning: The definition of {@link Value} is subject to change.
      */
     public Value get() {
-      return new Value(count.sum(), sum.sum(), quantiles, quantileValues);
+      return new Value(count, sum, quantiles, quantileValues);
     }
+
   }
 
   // Convenience methods.
+
   /**
    * Observe the given amount on the summary with no labels.
    */
   public void observe(double amt) {
     noLabelsChild.observe(amt);
   }
+
   /**
    * Start a timer to track a duration on the summary with no labels.
    * <p>
@@ -323,7 +371,7 @@ public Timer startTimer() {
    * @param timeable Code that is being timed
    * @return Measured duration in seconds for timeable to complete.
    */
-  public double time(Runnable timeable){
+  public double time(Runnable timeable) {
     return noLabelsChild.time(timeable);
   }
 
@@ -333,7 +381,7 @@ public double time(Runnable timeable){
    * @param timeable Code that is being timed
    * @return Result returned by callable.
    */
-  public <E> E time(Callable<E> timeable){
+  public <E> E time(Callable<E> timeable) {
     return noLabelsChild.time(timeable);
   }
 
@@ -349,19 +397,18 @@ public Child.Value get() {
   @Override
   public List<MetricFamilySamples> collect() {
     List<MetricFamilySamples.Sample> samples = new ArrayList<MetricFamilySamples.Sample>();
-    for(Map.Entry<List<String>, Child> c: children.entrySet()) {
-      Child.Value v = c.getValue().get();
+    for (Map.Entry<List<String>, Child> child : children.entrySet()) {
+      Child.Value value = child.getValue().get();
       List<String> labelNamesWithQuantile = new ArrayList<String>(labelNames);
       labelNamesWithQuantile.add("quantile");
-      for(Map.Entry<Double, Double> q : v.quantiles.entrySet()) {
-        List<String> labelValuesWithQuantile = new ArrayList<String>(c.getKey());
-        labelValuesWithQuantile.add(doubleToGoString(q.getKey()));
-        samples.add(new MetricFamilySamples.Sample(fullname, labelNamesWithQuantile, labelValuesWithQuantile, q.getValue()));
+      for (Map.Entry<Double, Double> quantile : value.quantiles.entrySet()) {
+        List<String> labelValuesWithQuantile = new ArrayList<String>(child.getKey());
+        labelValuesWithQuantile.add(doubleToGoString(quantile.getKey()));
+        samples.add(new MetricFamilySamples.Sample(fullname, labelNamesWithQuantile, labelValuesWithQuantile, quantile.getValue()));
       }
-      samples.add(new MetricFamilySamples.Sample(fullname + "_count", labelNames, c.getKey(), v.count));
-      samples.add(new MetricFamilySamples.Sample(fullname + "_sum", labelNames, c.getKey(), v.sum));
+      samples.add(new MetricFamilySamples.Sample(fullname + "_count", labelNames, child.getKey(), value.count));
+      samples.add(new MetricFamilySamples.Sample(fullname + "_sum", labelNames, child.getKey(), value.sum));
     }
-
     return familySamplesList(Type.SUMMARY, samples);
   }
diff --git a/simpleclient/src/main/java/io/prometheus/client/TimeWindowQuantiles.java b/simpleclient/src/main/java/io/prometheus/client/TimeWindowQuantiles.java
index cc60bc39b..07c0536d4 100644
--- a/simpleclient/src/main/java/io/prometheus/client/TimeWindowQuantiles.java
+++ b/simpleclient/src/main/java/io/prometheus/client/TimeWindowQuantiles.java
@@ -1,54 +1,89 @@
 package io.prometheus.client;
 
-import io.prometheus.client.CKMSQuantiles.Quantile;
+import io.prometheus.client.HdrHistogram.ConcurrentDoubleHistogram;
+
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
 
 /**
- * Wrapper around CKMSQuantiles.
+ * Wrapper around HdrHistogram.
  *
- * Maintains a ring buffer of CKMSQuantiles to provide quantiles over a sliding windows of time.
+ * Maintains a ring buffer of HdrHistograms to provide quantiles over a sliding window of time.
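+ * The oldest bucket (index 0) has been collecting for the whole window and is the one that is
+ * queried; one bucket expires and is replaced every maxAgeSeconds / ageBuckets.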
  */
 class TimeWindowQuantiles {
 
-  private final Quantile[] quantiles;
-  private final CKMSQuantiles[] ringBuffer;
-  private int currentBucket;
-  private long lastRotateTimestampMillis;
-  private final long durationBetweenRotatesMillis;
+  private final AtomicReference<ConcurrentDoubleHistogram[]> buckets;
+  private final AtomicLong lastRotateTimestampNanos;
+
+  private final long highestToLowestValueRatio;
+  private final int numberOfSignificantValueDigits;
+  private final long durationBetweenRotatesNanos;
 
-  public TimeWindowQuantiles(Quantile[] quantiles, long maxAgeSeconds, int ageBuckets) {
-    this.quantiles = quantiles;
-    this.ringBuffer = new CKMSQuantiles[ageBuckets];
+  public TimeWindowQuantiles(long highestToLowestValueRatio, int numberOfSignificantValueDigits, long maxAgeSeconds, int ageBuckets) {
+    this.highestToLowestValueRatio = highestToLowestValueRatio;
+    this.numberOfSignificantValueDigits = numberOfSignificantValueDigits;
+    ConcurrentDoubleHistogram[] emptyBuckets = new ConcurrentDoubleHistogram[ageBuckets];
     for (int i = 0; i < ageBuckets; i++) {
-      this.ringBuffer[i] = new CKMSQuantiles(quantiles);
+      emptyBuckets[i] = createBucket();
     }
-    this.currentBucket = 0;
-    this.lastRotateTimestampMillis = System.currentTimeMillis();
-    this.durationBetweenRotatesMillis = TimeUnit.SECONDS.toMillis(maxAgeSeconds) / ageBuckets;
+    this.buckets = new AtomicReference<ConcurrentDoubleHistogram[]>(emptyBuckets);
+    this.lastRotateTimestampNanos = new AtomicLong(System.nanoTime());
+    this.durationBetweenRotatesNanos = TimeUnit.SECONDS.toNanos(maxAgeSeconds) / ageBuckets;
+  }
+
+  private ConcurrentDoubleHistogram createBucket() {
+    ConcurrentDoubleHistogram bucket = new ConcurrentDoubleHistogram(highestToLowestValueRatio, numberOfSignificantValueDigits);
+    bucket.setAutoResize(true);
+
+    return bucket;
   }
 
-  public synchronized double get(double q) {
-    CKMSQuantiles currentBucket = rotate();
-    return currentBucket.get(q);
+  public double get(double quantile) {
+    // On concurrent `get` and `rotate`, it is acceptable to `get` the sample from an outdated `bucket`.
+    ConcurrentDoubleHistogram currentBucket = getCurrentBucket();
+    return currentBucket.getTotalCount() == 0 ? Double.NaN : currentBucket.getValueAtPercentile(quantile * 100.0);
   }
 
-  public synchronized void insert(double value) {
+  public void insert(double value) {
+    // On concurrent `insert` and `rotate`, it should be acceptable to lose the measurement in the newest `bucket`.
     rotate();
-    for (CKMSQuantiles ckmsQuantiles : ringBuffer) {
-      ckmsQuantiles.insert(value);
+
+    for (ConcurrentDoubleHistogram bucket : buckets.get()) {
+      bucket.recordValue(value);
     }
   }
 
-  private CKMSQuantiles rotate() {
-    long timeSinceLastRotateMillis = System.currentTimeMillis() - lastRotateTimestampMillis;
-    while (timeSinceLastRotateMillis > durationBetweenRotatesMillis) {
-      ringBuffer[currentBucket] = new CKMSQuantiles(quantiles);
-      if (++currentBucket >= ringBuffer.length) {
-        currentBucket = 0;
+  private ConcurrentDoubleHistogram getCurrentBucket() {
+    rotate();
+
+    return buckets.get()[0]; // oldest bucket
+  }
+
+  private void rotate() {
+    // On concurrent `rotate` and `rotate`:
+    // - `currentTime` is cached to reduce thread contention.
+    // - `lastRotate` is used to ensure the correct number of rotations.
+
+    // Correctness is guaranteed by atomic memory access ordering and visibility semantics.
+    // Note that it is not possible for other threads to read partially initialized `buckets`.
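+    // Only the thread that wins the compareAndSet on `lastRotateTimestampNanos` below performs
+    // the bucket shift, so each expired interval is rotated exactly once.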
+    long currentTime = System.nanoTime();
+    long lastRotate = lastRotateTimestampNanos.get();
+    while (currentTime - lastRotate > durationBetweenRotatesNanos) {
+      if (lastRotateTimestampNanos.compareAndSet(lastRotate, lastRotate + durationBetweenRotatesNanos)) {
+        // rotate buckets (atomic)
+        ConcurrentDoubleHistogram[] oldBuckets = buckets.get();
+        int ageBuckets = oldBuckets.length;
+        ConcurrentDoubleHistogram[] newBuckets = new ConcurrentDoubleHistogram[ageBuckets];
+        newBuckets[ageBuckets - 1] = createBucket(); // newest bucket
+        System.arraycopy(oldBuckets, 1, newBuckets, 0, ageBuckets - 1); // older buckets
+        while (!buckets.compareAndSet(oldBuckets, newBuckets)) {
+          oldBuckets = buckets.get();
+          System.arraycopy(oldBuckets, 1, newBuckets, 0, ageBuckets - 1); // older buckets
+        }
       }
-      timeSinceLastRotateMillis -= durationBetweenRotatesMillis;
-      lastRotateTimestampMillis += durationBetweenRotatesMillis;
+      lastRotate = lastRotateTimestampNanos.get();
     }
-    return ringBuffer[currentBucket];
   }
+
 }
diff --git a/simpleclient/src/test/java/io/prometheus/client/SummaryTest.java b/simpleclient/src/test/java/io/prometheus/client/SummaryTest.java
index 06e1f9d9b..3721bcddf 100644
--- a/simpleclient/src/test/java/io/prometheus/client/SummaryTest.java
+++ b/simpleclient/src/test/java/io/prometheus/client/SummaryTest.java
@@ -23,7 +23,7 @@ public class SummaryTest {
   @Before
   public void setUp() {
     registry = new CollectorRegistry();
-    noLabels = Summary.build().name("nolabels").help("help").register(registry);
+    noLabels = Summary.build().name("no_labels").help("help").register(registry);
     labels = Summary.build().name("labels").help("help").labelNames("l").register(registry);
     noLabelsAndQuantiles = Summary.build()
         .quantile(0.5, 0.05)
@@ -44,10 +44,10 @@ public void tearDown() {
   }
 
   private double getCount() {
-    return registry.getSampleValue("nolabels_count").doubleValue();
+    return registry.getSampleValue("no_labels_count").doubleValue();
   }
 
   private double getSum() {
-    return registry.getSampleValue("nolabels_sum").doubleValue();
+    return registry.getSampleValue("no_labels_sum").doubleValue();
   }
 
   private double getNoLabelQuantile(double q) {
     return registry.getSampleValue("no_labels_and_quantiles", new String[]{"quantile"}, new String[]{Collector.doubleToGoString(q)}).doubleValue();
@@ -63,6 +63,7 @@ public void testObserve() {
     assertEquals(2.0, getSum(), .001);
     assertEquals(1.0, noLabels.get().count, .001);
     assertEquals(2.0, noLabels.get().sum, .001);
+
     noLabels.labels().observe(4);
     assertEquals(2.0, getCount(), .001);
     assertEquals(6.0, getSum(), .001);
@@ -74,19 +75,26 @@ public void testObserve() {
   public void testQuantiles() {
     int nSamples = 1000000; // simulate one million samples
+    double sum = 0.0;
+
     for (int i=1; i<=nSamples; i++) {
       // In this test, we observe the numbers from 1 to nSamples,
       // because that makes it easy to verify if the quantiles are correct.
       labelsAndQuantiles.labels("a").observe(i);
       noLabelsAndQuantiles.observe(i);
+      sum += i;
     }
     assertEquals(getNoLabelQuantile(0.5), 0.5 * nSamples, 0.05 * nSamples);
     assertEquals(getNoLabelQuantile(0.9), 0.9 * nSamples, 0.01 * nSamples);
     assertEquals(getNoLabelQuantile(0.99), 0.99 * nSamples, 0.001 * nSamples);
+    assertEquals(nSamples, registry.getSampleValue("no_labels_and_quantiles_count"), 0.001);
+    assertEquals(sum, registry.getSampleValue("no_labels_and_quantiles_sum"), 0.001);
     assertEquals(getLabeledQuantile("a", 0.5), 0.5 * nSamples, 0.05 * nSamples);
     assertEquals(getLabeledQuantile("a", 0.9), 0.9 * nSamples, 0.01 * nSamples);
     assertEquals(getLabeledQuantile("a", 0.99), 0.99 * nSamples, 0.001 * nSamples);
+    assertEquals(nSamples, registry.getSampleValue("labels_and_quantiles_count", new String[]{"l"}, new String[]{"a"}), 0.001);
+    assertEquals(sum, registry.getSampleValue("labels_and_quantiles_sum", new String[]{"l"}, new String[]{"a"}), 0.001);
   }
 
   @Test
@@ -144,6 +152,9 @@ public Integer call() {
   public void noLabelsDefaultZeroValue() {
     assertEquals(0.0, getCount(), .001);
     assertEquals(0.0, getSum(), .001);
+    noLabels.observe(2.0);
+    assertEquals(1.0, getCount(), .001);
+    assertEquals(2.0, getSum(), .001);
   }
 
   private Double getLabelsCount(String labelValue) {
@@ -159,16 +170,18 @@ public void testLabels() {
     assertEquals(null, getLabelsSum("a"));
     assertEquals(null, getLabelsCount("b"));
     assertEquals(null, getLabelsSum("b"));
+
     labels.labels("a").observe(2);
-    assertEquals(1.0, getLabelsCount("a").doubleValue(), .001);
-    assertEquals(2.0, getLabelsSum("a").doubleValue(), .001);
+    assertEquals(1.0, getLabelsCount("a"), .001);
+    assertEquals(2.0, getLabelsSum("a"), .001);
     assertEquals(null, getLabelsCount("b"));
     assertEquals(null, getLabelsSum("b"));
+
     labels.labels("b").observe(3);
-    assertEquals(1.0, getLabelsCount("a").doubleValue(), .001);
-    assertEquals(2.0, getLabelsSum("a").doubleValue(), .001);
-    assertEquals(1.0, getLabelsCount("b").doubleValue(), .001);
-    assertEquals(3.0, getLabelsSum("b").doubleValue(), .001);
+    assertEquals(1.0, getLabelsCount("a"), .001);
+    assertEquals(2.0, getLabelsSum("a"), .001);
+    assertEquals(1.0, getLabelsCount("b"), .001);
+    assertEquals(3.0, getLabelsSum("b"), .001);
   }
 
   @Test
@@ -177,12 +190,8 @@ public void testCollect() {
     List<Collector.MetricFamilySamples> mfs = labels.collect();
 
     ArrayList<Collector.MetricFamilySamples.Sample> samples = new ArrayList<Collector.MetricFamilySamples.Sample>();
-    ArrayList<String> labelNames = new ArrayList<String>();
-    labelNames.add("l");
-    ArrayList<String> labelValues = new ArrayList<String>();
-    labelValues.add("a");
-    samples.add(new Collector.MetricFamilySamples.Sample("labels_count", labelNames, labelValues, 1.0));
-    samples.add(new Collector.MetricFamilySamples.Sample("labels_sum", labelNames, labelValues, 2.0));
+    samples.add(new Collector.MetricFamilySamples.Sample("labels_count", asList("l"), asList("a"), 1.0));
+    samples.add(new Collector.MetricFamilySamples.Sample("labels_sum", asList("l"), asList("a"), 2.0));
     Collector.MetricFamilySamples mfsFixture = new Collector.MetricFamilySamples("labels", Collector.Type.SUMMARY, "help", samples);
 
     assertEquals(1, mfs.size());
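
[Illustration, not part of the patch: a worked example of the backwards-compatible quantile(quantile, error) mapping in the Builder above, which converts a CKMS error bound into HdrHistogram significant digits and keeps the maximum across calls.]

    // error = 0.05  -> -Math.log10(0.05) ~= 1.30 -> (int) cast gives 1 significant digit
    // error = 0.001 -> -Math.log10(0.001) = 3.0  -> 3 significant digits
    int digits = Math.max(2, (int) -Math.log10(0.05));  // stays at the default of 2
    int finer  = Math.max(2, (int) -Math.log10(0.001)); // raised to 3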