Merge pull request #2196 from rosahbruno/1790501-fix-timing-distribution-docs
rosahbruno committed Oct 5, 2022
2 parents 86847ed + 04fd7db commit 945a940
Showing 12 changed files with 88 additions and 24 deletions.
28 changes: 20 additions & 8 deletions docs/user/reference/metrics/custom_distribution.md
@@ -114,6 +114,9 @@ val snapshot = Graphics.checkerboardPeak.testGetValue()
// Does the sum have the expected value?
assertEquals(23, snapshot.sum)

// Does the count have the expected value?
assertEquals(1L, snapshot.count)

// Buckets are indexed by their lower bound.
assertEquals(1L, snapshot.values[19])
```
@@ -130,6 +133,9 @@ val snapshot = Graphics.INSTANCE.checkerboardPeak().testGetValue();

// Does the sum have the expected value?
assertEquals(23, snapshot.sum);

// Does the count have the expected value?
assertEquals(1L, snapshot.count);
```

</div>
@@ -139,10 +145,12 @@ assertEquals(23, snapshot.sum);
// Get snapshot.
let snapshot = graphics.checkerboardPeak.testGetValue()

// Usually you don't know the exact values,
// but how many should have been recorded.
// Does the sum have the expected value?
XCTAssertEqual(23, snapshot.sum)

// Does the count have the expected value?
XCTAssertEqual(1, snapshot.count)

// Buckets are indexed by their lower bound.
XCTAssertEqual(1L, snapshot.values[19])
```
@@ -157,12 +165,14 @@ metrics = load_metrics("metrics.yaml")
# Get snapshot.
snapshot = metrics.graphics.checkerboard_peak.test_get_value()

# Usually you don't know the exact values,
# but how many should have been recorded.
# Does the sum have the expected value?
assert 23 == snapshot.sum

# Does the count have the expected value?
assert 1 == snapshot.count

# Buckets are indexed by their lower bound.
assertEquals(1L, snapshot.values[19])
assert 1 == snapshot.values[19]
```

</div>
@@ -171,12 +181,14 @@
```Rust
use glean_metrics::graphics;

// Usually you don't know the exact values,
// but how many should have been recorded.
// Does the sum have the expected value?
assert_eq!(23, graphics::checkerboard_peak.test_get_value(None).unwrap().sum);

// Does the count have the expected value?
assert_eq!(1, graphics::checkerboard_peak.test_get_value(None).unwrap().count);

// Buckets are indexed by their lower bound.
assert_eq(1, snapshot.values[19])
assert_eq!(1, snapshot.values[19])
```

</div>
2 changes: 1 addition & 1 deletion docs/user/reference/metrics/memory_distribution.md
@@ -196,7 +196,7 @@ assert_eq!(11, snapshot.sum);

// Usually you don't know the exact timing values,
// but how many should have been recorded.
assert_eq!(2, snapshot.values.len());
assert_eq!(2, snapshot.count);
```


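The doc fix above is more than cosmetic: `snapshot.values.len()` is the number of occupied buckets, while the new `count` field is the number of recorded samples; the two only coincide when no two samples land in the same bucket. A minimal sketch of the distinction, using a toy snapshot type rather than glean-core's real `DistributionData`:

```Rust
use std::collections::HashMap;

// Toy stand-in used only to illustrate the distinction; glean-core's real
// DistributionData is defined in glean-core/src/metrics/mod.rs (see below).
struct Snapshot {
    values: HashMap<i64, i64>, // bucket lower bound -> samples in that bucket
    count: i64,                // total number of recorded samples
}

fn main() {
    // Two samples that happen to land in the same bucket.
    let mut values = HashMap::new();
    values.insert(1024, 2);
    let snapshot = Snapshot { count: 2, values };

    assert_eq!(1, snapshot.values.len()); // occupied buckets
    assert_eq!(2, snapshot.count);        // recorded samples
}
```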
25 changes: 20 additions & 5 deletions docs/user/reference/metrics/timing_distribution.md
@@ -372,9 +372,12 @@ import org.mozilla.yourApplication.GleanMetrics.Pages
// Get snapshot.
val snapshot = Pages.pageLoad.testGetValue()

// Does the sum have the expected value?
assertEquals(11, snapshot.sum)

// Usually you don't know the exact timing values,
// but how many should have been recorded.
assertEquals(1L, snapshot.sum)
assertEquals(2L, snapshot.count)
```

</div>
@@ -386,9 +389,12 @@ import org.mozilla.yourApplication.GleanMetrics.Pages;
// Get snapshot.
DistributionData snapshot = pages.INSTANCE.pageLoad().testGetValue();

// Does the sum have the expected value?
assertEquals(11, snapshot.sum);

// Usually you don't know the exact timing values,
// but how many should have been recorded.
assertEquals(1L, snapshot.getSum());
assertEquals(2L, snapshot.getCount());
```

</div>
@@ -398,9 +404,12 @@ assertEquals(1L, snapshot.getSum());
// Get snapshot.
let snapshot = pages.pageLoad.testGetValue()

// Does the sum have the expected value?
XCTAssertEqual(11, snapshot.sum)

// Usually you don't know the exact timing values,
// but how many should have been recorded.
XCTAssertEqual(1, snapshot.sum)
XCTAssertEqual(2, snapshot.count)
```

</div>
@@ -413,9 +422,12 @@ metrics = load_metrics("metrics.yaml")
# Get snapshot.
snapshot = metrics.pages.page_load.test_get_value()

# Does the sum have the expected value?
assert 11 == snapshot.sum

# Usually you don't know the exact timing values,
# but how many should have been recorded.
assert 1 == snapshot.sum
assert 2 == snapshot.count
```

</div>
@@ -428,9 +440,12 @@ use glean_metrics::pages;
// Get snapshot
let snapshot = pages::page_load.test_get_value(None).unwrap();

// Does the sum have the expected value?
assert_eq!(11, snapshot.sum);

// Usually you don't know the exact timing values,
// but how many should have been recorded.
assert_eq!(1, snapshot.values.len());
assert_eq!(2, snapshot.count);
```


@@ -53,6 +53,7 @@ class TimingDistributionMetricTypeTest {
val snapshot = metric.testGetValue()!!
// Check the sum
assertTrue(snapshot.sum > 0L)
assertEquals(snapshot.count, 3L)
// Check that the 1L fell into the first bucket (max 1)
// assertEquals(1L, snapshot.values[1])
// Check that the 2L fell into the second bucket (max 2)
@@ -126,6 +127,7 @@ class TimingDistributionMetricTypeTest {
val snapshot = metric.testGetValue("store2")!!
// Check the sum
assertTrue(snapshot.sum > 0)
assertEquals(snapshot.count, 3L)
// Check that the 1L fell into the first bucket
// assertEquals(1L, snapshot.values[1])
// Check that the 2L fell into the second bucket
@@ -137,6 +139,7 @@ class TimingDistributionMetricTypeTest {
val snapshot2 = metric.testGetValue("store3")!!
// Check the sum
assertEquals(snapshot.sum, snapshot2.sum)
assertEquals(snapshot2.count, 3L)
// Check that the 1L fell into the first bucket
// assertEquals(1L, snapshot2.values[1])
// Check that the 2L fell into the second bucket
@@ -169,6 +172,9 @@ class TimingDistributionMetricTypeTest {
// Check the sum
assertEquals(6L * secondsToNanos, snapshot.sum)

// Check that we got the right number of samples.
assertEquals(snapshot.count, 3L)

// We should get a sample in 3 buckets.
// These numbers are a bit magic, but they correspond to
// `hist.sample_to_bucket_minimum(i * seconds_to_nanos)` for `i = 1..=3`,
@@ -273,6 +279,7 @@ class TimingDistributionMetricTypeTest {
val snapshot = metric.testGetValue()!!
// Check the sum
assertTrue(snapshot.sum > 0L)
assertEquals(snapshot.count, 3L)
// Check that the 1L fell into the first bucket (max 1)
// assertEquals(1L, snapshot.values[1])
// Check that the 2L fell into the second bucket (max 2)
@@ -311,6 +318,8 @@ class TimingDistributionMetricTypeTest {

val snapshot = metric.testGetValue()!!
assertTrue("Should have stored some nanoseconds", snapshot.sum > 0L)

assertEquals(snapshot.count, 1L)
}

@Test
@@ -36,6 +36,7 @@ class TimingDistributionTypeTests: XCTestCase {
let snapshot = metric.testGetValue()!
let sum = snapshot.values.values.reduce(0, +)
XCTAssertEqual(3, sum)
XCTAssertEqual(3, snapshot.count)
}

func testTimingDistributionMustNotRecordIfDisabled() {
@@ -91,10 +92,12 @@ class TimingDistributionTypeTests: XCTestCase {
var snapshot = metric.testGetValue("store2")!
var sum = snapshot.values.values.reduce(0, +)
XCTAssertEqual(3, sum)
XCTAssertEqual(3, snapshot.count)

snapshot = metric.testGetValue("store3")!
sum = snapshot.values.values.reduce(0, +)
XCTAssertEqual(3, sum)
XCTAssertEqual(3, snapshot.count)
}

func testTimingDistributionMustNotRecordIfCanceled() {
@@ -162,6 +165,7 @@ class TimingDistributionTypeTests: XCTestCase {
let snapshot = metric.testGetValue()!
let sum = snapshot.values.values.reduce(0, +)
XCTAssertEqual(3, sum)
XCTAssertEqual(3, snapshot.count)
}

func testMeasureFunctionThrows() {
7 changes: 2 additions & 5 deletions glean-core/python/tests/metrics/test_timing_distribution.py
@@ -31,9 +31,7 @@ def test_the_api_saves_to_its_storage_engine():

snapshot = metric.test_get_value()
assert 0 < snapshot.sum

count = sum([v for v in snapshot.values.values()])
assert 3 == count
assert 3 == snapshot.count


def test_disabled_timing_distributions_must_not_record_data():
@@ -91,8 +89,7 @@ def test_api_saves_to_secondary_pings():
for store in ["store1", "store2", "store3"]:
snapshot = metric.test_get_value(store)
assert 0 < snapshot.sum
count = sum([v for v in snapshot.values.values()])
assert 3 == count
assert 3 == snapshot.count


def test_stopping_a_non_existent_timer_records_an_error():
3 changes: 3 additions & 0 deletions glean-core/src/glean.udl
@@ -388,6 +388,9 @@ dictionary DistributionData {

// The accumulated sum of all the samples in the distribution.
i64 sum;

// The total number of entries in the distribution.
i64 count;
};

// Identifier for a running timer.
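For orientation, the UDL dictionary above mirrors the Rust `DistributionData` struct changed further down in this commit. Below is a self-contained sketch of the shape the commit implies — the `values` field (bucket lower bound to sample count) is inferred from the snapshot helpers and doc examples, not shown in this hunk — along with the invariant the updated tests lean on: `count` equals the sum of all bucket values.

```Rust
use std::collections::HashMap;

// Sketch only; glean-core's real DistributionData is defined in
// glean-core/src/metrics/mod.rs and exposed through glean.udl.
struct DistributionData {
    /// Bucket lower bound -> number of samples in that bucket.
    values: HashMap<i64, i64>,
    /// The accumulated sum of all the samples in the distribution.
    sum: i64,
    /// The total number of entries in the distribution.
    count: i64,
}

fn main() {
    // Three samples (1, 2, 3), each landing in its own bucket.
    let values: HashMap<i64, i64> = HashMap::from([(1, 1), (2, 1), (3, 1)]);

    let snapshot = DistributionData {
        sum: 6,
        count: values.values().sum(),
        values,
    };

    // What the Python and Swift tests above used to compute by hand...
    assert_eq!(3, snapshot.values.values().sum::<i64>());
    // ...is now carried directly on the snapshot.
    assert_eq!(3, snapshot.count);
}
```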
1 change: 1 addition & 0 deletions glean-core/src/metrics/custom_distribution.rs
@@ -34,6 +34,7 @@ pub(crate) fn snapshot<B: Bucketing>(hist: &Histogram<B>) -> DistributionData {
.map(|(k, v)| (k as i64, v as i64))
.collect(),
sum: hist.sum() as i64,
count: hist.count() as i64,
}
}

1 change: 1 addition & 0 deletions glean-core/src/metrics/memory_distribution.rs
@@ -44,6 +44,7 @@ pub(crate) fn snapshot(hist: &Histogram<Functional>) -> DistributionData {
.map(|(k, v)| (k as i64, v as i64))
.collect(),
sum: hist.sum() as i64,
count: hist.count() as i64,
}
}

3 changes: 3 additions & 0 deletions glean-core/src/metrics/mod.rs
@@ -77,6 +77,9 @@ pub struct DistributionData {

/// The accumulated sum of all the samples in the distribution.
pub sum: i64,

/// The total number of entries in the distribution.
pub count: i64,
}

/// The available metrics.
3 changes: 3 additions & 0 deletions glean-core/src/metrics/timing_distribution.rs
@@ -75,6 +75,7 @@ pub(crate) fn snapshot(hist: &Histogram<Functional>) -> DistributionData {
.map(|(k, v)| (k as i64, v as i64))
.collect(),
sum: hist.sum() as i64,
count: hist.count() as i64,
}
}

@@ -487,6 +488,7 @@ mod test {
let snap = snapshot(&hist);

let expected_json = json!({
"count": 10,
"sum": 55,
"values": {
"1": 1,
@@ -520,6 +522,7 @@ mod test {
let snap = snapshot(&hist);

let expected_json = json!({
"count": 4,
"sum": 4612,
"values": {
"1024": 2,
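Each of the three snapshot helpers touched in this commit gets the same one-line addition. The sketch below shows the overall pattern with a toy histogram type; glean-core's real `Histogram` API is not visible in this diff, so the `buckets`, `sum`, and `count` fields here are stand-ins.

```Rust
use std::collections::HashMap;

// Toy histogram; the real Histogram type lives in glean-core and is generic
// over its bucketing strategy.
struct Histogram {
    buckets: HashMap<u64, u64>, // bucket lower bound -> samples in that bucket
    sum: u64,
    count: u64,
}

struct DistributionData {
    values: HashMap<i64, i64>,
    sum: i64,
    count: i64,
}

// Mirrors the pattern of the snapshot helpers in this commit: copy the
// buckets, the sum, and (newly) the total sample count into the snapshot.
fn snapshot(hist: &Histogram) -> DistributionData {
    DistributionData {
        values: hist
            .buckets
            .iter()
            .map(|(&k, &v)| (k as i64, v as i64))
            .collect(),
        sum: hist.sum as i64,
        count: hist.count as i64,
    }
}

fn main() {
    let hist = Histogram {
        buckets: HashMap::from([(1, 1), (2, 1), (4, 1)]),
        sum: 7,
        count: 3,
    };
    let snap = snapshot(&hist);
    assert_eq!(3, snap.count);
    assert_eq!(7, snap.sum);
    assert_eq!(1, snap.values[&4]);
}
```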
26 changes: 21 additions & 5 deletions glean-core/tests/timing_distribution.rs
@@ -47,6 +47,7 @@ fn serializer_should_correctly_serialize_timing_distribution() {
.get_value(&glean, "store1")
.expect("Value should be stored");

assert_eq!(snapshot.count, 1);
assert_eq!(snapshot.sum, duration as i64);
}

@@ -164,9 +165,12 @@ fn the_accumulate_samples_api_correctly_stores_timing_values() {

let seconds_to_nanos = 1000 * 1000 * 1000;

// Check that we got the right sum and number of samples.
// Check that we got the right sum.
assert_eq!(snapshot.sum, 6 * seconds_to_nanos);

// Check that we got the right number of samples.
assert_eq!(snapshot.count, 3);

// We should get a sample in 3 buckets.
// These numbers are a bit magic, but they correspond to
// `hist.sample_to_bucket_minimum(i * seconds_to_nanos)` for `i = 1..=3`.
@@ -201,9 +205,12 @@ fn the_accumulate_samples_api_correctly_handles_negative_values() {
.get_value(&glean, "store1")
.expect("Value should be stored");

// Check that we got the right sum and number of samples.
// Check that we got the right sum.
assert_eq!(snapshot.sum, 6);

// Check that we got the right number of samples.
assert_eq!(snapshot.count, 3);

// We should get a sample in each of the first 3 buckets.
assert_eq!(1, snapshot.values[&1]);
assert_eq!(1, snapshot.values[&2]);
@@ -245,6 +252,9 @@ fn the_accumulate_samples_api_correctly_handles_overflowing_values() {
// Overflowing values are truncated to MAX_SAMPLE_TIME and recorded.
assert_eq!(snapshot.sum as u64, MAX_SAMPLE_TIME + 6);

// Check that we got the right number of samples.
assert_eq!(snapshot.count, 4);

// We should get a sample in each of the first 3 buckets.
assert_eq!(1, snapshot.values[&1]);
assert_eq!(1, snapshot.values[&2]);
@@ -340,9 +350,12 @@ fn the_accumulate_raw_samples_api_correctly_stores_timing_values() {
.get_value(&glean, "store1")
.expect("Value should be stored");

// Check that we got the right sum and number of samples.
// Check that we got the right sum.
assert_eq!(snapshot.sum, 6 * seconds_to_nanos as i64);

// Check that we got the right number of samples.
assert_eq!(snapshot.count, 3);

// We should get a sample in 3 buckets.
// These numbers are a bit magic, but they correspond to
// `hist.sample_to_bucket_minimum(i * seconds_to_nanos)` for `i = 1..=3`.
@@ -386,16 +399,19 @@ fn raw_samples_api_error_cases() {
.get_value(&glean, "store1")
.expect("Value should be stored");

// Check that we got the right sum and number of samples.
// Check that we got the right sum.
assert_eq!(snapshot.sum, 2 + max_sample_time as i64);

// Check that we got the right number of samples.
assert_eq!(snapshot.count, 3);

// We should get a sample in 3 buckets.
// These numbers are a bit magic, but they correspond to
// `hist.sample_to_bucket_minimum(i * seconds_to_nanos)` for `i = {1, max_sample_time}`.
assert_eq!(2, snapshot.values[&1]);
assert_eq!(1, snapshot.values[&599512966122]);

// No errors should be reported.
// 1 error should be reported.
assert_eq!(
Ok(1),
test_get_num_recorded_errors(&glean, metric.meta(), ErrorType::InvalidOverflow)
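The last hunk flips the expectation from no errors to exactly one `InvalidOverflow` error: a raw sample longer than the maximum sample time is clamped and the overflow is reported, as the overflow test earlier in this file also notes. A toy sketch of that clamp-and-report behavior — the constant and function below are placeholders, not glean-core's actual API:

```Rust
// Placeholder value; glean-core defines its own MAX_SAMPLE_TIME.
const MAX_SAMPLE_TIME: u64 = 600_000_000_000;

/// Clamp a raw sample and report whether an InvalidOverflow error
/// should be recorded for it.
fn clamp_sample(sample: u64) -> (u64, bool) {
    if sample > MAX_SAMPLE_TIME {
        (MAX_SAMPLE_TIME, true)
    } else {
        (sample, false)
    }
}

fn main() {
    // Two in-range samples plus one that overflows, as in the test above.
    let samples = [1_u64, 1, MAX_SAMPLE_TIME + 1];
    let overflows = samples.iter().filter(|&&s| clamp_sample(s).1).count();

    // Matches the updated expectation: exactly one InvalidOverflow error.
    assert_eq!(1, overflows);
}
```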
