This repository has been archived by the owner on Oct 3, 2023. It is now read-only.

Commit

quickstart/{node, python}/metrics: Add tags for "status" and "error" (#485)

* python: Add error tags
* node: Add error tags
hvent90 authored and Emmanuel T Odeke committed Nov 28, 2018
1 parent 499a0ee commit de28a67
Showing 2 changed files with 124 additions and 95 deletions.
170 changes: 102 additions & 68 deletions content/quickstart/nodejs/metrics.md
@@ -219,36 +219,38 @@ function processLine(line) {
## Record and Aggregate Data

### Create Views and Tags
We now determine how our metrics will be organized by creating `Views`. We will also create the variable needed to add extra text meta-data to our metrics, `tagKey`.
We now determine how our metrics will be organized by creating `Views`. We will also create the variables needed to add extra text meta-data to our metrics -- `methodTagKey`, `statusTagKey`, and `errorTagKey`.

{{<tabs Snippet All>}}
{{<highlight javascript>}}
const tagKey = "method";
const methodTagKey = "method";
const statusTagKey = "status";
const errorTagKey = "error";

const latencyView = stats.createView(
"demo/latency",
mLatencyMs,
AggregationType.DISTRIBUTION,
[tagKey],
[methodTagKey, statusTagKey, errorTagKey],
"The distribution of the latencies",
// Bucket Boundaries:
// [>=0ms, >=25ms, >=50ms, >=75ms, >=100ms, >=200ms, >=400ms, >=600ms, >=800ms, >=1s, >=2s, >=4s, >=6s]
[0, 25, 50, 75, 100, 200, 400, 600, 800, 1000, 2000, 4000, 6000]
);

const lineCountView = stats.createView(
"demo/lines_in",
mLineLengths,
AggregationType.COUNT,
[tagKey],
[methodTagKey],
"The number of lines from standard input"
)

const lineLengthView = stats.createView(
"demo/line_lengths",
mLineLengths,
AggregationType.DISTRIBUTION,
[tagKey],
[methodTagKey],
"Groups the lengths of keys in buckets",
// Bucket Boundaries:
// [>=0B, >=5B, >=10B, >=15B, >=20B, >=40B, >=60B, >=80, >=100B, >=200B, >=400, >=600, >=800, >=1000]
@@ -277,32 +279,34 @@ const stream = fs.createReadStream("./test.txt");
// Creates an interface to read and process our file line by line
const lineReader = readline.createInterface({ input: stream });

const tagKey = "method";
const methodTagKey = "method";
const statusTagKey = "status";
const errorTagKey = "error";

const latencyView = stats.createView(
"demo/latency",
mLatencyMs,
AggregationType.DISTRIBUTION,
[tagKey],
[methodTagKey, statusTagKey, errorTagKey],
"The distribution of the latencies",
// Bucket Boundaries:
// [>=0ms, >=25ms, >=50ms, >=75ms, >=100ms, >=200ms, >=400ms, >=600ms, >=800ms, >=1s, >=2s, >=4s, >=6s]
[0, 25, 50, 75, 100, 200, 400, 600, 800, 1000, 2000, 4000, 6000]
);

const lineCountView = stats.createView(
"demo/lines_in",
mLineLengths,
AggregationType.COUNT,
[tagKey],
[methodTagKey],
"The number of lines from standard input"
)

const lineLengthView = stats.createView(
"demo/line_lengths",
mLineLengths,
AggregationType.DISTRIBUTION,
[tagKey],
[methodTagKey],
"Groups the lengths of keys in buckets",
// Bucket Boundaries:
// [>=0B, >=5B, >=10B, >=15B, >=20B, >=40B, >=60B, >=80, >=100B, >=200B, >=400, >=600, >=800, >=1000]
@@ -335,19 +339,33 @@ Again, this is arbitrary and purely up to the user. For example, if we wanted to tr
Now we will record the desired metrics. To do so, we will use `stats.record()` and pass in our measurements.

{{<tabs Snippet All>}}
{{<highlight javascript>}}
const tags = { "method": "repl" };

stats.record({
measure: mLineLengths,
tags,
value: processedLine.length
});

stats.record({
measure: mLatencyMs,
tags,
value: endTime.getTime() - startTime.getTime()
{{<highlight javascript>}}
lineReader.on("line", function (line) {
try {
// ...
const tags = {method: "repl", status: "OK"};

stats.record({
measure: mLineLengths,
tags,
value: processedLine.length
});

stats.record({
measure: mLatencyMs,
tags,
value: endTime.getTime() - startTime.getTime()
});
} catch (err) {
stats.record({
measure: mLatencyMs,
{method: "repl", status: "ERROR", error: err.message},
value: (new Date()) - startTime.getTime()
});
}

// Restarts the start time for the REPL
startTime = new Date();
});
{{</highlight>}}
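
For reference, the pattern the new snippet follows (time the work, then tag the recorded latency with `status` and, on failure, `error`) can be reduced to a small self-contained sketch. The `record` function below is only a stand-in for `stats.record()` so that the sketch runs on its own; the tag names mirror the keys defined above.

{{<highlight javascript>}}
// Stand-in for stats.record() so this sketch runs without OpenCensus.
function record(measurement) {
  console.log(JSON.stringify(measurement));
}

// Times a unit of work and tags the recorded latency with "status",
// adding an "error" tag when the work throws.
function timeAndRecord(work) {
  const startTime = new Date();
  try {
    const result = work();
    record({
      measure: "repl/latency",
      tags: { method: "repl", status: "OK" },
      value: new Date().getTime() - startTime.getTime()
    });
    return result;
  } catch (err) {
    record({
      measure: "repl/latency",
      tags: { method: "repl", status: "ERROR", error: err.message },
      value: new Date().getTime() - startTime.getTime()
    });
    throw err;
  }
}

timeAndRecord(() => "hello".toUpperCase()); // records an "OK" measurement
{{</highlight>}}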

@@ -409,25 +427,33 @@ let startTime = new Date();

// REPL is the read, evaluate, print and loop
lineReader.on("line", function (line) { // Read
const processedLine = processLine(line); // Evaluate
console.log(processedLine); // Print

// Registers the end of our REPL
const endTime = new Date();

const tags = { "method": "repl" };

stats.record({
measure: mLineLengths,
tags,
value: processedLine.length
});

stats.record({
measure: mLatencyMs,
tags,
value: endTime.getTime() - startTime.getTime()
});
try {
const processedLine = processLine(line); // Evaluate
console.log(processedLine); // Print

// Registers the end of our REPL
const endTime = new Date();

const tags = {method: "repl", status: "OK"};

stats.record({
measure: mLineLengths,
tags,
value: processedLine.length
});

stats.record({
measure: mLatencyMs,
tags,
value: endTime.getTime() - startTime.getTime()
});
} catch (err) {
stats.record({
measure: mLatencyMs,
{method: "repl", status: "ERROR", error: err.message},
value: (new Date()) - startTime.getTime()
});
}

// Restarts the start time for the REPL
startTime = new Date();
@@ -531,25 +557,33 @@ let startTime = new Date();

// REPL is the read, evaluate, print and loop
lineReader.on("line", function (line) { // Read
const processedLine = processLine(line); // Evaluate
console.log(processedLine); // Print

// Registers the end of our REPL
const endTime = new Date();

const tags = { "method": "repl" };

stats.record({
measure: mLineLengths,
tags,
value: processedLine.length
});

stats.record({
measure: mLatencyMs,
tags,
value: endTime.getTime() - startTime.getTime()
});
try {
const processedLine = processLine(line); // Evaluate
console.log(processedLine); // Print

// Registers the end of our REPL
const endTime = new Date();

const tags = {method: "repl", status: "OK"};

stats.record({
measure: mLineLengths,
tags,
value: processedLine.length
});

stats.record({
measure: mLatencyMs,
tags,
value: endTime.getTime() - startTime.getTime()
});
} catch (err) {
stats.record({
measure: mLatencyMs,
{method: "repl", status: "ERROR", error: err.message},
value: (new Date()) - startTime.getTime()
});
}

// Restarts the start time for the REPL
startTime = new Date();
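A note on the new `error` tag in the Node.js snippets: it records `err.message`, which can take an unbounded number of distinct values and therefore produce an unbounded number of time series. If that is a concern, the tag value can be bounded; a minimal sketch, where `errorTagValue` is a hypothetical helper rather than part of the quickstart:

{{<highlight javascript>}}
// Hypothetical helper: collapse raw errors into a small, bounded set of
// tag values (the error class name) to keep series cardinality in check.
function errorTagValue(err) {
  return err && err.name ? err.name : "UnknownError"; // e.g. "TypeError"
}

// In the catch blocks above, the record call would then use:
//   tags: { method: "repl", status: "ERROR", error: errorTagValue(err) }
{{</highlight>}}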
49 changes: 22 additions & 27 deletions content/quickstart/python/metrics.md
@@ -100,11 +100,8 @@ from opencensus.tags import tag_value as tag_value_module
# The latency in milliseconds
m_latency_ms = measure_module.MeasureFloat("repl/latency", "The latency in milliseconds per REPL loop", "ms")

# Encounters the number of non EOF(end-of-file) errors.
m_errors = measure_module.Int("repl/errors", "The number of errors encountered", "1")

# Counts/groups the lengths of lines read in.
m_line_lengths = measure_module.Int("repl/line_lengths", "The distribution of line lengths", "By")
m_line_lengths = measure_module.MeasureInt("repl/line_lengths", "The distribution of line lengths", "By")
{{</highlight>}}

{{<highlight python>}}
@@ -124,9 +121,6 @@ from opencensus.tags import tag_value as tag_value_module
# The latency in milliseconds
m_latency_ms = measure_module.MeasureFloat("repl/latency", "The latency in milliseconds per REPL loop", "ms")

# Encounters the number of non EOF(end-of-file) errors.
m_errors = measure_module.MeasureInt("repl/errors", "The number of errors encountered", "1")

# Counts/groups the lengths of lines read in.
m_line_lengths = measure_module.MeasureInt("repl/line_lengths", "The distribution of line lengths", "By")

@@ -135,6 +129,10 @@ stats_recorder = stats.Stats().stats_recorder

# Create the tag key
key_method = tag_key_module.TagKey("method")
# Create the status key
key_status = tag_key_module.TagKey("status")
# Create the error key
key_error = tag_key_module.TagKey("error")

def main():
# In a REPL:
@@ -189,9 +187,6 @@ from opencensus.tags import tag_value as tag_value_module
# The latency in milliseconds
m_latency_ms = measure_module.MeasureFloat("repl/latency", "The latency in milliseconds per REPL loop", "ms")

# Encounters the number of non EOF(end-of-file) errors.
m_errors = measure_module.MeasureInt("repl/errors", "The number of errors encountered", "1")

# Counts/groups the lengths of lines read in.
m_line_lengths = measure_module.MeasureInt("repl/line_lengths", "The distribution of line lengths", "By")

@@ -200,29 +195,28 @@ stats_recorder = stats.Stats().stats_recorder

# Create the tag key
key_method = tag_key_module.TagKey("method")
# Create the status key
key_status = tag_key_module.TagKey("status")
# Create the error key
key_error = tag_key_module.TagKey("error")

latency_view = view_module.View("demo/latency", "The distribution of the latencies",
[key_method],
m_latency_ms,
# Latency in buckets:
# [>=0ms, >=25ms, >=50ms, >=75ms, >=100ms, >=200ms, >=400ms, >=600ms, >=800ms, >=1s, >=2s, >=4s, >=6s]
aggregation_module.DistributionAggregation([0, 25, 50, 75, 100, 200, 400, 600, 800, 1000, 2000, 4000, 6000]))
[key_method, key_status, key_error],
m_latency_ms,
# Latency in buckets:
# [>=0ms, >=25ms, >=50ms, >=75ms, >=100ms, >=200ms, >=400ms, >=600ms, >=800ms, >=1s, >=2s, >=4s, >=6s]
aggregation_module.DistributionAggregation([0, 25, 50, 75, 100, 200, 400, 600, 800, 1000, 2000, 4000, 6000]))

line_count_view = view_module.View("demo/lines_in", "The number of lines from standard input",
[],
m_line_lengths,
aggregation_module.CountAggregation())

error_count_view = view_module.View("demo/errors", "The number of errors encountered",
[key_method],
m_errors,
aggregation_module.CountAggregation())
[],
m_line_lengths,
aggregation_module.CountAggregation())

line_length_view = view_module.View("demo/line_lengths", "Groups the lengths of keys in buckets",
[],
m_line_lengths,
# Lengths: [>=0B, >=5B, >=10B, >=15B, >=20B, >=40B, >=60B, >=80, >=100B, >=200B, >=400, >=600, >=800, >=1000]
aggregation_module.DistributionAggregation([0, 5, 10, 15, 20, 40, 60, 80, 100, 200, 400, 600, 800, 1000]))
[],
m_line_lengths,
# Lengths: [>=0B, >=5B, >=10B, >=15B, >=20B, >=40B, >=60B, >=80, >=100B, >=200B, >=400, >=600, >=800, >=1000]
aggregation_module.DistributionAggregation([0, 5, 10, 15, 20, 40, 60, 80, 100, 200, 400, 600, 800, 1000]))

def main():
# In a REPL:
@@ -249,6 +243,7 @@ def readEvaluateProcessLine():

tmap = tag_map_module.TagMap()
tmap.insert(key_method, tag_value_module.TagValue("repl"))
tmap.insert(key_status, tag_value_module.TagValue("OK"))

# Insert the tag map finally
mmap.record(tmap)
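The Python hunks above only show the success path gaining `method` and `status` tags; per the commit message, error tags were added on the Python side as well. A rough sketch of how the exception path might record them, assuming the measures and tag keys defined earlier and opencensus-python's measurement-map API (`new_measurement_map`, `measure_float_put`, `record`):

{{<highlight python>}}
import time

def record_error(stats_recorder, start_time, err):
    # Hedged sketch mirroring the Node.js catch block. Assumes m_latency_ms,
    # key_method, key_status and key_error as defined earlier in this file.
    mmap = stats_recorder.new_measurement_map()
    mmap.measure_float_put(m_latency_ms, (time.time() - start_time) * 1000.0)

    tmap = tag_map_module.TagMap()
    tmap.insert(key_method, tag_value_module.TagValue("repl"))
    tmap.insert(key_status, tag_value_module.TagValue("ERROR"))
    tmap.insert(key_error, tag_value_module.TagValue(str(err)))
    mmap.record(tmap)
{{</highlight>}}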
