From de28a671ada4e0e0bbbaea3e6f08e9aeb74b2c62 Mon Sep 17 00:00:00 2001 From: Henry Ventura Date: Wed, 28 Nov 2018 14:49:01 -0800 Subject: [PATCH] quickstart/{node, python}/metrics: Add tags for "status" and "error" (#485) * python: Add error tags * node: Add error tags --- content/quickstart/nodejs/metrics.md | 170 ++++++++++++++++----------- content/quickstart/python/metrics.md | 49 ++++---- 2 files changed, 124 insertions(+), 95 deletions(-) diff --git a/content/quickstart/nodejs/metrics.md b/content/quickstart/nodejs/metrics.md index 617c0619..9f8efc66 100644 --- a/content/quickstart/nodejs/metrics.md +++ b/content/quickstart/nodejs/metrics.md @@ -219,36 +219,38 @@ function processLine(line) { ## Record and Aggregate Data ### Create Views and Tags -We now determine how our metrics will be organized by creating `Views`. We will also create the variable needed to add extra text meta-data to our metrics, `tagKey`. +We now determine how our metrics will be organized by creating `Views`. We will also create the variable needed to add extra text meta-data to our metrics -- `methodTagKey`, `statusTagKey`, and `errorTagKey`. 
{{}} {{}} -const tagKey = "method"; +const methodTagKey = "method"; +const statusTagKey = "status"; +const errorTagKey = "error"; const latencyView = stats.createView( "demo/latency", mLatencyMs, - AggregationType.DISTRIBUTION, - [tagKey], + AggregationType.DISTRIBUTION, + [methodTagKey, statusTagKey, errorTagKey], "The distribution of the latencies", // Bucket Boundaries: // [>=0ms, >=25ms, >=50ms, >=75ms, >=100ms, >=200ms, >=400ms, >=600ms, >=800ms, >=1s, >=2s, >=4s, >=6s] [0, 25, 50, 75, 100, 200, 400, 600, 800, 1000, 2000, 4000, 6000] ); - + const lineCountView = stats.createView( "demo/lines_in", mLineLengths, - AggregationType.COUNT, - [tagKey], + AggregationType.COUNT, + [methodTagKey], "The number of lines from standard input" ) const lineLengthView = stats.createView( "demo/line_lengths", mLineLengths, - AggregationType.DISTRIBUTION, - [tagKey], + AggregationType.DISTRIBUTION, + [methodTagKey], "Groups the lengths of keys in buckets", // Bucket Boudaries: // [>=0B, >=5B, >=10B, >=15B, >=20B, >=40B, >=60B, >=80, >=100B, >=200B, >=400, >=600, >=800, >=1000] @@ -277,32 +279,34 @@ const stream = fs.createReadStream("./test.txt"); // Creates an interface to read and process our file line by line const lineReader = readline.createInterface({ input: stream }); -const tagKey = "method"; +const methodTagKey = "method"; +const statusTagKey = "status"; +const errorTagKey = "error"; const latencyView = stats.createView( "demo/latency", mLatencyMs, - AggregationType.DISTRIBUTION, - [tagKey], + AggregationType.DISTRIBUTION, + [methodTagKey, statusTagKey, errorTagKey], "The distribution of the latencies", // Bucket Boundaries: // [>=0ms, >=25ms, >=50ms, >=75ms, >=100ms, >=200ms, >=400ms, >=600ms, >=800ms, >=1s, >=2s, >=4s, >=6s] [0, 25, 50, 75, 100, 200, 400, 600, 800, 1000, 2000, 4000, 6000] ); - + const lineCountView = stats.createView( "demo/lines_in", mLineLengths, - AggregationType.COUNT, - [tagKey], + AggregationType.COUNT, + [methodTagKey], "The number of lines from standard input" ) const lineLengthView = stats.createView( "demo/line_lengths", mLineLengths, - 
AggregationType.DISTRIBUTION, - [tagKey], + AggregationType.DISTRIBUTION, + [methodTagKey], "Groups the lengths of keys in buckets", // Bucket Boudaries: // [>=0B, >=5B, >=10B, >=15B, >=20B, >=40B, >=60B, >=80, >=100B, >=200B, >=400, >=600, >=800, >=1000] @@ -335,19 +339,33 @@ Again, this is arbitrary and purely up the user. For example, if we wanted to tr Now we will record the desired metrics. To do so, we will use `stats.record()` and pass in our measurements. {{}} -{{}} -const tags = { "method": "repl" }; - -stats.record({ - measure: mLineLengths, - tags, - value: processedLine.length -}); - -stats.record({ - measure: mLatencyMs, - tags, - value: endTime.getTime() - startTime.getTime() +{{}} +lineReader.on("line", function (line) { + try { + // ... + const tags = {method: "repl", status: "OK"}; + + stats.record({ + measure: mLineLengths, + tags, + value: processedLine.length + }); + + stats.record({ + measure: mLatencyMs, + tags, + value: endTime.getTime() - startTime.getTime() + }); + } catch (err) { + stats.record({ + measure: mLatencyMs, + tags: {method: "repl", status: "ERROR", error: err.message}, + value: (new Date()) - startTime.getTime() + }); + } + + // Restarts the start time for the REPL + startTime = new Date(); }); {{}} @@ -409,25 +427,33 @@ let startTime = new Date(); // REPL is the read, evaluate, print and loop lineReader.on("line", function (line) { // Read - const processedLine = processLine(line); // Evaluate - console.log(processedLine); // Print - - // Registers the end of our REPL - const endTime = new Date(); - - const tags = { "method": "repl" }; - - stats.record({ - measure: mLineLengths, - tags, - value: processedLine.length - }); - - stats.record({ - measure: mLatencyMs, - tags, - value: endTime.getTime() - startTime.getTime() - }); + try { + const processedLine = processLine(line); // Evaluate + console.log(processedLine); // Print + + // Registers the end of our REPL + const endTime = new Date(); + + const tags = {method: "repl", status: "OK"}; + + stats.record({ + 
measure: mLineLengths, + tags, + value: processedLine.length + }); + + stats.record({ + measure: mLatencyMs, + tags, + value: endTime.getTime() - startTime.getTime() + }); + } catch (err) { + stats.record({ + measure: mLatencyMs, + tags: {method: "repl", status: "ERROR", error: err.message}, + value: (new Date()) - startTime.getTime() + }); + } // Restarts the start time for the REPL startTime = endTime; @@ -531,25 +557,33 @@ let startTime = new Date(); // REPL is the read, evaluate, print and loop lineReader.on("line", function (line) { // Read - const processedLine = processLine(line); // Evaluate - console.log(processedLine); // Print - - // Registers the end of our REPL - const endTime = new Date(); - - const tags = { "method": "repl" }; - - stats.record({ - measure: mLineLengths, - tags, - value: processedLine.length - }); - - stats.record({ - measure: mLatencyMs, - tags, - value: endTime.getTime() - startTime.getTime() - }); + try { + const processedLine = processLine(line); // Evaluate + console.log(processedLine); // Print + + // Registers the end of our REPL + const endTime = new Date(); + + const tags = {method: "repl", status: "OK"}; + + stats.record({ + measure: mLineLengths, + tags, + value: processedLine.length + }); + + stats.record({ + measure: mLatencyMs, + tags, + value: endTime.getTime() - startTime.getTime() + }); + } catch (err) { + stats.record({ + measure: mLatencyMs, + tags: {method: "repl", status: "ERROR", error: err.message}, + value: (new Date()) - startTime.getTime() + }); + } // Restarts the start time for the REPL startTime = endTime; diff --git a/content/quickstart/python/metrics.md b/content/quickstart/python/metrics.md index eb4be6dd..6fb09c35 100644 --- a/content/quickstart/python/metrics.md +++ b/content/quickstart/python/metrics.md @@ -100,11 +100,8 @@ import opencensus.tags import tag_value as tag_value_module # The latency in milliseconds m_latency_ms = measure_module.MeasureFloat("repl/latency", "The latency in milliseconds per REPL 
loop", "ms") -# Encounters the number of non EOF(end-of-file) errors. -m_errors = measure_module.Int("repl/errors", "The number of errors encountered", "1") - # Counts/groups the lengths of lines read in. -m_line_lengths = measure_module.Int("repl/line_lengths", "The distribution of line lengths", "By") +m_line_lengths = measure_module.MeasureInt("repl/line_lengths", "The distribution of line lengths", "By") {{}} {{}} @@ -124,9 +121,6 @@ from opencensus.tags import tag_value as tag_value_module # The latency in milliseconds m_latency_ms = measure_module.MeasureFloat("repl/latency", "The latency in milliseconds per REPL loop", "ms") -# Encounters the number of non EOF(end-of-file) errors. -m_errors = measure_module.MeasureInt("repl/errors", "The number of errors encountered", "1") - # Counts/groups the lengths of lines read in. m_line_lengths = measure_module.MeasureInt("repl/line_lengths", "The distribution of line lengths", "By") @@ -135,6 +129,10 @@ stats_recorder = stats.Stats().stats_recorder # Create the tag key key_method = tag_key_module.TagKey("method") +# Create the status key +key_status = tag_key_module.TagKey("status") +# Create the error key +key_error = tag_key_module.TagKey("error") def main(): # In a REPL: @@ -189,9 +187,6 @@ from opencensus.tags import tag_value as tag_value_module # The latency in milliseconds m_latency_ms = measure_module.MeasureFloat("repl/latency", "The latency in milliseconds per REPL loop", "ms") -# Encounters the number of non EOF(end-of-file) errors. -m_errors = measure_module.MeasureInt("repl/errors", "The number of errors encountered", "1") - # Counts/groups the lengths of lines read in. 
m_line_lengths = measure_module.MeasureInt("repl/line_lengths", "The distribution of line lengths", "By") @@ -200,29 +195,28 @@ stats_recorder = stats.Stats().stats_recorder # Create the tag key key_method = tag_key_module.TagKey("method") +# Create the status key +key_status = tag_key_module.TagKey("status") +# Create the error key +key_error = tag_key_module.TagKey("error") latency_view = view_module.View("demo/latency", "The distribution of the latencies", - [key_method], - m_latency_ms, - # Latency in buckets: - # [>=0ms, >=25ms, >=50ms, >=75ms, >=100ms, >=200ms, >=400ms, >=600ms, >=800ms, >=1s, >=2s, >=4s, >=6s] - aggregation_module.DistributionAggregation([0, 25, 50, 75, 100, 200, 400, 600, 800, 1000, 2000, 4000, 6000])) + [key_method, key_status, key_error], + m_latency_ms, + # Latency in buckets: + # [>=0ms, >=25ms, >=50ms, >=75ms, >=100ms, >=200ms, >=400ms, >=600ms, >=800ms, >=1s, >=2s, >=4s, >=6s] + aggregation_module.DistributionAggregation([0, 25, 50, 75, 100, 200, 400, 600, 800, 1000, 2000, 4000, 6000])) line_count_view = view_module.View("demo/lines_in", "The number of lines from standard input", - [], - m_line_lengths, - aggregation_module.CountAggregation()) - -error_count_view = view_module.View("demo/errors", "The number of errors encountered", - [key_method], - m_errors, - aggregation_module.CountAggregation()) + [], + m_line_lengths, + aggregation_module.CountAggregation()) line_length_view = view_module.View("demo/line_lengths", "Groups the lengths of keys in buckets", - [], - m_line_lengths, - # Lengths: [>=0B, >=5B, >=10B, >=15B, >=20B, >=40B, >=60B, >=80, >=100B, >=200B, >=400, >=600, >=800, >=1000] - aggregation_module.DistributionAggregation([0, 5, 10, 15, 20, 40, 60, 80, 100, 200, 400, 600, 800, 1000])) + [], + m_line_lengths, + # Lengths: [>=0B, >=5B, >=10B, >=15B, >=20B, >=40B, >=60B, >=80, >=100B, >=200B, >=400, >=600, >=800, >=1000] + aggregation_module.DistributionAggregation([0, 5, 10, 15, 20, 40, 60, 80, 100, 200, 400, 600, 800, 
1000])) def main(): # In a REPL: @@ -249,6 +243,7 @@ def readEvaluateProcessLine(): tmap = tag_map_module.TagMap() tmap.insert(key_method, tag_value_module.TagValue("repl")) + tmap.insert(key_status, tag_value_module.TagValue("OK")) # Insert the tag map finally mmap.record(tmap)