Skip to content
forked from v8/v8

Commit

Permalink
[resultdb] Add ResultDB indicator
Browse files Browse the repository at this point in the history
Adds a new indicator that will send every result to ResultDB (and ultimately to a BigQuery table; to be configured later).

If we are not running in a ResultDB context, we introduce only minimal overhead by exiting early from the indicator.

To test these changes in a LUCI context with ResultDB, we activated the resultdb feature flag via V8-Recipe-Flags. This feature was implemented in https://crrev.com/c/3925576 .


V8-Recipe-Flags: resultdb
Bug: v8:13316
Change-Id: I5d98e8f27531b536686a8d63b993313b9d6f62c5
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3905385
Commit-Queue: Liviu Rau <liviurau@google.com>
Reviewed-by: Alexander Schulze <alexschulze@chromium.org>
Cr-Commit-Position: refs/heads/main@{#83672}
  • Loading branch information
Liviu Rau authored and V8 LUCI CQ committed Oct 13, 2022
1 parent fb3321e commit 237de89
Show file tree
Hide file tree
Showing 7 changed files with 191 additions and 46 deletions.
5 changes: 5 additions & 0 deletions .vpython3
Original file line number Diff line number Diff line change
Expand Up @@ -74,3 +74,8 @@ wheel: <
name: "infra/python/wheels/protobuf-py3"
version: "version:3.19.3"
>

wheel: <
name: "infra/python/wheels/requests-py2_py3"
version: "version:2.13.0"
>
6 changes: 5 additions & 1 deletion tools/testrunner/objects/testcase.py
Original file line number Diff line number Diff line change
Expand Up @@ -447,9 +447,13 @@ def cmp(x, y):
(other.suite.name, other.name, other.variant)
)

def __str__(self):
@property
def full_name(self):
return self.suite.name + '/' + self.name

def __str__(self):
return self.full_name


class D8TestCase(TestCase):
def get_shell(self):
Expand Down
82 changes: 38 additions & 44 deletions tools/testrunner/testproc/indicators.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@


def print_failure_header(test, is_flaky=False):
text = [str(test)]
text = [test.full_name]
if test.output_proc.negative:
text.append('[negative]')
if is_flaky:
Expand All @@ -24,6 +24,23 @@ def print_failure_header(test, is_flaky=False):
print(output.encode(encoding, errors='replace').decode(encoding))


def formatted_result_output(result):
  """Renders one test result as human-readable text.

  Includes stripped stderr/stdout sections (only when non-empty), the
  command line, and CRASHED/TIMEOUT markers with the exit code on crash.
  """
  out = result.output
  sections = []
  if out.stderr:
    sections += ["--- stderr ---", out.stderr.strip()]
  if out.stdout:
    sections += ["--- stdout ---", out.stdout.strip()]
  sections.append("Command: %s" % result.cmd.to_string())
  if out.HasCrashed():
    sections += ["exit code: %s" % out.exit_code_string, "--- CRASHED ---"]
  if out.HasTimedOut():
    sections.append("--- TIMEOUT ---")
  return '\n'.join(sections)


class ProgressIndicator():

def __init__(self, context, options, test_count):
Expand Down Expand Up @@ -68,19 +85,7 @@ def finished(self):
for test, result, is_flaky in self._failed:
flaky += int(is_flaky)
print_failure_header(test, is_flaky=is_flaky)
if result.output.stderr:
print("--- stderr ---")
print(result.output.stderr.strip())
if result.output.stdout:
print("--- stdout ---")
print(result.output.stdout.strip())
print("Command: %s" % result.cmd.to_string())
if result.output.HasCrashed():
print("exit code: %s" % result.output.exit_code_string)
print("--- CRASHED ---")
crashed += 1
if result.output.HasTimedOut():
print("--- TIMEOUT ---")
print(formatted_result_output(result))
if len(self._failed) == 0:
print("===")
print("=== All tests succeeded")
Expand Down Expand Up @@ -230,7 +235,7 @@ def on_test_result(self, test, result):
else:
self._passed += 1

self._print_progress(str(test))
self._print_progress(test.full_name)
if result.has_unexpected_output:
output = result.output
stdout = output.stdout.strip()
Expand Down Expand Up @@ -358,10 +363,7 @@ def __init__(self, context, options, test_count, framework_name):
self.test_count = 0

def on_test_result(self, test, result):
if result.is_rerun:
self.process_results(test, result.results)
else:
self.process_results(test, [result])
self.process_results(test, result.as_list)

def process_results(self, test, results):
for run, result in enumerate(results):
Expand All @@ -376,7 +378,7 @@ def process_results(self, test, results):
if not result.has_unexpected_output and run == 0:
continue

record = self._test_record(test, result, output, run)
record = self._test_record(test, result, run)
record.update({
"result": test.output_proc.get_outcome(output),
"stdout": output.stdout,
Expand All @@ -392,41 +394,33 @@ def result_value(test, result, output):
return ""
return test.output_proc.get_outcome(output)

record = self._test_record(test, result, output, run)
record.update({
"result": result_value(test, result, output),
"marked_slow": test.is_slow,
})
record = self._test_record(test, result, run)
record.update(
result=result_value(test, result, output),
marked_slow=test.is_slow,
)
self.tests.add(record)
self.duration_sum += record['duration']
self.test_count += 1

def _test_record(self, test, result, output, run):
return {
"name": str(test),
"flags": result.cmd.args,
"command": result.cmd.to_string(relative=True),
"run": run + 1,
"exit_code": output.exit_code,
"expected": test.expected_outcomes,
"duration": output.duration,
"random_seed": test.random_seed,
"target_name": test.get_shell(),
"variant": test.variant,
"variant_flags": test.variant_flags,
"framework_name": self.framework_name,
}
def _test_record(self, test, result, run):
  """Returns the base record for one test run, extended with the
  framework name and the (relative) command line."""
  record = util.base_test_record(test, result, run)
  record['framework_name'] = self.framework_name
  record['command'] = result.cmd.to_string(relative=True)
  return record

def finished(self):
duration_mean = None
if self.test_count:
duration_mean = self.duration_sum / self.test_count

result = {
"results": self.results,
"slowest_tests": self.tests.as_list(),
"duration_mean": duration_mean,
"test_total": self.test_count,
'results': self.results,
'slowest_tests': self.tests.as_list(),
'duration_mean': duration_mean,
'test_total': self.test_count,
}

with open(self.options.json_test_results, "w") as f:
Expand Down
3 changes: 2 additions & 1 deletion tools/testrunner/testproc/progress.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
from . import base
from testrunner.local import utils
from testrunner.testproc.indicators import JsonTestProgressIndicator, PROGRESS_INDICATORS
from testrunner.testproc.resultdb import ResultDBIndicator


class ResultsTracker(base.TestProcObserver):
Expand Down Expand Up @@ -66,7 +67,7 @@ def __init__(self, context, options, framework_name, test_count):
0,
JsonTestProgressIndicator(context, options, test_count,
framework_name))

self.procs.append(ResultDBIndicator(context, options, test_count))
self._requirement = max(proc._requirement for proc in self.procs)

def _on_result_for(self, test, result):
Expand Down
8 changes: 8 additions & 0 deletions tools/testrunner/testproc/result.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,10 @@ def is_grouped(self):
def is_rerun(self):
return False

@property
def as_list(self):
    # Uniform accessor: a single result is exposed as a one-element list so
    # that consumers can iterate over all (re)runs without special-casing.
    return [self]


class Result(ResultBase):
"""Result created by the output processor."""
Expand Down Expand Up @@ -112,5 +116,9 @@ def __init__(self, results):
def is_rerun(self):
return True

@property
def as_list(self):
    # A grouped (rerun) result already stores one result per run; expose
    # them directly.
    return self.results

def status(self):
    # Space-separated concatenation of the status of every run, e.g.
    # "FAIL PASS" for a test that failed once and passed on rerun.
    return ' '.join(r.status() for r in self.results)
95 changes: 95 additions & 0 deletions tools/testrunner/testproc/resultdb.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
# Copyright 2022 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import json
import logging
import pprint
import requests
import os

from . import base
from .indicators import (
formatted_result_output,
ProgressIndicator,
)
from .util import (
base_test_record,
extract_tags,
strip_ascii_control_characters,
)


class ResultDBIndicator(ProgressIndicator):
  """Progress indicator that forwards every test result to ResultDB.

  Results are posted through a local ResultSink endpoint; when no such
  endpoint is available the RPC client is inert and sending is a no-op.
  """

  def __init__(self, context, options, test_count):
    super(ResultDBIndicator, self).__init__(context, options, test_count)
    self._requirement = base.DROP_PASS_OUTPUT
    self.rpc = ResultDB_RPC()

  def on_test_result(self, test, result):
    # A rerun result expands into one sub-result per run.
    for run, single_result in enumerate(result.as_list):
      self.send_result(test, single_result, run)

  def send_result(self, test, result, run):
    # We need to recalculate the observed (but lost) test behaviour:
    # whether the run itself passed is derived from whether its output
    # matched expectations combined with whether the test is expected
    # to fail.
    matched_expectation = not result.has_unexpected_output
    expected_to_pass = not test.is_fail
    run_passed = (matched_expectation == expected_to_pass)
    rdb_result = {
        'testId': strip_ascii_control_characters(test.full_name),
        'status': 'PASS' if run_passed else 'FAIL',
        'expected': matched_expectation,
    }

    output = result.output
    if output and output.duration:
      rdb_result.update(duration=f'{output.duration}ms')
    if result.has_unexpected_output:
      sanitized = strip_ascii_control_characters(
          formatted_result_output(result))
      # TODO(liviurau): do we have a better presentation data for this?
      # Protobuf strings can have len == 2**32.
      rdb_result.update(summaryHtml=f'<pre>{sanitized}</pre>')
    rdb_result.update(tags=extract_tags(base_test_record(test, result, run)))
    self.rpc.send(rdb_result)


class ResultDB_RPC:
  """Client that posts test results to a local ResultDB result sink.

  Reads the sink configuration from the LUCI_CONTEXT file. If there is no
  LUCI context, or it contains no 'result_sink' section, the client stays
  disabled: `session` remains None and `send` silently does nothing.
  """

  def __init__(self):
    self.session = None
    luci_context = os.environ.get('LUCI_CONTEXT')
    # TODO(liviurau): use a factory method and return None in absence of
    # necessary context.
    if not luci_context:
      logging.warning(
          'No LUCI_CONTEXT found. No results will be sent to ResultDB.')
      return
    with open(luci_context, mode="r", encoding="utf-8") as f:
      config = json.load(f)
    sink = config.get('result_sink', None)
    if not sink:
      logging.warning(
          'No ResultDB sink found. No results will be sent to ResultDB.')
      return
    self.session = requests.Session()
    self.session.headers = {
        'Authorization': f'ResultSink {sink.get("auth_token")}',
    }
    self.url = f'http://{sink.get("address")}/prpc/luci.resultsink.v1.Sink/ReportTestResults'

  def send(self, result):
    """Posts a single rdb result dict; no-op when the sink is disabled.

    Raises on any HTTP/connection error after logging the failed payload.
    """
    if self.session:
      payload = dict(testResults=[result])
      try:
        self.session.post(self.url, json=payload).raise_for_status()
      except Exception:
        # Lazy %-formatting; only rendered if the error is actually logged.
        logging.error('Request failed: %s', payload)
        # Bare raise preserves the original traceback.
        raise

  def __del__(self):
    # Best-effort cleanup of the HTTP session on garbage collection.
    if self.session:
      self.session.close()
38 changes: 38 additions & 0 deletions tools/testrunner/testproc/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
import logging
import os
import platform
import re
import signal
import subprocess

Expand Down Expand Up @@ -53,6 +54,43 @@ def kill_processes_linux():
logging.exception('Failed to kill process')


def strip_ascii_control_characters(unicode_string):
  """Replaces every character outside printable ASCII (0x20-0x7E) with '?'.

  NOTE: despite the name, this masks all non-printable-ASCII characters
  (including non-ASCII unicode letters), not only control characters. The
  argument is converted with str() first, so any value is accepted.
  """
  return ''.join(
      c if '\x20' <= c <= '\x7e' else '?' for c in str(unicode_string))


def base_test_record(test, result, run):
  """Builds the common record dict describing one run of one test.

  `run` is zero-based and recorded 1-based. exit_code and duration are
  only present when the result carries an output object.
  """
  record = dict(
      name=test.full_name,
      flags=result.cmd.args,
      run=run + 1,
      expected=test.expected_outcomes,
      random_seed=test.random_seed,
      target_name=test.get_shell(),
      variant=test.variant,
      variant_flags=test.variant_flags,
  )
  output = result.output
  if output:
    record['exit_code'] = output.exit_code
    record['duration'] = output.duration
  return record


def extract_tags(record):
  """Flattens a test record dict into a list of ResultDB tag dicts.

  A list value contributes one tag per element under the same key; any
  other value contributes a single tag. All values are sanitized to
  printable ASCII by sanitized_kv_dict.
  """
  tags = []
  for key, value in record.items():
    # isinstance is the idiomatic type check (and covers list subclasses),
    # unlike the exact-type comparison `type(v) == list`.
    if isinstance(value, list):
      tags += [sanitized_kv_dict(key, element) for element in value]
    else:
      tags.append(sanitized_kv_dict(key, value))
  return tags


def sanitized_kv_dict(k, v):
  """Returns one ResultDB tag entry with the value sanitized to
  printable ASCII."""
  return {'key': k, 'value': strip_ascii_control_characters(v)}


class FixedSizeTopList():
"""Utility collection for gathering a fixed number of elements with the
biggest value for the given key. It employs a heap from which we pop the
Expand Down

0 comments on commit 237de89

Please sign in to comment.