router check tool: add flag for only printing failed tests #8160

Merged · 22 commits · Sep 11, 2019
3 changes: 3 additions & 0 deletions docs/root/install/tools/route_table_check_tool.rst
@@ -37,6 +37,9 @@ Usage
-d, --details
Show detailed test execution results. The first line indicates the test name.

--only-show-failures
Displays test results only for failed tests. When the details flag is also set, the test names of passing tests are omitted.
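
A hypothetical invocation combining both flags (file names are illustrative; the positional config/test arguments mirror those used in route_tests.sh further down):

  router_check_tool router_config.yaml test_cases.json --details --only-show-failures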

-p, --useproto
Use Proto test file schema

1 change: 1 addition & 0 deletions docs/root/intro/version_history.rst
@@ -48,6 +48,7 @@ Version history
* router check tool: add coverage reporting & enforcement.
* router check tool: add comprehensive coverage reporting.
* router check tool: add deprecated field check.
* router check tool: add flag for only printing results of failed tests.
* thrift_proxy: fix crashing bug on invalid transport/protocol framing
* tls: added verification of IP address SAN fields in certificates against configured SANs in the
* tracing: added support to the Zipkin reporter for sending list of spans as Zipkin JSON v2 and protobuf message over HTTP.
40 changes: 22 additions & 18 deletions test/tools/router_check/router.cc
@@ -100,19 +100,14 @@ RouterCheckTool::RouterCheckTool(
bool RouterCheckTool::compareEntriesInJson(const std::string& expected_route_json) {
Json::ObjectSharedPtr loader = Json::Factory::loadFromFile(expected_route_json, *api_);
loader->validateSchema(Json::ToolSchema::routerCheckSchema());

bool no_failures = true;
for (const Json::ObjectSharedPtr& check_config : loader->asObjectArray()) {
headers_finalized_ = false;
ToolConfig tool_config = ToolConfig::create(check_config);
tool_config.route_ = config_->route(*tool_config.headers_, tool_config.random_value_);

std::string test_name = check_config->getString("test_name", "");
tests_.emplace_back(test_name, std::vector<std::string>{});
Json::ObjectSharedPtr validate = check_config->getObject("validate");

using checkerFunc = std::function<bool(ToolConfig&, const std::string&)>;
const std::unordered_map<std::string, checkerFunc> checkers = {
{"cluster_name",
@@ -128,7 +123,6 @@ bool RouterCheckTool::compareEntriesInJson(const std::string& expected_route_jso
{"path_redirect",
[this](auto&... params) -> bool { return this->compareRedirectPath(params...); }},
};

// Call appropriate function for each match case.
for (const auto& test : checkers) {
if (validate->hasObject(test.first)) {
@@ -142,7 +136,6 @@ bool RouterCheckTool::compareEntriesInJson(const std::string& expected_route_jso
}
}
}

if (validate->hasObject("header_fields")) {
for (const Json::ObjectSharedPtr& header_field : validate->getObjectArray("header_fields")) {
if (!compareHeaderField(tool_config, header_field->getString("field"),
@@ -151,7 +144,6 @@ bool RouterCheckTool::compareEntriesInJson(const std::string& expected_route_jso
}
}
}

if (validate->hasObject("custom_header_fields")) {
for (const Json::ObjectSharedPtr& header_field :
validate->getObjectArray("custom_header_fields")) {
@@ -162,7 +154,7 @@ bool RouterCheckTool::compareEntriesInJson(const std::string& expected_route_jso
}
}
}

printResults();
return no_failures;
}

@@ -183,9 +175,7 @@ bool RouterCheckTool::compareEntries(const std::string& expected_routes) {
tool_config.route_ = config_->route(*tool_config.headers_, tool_config.random_value_);

const std::string& test_name = check_config.test_name();
tests_.emplace_back(test_name, std::vector<std::string>{});
const envoy::RouterCheckToolSchema::ValidationAssert& validate = check_config.validate();

using checkerFunc =
@@ -208,7 +198,7 @@ bool RouterCheckTool::compareEntries(const std::string& expected_routes) {
}
}
}

printResults();
return no_failures;
}

@@ -424,13 +414,24 @@ bool RouterCheckTool::compareResults(const std::string& actual, const std::strin
if (expected == actual) {
return true;
}
tests_.back().second.emplace_back("expected: [" + expected + "], actual: [" + actual +
"], test type: " + test_type);
return false;
}

void RouterCheckTool::printResults() {
  // Output failure details to stdout if details_ flag is set to true
  for (const auto& test_result : tests_) {
    // All test names are printed if the details_ flag is true unless only_show_failures_ is also
    // true.
    if ((details_ && !only_show_failures_) ||
        (only_show_failures_ && !test_result.second.empty())) {
      std::cout << test_result.first << std::endl;
      for (const auto& failure : test_result.second) {
        std::cerr << failure << std::endl;
      }
    }
  }
}
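
As a concrete sketch of the resulting behavior (binary name and paths are illustrative; the expected strings come from the route_tests.sh assertions at the end of this diff), test names go to stdout while failure details go to stderr:

  $ router_check_tool -c TestRoutes.yaml -t Weighted.golden.proto.json --details --only-show-failures --useproto
  Test_2
  expected: [cluster1], actual: [instant-server], test type: cluster_name

With --details alone, the passing test names (Test_1, Test_3) would be printed as well.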

// The Mock for runtime value checks.
@@ -446,6 +447,8 @@ Options::Options(int argc, char** argv) {
TCLAP::CmdLine cmd("router_check_tool", ' ', "none", true);
TCLAP::SwitchArg is_proto("p", "useproto", "Use Proto test file schema", cmd, false);
TCLAP::SwitchArg is_detailed("d", "details", "Show detailed test execution results", cmd, false);
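// The empty short-flag name (first argument) below registers --only-show-failures as a
// long-form-only option.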
TCLAP::SwitchArg only_show_failures("", "only-show-failures", "Only display failing tests", cmd,
false);
TCLAP::SwitchArg disable_deprecation_check("", "disable-deprecation-check",
"Disable deprecated fields check", cmd, false);
TCLAP::ValueArg<double> fail_under("f", "fail-under",
@@ -468,6 +471,7 @@

is_proto_ = is_proto.getValue();
is_detailed_ = is_detailed.getValue();
only_show_failures_ = only_show_failures.getValue();
fail_under_ = fail_under.getValue();
comprehensive_coverage_ = comprehensive_coverage.getValue();
disable_deprecation_check_ = disable_deprecation_check.getValue();
21 changes: 20 additions & 1 deletion test/tools/router_check/router.h
@@ -91,6 +91,11 @@ class RouterCheckTool : Logger::Loggable<Logger::Id::testing> {
*/
void setShowDetails() { details_ = true; }

/**
* Set whether to only print failing match cases.
*/
void setOnlyShowFailures() { only_show_failures_ = true; }

float coverage(bool detailed) {
return detailed ? coverage_.detailedReport() : coverage_.report();
}
@@ -137,13 +142,21 @@ class RouterCheckTool : Logger::Loggable<Logger::Id::testing> {
bool compareResults(const std::string& actual, const std::string& expected,
const std::string& test_type);

void printResults();

bool runtimeMock(const std::string& key, const envoy::type::FractionalPercent& default_value,
uint64_t random_value);

bool headers_finalized_{false};

bool details_{false};

bool only_show_failures_{false};

// The first member of each pair is the name of the test.
// The second member is a list of any failing results for that test as strings.
std::vector<std::pair<std::string, std::vector<std::string>>> tests_;
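// For example, after the golden runs above this could hold (illustrative, matching the
// failure strings asserted in route_tests.sh):
//   {"Test_1", {}},
//   {"Test_2", {"expected: [cluster1], actual: [instant-server], test type: cluster_name"}},
//   {"Test_3", {}}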

// TODO(hennna): Switch away from mocks following work done by @rlazarus in github issue #499.
std::unique_ptr<NiceMock<Server::Configuration::MockFactoryContext>> factory_context_;
std::unique_ptr<Router::ConfigImpl> config_;
@@ -196,10 +209,15 @@ class Options {
bool isProto() const { return is_proto_; }

/**
* @return true if detailed test execution results are displayed.
*/
bool isDetailed() const { return is_detailed_; }

/**
* @return true if only test failures are displayed.
*/
bool onlyShowFailures() const { return only_show_failures_; }

/**
* @return true if the deprecated field check for RouteConfiguration is disabled.
*/
@@ -214,6 +232,7 @@
bool comprehensive_coverage_;
bool is_proto_;
bool is_detailed_;
bool only_show_failures_;
bool disable_deprecation_check_;
};
} // namespace Envoy
4 changes: 4 additions & 0 deletions test/tools/router_check/router_check.cc
@@ -18,6 +18,10 @@ int main(int argc, char* argv[]) {
checktool.setShowDetails();
}

if (options.onlyShowFailures()) {
checktool.setOnlyShowFailures();
}

bool is_equal = options.isProto()
? checktool.compareEntries(options.testPath())
: checktool.compareEntriesInJson(options.unlabelledTestPath());
7 changes: 4 additions & 3 deletions test/tools/router_check/test/config/Weighted.golden.json
@@ -13,10 +13,11 @@
"test_name": "Test_2",
"input": {
":authority": "www1.lyft.com",
":path": "/foo",
"random_value": 115
":path": "/test/123",
"random_value": 115,
":method": "GET"
},
"validate": {"cluster_name": "cluster1"}
"validate": {"cluster_name": "cluster1", "virtual_cluster_name": "test_virtual_cluster"}
},
{
"test_name": "Test_3",
@@ -15,11 +15,11 @@
"test_name": "Test_2",
"input": {
"authority": "www1.lyft.com",
"path": "/foo",
"path": "/test/123",
"method": "GET",
"random_value": 115
},
"validate": {"cluster_name": "cluster1"}
"validate": {"cluster_name": "cluster1", "virtual_cluster_name": "test_virtual_cluster"}
},
{
"test_name": "Test_3",
@@ -16,11 +16,12 @@ tests {
test_name: "Test_2"
input: {
authority: "www1.lyft.com"
path: "/foo"
path: "/test/123"
method: "GET"
random_value: 115
}
validate: {
cluster_name: { value: "cluster1" }
cluster_name: { value: "cluster1"}
virtual_cluster_name: { value: "test_virtual_cluster" }
}
}
@@ -11,11 +11,12 @@ tests:
- test_name: Test_2
input:
authority: www1.lyft.com
path: "/foo"
path: "/test/123"
method: GET
random_value: 115
validate:
cluster_name: cluster1
virtual_cluster_name: test_virtual_cluster
- test_name: Test_3
input:
authority: www1.lyft.com
9 changes: 9 additions & 0 deletions test/tools/router_check/test/config/Weighted.yaml
@@ -14,6 +14,15 @@ virtual_hosts:
weight: 30
- name: cluster3
weight: 40
virtual_clusters:
- headers:
- name: :path
safe_regex_match:
google_re2: {}
regex: ^/test/\d+$
- name: :method
exact_match: GET
name: test_virtual_cluster
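# All of the header matchers above must match for a request to count toward this
# virtual cluster: e.g. a GET of /test/123 (the Test_2 input in the golden files
# above) matches, while the old /foo input did not.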
- name: www2
domains:
- www2.lyft.com
21 changes: 18 additions & 3 deletions test/tools/router_check/test/route_tests.sh
@@ -64,10 +64,25 @@ if [[ "${BAD_CONFIG_OUTPUT}" != *"Unable to parse"* ]]; then
exit 1
fi

# Failure output flag test cases
echo "testing failure test cases"
# Failure test case with only details flag set
FAILURE_OUTPUT=$("${PATH_BIN}" "${PATH_CONFIG}/TestRoutes.yaml" "${PATH_CONFIG}/Weighted.golden.json" "--details" 2>&1) ||
echo "${FAILURE_OUTPUT:-no-output}"
if [[ "${FAILURE_OUTPUT}" != *"expected: [cluster1], actual: [instant-server], test type: cluster_name"* ]]; then
if [[ "${FAILURE_OUTPUT}" != *"Test_1"*"Test_2"*"expected: [test_virtual_cluster], actual: [other], test type: virtual_cluster_name"*"expected: [cluster1], actual: [instant-server], test type: cluster_name"*"Test_3"* ]]; then
exit 1
fi

# Failure test case with details flag set and failures flag set
FAILURE_OUTPUT=$("${PATH_BIN}" "-c" "${PATH_CONFIG}/TestRoutes.yaml" "-t" "${PATH_CONFIG}/Weighted.golden.proto.json" "--details" "--only-show-failures" "--useproto" 2>&1) ||
echo "${FAILURE_OUTPUT:-no-output}"
if [[ "${FAILURE_OUTPUT}" != *"Test_2"*"expected: [cluster1], actual: [instant-server], test type: cluster_name"* ]] || [[ "${FAILURE_OUTPUT}" == *"Test_1"* ]]; then
exit 1
fi

# Failure test case with details flag unset and failures flag set
FAILURE_OUTPUT=$("${PATH_BIN}" "-c" "${PATH_CONFIG}/TestRoutes.yaml" "-t" "${PATH_CONFIG}/Weighted.golden.proto.json" "--only-show-failures" "--useproto" 2>&1) ||
echo "${FAILURE_OUTPUT:-no-output}"
if [[ "${FAILURE_OUTPUT}" != *"Test_2"*"expected: [cluster1], actual: [instant-server], test type: cluster_name"* ]] || [[ "${FAILURE_OUTPUT}" == *"Test_1"* ]]; then
exit 1
fi
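
# Note: each invocation above redirects stderr into stdout (2>&1), so a single
# pattern match can assert on both test names (stdout) and failure details (stderr).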