28 changes: 28 additions & 0 deletions doc/examples/runtime-configuration_2.output
@@ -0,0 +1,28 @@
//[example_output
# Example run 1
> runtime_configuration2 --log_level=all -- --some-wrong-random-string mock_device
Running 1 test case...
Entering test module "runtime_configuration2"
test.cpp:46: info: check framework::master_test_suite().argc == 3 has passed
test.cpp:47: fatal error: in "runtime_configuration2": critical check framework::master_test_suite().argv[1] == "--device-name" has failed [--some-wrong-random-string != --device-name]
Leaving test module "runtime_configuration2"

*** The test module "runtime_configuration2" was aborted; see standard output for details
*** 1 failure is detected in the test module "runtime_configuration2"

# Example run 2
> runtime_configuration2 --log_level=all -- --device-name mock_device
Running 1 test case...
Entering test module "runtime_configuration2"
test.cpp:46: info: check framework::master_test_suite().argc == 3 has passed
test.cpp:47: info: check framework::master_test_suite().argv[1] == "--device-name" has passed
test.cpp:53: info: check 'Cannot create the device mock_device' has passed
test.cpp:56: info: check 'Cannot initialize the device mock_device' has passed
test.cpp:72: Entering test case "check_device_has_meaningful_name"
test.cpp:74: info: check CommandLineDeviceInit::device->get_device_name() != "" has passed
test.cpp:72: Leaving test case "check_device_has_meaningful_name"; testing time: 127us
test.cpp:62: info: check 'Cannot tear-down the device mock_device' has passed
Leaving test module "runtime_configuration2"; testing time: 177us

*** No errors detected
//]
76 changes: 76 additions & 0 deletions doc/examples/runtime-configuration_2.run-fail.cpp
@@ -0,0 +1,76 @@
// Copyright (c) 2018 Raffi Enficiaud
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

// See http://www.boost.org/libs/test for the library home page.

//[example_code
#define BOOST_TEST_MODULE runtime_configuration2
#include <boost/test/included/unit_test.hpp>
using namespace boost::unit_test;

/// The interface with the device driver.
///
/// Concrete devices are obtained through the static factory() and are
/// expected to be set up before use and torn down after use.
class DeviceInterface {
public:
    /// Acquires a specific device based on its name.
    /// @return a newly allocated device (caller takes ownership), or
    ///         nullptr when no device with that name is known.
    static DeviceInterface* factory(std::string const& device_name);
    virtual ~DeviceInterface(){}

    virtual bool setup() = 0;    ///< initializes the device; true on success
    virtual bool teardown() = 0; ///< releases the device; true on success
    virtual std::string get_device_name() const = 0; ///< human-readable device name
};

class MockDevice: public DeviceInterface {
bool setup() final {
return true;
}
bool teardown() final {
return true;
}
std::string get_device_name() const {
return "mock_device";
}
};

// Creates the device matching the requested name; the caller owns the
// returned object. Unknown names yield nullptr.
DeviceInterface* DeviceInterface::factory(std::string const& device_name) {
    return device_name == "mock_device" ? new MockDevice() : nullptr;
}

// Global fixture consuming the custom command line arguments
// "--device-name <name>" passed after the '--' separator.
// The constructor validates the arguments; setup()/teardown() manage the
// lifetime of the device shared by all test cases of the module.
struct CommandLineDeviceInit {
    // Runs first: a failing BOOST_TEST_REQUIRE here is fatal and aborts
    // the whole test module (see the first example run in the .output).
    CommandLineDeviceInit() {
        BOOST_TEST_REQUIRE( framework::master_test_suite().argc == 3 );
        BOOST_TEST_REQUIRE( framework::master_test_suite().argv[1] == "--device-name" );
    }
    // Creates and initializes the device named in argv[2].
    void setup() {
        device = DeviceInterface::factory(framework::master_test_suite().argv[2]);
        BOOST_TEST_REQUIRE(
            device != nullptr,
            "Cannot create the device " << framework::master_test_suite().argv[2] );
        BOOST_TEST_REQUIRE(
            device->setup(),
            "Cannot initialize the device " << framework::master_test_suite().argv[2] );
    }
    // Tears the device down; a tear-down failure is reported as a
    // non-fatal assertion failure, and the device is released regardless.
    void teardown() {
        if(device) {
            BOOST_TEST(
                device->teardown(),
                "Cannot tear-down the device " << framework::master_test_suite().argv[2]);
        }
        delete device;
    }
    // Device instance shared with the test cases; null until setup() ran.
    static DeviceInterface *device;
};
DeviceInterface* CommandLineDeviceInit::device = nullptr;

// Registers the fixture so that it wraps the whole test module run.
BOOST_TEST_GLOBAL_FIXTURE( CommandLineDeviceInit );

BOOST_AUTO_TEST_CASE(check_device_has_meaningful_name)
{
    // the device was created by the global fixture before this test runs
    BOOST_TEST(CommandLineDeviceInit::device->get_device_name() != "");
}
//]
27 changes: 27 additions & 0 deletions doc/examples/runtime-configuration_3.output
@@ -0,0 +1,27 @@
//[example_output
# Example run 1
> runtime_configuration3 --log_level=all -- --create-parametrized 3
Running 3 test cases...
Entering test module "Master Test Suite"
test.cpp:59: Entering test case "name 0"
test.cpp:17: error: in "name 0": check i >= 1 has failed [0 < 1]
test.cpp:59: Leaving test case "name 0"; testing time: 179us
test.cpp:59: Entering test case "name 1"
test.cpp:17: info: check i >= 1 has passed
test.cpp:59: Leaving test case "name 1"; testing time: 45us
test.cpp:59: Entering test case "name 2"
test.cpp:17: info: check i >= 1 has passed
test.cpp:59: Leaving test case "name 2"; testing time: 34us
Leaving test module "Master Test Suite"; testing time: 443us

*** 1 failure is detected in the test module "Master Test Suite"

# Example run 2
> runtime_configuration3 --log_level=all -- --create-parametrized
Not enough parameters
Test setup error: std::runtime_error: test module initialization failed

# Example run 3
> runtime_configuration3 --log_level=all -- --create-parametrized dummy
Test setup error: boost::unit_test::framework::setup_error: Argument 'dummy' not integer
//]
64 changes: 64 additions & 0 deletions doc/examples/runtime-configuration_3.run-fail.cpp
@@ -0,0 +1,64 @@
// Copyright (c) 2019 Raffi Enficiaud
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

// See http://www.boost.org/libs/test for the library home page.

//[example_code
#define BOOST_TEST_ALTERNATIVE_INIT_API
#include <boost/test/included/unit_test.hpp>
#include <functional>
#include <sstream>

using namespace boost::unit_test;

// Body shared by all generated test cases; 'i' is the per-case parameter
// bound when the test tree is built in init_unit_test().
void test_function(int i) {
    BOOST_TEST(i >= 1);
}

// Helper: converts a command line token to an int, raising a setup error
// when the token does not parse as an integer.
int read_integer(const std::string &str) {
    int value = 0;
    std::istringstream parser( str );
    if( (parser >> value).fail() ) {
        // it is also possible to raise a boost.test specific exception.
        throw framework::setup_error("Argument '" + str + "' not integer");
    }
    return value;
}

// Alternative-API initialization function: called by the framework before
// any test case runs. Returning false, or letting an exception escape,
// aborts the test module initialization.
bool init_unit_test()
{
  // custom arguments (everything after '--') start at argv[1]
  int argc = boost::unit_test::framework::master_test_suite().argc;
  char** argv = boost::unit_test::framework::master_test_suite().argv;

  if( argc <= 1) {
    return false; // returning false to indicate an error
  }

  if( std::string(argv[1]) == "--create-parametrized" ) {
    if(argc < 3) {
      // the logging availability depends on the logger type
      BOOST_TEST_MESSAGE("Not enough parameters");
      return false;
    }

    // argv[2]: number of test cases; optional argv[3]: first index.
    // read_integer throws framework::setup_error on malformed input,
    // which the framework reports as a setup error (see example run 3).
    int number_tests = read_integer(argv[2]);
    int test_start = 0;
    if(argc > 3) {
      test_start = read_integer(argv[3]);
    }

    for(int i = test_start; i < number_tests; i++) {
      std::ostringstream ostr;
      ostr << "name " << i;
      // create test-cases, avoiding duplicate names
      framework::master_test_suite().
        add( BOOST_TEST_CASE_NAME( std::bind(&test_function, i), ostr.str().c_str() ) );
    }
  }
  return true;
}
//]
5 changes: 5 additions & 0 deletions doc/examples/runtime-configuration_4-test-fail.txt
@@ -0,0 +1,5 @@
10.2 30.4
10.3 30.2
15.987984 15.9992
15.997984 15.9962

5 changes: 5 additions & 0 deletions doc/examples/runtime-configuration_4-test.txt
@@ -0,0 +1,5 @@
10.2 30.4
10.3 30.2
15.987984 15.9881
15.997984 15.9982

36 changes: 36 additions & 0 deletions doc/examples/runtime-configuration_4.output
@@ -0,0 +1,36 @@
//[example_output
# content of the file
> more test_file.txt
10.2 30.4
10.3 30.2
15.987984 15.9992
15.997984 15.9962

# Example run 1
> runtime_configuration4 --log_level=all -- --test-file test_file.txt
Running 2 test cases...
Entering test module "runtime_configuration4"
test.cpp:107: Entering test suite "command_line_test_file"
test.cpp:107: Entering test case "_0"
test.cpp:108: info: check input <= expected has passed
Assertion occurred in a following context:
input = 15.9879837; expected = 15.9991999;
test.cpp:107: Leaving test case "_0"; testing time: 433us
test.cpp:107: Entering test case "_1"
test.cpp:108: error: in "command_line_test_file/_1": check input <= expected has failed [15.9979839 > 15.9961996]
Failure occurred in a following context:
input = 15.9979839; expected = 15.9961996;
test.cpp:107: Leaving test case "_1"; testing time: 114us
test.cpp:107: Leaving test suite "command_line_test_file"; testing time: 616us
Leaving test module "runtime_configuration4"; testing time: 881us

*** 1 failure is detected in the test module "runtime_configuration4"

# Example run 2
> runtime_configuration4 --log_level=all -- --test-file non-existant.txt
Test setup error: Cannot open the file 'non-existant.txt'

# Example run 3
> runtime_configuration4 --log_level=all
Test setup error: Incorrect number of arguments
//]
110 changes: 110 additions & 0 deletions doc/examples/runtime-configuration_4.run-fail.cpp
@@ -0,0 +1,110 @@
// Copyright (c) 2019 Raffi Enficiaud
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

// See http://www.boost.org/libs/test for the library home page.

//[example_code
#define BOOST_TEST_MODULE runtime_configuration4

#include <boost/test/included/unit_test.hpp>
#include <boost/test/data/test_case.hpp>

#include <algorithm>  // std::count_if, std::min (previously relied on transitive includes)
#include <fstream>
#include <functional>
#include <iostream>
#include <iterator>   // std::istreambuf_iterator
#include <sstream>
#include <tuple>      // std::tuple returned by the dataset iterator

// This dataset lazily loads a text file whose lines each contain two
// floating point numbers; every selected line becomes one sample of the
// dataset. The file name is taken from the custom command line of the
// test module (after the '--' separator): "--test-file <path>".
class file_dataset
{
private:
    std::string m_filename;   // path given on the command line (argv[2])
    std::size_t m_line_start; // first line of the window
    std::size_t m_line_end;   // one past the last line of the window

public:
    enum { arity = 2 }; // each sample yields two values: (input, expected)

public:
    // Parses and validates the custom command line arguments. Exceptions
    // thrown here are reported by the framework as a test setup error
    // (see example runs 2 and 3 in the .output file).
    file_dataset(std::size_t line_start = 0, std::size_t line_end = std::size_t(-1))
    : m_line_start(line_start)
    , m_line_end(line_end)
    {
        int argc = boost::unit_test::framework::master_test_suite().argc;
        char** argv = boost::unit_test::framework::master_test_suite().argv;

        if(argc != 3)
            throw std::logic_error("Incorrect number of arguments");
        if(std::string(argv[1]) != "--test-file")
            throw std::logic_error("First argument != '--test-file'");
        // rejects m_line_start == size_t(-1), which would make the
        // window below meaningless
        if(!(m_line_start < std::size_t(-1)))
            throw std::logic_error("Incorrect line start/end");

        m_filename = argv[2];

        std::ifstream file(m_filename);
        if(!file.is_open())
            throw std::logic_error("Cannot open the file '" + m_filename + "'");
        // count the lines once so that size() is known up-front
        std::size_t nb_lines = std::count_if(
            std::istreambuf_iterator<char>(file),
            std::istreambuf_iterator<char>(),
            [](char c){ return c == '\n';});

        m_line_end = (std::min)(nb_lines, m_line_end);
        if(!(m_line_start <= m_line_end))
            throw std::logic_error("Incorrect line start/end");
    }

    // Forward iterator over the selected lines of the file.
    // NOTE(review): the skip loop below leaves the line_start-th line
    // (1-based) in m_current_line, so the first sample comes from that
    // line — matching the documented example output. With line_start == 0
    // no line is read and the first sample parses an empty string; confirm
    // whether a line_start of 0 is ever used with this dataset.
    struct iterator {
        iterator(std::string const& filename, std::size_t line_start)
        : file(filename, std::ios::binary) {
            if(!file.is_open())
                throw std::runtime_error("Cannot open the file");
            // skip the lines before the requested window
            for(std::size_t i = 0; i < line_start; i++) {
                getline(file, m_current_line);
            }
        }

        // Parses the current line into the (input, expected) pair.
        auto operator*() const -> std::tuple<float, float> {
            // zero-initialized so a malformed or empty line yields a
            // deterministic (0, 0) sample instead of indeterminate values
            float a = 0, b = 0;
            std::istringstream istr(m_current_line);
            istr >> a >> b;
            return std::tuple<float, float>(a, b);
        }

        void operator++() {
            getline(file, m_current_line);
        }
    private:
        std::ifstream file;
        std::string m_current_line;
    };

    // size of the DS: number of lines in the [m_line_start, m_line_end) window
    boost::unit_test::data::size_t size() const {
        return m_line_end - m_line_start;
    }

    // iterator over the lines of the file
    iterator begin() const {
        return iterator(m_filename, m_line_start);
    }
};

namespace boost { namespace unit_test { namespace data {

namespace monomorphic {
  // Registers file_dataset as a dataset so that it can be passed
  // directly to BOOST_DATA_TEST_CASE / data::make_delayed.
  template <>
  struct is_dataset<file_dataset> : boost::mpl::true_ {};
}
}}}

// make_delayed defers the construction of file_dataset until the test
// tree is built, at which point argc/argv of the master test suite are
// available; 3 and 10 are forwarded to the file_dataset constructor as
// the (line_start, line_end) window.
BOOST_DATA_TEST_CASE(command_line_test_file,
                     boost::unit_test::data::make_delayed<file_dataset>( 3, 10 ),
                     input, expected) {
    // one generated test case per selected line of the file
    BOOST_TEST(input <= expected);
}
//]
27 changes: 21 additions & 6 deletions doc/runtime_configuration/runtime_configuration.qbk
@@ -1,14 +1,15 @@
[/
/ Copyright (c) 2003 Boost.Test contributors
/ Copyright (c) 2003 Boost.Test contributors
/
/ Distributed under the Boost Software License, Version 1.0. (See accompanying
/ file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
/]


[section:runtime_config Runtime parameters]



[h4 Boost.Test runtime parameters]
The __UTF__ supports multiple parameters that affect test module execution. To set the parameter's value you can
either use a runtime configuration subsystem interface from within the test module initialization function or you can
specify the value at runtime during test module invocation.
Expand All @@ -27,14 +28,28 @@ supplied by you. The command line argument format expected by the __UTF__ is:
The command line argument name is case sensitive. It is required to match exactly the name in parameter specification.
There should not be any spaces between '=' and either command line argument name or argument value.


The corresponding environment variable name is also case sensitive and is required to exactly match the name in the
parameter specification.

All information about supported parameters is summarized below in the reference section.
All information about the currently supported parameters of the __UTF__ is summarized in
the section [link boost_test.runtime_config.summary].

[h4 Test filtering]
The __UTF__ provides an extensive filtering facility making it easy to run a specific test or a subset of tests. The
[link boost_test.runtime_config.test_unit_filtering section on filtering] gives all the details for the command line interface.

Additionally, [link boost_test.tests_organization.test_suite test-suites] and [link boost_test.tests_organization.tests_grouping labels]
may be used in order to construct subset of tests in an efficient and easy way, while decorators
(eg. __decorator_disabled__, __decorator_precondition__) can be used to set the default run status of a test case.

[h4 Custom runtime parameters]
It is possible to extend further the command line interface by providing a custom command line interpretation logic. There are several
ways on how to integrate this logic in the __UTF__ and this is explained in details in the section
[link boost_test.runtime_config.custom_runtime_parameters Custom runtime parameters].


[include test_unit_filtering.qbk]
[include runtime_custom.qbk]
[include runtime_config_summary.qbk]


[endsect] [/ runtime configuration]
[endsect] [/ runtime configuration]
144 changes: 144 additions & 0 deletions doc/runtime_configuration/runtime_custom.qbk
@@ -0,0 +1,144 @@
[/
/ Copyright (c) 2003 Boost.Test contributors
/
/ Distributed under the Boost Software License, Version 1.0. (See accompanying
/ file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
/]

[section Custom command line arguments]

It is possible to pass custom command line arguments to the test module. The general format for passing custom
arguments is the following:

``
<boost_test_module> [<boost_test_arg1>...] [-- [<custom_parameter1>...]
``

This means that everything that is passed after "`--`" is considered as a custom parameter and will not be intercepted nor interpreted
by the __UTF__. This avoids any troubleshooting between the __UTF__ parameters and the custom ones.

There are several use cases for accessing the arguments passed on the command line:

* instantiating an object used in test cases which is dependent on parameters external to the test module:
the name of the graphics card, the credentials for a database connection, etc. The rest of the test module would check
that the functions in test are not sensitive to this type of parametrization. One can also imagine running this same
test module with different parameters (different graphic cards...) in a batched manner,
* modifying the test tree by adding or parametrizing test cases: the arguments passed on the command line may contain
for instance a set of parameters that define test cases.

In the first scenario, [link ref_consuming_cmd_test_case test cases] or fixtures, including
[link ref_consuming_cmd_global_fixture global fixtures], may be used. Since those are part of the test tree, they can benefit from the __UTF__ rich set of assertions
and controlled execution environment.

In the second scenario, the command line arguments interact directly with the content of the test tree: by passing specific
arguments, different sets of tests are created. There are mainly two options for achieving this: using a dedicated
[link ref_consuming_cmd_init_function initialization function] or using [link ref_consuming_cmd_dataset data driven] test cases.
The error handling of the command line parameters needs however to be adapted.

[#ref_consuming_cmd_test_case][h4 Consuming custom arguments from a test case]
The [link boost_test.tests_organization.test_suite.master_test_suite master test suite] collects the custom arguments
passed to the test module in the following way:

* `argv[0]`, usually set by the operating system as the executable name, remains unchanged
* any argument interpreted by the test module is removed from `argv`
* the empty token `--` is removed as well
* any additional argument passed after the empty token is reported in `argv` starting at index `1`

[bt_example runtime-configuration_1..Basic custom command line..run-fail]

[#ref_consuming_cmd_global_fixture][h4 Consuming custom arguments from a global fixture]
Another possibility for consuming the custom command line arguments would be from within a
[link boost_test.tests_organization.fixtures.global global fixture]. This is especially useful
when external parameters are needed for instanciating global objects used in the test module.

The usage is the same as for test cases. The following example runs the test module twice with
different arguments, and illustrates the feature.

[tip The global fixture can check for the correctness of the custom arguments and may abort the full run
of the test module.]

[bt_example runtime-configuration_2..Command line arguments interpreted in a global fixtures..run-fail]

The above example instantiates a specific device through the `DeviceInterface::factory` member function. The
name of the device to instantiate is passed via the command line argument `--device-name`, and the instantiated
device is available through the global object `CommandLineDeviceInit::device`.
The module requires `3` arguments on the command line:

* `framework::master_test_suite().argv[0]` is the test module name as explained in the previous paragraph
* `framework::master_test_suite().argv[1]` should be equal to `--device-name`
* `framework::master_test_suite().argv[2]` should be the name of the device to instantiate

As it can be seen in the shell outputs, any command line argument consumed by the __UTF__ is removed from
`argc` / `argv`. Since global fixtures are running in the __UTF__ controlled environment, any fatal error reported
by the fixture (through the __BOOST_TEST_REQUIRE__ assertion) aborts the test execution. Non fatal errors
on the other hand do not abort the test-module and are reported as assertion failure, and would not prevent the execution
of the test case `check_device_has_meaningful_name`.

[note It is possible to have several global fixtures in a test module, spread over several compilation units.
Each of those fixture may in turn be accessing a specific part of the command line.]


[#ref_consuming_cmd_init_function][h4 Parametrizing the test tree from command line in the initialization function]
The initialization function is described in detail in this [link boost_test.adv_scenarios.test_module_init_overview section].
The initialization function is called before any other test or fixture, and before entering the master test suite. The initialization
function is not considered as a test-case, although it is called under the controlled execution
environment of the __UTF__. This means that:

* the errors will be properly handled,
* loggers are not fully operational,
* it is not possible to use the __UTF__ assertion macros like __BOOST_TEST__ as it is not a test-case.

The following example shows how to use the command line arguments parsing described above to create/add new test cases
to the test tree. It also shows very limited support for messages (does not work for all loggers), and error handling.

[bt_example runtime-configuration_3..Init function parametrized from the command line..run-fail]

As seen in this example, the error handling is quite different than a regular test-case:

* For the /alternative/ initialization API (see
__BOOST_TEST_ALTERNATIVE_INIT_API__), the easiest way to indicate an error would be to return `false`
in case of failure.
* For both the /obsolete/ and /alternative/ APIs, raising an exception such as `std::runtime_error` or
[classref boost::unit_test::framework::setup_error] as above works as well.

[#ref_consuming_cmd_dataset][h4 Data-driven test cases parametrized from the command line]
It is possible to use the command line arguments to manipulate the dataset generated by a data-driven test case.

By default, datasets are created before entering the `main` of the test module, and try to be efficient in the number
of copies of their arguments. It is however possible
to indicate a delay for the evaluation of the dataset by constructing the dataset with the `make_delayed` function.

With the `make_delayed`, the construction of the dataset will happen at the same time as the construction of the
test tree during the test module initialization, and not before. It is this way possible to access the
[link boost_test.tests_organization.test_suite.master_test_suite master test suite] and its command line arguments.

The example below shows a complex dataset generation from the content of an external file. The data contained
in the file participates to the definition of the test case.

[bt_example runtime-configuration_4..Dataset test case parametrized from the command line..run-fail]


* Using `make_delayed`, the tests generated from a dataset are instantiated during the framework setup. This
lets the dataset generator access the `argc` and `argv` of the master test suite.
* The generation of the test-cases out of this dataset happens before the global fixtures are reached (and before
any test cases), and after the initialization function.
* The generator of the dataset is [*not] considered being a test case and the __UTF__ assertions are not accessible.
However, the __UTF__ will catch the exceptions raised during the generation of the test-cases by the dataset.
To report an error, a `std::logic_error` or [classref boost::unit_test::framework::setup_error] can be raised
and will be reported by the __UTF__.

[/
[h4 Handling errors]
The handling of errors that happen during the command line parsing has been discussed through the examples above.

Some additional notes:

* an exception occurring in a global fixture or the initialization function will be caught by the framework and will abort
the test module
* a global fixture is attached to the [link boost_test.tests_organization.test_suite.master_test_suite master test suite], and
any failure there will be reported by the loggers properly.
* A global fixture cannot manipulate the test tree, while the data-driven tests or custom initialization functions can.

]

[endsect] [/ Custom runtime parameters]
9 changes: 4 additions & 5 deletions include/boost/test/data/monomorphic/delayed.hpp
Expand Up @@ -50,7 +50,6 @@ class delayed_dataset
public:
enum { arity = dataset_t::arity };
using iterator = decltype(std::declval<dataset_t>().begin());
using sample = typename dataset_t::sample;

delayed_dataset(Args... args)
: m_args(std::make_tuple(std::forward<Args>(args)...))
Expand All @@ -74,10 +73,10 @@ class delayed_dataset
private:

dataset_t& get() const {
if(!m_dataset) {
m_dataset = create(boost::unit_test::data::index_sequence_for<Args...>());
}
return *m_dataset;
if(!m_dataset) {
m_dataset = create(boost::unit_test::data::index_sequence_for<Args...>());
}
return *m_dataset;
}

template<std::size_t... I>
Expand Down
2 changes: 0 additions & 2 deletions include/boost/test/data/monomorphic/initializer_list.hpp
Expand Up @@ -41,8 +41,6 @@ namespace monomorphic {
template<typename T>
class init_list {
public:
typedef T sample;

enum { arity = 1 };

typedef typename std::vector<T>::const_iterator iterator;
Expand Down
4 changes: 3 additions & 1 deletion include/boost/test/data/monomorphic/singleton.hpp
Expand Up @@ -32,9 +32,11 @@ namespace monomorphic {
/// Models a single element data set
template<typename T>
class singleton {
public:
private:
typedef typename boost::decay<T>::type sample;

public:

enum { arity = 1 };

struct iterator {
Expand Down
33 changes: 32 additions & 1 deletion test/Jamfile.v2
Expand Up @@ -406,6 +406,37 @@ alias "smoke-ts"
[ run smoke-ts-included-3 : -c no --run_test=case2 -t some_suite : : : cla-runtest-repeated-mixed-long-no-fail3 ]
[ run-fail smoke-ts-included-3 : --result_code=1 --run_test=case2 -t some_suite : : : cla-runtest-repeated-mixed-long-fail3 ]
[ run smoke-ts-included-3 : --result_code=0 --run_test=case2 -t some_suite : : : cla-runtest-repeated-mixed-long-fail4 ]
;

exe custom-command-line-binary-1 : ../doc/examples/runtime-configuration_1.run-fail.cpp ;
exe custom-command-line-binary-2 : ../doc/examples/runtime-configuration_2.run-fail.cpp
: $(requirements_boost_test_full_support) ;
exe custom-command-line-binary-3 : ../doc/examples/runtime-configuration_3.run-fail.cpp
: $(requirements_boost_test_full_support) ;
exe custom-command-line-binary-4 : ../doc/examples/runtime-configuration_4.run-fail.cpp
: $(requirements_datasets) ;

alias "custom-command-line-ts"
:
# custom command line interface tests
[ run custom-command-line-binary-1 : -- --specific-param \"'additional value with quotes'\" : : : cla-specific-api1-test-1-1 ]
[ run custom-command-line-binary-1 : --log_level=all --no_color -- --specific-param \"'additional value with quotes'\" : : : cla-specific-api1-test-1-2 ]

[ run-fail custom-command-line-binary-2 : -- --random-string \"mock_device\" : : $(requirements_boost_test_full_support) : cla-specific-api1-test-2-0 ]
[ run custom-command-line-binary-2 : -- --device-name \"mock_device\" : : $(requirements_boost_test_full_support) : cla-specific-api1-test-2-1 ]
[ run custom-command-line-binary-2 : --log_level=all --no_color -- --device-name \"mock_device\" : : $(requirements_boost_test_full_support) : cla-specific-api1-test-2-2 ]

[ run-fail custom-command-line-binary-3 : -- : : $(requirements_boost_test_full_support) : cla-specific-api1-test-3-0 ]
[ run-fail custom-command-line-binary-3 : -- --create-parametrized : : $(requirements_boost_test_full_support) : cla-specific-api1-test-3-1 ]
[ run-fail custom-command-line-binary-3 : -- --create-parametrized 3 : : $(requirements_boost_test_full_support) : cla-specific-api1-test-3-2 ]
[ run custom-command-line-binary-3 : -- --create-parametrized 3 2 : : $(requirements_boost_test_full_support) : cla-specific-api1-test-3-3 ]

[ run-fail custom-command-line-binary-4 : -- : : $(requirements_datasets) : cla-specific-api1-test-4-0 ]
[ run-fail custom-command-line-binary-4 : -- --test-file : : $(requirements_datasets) : cla-specific-api1-test-4-1 ]
[ run-fail custom-command-line-binary-4 : -- --test-file ../doc/examples/runtime-configuration_4-test-fail.txt : : $(requirements_datasets) : cla-specific-api1-test-4-2 ]
[ run-fail custom-command-line-binary-4 : --log_level=all --no_color -- --test-file ../doc/examples/runtime-configuration_4-test-fail.txt : : $(requirements_datasets) : cla-specific-api1-test-4-3 ]
[ run custom-command-line-binary-4 : -- --test-file ../doc/examples/runtime-configuration_4-test.txt : : $(requirements_datasets) : cla-specific-api1-test-4-4 ]
[ run custom-command-line-binary-4 : --log_level=all --no_color -- --test-file ../doc/examples/runtime-configuration_4-test.txt : : $(requirements_datasets) : cla-specific-api1-test-4-5 ]

;

Expand All @@ -424,7 +455,7 @@ alias test
prg_exec_monitor-ts
execution_monitor-ts
doc-examples-ts
# boost_test_examples
custom-command-line-ts
;

#_________________________________________________________________________________________________#
Expand Down
Expand Up @@ -18,7 +18,6 @@
class dataset_loader
{
public:
typedef std::string sample;
enum { arity = 1 };

// this constructor access the master test suite
Expand Down Expand Up @@ -154,7 +153,6 @@ class dataset_loader_arity3
data_type m_expected;
data_type m_input;

typedef std::string sample;
enum { arity = 3 };

public:
Expand Down
Expand Up @@ -82,8 +82,6 @@ int non_copyable_type::nb_destructs = 0;
template <class return_t = int>
class fibonacci_dataset {
public:
// Samples type is int
using sample=return_t;
enum { arity = 1 };

struct iterator {
Expand Down