Update stochastic tools module for distributed VPP output
aeslaughter committed Feb 13, 2020
1 parent ffbb053 commit 8e90b54
Showing 7 changed files with 8 additions and 122 deletions.
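In short, the commit deletes the object-local `parallel_type` and `output_distributed_rank` handling from `StochasticResults`: `finalize()` now branches on `isDistributed()`, and the reworked test drives the mode with `VectorPostprocessors/storage/parallel_type=DISTRIBUTED`, which suggests the parameter and its query now come from the `VectorPostprocessor` base class. A minimal sketch of that assumed interface (names and layout are assumptions, not the actual MOOSE declarations):

```cpp
// Hypothetical sketch of the base-class support this commit appears to rely
// on; the real MOOSE VectorPostprocessor class is more involved.
class VectorPostprocessor
{
public:
  // True when the user sets parallel_type = DISTRIBUTED: each rank then
  // keeps only its local rows and CSV output writes one file per rank.
  bool isDistributed() const { return _parallel_type == "DISTRIBUTED"; }

protected:
  // REPLICATED (the default) or DISTRIBUTED, read from the input file.
  const MooseEnum _parallel_type;
};
```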
@@ -74,10 +74,4 @@ class StochasticResults : public GeneralVectorPostprocessor, SamplerInterface
protected:
/// Storage for declared vectors
std::vector<StochasticResultsData> _sample_vectors;

/// Parallel operation mode
const MooseEnum _parallel_type;

/// The rank data to output if parallel type is distributed
const processor_id_type _output_distributed_rank;
};
@@ -33,52 +33,13 @@ StochasticResults::validParams()

params.addParam<std::vector<SamplerName>>("samplers",
"A list of sampler names of associated data.");

MooseEnum parallel_type("REPLICATED DISTRIBUTED", "REPLICATED");
params.addParam<MooseEnum>(
"parallel_type",
parallel_type,
"Specify if the stored data vector is replicated or distributed across processors.");

params.addParam<processor_id_type>(
"output_distributed_rank",
Moose::INVALID_PROCESSOR_ID,
"When 'parallel_type = DISTRIBUTED' set this to copy the data from the specified processor "
"for output. This is mainly for testing since the data from that rank will override the data "
"on the root process.");

params.set<bool>("_auto_broadcast") = false;
return params;
}

StochasticResults::StochasticResults(const InputParameters & parameters)
: GeneralVectorPostprocessor(parameters),
SamplerInterface(this),
_parallel_type(getParam<MooseEnum>("parallel_type")),
_output_distributed_rank(getParam<processor_id_type>("output_distributed_rank"))
: GeneralVectorPostprocessor(parameters), SamplerInterface(this)
{

if (_output_distributed_rank != Moose::INVALID_PROCESSOR_ID)
{
if (_parallel_type == "replicated")
paramError("output_distributed_rank",
"The output rank cannot be used with 'parallel_type' set to replicated.");
else if (_output_distributed_rank >= n_processors())
paramError("output_distributed_rank",
"The supplied value is greater than the number of available processors: ",
_output_distributed_rank);
}
else
{
if ((_parallel_type == "DISTRIBUTED") && (getOutputs().count("none") == 0) &&
(n_processors() > 1))
paramWarning("parallel_type",
"The parallel_type was set to DISTRIBUTED and output is enabled for the object, "
"when running in parallel the results output will only contain the data on the "
"root processor. Output can be disabled by setting 'outputs = none' in the "
"input block. If output is desired the 'output_distributed_rank' can be set.");
}

if (isParamValid("samplers"))
for (const SamplerName & name : getParam<std::vector<SamplerName>>("samplers"))
{
@@ -99,23 +60,12 @@ StochasticResults::initialize()
void
StochasticResults::finalize()
{
if (_parallel_type == "REPLICATED")
if (!isDistributed())
{
for (auto & data : _sample_vectors)
_communicator.gather(0, data.current);
}

else if (_output_distributed_rank != 0 && _output_distributed_rank != Moose::INVALID_PROCESSOR_ID)
{
if (processor_id() == _output_distributed_rank)
for (auto & data : _sample_vectors)
_communicator.send(0, data.current);

else if (processor_id() == 0)
for (auto & data : _sample_vectors)
_communicator.receive(_output_distributed_rank, data.current);
}

for (auto & data : _sample_vectors)
{
data.vector->insert(data.vector->end(), data.current.begin(), data.current.end());
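Assembled from the hunk above, the post-commit `finalize()` reduces to a gather-then-append pattern. The sketch below adds explanatory comments; the method's tail is truncated in the diff, so the final `clear()` is an assumption:

```cpp
void
StochasticResults::finalize()
{
  // Replicated mode: gather every rank's locally generated rows onto rank 0,
  // where MOOSE writes the output. Distributed mode skips the gather, so each
  // rank keeps only its own rows and CSV output produces one file per rank
  // (e.g. distributed_out_storage_0002.csv.0, .csv.1, ...).
  if (!isDistributed())
    for (auto & data : _sample_vectors)
      _communicator.gather(0, data.current);

  // Append the gathered (or still-local) rows to the declared vectors, then
  // reset the per-step scratch storage (assumed; truncated in the hunk).
  for (auto & data : _sample_vectors)
  {
    data.vector->insert(data.vector->end(), data.current.begin(), data.current.end());
    data.current.clear();
  }
}
```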
@@ -2,42 +2,9 @@
issues = '#14410'
design = 'StochasticResults.md'

[warning]
type = RunApp
input = master.i
expect_out = "The parallel_type was set to DISTRIBUTED and output is enabled"
min_parallel = 2 # warning is only triggered with MPI
allow_warnings = true # the warning is being tested

requirement = "The system shall error if stochastic data is distributed across processors and "
"output of the data is enabled."
[]

[errors]
requirement = "The system shall error when stochastic data is set to output on a specific "
"processor with"
[wrong_parallel_type]
type = RunException
input = master.i
cli_args = 'VectorPostprocessors/storage/output_distributed_rank=1 '
'VectorPostprocessors/storage/parallel_type=REPLICATED'
expect_err = "The output rank cannot be used with"

detail = "the supplied operation mode is replicated and"
[]
[bad_output_rank]
type = RunException
input = master.i
expect_err = "The supplied value is greater than"
cli_args = 'VectorPostprocessors/storage/output_distributed_rank=2'
max_parallel = 2

detail = "the provided rank number is too large for the number of processors being executed."
[]
[]

[parallel_type]
requirement = "The system shall support the collection of stochastic data that is"

[replicated]
type = CSVDiff
input = master.i
@@ -47,40 +14,15 @@
detail = "replicated on all processors and"
[]

[distributed_one]
type = CSVDiff
input = master.i
min_parallel = 3
max_parallel = 3
cli_args = 'Outputs/file_base=distributed_one_out '
'VectorPostprocessors/storage/output_distributed_rank=0'
csvdiff = distributed_one_out_storage_0002.csv

detail = "distributed across one,"
[]

[distributed_two]
type = CSVDiff
input = master.i
min_parallel = 3
max_parallel = 3
cli_args = 'Outputs/file_base=distributed_two_out '
'VectorPostprocessors/storage/output_distributed_rank=1'
csvdiff = distributed_two_out_storage_0002.csv

detail = "two, and"
[]

[distributed_three]
[distributed]
type = CSVDiff
input = master.i
min_parallel = 3
max_parallel = 3
cli_args = 'Outputs/file_base=distributed_three_out '
'VectorPostprocessors/storage/output_distributed_rank=2'
csvdiff = distributed_three_out_storage_0002.csv
cli_args = 'VectorPostprocessors/storage/parallel_type=DISTRIBUTED Outputs/file_base=distributed_out'
csvdiff = 'distributed_out_storage_0002.csv.0 distributed_out_storage_0002.csv.1 distributed_out_storage_0002.csv.2'

detail = "three processors."
detail = "distributed across many."
[]
[]
[]
@@ -9,7 +9,7 @@

#pragma once

#include "GeneralUserObject.h"
#include "GeneralVectorPostprocessor.h"

/**
* Test class to make certain that CSV data is broadcast correctly.
