Merge pull request #11274 from friedmud/vpp_visualization_11272
Vpp visualization
friedmud committed May 3, 2018
2 parents 88f3130 + 60a0b9e commit 88584ee
Showing 10 changed files with 258 additions and 9 deletions.
@@ -0,0 +1,23 @@
# VectorPostprocessorVisualizationAux

## Short Description

!syntax description /AuxKernels/VectorPostprocessorVisualizationAux

## Description

This object lets you view VectorPostprocessor vectors that are of length `num_procs` (meaning there is one value per MPI process). It takes those values and fills an auxiliary field with them so the values can be visualized.

## Important Notes

Note: the VectorPostprocessor must sync the vectors it computes to all processors. By default, many only compute them on processor 0 (because that is where output occurs).

For instance, this is the case for [WorkBalance](WorkBalance.md): by default it only syncs to processor 0, but its `sync_to_all_procs` parameter tells it to create copies of the vectors on all processors.
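
For example, pairing this AuxKernel with [WorkBalance](WorkBalance.md) gives one value per processor that can be plotted directly. The sketch below mirrors the test input added in this commit; the block and variable names are otherwise arbitrary:

```
[AuxVariables]
  [num_elems]
    family = MONOMIAL
    order = CONSTANT   # one constant value per element
  []
[]

[AuxKernels]
  [wb_num_elems]
    type = VectorPostprocessorVisualizationAux
    vpp = 'wb'                # the VectorPostprocessor to read from
    vector_name = num_elems   # vector with one entry per MPI process
    variable = num_elems
    execute_on = 'TIMESTEP_END'
  []
[]

[VectorPostprocessors]
  [wb]
    type = WorkBalance
    sync_to_all_procs = true   # make the vectors available on every rank
    execute_on = 'INITIAL'
  []
[]
```

Each element is then assigned the vector entry belonging to the processor that owns it.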

!syntax parameters /AuxKernels/VectorPostprocessorVisualizationAux

!syntax inputs /AuxKernels/VectorPostprocessorVisualizationAux

!syntax children /AuxKernels/VectorPostprocessorVisualizationAux

!bibtex bibliography
@@ -12,7 +12,9 @@ Currently computes: number of local elements, nodes, dofs and partition sides.

## Important Notes

-Note that this VPP only computes the complete vector on processor 0. The vectors this VPP computes may be very large and there is no need to have a copy of them on every processor.
+Note that, by default, this VPP only computes the complete vector on processor 0. The vectors this VPP computes may be very large, and there is usually no need to have a copy of them on every processor.
+
+However, you can modify this behavior by setting `sync_to_all_procs = true`.
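
For example (a minimal sketch; the block name `wb` follows the test input added in this commit):

```
[VectorPostprocessors]
  [wb]
    type = WorkBalance
    sync_to_all_procs = true   # copy the complete vectors to every processor
  []
[]
```

This is necessary when another object, such as [VectorPostprocessorVisualizationAux](VectorPostprocessorVisualizationAux.md), needs to read the vectors on every rank.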

!syntax parameters /VectorPostprocessors/WorkBalance

49 changes: 49 additions & 0 deletions framework/include/auxkernels/VectorPostprocessorVisualizationAux.h
@@ -0,0 +1,49 @@
//* This file is part of the MOOSE framework
//* https://www.mooseframework.org
//*
//* All rights reserved, see COPYRIGHT for full restrictions
//* https://github.com/idaholab/moose/blob/master/COPYRIGHT
//*
//* Licensed under LGPL 2.1, please see LICENSE for details
//* https://www.gnu.org/licenses/lgpl-2.1.html

#ifndef VECTORPOSTPROCESSORVISUALIZATIONAUX_H
#define VECTORPOSTPROCESSORVISUALIZATIONAUX_H

#include "AuxKernel.h"

// Forward Declarations
class VectorPostprocessorVisualizationAux;

template <>
InputParameters validParams<VectorPostprocessorVisualizationAux>();

/**
* Read values from a VectorPostprocessor that is producing vectors that are "number of processors"
* in length. Puts the value for each processor into an elemental auxiliary field.
*/
class VectorPostprocessorVisualizationAux : public AuxKernel
{
public:
VectorPostprocessorVisualizationAux(const InputParameters & parameters);

protected:
/**
* Note: this is used for error checking. It's done very late because VPPs don't fill in their
* vectors until they are computed.
*/
virtual void timestepSetup() override;

/**
* Get the value from the vector and assign it to the element
*/
virtual Real computeValue() override;

/// Holds the values we want to display
const VectorPostprocessorValue & _vpp_vector;

/// This processor's id, cached to avoid recomputing it for every element
processor_id_type _my_pid;
};

#endif // VECTORPOSTPROCESSORVISUALIZATIONAUX_H
4 changes: 3 additions & 1 deletion framework/include/vectorpostprocessors/WorkBalance.h
@@ -40,9 +40,11 @@ class WorkBalance : public GeneralVectorPostprocessor
virtual void finalize() override;

protected:
-  // The system to count DoFs from
+  /// The system to count DoFs from
   int _system;

+  bool _sync_to_all_procs;

dof_id_type _local_num_elems;
dof_id_type _local_num_nodes;
dof_id_type _local_num_dofs;
60 changes: 60 additions & 0 deletions framework/src/auxkernels/VectorPostprocessorVisualizationAux.C
@@ -0,0 +1,60 @@
//* This file is part of the MOOSE framework
//* https://www.mooseframework.org
//*
//* All rights reserved, see COPYRIGHT for full restrictions
//* https://github.com/idaholab/moose/blob/master/COPYRIGHT
//*
//* Licensed under LGPL 2.1, please see LICENSE for details
//* https://www.gnu.org/licenses/lgpl-2.1.html

#include "VectorPostprocessorVisualizationAux.h"

registerMooseObject("MooseApp", VectorPostprocessorVisualizationAux);

template <>
InputParameters
validParams<VectorPostprocessorVisualizationAux>()
{
InputParameters params = validParams<AuxKernel>();

params.addClassDescription("Read values from a VectorPostprocessor that is producing vectors "
                           "that are 'number of processors' in length. Puts the value for "
                           "each processor into an elemental auxiliary field.");

params.addRequiredParam<VectorPostprocessorName>(
"vpp", "The name of the VectorPostprocessor to pull the data from.");
params.addRequiredParam<std::string>(
"vector_name", "The name of the vector to use from the VectorPostprocessor");

return params;
}

VectorPostprocessorVisualizationAux::VectorPostprocessorVisualizationAux(
const InputParameters & parameters)
: AuxKernel(parameters),
_vpp_vector(getVectorPostprocessorValue("vpp", getParam<std::string>("vector_name"))),
_my_pid(processor_id())
{
}

void
VectorPostprocessorVisualizationAux::timestepSetup()
{
if (_vpp_vector.size() != n_processors())
mooseError("Error in VectorPostprocessor ",
name(),
". Vector ",
getParam<std::string>("vector_name"),
" in VectorPostprocessor ",
getParam<VectorPostprocessorName>("vpp"),
" does not contain num_procs number of entries. num_procs: ",
n_processors(),
" num_entries: ",
_vpp_vector.size());
}

Real
VectorPostprocessorVisualizationAux::computeValue()
{
return _vpp_vector[_my_pid];
}
3 changes: 2 additions & 1 deletion framework/src/problems/FEProblemBase.C
@@ -2720,7 +2720,8 @@ VectorPostprocessorValue &
FEProblemBase::getVectorPostprocessorValue(const VectorPostprocessorName & name,
const std::string & vector_name)
{
-  return _vpps_data.getVectorPostprocessorValue(name, vector_name);
+  auto & val = _vpps_data.getVectorPostprocessorValue(name, vector_name);
+  return val;
}

VectorPostprocessorValue &
32 changes: 26 additions & 6 deletions framework/src/vectorpostprocessors/WorkBalance.C
@@ -34,12 +34,20 @@ validParams<WorkBalance>()
system_enum,
"The system(s) to retrieve the number of DOFs from (NL, AUX, ALL). Default == ALL");

params.addParam<bool>("sync_to_all_procs",
false,
"Whether or not to sync the vectors to all processors. By default we only "
"sync them to processor 0 so they can be written out. Setting this to "
"true will use more communication, but is necessary if you expect these "
"vectors to be available on all processors");

return params;
}

WorkBalance::WorkBalance(const InputParameters & parameters)
: GeneralVectorPostprocessor(parameters),
_system(getParam<MooseEnum>("system")),
_sync_to_all_procs(getParam<bool>("sync_to_all_procs")),
_local_num_elems(0),
_local_num_nodes(0),
_local_num_dofs(0),
@@ -245,12 +253,24 @@ WorkBalance::execute()
void
WorkBalance::finalize()
{
-  // Gather the results down to processor 0
-  _communicator.gather(0, static_cast<Real>(_local_num_elems), _num_elems);
-  _communicator.gather(0, static_cast<Real>(_local_num_nodes), _num_nodes);
-  _communicator.gather(0, static_cast<Real>(_local_num_dofs), _num_dofs);
-  _communicator.gather(0, static_cast<Real>(_local_num_partition_sides), _num_partition_sides);
-  _communicator.gather(0, _local_partition_surface_area, _partition_surface_area);
+  if (!_sync_to_all_procs)
+  {
+    // Gather the results down to processor 0
+    _communicator.gather(0, static_cast<Real>(_local_num_elems), _num_elems);
+    _communicator.gather(0, static_cast<Real>(_local_num_nodes), _num_nodes);
+    _communicator.gather(0, static_cast<Real>(_local_num_dofs), _num_dofs);
+    _communicator.gather(0, static_cast<Real>(_local_num_partition_sides), _num_partition_sides);
+    _communicator.gather(0, _local_partition_surface_area, _partition_surface_area);
+  }
+  else
+  {
+    // Gather the results down to all procs
+    _communicator.allgather(static_cast<Real>(_local_num_elems), _num_elems);
+    _communicator.allgather(static_cast<Real>(_local_num_nodes), _num_nodes);
+    _communicator.allgather(static_cast<Real>(_local_num_dofs), _num_dofs);
+    _communicator.allgather(static_cast<Real>(_local_num_partition_sides), _num_partition_sides);
+    _communicator.allgather(_local_partition_surface_area, _partition_surface_area);
+  }

// Fill in the PID column - this just makes plotting easier
_pid.resize(_num_elems.size());
Binary file not shown.
10 changes: 10 additions & 0 deletions test/tests/auxkernels/vector_postprocessor_visualization/tests
@@ -0,0 +1,10 @@
[Tests]
[./test]
type = 'Exodiff'
input = 'vector_postprocessor_visualization.i'
exodiff = 'vector_postprocessor_visualization_out.e'
min_parallel = 3
max_parallel = 3
mesh_mode = replicated # Just because of the gold file
[../]
[]
82 changes: 82 additions & 0 deletions test/tests/auxkernels/vector_postprocessor_visualization/vector_postprocessor_visualization.i
@@ -0,0 +1,82 @@
[Mesh]
type = GeneratedMesh
dim = 2
nx = 10
ny = 10
parallel_type = REPLICATED
partitioner = linear
[]

[Variables]
[u]
[]
[]

[Kernels]
[diff]
type = Diffusion
variable = u
[]
[]

[BCs]
[left]
type = DirichletBC
variable = u
boundary = 'left'
value = 0
[]
[right]
type = DirichletBC
variable = u
boundary = 'right'
value = 1
[]
[]

[Executioner]
type = Steady
solve_type = PJFNK
petsc_options_iname = '-pc_type -pc_hypre_type'
petsc_options_value = 'hypre boomeramg'
[]

[Outputs]
exodus = true
[]

[AuxVariables]
[num_elems]
family = MONOMIAL
order = CONSTANT
[]
[partition_surface_area]
family = MONOMIAL
order = CONSTANT
[]
[]

[AuxKernels]
[wb_num_elems]
type = VectorPostprocessorVisualizationAux
vpp = 'wb'
vector_name = num_elems
variable = num_elems
execute_on = 'TIMESTEP_END'
[]
[wb_partition_surface_area]
type = VectorPostprocessorVisualizationAux
vpp = 'wb'
vector_name = partition_surface_area
variable = partition_surface_area
execute_on = 'TIMESTEP_END'
[]
[]

[VectorPostprocessors]
[wb]
type = WorkBalance
sync_to_all_procs = 'true'
execute_on = 'INITIAL'
[]
[]
