Merge branch 'python' into lbgpu_node_vel

KaiSzuttor committed Jul 10, 2019
2 parents 6c5ace0 + 326c261 commit 81b9c333007e2b440acb348c993a7d4eab4c7a6c
Showing with 736 additions and 2,221 deletions.
  1. +39 −0 src/core/LocalBox.hpp
  2. +6 −15 src/core/communication.cpp
  3. +0 −8 src/core/communication.hpp
  4. +52 −67 src/core/domain_decomposition.cpp
  5. +2 −2 src/core/electrostatics_magnetostatics/mmm2d.cpp
  6. +35 −45 src/core/electrostatics_magnetostatics/p3m-dipolar.cpp
  7. +0 −3 src/core/electrostatics_magnetostatics/p3m-dipolar.hpp
  8. +35 −19 src/core/electrostatics_magnetostatics/p3m.cpp
  9. +0 −3 src/core/electrostatics_magnetostatics/p3m.hpp
  10. +6 −1 src/core/electrostatics_magnetostatics/scafacos.cpp
  11. +1 −1 src/core/event.cpp
  12. +39 −88 src/core/grid.cpp
  13. +29 −60 src/core/grid.hpp
  14. +6 −3 src/core/grid_based_algorithms/halo.cpp
  15. +5 −6 src/core/grid_based_algorithms/lattice.cpp
  16. +3 −2 src/core/grid_based_algorithms/lattice.hpp
  17. +8 −3 src/core/grid_based_algorithms/lb.cpp
  18. +7 −5 src/core/grid_based_algorithms/lb_boundaries.cpp
  19. +3 −0 src/core/grid_based_algorithms/lb_particle_coupling.cpp
  20. +3 −1 src/core/grid_based_algorithms/lbgpu_cuda.cu
  21. +1 −7 src/core/integrate.cpp
  22. +13 −10 src/core/layered.cpp
  23. +1 −0 src/core/unit_tests/CMakeLists.txt
  24. +63 −0 src/core/unit_tests/LocalBox_test.cpp
  25. +35 −0 src/core/unit_tests/grid_test.cpp
  26. +17 −13 src/core/virtual_sites/lb_inertialess_tracers.cpp
  27. +25 −11 src/python/espressomd/actors.pyx
  28. +1 −3 src/python/espressomd/cellsystem.pyx
  29. +68 −68 src/python/espressomd/electrostatics.pyx
  30. +0 −2 src/python/espressomd/grid.pxd
  31. +2 −2 src/python/espressomd/polymer.pyx
  32. +1 −1 src/python/espressomd/reaction_ensemble.pyx
  33. +74 −18 src/python/espressomd/script_interface.pyx
  34. +1 −1 src/python/espressomd/visualization_opengl.pyx
  35. +69 −15 src/script_interface/ScriptInterface.dox
  36. +86 −0 src/utils/include/utils/mpi/cart_comm.hpp
  37. +0 −1,738 testsuite/python/data/engine_lbgpu_3pt.vtk
src/core/LocalBox.hpp
@@ -0,0 +1,39 @@
#ifndef ESPRESSO_LOCALBOX_HPP
#define ESPRESSO_LOCALBOX_HPP

#include <utils/Vector.hpp>

template <class T> class LocalBox {
  Utils::Vector<T, 3> m_local_box_l = {1, 1, 1};
  Utils::Vector<T, 3> m_lower_corner = {0, 0, 0};
  Utils::Vector<T, 3> m_upper_corner = {1, 1, 1};
  Utils::Array<int, 6> m_boundaries = {};

public:
  LocalBox() = default;
  LocalBox(Utils::Vector<T, 3> const &lower_corner,
           Utils::Vector<T, 3> const &local_box_length,
           Utils::Array<int, 6> const &boundaries)
      : m_local_box_l(local_box_length), m_lower_corner(lower_corner),
        m_upper_corner(lower_corner + local_box_length),
        m_boundaries(boundaries) {}

  /** Left (bottom, front) corner of this node's local box. */
  Utils::Vector<T, 3> const &my_left() const { return m_lower_corner; }
  /** Right (top, back) corner of this node's local box. */
  Utils::Vector<T, 3> const &my_right() const { return m_upper_corner; }
  /** Dimensions of the box a single node is responsible for. */
  Utils::Vector<T, 3> const &length() const { return m_local_box_l; }
  /** @brief Boundary information for the local box.
   *
   *  Returns, for each face of the local box, whether it is
   *  a boundary of the simulation box. The format is:
   *  (x low, x high, y low, y high, z low, z high).
   *
   *  @return Array with boundary information.
   */
  Utils::Array<int, 6> const &boundary() const { return m_boundaries; }
};

#endif
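
LocalBox only stores the geometry handed to it; the interpretation of the boundary flags is left to the domain-decomposition code that fills them. A minimal usage sketch (the values and the surrounding program are purely illustrative; Utils::Vector3d and Utils::Array are the ESPResSo utility types already included above):

// Sketch only: the local box of the rank that owns the low-x half of a
// 10 x 10 x 10 simulation box split across a 2 x 1 x 1 node grid.
#include "LocalBox.hpp"
#include <utils/Array.hpp>
#include <utils/Vector.hpp>
#include <cassert>

int main() {
  Utils::Vector3d const lower_corner{0., 0., 0.};
  Utils::Vector3d const local_box_l{5., 10., 10.};
  // One flag per face in the order (x low, x high, y low, y high, z low, z high);
  // here every face except the high-x one lies on a simulation box boundary.
  Utils::Array<int, 6> const boundaries{{1, 0, 1, 1, 1, 1}};

  LocalBox<double> local_geo(lower_corner, local_box_l, boundaries);

  assert(local_geo.my_left() == lower_corner);
  assert(local_geo.my_right() == (lower_corner + local_box_l));
  assert(local_geo.length() == local_box_l);
  return 0;
}
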
src/core/communication.cpp
@@ -76,6 +76,7 @@
 #include <boost/serialization/array.hpp>
 #include <boost/serialization/string.hpp>
 #include <boost/serialization/utility.hpp>
+#include <utils/mpi/cart_comm.hpp>
 
 using namespace std;
 
@@ -217,11 +218,12 @@ void mpi_init() {
 #endif
 
   MPI_Comm_size(MPI_COMM_WORLD, &n_nodes);
-  MPI_Dims_create(n_nodes, 3, node_grid.data());
+  node_grid = Utils::Mpi::dims_create<3>(n_nodes);
 
-  mpi_reshape_communicator({{node_grid[0], node_grid[1], node_grid[2]}},
-                           /* periodicity */ {{1, 1, 1}});
-  MPI_Cart_coords(comm_cart, this_node, 3, node_pos.data());
+  comm_cart =
+      Utils::Mpi::cart_create(comm_cart, node_grid, /* reorder */ false);
+
+  this_node = comm_cart.rank();
 
   Communication::m_callbacks =
       std::make_unique<Communication::MpiCallbacks>(comm_cart);
@@ -236,17 +238,6 @@ void mpi_init() {
   on_program_start();
 }
 
-void mpi_reshape_communicator(std::array<int, 3> const &node_grid,
-                              std::array<int, 3> const &periodicity) {
-  MPI_Comm temp_comm;
-  MPI_Cart_create(MPI_COMM_WORLD, 3, const_cast<int *>(node_grid.data()),
-                  const_cast<int *>(periodicity.data()), 0, &temp_comm);
-  comm_cart =
-      boost::mpi::communicator(temp_comm, boost::mpi::comm_take_ownership);
-
-  this_node = comm_cart.rank();
-}
-
 /****************** REQ_PLACE/REQ_PLACE_NEW ************/
 
 void mpi_place_particle(int node, int id, const Utils::Vector3d &pos) {
src/core/communication.hpp
@@ -128,14 +128,6 @@ auto mpi_call(Tag tag, TagArg &&tag_arg, R (*fp)(Args...), ArgRef &&... args) {
 /** Process requests from master node. Slave nodes main loop. */
 void mpi_loop();
 
-/**
- * @brief Replace the MPI communicator by a new one with the given periodicity
- * and node grid.
- */
-void mpi_reshape_communicator(std::array<int, 3> const &node_grid,
-                              std::array<int, 3> const &periodicity = {
-                                  {1, 1, 1}});
-
 /** Issue REQ_PLACE: move particle to a position on a node.
  * Also calls \ref on_particle_change.
  * \param id the particle to move.
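
The new header src/utils/include/utils/mpi/cart_comm.hpp (+86 lines, not shown on this page) provides the Utils::Mpi::dims_create and Utils::Mpi::cart_create helpers called in mpi_init() above. A rough, purely illustrative sketch of wrappers with matching call signatures (inferred only from those call sites, not from the actual header contents) could look like this:

// Hypothetical wrappers matching the calls Utils::Mpi::dims_create<3>(n_nodes)
// and Utils::Mpi::cart_create(comm, node_grid, /* reorder */ false) seen above.
// Assumes an MPI-3 interface (const-correct array arguments) and boost.MPI.
#include <boost/mpi/communicator.hpp>
#include <mpi.h>
#include <utils/Vector.hpp>
#include <cstddef>

namespace Utils {
namespace Mpi {

/* Let MPI choose a balanced process grid for n_nodes ranks. */
template <std::size_t dim> Vector<int, dim> dims_create(int n_nodes) {
  /* Zero entries tell MPI_Dims_create to pick that dimension freely. */
  Vector<int, dim> dims{};
  MPI_Dims_create(n_nodes, static_cast<int>(dim), dims.data());
  return dims;
}

/* Create a fully periodic Cartesian communicator over the given node grid. */
template <std::size_t dim>
boost::mpi::communicator cart_create(boost::mpi::communicator const &comm,
                                     Vector<int, dim> const &dims,
                                     bool reorder = true) {
  Vector<int, dim> periodicity;
  for (auto &p : periodicity)
    p = 1;

  MPI_Comm temp_comm;
  MPI_Cart_create(comm, static_cast<int>(dim), dims.data(), periodicity.data(),
                  static_cast<int>(reorder), &temp_comm);
  /* Hand ownership of the raw handle to boost.MPI. */
  return boost::mpi::communicator(temp_comm, boost::mpi::comm_take_ownership);
}

} // namespace Mpi
} // namespace Utils

In this sketch the periodicity is fixed to fully periodic and the boost::mpi communicator takes ownership of the raw MPI handle, mirroring what the removed mpi_reshape_communicator() did while avoiding the const_cast that the pre-MPI-3 signature of MPI_Cart_create required.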
