File: distributed_mesh.cpp

Package: dolfinx 2019.2.0~git20210130.c14cb0a-5 (Debian bullseye, main)

// Copyright (C) 2019 Igor A. Baratta
//
// This file is part of DOLFINX (https://www.fenicsproject.org)
//
// SPDX-License-Identifier:    LGPL-3.0-or-later
//
// Unit tests for Distributed Meshes
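//
// The test writes a rectangle mesh to XDMF, reads it back on a subset of the
// MPI ranks, re-partitions the cells over all ranks and checks the index map
// sizes of the rebuilt distributed mesh.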

#include "cmap.h"
#include <catch.hpp>
#include <dolfinx.h>
#include <dolfinx/common/MPI.h>
#include <dolfinx/graph/kahip.h>
#include <dolfinx/io/XDMFFile.h>
#include <dolfinx/mesh/cell_types.h>
#include <dolfinx/mesh/graphbuild.h>
#include <numeric>
#include <vector>

using namespace dolfinx;
using namespace dolfinx::mesh;

namespace
{

void create_mesh_file()
{
  // Create mesh using all processes and save xdmf
  auto cmap = fem::create_coordinate_map(create_coordinate_map_cmap);
  auto mesh = std::make_shared<mesh::Mesh>(generation::RectangleMesh::create(
      MPI_COMM_WORLD,
      {Eigen::Vector3d(0.0, 0.0, 0.0), Eigen::Vector3d(1.0, 1.0, 0.0)},
      {32, 32}, cmap, mesh::GhostMode::shared_facet));

  // Save mesh in XDMF format
  io::XDMFFile file(MPI_COMM_WORLD, "mesh.xdmf", "w");
  file.write_mesh(*mesh);
}

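// Read the mesh back from file on a subset of ranks, partition the cells over
// all ranks with the supplied partitioner and rebuild the distributed mesh.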
void test_distributed_mesh(mesh::CellPartitionFunction partitioner)
{
  MPI_Comm mpi_comm{MPI_COMM_WORLD};
  int mpi_size = dolfinx::MPI::size(mpi_comm);

  // Create a communicator with subset of the original group of processes
  int subset_size = (mpi_size > 1) ? (mpi_size + 1) / 2 : 1; // ceil(mpi_size/2)
  std::vector<int> ranks(subset_size);
  std::iota(ranks.begin(), ranks.end(), 0);

  MPI_Group comm_group;
  MPI_Comm_group(mpi_comm, &comm_group);

  MPI_Group new_group;
  MPI_Group_incl(comm_group, subset_size, ranks.data(), &new_group);

  MPI_Comm subset_comm;
  MPI_Comm_create_group(MPI_COMM_WORLD, new_group, 0, &subset_comm);
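  // Ranks that are not members of new_group receive MPI_COMM_NULL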

  // Create coordinate map
  auto cmap = fem::create_coordinate_map(create_coordinate_map_cmap);

  // Read mesh data on the subset of ranks; other ranks keep the empty arrays
  Eigen::Array<double, -1, -1, Eigen::RowMajor> x(0, 3);
  Eigen::Array<std::int64_t, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>
      cells(0, dolfinx::mesh::num_cell_vertices(cmap.cell_shape()));
  graph::AdjacencyList<std::int32_t> dest(0);

  if (subset_comm != MPI_COMM_NULL)
  {
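    // Partition into mpi_size parts so the cells end up distributed over all
    // ranks of MPI_COMM_WORLD, even though only the subset reads the file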
    int nparts{mpi_size};
    io::XDMFFile infile(subset_comm, "mesh.xdmf", "r");
    cells = infile.read_topology_data("mesh");
    x = infile.read_geometry_data("mesh");
    auto [data, offsets] = graph::create_adjacency_data(cells);
    dest = partitioner(
        subset_comm, nparts, cmap.cell_shape(),
        graph::AdjacencyList<std::int64_t>(std::move(data), std::move(offsets)),
        mesh::GhostMode::shared_facet);
  }

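  // Re-create the cell adjacency list on every rank (it is empty on ranks
  // that did not read the file) so that all ranks can join the collective
  // distribute() call below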
  auto [data, offsets] = graph::create_adjacency_data(cells);
  graph::AdjacencyList<std::int64_t> cells_topology(std::move(data),
                                                    std::move(offsets));

  // Distribute cells to destination ranks
  const auto [cell_nodes, src, original_cell_index, ghost_owners]
      = graph::build::distribute(mpi_comm, cells_topology, dest);

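  // Build the distributed topology and geometry from the received cell data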
  dolfinx::mesh::Topology topology = mesh::create_topology(
      mpi_comm, cell_nodes, original_cell_index, ghost_owners,
      cmap.cell_shape(), mesh::GhostMode::shared_facet);
  int tdim = topology.dim();
  dolfinx::mesh::Geometry geometry
      = mesh::create_geometry(mpi_comm, topology, cmap, cell_nodes, x);

  auto mesh = std::make_shared<dolfinx::mesh::Mesh>(
      mpi_comm, std::move(topology), std::move(geometry));

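  // A 32x32 rectangle of triangles has 2*32*32 = 2048 cells and 33*33 = 1089
  // vertices globally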
  CHECK(mesh->topology().index_map(tdim)->size_global() == 2048);
  CHECK(mesh->topology().index_map(tdim)->size_local() > 0);

  CHECK(mesh->topology().index_map(0)->size_global() == 1089);
  CHECK(mesh->topology().index_map(0)->size_local() > 0);

  CHECK(mesh->geometry().x().rows()
        == mesh->topology().index_map(0)->size_local()
               + mesh->topology().index_map(0)->num_ghosts());
  // Free the group handles and the subset communicator created above
  MPI_Group_free(&new_group);
  MPI_Group_free(&comm_group);
  if (subset_comm != MPI_COMM_NULL)
    MPI_Comm_free(&subset_comm);
}
} // namespace

TEST_CASE("Distributed Mesh", "[distributed_mesh]")
{
  create_mesh_file();

  SECTION("SCOTCH")
  {
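    // partition_cells_graph is overloaded; the explicit cast selects the
    // overload whose signature matches mesh::CellPartitionFunction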
    CHECK_NOTHROW(test_distributed_mesh(
        static_cast<graph::AdjacencyList<std::int32_t> (*)(
            MPI_Comm, int, const mesh::CellType,
            const graph::AdjacencyList<std::int64_t>&, mesh::GhostMode)>(
            &mesh::partition_cells_graph)));
  }

#ifdef HAS_KAHIP
  SECTION("KAHIP with Lambda")
  {
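    // Hand-written partitioner: build the dual graph of the cells and
    // partition it with KaHIP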
    auto kahip
        = [](MPI_Comm mpi_comm, int nparts, const mesh::CellType cell_type,
             const graph::AdjacencyList<std::int64_t>& cells,
             mesh::GhostMode ghost_mode) {
            const auto [dual_graph, graph_info]
                = mesh::build_dual_graph(mpi_comm, cells, cell_type);
            bool ghosting = (ghost_mode != mesh::GhostMode::none);
            return graph::kahip::partition(mpi_comm, nparts, dual_graph, -1,
                                           ghosting);
          };
    CHECK_NOTHROW(test_distributed_mesh(kahip));
  }
#endif
}