// Example: assemble a distributed sparse matrix with AMGCL over MPI and
// write its local/remote parts (and those of its transpose) to MatrixMarket files.
#include <algorithm>
#include <iostream>
#include <sstream>
#include <vector>

#include <boost/scope_exit.hpp>

#include <amgcl/adapter/crs_tuple.hpp>
#include <amgcl/backend/builtin.hpp>
#include <amgcl/io/mm.hpp>
#include <amgcl/mpi/distributed_matrix.hpp>
#include <amgcl/profiler.hpp>
// NOTE(review): AMGCL examples define this global profiler instance;
// presumably the library's profiling macros reference an extern
// amgcl::prof, so exactly one translation unit must define it — confirm
// against amgcl/profiler.hpp.
namespace amgcl {
profiler<> prof;
}
int main(int argc, char *argv[]) {
MPI_Init(&argc, &argv);
BOOST_SCOPE_EXIT(void) {
MPI_Finalize();
} BOOST_SCOPE_EXIT_END
amgcl::mpi::communicator comm(MPI_COMM_WORLD);
int n = 16;
int chunk_len = (n + comm.size - 1) / comm.size;
int chunk_beg = std::min(n, chunk_len * comm.rank);
int chunk_end = std::min(n, chunk_len * (comm.rank + 1));
int chunk = chunk_end - chunk_beg;
std::vector<int> ptr; ptr.reserve(chunk + 1); ptr.push_back(0);
std::vector<int> col; col.reserve(chunk * 4);
std::vector<double> val; val.reserve(chunk * 4);
for(int i = 0, j = chunk_beg; i < chunk; ++i, ++j) {
if (j > 0) {
col.push_back(j - 1);
val.push_back(-1);
}
col.push_back(j);
val.push_back(2);
if (j+1 < n) {
col.push_back(j+1);
val.push_back(-1);
}
if (j+5 < n) {
col.push_back(j+5);
val.push_back(-0.1);
}
ptr.push_back(col.size());
}
typedef amgcl::backend::builtin<double> Backend;
typedef amgcl::mpi::distributed_matrix<Backend> Matrix;
Matrix A(comm, std::tie(chunk, ptr, col, val), chunk);
{
std::ostringstream fname;
fname << "A_loc_" << comm.rank << ".mtx";
amgcl::io::mm_write(fname.str(), *A.local());
}
{
std::ostringstream fname;
fname << "A_rem_" << comm.rank << ".mtx";
amgcl::io::mm_write(fname.str(), *A.remote());
}
auto B = transpose(A);
{
std::ostringstream fname;
fname << "B_loc_" << comm.rank << ".mtx";
amgcl::io::mm_write(fname.str(), *B->local());
}
{
std::ostringstream fname;
fname << "B_rem_" << comm.rank << ".mtx";
amgcl::io::mm_write(fname.str(), *B->remote());
}
}