File: HashSpGEMMTest.cpp

Package: combblas 2.0.0-6
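
// HashSpGEMMTest: reads matrices A and B together with a reference product CC,
// multiplies the locally held pieces of A and B over the plus-times semiring
// with LocalSpGEMM, and checks the result against the local piece of CC.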
#include <mpi.h>
#include <sys/time.h>
#include <iostream>
#include <functional>
#include <algorithm>
#include <vector>
#include <sstream>
#include <string>
#include <memory>
#include "CombBLAS/CombBLAS.h"
#include "CombBLAS/CommGrid3D.h"
#include "CombBLAS/SpParMat3D.h"
#include "CombBLAS/ParFriends.h"

using namespace std;
using namespace combblas;

#define EPS 0.0001

#ifdef _OPENMP
#include <omp.h>
int cblas_splits = omp_get_max_threads();
#else
int cblas_splits = 1;
#endif


// Simple helper class for declarations: just the numerical type is templated.
// The index type and the sequential matrix type stay the same for the whole code;
// in this case they are int64_t and SpDCCols.
template <class NT>
class PSpMat
{
public:
    typedef SpDCCols < int64_t, NT > DCCols;
    typedef SpParMat < int64_t, NT, DCCols > MPI_DCCols;
};

int main(int argc, char* argv[])
{
    int nprocs, myrank;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD,&myrank);

    if(argc < 4){
        if(myrank == 0)
        {
            cout << "Usage: ./<Binary> <MatrixA> <MatrixB> <MatrixCC>" << endl;
        }
        MPI_Finalize();
        return -1;
    }
    else {
        string Aname(argv[1]);
        string Bname(argv[2]);
        string CCname(argv[3]);
        // Zero grid dimensions let CombBLAS pick a square process grid automatically
        shared_ptr<CommGrid> fullWorld;
        fullWorld.reset( new CommGrid(MPI_COMM_WORLD, 0, 0) );
        
        typedef PlusTimesSRing<double, double> PTFF;    // standard (+, *) semiring over doubles

        SpParMat<int64_t, double, SpDCCols < int64_t, double >> A2D(fullWorld);
        SpParMat<int64_t, double, SpDCCols < int64_t, double >> B2D(fullWorld);
        SpParMat<int64_t, double, SpDCCols < int64_t, double >> CC2D(fullWorld);

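        // Read A, B, and the reference product CC in Matrix Market format;
        // duplicate entries are combined with maximum<double>()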
        A2D.ParallelReadMM(Aname, true, maximum<double>());
        B2D.ParallelReadMM(Bname, true, maximum<double>());
        CC2D.ParallelReadMM(CCname, true, maximum<double>());

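        // Pointers to the locally owned submatrix of each distributed matrix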
        SpDCCols < int64_t, double >* Alocal = A2D.seqptr();
        SpDCCols < int64_t, double >* Blocal = B2D.seqptr();
        SpDCCols < int64_t, double >* CClocal = CC2D.seqptr();

        // Multiply the local pieces over the plus-times semiring and convert
        // the resulting tuples into an SpDCCols for the comparison below
        //Clocal = LocalSpGEMMHash<PTFF, double>(*Alocal, *Blocal, false, false, true);
        SpTuples<int64_t, double>* Clocal_tuples = LocalSpGEMM<PTFF, double>(*Alocal, *Blocal, false, false);
        SpDCCols < int64_t, double >* Clocal = new SpDCCols < int64_t, double >(*Clocal_tuples, false);

        // Each rank compares its own local block, but only rank 0's verdict is printed
        if(*Clocal == *CClocal){
            if(myrank == 0) fprintf(stderr, "Correct\n");
        }
        else{
            if(myrank == 0) fprintf(stderr, "Not correct\n");
        }

        delete Clocal_tuples;
        delete Clocal;
    }
    MPI_Finalize();
    return 0;
}