1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296
|
/*
Copyright (C) 2008-2021 Michele Martone
This file is part of librsb.
librsb is free software; you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
librsb is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
License for more details.
You should have received a copy of the GNU Lesser General Public
License along with librsb; see the file COPYING.
If not, see <http://www.gnu.org/licenses/>.
*/
/* @cond INNERDOC */
/**
* @file
* @author Michele Martone
* @brief Code for matrix format conversion.
* */
#include "rsb_common.h"
RSB_INTERNALS_COMMON_HEAD_DECLS
static rsb_err_t rsb__do_switch_recursive_in_place_matrix_to_in_place_rcoo_leaf(struct rsb_mtx_t * mtxAp, rsb_bool_t do_shift)
{
	/**
	 \ingroup gr_internals
	 Switches one leaf submatrix to fullword COO storage, in place.
	 When do_shift is true, the row/column offsets of the leaf
	 (roff/coff) are added to the indices while widening them.
	 TODO: move somewhere else
	 TODO: need temporary memory to pass to e.g. rsb__do_switch_compressed_array_to_fullword_coo() and thus avoid allocations.
	*/
	rsb_err_t errval = RSB_ERR_NO_ERROR;

	if(rsb__is_coo_matrix(mtxAp))
	{
		/* Already COO: only widen the indices if they are halfword. */
		if(RSB_DO_FLAG_HAS(mtxAp->flags,(RSB_FLAG_USE_HALFWORD_INDICES)))
		{
			rsb__do_switch_array_to_fullword_coo((rsb_half_idx_t*)mtxAp->bpntr,mtxAp->nnz,do_shift?mtxAp->roff:0);
			rsb__do_switch_array_to_fullword_coo((rsb_half_idx_t*)mtxAp->bindx,mtxAp->nnz,do_shift?mtxAp->coff:0);
		}
		goto err;
	}

	/* CSR leaf: expand the compressed row pointer array to explicit
	   fullword row indices, then mark the storage as COO. */
	errval = rsb__do_switch_compressed_array_to_fullword_coo(mtxAp->bpntr,mtxAp->Mdim,do_shift?mtxAp->roff:0,NULL);
	mtxAp->matrix_storage = RSB_MATRIX_STORAGE_BCOR;
	RSB_DO_FLAG_DEL(mtxAp->flags,(RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS));
	if(RSB_SOME_ERROR(errval))
	{
		RSB_PERR_GOTO(err,RSB_ERRM_ES);
	}
	if(RSB_DO_FLAG_HAS(mtxAp->flags,(RSB_FLAG_USE_HALFWORD_INDICES)))
	{
		rsb__do_switch_array_to_fullword_coo((rsb_half_idx_t*)mtxAp->bindx,mtxAp->nnz,do_shift?mtxAp->coff:0);
	}
err:
	/* In every exit path the leaf is re-flagged as (fullword) COO. */
	RSB_DO_FLAG_SUBST(mtxAp->flags,(RSB_FLAG_USE_HALFWORD_INDICES),(RSB_FLAG_WANT_COO_STORAGE));
	RSB_DO_ERR_RETURN(errval)
}
rsb_err_t rsb__do_switch_recursive_in_place_matrix_to_in_place_rcoo(struct rsb_mtx_t * mtxAp, rsb_bool_t do_shift)
{
	/**
	 \ingroup gr_internals
	 Recursively switches an in-place assembled matrix to in-place
	 RCOO, descending the quadtree and converting every leaf.
	 TODO: move somewhere else
	 TODO: flags checks
	 FIXME: UNTESTED
	*/
	rsb_err_t errval = RSB_ERR_NO_ERROR;

	if(RSB_UNLIKELY(!mtxAp))
	{
		RSB_ERROR(RSB_ERRM_ES);
		return RSB_ERR_BADARGS;
	}

	if(RSB_UNLIKELY(rsb__is_root_matrix(mtxAp)))
	{
		/* The root must have been assembled in COO arrays for an
		   in-place switch to be possible; otherwise reject it. */
		if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_ASSEMBLED_IN_COO_ARRAYS))
			errval = rsb__do_switch_recursive_matrix_to_fullword_storage(mtxAp);
		else
			errval = RSB_ERR_BADARGS;
		if(RSB_SOME_ERROR(errval))
		{
			RSB_PERR_GOTO(err,RSB_ERRM_ES);
		}
	}
	/* RSB_FLAG_WANT_COO_STORAGE is not re-added here: it is handled
	   (deleted) by rsb__do_switch_recursive_matrix_to_fullword_storage. */
	if(rsb__is_recursive_matrix(mtxAp->flags))
	{
		/* Inner node: recurse into each present submatrix, cumulating errors. */
		rsb_submatrix_idx_t si,sj;
		struct rsb_mtx_t * smp = NULL;

		RSB_SUBMATRIX_FOREACH(mtxAp,smp,si,sj)
		{
			if(smp)
				RSB_DO_ERROR_CUMULATE(errval,rsb__do_switch_recursive_in_place_matrix_to_in_place_rcoo(smp,do_shift));
		}
	}
	else
	{
		/* Leaf node: convert it directly. */
		errval = rsb__do_switch_recursive_in_place_matrix_to_in_place_rcoo_leaf(mtxAp,do_shift);
	}
err:
	RSB_DO_ERR_RETURN(errval)
}
static rsb_err_t rsb__do_switch_recursive_in_place_matrix_to_in_place_rcoo_parallel(struct rsb_mtx_t * mtxAp, rsb_bool_t do_shift)
{
/**
	\ingroup gr_internals
	Converts every leaf of mtxAp to in-place fullword COO, processing
	the flat all_leaf_matrices[] array in parallel (one leaf per
	OpenMP iteration); per-leaf error codes are OR-combined through
	the reduction clause.
	NOTE(review): assumes mtxAp is non-NULL and all_leaf_matrices is
	populated — callers must guarantee this (no NULL check here).
*/
rsb_err_t errval = RSB_ERR_NO_ERROR;
const rsb_submatrix_idx_t all_leaf_matrices_n = mtxAp->all_leaf_matrices_n;
rsb_submatrix_idx_t n;
//rsb__do_print_matrix_stats(mtxAp, RSB_CONST_DUMP_RECURSION_BRIEF, NULL);
/* errval participates in a bitwise-OR reduction, matching the
 * RSB_DO_ERROR_CUMULATE error-combination convention. */
#pragma omp parallel for schedule(static,1) reduction(|:errval) shared(mtxAp) RSB_NTC
for(n=0;n<all_leaf_matrices_n;++n)
{
struct rsb_mtx_t *submatrix = mtxAp->all_leaf_matrices[n].mtxlp;
RSB_DO_ERROR_CUMULATE(errval,rsb__do_switch_recursive_in_place_matrix_to_in_place_rcoo_leaf(submatrix,do_shift));
}
//rsb__do_print_matrix_stats(mtxAp, RSB_CONST_DUMP_MATRIX_MARKET , NULL);
RSB_DO_ERR_RETURN(errval)
}
rsb_err_t rsb__do_switch_recursive_in_place_matrix_to_in_place_coo_sorted(struct rsb_mtx_t * mtxAp, struct rsb_coo_mtx_t * coop)
{
	/**
	 \ingroup gr_internals
	 Switches a recursively stored matrix to a single row-major sorted
	 COO representation held in *coop, reusing the matrix' own arrays.
	 Makes sense only for in place allocated.
	 On exit, the pointer matrix is deallocated.
	 FIXME: Here it would make sense to use a recursive merge algorithm.
	*/
	rsb_err_t errval = RSB_ERR_NO_ERROR;
	struct rsb_mtx_t *fsm = NULL;
	//rsb_flags_t flags;
	struct rsb_coo_mtx_t coo;
	int wmb = 1; /* want merge based (new: 20140727) */

	RSB_BZERO_P(&coo);
	if(RSB_UNLIKELY(!mtxAp))
	{
		RSB_ERROR(RSB_ERRM_E_MTXAP);
		return RSB_ERR_BADARGS;
	}
	if(mtxAp->all_leaf_matrices_n == 1)
		wmb = 0; /* merge routine will not convert a single leaf's format */
#if 0
	fsm = rsb__do_get_first_submatrix(mtxAp);
	if(!fsm)
	{
		errval = RSB_ERR_INTERNAL_ERROR;
		RSB_PERR_GOTO(err,RSB_ERRM_ES);
	}
	flags = mtxAp->flags;
	errval = rsb__do_switch_recursive_in_place_matrix_to_in_place_rcoo(mtxAp,RSB_BOOL_TRUE);
	RSB_CONDITIONAL_FREE(mtxAp->all_leaf_matrices);
	RSB_INIT_COO_FROM_MTX(coop,mtxAp);
	RSB_BIND_COO_TO_MTX(coop,fsm);
	RSB_CONDITIONAL_FREE(mtxAp);
	//if((errval = rsb__util_sort_row_major_parallel(coop->VA,coop->IA,coop->JA,coop->nnz,coop->nr,coop->nc,coop->typecode,flags))!=RSB_ERR_NO_ERROR)
	if((errval = rsb__util_sort_row_major_bucket_based_parallel(coop->VA,coop->IA,coop->JA,coop->nnz,coop->nr,coop->nc,coop->typecode,flags))!=RSB_ERR_NO_ERROR)
		goto err;
#else
	if(wmb)
	{
		/* Merge-based path: coalesce all leaves into the first submatrix' arrays. */
		errval = rsb__leaves_merge_multiple(mtxAp, NULL, NULL, NULL, 0, 1);
		if(RSB_SOME_ERROR(errval))
		{
			RSB_PERR_GOTO(err, RSB_ERRM_ES);
		}
		fsm = rsb__do_get_first_submatrix(mtxAp);
		if(!fsm)
		{
			errval = RSB_ERR_INTERNAL_ERROR;
			RSB_PERR_GOTO(err, RSB_ERRM_ES);
		}
		RSB_INIT_COO_FROM_MTX(coop, mtxAp);
		RSB_BIND_COO_TO_MTX(coop, fsm);
		RSB_ASSERT(coop->VA || coop->nnz == 0);
		RSB_ASSERT(coop->IA || coop->nnz == 0);
		RSB_ASSERT(coop->JA || coop->nnz == 0);
	}
	else
	{
		/* Copy-based path (single leaf): extract rows into a freshly
		   allocated temporary COO, then copy back over the matrix' own arrays. */
		fsm = rsb__do_get_first_submatrix(mtxAp);
		if(!fsm)
		{
			errval = RSB_ERR_INTERNAL_ERROR;
			RSB_PERR_GOTO(err, RSB_ERRM_ES);
		}
		RSB_INIT_CXX_FROM_MTX(&coo, mtxAp);
		coo.nr = coo.nc = 0; // to have rsb__allocate_coo_matrix_t allocate nnz and not more
		if(rsb__allocate_coo_matrix_t(&coo) != &coo)
		{
			errval = RSB_ERR_INTERNAL_ERROR;
			RSB_PERR_GOTO(err, RSB_ERRM_ES);
		}
		errval = rsb__do_get_rows_sparse(RSB_TRANSPOSITION_N, NULL, mtxAp, coo.VA, coo.IA, coo.JA, 0, mtxAp->nr-1, &coo.nnz, RSB_FLAG_NOFLAGS);
		if(RSB_SOME_ERROR(errval))
		{
			/* fix: free the just-allocated temporary arrays before bailing
			   out — the original code leaked them on this error path. */
			rsb__destroy_coo_matrix_t(&coo);
			RSB_PERR_GOTO(err, RSB_ERRM_ES);
		}
		RSB_INIT_COO_FROM_MTX(coop, mtxAp);
		RSB_BIND_COO_TO_MTX(coop, fsm);
		RSB_COO_MEMCPY_parallel(coop->VA, coop->IA, coop->JA, coo.VA, coo.IA, coo.JA, 0, 0, coo.nnz, mtxAp->el_size);
		rsb__destroy_coo_matrix_t(&coo);
	}
	/* The arrays now belong to *coop: detach them from the first submatrix
	   so that destroying the matrix tree below does not free them. */
	fsm->VA = NULL;
	fsm->bpntr = NULL;
	fsm->bindx = NULL;
	rsb__destroy_inner(mtxAp);
#endif
err:
	RSB_DO_ERR_RETURN(errval)
}
rsb_err_t rsb__do_switch_recursive_in_place_matrix_to_in_place_coo_unsorted(struct rsb_mtx_t * mtxAp, struct rsb_coo_mtx_t * coop)
{
/**
	\ingroup gr_internals
	Switches the recursive matrix mtxAp to a flat COO representation
	held in *coop, converting every leaf in place (in parallel) and
	then binding the first submatrix' arrays to *coop.
	TODO: move somewhere else
	FIXME: UNTESTED,TEMPORARY, makes sense only for in place allocated
	this conversion gives you sorted coordinates.
	on exit, the pointer matrix is deallocated
	FIXME: error behaviour is undefined
*/
rsb_err_t errval = RSB_ERR_NO_ERROR;
//struct rsb_coo_mtx_t coo;
struct rsb_mtx_t *fsm = NULL;
if(RSB_UNLIKELY(!mtxAp))
{
RSB_ERROR(RSB_ERRM_ES);
return RSB_ERR_BADARGS;
}
#if 0
RSB_INIT_CXX_FROM_MTX(&coo,mtxAp);
if(rsb__allocate_coo_matrix_t(&coo)!=&coo)
{
errval = RSB_ERR_ENOMEM;
RSB_PERR_GOTO(err,RSB_ERRM_ES);
}
errval = rsb__do_get_rows_sparse(RSB_TRANSPOSITION_N,NULL,mtxAp,coo.VA,coo.IA,coo.JA,0,mtxAp->nr-1,&coo.nnz,RSB_FLAG_NOFLAGS);
if(RSB_SOME_ERROR(errval)) goto err;
//rsb__destroy_inner(mtxAp);
rsb__do_mtx_free(mtxAp);
coop->VA = coo.VA;
coop->IA = coo.IA;
coop->JA = coo.JA;
RSB_INIT_COO_FROM_MTX(coop,&coo);
// mtxAp->VA = coo.VA;
// mtxAp->bpntr = coo.IA;
// mtxAp->bindx = coo.JA;
#else
/* The first submatrix holds the start of the in-place arrays:
 * keep a handle to it before the leaves are rewritten. */
fsm = rsb__do_get_first_submatrix(mtxAp);
if(!fsm)
{
errval = RSB_ERR_INTERNAL_ERROR;
RSB_PERR_GOTO(err,RSB_ERRM_ES);
}
/* Widen/shift all leaves to fullword, globally-indexed COO, in parallel. */
errval = rsb__do_switch_recursive_in_place_matrix_to_in_place_rcoo_parallel(mtxAp,RSB_BOOL_TRUE);
if(RSB_SOME_ERROR(errval))
{
RSB_PERR_GOTO(err,RSB_ERRM_ES);
}
/* Hand the arrays over to *coop, then fill in the matrix-wide fields. */
RSB_BIND_COO_TO_MTX(coop,fsm);
RSB_INIT_COO_FROM_MTX(coop,mtxAp);
/* Detach the arrays from fsm so destroying the tree below
 * does not free the data now owned by *coop. */
fsm->VA = NULL;
fsm->bpntr = NULL;
fsm->bindx = NULL;
rsb__destroy_inner(mtxAp);
#endif
err:
RSB_DO_ERR_RETURN(errval)
}
/* @endcond */
|