Actual source code: mpimatmatmult.c
1: /*
2: Defines matrix-matrix product routines for pairs of MPIAIJ matrices
3: C = A * B
4: */
5: #include <../src/mat/impls/aij/seq/aij.h>
6: #include <../src/mat/utils/freespace.h>
7: #include <../src/mat/impls/aij/mpi/mpiaij.h>
8: #include <petscbt.h>
9: #include <../src/mat/impls/dense/mpi/mpidense.h>
10: #include <petsc/private/vecimpl.h>
11: #include <petsc/private/sfimpl.h>
13: #if defined(PETSC_HAVE_HYPRE)
14: PETSC_INTERN PetscErrorCode MatMatMultSymbolic_AIJ_AIJ_wHYPRE(Mat, Mat, PetscReal, Mat);
15: #endif
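/* C = A*B^T is handled by explicitly forming B^T once and then reusing the C = A*B (AB) product path */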
17: PETSC_INTERN PetscErrorCode MatProductSymbolic_ABt_MPIAIJ_MPIAIJ(Mat C)
18: {
19: Mat_Product *product = C->product;
20: Mat B = product->B;
22: PetscFunctionBegin;
23: PetscCall(MatTranspose(B, MAT_INITIAL_MATRIX, &product->B));
24: PetscCall(MatDestroy(&B));
25: PetscCall(MatProductSymbolic_AB_MPIAIJ_MPIAIJ(C));
26: PetscFunctionReturn(PETSC_SUCCESS);
27: }
29: PETSC_INTERN PetscErrorCode MatProductSymbolic_AB_MPIAIJ_MPIAIJ(Mat C)
30: {
31: Mat_Product *product = C->product;
32: Mat A = product->A, B = product->B;
33: MatProductAlgorithm alg = product->alg;
34: PetscReal fill = product->fill;
35: PetscBool flg;
37: PetscFunctionBegin;
38: /* scalable */
39: PetscCall(PetscStrcmp(alg, "scalable", &flg));
40: if (flg) {
41: PetscCall(MatMatMultSymbolic_MPIAIJ_MPIAIJ(A, B, fill, C));
42: PetscFunctionReturn(PETSC_SUCCESS);
43: }
45: /* nonscalable */
46: PetscCall(PetscStrcmp(alg, "nonscalable", &flg));
47: if (flg) {
48: PetscCall(MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A, B, fill, C));
49: PetscFunctionReturn(PETSC_SUCCESS);
50: }
52: /* seqmpi */
53: PetscCall(PetscStrcmp(alg, "seqmpi", &flg));
54: if (flg) {
55: PetscCall(MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(A, B, fill, C));
56: PetscFunctionReturn(PETSC_SUCCESS);
57: }
59: /* backend general code */
60: PetscCall(PetscStrcmp(alg, "backend", &flg));
61: if (flg) {
62: PetscCall(MatProductSymbolic_MPIAIJBACKEND(C));
63: PetscFunctionReturn(PETSC_SUCCESS);
64: }
66: #if defined(PETSC_HAVE_HYPRE)
67: PetscCall(PetscStrcmp(alg, "hypre", &flg));
68: if (flg) {
69: PetscCall(MatMatMultSymbolic_AIJ_AIJ_wHYPRE(A, B, fill, C));
70: PetscFunctionReturn(PETSC_SUCCESS);
71: }
72: #endif
73: SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_SUP, "Mat Product Algorithm is not supported");
74: }
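/*
   For orientation, a typical user-level call sequence that reaches the dispatch above might look like the
   following sketch (illustrative only, not part of this file); the algorithm string may also be
   "nonscalable", "seqmpi", "backend", or (when available) "hypre":

     PetscCall(MatProductCreate(A, B, NULL, &C));
     PetscCall(MatProductSetType(C, MATPRODUCT_AB));
     PetscCall(MatProductSetAlgorithm(C, "scalable"));
     PetscCall(MatProductSetFromOptions(C));
     PetscCall(MatProductSymbolic(C));
     PetscCall(MatProductNumeric(C));
*/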
76: PetscErrorCode MatProductCtxDestroy_MPIAIJ_MatMatMult(void **data)
77: {
78: MatProductCtx_APMPI *ptap = *(MatProductCtx_APMPI **)data;
80: PetscFunctionBegin;
81: PetscCall(PetscFree2(ptap->startsj_s, ptap->startsj_r));
82: PetscCall(PetscFree(ptap->bufa));
83: PetscCall(MatDestroy(&ptap->P_loc));
84: PetscCall(MatDestroy(&ptap->P_oth));
85: PetscCall(MatDestroy(&ptap->Pt));
86: PetscCall(PetscFree(ptap->api));
87: PetscCall(PetscFree(ptap->apj));
88: PetscCall(PetscFree(ptap->apa));
89: PetscCall(PetscFree(ptap));
90: PetscFunctionReturn(PETSC_SUCCESS);
91: }
93: PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat A, Mat P, Mat C)
94: {
95: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data, *c = (Mat_MPIAIJ *)C->data;
96: Mat_SeqAIJ *ad = (Mat_SeqAIJ *)a->A->data, *ao = (Mat_SeqAIJ *)a->B->data;
97: Mat_SeqAIJ *cd = (Mat_SeqAIJ *)c->A->data, *co = (Mat_SeqAIJ *)c->B->data;
98: PetscScalar *cda, *coa;
99: Mat_SeqAIJ *p_loc, *p_oth;
100: PetscScalar *apa, *ca;
101: PetscInt cm = C->rmap->n;
102: MatProductCtx_APMPI *ptap;
103: PetscInt *api, *apj, *apJ, i, k;
104: PetscInt cstart = C->cmap->rstart;
105: PetscInt cdnz, conz, k0, k1;
106: const PetscScalar *dummy1, *dummy2, *dummy3, *dummy4;
107: MPI_Comm comm;
108: PetscMPIInt size;
110: PetscFunctionBegin;
111: MatCheckProduct(C, 3);
112: ptap = (MatProductCtx_APMPI *)C->product->data;
113: PetscCheck(ptap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtAP cannot be computed. Missing data");
114: PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
115: PetscCallMPI(MPI_Comm_size(comm, &size));
116: PetscCheck(ptap->P_oth || size <= 1, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "AP cannot be reused. Do not call MatProductClear()");
118: /* flag CPU mask for C */
119: #if defined(PETSC_HAVE_DEVICE)
120: if (C->offloadmask != PETSC_OFFLOAD_UNALLOCATED) C->offloadmask = PETSC_OFFLOAD_CPU;
121: if (c->A->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->A->offloadmask = PETSC_OFFLOAD_CPU;
122: if (c->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->B->offloadmask = PETSC_OFFLOAD_CPU;
123: #endif
125: /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
126: /* update numerical values of P_oth and P_loc */
127: PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_REUSE_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth));
128: PetscCall(MatMPIAIJGetLocalMat(P, MAT_REUSE_MATRIX, &ptap->P_loc));
130: /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
131: /* get data from symbolic products */
132: p_loc = (Mat_SeqAIJ *)ptap->P_loc->data;
133: p_oth = NULL;
134: if (size > 1) p_oth = (Mat_SeqAIJ *)ptap->P_oth->data;
136: /* get apa for storing dense row A[i,:]*P */
137: apa = ptap->apa;
139: api = ptap->api;
140: apj = ptap->apj;
141: /* trigger copy to CPU */
142: PetscCall(MatSeqAIJGetArrayRead(a->A, &dummy1));
143: PetscCall(MatSeqAIJGetArrayRead(a->B, &dummy2));
144: PetscCall(MatSeqAIJGetArrayRead(ptap->P_loc, &dummy3));
145: if (ptap->P_oth) PetscCall(MatSeqAIJGetArrayRead(ptap->P_oth, &dummy4));
146: PetscCall(MatSeqAIJGetArrayWrite(c->A, &cda));
147: PetscCall(MatSeqAIJGetArrayWrite(c->B, &coa));
148: for (i = 0; i < cm; i++) {
149: /* compute apa = A[i,:]*P */
150: AProw_nonscalable(i, ad, ao, p_loc, p_oth, apa);
152: /* set values in C */
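    /* apJ holds the sorted global column indices of row i of AP, and apa holds the corresponding values as a
       dense row; they are copied into C in three pieces below: off-diagonal columns left of the diagonal block,
       the diagonal block itself, then the remaining off-diagonal columns */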
153: apJ = PetscSafePointerPlusOffset(apj, api[i]);
154: cdnz = cd->i[i + 1] - cd->i[i];
155: conz = co->i[i + 1] - co->i[i];
157: /* 1st off-diagonal part of C */
158: ca = PetscSafePointerPlusOffset(coa, co->i[i]);
159: k = 0;
160: for (k0 = 0; k0 < conz; k0++) {
161: if (apJ[k] >= cstart) break;
162: ca[k0] = apa[apJ[k]];
163: apa[apJ[k++]] = 0.0;
164: }
166: /* diagonal part of C */
167: ca = PetscSafePointerPlusOffset(cda, cd->i[i]);
168: for (k1 = 0; k1 < cdnz; k1++) {
169: ca[k1] = apa[apJ[k]];
170: apa[apJ[k++]] = 0.0;
171: }
173: /* 2nd off-diagonal part of C */
174: ca = PetscSafePointerPlusOffset(coa, co->i[i]);
175: for (; k0 < conz; k0++) {
176: ca[k0] = apa[apJ[k]];
177: apa[apJ[k++]] = 0.0;
178: }
179: }
180: PetscCall(MatSeqAIJRestoreArrayRead(a->A, &dummy1));
181: PetscCall(MatSeqAIJRestoreArrayRead(a->B, &dummy2));
182: PetscCall(MatSeqAIJRestoreArrayRead(ptap->P_loc, &dummy3));
183: if (ptap->P_oth) PetscCall(MatSeqAIJRestoreArrayRead(ptap->P_oth, &dummy4));
184: PetscCall(MatSeqAIJRestoreArrayWrite(c->A, &cda));
185: PetscCall(MatSeqAIJRestoreArrayWrite(c->B, &coa));
187: PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
188: PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
189: PetscFunctionReturn(PETSC_SUCCESS);
190: }
192: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat A, Mat P, PetscReal fill, Mat C)
193: {
194: MPI_Comm comm;
195: PetscMPIInt size;
196: MatProductCtx_APMPI *ptap;
197: PetscFreeSpaceList free_space = NULL, current_space = NULL;
198: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
199: Mat_SeqAIJ *ad = (Mat_SeqAIJ *)a->A->data, *ao = (Mat_SeqAIJ *)a->B->data, *p_loc, *p_oth;
200: PetscInt *pi_loc, *pj_loc, *pi_oth, *pj_oth, *dnz, *onz;
201: PetscInt *adi = ad->i, *adj = ad->j, *aoi = ao->i, *aoj = ao->j, rstart = A->rmap->rstart;
202: PetscInt *lnk, i, pnz, row, *api, *apj, *Jptr, apnz, nspacedouble = 0, j, nzi;
203: PetscInt am = A->rmap->n, pN = P->cmap->N, pn = P->cmap->n, pm = P->rmap->n;
204: PetscBT lnkbt;
205: PetscReal afill;
206: MatType mtype;
208: PetscFunctionBegin;
209: MatCheckProduct(C, 4);
210: PetscCheck(!C->product->data, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Extra product struct not empty");
211: PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
212: PetscCallMPI(MPI_Comm_size(comm, &size));
214: /* create struct MatProductCtx_APMPI and attach it to C later */
215: PetscCall(PetscNew(&ptap));
217: /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
218: PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_INITIAL_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth));
220: /* get P_loc by taking all local rows of P */
221: PetscCall(MatMPIAIJGetLocalMat(P, MAT_INITIAL_MATRIX, &ptap->P_loc));
223: p_loc = (Mat_SeqAIJ *)ptap->P_loc->data;
224: pi_loc = p_loc->i;
225: pj_loc = p_loc->j;
226: if (size > 1) {
227: p_oth = (Mat_SeqAIJ *)ptap->P_oth->data;
228: pi_oth = p_oth->i;
229: pj_oth = p_oth->j;
230: } else {
231: p_oth = NULL;
232: pi_oth = NULL;
233: pj_oth = NULL;
234: }
236: /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
237: PetscCall(PetscMalloc1(am + 1, &api));
238: ptap->api = api;
239: api[0] = 0;
241: /* create and initialize a linked list */
242: PetscCall(PetscLLCondensedCreate(pN, pN, &lnk, &lnkbt));
244: /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
245: PetscCall(PetscFreeSpaceGet(PetscRealIntMultTruncate(fill, PetscIntSumTruncate(adi[am], PetscIntSumTruncate(aoi[am], pi_loc[pm]))), &free_space));
246: current_space = free_space;
248: MatPreallocateBegin(comm, am, pn, dnz, onz);
249: for (i = 0; i < am; i++) {
250: /* diagonal portion of A */
251: nzi = adi[i + 1] - adi[i];
252: for (j = 0; j < nzi; j++) {
253: row = *adj++;
254: pnz = pi_loc[row + 1] - pi_loc[row];
255: Jptr = pj_loc + pi_loc[row];
256: /* add non-zero cols of P into the sorted linked list lnk */
257: PetscCall(PetscLLCondensedAddSorted(pnz, Jptr, lnk, lnkbt));
258: }
259: /* off-diagonal portion of A */
260: nzi = aoi[i + 1] - aoi[i];
261: for (j = 0; j < nzi; j++) {
262: row = *aoj++;
263: pnz = pi_oth[row + 1] - pi_oth[row];
264: Jptr = pj_oth + pi_oth[row];
265: PetscCall(PetscLLCondensedAddSorted(pnz, Jptr, lnk, lnkbt));
266: }
267: /* add possible missing diagonal entry */
268: if (C->force_diagonals) {
269: j = i + rstart; /* column index */
270: PetscCall(PetscLLCondensedAddSorted(1, &j, lnk, lnkbt));
271: }
273: apnz = lnk[0];
274: api[i + 1] = api[i] + apnz;
276: /* if free space is not available, double the total space in the list */
277: if (current_space->local_remaining < apnz) {
278: PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(apnz, current_space->total_array_size), &current_space));
279: nspacedouble++;
280: }
282: /* Copy data into free space, then initialize lnk */
283: PetscCall(PetscLLCondensedClean(pN, apnz, current_space->array, lnk, lnkbt));
284: PetscCall(MatPreallocateSet(i + rstart, apnz, current_space->array, dnz, onz));
286: current_space->array += apnz;
287: current_space->local_used += apnz;
288: current_space->local_remaining -= apnz;
289: }
291: /* Allocate space for apj, initialize apj, and */
292: /* destroy list of free space and other temporary array(s) */
293: PetscCall(PetscMalloc1(api[am], &ptap->apj));
294: apj = ptap->apj;
295: PetscCall(PetscFreeSpaceContiguous(&free_space, ptap->apj));
296: PetscCall(PetscLLDestroy(lnk, lnkbt));
298: /* malloc apa to store dense row A[i,:]*P */
299: PetscCall(PetscCalloc1(pN, &ptap->apa));
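  /* both apa and the linked list above are sized by pN, the global number of columns of P, which is what makes
     this variant "nonscalable"; MatMatMultSymbolic_MPIAIJ_MPIAIJ() below avoids this global-sized workspace */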
301: /* set and assemble symbolic parallel matrix C */
302: PetscCall(MatSetSizes(C, am, pn, PETSC_DETERMINE, PETSC_DETERMINE));
303: PetscCall(MatSetBlockSizesFromMats(C, A, P));
305: PetscCall(MatGetType(A, &mtype));
306: PetscCall(MatSetType(C, mtype));
307: PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz));
308: MatPreallocateEnd(dnz, onz);
310: PetscCall(MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api));
311: PetscCall(MatSetOption(C, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
312: PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
313: PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
314: PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
316: C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
317: C->ops->productnumeric = MatProductNumeric_AB;
319: /* attach the supporting struct to C for reuse */
320: C->product->data = ptap;
321: C->product->destroy = MatProductCtxDestroy_MPIAIJ_MatMatMult;
323: /* set MatInfo */
324: afill = (PetscReal)api[am] / (adi[am] + aoi[am] + pi_loc[pm] + 1) + 1.e-5;
325: if (afill < 1.0) afill = 1.0;
326: C->info.mallocs = nspacedouble;
327: C->info.fill_ratio_given = fill;
328: C->info.fill_ratio_needed = afill;
330: #if defined(PETSC_USE_INFO)
331: if (api[am]) {
332: PetscCall(PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, (double)afill));
333: PetscCall(PetscInfo(C, "Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.;\n", (double)afill));
334: } else {
335: PetscCall(PetscInfo(C, "Empty matrix product\n"));
336: }
337: #endif
338: PetscFunctionReturn(PETSC_SUCCESS);
339: }
341: static PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat, Mat, PetscReal, Mat);
342: static PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat, Mat, Mat);
344: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense_AB(Mat C)
345: {
346: Mat_Product *product = C->product;
347: Mat A = product->A, B = product->B;
349: PetscFunctionBegin;
350: if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend)
351: SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ",%" PetscInt_FMT ")", A->cmap->rstart, A->cmap->rend, B->rmap->rstart, B->rmap->rend);
353: C->ops->matmultsymbolic = MatMatMultSymbolic_MPIAIJ_MPIDense;
354: C->ops->productsymbolic = MatProductSymbolic_AB;
355: PetscFunctionReturn(PETSC_SUCCESS);
356: }
358: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense_AtB(Mat C)
359: {
360: Mat_Product *product = C->product;
361: Mat A = product->A, B = product->B;
363: PetscFunctionBegin;
364: if (A->rmap->rstart != B->rmap->rstart || A->rmap->rend != B->rmap->rend)
365: SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ",%" PetscInt_FMT ")", A->rmap->rstart, A->rmap->rend, B->rmap->rstart, B->rmap->rend);
367: C->ops->transposematmultsymbolic = MatTransposeMatMultSymbolic_MPIAIJ_MPIDense;
368: C->ops->productsymbolic = MatProductSymbolic_AtB;
369: PetscFunctionReturn(PETSC_SUCCESS);
370: }
372: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense(Mat C)
373: {
374: Mat_Product *product = C->product;
376: PetscFunctionBegin;
377: switch (product->type) {
378: case MATPRODUCT_AB:
379: PetscCall(MatProductSetFromOptions_MPIAIJ_MPIDense_AB(C));
380: break;
381: case MATPRODUCT_AtB:
382: PetscCall(MatProductSetFromOptions_MPIAIJ_MPIDense_AtB(C));
383: break;
384: default:
385: break;
386: }
387: PetscFunctionReturn(PETSC_SUCCESS);
388: }
390: typedef struct {
391: Mat workB, workB1;
392: MPI_Request *rwaits, *swaits;
393: PetscInt nsends, nrecvs;
394: MPI_Datatype *stype, *rtype;
395: PetscInt blda;
396: } MPIAIJ_MPIDense;
398: static PetscErrorCode MatMPIAIJ_MPIDenseDestroy(void **ctx)
399: {
400: MPIAIJ_MPIDense *contents = *(MPIAIJ_MPIDense **)ctx;
401: PetscInt i;
403: PetscFunctionBegin;
404: PetscCall(MatDestroy(&contents->workB));
405: PetscCall(MatDestroy(&contents->workB1));
406: for (i = 0; i < contents->nsends; i++) PetscCallMPI(MPI_Type_free(&contents->stype[i]));
407: for (i = 0; i < contents->nrecvs; i++) PetscCallMPI(MPI_Type_free(&contents->rtype[i]));
408: PetscCall(PetscFree4(contents->stype, contents->rtype, contents->rwaits, contents->swaits));
409: PetscCall(PetscFree(contents));
410: PetscFunctionReturn(PETSC_SUCCESS);
411: }
413: static PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A, Mat B, PetscReal fill, Mat C)
414: {
415: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)A->data;
416: PetscInt nz = aij->B->cmap->n, blda, m, M, n, N;
417: MPIAIJ_MPIDense *contents;
418: VecScatter ctx = aij->Mvctx;
419: PetscInt Am = A->rmap->n, Bm = B->rmap->n, BN = B->cmap->N, Bbn, Bbn1, bs, numBb;
420: MPI_Comm comm;
421: MPI_Datatype type1, *stype, *rtype;
422: const PetscInt *sindices, *sstarts, *rstarts;
423: PetscMPIInt *disp, nsends, nrecvs, nrows_to, nrows_from;
424: PetscBool cisdense;
426: PetscFunctionBegin;
427: MatCheckProduct(C, 4);
428: PetscCheck(!C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Product data not empty");
429: PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
430: PetscCall(PetscObjectBaseTypeCompare((PetscObject)C, MATMPIDENSE, &cisdense));
431: if (!cisdense) PetscCall(MatSetType(C, ((PetscObject)B)->type_name));
432: PetscCall(MatGetLocalSize(C, &m, &n));
433: PetscCall(MatGetSize(C, &M, &N));
434: if (m == PETSC_DECIDE || n == PETSC_DECIDE || M == PETSC_DECIDE || N == PETSC_DECIDE) PetscCall(MatSetSizes(C, Am, B->cmap->n, A->rmap->N, BN));
435: PetscCall(MatSetBlockSizesFromMats(C, A, B));
436: PetscCall(MatSetUp(C));
437: PetscCall(MatDenseGetLDA(B, &blda));
438: PetscCall(PetscNew(&contents));
440: PetscCall(VecScatterGetRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &sindices, NULL, NULL));
441: PetscCall(VecScatterGetRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL, NULL, NULL));
443: /* Create column block of B and C for memory scalability when BN is too large */
444: /* Estimate Bbn, column size of Bb */
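  /* heuristic: choose Bbn so that the work buffer (nz x Bbn) holds roughly twice as many entries as the local
     rows of C (Am x BN); if A has no off-process columns (nz == 0) the full width BN is used and no blocking occurs */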
445: if (nz) {
446: Bbn1 = 2 * Am * BN / nz;
447: if (!Bbn1) Bbn1 = 1;
448: } else Bbn1 = BN;
450: bs = B->cmap->bs;
451: Bbn1 = Bbn1 / bs * bs; /* Bbn1 is a multiple of bs */
452: if (Bbn1 > BN) Bbn1 = BN;
453: PetscCallMPI(MPIU_Allreduce(&Bbn1, &Bbn, 1, MPIU_INT, MPI_MAX, comm));
455: /* Enable runtime option for Bbn */
456: PetscOptionsBegin(comm, ((PetscObject)C)->prefix, "MatProduct", "Mat");
457: PetscCall(PetscOptionsDeprecated("-matmatmult_Bbn", "-matproduct_batch_size", "3.25", NULL));
458: PetscCall(PetscOptionsInt("-matproduct_batch_size", "Number of columns in Bb", "MatProduct", Bbn, &Bbn, NULL));
459: PetscOptionsEnd();
460: Bbn = PetscMin(Bbn, BN);
462: if (Bbn > 0 && Bbn < BN) {
463: numBb = BN / Bbn;
464: Bbn1 = BN - numBb * Bbn;
465: } else numBb = 0;
467: if (numBb) {
468: PetscCall(PetscInfo(C, "use Bb, BN=%" PetscInt_FMT ", Bbn=%" PetscInt_FMT "; numBb=%" PetscInt_FMT "\n", BN, Bbn, numBb));
469: if (Bbn1) { /* Create workB1 for the remaining columns */
470: PetscCall(PetscInfo(C, "use Bb1, BN=%" PetscInt_FMT ", Bbn1=%" PetscInt_FMT "\n", BN, Bbn1));
471: /* Create work matrix used to store off processor rows of B needed for local product */
472: PetscCall(MatCreateSeqDense(PETSC_COMM_SELF, nz, Bbn1, NULL, &contents->workB1));
473: } else contents->workB1 = NULL;
474: }
476: /* Create work matrix used to store off processor rows of B needed for local product */
477: PetscCall(MatCreateSeqDense(PETSC_COMM_SELF, nz, Bbn, NULL, &contents->workB));
479: /* Use MPI derived data type to reduce memory required by the send/recv buffers */
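  /* each send type selects the rows of one column of B needed by a given process and is resized to the column
     stride blda, so one MPIU_Isend with count ncols ships those rows for every column without packing; each
     receive type lands an incoming row block contiguously in workB, whose column stride is nz */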
480: PetscCall(PetscMalloc4(nsends, &stype, nrecvs, &rtype, nrecvs, &contents->rwaits, nsends, &contents->swaits));
481: contents->stype = stype;
482: contents->nsends = nsends;
484: contents->rtype = rtype;
485: contents->nrecvs = nrecvs;
486: contents->blda = blda;
488: PetscCall(PetscMalloc1(Bm, &disp));
489: for (PetscMPIInt i = 0; i < nsends; i++) {
490: PetscCall(PetscMPIIntCast(sstarts[i + 1] - sstarts[i], &nrows_to));
491: for (PetscInt j = 0; j < nrows_to; j++) PetscCall(PetscMPIIntCast(sindices[sstarts[i] + j], &disp[j])); /* rowB to be sent */
492: PetscCallMPI(MPI_Type_create_indexed_block(nrows_to, 1, disp, MPIU_SCALAR, &type1));
493: PetscCallMPI(MPI_Type_create_resized(type1, 0, blda * sizeof(PetscScalar), &stype[i]));
494: PetscCallMPI(MPI_Type_commit(&stype[i]));
495: PetscCallMPI(MPI_Type_free(&type1));
496: }
498: for (PetscMPIInt i = 0; i < nrecvs; i++) {
499: /* received values from a process form a (nrows_from x Bbn) row block in workB (column-wise) */
500: PetscCall(PetscMPIIntCast(rstarts[i + 1] - rstarts[i], &nrows_from));
501: disp[0] = 0;
502: PetscCallMPI(MPI_Type_create_indexed_block(1, nrows_from, disp, MPIU_SCALAR, &type1));
503: PetscCallMPI(MPI_Type_create_resized(type1, 0, nz * sizeof(PetscScalar), &rtype[i]));
504: PetscCallMPI(MPI_Type_commit(&rtype[i]));
505: PetscCallMPI(MPI_Type_free(&type1));
506: }
508: PetscCall(PetscFree(disp));
509: PetscCall(VecScatterRestoreRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &sindices, NULL, NULL));
510: PetscCall(VecScatterRestoreRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL, NULL, NULL));
511: PetscCall(MatSetOption(C, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
512: PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
513: PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
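  /* the diagonal-block contribution C_local = A_diag * B_local is delegated to an inner sequential MatProduct on
     the local dense blocks; the off-process rows of B are brought in and multiplied during the numeric phase */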
514: PetscCall(MatProductClear(aij->A));
515: PetscCall(MatProductClear(((Mat_MPIDense *)B->data)->A));
516: PetscCall(MatProductClear(((Mat_MPIDense *)C->data)->A));
517: PetscCall(MatProductCreateWithMat(aij->A, ((Mat_MPIDense *)B->data)->A, NULL, ((Mat_MPIDense *)C->data)->A));
518: PetscCall(MatProductSetType(((Mat_MPIDense *)C->data)->A, MATPRODUCT_AB));
519: PetscCall(MatProductSetFromOptions(((Mat_MPIDense *)C->data)->A));
520: PetscCall(MatProductSymbolic(((Mat_MPIDense *)C->data)->A));
521: C->product->data = contents;
522: C->product->destroy = MatMPIAIJ_MPIDenseDestroy;
523: C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIDense;
524: PetscFunctionReturn(PETSC_SUCCESS);
525: }
527: PETSC_INTERN PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat, Mat, Mat, const PetscBool);
529: /*
530: Performs an efficient scatter on the rows of B needed by this process; this is
531: a modification of the VecScatterBegin_() routines.
533: Input: if Bbidx = 0 the scattered rows are placed in contents->workB, otherwise in contents->workB1; see MatMatMultSymbolic_MPIAIJ_MPIDense()
534: */
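/* The receives are posted directly into workB and the sends go directly out of B's array using the MPI derived
   datatypes built in MatMatMultSymbolic_MPIAIJ_MPIDense(), so no intermediate pack/unpack buffers are needed */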
536: static PetscErrorCode MatMPIDenseScatter(Mat A, Mat B, PetscInt Bbidx, Mat C, Mat *outworkB)
537: {
538: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)A->data;
539: const PetscScalar *b;
540: PetscScalar *rvalues;
541: VecScatter ctx = aij->Mvctx;
542: const PetscInt *sindices, *sstarts, *rstarts;
543: const PetscMPIInt *sprocs, *rprocs;
544: PetscMPIInt nsends, nrecvs;
545: MPI_Request *swaits, *rwaits;
546: MPI_Comm comm;
547: PetscMPIInt tag = ((PetscObject)ctx)->tag, ncols, nrows, nsends_mpi, nrecvs_mpi;
548: MPIAIJ_MPIDense *contents;
549: Mat workB;
550: MPI_Datatype *stype, *rtype;
551: PetscInt blda;
553: PetscFunctionBegin;
554: MatCheckProduct(C, 4);
555: PetscCheck(C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Product data empty");
556: PetscCall(PetscMPIIntCast(B->cmap->N, &ncols));
557: PetscCall(PetscMPIIntCast(aij->B->cmap->n, &nrows));
558: contents = (MPIAIJ_MPIDense *)C->product->data;
559: PetscCall(VecScatterGetRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &sindices, &sprocs, NULL /*bs*/));
560: PetscCall(VecScatterGetRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL, &rprocs, NULL /*bs*/));
561: PetscCall(PetscMPIIntCast(nsends, &nsends_mpi));
562: PetscCall(PetscMPIIntCast(nrecvs, &nrecvs_mpi));
563: if (Bbidx == 0) workB = *outworkB = contents->workB;
564: else workB = *outworkB = contents->workB1;
565: PetscCheck(nrows == workB->rmap->n, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Number of rows of workB %" PetscInt_FMT " not equal to columns of aij->B %d", workB->rmap->n, nrows);
566: swaits = contents->swaits;
567: rwaits = contents->rwaits;
569: PetscCall(MatDenseGetArrayRead(B, &b));
570: PetscCall(MatDenseGetLDA(B, &blda));
571: PetscCheck(blda == contents->blda, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Cannot reuse an input matrix with lda %" PetscInt_FMT " != %" PetscInt_FMT, blda, contents->blda);
572: PetscCall(MatDenseGetArray(workB, &rvalues));
574: /* Post recv, use MPI derived data type to save memory */
575: PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
576: rtype = contents->rtype;
577: for (PetscMPIInt i = 0; i < nrecvs; i++) PetscCallMPI(MPIU_Irecv(rvalues + (rstarts[i] - rstarts[0]), ncols, rtype[i], rprocs[i], tag, comm, rwaits + i));
579: stype = contents->stype;
580: for (PetscMPIInt i = 0; i < nsends; i++) PetscCallMPI(MPIU_Isend(b, ncols, stype[i], sprocs[i], tag, comm, swaits + i));
582: if (nrecvs) PetscCallMPI(MPI_Waitall(nrecvs_mpi, rwaits, MPI_STATUSES_IGNORE));
583: if (nsends) PetscCallMPI(MPI_Waitall(nsends_mpi, swaits, MPI_STATUSES_IGNORE));
585: PetscCall(VecScatterRestoreRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &sindices, &sprocs, NULL));
586: PetscCall(VecScatterRestoreRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL, &rprocs, NULL));
587: PetscCall(MatDenseRestoreArrayRead(B, &b));
588: PetscCall(MatDenseRestoreArray(workB, &rvalues));
589: PetscFunctionReturn(PETSC_SUCCESS);
590: }
592: static PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A, Mat B, Mat C)
593: {
594: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)A->data;
595: Mat_MPIDense *bdense = (Mat_MPIDense *)B->data;
596: Mat_MPIDense *cdense = (Mat_MPIDense *)C->data;
597: Mat workB;
598: MPIAIJ_MPIDense *contents;
600: PetscFunctionBegin;
601: MatCheckProduct(C, 3);
602: PetscCheck(C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Product data empty");
603: contents = (MPIAIJ_MPIDense *)C->product->data;
604: /* diagonal block of A times all local rows of B, first make sure that everything is up-to-date */
605: if (!cdense->A->product) {
606: PetscCall(MatProductCreateWithMat(aij->A, bdense->A, NULL, cdense->A));
607: PetscCall(MatProductSetType(cdense->A, MATPRODUCT_AB));
608: PetscCall(MatProductSetFromOptions(cdense->A));
609: PetscCall(MatProductSymbolic(cdense->A));
610: } else PetscCall(MatProductReplaceMats(aij->A, bdense->A, NULL, cdense->A));
611: if (PetscDefined(HAVE_CUPM) && !cdense->A->product->clear) {
612: PetscBool flg;
614: PetscCall(PetscObjectTypeCompare((PetscObject)C, MATMPIDENSE, &flg));
615: if (flg) PetscCall(PetscObjectTypeCompare((PetscObject)A, MATMPIAIJ, &flg));
616: if (!flg) cdense->A->product->clear = PETSC_TRUE; /* if either A or C is a device Mat, make sure MatProductClear() is called */
617: }
618: PetscCall(MatProductNumeric(cdense->A));
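  /* if workB spans every column of B the off-process contribution is added in a single pass; otherwise B and C are
     processed in column blocks of width n, with the (possibly narrower) final block using workB1 */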
619: if (contents->workB->cmap->n == B->cmap->N) {
620: /* get off processor parts of B needed to complete C=A*B */
621: PetscCall(MatMPIDenseScatter(A, B, 0, C, &workB));
623: /* off-diagonal block of A times nonlocal rows of B */
624: PetscCall(MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B, workB, cdense->A, PETSC_TRUE));
625: } else {
626: Mat Bb, Cb;
627: PetscInt BN = B->cmap->N, n = contents->workB->cmap->n;
628: PetscBool ccpu;
630: PetscCheck(n > 0, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Column block size %" PetscInt_FMT " must be positive", n);
631: /* Prevent unneeded copies back and forth from the GPU
632: when getting and restoring the submatrix.
633: We need proper GPU code for AIJ * dense in parallel */
634: PetscCall(MatBoundToCPU(C, &ccpu));
635: PetscCall(MatBindToCPU(C, PETSC_TRUE));
636: for (PetscInt i = 0; i < BN; i += n) {
637: PetscCall(MatDenseGetSubMatrix(B, PETSC_DECIDE, PETSC_DECIDE, i, PetscMin(i + n, BN), &Bb));
638: PetscCall(MatDenseGetSubMatrix(C, PETSC_DECIDE, PETSC_DECIDE, i, PetscMin(i + n, BN), &Cb));
640: /* get off processor parts of B needed to complete C=A*B */
641: PetscCall(MatMPIDenseScatter(A, Bb, (i + n) > BN, C, &workB));
643: /* off-diagonal block of A times nonlocal rows of B */
644: cdense = (Mat_MPIDense *)Cb->data;
645: PetscCall(MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B, workB, cdense->A, PETSC_TRUE));
646: PetscCall(MatDenseRestoreSubMatrix(B, &Bb));
647: PetscCall(MatDenseRestoreSubMatrix(C, &Cb));
648: }
649: PetscCall(MatBindToCPU(C, ccpu));
650: }
651: PetscFunctionReturn(PETSC_SUCCESS);
652: }
654: PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A, Mat P, Mat C)
655: {
656: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data, *c = (Mat_MPIAIJ *)C->data;
657: Mat_SeqAIJ *ad = (Mat_SeqAIJ *)a->A->data, *ao = (Mat_SeqAIJ *)a->B->data;
658: Mat_SeqAIJ *cd = (Mat_SeqAIJ *)c->A->data, *co = (Mat_SeqAIJ *)c->B->data;
659: PetscInt *adi = ad->i, *adj, *aoi = ao->i, *aoj;
660: PetscScalar *ada, *aoa, *cda = cd->a, *coa = co->a;
661: Mat_SeqAIJ *p_loc, *p_oth;
662: PetscInt *pi_loc, *pj_loc, *pi_oth, *pj_oth, *pj;
663: PetscScalar *pa_loc, *pa_oth, *pa, valtmp, *ca;
664: PetscInt cm = C->rmap->n, anz, pnz;
665: MatProductCtx_APMPI *ptap;
666: PetscScalar *apa_sparse;
667: const PetscScalar *dummy;
668: PetscInt *api, *apj, *apJ, i, j, k, row;
669: PetscInt cstart = C->cmap->rstart;
670: PetscInt cdnz, conz, k0, k1, nextp;
671: MPI_Comm comm;
672: PetscMPIInt size;
674: PetscFunctionBegin;
675: MatCheckProduct(C, 3);
676: ptap = (MatProductCtx_APMPI *)C->product->data;
677: PetscCheck(ptap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtAP cannot be computed. Missing data");
678: PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
679: PetscCallMPI(MPI_Comm_size(comm, &size));
680: PetscCheck(ptap->P_oth || size <= 1, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "AP cannot be reused. Do not call MatProductClear()");
682: /* flag CPU mask for C */
683: #if defined(PETSC_HAVE_DEVICE)
684: if (C->offloadmask != PETSC_OFFLOAD_UNALLOCATED) C->offloadmask = PETSC_OFFLOAD_CPU;
685: if (c->A->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->A->offloadmask = PETSC_OFFLOAD_CPU;
686: if (c->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->B->offloadmask = PETSC_OFFLOAD_CPU;
687: #endif
688: apa_sparse = ptap->apa;
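  /* apa_sparse is indexed by position within row i's sparsity pattern apJ (at most apnz_max entries) rather than by
     global column index, which is what removes the O(pN) dense work array of the nonscalable variant */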
690: /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
691: /* update numerical values of P_oth and P_loc */
692: PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_REUSE_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth));
693: PetscCall(MatMPIAIJGetLocalMat(P, MAT_REUSE_MATRIX, &ptap->P_loc));
695: /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
696: /* get data from symbolic products */
697: p_loc = (Mat_SeqAIJ *)ptap->P_loc->data;
698: pi_loc = p_loc->i;
699: pj_loc = p_loc->j;
700: pa_loc = p_loc->a;
701: if (size > 1) {
702: p_oth = (Mat_SeqAIJ *)ptap->P_oth->data;
703: pi_oth = p_oth->i;
704: pj_oth = p_oth->j;
705: pa_oth = p_oth->a;
706: } else {
707: p_oth = NULL;
708: pi_oth = NULL;
709: pj_oth = NULL;
710: pa_oth = NULL;
711: }
713: /* trigger copy to CPU */
714: PetscCall(MatSeqAIJGetArrayRead(a->A, &dummy));
715: PetscCall(MatSeqAIJRestoreArrayRead(a->A, &dummy));
716: PetscCall(MatSeqAIJGetArrayRead(a->B, &dummy));
717: PetscCall(MatSeqAIJRestoreArrayRead(a->B, &dummy));
718: api = ptap->api;
719: apj = ptap->apj;
720: for (i = 0; i < cm; i++) {
721: apJ = apj + api[i];
723: /* diagonal portion of A */
724: anz = adi[i + 1] - adi[i];
725: adj = ad->j + adi[i];
726: ada = ad->a + adi[i];
727: for (j = 0; j < anz; j++) {
728: row = adj[j];
729: pnz = pi_loc[row + 1] - pi_loc[row];
730: pj = pj_loc + pi_loc[row];
731: pa = pa_loc + pi_loc[row];
732: /* perform sparse axpy */
733: valtmp = ada[j];
734: nextp = 0;
735: for (k = 0; nextp < pnz; k++) {
736: if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
737: apa_sparse[k] += valtmp * pa[nextp++];
738: }
739: }
740: PetscCall(PetscLogFlops(2.0 * pnz));
741: }
743: /* off-diagonal portion of A */
744: anz = aoi[i + 1] - aoi[i];
745: aoj = PetscSafePointerPlusOffset(ao->j, aoi[i]);
746: aoa = PetscSafePointerPlusOffset(ao->a, aoi[i]);
747: for (j = 0; j < anz; j++) {
748: row = aoj[j];
749: pnz = pi_oth[row + 1] - pi_oth[row];
750: pj = pj_oth + pi_oth[row];
751: pa = pa_oth + pi_oth[row];
752: /* perform sparse axpy */
753: valtmp = aoa[j];
754: nextp = 0;
755: for (k = 0; nextp < pnz; k++) {
756: if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
757: apa_sparse[k] += valtmp * pa[nextp++];
758: }
759: }
760: PetscCall(PetscLogFlops(2.0 * pnz));
761: }
763: /* set values in C */
764: cdnz = cd->i[i + 1] - cd->i[i];
765: conz = co->i[i + 1] - co->i[i];
767: /* 1st off-diagonal part of C */
768: ca = PetscSafePointerPlusOffset(coa, co->i[i]);
769: k = 0;
770: for (k0 = 0; k0 < conz; k0++) {
771: if (apJ[k] >= cstart) break;
772: ca[k0] = apa_sparse[k];
773: apa_sparse[k] = 0.0;
774: k++;
775: }
777: /* diagonal part of C */
778: ca = cda + cd->i[i];
779: for (k1 = 0; k1 < cdnz; k1++) {
780: ca[k1] = apa_sparse[k];
781: apa_sparse[k] = 0.0;
782: k++;
783: }
785: /* 2nd off-diagonal part of C */
786: ca = PetscSafePointerPlusOffset(coa, co->i[i]);
787: for (; k0 < conz; k0++) {
788: ca[k0] = apa_sparse[k];
789: apa_sparse[k] = 0.0;
790: k++;
791: }
792: }
793: PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
794: PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
795: PetscFunctionReturn(PETSC_SUCCESS);
796: }
798: /* same as MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(), except using LLCondensed to avoid O(BN) memory requirement */
799: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A, Mat P, PetscReal fill, Mat C)
800: {
801: MPI_Comm comm;
802: PetscMPIInt size;
803: MatProductCtx_APMPI *ptap;
804: PetscFreeSpaceList free_space = NULL, current_space = NULL;
805: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
806: Mat_SeqAIJ *ad = (Mat_SeqAIJ *)a->A->data, *ao = (Mat_SeqAIJ *)a->B->data, *p_loc, *p_oth;
807: PetscInt *pi_loc, *pj_loc, *pi_oth, *pj_oth, *dnz, *onz;
808: PetscInt *adi = ad->i, *adj = ad->j, *aoi = ao->i, *aoj = ao->j, rstart = A->rmap->rstart;
809: PetscInt i, pnz, row, *api, *apj, *Jptr, apnz, nspacedouble = 0, j, nzi, *lnk, apnz_max = 1;
810: PetscInt am = A->rmap->n, pn = P->cmap->n, pm = P->rmap->n, lsize = pn + 20;
811: PetscReal afill;
812: MatType mtype;
814: PetscFunctionBegin;
815: MatCheckProduct(C, 4);
816: PetscCheck(!C->product->data, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Extra product struct not empty");
817: PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
818: PetscCallMPI(MPI_Comm_size(comm, &size));
820: /* create struct MatProductCtx_APMPI and attach it to C later */
821: PetscCall(PetscNew(&ptap));
823: /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
824: PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_INITIAL_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth));
826: /* get P_loc by taking all local rows of P */
827: PetscCall(MatMPIAIJGetLocalMat(P, MAT_INITIAL_MATRIX, &ptap->P_loc));
829: p_loc = (Mat_SeqAIJ *)ptap->P_loc->data;
830: pi_loc = p_loc->i;
831: pj_loc = p_loc->j;
832: if (size > 1) {
833: p_oth = (Mat_SeqAIJ *)ptap->P_oth->data;
834: pi_oth = p_oth->i;
835: pj_oth = p_oth->j;
836: } else {
837: p_oth = NULL;
838: pi_oth = NULL;
839: pj_oth = NULL;
840: }
842: /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
843: PetscCall(PetscMalloc1(am + 1, &api));
844: ptap->api = api;
845: api[0] = 0;
847: PetscCall(PetscLLCondensedCreate_Scalable(lsize, &lnk));
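  /* the condensed linked list starts with room for lsize = pn + 20 entries and is grown on demand below; its first
     entry always holds the current number of column indices stored in the list */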
849: /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
850: PetscCall(PetscFreeSpaceGet(PetscRealIntMultTruncate(fill, PetscIntSumTruncate(adi[am], PetscIntSumTruncate(aoi[am], pi_loc[pm]))), &free_space));
851: current_space = free_space;
852: MatPreallocateBegin(comm, am, pn, dnz, onz);
853: for (i = 0; i < am; i++) {
854: /* diagonal portion of A */
855: nzi = adi[i + 1] - adi[i];
856: for (j = 0; j < nzi; j++) {
857: row = *adj++;
858: pnz = pi_loc[row + 1] - pi_loc[row];
859: Jptr = pj_loc + pi_loc[row];
860: /* Expand list if it is not long enough */
861: if (pnz + apnz_max > lsize) {
862: lsize = pnz + apnz_max;
863: PetscCall(PetscLLCondensedExpand_Scalable(lsize, &lnk));
864: }
865: /* add non-zero cols of P into the sorted linked list lnk */
866: PetscCall(PetscLLCondensedAddSorted_Scalable(pnz, Jptr, lnk));
867: apnz = *lnk; /* The first element in the list is the number of items in the list */
868: api[i + 1] = api[i] + apnz;
869: if (apnz > apnz_max) apnz_max = apnz + 1; /* '1' for diagonal entry */
870: }
871: /* off-diagonal portion of A */
872: nzi = aoi[i + 1] - aoi[i];
873: for (j = 0; j < nzi; j++) {
874: row = *aoj++;
875: pnz = pi_oth[row + 1] - pi_oth[row];
876: Jptr = pj_oth + pi_oth[row];
877: /* Expand list if it is not long enough */
878: if (pnz + apnz_max > lsize) {
879: lsize = pnz + apnz_max;
880: PetscCall(PetscLLCondensedExpand_Scalable(lsize, &lnk));
881: }
882: /* add non-zero cols of P into the sorted linked list lnk */
883: PetscCall(PetscLLCondensedAddSorted_Scalable(pnz, Jptr, lnk));
884: apnz = *lnk; /* The first element in the list is the number of items in the list */
885: api[i + 1] = api[i] + apnz;
886: if (apnz > apnz_max) apnz_max = apnz + 1; /* '1' for diagonal entry */
887: }
889: /* add missing diagonal entry */
890: if (C->force_diagonals) {
891: j = i + rstart; /* column index */
892: PetscCall(PetscLLCondensedAddSorted_Scalable(1, &j, lnk));
893: }
895: apnz = *lnk;
896: api[i + 1] = api[i] + apnz;
897: if (apnz > apnz_max) apnz_max = apnz;
899: /* if free space is not available, double the total space in the list */
900: if (current_space->local_remaining < apnz) {
901: PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(apnz, current_space->total_array_size), &current_space));
902: nspacedouble++;
903: }
905: /* Copy data into free space, then initialize lnk */
906: PetscCall(PetscLLCondensedClean_Scalable(apnz, current_space->array, lnk));
907: PetscCall(MatPreallocateSet(i + rstart, apnz, current_space->array, dnz, onz));
909: current_space->array += apnz;
910: current_space->local_used += apnz;
911: current_space->local_remaining -= apnz;
912: }
914: /* Allocate space for apj, initialize apj, and */
915: /* destroy list of free space and other temporary array(s) */
916: PetscCall(PetscMalloc1(api[am], &ptap->apj));
917: apj = ptap->apj;
918: PetscCall(PetscFreeSpaceContiguous(&free_space, ptap->apj));
919: PetscCall(PetscLLCondensedDestroy_Scalable(lnk));
921: /* create and assemble symbolic parallel matrix C */
922: PetscCall(MatSetSizes(C, am, pn, PETSC_DETERMINE, PETSC_DETERMINE));
923: PetscCall(MatSetBlockSizesFromMats(C, A, P));
924: PetscCall(MatGetType(A, &mtype));
925: PetscCall(MatSetType(C, mtype));
926: PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz));
927: MatPreallocateEnd(dnz, onz);
929: /* malloc apa for assembly C */
930: PetscCall(PetscCalloc1(apnz_max, &ptap->apa));
932: PetscCall(MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api));
933: PetscCall(MatSetOption(C, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
934: PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
935: PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
936: PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
938: C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ;
939: C->ops->productnumeric = MatProductNumeric_AB;
941: /* attach the supporting struct to C for reuse */
942: C->product->data = ptap;
943: C->product->destroy = MatProductCtxDestroy_MPIAIJ_MatMatMult;
945: /* set MatInfo */
946: afill = (PetscReal)api[am] / (adi[am] + aoi[am] + pi_loc[pm] + 1) + 1.e-5;
947: if (afill < 1.0) afill = 1.0;
948: C->info.mallocs = nspacedouble;
949: C->info.fill_ratio_given = fill;
950: C->info.fill_ratio_needed = afill;
952: #if defined(PETSC_USE_INFO)
953: if (api[am]) {
954: PetscCall(PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, (double)afill));
955: PetscCall(PetscInfo(C, "Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.;\n", (double)afill));
956: } else {
957: PetscCall(PetscInfo(C, "Empty matrix product\n"));
958: }
959: #endif
960: PetscFunctionReturn(PETSC_SUCCESS);
961: }
963: /* This function is needed for the seqMPI matrix-matrix multiplication. */
964: /* Three individually sorted input arrays are merged into one output array; the size */
965: /* of the output array is also returned. Duplicate entries appear only once in the output. */
966: static void Merge3SortedArrays(PetscInt size1, PetscInt *in1, PetscInt size2, PetscInt *in2, PetscInt size3, PetscInt *in3, PetscInt *size4, PetscInt *out)
967: {
968: PetscInt i = 0, j = 0, k = 0, l = 0;
970: /* Traverse all three arrays */
971: while (i < size1 && j < size2 && k < size3) {
972: if (in1[i] < in2[j] && in1[i] < in3[k]) {
973: out[l++] = in1[i++];
974: } else if (in2[j] < in1[i] && in2[j] < in3[k]) {
975: out[l++] = in2[j++];
976: } else if (in3[k] < in1[i] && in3[k] < in2[j]) {
977: out[l++] = in3[k++];
978: } else if (in1[i] == in2[j] && in1[i] < in3[k]) {
979: out[l++] = in1[i];
980: i++, j++;
981: } else if (in1[i] == in3[k] && in1[i] < in2[j]) {
982: out[l++] = in1[i];
983: i++, k++;
984: } else if (in3[k] == in2[j] && in2[j] < in1[i]) {
985: out[l++] = in2[j];
986: k++, j++;
987: } else if (in1[i] == in2[j] && in1[i] == in3[k]) {
988: out[l++] = in1[i];
989: i++, j++, k++;
990: }
991: }
993: /* Traverse two remaining arrays */
994: while (i < size1 && j < size2) {
995: if (in1[i] < in2[j]) {
996: out[l++] = in1[i++];
997: } else if (in1[i] > in2[j]) {
998: out[l++] = in2[j++];
999: } else {
1000: out[l++] = in1[i];
1001: i++, j++;
1002: }
1003: }
1005: while (i < size1 && k < size3) {
1006: if (in1[i] < in3[k]) {
1007: out[l++] = in1[i++];
1008: } else if (in1[i] > in3[k]) {
1009: out[l++] = in3[k++];
1010: } else {
1011: out[l++] = in1[i];
1012: i++, k++;
1013: }
1014: }
1016: while (k < size3 && j < size2) {
1017: if (in3[k] < in2[j]) {
1018: out[l++] = in3[k++];
1019: } else if (in3[k] > in2[j]) {
1020: out[l++] = in2[j++];
1021: } else {
1022: out[l++] = in3[k];
1023: k++, j++;
1024: }
1025: }
1027: /* Traverse one remaining array */
1028: while (i < size1) out[l++] = in1[i++];
1029: while (j < size2) out[l++] = in2[j++];
1030: while (k < size3) out[l++] = in3[k++];
1032: *size4 = l;
1033: }
1035: /* This matrix-matrix multiplication algorithm divides the multiplication into three multiplications and */
1036: /* adds up the products. Two of these three multiplications are performed with existing (sequential) */
1037: /* matrix-matrix multiplications. */
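/* Concretely: A_diag * P_diag is computed as a sequential MatProduct (adpd), A_off * P_oth with a sequential  */
/* symbolic multiply (aopoth), and A_diag * P_off is accumulated here with a linked list (adpo); the three     */
/* sorted column lists are then merged row by row with Merge3SortedArrays() to form the pattern of A*P.        */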
1038: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(Mat A, Mat P, PetscReal fill, Mat C)
1039: {
1040: MPI_Comm comm;
1041: PetscMPIInt size;
1042: MatProductCtx_APMPI *ptap;
1043: PetscFreeSpaceList free_space_diag = NULL, current_space = NULL;
1044: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
1045: Mat_SeqAIJ *ad = (Mat_SeqAIJ *)a->A->data, *ao = (Mat_SeqAIJ *)a->B->data, *p_loc;
1046: Mat_MPIAIJ *p = (Mat_MPIAIJ *)P->data;
1047: Mat_SeqAIJ *adpd_seq, *p_off, *aopoth_seq;
1048: PetscInt adponz, adpdnz;
1049: PetscInt *pi_loc, *dnz, *onz;
1050: PetscInt *adi = ad->i, *adj = ad->j, *aoi = ao->i, rstart = A->rmap->rstart;
1051: PetscInt *lnk, i, i1 = 0, pnz, row, *adpoi, *adpoj, *api, *adpoJ, *aopJ, *apJ, *Jptr, aopnz, nspacedouble = 0, j, nzi, *apj, apnz, *adpdi, *adpdj, *adpdJ, *poff_i, *poff_j, *j_temp, *aopothi, *aopothj;
1052: PetscInt am = A->rmap->n, pN = P->cmap->N, pn = P->cmap->n, pm = P->rmap->n, p_colstart, p_colend;
1053: PetscBT lnkbt;
1054: PetscReal afill;
1055: PetscMPIInt rank;
1056: Mat adpd, aopoth;
1057: MatType mtype;
1058: const char *prefix;
1060: PetscFunctionBegin;
1061: MatCheckProduct(C, 4);
1062: PetscCheck(!C->product->data, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Extra product struct not empty");
1063: PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
1064: PetscCallMPI(MPI_Comm_size(comm, &size));
1065: PetscCallMPI(MPI_Comm_rank(comm, &rank));
1066: PetscCall(MatGetOwnershipRangeColumn(P, &p_colstart, &p_colend));
1068: /* create struct MatProductCtx_APMPI and attach it to C later */
1069: PetscCall(PetscNew(&ptap));
1071: /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
1072: PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_INITIAL_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth));
1074: /* get P_loc by taking all local rows of P */
1075: PetscCall(MatMPIAIJGetLocalMat(P, MAT_INITIAL_MATRIX, &ptap->P_loc));
1077: p_loc = (Mat_SeqAIJ *)ptap->P_loc->data;
1078: pi_loc = p_loc->i;
1080: /* Allocate memory for the i arrays of the matrices A*P, A_diag*P_off and A_offd * P */
1081: PetscCall(PetscMalloc1(am + 1, &api));
1082: PetscCall(PetscMalloc1(am + 1, &adpoi));
1084: adpoi[0] = 0;
1085: ptap->api = api;
1086: api[0] = 0;
1088: /* create and initialize a linked list, will be used for both A_diag * P_loc_off and A_offd * P_oth */
1089: PetscCall(PetscLLCondensedCreate(pN, pN, &lnk, &lnkbt));
1090: MatPreallocateBegin(comm, am, pn, dnz, onz);
1092: /* Symbolic calc of A_loc_diag * P_loc_diag */
1093: PetscCall(MatGetOptionsPrefix(A, &prefix));
1094: PetscCall(MatProductCreate(a->A, p->A, NULL, &adpd));
1095: PetscCall(MatGetOptionsPrefix(A, &prefix));
1096: PetscCall(MatSetOptionsPrefix(adpd, prefix));
1097: PetscCall(MatAppendOptionsPrefix(adpd, "inner_diag_"));
1099: PetscCall(MatProductSetType(adpd, MATPRODUCT_AB));
1100: PetscCall(MatProductSetAlgorithm(adpd, "sorted"));
1101: PetscCall(MatProductSetFill(adpd, fill));
1102: PetscCall(MatProductSetFromOptions(adpd));
1104: adpd->force_diagonals = C->force_diagonals;
1105: PetscCall(MatProductSymbolic(adpd));
1107: adpd_seq = (Mat_SeqAIJ *)((adpd)->data);
1108: adpdi = adpd_seq->i;
1109: adpdj = adpd_seq->j;
1110: p_off = (Mat_SeqAIJ *)p->B->data;
1111: poff_i = p_off->i;
1112: poff_j = p_off->j;
1114: /* j_temp stores indices of a result row before they are added to the linked list */
1115: PetscCall(PetscMalloc1(pN, &j_temp));
1117: /* Symbolic calc of the A_diag * p_loc_off */
1118: /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
1119: PetscCall(PetscFreeSpaceGet(PetscRealIntMultTruncate(fill, PetscIntSumTruncate(adi[am], PetscIntSumTruncate(aoi[am], pi_loc[pm]))), &free_space_diag));
1120: current_space = free_space_diag;
1122: for (i = 0; i < am; i++) {
1123: /* A_diag * P_loc_off */
1124: nzi = adi[i + 1] - adi[i];
1125: for (j = 0; j < nzi; j++) {
1126: row = *adj++;
1127: pnz = poff_i[row + 1] - poff_i[row];
1128: Jptr = poff_j + poff_i[row];
1129: for (i1 = 0; i1 < pnz; i1++) j_temp[i1] = p->garray[Jptr[i1]];
1130: /* add non-zero cols of P into the sorted linked list lnk */
1131: PetscCall(PetscLLCondensedAddSorted(pnz, j_temp, lnk, lnkbt));
1132: }
1134: adponz = lnk[0];
1135: adpoi[i + 1] = adpoi[i] + adponz;
1137: /* if free space is not available, double the total space in the list */
1138: if (current_space->local_remaining < adponz) {
1139: PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(adponz, current_space->total_array_size), &current_space));
1140: nspacedouble++;
1141: }
1143: /* Copy data into free space, then initialize lnk */
1144: PetscCall(PetscLLCondensedClean(pN, adponz, current_space->array, lnk, lnkbt));
1146: current_space->array += adponz;
1147: current_space->local_used += adponz;
1148: current_space->local_remaining -= adponz;
1149: }
1151: /* Symbolic calc of A_off * P_oth */
1152: PetscCall(MatSetOptionsPrefix(a->B, prefix));
1153: PetscCall(MatAppendOptionsPrefix(a->B, "inner_offdiag_"));
1154: PetscCall(MatCreate(PETSC_COMM_SELF, &aopoth));
1155: PetscCall(MatMatMultSymbolic_SeqAIJ_SeqAIJ(a->B, ptap->P_oth, fill, aopoth));
1156: aopoth_seq = (Mat_SeqAIJ *)((aopoth)->data);
1157: aopothi = aopoth_seq->i;
1158: aopothj = aopoth_seq->j;
1160: /* Allocate space for apj, adpj, aopj, ... */
1161: /* destroy lists of free space and other temporary array(s) */
1163: PetscCall(PetscMalloc1(aopothi[am] + adpoi[am] + adpdi[am], &ptap->apj));
1164: PetscCall(PetscMalloc1(adpoi[am], &adpoj));
1166: /* Copy from linked list to j-array */
1167: PetscCall(PetscFreeSpaceContiguous(&free_space_diag, adpoj));
1168: PetscCall(PetscLLDestroy(lnk, lnkbt));
1170: adpoJ = adpoj;
1171: adpdJ = adpdj;
1172: aopJ = aopothj;
1173: apj = ptap->apj;
1174: apJ = apj; /* still empty */
1176: /* Merge j-arrays of A_off * P, A_diag * P_loc_off, and */
1177: /* A_diag * P_loc_diag to get A*P */
1178: for (i = 0; i < am; i++) {
1179: aopnz = aopothi[i + 1] - aopothi[i];
1180: adponz = adpoi[i + 1] - adpoi[i];
1181: adpdnz = adpdi[i + 1] - adpdi[i];
1183: /* Correct indices from A_diag*P_diag */
1184: for (i1 = 0; i1 < adpdnz; i1++) adpdJ[i1] += p_colstart;
1185: /* Merge j-arrays of A_diag * P_loc_off and A_diag * P_loc_diag and A_off * P_oth */
1186: Merge3SortedArrays(adponz, adpoJ, adpdnz, adpdJ, aopnz, aopJ, &apnz, apJ);
1187: PetscCall(MatPreallocateSet(i + rstart, apnz, apJ, dnz, onz));
1189: aopJ += aopnz;
1190: adpoJ += adponz;
1191: adpdJ += adpdnz;
1192: apJ += apnz;
1193: api[i + 1] = api[i] + apnz;
1194: }
1196: /* malloc apa to store dense row A[i,:]*P */
1197: PetscCall(PetscCalloc1(pN, &ptap->apa));
1199: /* create and assemble symbolic parallel matrix C */
1200: PetscCall(MatSetSizes(C, am, pn, PETSC_DETERMINE, PETSC_DETERMINE));
1201: PetscCall(MatSetBlockSizesFromMats(C, A, P));
1202: PetscCall(MatGetType(A, &mtype));
1203: PetscCall(MatSetType(C, mtype));
1204: PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz));
1205: MatPreallocateEnd(dnz, onz);
1207: PetscCall(MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api));
1208: PetscCall(MatSetOption(C, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
1209: PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
1210: PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
1211: PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
1213: C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
1214: C->ops->productnumeric = MatProductNumeric_AB;
1216: /* attach the supporting struct to C for reuse */
1217: C->product->data = ptap;
1218: C->product->destroy = MatProductCtxDestroy_MPIAIJ_MatMatMult;
1220: /* set MatInfo */
1221: afill = (PetscReal)api[am] / (adi[am] + aoi[am] + pi_loc[pm] + 1) + 1.e-5;
1222: if (afill < 1.0) afill = 1.0;
1223: C->info.mallocs = nspacedouble;
1224: C->info.fill_ratio_given = fill;
1225: C->info.fill_ratio_needed = afill;
1227: #if defined(PETSC_USE_INFO)
1228: if (api[am]) {
1229: PetscCall(PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, (double)afill));
1230: PetscCall(PetscInfo(C, "Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.;\n", (double)afill));
1231: } else {
1232: PetscCall(PetscInfo(C, "Empty matrix product\n"));
1233: }
1234: #endif
1236: PetscCall(MatDestroy(&aopoth));
1237: PetscCall(MatDestroy(&adpd));
1238: PetscCall(PetscFree(j_temp));
1239: PetscCall(PetscFree(adpoj));
1240: PetscCall(PetscFree(adpoi));
1241: PetscFunctionReturn(PETSC_SUCCESS);
1242: }
1244: /* This routine only works when scall=MAT_REUSE_MATRIX! */
1245: PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult(Mat P, Mat A, Mat C)
1246: {
1247: MatProductCtx_APMPI *ptap;
1248: Mat Pt;
1250: PetscFunctionBegin;
1251: MatCheckProduct(C, 3);
1252: ptap = (MatProductCtx_APMPI *)C->product->data;
1253: PetscCheck(ptap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtAP cannot be computed. Missing data");
1254: PetscCheck(ptap->Pt, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be reused. Do not call MatProductClear()");
1256: Pt = ptap->Pt;
1257: PetscCall(MatTransposeSetPrecursor(P, Pt));
1258: PetscCall(MatTranspose(P, MAT_REUSE_MATRIX, &Pt));
1259: PetscCall(MatMatMultNumeric_MPIAIJ_MPIAIJ(Pt, A, C));
1260: PetscFunctionReturn(PETSC_SUCCESS);
1261: }
1263: /* This routine is modified from MatPtAPSymbolic_MPIAIJ_MPIAIJ() */
1264: PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat P, Mat A, PetscReal fill, Mat C)
1265: {
1266: MatProductCtx_APMPI *ptap;
1267: Mat_MPIAIJ *p = (Mat_MPIAIJ *)P->data;
1268: MPI_Comm comm;
1269: PetscMPIInt size, rank;
1270: PetscFreeSpaceList free_space = NULL, current_space = NULL;
1271: PetscInt pn = P->cmap->n, aN = A->cmap->N, an = A->cmap->n;
1272: PetscInt *lnk, i, k, rstart;
1273: PetscBT lnkbt;
1274: PetscMPIInt tagi, tagj, *len_si, *len_s, *len_ri, nrecv, proc, nsend;
1275: PETSC_UNUSED PetscMPIInt icompleted = 0;
1276: PetscInt **buf_rj, **buf_ri, **buf_ri_k, row, ncols, *cols;
1277: PetscInt len, *dnz, *onz, *owners, nzi;
1278: PetscInt nrows, *buf_s, *buf_si, *buf_si_i, **nextrow, **nextci;
1279: MPI_Request *swaits, *rwaits;
1280: MPI_Status *sstatus, rstatus;
1281: PetscLayout rowmap;
1282: PetscInt *owners_co, *coi, *coj; /* i and j array of (p->B)^T*A*P - used in the communication */
1283: PetscMPIInt *len_r, *id_r; /* array of length of comm->size, store send/recv matrix values */
1284: PetscInt *Jptr, *prmap = p->garray, con, j, Crmax;
1285: Mat_SeqAIJ *a_loc, *c_loc, *c_oth;
1286: PetscHMapI ta;
1287: MatType mtype;
1288: const char *prefix;
1290: PetscFunctionBegin;
1291: PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
1292: PetscCallMPI(MPI_Comm_size(comm, &size));
1293: PetscCallMPI(MPI_Comm_rank(comm, &rank));
1295: /* create symbolic parallel matrix C */
1296: PetscCall(MatGetType(A, &mtype));
1297: PetscCall(MatSetType(C, mtype));
1299: C->ops->transposematmultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
1301: /* create struct MatProductCtx_APMPI and attach it to C later */
1302: PetscCall(PetscNew(&ptap));
1304: /* (0) compute Rd = Pd^T, Ro = Po^T */
1305: PetscCall(MatTranspose(p->A, MAT_INITIAL_MATRIX, &ptap->Rd));
1306: PetscCall(MatTranspose(p->B, MAT_INITIAL_MATRIX, &ptap->Ro));
1308: /* (1) compute symbolic A_loc */
1309: PetscCall(MatMPIAIJGetLocalMat(A, MAT_INITIAL_MATRIX, &ptap->A_loc));
1311: /* (2-1) compute symbolic C_oth = Ro*A_loc */
1312: PetscCall(MatGetOptionsPrefix(A, &prefix));
1313: PetscCall(MatSetOptionsPrefix(ptap->Ro, prefix));
1314: PetscCall(MatAppendOptionsPrefix(ptap->Ro, "inner_offdiag_"));
1315: PetscCall(MatCreate(PETSC_COMM_SELF, &ptap->C_oth));
1316: PetscCall(MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro, ptap->A_loc, fill, ptap->C_oth));
1318: /* (3) send coj of C_oth to other processors */
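  /* rows of C_oth correspond to global rows of C that are owned by other processes (their global indices are given
     by p->garray), so their column patterns must be sent to the owning processes and merged with C_loc in step (5) */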
1319: /* determine row ownership */
1320: PetscCall(PetscLayoutCreate(comm, &rowmap));
1321: rowmap->n = pn;
1322: rowmap->bs = 1;
1323: PetscCall(PetscLayoutSetUp(rowmap));
1324: owners = rowmap->range;
1326: /* determine the number of messages to send, their lengths */
1327: PetscCall(PetscMalloc4(size, &len_s, size, &len_si, size, &sstatus, size + 1, &owners_co));
1328: PetscCall(PetscArrayzero(len_s, size));
1329: PetscCall(PetscArrayzero(len_si, size));
1331: c_oth = (Mat_SeqAIJ *)ptap->C_oth->data;
1332: coi = c_oth->i;
1333: coj = c_oth->j;
1334: con = ptap->C_oth->rmap->n;
1335: proc = 0;
1336: for (i = 0; i < con; i++) {
1337: while (prmap[i] >= owners[proc + 1]) proc++;
1338: len_si[proc]++; /* num of rows in Co(=Pt*A) to be sent to [proc] */
1339: len_s[proc] += coi[i + 1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
1340: }
1342: len = 0; /* max length of buf_si[], see (4) */
1343: owners_co[0] = 0;
1344: nsend = 0;
1345: for (proc = 0; proc < size; proc++) {
1346: owners_co[proc + 1] = owners_co[proc] + len_si[proc];
1347: if (len_s[proc]) {
1348: nsend++;
1349: len_si[proc] = 2 * (len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
1350: len += len_si[proc];
1351: }
1352: }
1354: /* determine the number and length of messages to receive for coi and coj */
1355: PetscCall(PetscGatherNumberOfMessages(comm, NULL, len_s, &nrecv));
1356: PetscCall(PetscGatherMessageLengths2(comm, nsend, nrecv, len_s, len_si, &id_r, &len_r, &len_ri));
1358: /* post the Irecv and Isend of coj */
1359: PetscCall(PetscCommGetNewTag(comm, &tagj));
1360: PetscCall(PetscPostIrecvInt(comm, tagj, nrecv, id_r, len_r, &buf_rj, &rwaits));
1361: PetscCall(PetscMalloc1(nsend, &swaits));
1362: for (proc = 0, k = 0; proc < size; proc++) {
1363: if (!len_s[proc]) continue;
1364: i = owners_co[proc];
1365: PetscCallMPI(MPIU_Isend(coj + coi[i], len_s[proc], MPIU_INT, proc, tagj, comm, swaits + k));
1366: k++;
1367: }
1369: /* (2-2) compute symbolic C_loc = Rd*A_loc */
1370: PetscCall(MatSetOptionsPrefix(ptap->Rd, prefix));
1371: PetscCall(MatAppendOptionsPrefix(ptap->Rd, "inner_diag_"));
1372: PetscCall(MatCreate(PETSC_COMM_SELF, &ptap->C_loc));
1373: PetscCall(MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd, ptap->A_loc, fill, ptap->C_loc));
1374: c_loc = (Mat_SeqAIJ *)ptap->C_loc->data;
1376: /* receives coj are complete */
1377: for (i = 0; i < nrecv; i++) PetscCallMPI(MPI_Waitany(nrecv, rwaits, &icompleted, &rstatus));
1378: PetscCall(PetscFree(rwaits));
1379: if (nsend) PetscCallMPI(MPI_Waitall(nsend, swaits, sstatus));
1381: /* add received column indices into ta to update Crmax */
1382: a_loc = (Mat_SeqAIJ *)ptap->A_loc->data;
1384: /* create and initialize a linked list */
1385: PetscCall(PetscHMapICreateWithSize(an, &ta)); /* for compute Crmax */
1386: MatRowMergeMax_SeqAIJ(a_loc, ptap->A_loc->rmap->N, ta);
1388: for (k = 0; k < nrecv; k++) { /* k-th received message */
1389: Jptr = buf_rj[k];
1390: for (j = 0; j < len_r[k]; j++) PetscCall(PetscHMapISet(ta, *(Jptr + j) + 1, 1));
1391: }
1392: PetscCall(PetscHMapIGetSize(ta, &Crmax));
1393: PetscCall(PetscHMapIDestroy(&ta));
1395: /* (4) send and recv coi */
1396: PetscCall(PetscCommGetNewTag(comm, &tagi));
1397: PetscCall(PetscPostIrecvInt(comm, tagi, nrecv, id_r, len_ri, &buf_ri, &rwaits));
1398: PetscCall(PetscMalloc1(len, &buf_s));
1399: buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
1400: for (proc = 0, k = 0; proc < size; proc++) {
1401: if (!len_s[proc]) continue;
1402: /* form outgoing message for i-structure:
1403: buf_si[0]: nrows to be sent
1404: [1:nrows]: row index (global)
1405: [nrows+1:2*nrows+1]: i-structure index
1406: */
1407: nrows = len_si[proc] / 2 - 1; /* num of rows in Co to be sent to [proc] */
1408: buf_si_i = buf_si + nrows + 1;
1409: buf_si[0] = nrows;
1410: buf_si_i[0] = 0;
1411: nrows = 0;
1412: for (i = owners_co[proc]; i < owners_co[proc + 1]; i++) {
1413: nzi = coi[i + 1] - coi[i];
1414: buf_si_i[nrows + 1] = buf_si_i[nrows] + nzi; /* i-structure */
1415: buf_si[nrows + 1] = prmap[i] - owners[proc]; /* local row index */
1416: nrows++;
1417: }
1418: PetscCallMPI(MPIU_Isend(buf_si, len_si[proc], MPIU_INT, proc, tagi, comm, swaits + k));
1419: k++;
1420: buf_si += len_si[proc];
1421: }
1422: for (i = 0; i < nrecv; i++) PetscCallMPI(MPI_Waitany(nrecv, rwaits, &icompleted, &rstatus));
1423: PetscCall(PetscFree(rwaits));
1424: if (nsend) PetscCallMPI(MPI_Waitall(nsend, swaits, sstatus));
1426: PetscCall(PetscFree4(len_s, len_si, sstatus, owners_co));
1427: PetscCall(PetscFree(len_ri));
1428: PetscCall(PetscFree(swaits));
1429: PetscCall(PetscFree(buf_s));
1431: /* (5) compute the local portion of C */
1432: /* set initial free space to be Crmax, sufficient for holding nonzeros in each row of C */
1433: PetscCall(PetscFreeSpaceGet(Crmax, &free_space));
1434: current_space = free_space;
1436: PetscCall(PetscMalloc3(nrecv, &buf_ri_k, nrecv, &nextrow, nrecv, &nextci));
1437: for (k = 0; k < nrecv; k++) {
1438: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
1439: nrows = *buf_ri_k[k];
1440: nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th recved i-structure */
1441: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
1442: }
1444: MatPreallocateBegin(comm, pn, an, dnz, onz);
1445: PetscCall(PetscLLCondensedCreate(Crmax, aN, &lnk, &lnkbt));
1446: for (i = 0; i < pn; i++) { /* for each local row of C */
1447: /* add C_loc into C */
1448: nzi = c_loc->i[i + 1] - c_loc->i[i];
1449: Jptr = c_loc->j + c_loc->i[i];
1450: PetscCall(PetscLLCondensedAddSorted(nzi, Jptr, lnk, lnkbt));
1452: /* add received col data into lnk */
1453: for (k = 0; k < nrecv; k++) { /* k-th received message */
1454: if (i == *nextrow[k]) { /* i-th row */
1455: nzi = *(nextci[k] + 1) - *nextci[k];
1456: Jptr = buf_rj[k] + *nextci[k];
1457: PetscCall(PetscLLCondensedAddSorted(nzi, Jptr, lnk, lnkbt));
1458: nextrow[k]++;
1459: nextci[k]++;
1460: }
1461: }
1463: /* add missing diagonal entry */
1464: if (C->force_diagonals) {
1465: k = i + owners[rank]; /* column index */
1466: PetscCall(PetscLLCondensedAddSorted(1, &k, lnk, lnkbt));
1467: }
1469: nzi = lnk[0];
1471: /* copy data into free space, then initialize lnk */
1472: PetscCall(PetscLLCondensedClean(aN, nzi, current_space->array, lnk, lnkbt));
1473: PetscCall(MatPreallocateSet(i + owners[rank], nzi, current_space->array, dnz, onz));
1474: }
1475: PetscCall(PetscFree3(buf_ri_k, nextrow, nextci));
1476: PetscCall(PetscLLDestroy(lnk, lnkbt));
1477: PetscCall(PetscFreeSpaceDestroy(free_space));
1479: /* local sizes and preallocation */
1480: PetscCall(MatSetSizes(C, pn, an, PETSC_DETERMINE, PETSC_DETERMINE));
1481: PetscCall(PetscLayoutSetBlockSize(C->rmap, P->cmap->bs));
1482: PetscCall(PetscLayoutSetBlockSize(C->cmap, A->cmap->bs));
1483: PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz));
1484: MatPreallocateEnd(dnz, onz);
1486: /* add C_loc and C_oth to C */
1487: PetscCall(MatGetOwnershipRange(C, &rstart, NULL));
1488: for (i = 0; i < pn; i++) {
1489: ncols = c_loc->i[i + 1] - c_loc->i[i];
1490: cols = c_loc->j + c_loc->i[i];
1491: row = rstart + i;
1492: PetscCall(MatSetValues(C, 1, (const PetscInt *)&row, ncols, (const PetscInt *)cols, NULL, INSERT_VALUES));
1494: if (C->force_diagonals) PetscCall(MatSetValues(C, 1, (const PetscInt *)&row, 1, (const PetscInt *)&row, NULL, INSERT_VALUES));
1495: }
1496: for (i = 0; i < con; i++) {
1497: ncols = c_oth->i[i + 1] - c_oth->i[i];
1498: cols = c_oth->j + c_oth->i[i];
1499: row = prmap[i];
1500: PetscCall(MatSetValues(C, 1, (const PetscInt *)&row, ncols, (const PetscInt *)cols, NULL, INSERT_VALUES));
1501: }
1502: PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
1503: PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
1504: PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
1506: /* free communication buffers (kept as members of the merge struct in the scalable variant) */
1507: PetscCall(PetscFree(id_r));
1508: PetscCall(PetscFree(len_r));
1509: PetscCall(PetscFree(buf_ri[0]));
1510: PetscCall(PetscFree(buf_ri));
1511: PetscCall(PetscFree(buf_rj[0]));
1512: PetscCall(PetscFree(buf_rj));
1513: PetscCall(PetscLayoutDestroy(&rowmap));
1515: /* attach the supporting struct to C for reuse */
1516: C->product->data = ptap;
1517: C->product->destroy = MatProductCtxDestroy_MPIAIJ_PtAP;
1518: PetscFunctionReturn(PETSC_SUCCESS);
1519: }
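/*
   Numeric phase of C = P^T*A (nonscalable algorithm). It reuses the matrices cached by the
   symbolic phase in the MatProductCtx_APMPI context: Rd = Pd^T, Ro = Po^T, and A_loc (all local
   rows of A). The local products C_loc = Rd*A_loc and C_oth = Ro*A_loc are recomputed with the
   sequential numeric kernels and both pieces are added into the parallel matrix C with ADD_VALUES.
*/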
1521: PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat P, Mat A, Mat C)
1522: {
1523: Mat_MPIAIJ *p = (Mat_MPIAIJ *)P->data;
1524: Mat_SeqAIJ *c_seq;
1525: MatProductCtx_APMPI *ptap;
1526: Mat A_loc, C_loc, C_oth;
1527: PetscInt i, rstart, rend, cm, ncols, row;
1528: const PetscInt *cols;
1529: const PetscScalar *vals;
1531: PetscFunctionBegin;
1532: MatCheckProduct(C, 3);
1533: ptap = (MatProductCtx_APMPI *)C->product->data;
1534: PetscCheck(ptap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be computed. Missing data");
1535: PetscCheck(ptap->A_loc, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be reused. Do not call MatProductClear()");
1536: PetscCall(MatZeroEntries(C));
1538: /* These matrices are obtained in MatTransposeMatMultSymbolic() */
1539: /* 1) get R = Pd^T, Ro = Po^T */
1540: PetscCall(MatTransposeSetPrecursor(p->A, ptap->Rd));
1541: PetscCall(MatTranspose(p->A, MAT_REUSE_MATRIX, &ptap->Rd));
1542: PetscCall(MatTransposeSetPrecursor(p->B, ptap->Ro));
1543: PetscCall(MatTranspose(p->B, MAT_REUSE_MATRIX, &ptap->Ro));
1545: /* 2) compute numeric A_loc */
1546: PetscCall(MatMPIAIJGetLocalMat(A, MAT_REUSE_MATRIX, &ptap->A_loc));
1548: /* 3) C_loc = Rd*A_loc, C_oth = Ro*A_loc */
1549: A_loc = ptap->A_loc;
1550: PetscCall(ptap->C_loc->ops->matmultnumeric(ptap->Rd, A_loc, ptap->C_loc));
1551: PetscCall(ptap->C_oth->ops->matmultnumeric(ptap->Ro, A_loc, ptap->C_oth));
1552: C_loc = ptap->C_loc;
1553: C_oth = ptap->C_oth;
1555: /* add C_loc and C_oth to C */
1556: PetscCall(MatGetOwnershipRange(C, &rstart, &rend));
1558: /* C_loc -> C */
1559: cm = C_loc->rmap->N;
1560: c_seq = (Mat_SeqAIJ *)C_loc->data;
1561: cols = c_seq->j;
1562: vals = c_seq->a;
1563: for (i = 0; i < cm; i++) {
1564: ncols = c_seq->i[i + 1] - c_seq->i[i];
1565: row = rstart + i;
1566: PetscCall(MatSetValues(C, 1, &row, ncols, cols, vals, ADD_VALUES));
1567: cols += ncols;
1568: vals += ncols;
1569: }
1571: /* Co -> C, off-processor part */
1572: cm = C_oth->rmap->N;
1573: c_seq = (Mat_SeqAIJ *)C_oth->data;
1574: cols = c_seq->j;
1575: vals = c_seq->a;
1576: for (i = 0; i < cm; i++) {
1577: ncols = c_seq->i[i + 1] - c_seq->i[i];
1578: row = p->garray[i];
1579: PetscCall(MatSetValues(C, 1, &row, ncols, cols, vals, ADD_VALUES));
1580: cols += ncols;
1581: vals += ncols;
1582: }
1583: PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
1584: PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
1585: PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
1586: PetscFunctionReturn(PETSC_SUCCESS);
1587: }
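/*
   Numeric phase of C = P^T*A (scalable, merge-based algorithm). Using the structure built by the
   symbolic phase (merge->bi/bj for locally owned rows, merge->coi/coj for rows owned by other
   processes), the values are accumulated row by row with sparse axpy operations, the off-process
   block coa is sent to its owning processes, and the local and received values are merged into C.
*/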
1589: PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ(Mat P, Mat A, Mat C)
1590: {
1591: MatMergeSeqsToMPI *merge;
1592: Mat_MPIAIJ *p = (Mat_MPIAIJ *)P->data;
1593: Mat_SeqAIJ *pd = (Mat_SeqAIJ *)p->A->data, *po = (Mat_SeqAIJ *)p->B->data;
1594: MatProductCtx_APMPI *ap;
1595: PetscInt *adj;
1596: PetscInt i, j, k, anz, pnz, row, *cj, nexta;
1597: MatScalar *ada, *ca, valtmp;
1598: PetscInt am = A->rmap->n, cm = C->rmap->n, pon = (p->B)->cmap->n;
1599: MPI_Comm comm;
1600: PetscMPIInt size, rank, taga, *len_s, proc;
1601: PetscInt *owners, nrows, **buf_ri_k, **nextrow, **nextci;
1602: PetscInt **buf_ri, **buf_rj;
1603: PetscInt cnz = 0, *bj_i, *bi, *bj, bnz, nextcj; /* bi,bj,ba: local array of C(mpi mat) */
1604: MPI_Request *s_waits, *r_waits;
1605: MPI_Status *status;
1606: MatScalar **abuf_r, *ba_i, *pA, *coa, *ba;
1607: const PetscScalar *dummy;
1608: PetscInt *ai, *aj, *coi, *coj, *poJ, *pdJ;
1609: Mat A_loc;
1610: Mat_SeqAIJ *a_loc;
1612: PetscFunctionBegin;
1613: MatCheckProduct(C, 3);
1614: ap = (MatProductCtx_APMPI *)C->product->data;
1615: PetscCheck(ap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be computed. Missing data");
1616: PetscCheck(ap->A_loc, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be reused. Do not call MatProductClear()");
1617: PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
1618: PetscCallMPI(MPI_Comm_size(comm, &size));
1619: PetscCallMPI(MPI_Comm_rank(comm, &rank));
1621: merge = ap->merge;
1623: /* 2) compute numeric C_seq = P_loc^T*A_loc */
1624: /* get data from symbolic products */
1625: coi = merge->coi;
1626: coj = merge->coj;
1627: PetscCall(PetscCalloc1(coi[pon], &coa));
1628: bi = merge->bi;
1629: bj = merge->bj;
1630: owners = merge->rowmap->range;
1631: PetscCall(PetscCalloc1(bi[cm], &ba));
1633: /* get A_loc by taking all local rows of A */
1634: A_loc = ap->A_loc;
1635: PetscCall(MatMPIAIJGetLocalMat(A, MAT_REUSE_MATRIX, &A_loc));
1636: a_loc = (Mat_SeqAIJ *)A_loc->data;
1637: ai = a_loc->i;
1638: aj = a_loc->j;
1640: /* trigger copy to CPU */
1641: PetscCall(MatSeqAIJGetArrayRead(p->A, &dummy));
1642: PetscCall(MatSeqAIJRestoreArrayRead(p->A, &dummy));
1643: PetscCall(MatSeqAIJGetArrayRead(p->B, &dummy));
1644: PetscCall(MatSeqAIJRestoreArrayRead(p->B, &dummy));
1645: for (i = 0; i < am; i++) {
1646: anz = ai[i + 1] - ai[i];
1647: adj = aj + ai[i];
1648: ada = a_loc->a + ai[i];
1650: /* 2-b) Compute Cseq = P_loc[i,:]^T*A[i,:] using outer product */
1651: /* put the value into Co=(p->B)^T*A (off-diagonal part, send to others) */
1652: pnz = po->i[i + 1] - po->i[i];
1653: poJ = po->j + po->i[i];
1654: pA = po->a + po->i[i];
1655: for (j = 0; j < pnz; j++) {
1656: row = poJ[j];
1657: cj = coj + coi[row];
1658: ca = coa + coi[row];
1659: /* perform sparse axpy */
1660: nexta = 0;
1661: valtmp = pA[j];
1662: for (k = 0; nexta < anz; k++) {
1663: if (cj[k] == adj[nexta]) {
1664: ca[k] += valtmp * ada[nexta];
1665: nexta++;
1666: }
1667: }
1668: PetscCall(PetscLogFlops(2.0 * anz));
1669: }
1671: /* put the value into Cd (diagonal part) */
1672: pnz = pd->i[i + 1] - pd->i[i];
1673: pdJ = pd->j + pd->i[i];
1674: pA = pd->a + pd->i[i];
1675: for (j = 0; j < pnz; j++) {
1676: row = pdJ[j];
1677: cj = bj + bi[row];
1678: ca = ba + bi[row];
1679: /* perform sparse axpy */
1680: nexta = 0;
1681: valtmp = pA[j];
1682: for (k = 0; nexta < anz; k++) {
1683: if (cj[k] == adj[nexta]) {
1684: ca[k] += valtmp * ada[nexta];
1685: nexta++;
1686: }
1687: }
1688: PetscCall(PetscLogFlops(2.0 * anz));
1689: }
1690: }
1692: /* 3) send and recv matrix values coa */
1693: buf_ri = merge->buf_ri;
1694: buf_rj = merge->buf_rj;
1695: len_s = merge->len_s;
1696: PetscCall(PetscCommGetNewTag(comm, &taga));
1697: PetscCall(PetscPostIrecvScalar(comm, taga, merge->nrecv, merge->id_r, merge->len_r, &abuf_r, &r_waits));
1699: PetscCall(PetscMalloc2(merge->nsend, &s_waits, size, &status));
1700: for (proc = 0, k = 0; proc < size; proc++) {
1701: if (!len_s[proc]) continue;
1702: i = merge->owners_co[proc];
1703: PetscCallMPI(MPIU_Isend(coa + coi[i], len_s[proc], MPIU_MATSCALAR, proc, taga, comm, s_waits + k));
1704: k++;
1705: }
1706: if (merge->nrecv) PetscCallMPI(MPI_Waitall(merge->nrecv, r_waits, status));
1707: if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend, s_waits, status));
1709: PetscCall(PetscFree2(s_waits, status));
1710: PetscCall(PetscFree(r_waits));
1711: PetscCall(PetscFree(coa));
1713: /* 4) insert local Cseq and received values into Cmpi */
1714: PetscCall(PetscMalloc3(merge->nrecv, &buf_ri_k, merge->nrecv, &nextrow, merge->nrecv, &nextci));
1715: for (k = 0; k < merge->nrecv; k++) {
1716: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
1717: nrows = *buf_ri_k[k];
1718: nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th recved i-structure */
1719: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
1720: }
1722: for (i = 0; i < cm; i++) {
1723: row = owners[rank] + i; /* global row index of C_seq */
1724: bj_i = bj + bi[i]; /* col indices of the i-th row of C */
1725: ba_i = ba + bi[i];
1726: bnz = bi[i + 1] - bi[i];
1727: /* add received vals into ba */
1728: for (k = 0; k < merge->nrecv; k++) { /* k-th received message */
1729: /* i-th row */
1730: if (i == *nextrow[k]) {
1731: cnz = *(nextci[k] + 1) - *nextci[k];
1732: cj = buf_rj[k] + *nextci[k];
1733: ca = abuf_r[k] + *nextci[k];
1734: nextcj = 0;
1735: for (j = 0; nextcj < cnz; j++) {
1736: if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
1737: ba_i[j] += ca[nextcj++];
1738: }
1739: }
1740: nextrow[k]++;
1741: nextci[k]++;
1742: PetscCall(PetscLogFlops(2.0 * cnz));
1743: }
1744: }
1745: PetscCall(MatSetValues(C, 1, &row, bnz, bj_i, ba_i, INSERT_VALUES));
1746: }
1747: PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
1748: PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
1750: PetscCall(PetscFree(ba));
1751: PetscCall(PetscFree(abuf_r[0]));
1752: PetscCall(PetscFree(abuf_r));
1753: PetscCall(PetscFree3(buf_ri_k, nextrow, nextci));
1754: PetscFunctionReturn(PETSC_SUCCESS);
1755: }
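/*
   Symbolic phase of C = P^T*A (scalable algorithm). It gathers all local rows of A into A_loc,
   uses the symbolic transposes of Pd and Po to determine the nonzero pattern of Co = Po^T*A_loc
   (rows destined for other processes) and of the locally owned rows, exchanges the Co row
   structure with the owning processes, preallocates and assembles the pattern of C, and caches
   the communication data in a MatMergeSeqsToMPI struct for reuse by the numeric phase.
*/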
1757: PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(Mat P, Mat A, PetscReal fill, Mat C)
1758: {
1759: Mat A_loc;
1760: MatProductCtx_APMPI *ap;
1761: PetscFreeSpaceList free_space = NULL, current_space = NULL;
1762: Mat_MPIAIJ *p = (Mat_MPIAIJ *)P->data, *a = (Mat_MPIAIJ *)A->data;
1763: PetscInt *pdti, *pdtj, *poti, *potj, *ptJ;
1764: PetscInt nnz;
1765: PetscInt *lnk, *owners_co, *coi, *coj, i, k, pnz, row;
1766: PetscInt am = A->rmap->n, pn = P->cmap->n;
1767: MPI_Comm comm;
1768: PetscMPIInt size, rank, tagi, tagj, *len_si, *len_s, *len_ri, proc;
1769: PetscInt **buf_rj, **buf_ri, **buf_ri_k;
1770: PetscInt len, *dnz, *onz, *owners;
1771: PetscInt nzi, *bi, *bj;
1772: PetscInt nrows, *buf_s, *buf_si, *buf_si_i, **nextrow, **nextci;
1773: MPI_Request *swaits, *rwaits;
1774: MPI_Status *sstatus, rstatus;
1775: MatMergeSeqsToMPI *merge;
1776: PetscInt *ai, *aj, *Jptr, anz, *prmap = p->garray, pon, nspacedouble = 0, j;
1777: PetscReal afill = 1.0, afill_tmp;
1778: PetscInt rstart = P->cmap->rstart, rmax, Armax;
1779: Mat_SeqAIJ *a_loc;
1780: PetscHMapI ta;
1781: MatType mtype;
1783: PetscFunctionBegin;
1784: PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
1785: /* check if matrix local sizes are compatible */
1786: PetscCheck(A->rmap->rstart == P->rmap->rstart && A->rmap->rend == P->rmap->rend, comm, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, A (%" PetscInt_FMT ", %" PetscInt_FMT ") != P (%" PetscInt_FMT ",%" PetscInt_FMT ")", A->rmap->rstart,
1787: A->rmap->rend, P->rmap->rstart, P->rmap->rend);
1789: PetscCallMPI(MPI_Comm_size(comm, &size));
1790: PetscCallMPI(MPI_Comm_rank(comm, &rank));
1792: /* create struct MatProductCtx_APMPI and attach it to C later */
1793: PetscCall(PetscNew(&ap));
1795: /* get A_loc by taking all local rows of A */
1796: PetscCall(MatMPIAIJGetLocalMat(A, MAT_INITIAL_MATRIX, &A_loc));
1798: ap->A_loc = A_loc;
1799: a_loc = (Mat_SeqAIJ *)A_loc->data;
1800: ai = a_loc->i;
1801: aj = a_loc->j;
1803: /* determine symbolic Co=(p->B)^T*A - send to others */
1804: PetscCall(MatGetSymbolicTranspose_SeqAIJ(p->A, &pdti, &pdtj));
1805: PetscCall(MatGetSymbolicTranspose_SeqAIJ(p->B, &poti, &potj));
1806: pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors
1807: >= (num of nonzero rows of C_seq) - pn */
1808: PetscCall(PetscMalloc1(pon + 1, &coi));
1809: coi[0] = 0;
1811: /* set initial free space to be fill*(nnz(p->B) + nnz(A)) */
1812: nnz = PetscRealIntMultTruncate(fill, PetscIntSumTruncate(poti[pon], ai[am]));
1813: PetscCall(PetscFreeSpaceGet(nnz, &free_space));
1814: current_space = free_space;
1816: /* create and initialize a linked list */
1817: PetscCall(PetscHMapICreateWithSize(A->cmap->n + a->B->cmap->N, &ta));
1818: MatRowMergeMax_SeqAIJ(a_loc, am, ta);
1819: PetscCall(PetscHMapIGetSize(ta, &Armax));
1821: PetscCall(PetscLLCondensedCreate_Scalable(Armax, &lnk));
1823: for (i = 0; i < pon; i++) {
1824: pnz = poti[i + 1] - poti[i];
1825: ptJ = potj + poti[i];
1826: for (j = 0; j < pnz; j++) {
1827: row = ptJ[j]; /* row of A_loc == col of Pot */
1828: anz = ai[row + 1] - ai[row];
1829: Jptr = aj + ai[row];
1830: /* add non-zero cols of AP into the sorted linked list lnk */
1831: PetscCall(PetscLLCondensedAddSorted_Scalable(anz, Jptr, lnk));
1832: }
1833: nnz = lnk[0];
1835: /* If free space is not available, double the total space in the list */
1836: if (current_space->local_remaining < nnz) {
1837: PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(nnz, current_space->total_array_size), &current_space));
1838: nspacedouble++;
1839: }
1841: /* Copy data into free space, and zero out denserows */
1842: PetscCall(PetscLLCondensedClean_Scalable(nnz, current_space->array, lnk));
1844: current_space->array += nnz;
1845: current_space->local_used += nnz;
1846: current_space->local_remaining -= nnz;
1848: coi[i + 1] = coi[i] + nnz;
1849: }
1851: PetscCall(PetscMalloc1(coi[pon], &coj));
1852: PetscCall(PetscFreeSpaceContiguous(&free_space, coj));
1853: PetscCall(PetscLLCondensedDestroy_Scalable(lnk)); /* must destroy to get a new one for C */
1855: afill_tmp = (PetscReal)coi[pon] / (poti[pon] + ai[am] + 1);
1856: if (afill_tmp > afill) afill = afill_tmp;
1858: /* send j-array (coj) of Co to other processors */
1859: /* determine row ownership */
1860: PetscCall(PetscNew(&merge));
1861: PetscCall(PetscLayoutCreate(comm, &merge->rowmap));
1863: merge->rowmap->n = pn;
1864: merge->rowmap->bs = 1;
1866: PetscCall(PetscLayoutSetUp(merge->rowmap));
1867: owners = merge->rowmap->range;
1869: /* determine the number of messages to send, their lengths */
1870: PetscCall(PetscCalloc1(size, &len_si));
1871: PetscCall(PetscCalloc1(size, &merge->len_s));
1873: len_s = merge->len_s;
1874: merge->nsend = 0;
1876: PetscCall(PetscMalloc1(size + 1, &owners_co));
1878: proc = 0;
1879: for (i = 0; i < pon; i++) {
1880: while (prmap[i] >= owners[proc + 1]) proc++;
1881: len_si[proc]++; /* num of rows in Co to be sent to [proc] */
1882: len_s[proc] += coi[i + 1] - coi[i];
1883: }
1885: len = 0; /* max length of buf_si[] */
1886: owners_co[0] = 0;
1887: for (proc = 0; proc < size; proc++) {
1888: owners_co[proc + 1] = owners_co[proc] + len_si[proc];
1889: if (len_s[proc]) {
1890: merge->nsend++;
1891: len_si[proc] = 2 * (len_si[proc] + 1);
1892: len += len_si[proc];
1893: }
1894: }
1896: /* determine the number and length of messages to receive for coi and coj */
1897: PetscCall(PetscGatherNumberOfMessages(comm, NULL, len_s, &merge->nrecv));
1898: PetscCall(PetscGatherMessageLengths2(comm, merge->nsend, merge->nrecv, len_s, len_si, &merge->id_r, &merge->len_r, &len_ri));
1900: /* post the Irecv and Isend of coj */
1901: PetscCall(PetscCommGetNewTag(comm, &tagj));
1902: PetscCall(PetscPostIrecvInt(comm, tagj, merge->nrecv, merge->id_r, merge->len_r, &buf_rj, &rwaits));
1903: PetscCall(PetscMalloc1(merge->nsend, &swaits));
1904: for (proc = 0, k = 0; proc < size; proc++) {
1905: if (!len_s[proc]) continue;
1906: i = owners_co[proc];
1907: PetscCallMPI(MPIU_Isend(coj + coi[i], len_s[proc], MPIU_INT, proc, tagj, comm, swaits + k));
1908: k++;
1909: }
1911: /* wait until the receives and sends of coj are complete */
1912: PetscCall(PetscMalloc1(size, &sstatus));
1913: for (i = 0; i < merge->nrecv; i++) {
1914: PETSC_UNUSED PetscMPIInt icompleted;
1915: PetscCallMPI(MPI_Waitany(merge->nrecv, rwaits, &icompleted, &rstatus));
1916: }
1917: PetscCall(PetscFree(rwaits));
1918: if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend, swaits, sstatus));
1920: /* add received column indices into table to update Armax */
1921: /* Armax can be as large as aN if a P[row,:] is dense, see src/ksp/ksp/tutorials/ex56.c! */
1922: for (k = 0; k < merge->nrecv; k++) { /* k-th received message */
1923: Jptr = buf_rj[k];
1924: for (j = 0; j < merge->len_r[k]; j++) PetscCall(PetscHMapISet(ta, *(Jptr + j) + 1, 1));
1925: }
1926: PetscCall(PetscHMapIGetSize(ta, &Armax));
1928: /* send and recv coi */
1929: PetscCall(PetscCommGetNewTag(comm, &tagi));
1930: PetscCall(PetscPostIrecvInt(comm, tagi, merge->nrecv, merge->id_r, len_ri, &buf_ri, &rwaits));
1931: PetscCall(PetscMalloc1(len, &buf_s));
1932: buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
1933: for (proc = 0, k = 0; proc < size; proc++) {
1934: if (!len_s[proc]) continue;
1935: /* form outgoing message for i-structure:
1936: buf_si[0]: nrows to be sent
1937: [1:nrows]: row index (global)
1938: [nrows+1:2*nrows+1]: i-structure index
1939: */
1940: nrows = len_si[proc] / 2 - 1;
1941: buf_si_i = buf_si + nrows + 1;
1942: buf_si[0] = nrows;
1943: buf_si_i[0] = 0;
1944: nrows = 0;
1945: for (i = owners_co[proc]; i < owners_co[proc + 1]; i++) {
1946: nzi = coi[i + 1] - coi[i];
1947: buf_si_i[nrows + 1] = buf_si_i[nrows] + nzi; /* i-structure */
1948: buf_si[nrows + 1] = prmap[i] - owners[proc]; /* local row index */
1949: nrows++;
1950: }
1951: PetscCallMPI(MPIU_Isend(buf_si, len_si[proc], MPIU_INT, proc, tagi, comm, swaits + k));
1952: k++;
1953: buf_si += len_si[proc];
1954: }
1955: i = merge->nrecv;
1956: while (i--) {
1957: PETSC_UNUSED PetscMPIInt icompleted;
1958: PetscCallMPI(MPI_Waitany(merge->nrecv, rwaits, &icompleted, &rstatus));
1959: }
1960: PetscCall(PetscFree(rwaits));
1961: if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend, swaits, sstatus));
1962: PetscCall(PetscFree(len_si));
1963: PetscCall(PetscFree(len_ri));
1964: PetscCall(PetscFree(swaits));
1965: PetscCall(PetscFree(sstatus));
1966: PetscCall(PetscFree(buf_s));
1968: /* compute the local portion of C (mpi mat) */
1969: /* allocate bi array and free space for accumulating nonzero column info */
1970: PetscCall(PetscMalloc1(pn + 1, &bi));
1971: bi[0] = 0;
1973: /* set initial free space to be fill*(nnz(P) + nnz(AP)) */
1974: nnz = PetscRealIntMultTruncate(fill, PetscIntSumTruncate(pdti[pn], PetscIntSumTruncate(poti[pon], ai[am])));
1975: PetscCall(PetscFreeSpaceGet(nnz, &free_space));
1976: current_space = free_space;
1978: PetscCall(PetscMalloc3(merge->nrecv, &buf_ri_k, merge->nrecv, &nextrow, merge->nrecv, &nextci));
1979: for (k = 0; k < merge->nrecv; k++) {
1980: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
1981: nrows = *buf_ri_k[k];
1982: nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th recved i-structure */
1983: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
1984: }
1986: PetscCall(PetscLLCondensedCreate_Scalable(Armax, &lnk));
1987: MatPreallocateBegin(comm, pn, A->cmap->n, dnz, onz);
1988: rmax = 0;
1989: for (i = 0; i < pn; i++) {
1990: /* add pdt[i,:]*AP into lnk */
1991: pnz = pdti[i + 1] - pdti[i];
1992: ptJ = pdtj + pdti[i];
1993: for (j = 0; j < pnz; j++) {
1994: row = ptJ[j]; /* row of AP == col of Pt */
1995: anz = ai[row + 1] - ai[row];
1996: Jptr = aj + ai[row];
1997: /* add non-zero cols of AP into the sorted linked list lnk */
1998: PetscCall(PetscLLCondensedAddSorted_Scalable(anz, Jptr, lnk));
1999: }
2001: /* add received col data into lnk */
2002: for (k = 0; k < merge->nrecv; k++) { /* k-th received message */
2003: if (i == *nextrow[k]) { /* i-th row */
2004: nzi = *(nextci[k] + 1) - *nextci[k];
2005: Jptr = buf_rj[k] + *nextci[k];
2006: PetscCall(PetscLLCondensedAddSorted_Scalable(nzi, Jptr, lnk));
2007: nextrow[k]++;
2008: nextci[k]++;
2009: }
2010: }
2012: /* add missing diagonal entry */
2013: if (C->force_diagonals) {
2014: k = i + owners[rank]; /* column index */
2015: PetscCall(PetscLLCondensedAddSorted_Scalable(1, &k, lnk));
2016: }
2018: nnz = lnk[0];
2020: /* if free space is not available, make more free space */
2021: if (current_space->local_remaining < nnz) {
2022: PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(nnz, current_space->total_array_size), &current_space));
2023: nspacedouble++;
2024: }
2025: /* copy data into free space, then initialize lnk */
2026: PetscCall(PetscLLCondensedClean_Scalable(nnz, current_space->array, lnk));
2027: PetscCall(MatPreallocateSet(i + owners[rank], nnz, current_space->array, dnz, onz));
2029: current_space->array += nnz;
2030: current_space->local_used += nnz;
2031: current_space->local_remaining -= nnz;
2033: bi[i + 1] = bi[i] + nnz;
2034: if (nnz > rmax) rmax = nnz;
2035: }
2036: PetscCall(PetscFree3(buf_ri_k, nextrow, nextci));
2038: PetscCall(PetscMalloc1(bi[pn], &bj));
2039: PetscCall(PetscFreeSpaceContiguous(&free_space, bj));
2040: afill_tmp = (PetscReal)bi[pn] / (pdti[pn] + poti[pon] + ai[am] + 1);
2041: if (afill_tmp > afill) afill = afill_tmp;
2042: PetscCall(PetscLLCondensedDestroy_Scalable(lnk));
2043: PetscCall(PetscHMapIDestroy(&ta));
2044: PetscCall(MatRestoreSymbolicTranspose_SeqAIJ(p->A, &pdti, &pdtj));
2045: PetscCall(MatRestoreSymbolicTranspose_SeqAIJ(p->B, &poti, &potj));
2047: /* create the symbolic parallel matrix C - why can it not be assembled in the numeric part? */
2048: PetscCall(MatSetSizes(C, pn, A->cmap->n, PETSC_DETERMINE, PETSC_DETERMINE));
2049: PetscCall(MatSetBlockSizes(C, P->cmap->bs, A->cmap->bs));
2050: PetscCall(MatGetType(A, &mtype));
2051: PetscCall(MatSetType(C, mtype));
2052: PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz));
2053: MatPreallocateEnd(dnz, onz);
2054: PetscCall(MatSetBlockSize(C, 1));
2055: PetscCall(MatSetOption(C, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
2056: for (i = 0; i < pn; i++) {
2057: row = i + rstart;
2058: nnz = bi[i + 1] - bi[i];
2059: Jptr = bj + bi[i];
2060: PetscCall(MatSetValues(C, 1, &row, nnz, Jptr, NULL, INSERT_VALUES));
2061: }
2062: PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
2063: PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
2064: PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
2065: merge->bi = bi;
2066: merge->bj = bj;
2067: merge->coi = coi;
2068: merge->coj = coj;
2069: merge->buf_ri = buf_ri;
2070: merge->buf_rj = buf_rj;
2071: merge->owners_co = owners_co;
2073: /* attach the supporting struct to C for reuse */
2074: C->product->data = ap;
2075: C->product->destroy = MatProductCtxDestroy_MPIAIJ_PtAP;
2076: ap->merge = merge;
2078: C->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ;
2080: #if defined(PETSC_USE_INFO)
2081: if (bi[pn] != 0) {
2082: PetscCall(PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, (double)afill));
2083: PetscCall(PetscInfo(C, "Use MatTransposeMatMult(A,B,MatReuse,%g,&C) for best performance.\n", (double)afill));
2084: } else {
2085: PetscCall(PetscInfo(C, "Empty matrix product\n"));
2086: }
2087: #endif
2088: PetscFunctionReturn(PETSC_SUCCESS);
2089: }
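/*
   Dispatch the symbolic phase of C = A^T*B to the requested algorithm: "scalable", "nonscalable",
   "at*b" (explicit transpose of A followed by an A*B product), or the general backend code path.
*/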
2091: static PetscErrorCode MatProductSymbolic_AtB_MPIAIJ_MPIAIJ(Mat C)
2092: {
2093: Mat_Product *product = C->product;
2094: Mat A = product->A, B = product->B;
2095: PetscReal fill = product->fill;
2096: PetscBool flg;
2098: PetscFunctionBegin;
2099: /* scalable */
2100: PetscCall(PetscStrcmp(product->alg, "scalable", &flg));
2101: if (flg) {
2102: PetscCall(MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(A, B, fill, C));
2103: goto next;
2104: }
2106: /* nonscalable */
2107: PetscCall(PetscStrcmp(product->alg, "nonscalable", &flg));
2108: if (flg) {
2109: PetscCall(MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A, B, fill, C));
2110: goto next;
2111: }
2113: /* matmatmult */
2114: PetscCall(PetscStrcmp(product->alg, "at*b", &flg));
2115: if (flg) {
2116: Mat At;
2117: MatProductCtx_APMPI *ptap;
2119: PetscCall(MatTranspose(A, MAT_INITIAL_MATRIX, &At));
2120: PetscCall(MatMatMultSymbolic_MPIAIJ_MPIAIJ(At, B, fill, C));
2121: ptap = (MatProductCtx_APMPI *)C->product->data;
2122: if (ptap) {
2123: ptap->Pt = At;
2124: C->product->destroy = MatProductCtxDestroy_MPIAIJ_PtAP;
2125: }
2126: C->ops->transposematmultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult;
2127: goto next;
2128: }
2130: /* backend general code */
2131: PetscCall(PetscStrcmp(product->alg, "backend", &flg));
2132: if (flg) {
2133: PetscCall(MatProductSymbolic_MPIAIJBACKEND(C));
2134: PetscFunctionReturn(PETSC_SUCCESS);
2135: }
2137: SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MatProduct type is not supported");
2139: next:
2140: C->ops->productnumeric = MatProductNumeric_AtB;
2141: PetscFunctionReturn(PETSC_SUCCESS);
2142: }
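/*
   The routine below chooses the algorithm used for the C = A*B product of two MPIAIJ matrices and
   installs MatProductSymbolic_AB_MPIAIJ_MPIAIJ. An illustrative usage sketch of the product
   interface (assuming A and B are assembled MATMPIAIJ matrices with compatible sizes; the
   algorithm name is an example choice) is

     Mat C;
     PetscCall(MatProductCreate(A, B, NULL, &C));
     PetscCall(MatProductSetType(C, MATPRODUCT_AB));
     PetscCall(MatProductSetAlgorithm(C, "scalable"));   (or "nonscalable", "seqmpi", "backend")
     PetscCall(MatProductSetFromOptions(C));
     PetscCall(MatProductSymbolic(C));
     PetscCall(MatProductNumeric(C));
     PetscCall(MatDestroy(&C));

   The same choice can be made at run time with -matmatmult_via <alg> (MatMatMult interface) or
   -mat_product_algorithm <alg> (MatProduct interface).
*/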
2144: /* Set options for MatMatMultxxx_MPIAIJ_MPIAIJ */
2145: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_AB(Mat C)
2146: {
2147: Mat_Product *product = C->product;
2148: Mat A = product->A, B = product->B;
2149: #if defined(PETSC_HAVE_HYPRE)
2150: const char *algTypes[5] = {"scalable", "nonscalable", "seqmpi", "backend", "hypre"};
2151: PetscInt nalg = 5;
2152: #else
2153: const char *algTypes[4] = {
2154: "scalable",
2155: "nonscalable",
2156: "seqmpi",
2157: "backend",
2158: };
2159: PetscInt nalg = 4;
2160: #endif
2161: PetscInt alg = 1; /* set nonscalable algorithm as default */
2162: PetscBool flg;
2163: MPI_Comm comm;
2165: PetscFunctionBegin;
2166: PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
2168: /* Set "nonscalable" as default algorithm */
2169: PetscCall(PetscStrcmp(C->product->alg, "default", &flg));
2170: if (flg) {
2171: PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2173: /* Set "scalable" as default if BN and local nonzeros of A and B are large */
2174: if (B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
2175: MatInfo Ainfo, Binfo;
2176: PetscInt nz_local;
2177: PetscBool alg_scalable_loc = PETSC_FALSE, alg_scalable;
2179: PetscCall(MatGetInfo(A, MAT_LOCAL, &Ainfo));
2180: PetscCall(MatGetInfo(B, MAT_LOCAL, &Binfo));
2181: nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);
2183: if (B->cmap->N > product->fill * nz_local) alg_scalable_loc = PETSC_TRUE;
2184: PetscCallMPI(MPIU_Allreduce(&alg_scalable_loc, &alg_scalable, 1, MPI_C_BOOL, MPI_LOR, comm));
2186: if (alg_scalable) {
2187: alg = 0; /* the scalable algorithm can be about 50% slower than the nonscalable algorithm */
2188: PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2189: PetscCall(PetscInfo(B, "Use scalable algorithm, BN %" PetscInt_FMT ", fill*nz_allocated %g\n", B->cmap->N, (double)(product->fill * nz_local)));
2190: }
2191: }
2192: }
2194: /* Get runtime option */
2195: if (product->api_user) {
2196: PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatMatMult", "Mat");
2197: PetscCall(PetscOptionsEList("-matmatmult_via", "Algorithmic approach", "MatMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
2198: PetscOptionsEnd();
2199: } else {
2200: PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_AB", "Mat");
2201: PetscCall(PetscOptionsEList("-mat_product_algorithm", "Algorithmic approach", "MatMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
2202: PetscOptionsEnd();
2203: }
2204: if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2206: C->ops->productsymbolic = MatProductSymbolic_AB_MPIAIJ_MPIAIJ;
2207: PetscFunctionReturn(PETSC_SUCCESS);
2208: }
2210: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_ABt(Mat C)
2211: {
2212: PetscFunctionBegin;
2213: PetscCall(MatProductSetFromOptions_MPIAIJ_AB(C));
2214: C->ops->productsymbolic = MatProductSymbolic_ABt_MPIAIJ_MPIAIJ;
2215: PetscFunctionReturn(PETSC_SUCCESS);
2216: }
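/*
   The routine below chooses the algorithm for C = A^T*B with MPIAIJ matrices and installs
   MatProductSymbolic_AtB_MPIAIJ_MPIAIJ. An illustrative usage sketch (assuming A and B are
   assembled MATMPIAIJ matrices with matching row layouts; the fill estimate of 2.0 is an example
   value) is

     Mat C;
     PetscCall(MatTransposeMatMult(A, B, MAT_INITIAL_MATRIX, 2.0, &C));
     PetscCall(MatTransposeMatMult(A, B, MAT_REUSE_MATRIX, 2.0, &C));   (reuses the symbolic data)
     PetscCall(MatDestroy(&C));

   The algorithm can be selected at run time with -mattransposematmult_via <alg> or
   -mat_product_algorithm <alg>.
*/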
2218: /* Set options for MatTransposeMatMultXXX_MPIAIJ_MPIAIJ */
2219: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_AtB(Mat C)
2220: {
2221: Mat_Product *product = C->product;
2222: Mat A = product->A, B = product->B;
2223: const char *algTypes[4] = {"scalable", "nonscalable", "at*b", "backend"};
2224: PetscInt nalg = 4;
2225: PetscInt alg = 1; /* set default algorithm */
2226: PetscBool flg;
2227: MPI_Comm comm;
2229: PetscFunctionBegin;
2230: /* Check matrix local sizes */
2231: PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
2232: PetscCheck(A->rmap->rstart == B->rmap->rstart && A->rmap->rend == B->rmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, A (%" PetscInt_FMT ", %" PetscInt_FMT ") != B (%" PetscInt_FMT ",%" PetscInt_FMT ")",
2233: A->rmap->rstart, A->rmap->rend, B->rmap->rstart, B->rmap->rend);
2235: /* Set default algorithm */
2236: PetscCall(PetscStrcmp(C->product->alg, "default", &flg));
2237: if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2239: /* Set "scalable" as default if BN and local nonzeros of A and B are large */
2240: if (alg && B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
2241: MatInfo Ainfo, Binfo;
2242: PetscInt nz_local;
2243: PetscBool alg_scalable_loc = PETSC_FALSE, alg_scalable;
2245: PetscCall(MatGetInfo(A, MAT_LOCAL, &Ainfo));
2246: PetscCall(MatGetInfo(B, MAT_LOCAL, &Binfo));
2247: nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);
2249: if (B->cmap->N > product->fill * nz_local) alg_scalable_loc = PETSC_TRUE;
2250: PetscCallMPI(MPIU_Allreduce(&alg_scalable_loc, &alg_scalable, 1, MPI_C_BOOL, MPI_LOR, comm));
2252: if (alg_scalable) {
2253: alg = 0; /* the scalable algorithm can be about 50% slower than the nonscalable algorithm */
2254: PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2255: PetscCall(PetscInfo(B, "Use scalable algorithm, BN %" PetscInt_FMT ", fill*nz_allocated %g\n", B->cmap->N, (double)(product->fill * nz_local)));
2256: }
2257: }
2259: /* Get runtime option */
2260: if (product->api_user) {
2261: PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatTransposeMatMult", "Mat");
2262: PetscCall(PetscOptionsEList("-mattransposematmult_via", "Algorithmic approach", "MatTransposeMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
2263: PetscOptionsEnd();
2264: } else {
2265: PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_AtB", "Mat");
2266: PetscCall(PetscOptionsEList("-mat_product_algorithm", "Algorithmic approach", "MatTransposeMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
2267: PetscOptionsEnd();
2268: }
2269: if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2271: C->ops->productsymbolic = MatProductSymbolic_AtB_MPIAIJ_MPIAIJ;
2272: PetscFunctionReturn(PETSC_SUCCESS);
2273: }
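/*
   The routine below chooses the algorithm for the triple product C = P^T*A*P and installs
   MatProductSymbolic_PtAP_MPIAIJ_MPIAIJ. An illustrative usage sketch (assuming A and P are
   assembled MATMPIAIJ matrices with compatible layouts; the fill estimate of 2.0 is an example
   value) is

     Mat C;
     PetscCall(MatPtAP(A, P, MAT_INITIAL_MATRIX, 2.0, &C));
     PetscCall(MatPtAP(A, P, MAT_REUSE_MATRIX, 2.0, &C));   (recomputes values, reusing the pattern)
     PetscCall(MatDestroy(&C));

   The algorithm can be selected at run time with -matptap_via <alg> or -mat_product_algorithm <alg>.
*/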
2275: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_PtAP(Mat C)
2276: {
2277: Mat_Product *product = C->product;
2278: Mat A = product->A, P = product->B;
2279: MPI_Comm comm;
2280: PetscBool flg;
2281: PetscInt alg = 1; /* set default algorithm */
2282: #if !defined(PETSC_HAVE_HYPRE)
2283: const char *algTypes[5] = {"scalable", "nonscalable", "allatonce", "allatonce_merged", "backend"};
2284: PetscInt nalg = 5;
2285: #else
2286: const char *algTypes[6] = {"scalable", "nonscalable", "allatonce", "allatonce_merged", "backend", "hypre"};
2287: PetscInt nalg = 6;
2288: #endif
2289: PetscInt pN = P->cmap->N;
2291: PetscFunctionBegin;
2292: /* Check matrix local sizes */
2293: PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
2294: PetscCheck(A->rmap->rstart == P->rmap->rstart && A->rmap->rend == P->rmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, Arow (%" PetscInt_FMT ", %" PetscInt_FMT ") != Prow (%" PetscInt_FMT ",%" PetscInt_FMT ")",
2295: A->rmap->rstart, A->rmap->rend, P->rmap->rstart, P->rmap->rend);
2296: PetscCheck(A->cmap->rstart == P->rmap->rstart && A->cmap->rend == P->rmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, Acol (%" PetscInt_FMT ", %" PetscInt_FMT ") != Prow (%" PetscInt_FMT ",%" PetscInt_FMT ")",
2297: A->cmap->rstart, A->cmap->rend, P->rmap->rstart, P->rmap->rend);
2299: /* Set "nonscalable" as default algorithm */
2300: PetscCall(PetscStrcmp(C->product->alg, "default", &flg));
2301: if (flg) {
2302: PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2304: /* Set "scalable" as default if BN and local nonzeros of A and B are large */
2305: if (pN > 100000) {
2306: MatInfo Ainfo, Pinfo;
2307: PetscInt nz_local;
2308: PetscBool alg_scalable_loc = PETSC_FALSE, alg_scalable;
2310: PetscCall(MatGetInfo(A, MAT_LOCAL, &Ainfo));
2311: PetscCall(MatGetInfo(P, MAT_LOCAL, &Pinfo));
2312: nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated);
2314: if (pN > product->fill * nz_local) alg_scalable_loc = PETSC_TRUE;
2315: PetscCallMPI(MPIU_Allreduce(&alg_scalable_loc, &alg_scalable, 1, MPI_C_BOOL, MPI_LOR, comm));
2317: if (alg_scalable) {
2318: alg = 0; /* the scalable algorithm can be about 50% slower than the nonscalable algorithm */
2319: PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2320: }
2321: }
2322: }
2324: /* Get runtime option */
2325: if (product->api_user) {
2326: PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatPtAP", "Mat");
2327: PetscCall(PetscOptionsEList("-matptap_via", "Algorithmic approach", "MatPtAP", algTypes, nalg, algTypes[alg], &alg, &flg));
2328: PetscOptionsEnd();
2329: } else {
2330: PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_PtAP", "Mat");
2331: PetscCall(PetscOptionsEList("-mat_product_algorithm", "Algorithmic approach", "MatPtAP", algTypes, nalg, algTypes[alg], &alg, &flg));
2332: PetscOptionsEnd();
2333: }
2334: if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2336: C->ops->productsymbolic = MatProductSymbolic_PtAP_MPIAIJ_MPIAIJ;
2337: PetscFunctionReturn(PETSC_SUCCESS);
2338: }
2340: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_RARt(Mat C)
2341: {
2342: Mat_Product *product = C->product;
2343: Mat A = product->A, R = product->B;
2345: PetscFunctionBegin;
2346: /* Check matrix local sizes */
2347: PetscCheck(A->cmap->n == R->cmap->n && A->rmap->n == R->cmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, A local (%" PetscInt_FMT ", %" PetscInt_FMT "), R local (%" PetscInt_FMT ",%" PetscInt_FMT ")", A->rmap->n,
2348: A->rmap->n, R->rmap->n, R->cmap->n);
2350: C->ops->productsymbolic = MatProductSymbolic_RARt_MPIAIJ_MPIAIJ;
2351: PetscFunctionReturn(PETSC_SUCCESS);
2352: }
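/*
   An illustrative usage sketch for the triple product D = A*B*C (assuming A, B, and C are
   assembled MATMPIAIJ matrices with compatible sizes; the fill estimate of 2.0 is an example
   value) is

     Mat D;
     PetscCall(MatMatMatMult(A, B, C, MAT_INITIAL_MATRIX, 2.0, &D));
     PetscCall(MatDestroy(&D));

   The algorithm can be selected at run time with -matmatmatmult_via <alg> or
   -mat_product_algorithm <alg>.
*/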
2354: /*
2355: Set options for ABC = A*B*C = A*(B*C); ABC's algorithm must be one of AB's algorithms
2356: */
2357: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_ABC(Mat C)
2358: {
2359: Mat_Product *product = C->product;
2360: PetscBool flg = PETSC_FALSE;
2361: PetscInt alg = 1; /* default algorithm */
2362: const char *algTypes[3] = {"scalable", "nonscalable", "seqmpi"};
2363: PetscInt nalg = 3;
2365: PetscFunctionBegin;
2366: /* Set default algorithm */
2367: PetscCall(PetscStrcmp(C->product->alg, "default", &flg));
2368: if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2370: /* Get runtime option */
2371: if (product->api_user) {
2372: PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatMatMatMult", "Mat");
2373: PetscCall(PetscOptionsEList("-matmatmatmult_via", "Algorithmic approach", "MatMatMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
2374: PetscOptionsEnd();
2375: } else {
2376: PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_ABC", "Mat");
2377: PetscCall(PetscOptionsEList("-mat_product_algorithm", "Algorithmic approach", "MatProduct_ABC", algTypes, nalg, algTypes[alg], &alg, &flg));
2378: PetscOptionsEnd();
2379: }
2380: if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2382: C->ops->matmatmultsymbolic = MatMatMatMultSymbolic_MPIAIJ_MPIAIJ_MPIAIJ;
2383: C->ops->productsymbolic = MatProductSymbolic_ABC;
2384: PetscFunctionReturn(PETSC_SUCCESS);
2385: }
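/*
   Top-level MatProductSetFromOptions() implementation for MPIAIJ: dispatch on the product type
   (AB, ABt, AtB, PtAP, RARt, ABC) to the specific option handlers above, which in turn install
   the appropriate symbolic routine in C->ops->productsymbolic.
*/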
2387: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ(Mat C)
2388: {
2389: Mat_Product *product = C->product;
2391: PetscFunctionBegin;
2392: switch (product->type) {
2393: case MATPRODUCT_AB:
2394: PetscCall(MatProductSetFromOptions_MPIAIJ_AB(C));
2395: break;
2396: case MATPRODUCT_ABt:
2397: PetscCall(MatProductSetFromOptions_MPIAIJ_ABt(C));
2398: break;
2399: case MATPRODUCT_AtB:
2400: PetscCall(MatProductSetFromOptions_MPIAIJ_AtB(C));
2401: break;
2402: case MATPRODUCT_PtAP:
2403: PetscCall(MatProductSetFromOptions_MPIAIJ_PtAP(C));
2404: break;
2405: case MATPRODUCT_RARt:
2406: PetscCall(MatProductSetFromOptions_MPIAIJ_RARt(C));
2407: break;
2408: case MATPRODUCT_ABC:
2409: PetscCall(MatProductSetFromOptions_MPIAIJ_ABC(C));
2410: break;
2411: default:
2412: break;
2413: }
2414: PetscFunctionReturn(PETSC_SUCCESS);
2415: }