Actual source code: mpimatmatmult.c

  1: /*
  2:   Defines matrix-matrix product routines for pairs of MPIAIJ matrices
  3:           C = A * B
  4: */
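
/*
  A minimal usage sketch of the public MatProduct calls that reach the routines
  in this file; it assumes A and B are assembled MPIAIJ matrices with
  compatible layouts and omits error handling:

    Mat C;
    PetscCall(MatProductCreate(A, B, NULL, &C));
    PetscCall(MatProductSetType(C, MATPRODUCT_AB));     // C = A*B
    PetscCall(MatProductSetFromOptions(C));             // selects one of the algorithms below
    PetscCall(MatProductSymbolic(C));                   // symbolic phase: preallocate C
    PetscCall(MatProductNumeric(C));                    // numeric phase: compute the values
    PetscCall(MatDestroy(&C));
*/
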
  5: #include <../src/mat/impls/aij/seq/aij.h>
  6: #include <../src/mat/utils/freespace.h>
  7: #include <../src/mat/impls/aij/mpi/mpiaij.h>
  8: #include <petscbt.h>
  9: #include <../src/mat/impls/dense/mpi/mpidense.h>
 10: #include <petsc/private/vecimpl.h>
 11: #include <petsc/private/sfimpl.h>

 13: #if defined(PETSC_HAVE_HYPRE)
 14: PETSC_INTERN PetscErrorCode MatMatMultSymbolic_AIJ_AIJ_wHYPRE(Mat, Mat, PetscReal, Mat);
 15: #endif

 17: PETSC_INTERN PetscErrorCode MatProductSymbolic_ABt_MPIAIJ_MPIAIJ(Mat C)
 18: {
 19:   Mat_Product *product = C->product;
 20:   Mat          B       = product->B;

 22:   PetscFunctionBegin;
 23:   PetscCall(MatTranspose(B, MAT_INITIAL_MATRIX, &product->B));
 24:   PetscCall(MatDestroy(&B));
 25:   PetscCall(MatProductSymbolic_AB_MPIAIJ_MPIAIJ(C));
 26:   PetscFunctionReturn(PETSC_SUCCESS);
 27: }

 29: PETSC_INTERN PetscErrorCode MatProductSymbolic_AB_MPIAIJ_MPIAIJ(Mat C)
 30: {
 31:   Mat_Product        *product = C->product;
 32:   Mat                 A = product->A, B = product->B;
 33:   MatProductAlgorithm alg  = product->alg;
 34:   PetscReal           fill = product->fill;
 35:   PetscBool           flg;

 37:   PetscFunctionBegin;
 38:   /* scalable */
 39:   PetscCall(PetscStrcmp(alg, "scalable", &flg));
 40:   if (flg) {
 41:     PetscCall(MatMatMultSymbolic_MPIAIJ_MPIAIJ(A, B, fill, C));
 42:     PetscFunctionReturn(PETSC_SUCCESS);
 43:   }

 45:   /* nonscalable */
 46:   PetscCall(PetscStrcmp(alg, "nonscalable", &flg));
 47:   if (flg) {
 48:     PetscCall(MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A, B, fill, C));
 49:     PetscFunctionReturn(PETSC_SUCCESS);
 50:   }

 52:   /* seqmpi */
 53:   PetscCall(PetscStrcmp(alg, "seqmpi", &flg));
 54:   if (flg) {
 55:     PetscCall(MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(A, B, fill, C));
 56:     PetscFunctionReturn(PETSC_SUCCESS);
 57:   }

 59:   /* backend general code */
 60:   PetscCall(PetscStrcmp(alg, "backend", &flg));
 61:   if (flg) {
 62:     PetscCall(MatProductSymbolic_MPIAIJBACKEND(C));
 63:     PetscFunctionReturn(PETSC_SUCCESS);
 64:   }

 66: #if defined(PETSC_HAVE_HYPRE)
 67:   PetscCall(PetscStrcmp(alg, "hypre", &flg));
 68:   if (flg) {
 69:     PetscCall(MatMatMultSymbolic_AIJ_AIJ_wHYPRE(A, B, fill, C));
 70:     PetscFunctionReturn(PETSC_SUCCESS);
 71:   }
 72: #endif
 73:   SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_SUP, "MatProduct algorithm %s is not supported", alg);
 74: }
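
/*
  A sketch of forcing one of the algorithms dispatched above, assuming C was
  obtained from MatProductCreate() and not yet set up; the strings are the
  keys compared against product->alg:

    PetscCall(MatProductSetAlgorithm(C, "scalable"));   // or "nonscalable", "seqmpi", "backend"
    PetscCall(MatProductSetFromOptions(C));
    PetscCall(MatProductSymbolic(C));
*/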

 76: PetscErrorCode MatProductCtxDestroy_MPIAIJ_MatMatMult(void **data)
 77: {
 78:   MatProductCtx_APMPI *ptap = *(MatProductCtx_APMPI **)data;

 80:   PetscFunctionBegin;
 81:   PetscCall(PetscFree2(ptap->startsj_s, ptap->startsj_r));
 82:   PetscCall(PetscFree(ptap->bufa));
 83:   PetscCall(MatDestroy(&ptap->P_loc));
 84:   PetscCall(MatDestroy(&ptap->P_oth));
 85:   PetscCall(MatDestroy(&ptap->Pt));
 86:   PetscCall(PetscFree(ptap->api));
 87:   PetscCall(PetscFree(ptap->apj));
 88:   PetscCall(PetscFree(ptap->apa));
 89:   PetscCall(PetscFree(ptap));
 90:   PetscFunctionReturn(PETSC_SUCCESS);
 91: }

 93: PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat A, Mat P, Mat C)
 94: {
 95:   Mat_MPIAIJ          *a = (Mat_MPIAIJ *)A->data, *c = (Mat_MPIAIJ *)C->data;
 96:   Mat_SeqAIJ          *ad = (Mat_SeqAIJ *)a->A->data, *ao = (Mat_SeqAIJ *)a->B->data;
 97:   Mat_SeqAIJ          *cd = (Mat_SeqAIJ *)c->A->data, *co = (Mat_SeqAIJ *)c->B->data;
 98:   PetscScalar         *cda, *coa;
 99:   Mat_SeqAIJ          *p_loc, *p_oth;
100:   PetscScalar         *apa, *ca;
101:   PetscInt             cm = C->rmap->n;
102:   MatProductCtx_APMPI *ptap;
103:   PetscInt            *api, *apj, *apJ, i, k;
104:   PetscInt             cstart = C->cmap->rstart;
105:   PetscInt             cdnz, conz, k0, k1;
106:   const PetscScalar   *dummy1, *dummy2, *dummy3, *dummy4;
107:   MPI_Comm             comm;
108:   PetscMPIInt          size;

110:   PetscFunctionBegin;
111:   MatCheckProduct(C, 3);
112:   ptap = (MatProductCtx_APMPI *)C->product->data;
113:   PetscCheck(ptap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "AP cannot be computed. Missing data");
114:   PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
115:   PetscCallMPI(MPI_Comm_size(comm, &size));
116:   PetscCheck(ptap->P_oth || size <= 1, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "AP cannot be reused. Do not call MatProductClear()");

118:   /* flag CPU mask for C */
119: #if defined(PETSC_HAVE_DEVICE)
120:   if (C->offloadmask != PETSC_OFFLOAD_UNALLOCATED) C->offloadmask = PETSC_OFFLOAD_CPU;
121:   if (c->A->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->A->offloadmask = PETSC_OFFLOAD_CPU;
122:   if (c->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->B->offloadmask = PETSC_OFFLOAD_CPU;
123: #endif

125:   /* 1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
126:   /* update numerical values of P_oth and P_loc */
127:   PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_REUSE_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth));
128:   PetscCall(MatMPIAIJGetLocalMat(P, MAT_REUSE_MATRIX, &ptap->P_loc));

130:   /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
131:   /* get data from symbolic products */
132:   p_loc = (Mat_SeqAIJ *)ptap->P_loc->data;
133:   p_oth = NULL;
134:   if (size > 1) p_oth = (Mat_SeqAIJ *)ptap->P_oth->data;

136:   /* get apa for storing dense row A[i,:]*P */
137:   apa = ptap->apa;

139:   api = ptap->api;
140:   apj = ptap->apj;
141:   /* trigger copy to CPU */
142:   PetscCall(MatSeqAIJGetArrayRead(a->A, &dummy1));
143:   PetscCall(MatSeqAIJGetArrayRead(a->B, &dummy2));
144:   PetscCall(MatSeqAIJGetArrayRead(ptap->P_loc, &dummy3));
145:   if (ptap->P_oth) PetscCall(MatSeqAIJGetArrayRead(ptap->P_oth, &dummy4));
146:   PetscCall(MatSeqAIJGetArrayWrite(c->A, &cda));
147:   PetscCall(MatSeqAIJGetArrayWrite(c->B, &coa));
148:   for (i = 0; i < cm; i++) {
149:     /* compute apa = A[i,:]*P */
150:     AProw_nonscalable(i, ad, ao, p_loc, p_oth, apa);

152:     /* set values in C */
153:     apJ  = PetscSafePointerPlusOffset(apj, api[i]);
154:     cdnz = cd->i[i + 1] - cd->i[i];
155:     conz = co->i[i + 1] - co->i[i];

157:     /* 1st off-diagonal part of C */
158:     ca = PetscSafePointerPlusOffset(coa, co->i[i]);
159:     k  = 0;
160:     for (k0 = 0; k0 < conz; k0++) {
161:       if (apJ[k] >= cstart) break;
162:       ca[k0]        = apa[apJ[k]];
163:       apa[apJ[k++]] = 0.0;
164:     }

166:     /* diagonal part of C */
167:     ca = PetscSafePointerPlusOffset(cda, cd->i[i]);
168:     for (k1 = 0; k1 < cdnz; k1++) {
169:       ca[k1]        = apa[apJ[k]];
170:       apa[apJ[k++]] = 0.0;
171:     }

173:     /* 2nd off-diagonal part of C */
174:     ca = PetscSafePointerPlusOffset(coa, co->i[i]);
175:     for (; k0 < conz; k0++) {
176:       ca[k0]        = apa[apJ[k]];
177:       apa[apJ[k++]] = 0.0;
178:     }
179:   }
180:   PetscCall(MatSeqAIJRestoreArrayRead(a->A, &dummy1));
181:   PetscCall(MatSeqAIJRestoreArrayRead(a->B, &dummy2));
182:   PetscCall(MatSeqAIJRestoreArrayRead(ptap->P_loc, &dummy3));
183:   if (ptap->P_oth) PetscCall(MatSeqAIJRestoreArrayRead(ptap->P_oth, &dummy4));
184:   PetscCall(MatSeqAIJRestoreArrayWrite(c->A, &cda));
185:   PetscCall(MatSeqAIJRestoreArrayWrite(c->B, &coa));

187:   PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
188:   PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
189:   PetscFunctionReturn(PETSC_SUCCESS);
190: }
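
/*
  The row loop above first expands row i of A*P into the dense work array apa
  (indexed by global column, length pN) via AProw_nonscalable(), then drains
  it into the diagonal (cd) and off-diagonal (co) blocks of C, split at
  cstart. A self-contained sketch of the dense-accumulator step for one CSR
  row, with hypothetical argument names:

    static void RowTimesCSR(PetscInt anz, const PetscInt *acol, const PetscScalar *aval,
                            const PetscInt *pi, const PetscInt *pj, const PetscScalar *pa,
                            PetscScalar *apa) // dense work row, zeroed on entry
    {
      for (PetscInt j = 0; j < anz; j++) {
        for (PetscInt k = pi[acol[j]]; k < pi[acol[j] + 1]; k++) apa[pj[k]] += aval[j] * pa[k];
      }
    }
*/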

192: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat A, Mat P, PetscReal fill, Mat C)
193: {
194:   MPI_Comm             comm;
195:   PetscMPIInt          size;
196:   MatProductCtx_APMPI *ptap;
197:   PetscFreeSpaceList   free_space = NULL, current_space = NULL;
198:   Mat_MPIAIJ          *a  = (Mat_MPIAIJ *)A->data;
199:   Mat_SeqAIJ          *ad = (Mat_SeqAIJ *)a->A->data, *ao = (Mat_SeqAIJ *)a->B->data, *p_loc, *p_oth;
200:   PetscInt            *pi_loc, *pj_loc, *pi_oth, *pj_oth, *dnz, *onz;
201:   PetscInt            *adi = ad->i, *adj = ad->j, *aoi = ao->i, *aoj = ao->j, rstart = A->rmap->rstart;
202:   PetscInt            *lnk, i, pnz, row, *api, *apj, *Jptr, apnz, nspacedouble = 0, j, nzi;
203:   PetscInt             am = A->rmap->n, pN = P->cmap->N, pn = P->cmap->n, pm = P->rmap->n;
204:   PetscBT              lnkbt;
205:   PetscReal            afill;
206:   MatType              mtype;

208:   PetscFunctionBegin;
209:   MatCheckProduct(C, 4);
210:   PetscCheck(!C->product->data, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Extra product struct not empty");
211:   PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
212:   PetscCallMPI(MPI_Comm_size(comm, &size));

214:   /* create struct MatProductCtx_APMPI and attach it to C later */
215:   PetscCall(PetscNew(&ptap));

217:   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
218:   PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_INITIAL_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth));

220:   /* get P_loc by taking all local rows of P */
221:   PetscCall(MatMPIAIJGetLocalMat(P, MAT_INITIAL_MATRIX, &ptap->P_loc));

223:   p_loc  = (Mat_SeqAIJ *)ptap->P_loc->data;
224:   pi_loc = p_loc->i;
225:   pj_loc = p_loc->j;
226:   if (size > 1) {
227:     p_oth  = (Mat_SeqAIJ *)ptap->P_oth->data;
228:     pi_oth = p_oth->i;
229:     pj_oth = p_oth->j;
230:   } else {
231:     p_oth  = NULL;
232:     pi_oth = NULL;
233:     pj_oth = NULL;
234:   }

236:   /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
237:   PetscCall(PetscMalloc1(am + 1, &api));
238:   ptap->api = api;
239:   api[0]    = 0;

241:   /* create and initialize a linked list */
242:   PetscCall(PetscLLCondensedCreate(pN, pN, &lnk, &lnkbt));

244:   /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
245:   PetscCall(PetscFreeSpaceGet(PetscRealIntMultTruncate(fill, PetscIntSumTruncate(adi[am], PetscIntSumTruncate(aoi[am], pi_loc[pm]))), &free_space));
246:   current_space = free_space;

248:   MatPreallocateBegin(comm, am, pn, dnz, onz);
249:   for (i = 0; i < am; i++) {
250:     /* diagonal portion of A */
251:     nzi = adi[i + 1] - adi[i];
252:     for (j = 0; j < nzi; j++) {
253:       row  = *adj++;
254:       pnz  = pi_loc[row + 1] - pi_loc[row];
255:       Jptr = pj_loc + pi_loc[row];
256:       /* add non-zero cols of P into the sorted linked list lnk */
257:       PetscCall(PetscLLCondensedAddSorted(pnz, Jptr, lnk, lnkbt));
258:     }
259:     /* off-diagonal portion of A */
260:     nzi = aoi[i + 1] - aoi[i];
261:     for (j = 0; j < nzi; j++) {
262:       row  = *aoj++;
263:       pnz  = pi_oth[row + 1] - pi_oth[row];
264:       Jptr = pj_oth + pi_oth[row];
265:       PetscCall(PetscLLCondensedAddSorted(pnz, Jptr, lnk, lnkbt));
266:     }
267:     /* add possible missing diagonal entry */
268:     if (C->force_diagonals) {
269:       j = i + rstart; /* column index */
270:       PetscCall(PetscLLCondensedAddSorted(1, &j, lnk, lnkbt));
271:     }

273:     apnz       = lnk[0];
274:     api[i + 1] = api[i] + apnz;

276:     /* if free space is not available, double the total space in the list */
277:     if (current_space->local_remaining < apnz) {
278:       PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(apnz, current_space->total_array_size), &current_space));
279:       nspacedouble++;
280:     }

282:     /* Copy data into free space, then initialize lnk */
283:     PetscCall(PetscLLCondensedClean(pN, apnz, current_space->array, lnk, lnkbt));
284:     PetscCall(MatPreallocateSet(i + rstart, apnz, current_space->array, dnz, onz));

286:     current_space->array += apnz;
287:     current_space->local_used += apnz;
288:     current_space->local_remaining -= apnz;
289:   }

291:   /* Allocate space for apj, initialize apj, and */
292:   /* destroy list of free space and other temporary array(s) */
293:   PetscCall(PetscMalloc1(api[am], &ptap->apj));
294:   apj = ptap->apj;
295:   PetscCall(PetscFreeSpaceContiguous(&free_space, ptap->apj));
296:   PetscCall(PetscLLDestroy(lnk, lnkbt));

298:   /* malloc apa to store dense row A[i,:]*P */
299:   PetscCall(PetscCalloc1(pN, &ptap->apa));

301:   /* set and assemble symbolic parallel matrix C */
302:   PetscCall(MatSetSizes(C, am, pn, PETSC_DETERMINE, PETSC_DETERMINE));
303:   PetscCall(MatSetBlockSizesFromMats(C, A, P));

305:   PetscCall(MatGetType(A, &mtype));
306:   PetscCall(MatSetType(C, mtype));
307:   PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz));
308:   MatPreallocateEnd(dnz, onz);

310:   PetscCall(MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api));
311:   PetscCall(MatSetOption(C, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
312:   PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
313:   PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
314:   PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));

316:   C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
317:   C->ops->productnumeric = MatProductNumeric_AB;

319:   /* attach the supporting struct to C for reuse */
320:   C->product->data    = ptap;
321:   C->product->destroy = MatProductCtxDestroy_MPIAIJ_MatMatMult;

323:   /* set MatInfo */
324:   afill = (PetscReal)api[am] / (adi[am] + aoi[am] + pi_loc[pm] + 1) + 1.e-5;
325:   if (afill < 1.0) afill = 1.0;
326:   C->info.mallocs           = nspacedouble;
327:   C->info.fill_ratio_given  = fill;
328:   C->info.fill_ratio_needed = afill;

330: #if defined(PETSC_USE_INFO)
331:   if (api[am]) {
332:     PetscCall(PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, (double)afill));
333:     PetscCall(PetscInfo(C, "Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n", (double)afill));
334:   } else {
335:     PetscCall(PetscInfo(C, "Empty matrix product\n"));
336:   }
337: #endif
338:   PetscFunctionReturn(PETSC_SUCCESS);
339: }
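
/*
  Because the ptap context is attached to C->product, the expensive symbolic
  phase is paid once and the numeric phase can be repeated. A sketch with the
  classic interface, assuming the nonzero patterns of A and P do not change:

    Mat C;
    PetscCall(MatMatMult(A, P, MAT_INITIAL_MATRIX, fill, &C)); // symbolic + numeric
    // ... update the values of A and/or P ...
    PetscCall(MatMatMult(A, P, MAT_REUSE_MATRIX, fill, &C));   // numeric only, reuses ptap
*/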

341: static PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat, Mat, PetscReal, Mat);
342: static PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat, Mat, Mat);

344: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense_AB(Mat C)
345: {
346:   Mat_Product *product = C->product;
347:   Mat          A = product->A, B = product->B;

349:   PetscFunctionBegin;
350:   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend)
351:     SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ", %" PetscInt_FMT ")", A->cmap->rstart, A->cmap->rend, B->rmap->rstart, B->rmap->rend);

353:   C->ops->matmultsymbolic = MatMatMultSymbolic_MPIAIJ_MPIDense;
354:   C->ops->productsymbolic = MatProductSymbolic_AB;
355:   PetscFunctionReturn(PETSC_SUCCESS);
356: }

358: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense_AtB(Mat C)
359: {
360:   Mat_Product *product = C->product;
361:   Mat          A = product->A, B = product->B;

363:   PetscFunctionBegin;
364:   if (A->rmap->rstart != B->rmap->rstart || A->rmap->rend != B->rmap->rend)
365:     SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ", %" PetscInt_FMT ")", A->rmap->rstart, A->rmap->rend, B->rmap->rstart, B->rmap->rend);

367:   C->ops->transposematmultsymbolic = MatTransposeMatMultSymbolic_MPIAIJ_MPIDense;
368:   C->ops->productsymbolic          = MatProductSymbolic_AtB;
369:   PetscFunctionReturn(PETSC_SUCCESS);
370: }

372: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense(Mat C)
373: {
374:   Mat_Product *product = C->product;

376:   PetscFunctionBegin;
377:   switch (product->type) {
378:   case MATPRODUCT_AB:
379:     PetscCall(MatProductSetFromOptions_MPIAIJ_MPIDense_AB(C));
380:     break;
381:   case MATPRODUCT_AtB:
382:     PetscCall(MatProductSetFromOptions_MPIAIJ_MPIDense_AtB(C));
383:     break;
384:   default:
385:     break;
386:   }
387:   PetscFunctionReturn(PETSC_SUCCESS);
388: }

390: typedef struct {
391:   Mat           workB, workB1;
392:   MPI_Request  *rwaits, *swaits;
393:   PetscInt      nsends, nrecvs;
394:   MPI_Datatype *stype, *rtype;
395:   PetscInt      blda;
396: } MPIAIJ_MPIDense;
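
/*
  Product context for MPIAIJ * MPIDense: workB and workB1 receive the
  off-process rows of B (for the full-width and remainder column blocks,
  respectively), stype[]/rtype[] are the per-message MPI derived datatypes
  built in the symbolic phase, and blda records the leading dimension of B
  for which those types were constructed.
*/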

398: static PetscErrorCode MatMPIAIJ_MPIDenseDestroy(void **ctx)
399: {
400:   MPIAIJ_MPIDense *contents = *(MPIAIJ_MPIDense **)ctx;

402:   PetscFunctionBegin;
403:   PetscCall(MatDestroy(&contents->workB));
404:   PetscCall(MatDestroy(&contents->workB1));
405:   for (PetscInt i = 0; i < contents->nsends; i++) PetscCallMPI(MPI_Type_free(&contents->stype[i]));
406:   for (PetscInt i = 0; i < contents->nrecvs; i++) PetscCallMPI(MPI_Type_free(&contents->rtype[i]));
407:   PetscCall(PetscFree4(contents->stype, contents->rtype, contents->rwaits, contents->swaits));
408:   PetscCall(PetscFree(contents));
409:   PetscFunctionReturn(PETSC_SUCCESS);
410: }

412: static PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A, Mat B, PetscReal fill, Mat C)
413: {
414:   Mat_MPIAIJ      *aij = (Mat_MPIAIJ *)A->data;
415:   PetscInt         nz  = aij->B->cmap->n, blda, m, M, n, N;
416:   MPIAIJ_MPIDense *contents;
417:   VecScatter       ctx = aij->Mvctx;
418:   PetscInt         Am = A->rmap->n, Bm = B->rmap->n, BN = B->cmap->N, Bbn, Bbn1, bs, numBb;
419:   MPI_Comm         comm;
420:   MPI_Datatype     type1, *stype, *rtype;
421:   const PetscInt  *sindices, *sstarts, *rstarts;
422:   PetscMPIInt     *disp, nsends, nrecvs, nrows_to, nrows_from;
423:   PetscBool        cisdense;

425:   PetscFunctionBegin;
426:   MatCheckProduct(C, 4);
427:   PetscCheck(!C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Product data not empty");
428:   PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
429:   PetscCall(PetscObjectBaseTypeCompare((PetscObject)C, MATMPIDENSE, &cisdense));
430:   if (!cisdense) PetscCall(MatSetType(C, ((PetscObject)B)->type_name));
431:   PetscCall(MatGetLocalSize(C, &m, &n));
432:   PetscCall(MatGetSize(C, &M, &N));
433:   if (m == PETSC_DECIDE || n == PETSC_DECIDE || M == PETSC_DECIDE || N == PETSC_DECIDE) PetscCall(MatSetSizes(C, Am, B->cmap->n, A->rmap->N, BN));
434:   PetscCall(MatSetBlockSizesFromMats(C, A, B));
435:   PetscCall(MatSetUp(C));
436:   PetscCall(MatDenseGetLDA(B, &blda));
437:   PetscCall(PetscNew(&contents));

439:   PetscCall(VecScatterGetRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &sindices, NULL, NULL));
440:   PetscCall(VecScatterGetRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL, NULL, NULL));

442:   /* Create column block of B and C for memory scalability when BN is too large */
443:   /* Estimate Bbn, column size of Bb */
444:   if (nz) {
445:     Bbn1 = 2 * Am * BN / nz;
446:     if (!Bbn1) Bbn1 = 1;
447:   } else Bbn1 = BN;

449:   bs   = B->cmap->bs;
450:   Bbn1 = Bbn1 / bs * bs; /* Bbn1 is a multiple of bs */
451:   if (Bbn1 > BN) Bbn1 = BN;
452:   PetscCallMPI(MPIU_Allreduce(&Bbn1, &Bbn, 1, MPIU_INT, MPI_MAX, comm));

454:   /* Enable runtime option for Bbn */
455:   PetscOptionsBegin(comm, ((PetscObject)C)->prefix, "MatProduct", "Mat");
456:   PetscCall(PetscOptionsDeprecated("-matmatmult_Bbn", "-matproduct_batch_size", "3.25", NULL));
457:   PetscCall(PetscOptionsInt("-matproduct_batch_size", "Number of columns in Bb", "MatProduct", Bbn, &Bbn, NULL));
458:   PetscOptionsEnd();
459:   Bbn = PetscMin(Bbn, BN);

461:   if (Bbn > 0 && Bbn < BN) {
462:     numBb = BN / Bbn;
463:     Bbn1  = BN - numBb * Bbn;
464:   } else numBb = 0;

466:   if (numBb) {
467:     PetscCall(PetscInfo(C, "use Bb, BN=%" PetscInt_FMT ", Bbn=%" PetscInt_FMT "; numBb=%" PetscInt_FMT "\n", BN, Bbn, numBb));
468:     if (Bbn1) { /* Create workB1 for the remaining columns */
469:       PetscCall(PetscInfo(C, "use Bb1, BN=%" PetscInt_FMT ", Bbn1=%" PetscInt_FMT "\n", BN, Bbn1));
470:       /* Create work matrix used to store off processor rows of B needed for local product */
471:       PetscCall(MatCreateSeqDense(PETSC_COMM_SELF, nz, Bbn1, NULL, &contents->workB1));
472:     } else contents->workB1 = NULL;
473:   }

475:   /* Create work matrix used to store off processor rows of B needed for local product */
476:   PetscCall(MatCreateSeqDense(PETSC_COMM_SELF, nz, Bbn, NULL, &contents->workB));

478:   /* Use MPI derived data type to reduce memory required by the send/recv buffers */
479:   PetscCall(PetscMalloc4(nsends, &stype, nrecvs, &rtype, nrecvs, &contents->rwaits, nsends, &contents->swaits));
480:   contents->stype  = stype;
481:   contents->nsends = nsends;

483:   contents->rtype  = rtype;
484:   contents->nrecvs = nrecvs;
485:   contents->blda   = blda;

487:   PetscCall(PetscMalloc1(Bm, &disp));
488:   for (PetscMPIInt i = 0; i < nsends; i++) {
489:     PetscCall(PetscMPIIntCast(sstarts[i + 1] - sstarts[i], &nrows_to));
490:     for (PetscInt j = 0; j < nrows_to; j++) PetscCall(PetscMPIIntCast(sindices[sstarts[i] + j], &disp[j])); /* rowB to be sent */
491:     PetscCallMPI(MPI_Type_create_indexed_block(nrows_to, 1, disp, MPIU_SCALAR, &type1));
492:     PetscCallMPI(MPI_Type_create_resized(type1, 0, blda * sizeof(PetscScalar), &stype[i]));
493:     PetscCallMPI(MPI_Type_commit(&stype[i]));
494:     PetscCallMPI(MPI_Type_free(&type1));
495:   }

497:   for (PetscMPIInt i = 0; i < nrecvs; i++) {
498:     /* received values from a process form a (nrows_from x Bbn) row block in workB (column-wise) */
499:     PetscCall(PetscMPIIntCast(rstarts[i + 1] - rstarts[i], &nrows_from));
500:     disp[0] = 0;
501:     PetscCallMPI(MPI_Type_create_indexed_block(1, nrows_from, disp, MPIU_SCALAR, &type1));
502:     PetscCallMPI(MPI_Type_create_resized(type1, 0, nz * sizeof(PetscScalar), &rtype[i]));
503:     PetscCallMPI(MPI_Type_commit(&rtype[i]));
504:     PetscCallMPI(MPI_Type_free(&type1));
505:   }

507:   PetscCall(PetscFree(disp));
508:   PetscCall(VecScatterRestoreRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &sindices, NULL, NULL));
509:   PetscCall(VecScatterRestoreRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL, NULL, NULL));
510:   PetscCall(MatSetOption(C, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
511:   PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
512:   PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
513:   PetscCall(MatProductClear(aij->A));
514:   PetscCall(MatProductClear(((Mat_MPIDense *)B->data)->A));
515:   PetscCall(MatProductClear(((Mat_MPIDense *)C->data)->A));
516:   PetscCall(MatProductCreateWithMat(aij->A, ((Mat_MPIDense *)B->data)->A, NULL, ((Mat_MPIDense *)C->data)->A));
517:   PetscCall(MatProductSetType(((Mat_MPIDense *)C->data)->A, MATPRODUCT_AB));
518:   PetscCall(MatProductSetFromOptions(((Mat_MPIDense *)C->data)->A));
519:   PetscCall(MatProductSymbolic(((Mat_MPIDense *)C->data)->A));
520:   C->product->data       = contents;
521:   C->product->destroy    = MatMPIAIJ_MPIDenseDestroy;
522:   C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIDense;
523:   PetscFunctionReturn(PETSC_SUCCESS);
524: }
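
/*
  The derived datatypes built above avoid packing buffers: a send type picks
  the needed rows out of B's column-major array (consecutive columns are blda
  scalars apart), and a recv type lands them as a contiguous row block of
  workB (stride nz). A minimal sketch of the same construction for one
  message, assuming column-major storage with leading dimension lda:

    MPI_Datatype rows_t, col_t;
    MPI_Type_create_indexed_block(nrows, 1, disp, MPIU_SCALAR, &rows_t); // one scalar per selected row
    MPI_Type_create_resized(rows_t, 0, (MPI_Aint)(lda * sizeof(PetscScalar)), &col_t);
    MPI_Type_commit(&col_t);                                             // extent = one column
    MPI_Type_free(&rows_t);
    // MPI_Send(buf, ncols, col_t, ...) now transfers an nrows x ncols submatrix
*/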

526: PETSC_INTERN PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat, Mat, Mat, const PetscBool);

528: /*
529:     Performs an efficient scatter on the rows of B needed by this process; this is
530:     a modification of the VecScatterBegin_() routines.

532:     Input: if Bbidx = 0, the scatter fills contents->workB, otherwise contents->workB1; see MatMatMultSymbolic_MPIAIJ_MPIDense()
533: */

535: static PetscErrorCode MatMPIDenseScatter(Mat A, Mat B, PetscInt Bbidx, Mat C, Mat *outworkB)
536: {
537:   Mat_MPIAIJ        *aij = (Mat_MPIAIJ *)A->data;
538:   const PetscScalar *b;
539:   PetscScalar       *rvalues;
540:   VecScatter         ctx = aij->Mvctx;
541:   const PetscInt    *sindices, *sstarts, *rstarts;
542:   const PetscMPIInt *sprocs, *rprocs;
543:   PetscMPIInt        nsends, nrecvs;
544:   MPI_Request       *swaits, *rwaits;
545:   MPI_Comm           comm;
546:   PetscMPIInt        tag = ((PetscObject)ctx)->tag, ncols, nrows, nsends_mpi, nrecvs_mpi;
547:   MPIAIJ_MPIDense   *contents;
548:   Mat                workB;
549:   MPI_Datatype      *stype, *rtype;
550:   PetscInt           blda;

552:   PetscFunctionBegin;
553:   MatCheckProduct(C, 4);
554:   PetscCheck(C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Product data empty");
555:   PetscCall(PetscMPIIntCast(B->cmap->N, &ncols));
556:   PetscCall(PetscMPIIntCast(aij->B->cmap->n, &nrows));
557:   contents = (MPIAIJ_MPIDense *)C->product->data;
558:   PetscCall(VecScatterGetRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &sindices, &sprocs, NULL /*bs*/));
559:   PetscCall(VecScatterGetRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL, &rprocs, NULL /*bs*/));
560:   PetscCall(PetscMPIIntCast(nsends, &nsends_mpi));
561:   PetscCall(PetscMPIIntCast(nrecvs, &nrecvs_mpi));
562:   if (Bbidx == 0) workB = *outworkB = contents->workB;
563:   else workB = *outworkB = contents->workB1;
564:   PetscCheck(nrows == workB->rmap->n, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Number of rows of workB %" PetscInt_FMT " not equal to columns of aij->B %d", workB->rmap->n, nrows);
565:   swaits = contents->swaits;
566:   rwaits = contents->rwaits;

568:   PetscCall(MatDenseGetArrayRead(B, &b));
569:   PetscCall(MatDenseGetLDA(B, &blda));
570:   PetscCheck(blda == contents->blda, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Cannot reuse an input matrix with lda %" PetscInt_FMT " != %" PetscInt_FMT, blda, contents->blda);
571:   PetscCall(MatDenseGetArray(workB, &rvalues));

573:   /* Post recv, use MPI derived data type to save memory */
574:   PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
575:   rtype = contents->rtype;
576:   for (PetscMPIInt i = 0; i < nrecvs; i++) PetscCallMPI(MPIU_Irecv(rvalues + (rstarts[i] - rstarts[0]), ncols, rtype[i], rprocs[i], tag, comm, rwaits + i));

578:   stype = contents->stype;
579:   for (PetscMPIInt i = 0; i < nsends; i++) PetscCallMPI(MPIU_Isend(b, ncols, stype[i], sprocs[i], tag, comm, swaits + i));

581:   if (nrecvs) PetscCallMPI(MPI_Waitall(nrecvs_mpi, rwaits, MPI_STATUSES_IGNORE));
582:   if (nsends) PetscCallMPI(MPI_Waitall(nsends_mpi, swaits, MPI_STATUSES_IGNORE));

584:   PetscCall(VecScatterRestoreRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &sindices, &sprocs, NULL));
585:   PetscCall(VecScatterRestoreRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL, &rprocs, NULL));
586:   PetscCall(MatDenseRestoreArrayRead(B, &b));
587:   PetscCall(MatDenseRestoreArray(workB, &rvalues));
588:   PetscFunctionReturn(PETSC_SUCCESS);
589: }

591: static PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A, Mat B, Mat C)
592: {
593:   Mat_MPIAIJ      *aij    = (Mat_MPIAIJ *)A->data;
594:   Mat_MPIDense    *bdense = (Mat_MPIDense *)B->data;
595:   Mat_MPIDense    *cdense = (Mat_MPIDense *)C->data;
596:   Mat              workB;
597:   MPIAIJ_MPIDense *contents;

599:   PetscFunctionBegin;
600:   MatCheckProduct(C, 3);
601:   PetscCheck(C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Product data empty");
602:   contents = (MPIAIJ_MPIDense *)C->product->data;
603:   /* diagonal block of A times all local rows of B, first make sure that everything is up-to-date */
604:   if (!cdense->A->product) {
605:     PetscCall(MatProductCreateWithMat(aij->A, bdense->A, NULL, cdense->A));
606:     PetscCall(MatProductSetType(cdense->A, MATPRODUCT_AB));
607:     PetscCall(MatProductSetFromOptions(cdense->A));
608:     PetscCall(MatProductSymbolic(cdense->A));
609:   } else PetscCall(MatProductReplaceMats(aij->A, bdense->A, NULL, cdense->A));
610:   if (PetscDefined(HAVE_CUPM) && !cdense->A->product->clear) {
611:     PetscBool flg;

613:     PetscCall(PetscObjectTypeCompare((PetscObject)C, MATMPIDENSE, &flg));
614:     if (flg) PetscCall(PetscObjectTypeCompare((PetscObject)A, MATMPIAIJ, &flg));
615:     if (!flg) cdense->A->product->clear = PETSC_TRUE; /* if either A or C is a device Mat, make sure MatProductClear() is called */
616:   }
617:   PetscCall(MatProductNumeric(cdense->A));
618:   if (contents->workB->cmap->n == B->cmap->N) {
619:     /* get off processor parts of B needed to complete C=A*B */
620:     PetscCall(MatMPIDenseScatter(A, B, 0, C, &workB));

622:     /* off-diagonal block of A times nonlocal rows of B */
623:     PetscCall(MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B, workB, cdense->A, PETSC_TRUE));
624:   } else {
625:     Mat       Bb, Cb;
626:     PetscInt  BN = B->cmap->N, n = contents->workB->cmap->n;
627:     PetscBool ccpu;

629:     PetscCheck(n > 0, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Column block size %" PetscInt_FMT " must be positive", n);
630:     /* Prevent from unneeded copies back and forth from the GPU
631:        when getting and restoring the submatrix
632:        We need a proper GPU code for AIJ * dense in parallel */
633:     PetscCall(MatBoundToCPU(C, &ccpu));
634:     PetscCall(MatBindToCPU(C, PETSC_TRUE));
635:     for (PetscInt i = 0; i < BN; i += n) {
636:       PetscCall(MatDenseGetSubMatrix(B, PETSC_DECIDE, PETSC_DECIDE, i, PetscMin(i + n, BN), &Bb));
637:       PetscCall(MatDenseGetSubMatrix(C, PETSC_DECIDE, PETSC_DECIDE, i, PetscMin(i + n, BN), &Cb));

639:       /* get off processor parts of B needed to complete C=A*B */
640:       PetscCall(MatMPIDenseScatter(A, Bb, (i + n) > BN, C, &workB));

642:       /* off-diagonal block of A times nonlocal rows of B */
643:       cdense = (Mat_MPIDense *)Cb->data;
644:       PetscCall(MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B, workB, cdense->A, PETSC_TRUE));
645:       PetscCall(MatDenseRestoreSubMatrix(B, &Bb));
646:       PetscCall(MatDenseRestoreSubMatrix(C, &Cb));
647:     }
648:     PetscCall(MatBindToCPU(C, ccpu));
649:   }
650:   PetscFunctionReturn(PETSC_SUCCESS);
651: }
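
/*
  The batched branch above bounds the size of workB by sweeping B and C in
  column blocks of width n; MatDenseGetSubMatrix() exposes columns [i, i+n)
  in place, so no copies of B or C are made:

    Mat Bb;
    PetscCall(MatDenseGetSubMatrix(B, PETSC_DECIDE, PETSC_DECIDE, i, PetscMin(i + n, BN), &Bb));
    // ... Bb behaves as a standalone dense matrix sharing B's storage ...
    PetscCall(MatDenseRestoreSubMatrix(B, &Bb));
*/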

653: PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A, Mat P, Mat C)
654: {
655:   Mat_MPIAIJ          *a = (Mat_MPIAIJ *)A->data, *c = (Mat_MPIAIJ *)C->data;
656:   Mat_SeqAIJ          *ad = (Mat_SeqAIJ *)a->A->data, *ao = (Mat_SeqAIJ *)a->B->data;
657:   Mat_SeqAIJ          *cd = (Mat_SeqAIJ *)c->A->data, *co = (Mat_SeqAIJ *)c->B->data;
658:   PetscInt            *adi = ad->i, *adj, *aoi = ao->i, *aoj;
659:   PetscScalar         *ada, *aoa, *cda = cd->a, *coa = co->a;
660:   Mat_SeqAIJ          *p_loc, *p_oth;
661:   PetscInt            *pi_loc, *pj_loc, *pi_oth, *pj_oth, *pj;
662:   PetscScalar         *pa_loc, *pa_oth, *pa, valtmp, *ca;
663:   PetscInt             cm = C->rmap->n, anz, pnz;
664:   MatProductCtx_APMPI *ptap;
665:   PetscScalar         *apa_sparse;
666:   const PetscScalar   *dummy;
667:   PetscInt            *api, *apj, *apJ, i, j, k, row;
668:   PetscInt             cstart = C->cmap->rstart;
669:   PetscInt             cdnz, conz, k0, k1, nextp;
670:   MPI_Comm             comm;
671:   PetscMPIInt          size;

673:   PetscFunctionBegin;
674:   MatCheckProduct(C, 3);
675:   ptap = (MatProductCtx_APMPI *)C->product->data;
676:   PetscCheck(ptap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "AP cannot be computed. Missing data");
677:   PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
678:   PetscCallMPI(MPI_Comm_size(comm, &size));
679:   PetscCheck(ptap->P_oth || size <= 1, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "AP cannot be reused. Do not call MatProductClear()");

681:   /* flag CPU mask for C */
682: #if defined(PETSC_HAVE_DEVICE)
683:   if (C->offloadmask != PETSC_OFFLOAD_UNALLOCATED) C->offloadmask = PETSC_OFFLOAD_CPU;
684:   if (c->A->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->A->offloadmask = PETSC_OFFLOAD_CPU;
685:   if (c->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->B->offloadmask = PETSC_OFFLOAD_CPU;
686: #endif
687:   apa_sparse = ptap->apa;

689:   /* 1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
690:   /* update numerical values of P_oth and P_loc */
691:   PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_REUSE_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth));
692:   PetscCall(MatMPIAIJGetLocalMat(P, MAT_REUSE_MATRIX, &ptap->P_loc));

694:   /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
695:   /* get data from symbolic products */
696:   p_loc  = (Mat_SeqAIJ *)ptap->P_loc->data;
697:   pi_loc = p_loc->i;
698:   pj_loc = p_loc->j;
699:   pa_loc = p_loc->a;
700:   if (size > 1) {
701:     p_oth  = (Mat_SeqAIJ *)ptap->P_oth->data;
702:     pi_oth = p_oth->i;
703:     pj_oth = p_oth->j;
704:     pa_oth = p_oth->a;
705:   } else {
706:     p_oth  = NULL;
707:     pi_oth = NULL;
708:     pj_oth = NULL;
709:     pa_oth = NULL;
710:   }

712:   /* trigger copy to CPU */
713:   PetscCall(MatSeqAIJGetArrayRead(a->A, &dummy));
714:   PetscCall(MatSeqAIJRestoreArrayRead(a->A, &dummy));
715:   PetscCall(MatSeqAIJGetArrayRead(a->B, &dummy));
716:   PetscCall(MatSeqAIJRestoreArrayRead(a->B, &dummy));
717:   api = ptap->api;
718:   apj = ptap->apj;
719:   for (i = 0; i < cm; i++) {
720:     apJ = apj + api[i];

722:     /* diagonal portion of A */
723:     anz = adi[i + 1] - adi[i];
724:     adj = ad->j + adi[i];
725:     ada = ad->a + adi[i];
726:     for (j = 0; j < anz; j++) {
727:       row = adj[j];
728:       pnz = pi_loc[row + 1] - pi_loc[row];
729:       pj  = pj_loc + pi_loc[row];
730:       pa  = pa_loc + pi_loc[row];
731:       /* perform sparse axpy */
732:       valtmp = ada[j];
733:       nextp  = 0;
734:       for (k = 0; nextp < pnz; k++) {
735:         if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
736:           apa_sparse[k] += valtmp * pa[nextp++];
737:         }
738:       }
739:       PetscCall(PetscLogFlops(2.0 * pnz));
740:     }

742:     /* off-diagonal portion of A */
743:     anz = aoi[i + 1] - aoi[i];
744:     aoj = PetscSafePointerPlusOffset(ao->j, aoi[i]);
745:     aoa = PetscSafePointerPlusOffset(ao->a, aoi[i]);
746:     for (j = 0; j < anz; j++) {
747:       row = aoj[j];
748:       pnz = pi_oth[row + 1] - pi_oth[row];
749:       pj  = pj_oth + pi_oth[row];
750:       pa  = pa_oth + pi_oth[row];
751:       /* perform sparse axpy */
752:       valtmp = aoa[j];
753:       nextp  = 0;
754:       for (k = 0; nextp < pnz; k++) {
755:         if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
756:           apa_sparse[k] += valtmp * pa[nextp++];
757:         }
758:       }
759:       PetscCall(PetscLogFlops(2.0 * pnz));
760:     }

762:     /* set values in C */
763:     cdnz = cd->i[i + 1] - cd->i[i];
764:     conz = co->i[i + 1] - co->i[i];

766:     /* 1st off-diagonal part of C */
767:     ca = PetscSafePointerPlusOffset(coa, co->i[i]);
768:     k  = 0;
769:     for (k0 = 0; k0 < conz; k0++) {
770:       if (apJ[k] >= cstart) break;
771:       ca[k0]        = apa_sparse[k];
772:       apa_sparse[k] = 0.0;
773:       k++;
774:     }

776:     /* diagonal part of C */
777:     ca = cda + cd->i[i];
778:     for (k1 = 0; k1 < cdnz; k1++) {
779:       ca[k1]        = apa_sparse[k];
780:       apa_sparse[k] = 0.0;
781:       k++;
782:     }

784:     /* 2nd off-diagonal part of C */
785:     ca = PetscSafePointerPlusOffset(coa, co->i[i]);
786:     for (; k0 < conz; k0++) {
787:       ca[k0]        = apa_sparse[k];
788:       apa_sparse[k] = 0.0;
789:       k++;
790:     }
791:   }
792:   PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
793:   PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
794:   PetscFunctionReturn(PETSC_SUCCESS);
795: }
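
/*
  The "sparse axpy" above relies on two invariants from the symbolic phase:
  the column pattern apJ of row i of AP is sorted, and every column of the
  contributing row of P already appears in apJ. The scatter-add is therefore
  a linear merge that always terminates:

    nextp = 0;
    for (k = 0; nextp < pnz; k++) {
      if (apJ[k] == pj[nextp]) apa_sparse[k] += valtmp * pa[nextp++];
    }
*/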

797: /* same as MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(), except using the _Scalable variants of PetscLLCondensed to avoid the O(BN) memory requirement */
798: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A, Mat P, PetscReal fill, Mat C)
799: {
800:   MPI_Comm             comm;
801:   PetscMPIInt          size;
802:   MatProductCtx_APMPI *ptap;
803:   PetscFreeSpaceList   free_space = NULL, current_space = NULL;
804:   Mat_MPIAIJ          *a  = (Mat_MPIAIJ *)A->data;
805:   Mat_SeqAIJ          *ad = (Mat_SeqAIJ *)a->A->data, *ao = (Mat_SeqAIJ *)a->B->data, *p_loc, *p_oth;
806:   PetscInt            *pi_loc, *pj_loc, *pi_oth, *pj_oth, *dnz, *onz;
807:   PetscInt            *adi = ad->i, *adj = ad->j, *aoi = ao->i, *aoj = ao->j, rstart = A->rmap->rstart;
808:   PetscInt             i, pnz, row, *api, *apj, *Jptr, apnz, nspacedouble = 0, j, nzi, *lnk, apnz_max = 1;
809:   PetscInt             am = A->rmap->n, pn = P->cmap->n, pm = P->rmap->n, lsize = pn + 20;
810:   PetscReal            afill;
811:   MatType              mtype;

813:   PetscFunctionBegin;
814:   MatCheckProduct(C, 4);
815:   PetscCheck(!C->product->data, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Extra product struct not empty");
816:   PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
817:   PetscCallMPI(MPI_Comm_size(comm, &size));

819:   /* create struct MatProductCtx_APMPI and attach it to C later */
820:   PetscCall(PetscNew(&ptap));

822:   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
823:   PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_INITIAL_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth));

825:   /* get P_loc by taking all local rows of P */
826:   PetscCall(MatMPIAIJGetLocalMat(P, MAT_INITIAL_MATRIX, &ptap->P_loc));

828:   p_loc  = (Mat_SeqAIJ *)ptap->P_loc->data;
829:   pi_loc = p_loc->i;
830:   pj_loc = p_loc->j;
831:   if (size > 1) {
832:     p_oth  = (Mat_SeqAIJ *)ptap->P_oth->data;
833:     pi_oth = p_oth->i;
834:     pj_oth = p_oth->j;
835:   } else {
836:     p_oth  = NULL;
837:     pi_oth = NULL;
838:     pj_oth = NULL;
839:   }

841:   /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
842:   PetscCall(PetscMalloc1(am + 1, &api));
843:   ptap->api = api;
844:   api[0]    = 0;

846:   PetscCall(PetscLLCondensedCreate_Scalable(lsize, &lnk));

848:   /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
849:   PetscCall(PetscFreeSpaceGet(PetscRealIntMultTruncate(fill, PetscIntSumTruncate(adi[am], PetscIntSumTruncate(aoi[am], pi_loc[pm]))), &free_space));
850:   current_space = free_space;
851:   MatPreallocateBegin(comm, am, pn, dnz, onz);
852:   for (i = 0; i < am; i++) {
853:     /* diagonal portion of A */
854:     nzi = adi[i + 1] - adi[i];
855:     for (j = 0; j < nzi; j++) {
856:       row  = *adj++;
857:       pnz  = pi_loc[row + 1] - pi_loc[row];
858:       Jptr = pj_loc + pi_loc[row];
859:       /* Expand list if it is not long enough */
860:       if (pnz + apnz_max > lsize) {
861:         lsize = pnz + apnz_max;
862:         PetscCall(PetscLLCondensedExpand_Scalable(lsize, &lnk));
863:       }
864:       /* add non-zero cols of P into the sorted linked list lnk */
865:       PetscCall(PetscLLCondensedAddSorted_Scalable(pnz, Jptr, lnk));
866:       apnz       = *lnk; /* The first element in the list is the number of items in the list */
867:       api[i + 1] = api[i] + apnz;
868:       if (apnz > apnz_max) apnz_max = apnz + 1; /* '1' for diagonal entry */
869:     }
870:     /* off-diagonal portion of A */
871:     nzi = aoi[i + 1] - aoi[i];
872:     for (j = 0; j < nzi; j++) {
873:       row  = *aoj++;
874:       pnz  = pi_oth[row + 1] - pi_oth[row];
875:       Jptr = pj_oth + pi_oth[row];
876:       /* Expand list if it is not long enough */
877:       if (pnz + apnz_max > lsize) {
878:         lsize = pnz + apnz_max;
879:         PetscCall(PetscLLCondensedExpand_Scalable(lsize, &lnk));
880:       }
881:       /* add non-zero cols of P into the sorted linked list lnk */
882:       PetscCall(PetscLLCondensedAddSorted_Scalable(pnz, Jptr, lnk));
883:       apnz       = *lnk; /* The first element in the list is the number of items in the list */
884:       api[i + 1] = api[i] + apnz;
885:       if (apnz > apnz_max) apnz_max = apnz + 1; /* '1' for diagonal entry */
886:     }

888:     /* add missing diagonal entry */
889:     if (C->force_diagonals) {
890:       j = i + rstart; /* column index */
891:       PetscCall(PetscLLCondensedAddSorted_Scalable(1, &j, lnk));
892:     }

894:     apnz       = *lnk;
895:     api[i + 1] = api[i] + apnz;
896:     if (apnz > apnz_max) apnz_max = apnz;

898:     /* if free space is not available, double the total space in the list */
899:     if (current_space->local_remaining < apnz) {
900:       PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(apnz, current_space->total_array_size), &current_space));
901:       nspacedouble++;
902:     }

904:     /* Copy data into free space, then initialize lnk */
905:     PetscCall(PetscLLCondensedClean_Scalable(apnz, current_space->array, lnk));
906:     PetscCall(MatPreallocateSet(i + rstart, apnz, current_space->array, dnz, onz));

908:     current_space->array += apnz;
909:     current_space->local_used += apnz;
910:     current_space->local_remaining -= apnz;
911:   }

913:   /* Allocate space for apj, initialize apj, and */
914:   /* destroy list of free space and other temporary array(s) */
915:   PetscCall(PetscMalloc1(api[am], &ptap->apj));
916:   apj = ptap->apj;
917:   PetscCall(PetscFreeSpaceContiguous(&free_space, ptap->apj));
918:   PetscCall(PetscLLCondensedDestroy_Scalable(lnk));

920:   /* create and assemble symbolic parallel matrix C */
921:   PetscCall(MatSetSizes(C, am, pn, PETSC_DETERMINE, PETSC_DETERMINE));
922:   PetscCall(MatSetBlockSizesFromMats(C, A, P));
923:   PetscCall(MatGetType(A, &mtype));
924:   PetscCall(MatSetType(C, mtype));
925:   PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz));
926:   MatPreallocateEnd(dnz, onz);

928:   /* malloc apa for assembling C */
929:   PetscCall(PetscCalloc1(apnz_max, &ptap->apa));

931:   PetscCall(MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api));
932:   PetscCall(MatSetOption(C, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
933:   PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
934:   PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
935:   PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));

937:   C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ;
938:   C->ops->productnumeric = MatProductNumeric_AB;

940:   /* attach the supporting struct to C for reuse */
941:   C->product->data    = ptap;
942:   C->product->destroy = MatProductCtxDestroy_MPIAIJ_MatMatMult;

944:   /* set MatInfo */
945:   afill = (PetscReal)api[am] / (adi[am] + aoi[am] + pi_loc[pm] + 1) + 1.e-5;
946:   if (afill < 1.0) afill = 1.0;
947:   C->info.mallocs           = nspacedouble;
948:   C->info.fill_ratio_given  = fill;
949:   C->info.fill_ratio_needed = afill;

951: #if defined(PETSC_USE_INFO)
952:   if (api[am]) {
953:     PetscCall(PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, (double)afill));
954:     PetscCall(PetscInfo(C, "Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n", (double)afill));
955:   } else {
956:     PetscCall(PetscInfo(C, "Empty matrix product\n"));
957:   }
958: #endif
959:   PetscFunctionReturn(PETSC_SUCCESS);
960: }
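
/*
  The memory-scalable trait of this variant: the linked list and the numeric
  work array are sized by local quantities (lsize = pn + 20, apnz_max) rather
  than the global column count pN, and are grown only on demand:

    if (pnz + apnz_max > lsize) { // the incoming row pattern may not fit
      lsize = pnz + apnz_max;
      PetscCall(PetscLLCondensedExpand_Scalable(lsize, &lnk));
    }
*/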

962: /* This function is needed for the seqMPI matrix-matrix multiplication.     */
963: /* Three sorted input arrays are merged into one sorted output array, whose */
964: /* size is also returned. Duplicate entries appear only once in the output. */
965: static void Merge3SortedArrays(PetscInt size1, PetscInt *in1, PetscInt size2, PetscInt *in2, PetscInt size3, PetscInt *in3, PetscInt *size4, PetscInt *out)
966: {
967:   PetscInt i = 0, j = 0, k = 0, l = 0;

969:   /* Traverse all three arrays */
970:   while (i < size1 && j < size2 && k < size3) {
971:     if (in1[i] < in2[j] && in1[i] < in3[k]) {
972:       out[l++] = in1[i++];
973:     } else if (in2[j] < in1[i] && in2[j] < in3[k]) {
974:       out[l++] = in2[j++];
975:     } else if (in3[k] < in1[i] && in3[k] < in2[j]) {
976:       out[l++] = in3[k++];
977:     } else if (in1[i] == in2[j] && in1[i] < in3[k]) {
978:       out[l++] = in1[i];
979:       i++, j++;
980:     } else if (in1[i] == in3[k] && in1[i] < in2[j]) {
981:       out[l++] = in1[i];
982:       i++, k++;
983:     } else if (in3[k] == in2[j] && in2[j] < in1[i]) {
984:       out[l++] = in2[j];
985:       k++, j++;
986:     } else if (in1[i] == in2[j] && in1[i] == in3[k]) {
987:       out[l++] = in1[i];
988:       i++, j++, k++;
989:     }
990:   }

992:   /* Traverse two remaining arrays */
993:   while (i < size1 && j < size2) {
994:     if (in1[i] < in2[j]) {
995:       out[l++] = in1[i++];
996:     } else if (in1[i] > in2[j]) {
997:       out[l++] = in2[j++];
998:     } else {
999:       out[l++] = in1[i];
1000:       i++, j++;
1001:     }
1002:   }

1004:   while (i < size1 && k < size3) {
1005:     if (in1[i] < in3[k]) {
1006:       out[l++] = in1[i++];
1007:     } else if (in1[i] > in3[k]) {
1008:       out[l++] = in3[k++];
1009:     } else {
1010:       out[l++] = in1[i];
1011:       i++, k++;
1012:     }
1013:   }

1015:   while (k < size3 && j < size2) {
1016:     if (in3[k] < in2[j]) {
1017:       out[l++] = in3[k++];
1018:     } else if (in3[k] > in2[j]) {
1019:       out[l++] = in2[j++];
1020:     } else {
1021:       out[l++] = in3[k];
1022:       k++, j++;
1023:     }
1024:   }

1026:   /* Traverse one remaining array */
1027:   while (i < size1) out[l++] = in1[i++];
1028:   while (j < size2) out[l++] = in2[j++];
1029:   while (k < size3) out[l++] = in3[k++];

1031:   *size4 = l;
1032: }
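
/*
  A worked example of the merge, with duplicates collapsing to one entry:

    PetscInt in1[] = {0, 2, 4}, in2[] = {1, 2, 6}, in3[] = {2, 4, 7}, out[9], n;
    Merge3SortedArrays(3, in1, 3, in2, 3, in3, &n, out);
    // out = {0, 1, 2, 4, 6, 7}, n = 6
*/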

1034: /* This matrix-matrix multiplication algorithm splits the product into three partial products and */
1035: /* adds them up. Two of the three partial products are computed with the existing sequential      */
1036: /* matrix-matrix multiplication routines.                                                         */
1037: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(Mat A, Mat P, PetscReal fill, Mat C)
1038: {
1039:   MPI_Comm             comm;
1040:   PetscMPIInt          size;
1041:   MatProductCtx_APMPI *ptap;
1042:   PetscFreeSpaceList   free_space_diag = NULL, current_space = NULL;
1043:   Mat_MPIAIJ          *a  = (Mat_MPIAIJ *)A->data;
1044:   Mat_SeqAIJ          *ad = (Mat_SeqAIJ *)a->A->data, *ao = (Mat_SeqAIJ *)a->B->data, *p_loc;
1045:   Mat_MPIAIJ          *p = (Mat_MPIAIJ *)P->data;
1046:   Mat_SeqAIJ          *adpd_seq, *p_off, *aopoth_seq;
1047:   PetscInt             adponz, adpdnz;
1048:   PetscInt            *pi_loc, *dnz, *onz;
1049:   PetscInt            *adi = ad->i, *adj = ad->j, *aoi = ao->i, rstart = A->rmap->rstart;
1050:   PetscInt            *lnk, i, i1 = 0, pnz, row, *adpoi, *adpoj, *api, *adpoJ, *aopJ, *apJ, *Jptr, aopnz, nspacedouble = 0, j, nzi, *apj, apnz, *adpdi, *adpdj, *adpdJ, *poff_i, *poff_j, *j_temp, *aopothi, *aopothj;
1051:   PetscInt             am = A->rmap->n, pN = P->cmap->N, pn = P->cmap->n, pm = P->rmap->n, p_colstart, p_colend;
1052:   PetscBT              lnkbt;
1053:   PetscReal            afill;
1054:   PetscMPIInt          rank;
1055:   Mat                  adpd, aopoth;
1056:   MatType              mtype;
1057:   const char          *prefix;

1059:   PetscFunctionBegin;
1060:   MatCheckProduct(C, 4);
1061:   PetscCheck(!C->product->data, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Extra product struct not empty");
1062:   PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
1063:   PetscCallMPI(MPI_Comm_size(comm, &size));
1064:   PetscCallMPI(MPI_Comm_rank(comm, &rank));
1065:   PetscCall(MatGetOwnershipRangeColumn(P, &p_colstart, &p_colend));

1067:   /* create struct MatProductCtx_APMPI and attach it to C later */
1068:   PetscCall(PetscNew(&ptap));

1070:   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
1071:   PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_INITIAL_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth));

1073:   /* get P_loc by taking all local rows of P */
1074:   PetscCall(MatMPIAIJGetLocalMat(P, MAT_INITIAL_MATRIX, &ptap->P_loc));

1076:   p_loc  = (Mat_SeqAIJ *)ptap->P_loc->data;
1077:   pi_loc = p_loc->i;

1079:   /* Allocate memory for the i arrays of the matrices A*P, A_diag*P_off and A_offd * P */
1080:   PetscCall(PetscMalloc1(am + 1, &api));
1081:   PetscCall(PetscMalloc1(am + 1, &adpoi));

1083:   adpoi[0]  = 0;
1084:   ptap->api = api;
1085:   api[0]    = 0;

1087:   /* create and initialize a linked list, will be used for both A_diag * P_loc_off and A_offd * P_oth */
1088:   PetscCall(PetscLLCondensedCreate(pN, pN, &lnk, &lnkbt));
1089:   MatPreallocateBegin(comm, am, pn, dnz, onz);

1091:   /* Symbolic calc of A_loc_diag * P_loc_diag */
1092:   PetscCall(MatGetOptionsPrefix(A, &prefix));
1093:   PetscCall(MatProductCreate(a->A, p->A, NULL, &adpd));
1095:   PetscCall(MatSetOptionsPrefix(adpd, prefix));
1096:   PetscCall(MatAppendOptionsPrefix(adpd, "inner_diag_"));

1098:   PetscCall(MatProductSetType(adpd, MATPRODUCT_AB));
1099:   PetscCall(MatProductSetAlgorithm(adpd, "sorted"));
1100:   PetscCall(MatProductSetFill(adpd, fill));
1101:   PetscCall(MatProductSetFromOptions(adpd));

1103:   adpd->force_diagonals = C->force_diagonals;
1104:   PetscCall(MatProductSymbolic(adpd));

1106:   adpd_seq = (Mat_SeqAIJ *)((adpd)->data);
1107:   adpdi    = adpd_seq->i;
1108:   adpdj    = adpd_seq->j;
1109:   p_off    = (Mat_SeqAIJ *)p->B->data;
1110:   poff_i   = p_off->i;
1111:   poff_j   = p_off->j;

1113:   /* j_temp stores indices of a result row before they are added to the linked list */
1114:   PetscCall(PetscMalloc1(pN, &j_temp));

1116:   /* Symbolic calc of A_diag * P_loc_off */
1117:   /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
1118:   PetscCall(PetscFreeSpaceGet(PetscRealIntMultTruncate(fill, PetscIntSumTruncate(adi[am], PetscIntSumTruncate(aoi[am], pi_loc[pm]))), &free_space_diag));
1119:   current_space = free_space_diag;

1121:   for (i = 0; i < am; i++) {
1122:     /* A_diag * P_loc_off */
1123:     nzi = adi[i + 1] - adi[i];
1124:     for (j = 0; j < nzi; j++) {
1125:       row  = *adj++;
1126:       pnz  = poff_i[row + 1] - poff_i[row];
1127:       Jptr = poff_j + poff_i[row];
1128:       for (i1 = 0; i1 < pnz; i1++) j_temp[i1] = p->garray[Jptr[i1]];
1129:       /* add non-zero cols of P into the sorted linked list lnk */
1130:       PetscCall(PetscLLCondensedAddSorted(pnz, j_temp, lnk, lnkbt));
1131:     }

1133:     adponz       = lnk[0];
1134:     adpoi[i + 1] = adpoi[i] + adponz;

1136:     /* if free space is not available, double the total space in the list */
1137:     if (current_space->local_remaining < adponz) {
1138:       PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(adponz, current_space->total_array_size), &current_space));
1139:       nspacedouble++;
1140:     }

1142:     /* Copy data into free space, then initialize lnk */
1143:     PetscCall(PetscLLCondensedClean(pN, adponz, current_space->array, lnk, lnkbt));

1145:     current_space->array += adponz;
1146:     current_space->local_used += adponz;
1147:     current_space->local_remaining -= adponz;
1148:   }

1150:   /* Symbolic calc of A_off * P_oth */
1151:   PetscCall(MatSetOptionsPrefix(a->B, prefix));
1152:   PetscCall(MatAppendOptionsPrefix(a->B, "inner_offdiag_"));
1153:   PetscCall(MatCreate(PETSC_COMM_SELF, &aopoth));
1154:   PetscCall(MatMatMultSymbolic_SeqAIJ_SeqAIJ(a->B, ptap->P_oth, fill, aopoth));
1155:   aopoth_seq = (Mat_SeqAIJ *)((aopoth)->data);
1156:   aopothi    = aopoth_seq->i;
1157:   aopothj    = aopoth_seq->j;

1159:   /* Allocate space for apj, adpj, aopj, ... */
1160:   /* destroy lists of free space and other temporary array(s) */

1162:   PetscCall(PetscMalloc1(aopothi[am] + adpoi[am] + adpdi[am], &ptap->apj));
1163:   PetscCall(PetscMalloc1(adpoi[am], &adpoj));

1165:   /* Copy from linked list to j-array */
1166:   PetscCall(PetscFreeSpaceContiguous(&free_space_diag, adpoj));
1167:   PetscCall(PetscLLDestroy(lnk, lnkbt));

1169:   adpoJ = adpoj;
1170:   adpdJ = adpdj;
1171:   aopJ  = aopothj;
1172:   apj   = ptap->apj;
1173:   apJ   = apj; /* still empty */

1175:   /* Merge j-arrays of A_off * P, A_diag * P_loc_off, and */
1176:   /* A_diag * P_loc_diag to get A*P */
1177:   for (i = 0; i < am; i++) {
1178:     aopnz  = aopothi[i + 1] - aopothi[i];
1179:     adponz = adpoi[i + 1] - adpoi[i];
1180:     adpdnz = adpdi[i + 1] - adpdi[i];

1182:     /* Correct indices from A_diag*P_diag */
1183:     for (i1 = 0; i1 < adpdnz; i1++) adpdJ[i1] += p_colstart;
1184:     /* Merge j-arrays of A_diag * P_loc_off and A_diag * P_loc_diag and A_off * P_oth */
1185:     Merge3SortedArrays(adponz, adpoJ, adpdnz, adpdJ, aopnz, aopJ, &apnz, apJ);
1186:     PetscCall(MatPreallocateSet(i + rstart, apnz, apJ, dnz, onz));

1188:     aopJ += aopnz;
1189:     adpoJ += adponz;
1190:     adpdJ += adpdnz;
1191:     apJ += apnz;
1192:     api[i + 1] = api[i] + apnz;
1193:   }

1195:   /* malloc apa to store dense row A[i,:]*P */
1196:   PetscCall(PetscCalloc1(pN, &ptap->apa));

1198:   /* create and assemble symbolic parallel matrix C */
1199:   PetscCall(MatSetSizes(C, am, pn, PETSC_DETERMINE, PETSC_DETERMINE));
1200:   PetscCall(MatSetBlockSizesFromMats(C, A, P));
1201:   PetscCall(MatGetType(A, &mtype));
1202:   PetscCall(MatSetType(C, mtype));
1203:   PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz));
1204:   MatPreallocateEnd(dnz, onz);

1206:   PetscCall(MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api));
1207:   PetscCall(MatSetOption(C, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
1208:   PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
1209:   PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
1210:   PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));

1212:   C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
1213:   C->ops->productnumeric = MatProductNumeric_AB;

1215:   /* attach the supporting struct to C for reuse */
1216:   C->product->data    = ptap;
1217:   C->product->destroy = MatProductCtxDestroy_MPIAIJ_MatMatMult;

1219:   /* set MatInfo */
1220:   afill = (PetscReal)api[am] / (adi[am] + aoi[am] + pi_loc[pm] + 1) + 1.e-5;
1221:   if (afill < 1.0) afill = 1.0;
1222:   C->info.mallocs           = nspacedouble;
1223:   C->info.fill_ratio_given  = fill;
1224:   C->info.fill_ratio_needed = afill;

1226: #if defined(PETSC_USE_INFO)
1227:   if (api[am]) {
1228:     PetscCall(PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, (double)afill));
1229:     PetscCall(PetscInfo(C, "Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n", (double)afill));
1230:   } else {
1231:     PetscCall(PetscInfo(C, "Empty matrix product\n"));
1232:   }
1233: #endif

1235:   PetscCall(MatDestroy(&aopoth));
1236:   PetscCall(MatDestroy(&adpd));
1237:   PetscCall(PetscFree(j_temp));
1238:   PetscCall(PetscFree(adpoj));
1239:   PetscCall(PetscFree(adpoi));
1240:   PetscFunctionReturn(PETSC_SUCCESS);
1241: }
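
/*
  In summary, with the local part of A split as [A_diag | A_off] and the local
  rows of P split into P_loc_diag and P_loc_off, the rows of C owned by this
  process are assembled from three partial products,

    C_loc = A_diag*P_loc_diag + A_diag*P_loc_off + A_off*P_oth,

  where the first comes from the sequential product adpd, the second from the
  linked-list pass, and the third from aopoth; Merge3SortedArrays() fuses the
  three column patterns row by row.
*/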

1243: /* This routine only works when scall=MAT_REUSE_MATRIX! */
1244: PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult(Mat P, Mat A, Mat C)
1245: {
1246:   MatProductCtx_APMPI *ptap;
1247:   Mat                  Pt;

1249:   PetscFunctionBegin;
1250:   MatCheckProduct(C, 3);
1251:   ptap = (MatProductCtx_APMPI *)C->product->data;
1252:   PetscCheck(ptap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be computed. Missing data");
1253:   PetscCheck(ptap->Pt, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be reused. Do not call MatProductClear()");

1255:   Pt = ptap->Pt;
1256:   PetscCall(MatTransposeSetPrecursor(P, Pt));
1257:   PetscCall(MatTranspose(P, MAT_REUSE_MATRIX, &Pt));
1258:   PetscCall(MatMatMultNumeric_MPIAIJ_MPIAIJ(Pt, A, C));
1259:   PetscFunctionReturn(PETSC_SUCCESS);
1260: }
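
/*
  A sketch of the public call that lands here on reuse, assuming C was first
  built with MAT_INITIAL_MATRIX so that ptap->Pt exists:

    PetscCall(MatTransposeMatMult(P, A, MAT_REUSE_MATRIX, PETSC_DEFAULT, &C)); // C = P^T * A
*/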

1262: /* This routine is modified from MatPtAPSymbolic_MPIAIJ_MPIAIJ() */
1263: PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat P, Mat A, PetscReal fill, Mat C)
1264: {
1265:   MatProductCtx_APMPI     *ptap;
1266:   Mat_MPIAIJ              *p = (Mat_MPIAIJ *)P->data;
1267:   MPI_Comm                 comm;
1268:   PetscMPIInt              size, rank;
1269:   PetscFreeSpaceList       free_space = NULL, current_space = NULL;
1270:   PetscInt                 pn = P->cmap->n, aN = A->cmap->N, an = A->cmap->n;
1271:   PetscInt                *lnk, i, k, rstart;
1272:   PetscBT                  lnkbt;
1273:   PetscMPIInt              tagi, tagj, *len_si, *len_s, *len_ri, nrecv, proc, nsend;
1274:   PETSC_UNUSED PetscMPIInt icompleted = 0;
1275:   PetscInt               **buf_rj, **buf_ri, **buf_ri_k, row, ncols, *cols;
1276:   PetscInt                 len, *dnz, *onz, *owners, nzi;
1277:   PetscInt                 nrows, *buf_s, *buf_si, *buf_si_i, **nextrow, **nextci;
1278:   MPI_Request             *swaits, *rwaits;
1279:   MPI_Status              *sstatus, rstatus;
1280:   PetscLayout              rowmap;
1281:   PetscInt                *owners_co, *coi, *coj; /* i and j arrays of C_oth = (p->B)^T*A - used in the communication */
1282:   PetscMPIInt             *len_r, *id_r;          /* length and source id of each message to be received */
1283:   PetscInt                *Jptr, *prmap = p->garray, con, j, Crmax;
1284:   Mat_SeqAIJ              *a_loc, *c_loc, *c_oth;
1285:   PetscHMapI               ta;
1286:   MatType                  mtype;
1287:   const char              *prefix;

1289:   PetscFunctionBegin;
1290:   PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
1291:   PetscCallMPI(MPI_Comm_size(comm, &size));
1292:   PetscCallMPI(MPI_Comm_rank(comm, &rank));

1294:   /* create symbolic parallel matrix C */
1295:   PetscCall(MatGetType(A, &mtype));
1296:   PetscCall(MatSetType(C, mtype));

1298:   C->ops->transposematmultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;

1300:   /* create struct MatProductCtx_APMPI and attach it to C later */
1301:   PetscCall(PetscNew(&ptap));

1303:   /* (0) compute Rd = Pd^T, Ro = Po^T  */
1304:   PetscCall(MatTranspose(p->A, MAT_INITIAL_MATRIX, &ptap->Rd));
1305:   PetscCall(MatTranspose(p->B, MAT_INITIAL_MATRIX, &ptap->Ro));

1307:   /* (1) compute symbolic A_loc */
1308:   PetscCall(MatMPIAIJGetLocalMat(A, MAT_INITIAL_MATRIX, &ptap->A_loc));

1310:   /* (2-1) compute symbolic C_oth = Ro*A_loc  */
1311:   PetscCall(MatGetOptionsPrefix(A, &prefix));
1312:   PetscCall(MatSetOptionsPrefix(ptap->Ro, prefix));
1313:   PetscCall(MatAppendOptionsPrefix(ptap->Ro, "inner_offdiag_"));
1314:   PetscCall(MatCreate(PETSC_COMM_SELF, &ptap->C_oth));
1315:   PetscCall(MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro, ptap->A_loc, fill, ptap->C_oth));

1317:   /* (3) send coj of C_oth to other processors  */
1318:   /* determine row ownership */
1319:   PetscCall(PetscLayoutCreate(comm, &rowmap));
1320:   rowmap->n  = pn;
1321:   rowmap->bs = 1;
1322:   PetscCall(PetscLayoutSetUp(rowmap));
1323:   owners = rowmap->range;

1325:   /* determine the number of messages to send, their lengths */
1326:   PetscCall(PetscMalloc4(size, &len_s, size, &len_si, size, &sstatus, size + 1, &owners_co));
1327:   PetscCall(PetscArrayzero(len_s, size));
1328:   PetscCall(PetscArrayzero(len_si, size));

1330:   c_oth = (Mat_SeqAIJ *)ptap->C_oth->data;
1331:   coi   = c_oth->i;
1332:   coj   = c_oth->j;
1333:   con   = ptap->C_oth->rmap->n;
1334:   proc  = 0;
1335:   for (i = 0; i < con; i++) {
1336:     while (prmap[i] >= owners[proc + 1]) proc++;
1337:     len_si[proc]++;                     /* num of rows in Co(=Pt*A) to be sent to [proc] */
1338:     len_s[proc] += coi[i + 1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
1339:   }

1341:   len          = 0; /* max length of buf_si[], see (4) */
1342:   owners_co[0] = 0;
1343:   nsend        = 0;
1344:   for (proc = 0; proc < size; proc++) {
1345:     owners_co[proc + 1] = owners_co[proc] + len_si[proc];
1346:     if (len_s[proc]) {
1347:       nsend++;
1348:       len_si[proc] = 2 * (len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
1349:       len += len_si[proc];
1350:     }
1351:   }

1353:   /* determine the number and length of messages to receive for coi and coj  */
1354:   PetscCall(PetscGatherNumberOfMessages(comm, NULL, len_s, &nrecv));
1355:   PetscCall(PetscGatherMessageLengths2(comm, nsend, nrecv, len_s, len_si, &id_r, &len_r, &len_ri));
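
  /* PetscGatherNumberOfMessages() determines how many messages this rank will receive
     (nrecv) from the global table of send lengths; PetscGatherMessageLengths2() then
     exchanges, for each pending message, the sender rank (id_r) and both payload
     lengths (len_r for the coj data, len_ri for the i-structure), so the receives
     below can be posted with exact sizes. */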

1357:   /* post the Irecv and Isend of coj */
1358:   PetscCall(PetscCommGetNewTag(comm, &tagj));
1359:   PetscCall(PetscPostIrecvInt(comm, tagj, nrecv, id_r, len_r, &buf_rj, &rwaits));
1360:   PetscCall(PetscMalloc1(nsend, &swaits));
1361:   for (proc = 0, k = 0; proc < size; proc++) {
1362:     if (!len_s[proc]) continue;
1363:     i = owners_co[proc];
1364:     PetscCallMPI(MPIU_Isend(coj + coi[i], len_s[proc], MPIU_INT, proc, tagj, comm, swaits + k));
1365:     k++;
1366:   }

1368:   /* (2-2) compute symbolic C_loc = Rd*A_loc */
1369:   PetscCall(MatSetOptionsPrefix(ptap->Rd, prefix));
1370:   PetscCall(MatAppendOptionsPrefix(ptap->Rd, "inner_diag_"));
1371:   PetscCall(MatCreate(PETSC_COMM_SELF, &ptap->C_loc));
1372:   PetscCall(MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd, ptap->A_loc, fill, ptap->C_loc));
1373:   c_loc = (Mat_SeqAIJ *)ptap->C_loc->data;

1375:   /* receives coj are complete */
1376:   for (i = 0; i < nrecv; i++) PetscCallMPI(MPI_Waitany(nrecv, rwaits, &icompleted, &rstatus));
1377:   PetscCall(PetscFree(rwaits));
1378:   if (nsend) PetscCallMPI(MPI_Waitall(nsend, swaits, sstatus));

1380:   /* add received column indices into ta to update Crmax */
1381:   a_loc = (Mat_SeqAIJ *)ptap->A_loc->data;

1383:   /* create and initialize a linked list */
1384:   PetscCall(PetscHMapICreateWithSize(an, &ta)); /* for computing Crmax */
1385:   MatRowMergeMax_SeqAIJ(a_loc, ptap->A_loc->rmap->N, ta);

1387:   for (k = 0; k < nrecv; k++) { /* k-th received message */
1388:     Jptr = buf_rj[k];
1389:     for (j = 0; j < len_r[k]; j++) PetscCall(PetscHMapISet(ta, *(Jptr + j) + 1, 1));
1390:   }
1391:   PetscCall(PetscHMapIGetSize(ta, &Crmax));
1392:   PetscCall(PetscHMapIDestroy(&ta));

1394:   /* (4) send and recv coi */
1395:   PetscCall(PetscCommGetNewTag(comm, &tagi));
1396:   PetscCall(PetscPostIrecvInt(comm, tagi, nrecv, id_r, len_ri, &buf_ri, &rwaits));
1397:   PetscCall(PetscMalloc1(len, &buf_s));
1398:   buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
1399:   for (proc = 0, k = 0; proc < size; proc++) {
1400:     if (!len_s[proc]) continue;
1401:     /* form outgoing message for i-structure:
1402:          buf_si[0]:                 nrows to be sent
1403:                [1:nrows]:           row index (local to the receiving process)
1404:                [nrows+1:2*nrows+1]: i-structure index
1405:     */
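    /* Worked example (illustration): if two rows of Co, with receiver-local indices 3 and 7
       and 4 and 2 nonzeros respectively, go to [proc], then nrows = 2,
       len_si[proc] = 2*(2+1) = 6, and the message is
           buf_si = [2, 3, 7, 0, 4, 6]
       i.e. [nrows | local row indices | i-structure offsets]. */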
1406:     nrows       = len_si[proc] / 2 - 1; /* num of rows in Co to be sent to [proc] */
1407:     buf_si_i    = buf_si + nrows + 1;
1408:     buf_si[0]   = nrows;
1409:     buf_si_i[0] = 0;
1410:     nrows       = 0;
1411:     for (i = owners_co[proc]; i < owners_co[proc + 1]; i++) {
1412:       nzi                 = coi[i + 1] - coi[i];
1413:       buf_si_i[nrows + 1] = buf_si_i[nrows] + nzi;   /* i-structure */
1414:       buf_si[nrows + 1]   = prmap[i] - owners[proc]; /* local row index */
1415:       nrows++;
1416:     }
1417:     PetscCallMPI(MPIU_Isend(buf_si, len_si[proc], MPIU_INT, proc, tagi, comm, swaits + k));
1418:     k++;
1419:     buf_si += len_si[proc];
1420:   }
1421:   for (i = 0; i < nrecv; i++) PetscCallMPI(MPI_Waitany(nrecv, rwaits, &icompleted, &rstatus));
1422:   PetscCall(PetscFree(rwaits));
1423:   if (nsend) PetscCallMPI(MPI_Waitall(nsend, swaits, sstatus));

1425:   PetscCall(PetscFree4(len_s, len_si, sstatus, owners_co));
1426:   PetscCall(PetscFree(len_ri));
1427:   PetscCall(PetscFree(swaits));
1428:   PetscCall(PetscFree(buf_s));

1430:   /* (5) compute the local portion of C      */
1431:   /* set initial free space to be Crmax, sufficient for holding nonzeros in each row of C */
1432:   PetscCall(PetscFreeSpaceGet(Crmax, &free_space));
1433:   current_space = free_space;

1435:   PetscCall(PetscMalloc3(nrecv, &buf_ri_k, nrecv, &nextrow, nrecv, &nextci));
1436:   for (k = 0; k < nrecv; k++) {
1437:     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
1438:     nrows       = *buf_ri_k[k];
1439:     nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th received i-structure */
1440:     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure  */
1441:   }

1443:   MatPreallocateBegin(comm, pn, an, dnz, onz);
1444:   PetscCall(PetscLLCondensedCreate(Crmax, aN, &lnk, &lnkbt));
1445:   for (i = 0; i < pn; i++) { /* for each local row of C */
1446:     /* add C_loc into C */
1447:     nzi  = c_loc->i[i + 1] - c_loc->i[i];
1448:     Jptr = c_loc->j + c_loc->i[i];
1449:     PetscCall(PetscLLCondensedAddSorted(nzi, Jptr, lnk, lnkbt));

1451:     /* add received col data into lnk */
1452:     for (k = 0; k < nrecv; k++) { /* k-th received message */
1453:       if (i == *nextrow[k]) {     /* i-th row */
1454:         nzi  = *(nextci[k] + 1) - *nextci[k];
1455:         Jptr = buf_rj[k] + *nextci[k];
1456:         PetscCall(PetscLLCondensedAddSorted(nzi, Jptr, lnk, lnkbt));
1457:         nextrow[k]++;
1458:         nextci[k]++;
1459:       }
1460:     }

1462:     /* add missing diagonal entry */
1463:     if (C->force_diagonals) {
1464:       k = i + owners[rank]; /* column index */
1465:       PetscCall(PetscLLCondensedAddSorted(1, &k, lnk, lnkbt));
1466:     }

1468:     nzi = lnk[0];

1470:     /* copy data into free space, then initialize lnk */
1471:     PetscCall(PetscLLCondensedClean(aN, nzi, current_space->array, lnk, lnkbt));
1472:     PetscCall(MatPreallocateSet(i + owners[rank], nzi, current_space->array, dnz, onz));
1473:   }
1474:   PetscCall(PetscFree3(buf_ri_k, nextrow, nextci));
1475:   PetscCall(PetscLLDestroy(lnk, lnkbt));
1476:   PetscCall(PetscFreeSpaceDestroy(free_space));

1478:   /* local sizes and preallocation */
1479:   PetscCall(MatSetSizes(C, pn, an, PETSC_DETERMINE, PETSC_DETERMINE));
1480:   PetscCall(PetscLayoutSetBlockSize(C->rmap, P->cmap->bs));
1481:   PetscCall(PetscLayoutSetBlockSize(C->cmap, A->cmap->bs));
1482:   PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz));
1483:   MatPreallocateEnd(dnz, onz);

1485:   /* add C_loc and C_oth to C */
1486:   PetscCall(MatGetOwnershipRange(C, &rstart, NULL));
1487:   for (i = 0; i < pn; i++) {
1488:     ncols = c_loc->i[i + 1] - c_loc->i[i];
1489:     cols  = c_loc->j + c_loc->i[i];
1490:     row   = rstart + i;
1491:     PetscCall(MatSetValues(C, 1, (const PetscInt *)&row, ncols, (const PetscInt *)cols, NULL, INSERT_VALUES));

1493:     if (C->force_diagonals) PetscCall(MatSetValues(C, 1, (const PetscInt *)&row, 1, (const PetscInt *)&row, NULL, INSERT_VALUES));
1494:   }
1495:   for (i = 0; i < con; i++) {
1496:     ncols = c_oth->i[i + 1] - c_oth->i[i];
1497:     cols  = c_oth->j + c_oth->i[i];
1498:     row   = prmap[i];
1499:     PetscCall(MatSetValues(C, 1, (const PetscInt *)&row, ncols, (const PetscInt *)cols, NULL, INSERT_VALUES));
1500:   }
1501:   PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
1502:   PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
1503:   PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));

1505:   /* members in merge */
1506:   PetscCall(PetscFree(id_r));
1507:   PetscCall(PetscFree(len_r));
1508:   PetscCall(PetscFree(buf_ri[0]));
1509:   PetscCall(PetscFree(buf_ri));
1510:   PetscCall(PetscFree(buf_rj[0]));
1511:   PetscCall(PetscFree(buf_rj));
1512:   PetscCall(PetscLayoutDestroy(&rowmap));

1514:   /* attach the supporting struct to C for reuse */
1515:   C->product->data    = ptap;
1516:   C->product->destroy = MatProductCtxDestroy_MPIAIJ_PtAP;
1517:   PetscFunctionReturn(PETSC_SUCCESS);
1518: }
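
/*
   Usage sketch (illustrative): this symbolic routine is normally reached through the
   MatProduct API, which separates the symbolic and numeric phases explicitly:

     Mat C;
     PetscCall(MatProductCreate(P, A, NULL, &C));
     PetscCall(MatProductSetType(C, MATPRODUCT_AtB));
     PetscCall(MatProductSetAlgorithm(C, "nonscalable")); // selects this routine
     PetscCall(MatProductSetFill(C, PETSC_DEFAULT));
     PetscCall(MatProductSetFromOptions(C));
     PetscCall(MatProductSymbolic(C));                    // structure of C = P^T*A
     PetscCall(MatProductNumeric(C));                     // values; can be repeated
*/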

1520: PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat P, Mat A, Mat C)
1521: {
1522:   Mat_MPIAIJ          *p = (Mat_MPIAIJ *)P->data;
1523:   Mat_SeqAIJ          *c_seq;
1524:   MatProductCtx_APMPI *ptap;
1525:   Mat                  A_loc, C_loc, C_oth;
1526:   PetscInt             i, rstart, rend, cm, ncols, row;
1527:   const PetscInt      *cols;
1528:   const PetscScalar   *vals;

1530:   PetscFunctionBegin;
1531:   MatCheckProduct(C, 3);
1532:   ptap = (MatProductCtx_APMPI *)C->product->data;
1533:   PetscCheck(ptap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be computed. Missing data");
1534:   PetscCheck(ptap->A_loc, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be reused. Do not call MatProductClear()");
1535:   PetscCall(MatZeroEntries(C));

1537:   /* These matrices are obtained in MatTransposeMatMultSymbolic() */
1538:   /* 1) get R = Pd^T, Ro = Po^T */
1539:   PetscCall(MatTransposeSetPrecursor(p->A, ptap->Rd));
1540:   PetscCall(MatTranspose(p->A, MAT_REUSE_MATRIX, &ptap->Rd));
1541:   PetscCall(MatTransposeSetPrecursor(p->B, ptap->Ro));
1542:   PetscCall(MatTranspose(p->B, MAT_REUSE_MATRIX, &ptap->Ro));

1544:   /* 2) compute numeric A_loc */
1545:   PetscCall(MatMPIAIJGetLocalMat(A, MAT_REUSE_MATRIX, &ptap->A_loc));

1547:   /* 3) C_loc = Rd*A_loc, C_oth = Ro*A_loc */
1548:   A_loc = ptap->A_loc;
1549:   PetscCall(ptap->C_loc->ops->matmultnumeric(ptap->Rd, A_loc, ptap->C_loc));
1550:   PetscCall(ptap->C_oth->ops->matmultnumeric(ptap->Ro, A_loc, ptap->C_oth));
1551:   C_loc = ptap->C_loc;
1552:   C_oth = ptap->C_oth;

1554:   /* add C_loc and C_oth to C */
1555:   PetscCall(MatGetOwnershipRange(C, &rstart, &rend));

1557:   /* C_loc -> C */
1558:   cm    = C_loc->rmap->N;
1559:   c_seq = (Mat_SeqAIJ *)C_loc->data;
1560:   cols  = c_seq->j;
1561:   vals  = c_seq->a;
1562:   for (i = 0; i < cm; i++) {
1563:     ncols = c_seq->i[i + 1] - c_seq->i[i];
1564:     row   = rstart + i;
1565:     PetscCall(MatSetValues(C, 1, &row, ncols, cols, vals, ADD_VALUES));
1566:     cols += ncols;
1567:     vals += ncols;
1568:   }

1570:   /* Co -> C, off-processor part */
1571:   cm    = C_oth->rmap->N;
1572:   c_seq = (Mat_SeqAIJ *)C_oth->data;
1573:   cols  = c_seq->j;
1574:   vals  = c_seq->a;
1575:   for (i = 0; i < cm; i++) {
1576:     ncols = c_seq->i[i + 1] - c_seq->i[i];
1577:     row   = p->garray[i];
1578:     PetscCall(MatSetValues(C, 1, &row, ncols, cols, vals, ADD_VALUES));
1579:     cols += ncols;
1580:     vals += ncols;
1581:   }
1582:   PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
1583:   PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
1584:   PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
1585:   PetscFunctionReturn(PETSC_SUCCESS);
1586: }
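
/*
   Note (illustration): rows of C_oth belong to other ranks; p->garray maps a local
   off-diagonal index to the owning global row. For example, with p->garray = {1, 5, 9},
   local row 2 of C_oth is added into global row 9 of C, and MatSetValues(..., ADD_VALUES)
   together with the final assembly ships it to the owning rank.
*/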

1588: PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ(Mat P, Mat A, Mat C)
1589: {
1590:   MatMergeSeqsToMPI   *merge;
1591:   Mat_MPIAIJ          *p  = (Mat_MPIAIJ *)P->data;
1592:   Mat_SeqAIJ          *pd = (Mat_SeqAIJ *)p->A->data, *po = (Mat_SeqAIJ *)p->B->data;
1593:   MatProductCtx_APMPI *ap;
1594:   PetscInt            *adj;
1595:   PetscInt             i, j, k, anz, pnz, row, *cj, nexta;
1596:   MatScalar           *ada, *ca, valtmp;
1597:   PetscInt             am = A->rmap->n, cm = C->rmap->n, pon = (p->B)->cmap->n;
1598:   MPI_Comm             comm;
1599:   PetscMPIInt          size, rank, taga, *len_s, proc;
1600:   PetscInt            *owners, nrows, **buf_ri_k, **nextrow, **nextci;
1601:   PetscInt           **buf_ri, **buf_rj;
1602:   PetscInt             cnz = 0, *bj_i, *bi, *bj, bnz, nextcj; /* bi,bj,ba: local array of C(mpi mat) */
1603:   MPI_Request         *s_waits, *r_waits;
1604:   MPI_Status          *status;
1605:   MatScalar          **abuf_r, *ba_i, *pA, *coa, *ba;
1606:   const PetscScalar   *dummy;
1607:   PetscInt            *ai, *aj, *coi, *coj, *poJ, *pdJ;
1608:   Mat                  A_loc;
1609:   Mat_SeqAIJ          *a_loc;

1611:   PetscFunctionBegin;
1612:   MatCheckProduct(C, 3);
1613:   ap = (MatProductCtx_APMPI *)C->product->data;
1614:   PetscCheck(ap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be computed. Missing data");
1615:   PetscCheck(ap->A_loc, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be reused. Do not call MatProductClear()");
1616:   PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
1617:   PetscCallMPI(MPI_Comm_size(comm, &size));
1618:   PetscCallMPI(MPI_Comm_rank(comm, &rank));

1620:   merge = ap->merge;

1622:   /* 2) compute numeric C_seq = P_loc^T*A_loc */
1623:   /* get data from symbolic products */
1624:   coi = merge->coi;
1625:   coj = merge->coj;
1626:   PetscCall(PetscCalloc1(coi[pon], &coa));
1627:   bi     = merge->bi;
1628:   bj     = merge->bj;
1629:   owners = merge->rowmap->range;
1630:   PetscCall(PetscCalloc1(bi[cm], &ba));

1632:   /* get A_loc by taking all local rows of A */
1633:   A_loc = ap->A_loc;
1634:   PetscCall(MatMPIAIJGetLocalMat(A, MAT_REUSE_MATRIX, &A_loc));
1635:   a_loc = (Mat_SeqAIJ *)A_loc->data;
1636:   ai    = a_loc->i;
1637:   aj    = a_loc->j;

1639:   /* trigger copy to CPU */
1640:   PetscCall(MatSeqAIJGetArrayRead(p->A, &dummy));
1641:   PetscCall(MatSeqAIJRestoreArrayRead(p->A, &dummy));
1642:   PetscCall(MatSeqAIJGetArrayRead(p->B, &dummy));
1643:   PetscCall(MatSeqAIJRestoreArrayRead(p->B, &dummy));
1644:   for (i = 0; i < am; i++) {
1645:     anz = ai[i + 1] - ai[i];
1646:     adj = aj + ai[i];
1647:     ada = a_loc->a + ai[i];

1649:     /* 2-b) Compute Cseq = P_loc[i,:]^T*A[i,:] using outer product */
1650:     /* put the value into Co=(p->B)^T*A (off-diagonal part, send to others) */
1651:     pnz = po->i[i + 1] - po->i[i];
1652:     poJ = po->j + po->i[i];
1653:     pA  = po->a + po->i[i];
1654:     for (j = 0; j < pnz; j++) {
1655:       row = poJ[j];
1656:       cj  = coj + coi[row];
1657:       ca  = coa + coi[row];
1658:       /* perform sparse axpy */
1659:       nexta  = 0;
1660:       valtmp = pA[j];
1661:       for (k = 0; nexta < anz; k++) {
1662:         if (cj[k] == adj[nexta]) {
1663:           ca[k] += valtmp * ada[nexta];
1664:           nexta++;
1665:         }
1666:       }
1667:       PetscCall(PetscLogFlops(2.0 * anz));
1668:     }

1670:     /* put the value into Cd (diagonal part) */
1671:     pnz = pd->i[i + 1] - pd->i[i];
1672:     pdJ = pd->j + pd->i[i];
1673:     pA  = pd->a + pd->i[i];
1674:     for (j = 0; j < pnz; j++) {
1675:       row = pdJ[j];
1676:       cj  = bj + bi[row];
1677:       ca  = ba + bi[row];
1678:       /* perform sparse axpy */
1679:       nexta  = 0;
1680:       valtmp = pA[j];
1681:       for (k = 0; nexta < anz; k++) {
1682:         if (cj[k] == adj[nexta]) {
1683:           ca[k] += valtmp * ada[nexta];
1684:           nexta++;
1685:         }
1686:       }
1687:       PetscCall(PetscLogFlops(2.0 * anz));
1688:     }
1689:   }
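
  /* Worked example of the sparse axpy above (illustration): by construction of the
     symbolic phase, the column pattern cj of the C row contains the pattern adj of the
     A row, so one forward scan matches them. With
         adj = {0, 4, 7} and cj = {0, 2, 4, 5, 7, 8}
     the loop performs ca[0] += valtmp*ada[0], ca[2] += valtmp*ada[1],
     ca[4] += valtmp*ada[2], and stops once nexta == anz. */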

1691:   /* 3) send and recv matrix values coa */
1692:   buf_ri = merge->buf_ri;
1693:   buf_rj = merge->buf_rj;
1694:   len_s  = merge->len_s;
1695:   PetscCall(PetscCommGetNewTag(comm, &taga));
1696:   PetscCall(PetscPostIrecvScalar(comm, taga, merge->nrecv, merge->id_r, merge->len_r, &abuf_r, &r_waits));

1698:   PetscCall(PetscMalloc2(merge->nsend, &s_waits, size, &status));
1699:   for (proc = 0, k = 0; proc < size; proc++) {
1700:     if (!len_s[proc]) continue;
1701:     i = merge->owners_co[proc];
1702:     PetscCallMPI(MPIU_Isend(coa + coi[i], len_s[proc], MPIU_MATSCALAR, proc, taga, comm, s_waits + k));
1703:     k++;
1704:   }
1705:   if (merge->nrecv) PetscCallMPI(MPI_Waitall(merge->nrecv, r_waits, status));
1706:   if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend, s_waits, status));

1708:   PetscCall(PetscFree2(s_waits, status));
1709:   PetscCall(PetscFree(r_waits));
1710:   PetscCall(PetscFree(coa));

1712:   /* 4) insert local Cseq and received values into Cmpi */
1713:   PetscCall(PetscMalloc3(merge->nrecv, &buf_ri_k, merge->nrecv, &nextrow, merge->nrecv, &nextci));
1714:   for (k = 0; k < merge->nrecv; k++) {
1715:     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
1716:     nrows       = *buf_ri_k[k];
1717:     nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th received i-structure */
1718:     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure  */
1719:   }

1721:   for (i = 0; i < cm; i++) {
1722:     row  = owners[rank] + i; /* global row index of C_seq */
1723:     bj_i = bj + bi[i];       /* col indices of the i-th row of C */
1724:     ba_i = ba + bi[i];
1725:     bnz  = bi[i + 1] - bi[i];
1726:     /* add received vals into ba */
1727:     for (k = 0; k < merge->nrecv; k++) { /* k-th received message */
1728:       /* i-th row */
1729:       if (i == *nextrow[k]) {
1730:         cnz    = *(nextci[k] + 1) - *nextci[k];
1731:         cj     = buf_rj[k] + *nextci[k];
1732:         ca     = abuf_r[k] + *nextci[k];
1733:         nextcj = 0;
1734:         for (j = 0; nextcj < cnz; j++) {
1735:           if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
1736:             ba_i[j] += ca[nextcj++];
1737:           }
1738:         }
1739:         nextrow[k]++;
1740:         nextci[k]++;
1741:         PetscCall(PetscLogFlops(2.0 * cnz));
1742:       }
1743:     }
1744:     PetscCall(MatSetValues(C, 1, &row, bnz, bj_i, ba_i, INSERT_VALUES));
1745:   }
1746:   PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
1747:   PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));

1749:   PetscCall(PetscFree(ba));
1750:   PetscCall(PetscFree(abuf_r[0]));
1751:   PetscCall(PetscFree(abuf_r));
1752:   PetscCall(PetscFree3(buf_ri_k, nextrow, nextci));
1753:   PetscFunctionReturn(PETSC_SUCCESS);
1754: }

1756: PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(Mat P, Mat A, PetscReal fill, Mat C)
1757: {
1758:   Mat                  A_loc;
1759:   MatProductCtx_APMPI *ap;
1760:   PetscFreeSpaceList   free_space = NULL, current_space = NULL;
1761:   Mat_MPIAIJ          *p = (Mat_MPIAIJ *)P->data, *a = (Mat_MPIAIJ *)A->data;
1762:   PetscInt            *pdti, *pdtj, *poti, *potj, *ptJ;
1763:   PetscInt             nnz;
1764:   PetscInt            *lnk, *owners_co, *coi, *coj, i, k, pnz, row;
1765:   PetscInt             am = A->rmap->n, pn = P->cmap->n;
1766:   MPI_Comm             comm;
1767:   PetscMPIInt          size, rank, tagi, tagj, *len_si, *len_s, *len_ri, proc;
1768:   PetscInt           **buf_rj, **buf_ri, **buf_ri_k;
1769:   PetscInt             len, *dnz, *onz, *owners;
1770:   PetscInt             nzi, *bi, *bj;
1771:   PetscInt             nrows, *buf_s, *buf_si, *buf_si_i, **nextrow, **nextci;
1772:   MPI_Request         *swaits, *rwaits;
1773:   MPI_Status          *sstatus, rstatus;
1774:   MatMergeSeqsToMPI   *merge;
1775:   PetscInt            *ai, *aj, *Jptr, anz, *prmap = p->garray, pon, nspacedouble = 0, j;
1776:   PetscReal            afill  = 1.0, afill_tmp;
1777:   PetscInt             rstart = P->cmap->rstart, rmax, Armax;
1778:   Mat_SeqAIJ          *a_loc;
1779:   PetscHMapI           ta;
1780:   MatType              mtype;

1782:   PetscFunctionBegin;
1783:   PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
1784:   /* check if matrix local sizes are compatible */
1785:   PetscCheck(A->rmap->rstart == P->rmap->rstart && A->rmap->rend == P->rmap->rend, comm, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, A (%" PetscInt_FMT ", %" PetscInt_FMT ") != P (%" PetscInt_FMT ",%" PetscInt_FMT ")", A->rmap->rstart,
1786:              A->rmap->rend, P->rmap->rstart, P->rmap->rend);

1788:   PetscCallMPI(MPI_Comm_size(comm, &size));
1789:   PetscCallMPI(MPI_Comm_rank(comm, &rank));

1791:   /* create struct MatProductCtx_APMPI and attach it to C later */
1792:   PetscCall(PetscNew(&ap));

1794:   /* get A_loc by taking all local rows of A */
1795:   PetscCall(MatMPIAIJGetLocalMat(A, MAT_INITIAL_MATRIX, &A_loc));

1797:   ap->A_loc = A_loc;
1798:   a_loc     = (Mat_SeqAIJ *)A_loc->data;
1799:   ai        = a_loc->i;
1800:   aj        = a_loc->j;

1802:   /* determine symbolic Co=(p->B)^T*A - send to others */
1803:   PetscCall(MatGetSymbolicTranspose_SeqAIJ(p->A, &pdti, &pdtj));
1804:   PetscCall(MatGetSymbolicTranspose_SeqAIJ(p->B, &poti, &potj));
1805:   pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors
1806:                          >= (num of nonzero rows of C_seq) - pn */
1807:   PetscCall(PetscMalloc1(pon + 1, &coi));
1808:   coi[0] = 0;

1810:   /* set initial free space to be fill*(nnz(p->B) + nnz(A)) */
1811:   nnz = PetscRealIntMultTruncate(fill, PetscIntSumTruncate(poti[pon], ai[am]));
1812:   PetscCall(PetscFreeSpaceGet(nnz, &free_space));
1813:   current_space = free_space;

1815:   /* create and initialize a linked list */
1816:   PetscCall(PetscHMapICreateWithSize(A->cmap->n + a->B->cmap->N, &ta));
1817:   MatRowMergeMax_SeqAIJ(a_loc, am, ta);
1818:   PetscCall(PetscHMapIGetSize(ta, &Armax));

1820:   PetscCall(PetscLLCondensedCreate_Scalable(Armax, &lnk));

1822:   for (i = 0; i < pon; i++) {
1823:     pnz = poti[i + 1] - poti[i];
1824:     ptJ = potj + poti[i];
1825:     for (j = 0; j < pnz; j++) {
1826:       row  = ptJ[j]; /* row of A_loc == col of Pot */
1827:       anz  = ai[row + 1] - ai[row];
1828:       Jptr = aj + ai[row];
1829:       /* add non-zero cols of AP into the sorted linked list lnk */
1830:       PetscCall(PetscLLCondensedAddSorted_Scalable(anz, Jptr, lnk));
1831:     }
1832:     nnz = lnk[0];

1834:     /* If free space is not available, double the total space in the list */
1835:     if (current_space->local_remaining < nnz) {
1836:       PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(nnz, current_space->total_array_size), &current_space));
1837:       nspacedouble++;
1838:     }

1840:     /* Copy data into free space, and zero out dense rows */
1841:     PetscCall(PetscLLCondensedClean_Scalable(nnz, current_space->array, lnk));

1843:     current_space->array += nnz;
1844:     current_space->local_used += nnz;
1845:     current_space->local_remaining -= nnz;

1847:     coi[i + 1] = coi[i] + nnz;
1848:   }

1850:   PetscCall(PetscMalloc1(coi[pon], &coj));
1851:   PetscCall(PetscFreeSpaceContiguous(&free_space, coj));
1852:   PetscCall(PetscLLCondensedDestroy_Scalable(lnk)); /* must destroy to get a new one for C */

1854:   afill_tmp = (PetscReal)coi[pon] / (poti[pon] + ai[am] + 1);
1855:   if (afill_tmp > afill) afill = afill_tmp;

1857:   /* send j-array (coj) of Co to other processors */
1858:   /* determine row ownership */
1859:   PetscCall(PetscNew(&merge));
1860:   PetscCall(PetscLayoutCreate(comm, &merge->rowmap));

1862:   merge->rowmap->n  = pn;
1863:   merge->rowmap->bs = 1;

1865:   PetscCall(PetscLayoutSetUp(merge->rowmap));
1866:   owners = merge->rowmap->range;

1868:   /* determine the number of messages to send, their lengths */
1869:   PetscCall(PetscCalloc1(size, &len_si));
1870:   PetscCall(PetscCalloc1(size, &merge->len_s));

1872:   len_s        = merge->len_s;
1873:   merge->nsend = 0;

1875:   PetscCall(PetscMalloc1(size + 1, &owners_co));

1877:   proc = 0;
1878:   for (i = 0; i < pon; i++) {
1879:     while (prmap[i] >= owners[proc + 1]) proc++;
1880:     len_si[proc]++; /* num of rows in Co to be sent to [proc] */
1881:     len_s[proc] += coi[i + 1] - coi[i];
1882:   }

1884:   len          = 0; /* max length of buf_si[] */
1885:   owners_co[0] = 0;
1886:   for (proc = 0; proc < size; proc++) {
1887:     owners_co[proc + 1] = owners_co[proc] + len_si[proc];
1888:     if (len_s[proc]) {
1889:       merge->nsend++;
1890:       len_si[proc] = 2 * (len_si[proc] + 1);
1891:       len += len_si[proc];
1892:     }
1893:   }

1895:   /* determine the number and length of messages to receive for coi and coj  */
1896:   PetscCall(PetscGatherNumberOfMessages(comm, NULL, len_s, &merge->nrecv));
1897:   PetscCall(PetscGatherMessageLengths2(comm, merge->nsend, merge->nrecv, len_s, len_si, &merge->id_r, &merge->len_r, &len_ri));

1899:   /* post the Irecv and Isend of coj */
1900:   PetscCall(PetscCommGetNewTag(comm, &tagj));
1901:   PetscCall(PetscPostIrecvInt(comm, tagj, merge->nrecv, merge->id_r, merge->len_r, &buf_rj, &rwaits));
1902:   PetscCall(PetscMalloc1(merge->nsend, &swaits));
1903:   for (proc = 0, k = 0; proc < size; proc++) {
1904:     if (!len_s[proc]) continue;
1905:     i = owners_co[proc];
1906:     PetscCallMPI(MPIU_Isend(coj + coi[i], len_s[proc], MPIU_INT, proc, tagj, comm, swaits + k));
1907:     k++;
1908:   }

1910:   /* receives and sends of coj are complete */
1911:   PetscCall(PetscMalloc1(size, &sstatus));
1912:   for (i = 0; i < merge->nrecv; i++) {
1913:     PETSC_UNUSED PetscMPIInt icompleted;
1914:     PetscCallMPI(MPI_Waitany(merge->nrecv, rwaits, &icompleted, &rstatus));
1915:   }
1916:   PetscCall(PetscFree(rwaits));
1917:   if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend, swaits, sstatus));

1919:   /* add received column indices into table to update Armax */
1920:   /* Armax can be as large as aN if a P[row,:] is dense, see src/ksp/ksp/tutorials/ex56.c! */
1921:   for (k = 0; k < merge->nrecv; k++) { /* k-th received message */
1922:     Jptr = buf_rj[k];
1923:     for (j = 0; j < merge->len_r[k]; j++) PetscCall(PetscHMapISet(ta, *(Jptr + j) + 1, 1));
1924:   }
1925:   PetscCall(PetscHMapIGetSize(ta, &Armax));

1927:   /* send and recv coi */
1928:   PetscCall(PetscCommGetNewTag(comm, &tagi));
1929:   PetscCall(PetscPostIrecvInt(comm, tagi, merge->nrecv, merge->id_r, len_ri, &buf_ri, &rwaits));
1930:   PetscCall(PetscMalloc1(len, &buf_s));
1931:   buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
1932:   for (proc = 0, k = 0; proc < size; proc++) {
1933:     if (!len_s[proc]) continue;
1934:     /* form outgoing message for i-structure:
1935:          buf_si[0]:                 nrows to be sent
1936:                [1:nrows]:           row index (local to the receiving process)
1937:                [nrows+1:2*nrows+1]: i-structure index
1938:     */
1939:     nrows       = len_si[proc] / 2 - 1;
1940:     buf_si_i    = buf_si + nrows + 1;
1941:     buf_si[0]   = nrows;
1942:     buf_si_i[0] = 0;
1943:     nrows       = 0;
1944:     for (i = owners_co[proc]; i < owners_co[proc + 1]; i++) {
1945:       nzi                 = coi[i + 1] - coi[i];
1946:       buf_si_i[nrows + 1] = buf_si_i[nrows] + nzi;   /* i-structure */
1947:       buf_si[nrows + 1]   = prmap[i] - owners[proc]; /* local row index */
1948:       nrows++;
1949:     }
1950:     PetscCallMPI(MPIU_Isend(buf_si, len_si[proc], MPIU_INT, proc, tagi, comm, swaits + k));
1951:     k++;
1952:     buf_si += len_si[proc];
1953:   }
1954:   i = merge->nrecv;
1955:   while (i--) {
1956:     PETSC_UNUSED PetscMPIInt icompleted;
1957:     PetscCallMPI(MPI_Waitany(merge->nrecv, rwaits, &icompleted, &rstatus));
1958:   }
1959:   PetscCall(PetscFree(rwaits));
1960:   if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend, swaits, sstatus));
1961:   PetscCall(PetscFree(len_si));
1962:   PetscCall(PetscFree(len_ri));
1963:   PetscCall(PetscFree(swaits));
1964:   PetscCall(PetscFree(sstatus));
1965:   PetscCall(PetscFree(buf_s));

1967:   /* compute the local portion of C (mpi mat) */
1968:   /* allocate bi array and free space for accumulating nonzero column info */
1969:   PetscCall(PetscMalloc1(pn + 1, &bi));
1970:   bi[0] = 0;

1972:   /* set initial free space to be fill*(nnz(P) + nnz(AP)) */
1973:   nnz = PetscRealIntMultTruncate(fill, PetscIntSumTruncate(pdti[pn], PetscIntSumTruncate(poti[pon], ai[am])));
1974:   PetscCall(PetscFreeSpaceGet(nnz, &free_space));
1975:   current_space = free_space;

1977:   PetscCall(PetscMalloc3(merge->nrecv, &buf_ri_k, merge->nrecv, &nextrow, merge->nrecv, &nextci));
1978:   for (k = 0; k < merge->nrecv; k++) {
1979:     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
1980:     nrows       = *buf_ri_k[k];
1981:     nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th received i-structure */
1982:     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure  */
1983:   }

1985:   PetscCall(PetscLLCondensedCreate_Scalable(Armax, &lnk));
1986:   MatPreallocateBegin(comm, pn, A->cmap->n, dnz, onz);
1987:   rmax = 0;
1988:   for (i = 0; i < pn; i++) {
1989:     /* add pdt[i,:]*AP into lnk */
1990:     pnz = pdti[i + 1] - pdti[i];
1991:     ptJ = pdtj + pdti[i];
1992:     for (j = 0; j < pnz; j++) {
1993:       row  = ptJ[j]; /* row of AP == col of Pt */
1994:       anz  = ai[row + 1] - ai[row];
1995:       Jptr = aj + ai[row];
1996:       /* add non-zero cols of AP into the sorted linked list lnk */
1997:       PetscCall(PetscLLCondensedAddSorted_Scalable(anz, Jptr, lnk));
1998:     }

2000:     /* add received col data into lnk */
2001:     for (k = 0; k < merge->nrecv; k++) { /* k-th received message */
2002:       if (i == *nextrow[k]) {            /* i-th row */
2003:         nzi  = *(nextci[k] + 1) - *nextci[k];
2004:         Jptr = buf_rj[k] + *nextci[k];
2005:         PetscCall(PetscLLCondensedAddSorted_Scalable(nzi, Jptr, lnk));
2006:         nextrow[k]++;
2007:         nextci[k]++;
2008:       }
2009:     }

2011:     /* add missing diagonal entry */
2012:     if (C->force_diagonals) {
2013:       k = i + owners[rank]; /* column index */
2014:       PetscCall(PetscLLCondensedAddSorted_Scalable(1, &k, lnk));
2015:     }

2017:     nnz = lnk[0];

2019:     /* if free space is not available, make more free space */
2020:     if (current_space->local_remaining < nnz) {
2021:       PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(nnz, current_space->total_array_size), &current_space));
2022:       nspacedouble++;
2023:     }
2024:     /* copy data into free space, then initialize lnk */
2025:     PetscCall(PetscLLCondensedClean_Scalable(nnz, current_space->array, lnk));
2026:     PetscCall(MatPreallocateSet(i + owners[rank], nnz, current_space->array, dnz, onz));

2028:     current_space->array += nnz;
2029:     current_space->local_used += nnz;
2030:     current_space->local_remaining -= nnz;

2032:     bi[i + 1] = bi[i] + nnz;
2033:     if (nnz > rmax) rmax = nnz;
2034:   }
2035:   PetscCall(PetscFree3(buf_ri_k, nextrow, nextci));

2037:   PetscCall(PetscMalloc1(bi[pn], &bj));
2038:   PetscCall(PetscFreeSpaceContiguous(&free_space, bj));
2039:   afill_tmp = (PetscReal)bi[pn] / (pdti[pn] + poti[pon] + ai[am] + 1);
2040:   if (afill_tmp > afill) afill = afill_tmp;
2041:   PetscCall(PetscLLCondensedDestroy_Scalable(lnk));
2042:   PetscCall(PetscHMapIDestroy(&ta));
2043:   PetscCall(MatRestoreSymbolicTranspose_SeqAIJ(p->A, &pdti, &pdtj));
2044:   PetscCall(MatRestoreSymbolicTranspose_SeqAIJ(p->B, &poti, &potj));

2046:   /* create symbolic parallel matrix C - why can this not be assembled in the numeric part? */
2047:   PetscCall(MatSetSizes(C, pn, A->cmap->n, PETSC_DETERMINE, PETSC_DETERMINE));
2048:   PetscCall(MatSetBlockSizes(C, P->cmap->bs, A->cmap->bs));
2049:   PetscCall(MatGetType(A, &mtype));
2050:   PetscCall(MatSetType(C, mtype));
2051:   PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz));
2052:   MatPreallocateEnd(dnz, onz);
2053:   PetscCall(MatSetBlockSize(C, 1));
2054:   PetscCall(MatSetOption(C, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
2055:   for (i = 0; i < pn; i++) {
2056:     row  = i + rstart;
2057:     nnz  = bi[i + 1] - bi[i];
2058:     Jptr = bj + bi[i];
2059:     PetscCall(MatSetValues(C, 1, &row, nnz, Jptr, NULL, INSERT_VALUES));
2060:   }
2061:   PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
2062:   PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
2063:   PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
2064:   merge->bi        = bi;
2065:   merge->bj        = bj;
2066:   merge->coi       = coi;
2067:   merge->coj       = coj;
2068:   merge->buf_ri    = buf_ri;
2069:   merge->buf_rj    = buf_rj;
2070:   merge->owners_co = owners_co;

2072:   /* attach the supporting struct to C for reuse */
2073:   C->product->data    = ap;
2074:   C->product->destroy = MatProductCtxDestroy_MPIAIJ_PtAP;
2075:   ap->merge           = merge;

2077:   C->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ;

2079: #if defined(PETSC_USE_INFO)
2080:   if (bi[pn] != 0) {
2081:     PetscCall(PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, (double)afill));
2082:     PetscCall(PetscInfo(C, "Use MatTransposeMatMult(A,B,MatReuse,%g,&C) for best performance.\n", (double)afill));
2083:   } else {
2084:     PetscCall(PetscInfo(C, "Empty matrix product\n"));
2085:   }
2086: #endif
2087:   PetscFunctionReturn(PETSC_SUCCESS);
2088: }
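
/*
   Sketch (simplified; not the PetscLLCondensed implementation): the symbolic loop above
   only needs, per row of C, the sorted union of several sorted index sets. A plain
   two-way merge shows the idea; PetscLLCondensedAddSorted_Scalable() obtains the same
   union in place by threading a condensed linked list through a work array, avoiding
   the repeated copying of this naive version.

     static PetscInt MergeSortedIndexSets(PetscInt na, const PetscInt *a, PetscInt nb, const PetscInt *b, PetscInt *out)
     {
       PetscInt i = 0, j = 0, n = 0;
       while (i < na && j < nb) {
         if (a[i] < b[j]) out[n++] = a[i++];
         else if (a[i] > b[j]) out[n++] = b[j++];
         else { out[n++] = a[i++]; j++; } /* duplicate column: keep one copy */
       }
       while (i < na) out[n++] = a[i++];
       while (j < nb) out[n++] = b[j++];
       return n; /* nnz of the merged row */
     }
*/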

2090: static PetscErrorCode MatProductSymbolic_AtB_MPIAIJ_MPIAIJ(Mat C)
2091: {
2092:   Mat_Product *product = C->product;
2093:   Mat          A = product->A, B = product->B;
2094:   PetscReal    fill = product->fill;
2095:   PetscBool    flg;

2097:   PetscFunctionBegin;
2098:   /* scalable */
2099:   PetscCall(PetscStrcmp(product->alg, "scalable", &flg));
2100:   if (flg) {
2101:     PetscCall(MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(A, B, fill, C));
2102:     goto next;
2103:   }

2105:   /* nonscalable */
2106:   PetscCall(PetscStrcmp(product->alg, "nonscalable", &flg));
2107:   if (flg) {
2108:     PetscCall(MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A, B, fill, C));
2109:     goto next;
2110:   }

2112:   /* matmatmult */
2113:   PetscCall(PetscStrcmp(product->alg, "at*b", &flg));
2114:   if (flg) {
2115:     Mat                  At;
2116:     MatProductCtx_APMPI *ptap;

2118:     PetscCall(MatTranspose(A, MAT_INITIAL_MATRIX, &At));
2119:     PetscCall(MatMatMultSymbolic_MPIAIJ_MPIAIJ(At, B, fill, C));
2120:     ptap = (MatProductCtx_APMPI *)C->product->data;
2121:     if (ptap) {
2122:       ptap->Pt            = At;
2123:       C->product->destroy = MatProductCtxDestroy_MPIAIJ_PtAP;
2124:     }
2125:     C->ops->transposematmultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult;
2126:     goto next;
2127:   }

2129:   /* backend general code */
2130:   PetscCall(PetscStrcmp(product->alg, "backend", &flg));
2131:   if (flg) {
2132:     PetscCall(MatProductSymbolic_MPIAIJBACKEND(C));
2133:     PetscFunctionReturn(PETSC_SUCCESS);
2134:   }

2136:   SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MatProduct algorithm is not supported");

2138: next:
2139:   C->ops->productnumeric = MatProductNumeric_AtB;
2140:   PetscFunctionReturn(PETSC_SUCCESS);
2141: }

2143: /* Set options for MatMatMultxxx_MPIAIJ_MPIAIJ */
2144: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_AB(Mat C)
2145: {
2146:   Mat_Product *product = C->product;
2147:   Mat          A = product->A, B = product->B;
2148: #if defined(PETSC_HAVE_HYPRE)
2149:   const char *algTypes[5] = {"scalable", "nonscalable", "seqmpi", "backend", "hypre"};
2150:   PetscInt    nalg        = 5;
2151: #else
2152:   const char *algTypes[4] = {
2153:     "scalable",
2154:     "nonscalable",
2155:     "seqmpi",
2156:     "backend",
2157:   };
2158:   PetscInt nalg = 4;
2159: #endif
2160:   PetscInt  alg = 1; /* set nonscalable algorithm as default */
2161:   PetscBool flg;
2162:   MPI_Comm  comm;

2164:   PetscFunctionBegin;
2165:   PetscCall(PetscObjectGetComm((PetscObject)C, &comm));

2167:   /* Set "nonscalable" as default algorithm */
2168:   PetscCall(PetscStrcmp(C->product->alg, "default", &flg));
2169:   if (flg) {
2170:     PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));

2172:     /* Set "scalable" as default if BN and local nonzeros of A and B are large */
2173:     if (B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
2174:       MatInfo   Ainfo, Binfo;
2175:       PetscInt  nz_local;
2176:       PetscBool alg_scalable_loc = PETSC_FALSE, alg_scalable;

2178:       PetscCall(MatGetInfo(A, MAT_LOCAL, &Ainfo));
2179:       PetscCall(MatGetInfo(B, MAT_LOCAL, &Binfo));
2180:       nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);

2182:       if (B->cmap->N > product->fill * nz_local) alg_scalable_loc = PETSC_TRUE;
2183:       PetscCallMPI(MPIU_Allreduce(&alg_scalable_loc, &alg_scalable, 1, MPI_C_BOOL, MPI_LOR, comm));

2185:       if (alg_scalable) {
2186:         alg = 0; /* the scalable algorithm can be ~50% slower than the nonscalable one */
2187:         PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2188:         PetscCall(PetscInfo(B, "Use scalable algorithm, BN %" PetscInt_FMT ", fill*nz_allocated %g\n", B->cmap->N, (double)(product->fill * nz_local)));
2189:       }
2190:     }
2191:   }

2193:   /* Get runtime option */
2194:   if (product->api_user) {
2195:     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatMatMult", "Mat");
2196:     PetscCall(PetscOptionsEList("-matmatmult_via", "Algorithmic approach", "MatMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
2197:     PetscOptionsEnd();
2198:   } else {
2199:     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_AB", "Mat");
2200:     PetscCall(PetscOptionsEList("-mat_product_algorithm", "Algorithmic approach", "MatMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
2201:     PetscOptionsEnd();
2202:   }
2203:   if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));

2205:   C->ops->productsymbolic = MatProductSymbolic_AB_MPIAIJ_MPIAIJ;
2206:   PetscFunctionReturn(PETSC_SUCCESS);
2207: }
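
/*
   Runtime selection sketch: the options registered above map to command-line usage,
   e.g. (illustrative values)

     -matmatmult_via scalable           # via the MatMatMult() interface (api_user)
     -mat_product_algorithm scalable    # via the MatProduct API
*/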

2209: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_ABt(Mat C)
2210: {
2211:   PetscFunctionBegin;
2212:   PetscCall(MatProductSetFromOptions_MPIAIJ_AB(C));
2213:   C->ops->productsymbolic = MatProductSymbolic_ABt_MPIAIJ_MPIAIJ;
2214:   PetscFunctionReturn(PETSC_SUCCESS);
2215: }

2217: /* Set options for MatTransposeMatMultXXX_MPIAIJ_MPIAIJ */
2218: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_AtB(Mat C)
2219: {
2220:   Mat_Product *product = C->product;
2221:   Mat          A = product->A, B = product->B;
2222:   const char  *algTypes[4] = {"scalable", "nonscalable", "at*b", "backend"};
2223:   PetscInt     nalg        = 4;
2224:   PetscInt     alg         = 1; /* set default algorithm  */
2225:   PetscBool    flg;
2226:   MPI_Comm     comm;

2228:   PetscFunctionBegin;
2229:   /* Check matrix local sizes */
2230:   PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
2231:   PetscCheck(A->rmap->rstart == B->rmap->rstart && A->rmap->rend == B->rmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, A (%" PetscInt_FMT ", %" PetscInt_FMT ") != B (%" PetscInt_FMT ",%" PetscInt_FMT ")",
2232:              A->rmap->rstart, A->rmap->rend, B->rmap->rstart, B->rmap->rend);

2234:   /* Set default algorithm */
2235:   PetscCall(PetscStrcmp(C->product->alg, "default", &flg));
2236:   if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));

2238:   /* Set "scalable" as default if BN and local nonzeros of A and B are large */
2239:   if (alg && B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
2240:     MatInfo   Ainfo, Binfo;
2241:     PetscInt  nz_local;
2242:     PetscBool alg_scalable_loc = PETSC_FALSE, alg_scalable;

2244:     PetscCall(MatGetInfo(A, MAT_LOCAL, &Ainfo));
2245:     PetscCall(MatGetInfo(B, MAT_LOCAL, &Binfo));
2246:     nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);

2248:     if (B->cmap->N > product->fill * nz_local) alg_scalable_loc = PETSC_TRUE;
2249:     PetscCallMPI(MPIU_Allreduce(&alg_scalable_loc, &alg_scalable, 1, MPI_C_BOOL, MPI_LOR, comm));

2251:     if (alg_scalable) {
2252:       alg = 0; /* the scalable algorithm can be ~50% slower than the nonscalable one */
2253:       PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2254:       PetscCall(PetscInfo(B, "Use scalable algorithm, BN %" PetscInt_FMT ", fill*nz_allocated %g\n", B->cmap->N, (double)(product->fill * nz_local)));
2255:     }
2256:   }

2258:   /* Get runtime option */
2259:   if (product->api_user) {
2260:     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatTransposeMatMult", "Mat");
2261:     PetscCall(PetscOptionsEList("-mattransposematmult_via", "Algorithmic approach", "MatTransposeMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
2262:     PetscOptionsEnd();
2263:   } else {
2264:     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_AtB", "Mat");
2265:     PetscCall(PetscOptionsEList("-mat_product_algorithm", "Algorithmic approach", "MatTransposeMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
2266:     PetscOptionsEnd();
2267:   }
2268:   if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));

2270:   C->ops->productsymbolic = MatProductSymbolic_AtB_MPIAIJ_MPIAIJ;
2271:   PetscFunctionReturn(PETSC_SUCCESS);
2272: }

2274: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_PtAP(Mat C)
2275: {
2276:   Mat_Product *product = C->product;
2277:   Mat          A = product->A, P = product->B;
2278:   MPI_Comm     comm;
2279:   PetscBool    flg;
2280:   PetscInt     alg = 1; /* set default algorithm */
2281: #if !defined(PETSC_HAVE_HYPRE)
2282:   const char *algTypes[5] = {"scalable", "nonscalable", "allatonce", "allatonce_merged", "backend"};
2283:   PetscInt    nalg        = 5;
2284: #else
2285:   const char *algTypes[6] = {"scalable", "nonscalable", "allatonce", "allatonce_merged", "backend", "hypre"};
2286:   PetscInt    nalg        = 6;
2287: #endif
2288:   PetscInt pN = P->cmap->N;

2290:   PetscFunctionBegin;
2291:   /* Check matrix local sizes */
2292:   PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
2293:   PetscCheck(A->rmap->rstart == P->rmap->rstart && A->rmap->rend == P->rmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, Arow (%" PetscInt_FMT ", %" PetscInt_FMT ") != Prow (%" PetscInt_FMT ",%" PetscInt_FMT ")",
2294:              A->rmap->rstart, A->rmap->rend, P->rmap->rstart, P->rmap->rend);
2295:   PetscCheck(A->cmap->rstart == P->rmap->rstart && A->cmap->rend == P->rmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, Acol (%" PetscInt_FMT ", %" PetscInt_FMT ") != Prow (%" PetscInt_FMT ",%" PetscInt_FMT ")",
2296:              A->cmap->rstart, A->cmap->rend, P->rmap->rstart, P->rmap->rend);

2298:   /* Set "nonscalable" as default algorithm */
2299:   PetscCall(PetscStrcmp(C->product->alg, "default", &flg));
2300:   if (flg) {
2301:     PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));

2303:     /* Set "scalable" as default if PN and local nonzeros of A and P are large */
2304:     if (pN > 100000) {
2305:       MatInfo   Ainfo, Pinfo;
2306:       PetscInt  nz_local;
2307:       PetscBool alg_scalable_loc = PETSC_FALSE, alg_scalable;

2309:       PetscCall(MatGetInfo(A, MAT_LOCAL, &Ainfo));
2310:       PetscCall(MatGetInfo(P, MAT_LOCAL, &Pinfo));
2311:       nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated);

2313:       if (pN > product->fill * nz_local) alg_scalable_loc = PETSC_TRUE;
2314:       PetscCallMPI(MPIU_Allreduce(&alg_scalable_loc, &alg_scalable, 1, MPI_C_BOOL, MPI_LOR, comm));

2316:       if (alg_scalable) {
2317:         alg = 0; /* the scalable algorithm can be ~50% slower than the nonscalable one */
2318:         PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2319:       }
2320:     }
2321:   }

2323:   /* Get runtime option */
2324:   if (product->api_user) {
2325:     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatPtAP", "Mat");
2326:     PetscCall(PetscOptionsEList("-matptap_via", "Algorithmic approach", "MatPtAP", algTypes, nalg, algTypes[alg], &alg, &flg));
2327:     PetscOptionsEnd();
2328:   } else {
2329:     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_PtAP", "Mat");
2330:     PetscCall(PetscOptionsEList("-mat_product_algorithm", "Algorithmic approach", "MatPtAP", algTypes, nalg, algTypes[alg], &alg, &flg));
2331:     PetscOptionsEnd();
2332:   }
2333:   if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));

2335:   C->ops->productsymbolic = MatProductSymbolic_PtAP_MPIAIJ_MPIAIJ;
2336:   PetscFunctionReturn(PETSC_SUCCESS);
2337: }

2339: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_RARt(Mat C)
2340: {
2341:   Mat_Product *product = C->product;
2342:   Mat          A = product->A, R = product->B;

2344:   PetscFunctionBegin;
2345:   /* Check matrix local sizes */
2346:   PetscCheck(A->cmap->n == R->cmap->n && A->rmap->n == R->cmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, A local (%" PetscInt_FMT ", %" PetscInt_FMT "), R local (%" PetscInt_FMT ",%" PetscInt_FMT ")", A->rmap->n,
2347:              A->cmap->n, R->rmap->n, R->cmap->n);

2349:   C->ops->productsymbolic = MatProductSymbolic_RARt_MPIAIJ_MPIAIJ;
2350:   PetscFunctionReturn(PETSC_SUCCESS);
2351: }

2353: /*
2354:  Set options for ABC = A*B*C = A*(B*C); ABC's algorithm must be chosen from AB's algorithms
2355: */
2356: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_ABC(Mat C)
2357: {
2358:   Mat_Product *product     = C->product;
2359:   PetscBool    flg         = PETSC_FALSE;
2360:   PetscInt     alg         = 1; /* default algorithm */
2361:   const char  *algTypes[3] = {"scalable", "nonscalable", "seqmpi"};
2362:   PetscInt     nalg        = 3;

2364:   PetscFunctionBegin;
2365:   /* Set default algorithm */
2366:   PetscCall(PetscStrcmp(C->product->alg, "default", &flg));
2367:   if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));

2369:   /* Get runtime option */
2370:   if (product->api_user) {
2371:     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatMatMatMult", "Mat");
2372:     PetscCall(PetscOptionsEList("-matmatmatmult_via", "Algorithmic approach", "MatMatMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
2373:     PetscOptionsEnd();
2374:   } else {
2375:     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_ABC", "Mat");
2376:     PetscCall(PetscOptionsEList("-mat_product_algorithm", "Algorithmic approach", "MatProduct_ABC", algTypes, nalg, algTypes[alg], &alg, &flg));
2377:     PetscOptionsEnd();
2378:   }
2379:   if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));

2381:   C->ops->matmatmultsymbolic = MatMatMatMultSymbolic_MPIAIJ_MPIAIJ_MPIAIJ;
2382:   C->ops->productsymbolic    = MatProductSymbolic_ABC;
2383:   PetscFunctionReturn(PETSC_SUCCESS);
2384: }

2386: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ(Mat C)
2387: {
2388:   Mat_Product *product = C->product;

2390:   PetscFunctionBegin;
2391:   switch (product->type) {
2392:   case MATPRODUCT_AB:
2393:     PetscCall(MatProductSetFromOptions_MPIAIJ_AB(C));
2394:     break;
2395:   case MATPRODUCT_ABt:
2396:     PetscCall(MatProductSetFromOptions_MPIAIJ_ABt(C));
2397:     break;
2398:   case MATPRODUCT_AtB:
2399:     PetscCall(MatProductSetFromOptions_MPIAIJ_AtB(C));
2400:     break;
2401:   case MATPRODUCT_PtAP:
2402:     PetscCall(MatProductSetFromOptions_MPIAIJ_PtAP(C));
2403:     break;
2404:   case MATPRODUCT_RARt:
2405:     PetscCall(MatProductSetFromOptions_MPIAIJ_RARt(C));
2406:     break;
2407:   case MATPRODUCT_ABC:
2408:     PetscCall(MatProductSetFromOptions_MPIAIJ_ABC(C));
2409:     break;
2410:   default:
2411:     break;
2412:   }
2413:   PetscFunctionReturn(PETSC_SUCCESS);
2414: }