Actual source code: mpiaij.c

  1: #include <../src/mat/impls/aij/mpi/mpiaij.h>
  2: #include <petsc/private/vecimpl.h>
  3: #include <petsc/private/sfimpl.h>
  4: #include <petsc/private/isimpl.h>
  5: #include <petscblaslapack.h>
  6: #include <petscsf.h>
  7: #include <petsc/private/hashmapi.h>

  9: /*MC
 10:    MATAIJ - MATAIJ = "aij" - A matrix type to be used for sparse matrices.

 12:    This matrix type is identical to MATSEQAIJ when constructed with a single process communicator,
 13:    and MATMPIAIJ otherwise.  As a result, for single process communicators,
 14:    MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
 15:    for communicators controlling multiple processes.  It is recommended that you call both of
 16:    the above preallocation routines for simplicity.

 18:    Options Database Keys:
 19: . -mat_type aij - sets the matrix type to "aij" during a call to MatSetFromOptions()

 21:   Developer Notes:
 22:     Subclasses include MATAIJCUSPARSE, MATAIJPERM, MATAIJSELL, MATAIJMKL, and MATAIJCRL; the type also automatically switches over to use inodes when
 23:    enough exist.

 25:   Level: beginner

 27: .seealso: MatCreateAIJ(), MatCreateSeqAIJ(), MATSEQAIJ, MATMPIAIJ
 28: M*/
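
/* A minimal usage sketch of the recommendation above (the sizes, the per-row nonzero counts, and the
   helper name ExampleCreateAIJ are illustrative assumptions only): calling both preallocation
   routines lets the same code work for one- and multi-process communicators. */
#if 0
static PetscErrorCode ExampleCreateAIJ(MPI_Comm comm,Mat *A)
{
  PetscCall(MatCreate(comm,A));
  PetscCall(MatSetSizes(*A,PETSC_DECIDE,PETSC_DECIDE,100,100));
  PetscCall(MatSetType(*A,MATAIJ));
  PetscCall(MatSeqAIJSetPreallocation(*A,5,NULL));          /* used when comm has a single process   */
  PetscCall(MatMPIAIJSetPreallocation(*A,5,NULL,2,NULL));   /* used when comm has multiple processes */
  return 0;
}
#endif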

 30: /*MC
 31:    MATAIJCRL - MATAIJCRL = "aijcrl" - A matrix type to be used for sparse matrices.

 33:    This matrix type is identical to MATSEQAIJCRL when constructed with a single process communicator,
 34:    and MATMPIAIJCRL otherwise.  As a result, for single process communicators,
 35:    MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
 36:    for communicators controlling multiple processes.  It is recommended that you call both of
 37:    the above preallocation routines for simplicity.

 39:    Options Database Keys:
 40: . -mat_type aijcrl - sets the matrix type to "aijcrl" during a call to MatSetFromOptions()

 42:   Level: beginner

 44: .seealso: MatCreateMPIAIJCRL(), MATSEQAIJCRL, MATMPIAIJCRL
 45: M*/
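
/* A minimal sketch of selecting "aijcrl" through the options database key documented above
   (run with -mat_type aijcrl); the helper name ExampleCreateFromOptions and the sizes and
   nonzero counts are illustrative assumptions only. */
#if 0
static PetscErrorCode ExampleCreateFromOptions(MPI_Comm comm,Mat *A)
{
  PetscCall(MatCreate(comm,A));
  PetscCall(MatSetSizes(*A,PETSC_DECIDE,PETSC_DECIDE,100,100));
  PetscCall(MatSetFromOptions(*A));                         /* honors -mat_type aijcrl     */
  PetscCall(MatSeqAIJSetPreallocation(*A,5,NULL));          /* single-process communicator */
  PetscCall(MatMPIAIJSetPreallocation(*A,5,NULL,2,NULL));   /* multi-process communicator  */
  return 0;
}
#endif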

 47: static PetscErrorCode MatBindToCPU_MPIAIJ(Mat A,PetscBool flg)
 48: {
 49:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

 51: #if defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_VIENNACL)
 52:   A->boundtocpu = flg;
 53: #endif
 54:   if (a->A) {
 55:     MatBindToCPU(a->A,flg);
 56:   }
 57:   if (a->B) {
 58:     MatBindToCPU(a->B,flg);
 59:   }

 61:   /* In addition to binding the diagonal and off-diagonal matrices, bind the local vectors used for matrix-vector products.
 62:    * This may seem a little odd for a MatBindToCPU() call to do, but it makes no sense for the binding of these vectors
 63:    * to differ from the parent matrix. */
 64:   if (a->lvec) {
 65:     VecBindToCPU(a->lvec,flg);
 66:   }
 67:   if (a->diag) {
 68:     VecBindToCPU(a->diag,flg);
 69:   }

 71:   return 0;
 72: }

 74: PetscErrorCode MatSetBlockSizes_MPIAIJ(Mat M, PetscInt rbs, PetscInt cbs)
 75: {
 76:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)M->data;

 78:   if (mat->A) {
 79:     MatSetBlockSizes(mat->A,rbs,cbs);
 80:     MatSetBlockSizes(mat->B,rbs,1);
 81:   }
 82:   return 0;
 83: }

 85: PetscErrorCode MatFindNonzeroRows_MPIAIJ(Mat M,IS *keptrows)
 86: {
 87:   Mat_MPIAIJ      *mat = (Mat_MPIAIJ*)M->data;
 88:   Mat_SeqAIJ      *a   = (Mat_SeqAIJ*)mat->A->data;
 89:   Mat_SeqAIJ      *b   = (Mat_SeqAIJ*)mat->B->data;
 90:   const PetscInt  *ia,*ib;
 91:   const MatScalar *aa,*bb,*aav,*bav;
 92:   PetscInt        na,nb,i,j,*rows,cnt=0,n0rows;
 93:   PetscInt        m = M->rmap->n,rstart = M->rmap->rstart;

 95:   *keptrows = NULL;

 97:   ia   = a->i;
 98:   ib   = b->i;
 99:   MatSeqAIJGetArrayRead(mat->A,&aav);
100:   MatSeqAIJGetArrayRead(mat->B,&bav);
101:   for (i=0; i<m; i++) {
102:     na = ia[i+1] - ia[i];
103:     nb = ib[i+1] - ib[i];
104:     if (!na && !nb) {
105:       cnt++;
106:       goto ok1;
107:     }
108:     aa = aav + ia[i];
109:     for (j=0; j<na; j++) {
110:       if (aa[j] != 0.0) goto ok1;
111:     }
112:     bb = bav + ib[i];
113:     for (j=0; j <nb; j++) {
114:       if (bb[j] != 0.0) goto ok1;
115:     }
116:     cnt++;
117: ok1:;
118:   }
119:   MPIU_Allreduce(&cnt,&n0rows,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)M));
120:   if (!n0rows) {
121:     MatSeqAIJRestoreArrayRead(mat->A,&aav);
122:     MatSeqAIJRestoreArrayRead(mat->B,&bav);
123:     return 0;
124:   }
125:   PetscMalloc1(M->rmap->n-cnt,&rows);
126:   cnt  = 0;
127:   for (i=0; i<m; i++) {
128:     na = ia[i+1] - ia[i];
129:     nb = ib[i+1] - ib[i];
130:     if (!na && !nb) continue;
131:     aa = aav + ia[i];
132:     for (j=0; j<na;j++) {
133:       if (aa[j] != 0.0) {
134:         rows[cnt++] = rstart + i;
135:         goto ok2;
136:       }
137:     }
138:     bb = bav + ib[i];
139:     for (j=0; j<nb; j++) {
140:       if (bb[j] != 0.0) {
141:         rows[cnt++] = rstart + i;
142:         goto ok2;
143:       }
144:     }
145: ok2:;
146:   }
147:   ISCreateGeneral(PetscObjectComm((PetscObject)M),cnt,rows,PETSC_OWN_POINTER,keptrows);
148:   MatSeqAIJRestoreArrayRead(mat->A,&aav);
149:   MatSeqAIJRestoreArrayRead(mat->B,&bav);
150:   return 0;
151: }

153: PetscErrorCode  MatDiagonalSet_MPIAIJ(Mat Y,Vec D,InsertMode is)
154: {
155:   Mat_MPIAIJ        *aij = (Mat_MPIAIJ*) Y->data;
156:   PetscBool         cong;

158:   MatHasCongruentLayouts(Y,&cong);
159:   if (Y->assembled && cong) {
160:     MatDiagonalSet(aij->A,D,is);
161:   } else {
162:     MatDiagonalSet_Default(Y,D,is);
163:   }
164:   return 0;
165: }

167: PetscErrorCode MatFindZeroDiagonals_MPIAIJ(Mat M,IS *zrows)
168: {
169:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)M->data;
170:   PetscInt       i,rstart,nrows,*rows;

172:   *zrows = NULL;
173:   MatFindZeroDiagonals_SeqAIJ_Private(aij->A,&nrows,&rows);
174:   MatGetOwnershipRange(M,&rstart,NULL);
175:   for (i=0; i<nrows; i++) rows[i] += rstart;
176:   ISCreateGeneral(PetscObjectComm((PetscObject)M),nrows,rows,PETSC_OWN_POINTER,zrows);
177:   return 0;
178: }

180: PetscErrorCode MatGetColumnReductions_MPIAIJ(Mat A,PetscInt type,PetscReal *reductions)
181: {
182:   Mat_MPIAIJ        *aij = (Mat_MPIAIJ*)A->data;
183:   PetscInt          i,m,n,*garray = aij->garray;
184:   Mat_SeqAIJ        *a_aij = (Mat_SeqAIJ*) aij->A->data;
185:   Mat_SeqAIJ        *b_aij = (Mat_SeqAIJ*) aij->B->data;
186:   PetscReal         *work;
187:   const PetscScalar *dummy;

189:   MatGetSize(A,&m,&n);
190:   PetscCalloc1(n,&work);
191:   MatSeqAIJGetArrayRead(aij->A,&dummy);
192:   MatSeqAIJRestoreArrayRead(aij->A,&dummy);
193:   MatSeqAIJGetArrayRead(aij->B,&dummy);
194:   MatSeqAIJRestoreArrayRead(aij->B,&dummy);
195:   if (type == NORM_2) {
196:     for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
197:       work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i]*a_aij->a[i]);
198:     }
199:     for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
200:       work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i]*b_aij->a[i]);
201:     }
202:   } else if (type == NORM_1) {
203:     for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
204:       work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i]);
205:     }
206:     for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
207:       work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i]);
208:     }
209:   } else if (type == NORM_INFINITY) {
210:     for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
211:       work[A->cmap->rstart + a_aij->j[i]] = PetscMax(PetscAbsScalar(a_aij->a[i]), work[A->cmap->rstart + a_aij->j[i]]);
212:     }
213:     for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
214:       work[garray[b_aij->j[i]]] = PetscMax(PetscAbsScalar(b_aij->a[i]),work[garray[b_aij->j[i]]]);
215:     }
216:   } else if (type == REDUCTION_SUM_REALPART || type == REDUCTION_MEAN_REALPART) {
217:     for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
218:       work[A->cmap->rstart + a_aij->j[i]] += PetscRealPart(a_aij->a[i]);
219:     }
220:     for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
221:       work[garray[b_aij->j[i]]] += PetscRealPart(b_aij->a[i]);
222:     }
223:   } else if (type == REDUCTION_SUM_IMAGINARYPART || type == REDUCTION_MEAN_IMAGINARYPART) {
224:     for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
225:       work[A->cmap->rstart + a_aij->j[i]] += PetscImaginaryPart(a_aij->a[i]);
226:     }
227:     for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
228:       work[garray[b_aij->j[i]]] += PetscImaginaryPart(b_aij->a[i]);
229:     }
230:   } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Unknown reduction type");
231:   if (type == NORM_INFINITY) {
232:     MPIU_Allreduce(work,reductions,n,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)A));
233:   } else {
234:     MPIU_Allreduce(work,reductions,n,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)A));
235:   }
236:   PetscFree(work);
237:   if (type == NORM_2) {
238:     for (i=0; i<n; i++) reductions[i] = PetscSqrtReal(reductions[i]);
239:   } else if (type == REDUCTION_MEAN_REALPART || type == REDUCTION_MEAN_IMAGINARYPART) {
240:     for (i=0; i<n; i++) reductions[i] /= m;
241:   }
242:   return 0;
243: }

245: PetscErrorCode MatFindOffBlockDiagonalEntries_MPIAIJ(Mat A,IS *is)
246: {
247:   Mat_MPIAIJ      *a  = (Mat_MPIAIJ*)A->data;
248:   IS              sis,gis;
249:   const PetscInt  *isis,*igis;
250:   PetscInt        n,*iis,nsis,ngis,rstart,i;

252:   MatFindOffBlockDiagonalEntries(a->A,&sis);
253:   MatFindNonzeroRows(a->B,&gis);
254:   ISGetSize(gis,&ngis);
255:   ISGetSize(sis,&nsis);
256:   ISGetIndices(sis,&isis);
257:   ISGetIndices(gis,&igis);

259:   PetscMalloc1(ngis+nsis,&iis);
260:   PetscArraycpy(iis,igis,ngis);
261:   PetscArraycpy(iis+ngis,isis,nsis);
262:   n    = ngis + nsis;
263:   PetscSortRemoveDupsInt(&n,iis);
264:   MatGetOwnershipRange(A,&rstart,NULL);
265:   for (i=0; i<n; i++) iis[i] += rstart;
266:   ISCreateGeneral(PetscObjectComm((PetscObject)A),n,iis,PETSC_OWN_POINTER,is);

268:   ISRestoreIndices(sis,&isis);
269:   ISRestoreIndices(gis,&igis);
270:   ISDestroy(&sis);
271:   ISDestroy(&gis);
272:   return 0;
273: }

275: /*
276:   Local utility routine that creates a mapping from the global column
277: number to the local number in the off-diagonal part of the local
278: storage of the matrix.  When PETSC_USE_CTABLE is used this is scalable at
279: a slightly higher hash table cost; without it, it is not scalable (each process
280: has an order-N integer array) but is fast to access.
281: */
282: PetscErrorCode MatCreateColmap_MPIAIJ_Private(Mat mat)
283: {
284:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
285:   PetscInt       n = aij->B->cmap->n,i;

288: #if defined(PETSC_USE_CTABLE)
289:   PetscTableCreate(n,mat->cmap->N+1,&aij->colmap);
290:   for (i=0; i<n; i++) {
291:     PetscTableAdd(aij->colmap,aij->garray[i]+1,i+1,INSERT_VALUES);
292:   }
293: #else
294:   PetscCalloc1(mat->cmap->N+1,&aij->colmap);
295:   PetscLogObjectMemory((PetscObject)mat,(mat->cmap->N+1)*sizeof(PetscInt));
296:   for (i=0; i<n; i++) aij->colmap[aij->garray[i]] = i+1;
297: #endif
298:   return 0;
299: }
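
/* A small illustration of the mapping built above, assuming the non-CTABLE branch and a rank whose
   off-diagonal block B references global columns garray = {3,7,12}: the loop stores
   colmap[3] = 1, colmap[7] = 2, colmap[12] = 3 (1-based so that 0 means "not a column of B"),
   and a later lookup, as done in MatSetValues_MPIAIJ(), reads */
#if 0
  PetscInt gcol = 7;
  PetscInt lcol = aij->colmap[gcol] - 1;  /* lcol == 1, i.e. the second local column of B */
  if (lcol < 0) { /* gcol is not (currently) a column of B on this rank */ }
#endif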

301: #define MatSetValues_SeqAIJ_A_Private(row,col,value,addv,orow,ocol)     \
302: { \
303:     if (col <= lastcol1)  low1 = 0;     \
304:     else                 high1 = nrow1; \
305:     lastcol1 = col;\
306:     while (high1-low1 > 5) { \
307:       t = (low1+high1)/2; \
308:       if (rp1[t] > col) high1 = t; \
309:       else              low1  = t; \
310:     } \
311:       for (_i=low1; _i<high1; _i++) { \
312:         if (rp1[_i] > col) break; \
313:         if (rp1[_i] == col) { \
314:           if (addv == ADD_VALUES) { \
315:             ap1[_i] += value;   \
316:             /* Not sure whether PetscLogFlops() will slow down the code or not */ \
317:             (void)PetscLogFlops(1.0);   \
318:            } \
319:           else                    ap1[_i] = value; \
320:           goto a_noinsert; \
321:         } \
322:       }  \
323:       if (value == 0.0 && ignorezeroentries && row != col) {low1 = 0; high1 = nrow1;goto a_noinsert;} \
324:       if (nonew == 1) {low1 = 0; high1 = nrow1; goto a_noinsert;}                \
326:       MatSeqXAIJReallocateAIJ(A,am,1,nrow1,row,col,rmax1,aa,ai,aj,rp1,ap1,aimax,nonew,MatScalar); \
327:       N = nrow1++ - 1; a->nz++; high1++; \
328:       /* shift up all the later entries in this row */ \
329:       PetscArraymove(rp1+_i+1,rp1+_i,N-_i+1);\
330:       PetscArraymove(ap1+_i+1,ap1+_i,N-_i+1);\
331:       rp1[_i] = col;  \
332:       ap1[_i] = value;  \
333:       A->nonzerostate++;\
334:       a_noinsert: ; \
335:       ailen[row] = nrow1; \
336: }

338: #define MatSetValues_SeqAIJ_B_Private(row,col,value,addv,orow,ocol) \
339:   { \
340:     if (col <= lastcol2) low2 = 0;                        \
341:     else high2 = nrow2;                                   \
342:     lastcol2 = col;                                       \
343:     while (high2-low2 > 5) {                              \
344:       t = (low2+high2)/2;                                 \
345:       if (rp2[t] > col) high2 = t;                        \
346:       else             low2  = t;                         \
347:     }                                                     \
348:     for (_i=low2; _i<high2; _i++) {                       \
349:       if (rp2[_i] > col) break;                           \
350:       if (rp2[_i] == col) {                               \
351:         if (addv == ADD_VALUES) {                         \
352:           ap2[_i] += value;                               \
353:           (void)PetscLogFlops(1.0);                       \
354:         }                                                 \
355:         else                    ap2[_i] = value;          \
356:         goto b_noinsert;                                  \
357:       }                                                   \
358:     }                                                     \
359:     if (value == 0.0 && ignorezeroentries) {low2 = 0; high2 = nrow2; goto b_noinsert;} \
360:     if (nonew == 1) {low2 = 0; high2 = nrow2; goto b_noinsert;}                        \
362:     MatSeqXAIJReallocateAIJ(B,bm,1,nrow2,row,col,rmax2,ba,bi,bj,rp2,ap2,bimax,nonew,MatScalar); \
363:     N = nrow2++ - 1; b->nz++; high2++;                    \
364:     /* shift up all the later entries in this row */      \
365:     PetscArraymove(rp2+_i+1,rp2+_i,N-_i+1);\
366:     PetscArraymove(ap2+_i+1,ap2+_i,N-_i+1);\
367:     rp2[_i] = col;                                        \
368:     ap2[_i] = value;                                      \
369:     B->nonzerostate++;                                    \
370:     b_noinsert: ;                                         \
371:     bilen[row] = nrow2;                                   \
372:   }

374: PetscErrorCode MatSetValuesRow_MPIAIJ(Mat A,PetscInt row,const PetscScalar v[])
375: {
376:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)A->data;
377:   Mat_SeqAIJ     *a   = (Mat_SeqAIJ*)mat->A->data,*b = (Mat_SeqAIJ*)mat->B->data;
378:   PetscInt       l,*garray = mat->garray,diag;
379:   PetscScalar    *aa,*ba;

381:   /* code only works for square matrices A */
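  /* Note: v is expected to hold the entire locally owned row ordered by ascending global column:
     first the off-diagonal (B) entries whose global columns precede the diagonal block, then the
     diagonal-block (A) entries, then the remaining off-diagonal entries; the three copies below
     rely on this ordering. */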

383:   /* find size of row to the left of the diagonal part */
384:   MatGetOwnershipRange(A,&diag,NULL);
385:   row  = row - diag;
386:   for (l=0; l<b->i[row+1]-b->i[row]; l++) {
387:     if (garray[b->j[b->i[row]+l]] > diag) break;
388:   }
389:   if (l) {
390:     MatSeqAIJGetArray(mat->B,&ba);
391:     PetscArraycpy(ba+b->i[row],v,l);
392:     MatSeqAIJRestoreArray(mat->B,&ba);
393:   }

395:   /* diagonal part */
396:   if (a->i[row+1]-a->i[row]) {
397:     MatSeqAIJGetArray(mat->A,&aa);
398:     PetscArraycpy(aa+a->i[row],v+l,(a->i[row+1]-a->i[row]));
399:     MatSeqAIJRestoreArray(mat->A,&aa);
400:   }

402:   /* right of diagonal part */
403:   if (b->i[row+1]-b->i[row]-l) {
404:     MatSeqAIJGetArray(mat->B,&ba);
405:     PetscArraycpy(ba+b->i[row]+l,v+l+a->i[row+1]-a->i[row],b->i[row+1]-b->i[row]-l);
406:     MatSeqAIJRestoreArray(mat->B,&ba);
407:   }
408:   return 0;
409: }

411: PetscErrorCode MatSetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
412: {
413:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
414:   PetscScalar    value = 0.0;
415:   PetscInt       i,j,rstart  = mat->rmap->rstart,rend = mat->rmap->rend;
416:   PetscInt       cstart      = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
417:   PetscBool      roworiented = aij->roworiented;

419:   /* Some Variables required in the macro */
420:   Mat        A                    = aij->A;
421:   Mat_SeqAIJ *a                   = (Mat_SeqAIJ*)A->data;
422:   PetscInt   *aimax               = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
423:   PetscBool  ignorezeroentries    = a->ignorezeroentries;
424:   Mat        B                    = aij->B;
425:   Mat_SeqAIJ *b                   = (Mat_SeqAIJ*)B->data;
426:   PetscInt   *bimax               = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
427:   MatScalar  *aa,*ba;
428:   PetscInt   *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
429:   PetscInt   nonew;
430:   MatScalar  *ap1,*ap2;

432:   MatSeqAIJGetArray(A,&aa);
433:   MatSeqAIJGetArray(B,&ba);
434:   for (i=0; i<m; i++) {
435:     if (im[i] < 0) continue;
437:     if (im[i] >= rstart && im[i] < rend) {
438:       row      = im[i] - rstart;
439:       lastcol1 = -1;
440:       rp1      = aj + ai[row];
441:       ap1      = aa + ai[row];
442:       rmax1    = aimax[row];
443:       nrow1    = ailen[row];
444:       low1     = 0;
445:       high1    = nrow1;
446:       lastcol2 = -1;
447:       rp2      = bj + bi[row];
448:       ap2      = ba + bi[row];
449:       rmax2    = bimax[row];
450:       nrow2    = bilen[row];
451:       low2     = 0;
452:       high2    = nrow2;

454:       for (j=0; j<n; j++) {
455:         if (v)  value = roworiented ? v[i*n+j] : v[i+j*m];
456:         if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && im[i] != in[j]) continue;
457:         if (in[j] >= cstart && in[j] < cend) {
458:           col   = in[j] - cstart;
459:           nonew = a->nonew;
460:           MatSetValues_SeqAIJ_A_Private(row,col,value,addv,im[i],in[j]);
461:         } else if (in[j] < 0) continue;
463:         else {
464:           if (mat->was_assembled) {
465:             if (!aij->colmap) {
466:               MatCreateColmap_MPIAIJ_Private(mat);
467:             }
468: #if defined(PETSC_USE_CTABLE)
469:             PetscTableFind(aij->colmap,in[j]+1,&col); /* map global col ids to local ones */
470:             col--;
471: #else
472:             col = aij->colmap[in[j]] - 1;
473: #endif
474:             if (col < 0 && !((Mat_SeqAIJ*)(aij->B->data))->nonew) { /* col < 0 means in[j] is a new col for B */
475:               MatDisAssemble_MPIAIJ(mat); /* Change aij->B from reduced/local format to expanded/global format */
476:               col  =  in[j];
477:               /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
478:               B        = aij->B;
479:               b        = (Mat_SeqAIJ*)B->data;
480:               bimax    = b->imax; bi = b->i; bilen = b->ilen; bj = b->j; ba = b->a;
481:               rp2      = bj + bi[row];
482:               ap2      = ba + bi[row];
483:               rmax2    = bimax[row];
484:               nrow2    = bilen[row];
485:               low2     = 0;
486:               high2    = nrow2;
487:               bm       = aij->B->rmap->n;
488:               ba       = b->a;
489:             } else if (col < 0 && !(ignorezeroentries && value == 0.0)) {
490:               if (1 == ((Mat_SeqAIJ*)(aij->B->data))->nonew) {
491:                 PetscInfo(mat,"Skipping of insertion of new nonzero location in off-diagonal portion of matrix %g(%" PetscInt_FMT ",%" PetscInt_FMT ")\n",(double)PetscRealPart(value),im[i],in[j]);
492:               } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%" PetscInt_FMT ", %" PetscInt_FMT ") into matrix", im[i], in[j]);
493:             }
494:           } else col = in[j];
495:           nonew = b->nonew;
496:           MatSetValues_SeqAIJ_B_Private(row,col,value,addv,im[i],in[j]);
497:         }
498:       }
499:     } else {
501:       if (!aij->donotstash) {
502:         mat->assembled = PETSC_FALSE;
503:         if (roworiented) {
504:           MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
505:         } else {
506:           MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
507:         }
508:       }
509:     }
510:   }
511:   MatSeqAIJRestoreArray(A,&aa);
512:   MatSeqAIJRestoreArray(B,&ba);
513:   return 0;
514: }

516: /*
517:     This function sets the j and ilen arrays (of the diagonal and off-diagonal part) of an MPIAIJ-matrix.
518:     The values in mat_i have to be sorted and the values in mat_j have to be sorted for each row (CSR-like).
519:     No off-processor parts of the matrix are allowed here, and mat->was_assembled has to be PETSC_FALSE.
520: */
521: PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(Mat mat,const PetscInt mat_j[],const PetscInt mat_i[])
522: {
523:   Mat_MPIAIJ     *aij        = (Mat_MPIAIJ*)mat->data;
524:   Mat            A           = aij->A; /* diagonal part of the matrix */
525:   Mat            B           = aij->B; /* offdiagonal part of the matrix */
526:   Mat_SeqAIJ     *a          = (Mat_SeqAIJ*)A->data;
527:   Mat_SeqAIJ     *b          = (Mat_SeqAIJ*)B->data;
528:   PetscInt       cstart      = mat->cmap->rstart,cend = mat->cmap->rend,col;
529:   PetscInt       *ailen      = a->ilen,*aj = a->j;
530:   PetscInt       *bilen      = b->ilen,*bj = b->j;
531:   PetscInt       am          = aij->A->rmap->n,j;
532:   PetscInt       diag_so_far = 0,dnz;
533:   PetscInt       offd_so_far = 0,onz;

535:   /* Iterate over all rows of the matrix */
536:   for (j=0; j<am; j++) {
537:     dnz = onz = 0;
538:     /*  Iterate over all non-zero columns of the current row */
539:     for (col=mat_i[j]; col<mat_i[j+1]; col++) {
540:       /* If column is in the diagonal */
541:       if (mat_j[col] >= cstart && mat_j[col] < cend) {
542:         aj[diag_so_far++] = mat_j[col] - cstart;
543:         dnz++;
544:       } else { /* off-diagonal entries */
545:         bj[offd_so_far++] = mat_j[col];
546:         onz++;
547:       }
548:     }
549:     ailen[j] = dnz;
550:     bilen[j] = onz;
551:   }
552:   return 0;
553: }
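
/* A worked illustration of the split performed above, assuming a rank that owns global columns
   [cstart,cend) = [4,8) and a single local row: */
#if 0
  const PetscInt mat_i[] = {0,4};        /* one local row with four nonzeros     */
  const PetscInt mat_j[] = {1,5,6,9};    /* sorted global column indices         */
  /* after the loop: aj = {1,2} (i.e. 5-4 and 6-4), ailen[0] = 2; bj = {1,9} kept in global
     numbering, bilen[0] = 2.  MatSetValues_MPIAIJ_CopyFromCSRFormat() below copies the matching
     numerical values in the same pattern. */
#endif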

555: /*
556:     This function sets the local j, a and ilen arrays (of the diagonal and off-diagonal part) of an MPIAIJ-matrix.
557:     The values in mat_i have to be sorted and the values in mat_j have to be sorted for each row (CSR-like).
558:     No off-processor parts of the matrix are allowed here; they are set at a later point by MatSetValues_MPIAIJ().
559:     Also, mat->was_assembled has to be false, otherwise the statement aj[rowstart_diag+dnz_row] = mat_j[col] - cstart;
560:     would not be true and the more complex MatSetValues_MPIAIJ has to be used.
561: */
562: PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat(Mat mat,const PetscInt mat_j[],const PetscInt mat_i[],const PetscScalar mat_a[])
563: {
564:   Mat_MPIAIJ     *aij   = (Mat_MPIAIJ*)mat->data;
565:   Mat            A      = aij->A; /* diagonal part of the matrix */
566:   Mat            B      = aij->B; /* offdiagonal part of the matrix */
567:   Mat_SeqAIJ     *aijd  =(Mat_SeqAIJ*)(aij->A)->data,*aijo=(Mat_SeqAIJ*)(aij->B)->data;
568:   Mat_SeqAIJ     *a     = (Mat_SeqAIJ*)A->data;
569:   Mat_SeqAIJ     *b     = (Mat_SeqAIJ*)B->data;
570:   PetscInt       cstart = mat->cmap->rstart,cend = mat->cmap->rend;
571:   PetscInt       *ailen = a->ilen,*aj = a->j;
572:   PetscInt       *bilen = b->ilen,*bj = b->j;
573:   PetscInt       am     = aij->A->rmap->n,j;
574:   PetscInt       *full_diag_i=aijd->i,*full_offd_i=aijo->i; /* These variables can also include non-local elements, which are set at a later point. */
575:   PetscInt       col,dnz_row,onz_row,rowstart_diag,rowstart_offd;
576:   PetscScalar    *aa = a->a,*ba = b->a;

578:   /* Iterate over all rows of the matrix */
579:   for (j=0; j<am; j++) {
580:     dnz_row = onz_row = 0;
581:     rowstart_offd = full_offd_i[j];
582:     rowstart_diag = full_diag_i[j];
583:     /*  Iterate over all non-zero columns of the current row */
584:     for (col=mat_i[j]; col<mat_i[j+1]; col++) {
585:       /* If column is in the diagonal */
586:       if (mat_j[col] >= cstart && mat_j[col] < cend) {
587:         aj[rowstart_diag+dnz_row] = mat_j[col] - cstart;
588:         aa[rowstart_diag+dnz_row] = mat_a[col];
589:         dnz_row++;
590:       } else { /* off-diagonal entries */
591:         bj[rowstart_offd+onz_row] = mat_j[col];
592:         ba[rowstart_offd+onz_row] = mat_a[col];
593:         onz_row++;
594:       }
595:     }
596:     ailen[j] = dnz_row;
597:     bilen[j] = onz_row;
598:   }
599:   return 0;
600: }

602: PetscErrorCode MatGetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
603: {
604:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
605:   PetscInt       i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
606:   PetscInt       cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;

608:   for (i=0; i<m; i++) {
609:     if (idxm[i] < 0) continue; /* negative row */
611:     if (idxm[i] >= rstart && idxm[i] < rend) {
612:       row = idxm[i] - rstart;
613:       for (j=0; j<n; j++) {
614:         if (idxn[j] < 0) continue; /* negative column */
616:         if (idxn[j] >= cstart && idxn[j] < cend) {
617:           col  = idxn[j] - cstart;
618:           MatGetValues(aij->A,1,&row,1,&col,v+i*n+j);
619:         } else {
620:           if (!aij->colmap) {
621:             MatCreateColmap_MPIAIJ_Private(mat);
622:           }
623: #if defined(PETSC_USE_CTABLE)
624:           PetscTableFind(aij->colmap,idxn[j]+1,&col);
625:           col--;
626: #else
627:           col = aij->colmap[idxn[j]] - 1;
628: #endif
629:           if ((col < 0) || (aij->garray[col] != idxn[j])) *(v+i*n+j) = 0.0;
630:           else {
631:             MatGetValues(aij->B,1,&row,1,&col,v+i*n+j);
632:           }
633:         }
634:       }
635:     } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only local values currently supported");
636:   }
637:   return 0;
638: }

640: PetscErrorCode MatAssemblyBegin_MPIAIJ(Mat mat,MatAssemblyType mode)
641: {
642:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
643:   PetscInt       nstash,reallocs;

645:   if (aij->donotstash || mat->nooffprocentries) return 0;

647:   MatStashScatterBegin_Private(mat,&mat->stash,mat->rmap->range);
648:   MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);
649:   PetscInfo(aij->A,"Stash has %" PetscInt_FMT " entries, uses %" PetscInt_FMT " mallocs.\n",nstash,reallocs);
650:   return 0;
651: }

653: PetscErrorCode MatAssemblyEnd_MPIAIJ(Mat mat,MatAssemblyType mode)
654: {
655:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
656:   PetscMPIInt    n;
657:   PetscInt       i,j,rstart,ncols,flg;
658:   PetscInt       *row,*col;
659:   PetscBool      other_disassembled;
660:   PetscScalar    *val;

662:   /* do not use 'b = (Mat_SeqAIJ*)aij->B->data' as B can be reset in disassembly */

664:   if (!aij->donotstash && !mat->nooffprocentries) {
665:     while (1) {
666:       MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);
667:       if (!flg) break;

669:       for (i=0; i<n;) {
670:         /* Now identify the consecutive vals belonging to the same row */
671:         for (j=i,rstart=row[j]; j<n; j++) {
672:           if (row[j] != rstart) break;
673:         }
674:         if (j < n) ncols = j-i;
675:         else       ncols = n-i;
676:         /* Now assemble all these values with a single function call */
677:         MatSetValues_MPIAIJ(mat,1,row+i,ncols,col+i,val+i,mat->insertmode);
678:         i    = j;
679:       }
680:     }
681:     MatStashScatterEnd_Private(&mat->stash);
682:   }
683: #if defined(PETSC_HAVE_DEVICE)
684:   if (mat->offloadmask == PETSC_OFFLOAD_CPU) aij->A->offloadmask = PETSC_OFFLOAD_CPU;
685:   /* We call MatBindToCPU() on aij->A and aij->B here, because if MatBindToCPU_MPIAIJ() is called before assembly, it cannot bind these. */
686:   if (mat->boundtocpu) {
687:     MatBindToCPU(aij->A,PETSC_TRUE);
688:     MatBindToCPU(aij->B,PETSC_TRUE);
689:   }
690: #endif
691:   MatAssemblyBegin(aij->A,mode);
692:   MatAssemblyEnd(aij->A,mode);

694:   /* determine if any processor has disassembled; if so, we must
695:      also disassemble ourselves so that we may reassemble. */
696:   /*
697:      if the nonzero structure of submatrix B cannot change then we know that
698:      no processor disassembled, thus we can skip this stuff
699:   */
700:   if (!((Mat_SeqAIJ*)aij->B->data)->nonew) {
701:     MPIU_Allreduce(&mat->was_assembled,&other_disassembled,1,MPIU_BOOL,MPI_PROD,PetscObjectComm((PetscObject)mat));
702:     if (mat->was_assembled && !other_disassembled) { /* mat on this rank has reduced off-diag B with local col ids, but globally it does not */
703:       MatDisAssemble_MPIAIJ(mat);
704:     }
705:   }
706:   if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
707:     MatSetUpMultiply_MPIAIJ(mat);
708:   }
709:   MatSetOption(aij->B,MAT_USE_INODES,PETSC_FALSE);
710: #if defined(PETSC_HAVE_DEVICE)
711:   if (mat->offloadmask == PETSC_OFFLOAD_CPU && aij->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) aij->B->offloadmask = PETSC_OFFLOAD_CPU;
712: #endif
713:   MatAssemblyBegin(aij->B,mode);
714:   MatAssemblyEnd(aij->B,mode);

716:   PetscFree2(aij->rowvalues,aij->rowindices);

718:   aij->rowvalues = NULL;

720:   VecDestroy(&aij->diag);

722:   /* if no new nonzero locations are allowed in the matrix then only set the matrix state the first time through */
723:   if ((!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) || !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
724:     PetscObjectState state = aij->A->nonzerostate + aij->B->nonzerostate;
725:     MPIU_Allreduce(&state,&mat->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)mat));
726:   }
727: #if defined(PETSC_HAVE_DEVICE)
728:   mat->offloadmask = PETSC_OFFLOAD_BOTH;
729: #endif
730:   return 0;
731: }

733: PetscErrorCode MatZeroEntries_MPIAIJ(Mat A)
734: {
735:   Mat_MPIAIJ     *l = (Mat_MPIAIJ*)A->data;

737:   MatZeroEntries(l->A);
738:   MatZeroEntries(l->B);
739:   return 0;
740: }

742: PetscErrorCode MatZeroRows_MPIAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
743: {
744:   Mat_MPIAIJ      *mat = (Mat_MPIAIJ *) A->data;
745:   PetscObjectState sA, sB;
746:   PetscInt        *lrows;
747:   PetscInt         r, len;
748:   PetscBool        cong, lch, gch;

750:   /* get locally owned rows */
751:   MatZeroRowsMapLocal_Private(A,N,rows,&len,&lrows);
752:   MatHasCongruentLayouts(A,&cong);
753:   /* fix right hand side if needed */
754:   if (x && b) {
755:     const PetscScalar *xx;
756:     PetscScalar       *bb;

759:     VecGetArrayRead(x, &xx);
760:     VecGetArray(b, &bb);
761:     for (r = 0; r < len; ++r) bb[lrows[r]] = diag*xx[lrows[r]];
762:     VecRestoreArrayRead(x, &xx);
763:     VecRestoreArray(b, &bb);
764:   }

766:   sA = mat->A->nonzerostate;
767:   sB = mat->B->nonzerostate;

769:   if (diag != 0.0 && cong) {
770:     MatZeroRows(mat->A, len, lrows, diag, NULL, NULL);
771:     MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL);
772:   } else if (diag != 0.0) { /* non-square or non-congruent layouts -> if keepnonzeropattern is false, we allow for new insertion */
773:     Mat_SeqAIJ *aijA = (Mat_SeqAIJ*)mat->A->data;
774:     Mat_SeqAIJ *aijB = (Mat_SeqAIJ*)mat->B->data;
775:     PetscInt   nnwA, nnwB;
776:     PetscBool  nnzA, nnzB;

778:     nnwA = aijA->nonew;
779:     nnwB = aijB->nonew;
780:     nnzA = aijA->keepnonzeropattern;
781:     nnzB = aijB->keepnonzeropattern;
782:     if (!nnzA) {
783:       PetscInfo(mat->A,"Requested to not keep the pattern and add a nonzero diagonal; may encounter reallocations on diagonal block.\n");
784:       aijA->nonew = 0;
785:     }
786:     if (!nnzB) {
787:       PetscInfo(mat->B,"Requested to not keep the pattern and add a nonzero diagonal; may encounter reallocations on off-diagonal block.\n");
788:       aijB->nonew = 0;
789:     }
790:     /* Must zero here before the next loop */
791:     MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL);
792:     MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL);
793:     for (r = 0; r < len; ++r) {
794:       const PetscInt row = lrows[r] + A->rmap->rstart;
795:       if (row >= A->cmap->N) continue;
796:       MatSetValues(A, 1, &row, 1, &row, &diag, INSERT_VALUES);
797:     }
798:     aijA->nonew = nnwA;
799:     aijB->nonew = nnwB;
800:   } else {
801:     MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL);
802:     MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL);
803:   }
804:   PetscFree(lrows);
805:   MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
806:   MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);

808:   /* reduce nonzerostate */
809:   lch = (PetscBool)(sA != mat->A->nonzerostate || sB != mat->B->nonzerostate);
810:   MPIU_Allreduce(&lch,&gch,1,MPIU_BOOL,MPI_LOR,PetscObjectComm((PetscObject)A));
811:   if (gch) A->nonzerostate++;
812:   return 0;
813: }

815: PetscErrorCode MatZeroRowsColumns_MPIAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
816: {
817:   Mat_MPIAIJ        *l = (Mat_MPIAIJ*)A->data;
818:   PetscMPIInt       n = A->rmap->n;
819:   PetscInt          i,j,r,m,len = 0;
820:   PetscInt          *lrows,*owners = A->rmap->range;
821:   PetscMPIInt       p = 0;
822:   PetscSFNode       *rrows;
823:   PetscSF           sf;
824:   const PetscScalar *xx;
825:   PetscScalar       *bb,*mask,*aij_a;
826:   Vec               xmask,lmask;
827:   Mat_SeqAIJ        *aij = (Mat_SeqAIJ*)l->B->data;
828:   const PetscInt    *aj, *ii,*ridx;
829:   PetscScalar       *aa;

831:   /* Create SF where leaves are input rows and roots are owned rows */
832:   PetscMalloc1(n, &lrows);
833:   for (r = 0; r < n; ++r) lrows[r] = -1;
834:   PetscMalloc1(N, &rrows);
835:   for (r = 0; r < N; ++r) {
836:     const PetscInt idx   = rows[r];
838:     if (idx < owners[p] || owners[p+1] <= idx) { /* short-circuit the search if the last p owns this row too */
839:       PetscLayoutFindOwner(A->rmap,idx,&p);
840:     }
841:     rrows[r].rank  = p;
842:     rrows[r].index = rows[r] - owners[p];
843:   }
844:   PetscSFCreate(PetscObjectComm((PetscObject) A), &sf);
845:   PetscSFSetGraph(sf, n, N, NULL, PETSC_OWN_POINTER, rrows, PETSC_OWN_POINTER);
846:   /* Collect flags for rows to be zeroed */
847:   PetscSFReduceBegin(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);
848:   PetscSFReduceEnd(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);
849:   PetscSFDestroy(&sf);
850:   /* Compress and put in row numbers */
851:   for (r = 0; r < n; ++r) if (lrows[r] >= 0) lrows[len++] = r;
852:   /* zero diagonal part of matrix */
853:   MatZeroRowsColumns(l->A,len,lrows,diag,x,b);
854:   /* handle off diagonal part of matrix */
855:   MatCreateVecs(A,&xmask,NULL);
856:   VecDuplicate(l->lvec,&lmask);
857:   VecGetArray(xmask,&bb);
858:   for (i=0; i<len; i++) bb[lrows[i]] = 1;
859:   VecRestoreArray(xmask,&bb);
860:   VecScatterBegin(l->Mvctx,xmask,lmask,ADD_VALUES,SCATTER_FORWARD);
861:   VecScatterEnd(l->Mvctx,xmask,lmask,ADD_VALUES,SCATTER_FORWARD);
862:   VecDestroy(&xmask);
863:   if (x && b) { /* this code is buggy when the row and column layouts don't match */
864:     PetscBool cong;

866:     MatHasCongruentLayouts(A,&cong);
868:     VecScatterBegin(l->Mvctx,x,l->lvec,INSERT_VALUES,SCATTER_FORWARD);
869:     VecScatterEnd(l->Mvctx,x,l->lvec,INSERT_VALUES,SCATTER_FORWARD);
870:     VecGetArrayRead(l->lvec,&xx);
871:     VecGetArray(b,&bb);
872:   }
873:   VecGetArray(lmask,&mask);
874:   /* remove zeroed rows of off diagonal matrix */
875:   MatSeqAIJGetArray(l->B,&aij_a);
876:   ii = aij->i;
877:   for (i=0; i<len; i++) {
878:     PetscArrayzero(aij_a + ii[lrows[i]],ii[lrows[i]+1] - ii[lrows[i]]);
879:   }
880:   /* loop over all elements of the off-process part of the matrix, zeroing removed columns */
881:   if (aij->compressedrow.use) {
882:     m    = aij->compressedrow.nrows;
883:     ii   = aij->compressedrow.i;
884:     ridx = aij->compressedrow.rindex;
885:     for (i=0; i<m; i++) {
886:       n  = ii[i+1] - ii[i];
887:       aj = aij->j + ii[i];
888:       aa = aij_a + ii[i];

890:       for (j=0; j<n; j++) {
891:         if (PetscAbsScalar(mask[*aj])) {
892:           if (b) bb[*ridx] -= *aa*xx[*aj];
893:           *aa = 0.0;
894:         }
895:         aa++;
896:         aj++;
897:       }
898:       ridx++;
899:     }
900:   } else { /* do not use compressed row format */
901:     m = l->B->rmap->n;
902:     for (i=0; i<m; i++) {
903:       n  = ii[i+1] - ii[i];
904:       aj = aij->j + ii[i];
905:       aa = aij_a + ii[i];
906:       for (j=0; j<n; j++) {
907:         if (PetscAbsScalar(mask[*aj])) {
908:           if (b) bb[i] -= *aa*xx[*aj];
909:           *aa = 0.0;
910:         }
911:         aa++;
912:         aj++;
913:       }
914:     }
915:   }
916:   if (x && b) {
917:     VecRestoreArray(b,&bb);
918:     VecRestoreArrayRead(l->lvec,&xx);
919:   }
920:   MatSeqAIJRestoreArray(l->B,&aij_a);
921:   VecRestoreArray(lmask,&mask);
922:   VecDestroy(&lmask);
923:   PetscFree(lrows);

925:   /* only change matrix nonzero state if pattern was allowed to be changed */
926:   if (!((Mat_SeqAIJ*)(l->A->data))->keepnonzeropattern) {
927:     PetscObjectState state = l->A->nonzerostate + l->B->nonzerostate;
928:     MPIU_Allreduce(&state,&A->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)A));
929:   }
930:   return 0;
931: }

933: PetscErrorCode MatMult_MPIAIJ(Mat A,Vec xx,Vec yy)
934: {
935:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
936:   PetscInt       nt;
937:   VecScatter     Mvctx = a->Mvctx;

939:   VecGetLocalSize(xx,&nt);
941:   VecScatterBegin(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
942:   (*a->A->ops->mult)(a->A,xx,yy);
943:   VecScatterEnd(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
944:   (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);
945:   return 0;
946: }

948: PetscErrorCode MatMultDiagonalBlock_MPIAIJ(Mat A,Vec bb,Vec xx)
949: {
950:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

952:   MatMultDiagonalBlock(a->A,bb,xx);
953:   return 0;
954: }

956: PetscErrorCode MatMultAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
957: {
958:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
959:   VecScatter     Mvctx = a->Mvctx;

961:   VecScatterBegin(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
962:   (*a->A->ops->multadd)(a->A,xx,yy,zz);
963:   VecScatterEnd(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
964:   (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);
965:   return 0;
966: }

968: PetscErrorCode MatMultTranspose_MPIAIJ(Mat A,Vec xx,Vec yy)
969: {
970:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

972:   /* do nondiagonal part */
973:   (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
974:   /* do local part */
975:   (*a->A->ops->multtranspose)(a->A,xx,yy);
976:   /* add partial results together */
977:   VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
978:   VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
979:   return 0;
980: }

982: PetscErrorCode MatIsTranspose_MPIAIJ(Mat Amat,Mat Bmat,PetscReal tol,PetscBool  *f)
983: {
984:   MPI_Comm       comm;
985:   Mat_MPIAIJ     *Aij = (Mat_MPIAIJ*) Amat->data, *Bij;
986:   Mat            Adia = Aij->A, Bdia, Aoff,Boff,*Aoffs,*Boffs;
987:   IS             Me,Notme;
988:   PetscInt       M,N,first,last,*notme,i;
989:   PetscBool      lf;
990:   PetscMPIInt    size;

992:   /* Easy test: symmetric diagonal block */
993:   Bij  = (Mat_MPIAIJ*) Bmat->data; Bdia = Bij->A;
994:   MatIsTranspose(Adia,Bdia,tol,&lf);
995:   MPIU_Allreduce(&lf,f,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)Amat));
996:   if (!*f) return 0;
997:   PetscObjectGetComm((PetscObject)Amat,&comm);
998:   MPI_Comm_size(comm,&size);
999:   if (size == 1) return 0;

1001:   /* Hard test: off-diagonal block. This takes a MatCreateSubMatrix. */
1002:   MatGetSize(Amat,&M,&N);
1003:   MatGetOwnershipRange(Amat,&first,&last);
1004:   PetscMalloc1(N-last+first,&notme);
1005:   for (i=0; i<first; i++) notme[i] = i;
1006:   for (i=last; i<M; i++) notme[i-last+first] = i;
1007:   ISCreateGeneral(MPI_COMM_SELF,N-last+first,notme,PETSC_COPY_VALUES,&Notme);
1008:   ISCreateStride(MPI_COMM_SELF,last-first,first,1,&Me);
1009:   MatCreateSubMatrices(Amat,1,&Me,&Notme,MAT_INITIAL_MATRIX,&Aoffs);
1010:   Aoff = Aoffs[0];
1011:   MatCreateSubMatrices(Bmat,1,&Notme,&Me,MAT_INITIAL_MATRIX,&Boffs);
1012:   Boff = Boffs[0];
1013:   MatIsTranspose(Aoff,Boff,tol,f);
1014:   MatDestroyMatrices(1,&Aoffs);
1015:   MatDestroyMatrices(1,&Boffs);
1016:   ISDestroy(&Me);
1017:   ISDestroy(&Notme);
1018:   PetscFree(notme);
1019:   return 0;
1020: }

1022: PetscErrorCode MatIsSymmetric_MPIAIJ(Mat A,PetscReal tol,PetscBool  *f)
1023: {
1024:   MatIsTranspose_MPIAIJ(A,A,tol,f);
1025:   return 0;
1026: }

1028: PetscErrorCode MatMultTransposeAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1029: {
1030:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1032:   /* do nondiagonal part */
1033:   (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1034:   /* do local part */
1035:   (*a->A->ops->multtransposeadd)(a->A,xx,yy,zz);
1036:   /* add partial results together */
1037:   VecScatterBegin(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
1038:   VecScatterEnd(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
1039:   return 0;
1040: }

1042: /*
1043:   This only works correctly for square matrices where the subblock A->A is the
1044:   diagonal block.
1045: */
1046: PetscErrorCode MatGetDiagonal_MPIAIJ(Mat A,Vec v)
1047: {
1048:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1052:   MatGetDiagonal(a->A,v);
1053:   return 0;
1054: }

1056: PetscErrorCode MatScale_MPIAIJ(Mat A,PetscScalar aa)
1057: {
1058:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1060:   MatScale(a->A,aa);
1061:   MatScale(a->B,aa);
1062:   return 0;
1063: }

1065: /* Free COO stuff; must match allocation methods in MatSetPreallocationCOO_MPIAIJ() */
1066: PETSC_INTERN PetscErrorCode MatResetPreallocationCOO_MPIAIJ(Mat mat)
1067: {
1068:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;

1070:   PetscSFDestroy(&aij->coo_sf);
1071:   PetscFree4(aij->Aperm1,aij->Bperm1,aij->Ajmap1,aij->Bjmap1);
1072:   PetscFree4(aij->Aperm2,aij->Bperm2,aij->Ajmap2,aij->Bjmap2);
1073:   PetscFree4(aij->Aimap1,aij->Bimap1,aij->Aimap2,aij->Bimap2);
1074:   PetscFree2(aij->sendbuf,aij->recvbuf);
1075:   PetscFree(aij->Cperm1);
1076:   return 0;
1077: }

1079: PetscErrorCode MatDestroy_MPIAIJ(Mat mat)
1080: {
1081:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;

1083: #if defined(PETSC_USE_LOG)
1084:   PetscLogObjectState((PetscObject)mat,"Rows=%" PetscInt_FMT ", Cols=%" PetscInt_FMT,mat->rmap->N,mat->cmap->N);
1085: #endif
1086:   MatStashDestroy_Private(&mat->stash);
1087:   VecDestroy(&aij->diag);
1088:   MatDestroy(&aij->A);
1089:   MatDestroy(&aij->B);
1090: #if defined(PETSC_USE_CTABLE)
1091:   PetscTableDestroy(&aij->colmap);
1092: #else
1093:   PetscFree(aij->colmap);
1094: #endif
1095:   PetscFree(aij->garray);
1096:   VecDestroy(&aij->lvec);
1097:   VecScatterDestroy(&aij->Mvctx);
1098:   PetscFree2(aij->rowvalues,aij->rowindices);
1099:   PetscFree(aij->ld);

1101:   /* Free COO */
1102:   MatResetPreallocationCOO_MPIAIJ(mat);

1104:   PetscFree(mat->data);

1106:   /* may be created by MatCreateMPIAIJSumSeqAIJSymbolic */
1107:   PetscObjectCompose((PetscObject)mat,"MatMergeSeqsToMPI",NULL);

1109:   PetscObjectChangeTypeName((PetscObject)mat,NULL);
1110:   PetscObjectComposeFunction((PetscObject)mat,"MatStoreValues_C",NULL);
1111:   PetscObjectComposeFunction((PetscObject)mat,"MatRetrieveValues_C",NULL);
1112:   PetscObjectComposeFunction((PetscObject)mat,"MatIsTranspose_C",NULL);
1113:   PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocation_C",NULL);
1114:   PetscObjectComposeFunction((PetscObject)mat,"MatResetPreallocation_C",NULL);
1115:   PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocationCSR_C",NULL);
1116:   PetscObjectComposeFunction((PetscObject)mat,"MatDiagonalScaleLocal_C",NULL);
1117:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpibaij_C",NULL);
1118:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpisbaij_C",NULL);
1119: #if defined(PETSC_HAVE_CUDA)
1120:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpiaijcusparse_C",NULL);
1121: #endif
1122: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1123:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpiaijkokkos_C",NULL);
1124: #endif
1125:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpidense_C",NULL);
1126: #if defined(PETSC_HAVE_ELEMENTAL)
1127:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_elemental_C",NULL);
1128: #endif
1129: #if defined(PETSC_HAVE_SCALAPACK)
1130:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_scalapack_C",NULL);
1131: #endif
1132: #if defined(PETSC_HAVE_HYPRE)
1133:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_hypre_C",NULL);
1134:   PetscObjectComposeFunction((PetscObject)mat,"MatProductSetFromOptions_transpose_mpiaij_mpiaij_C",NULL);
1135: #endif
1136:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_is_C",NULL);
1137:   PetscObjectComposeFunction((PetscObject)mat,"MatProductSetFromOptions_is_mpiaij_C",NULL);
1138:   PetscObjectComposeFunction((PetscObject)mat,"MatProductSetFromOptions_mpiaij_mpiaij_C",NULL);
1139:   PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetUseScalableIncreaseOverlap_C",NULL);
1140:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpiaijperm_C",NULL);
1141:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpiaijsell_C",NULL);
1142: #if defined(PETSC_HAVE_MKL_SPARSE)
1143:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpiaijmkl_C",NULL);
1144: #endif
1145:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpiaijcrl_C",NULL);
1146:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_is_C",NULL);
1147:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpisell_C",NULL);
1148:   PetscObjectComposeFunction((PetscObject)mat,"MatSetPreallocationCOO_C",NULL);
1149:   PetscObjectComposeFunction((PetscObject)mat,"MatSetValuesCOO_C",NULL);
1150:   return 0;
1151: }

1153: PetscErrorCode MatView_MPIAIJ_Binary(Mat mat,PetscViewer viewer)
1154: {
1155:   Mat_MPIAIJ        *aij = (Mat_MPIAIJ*)mat->data;
1156:   Mat_SeqAIJ        *A   = (Mat_SeqAIJ*)aij->A->data;
1157:   Mat_SeqAIJ        *B   = (Mat_SeqAIJ*)aij->B->data;
1158:   const PetscInt    *garray = aij->garray;
1159:   const PetscScalar *aa,*ba;
1160:   PetscInt          header[4],M,N,m,rs,cs,nz,cnt,i,ja,jb;
1161:   PetscInt          *rowlens;
1162:   PetscInt          *colidxs;
1163:   PetscScalar       *matvals;

1165:   PetscViewerSetUp(viewer);

1167:   M  = mat->rmap->N;
1168:   N  = mat->cmap->N;
1169:   m  = mat->rmap->n;
1170:   rs = mat->rmap->rstart;
1171:   cs = mat->cmap->rstart;
1172:   nz = A->nz + B->nz;

1174:   /* write matrix header */
1175:   header[0] = MAT_FILE_CLASSID;
1176:   header[1] = M; header[2] = N; header[3] = nz;
1177:   MPI_Reduce(&nz,&header[3],1,MPIU_INT,MPI_SUM,0,PetscObjectComm((PetscObject)mat));
1178:   PetscViewerBinaryWrite(viewer,header,4,PETSC_INT);

1180:   /* fill in and store row lengths  */
1181:   PetscMalloc1(m,&rowlens);
1182:   for (i=0; i<m; i++) rowlens[i] = A->i[i+1] - A->i[i] + B->i[i+1] - B->i[i];
1183:   PetscViewerBinaryWriteAll(viewer,rowlens,m,rs,M,PETSC_INT);
1184:   PetscFree(rowlens);

1186:   /* fill in and store column indices */
1187:   PetscMalloc1(nz,&colidxs);
1188:   for (cnt=0, i=0; i<m; i++) {
1189:     for (jb=B->i[i]; jb<B->i[i+1]; jb++) {
1190:       if (garray[B->j[jb]] > cs) break;
1191:       colidxs[cnt++] = garray[B->j[jb]];
1192:     }
1193:     for (ja=A->i[i]; ja<A->i[i+1]; ja++)
1194:       colidxs[cnt++] = A->j[ja] + cs;
1195:     for (; jb<B->i[i+1]; jb++)
1196:       colidxs[cnt++] = garray[B->j[jb]];
1197:   }
1199:   PetscViewerBinaryWriteAll(viewer,colidxs,nz,PETSC_DETERMINE,PETSC_DETERMINE,PETSC_INT);
1200:   PetscFree(colidxs);

1202:   /* fill in and store nonzero values */
1203:   MatSeqAIJGetArrayRead(aij->A,&aa);
1204:   MatSeqAIJGetArrayRead(aij->B,&ba);
1205:   PetscMalloc1(nz,&matvals);
1206:   for (cnt=0, i=0; i<m; i++) {
1207:     for (jb=B->i[i]; jb<B->i[i+1]; jb++) {
1208:       if (garray[B->j[jb]] > cs) break;
1209:       matvals[cnt++] = ba[jb];
1210:     }
1211:     for (ja=A->i[i]; ja<A->i[i+1]; ja++)
1212:       matvals[cnt++] = aa[ja];
1213:     for (; jb<B->i[i+1]; jb++)
1214:       matvals[cnt++] = ba[jb];
1215:   }
1216:   MatSeqAIJRestoreArrayRead(aij->A,&aa);
1217:   MatSeqAIJRestoreArrayRead(aij->B,&ba);
1219:   PetscViewerBinaryWriteAll(viewer,matvals,nz,PETSC_DETERMINE,PETSC_DETERMINE,PETSC_SCALAR);
1220:   PetscFree(matvals);

1222:   /* write block size option to the viewer's .info file */
1223:   MatView_Binary_BlockSizes(mat,viewer);
1224:   return 0;
1225: }

1227: #include <petscdraw.h>
1228: PetscErrorCode MatView_MPIAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
1229: {
1230:   Mat_MPIAIJ        *aij = (Mat_MPIAIJ*)mat->data;
1231:   PetscMPIInt       rank = aij->rank,size = aij->size;
1232:   PetscBool         isdraw,iascii,isbinary;
1233:   PetscViewer       sviewer;
1234:   PetscViewerFormat format;

1236:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
1237:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1238:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1239:   if (iascii) {
1240:     PetscViewerGetFormat(viewer,&format);
1241:     if (format == PETSC_VIEWER_LOAD_BALANCE) {
1242:       PetscInt i,nmax = 0,nmin = PETSC_MAX_INT,navg = 0,*nz,nzlocal = ((Mat_SeqAIJ*) (aij->A->data))->nz + ((Mat_SeqAIJ*) (aij->B->data))->nz;
1243:       PetscMalloc1(size,&nz);
1244:       MPI_Allgather(&nzlocal,1,MPIU_INT,nz,1,MPIU_INT,PetscObjectComm((PetscObject)mat));
1245:       for (i=0; i<(PetscInt)size; i++) {
1246:         nmax = PetscMax(nmax,nz[i]);
1247:         nmin = PetscMin(nmin,nz[i]);
1248:         navg += nz[i];
1249:       }
1250:       PetscFree(nz);
1251:       navg = navg/size;
1252:       PetscViewerASCIIPrintf(viewer,"Load Balance - Nonzeros: Min %" PetscInt_FMT "  avg %" PetscInt_FMT "  max %" PetscInt_FMT "\n",nmin,navg,nmax);
1253:       return 0;
1254:     }
1255:     PetscViewerGetFormat(viewer,&format);
1256:     if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
1257:       MatInfo   info;
1258:       PetscInt *inodes=NULL;

1260:       MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);
1261:       MatGetInfo(mat,MAT_LOCAL,&info);
1262:       MatInodeGetInodeSizes(aij->A,NULL,&inodes,NULL);
1263:       PetscViewerASCIIPushSynchronized(viewer);
1264:       if (!inodes) {
1265:         PetscCall(PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %" PetscInt_FMT " nz %" PetscInt_FMT " nz alloced %" PetscInt_FMT " mem %g, not using I-node routines\n",
1266:                                                    rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(double)info.memory));
1267:       } else {
1268:         PetscCall(PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %" PetscInt_FMT " nz %" PetscInt_FMT " nz alloced %" PetscInt_FMT " mem %g, using I-node routines\n",
1269:                                                    rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(double)info.memory));
1270:       }
1271:       MatGetInfo(aij->A,MAT_LOCAL,&info);
1272:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %" PetscInt_FMT " \n",rank,(PetscInt)info.nz_used);
1273:       MatGetInfo(aij->B,MAT_LOCAL,&info);
1274:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %" PetscInt_FMT " \n",rank,(PetscInt)info.nz_used);
1275:       PetscViewerFlush(viewer);
1276:       PetscViewerASCIIPopSynchronized(viewer);
1277:       PetscViewerASCIIPrintf(viewer,"Information on VecScatter used in matrix-vector product: \n");
1278:       VecScatterView(aij->Mvctx,viewer);
1279:       return 0;
1280:     } else if (format == PETSC_VIEWER_ASCII_INFO) {
1281:       PetscInt inodecount,inodelimit,*inodes;
1282:       MatInodeGetInodeSizes(aij->A,&inodecount,&inodes,&inodelimit);
1283:       if (inodes) {
1284:         PetscViewerASCIIPrintf(viewer,"using I-node (on process 0) routines: found %" PetscInt_FMT " nodes, limit used is %" PetscInt_FMT "\n",inodecount,inodelimit);
1285:       } else {
1286:         PetscViewerASCIIPrintf(viewer,"not using I-node (on process 0) routines\n");
1287:       }
1288:       return 0;
1289:     } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
1290:       return 0;
1291:     }
1292:   } else if (isbinary) {
1293:     if (size == 1) {
1294:       PetscObjectSetName((PetscObject)aij->A,((PetscObject)mat)->name);
1295:       MatView(aij->A,viewer);
1296:     } else {
1297:       MatView_MPIAIJ_Binary(mat,viewer);
1298:     }
1299:     return 0;
1300:   } else if (iascii && size == 1) {
1301:     PetscObjectSetName((PetscObject)aij->A,((PetscObject)mat)->name);
1302:     MatView(aij->A,viewer);
1303:     return 0;
1304:   } else if (isdraw) {
1305:     PetscDraw draw;
1306:     PetscBool isnull;
1307:     PetscViewerDrawGetDraw(viewer,0,&draw);
1308:     PetscDrawIsNull(draw,&isnull);
1309:     if (isnull) return 0;
1310:   }

1312:   { /* assemble the entire matrix onto first processor */
1313:     Mat A = NULL, Av;
1314:     IS  isrow,iscol;

1316:     ISCreateStride(PetscObjectComm((PetscObject)mat),rank == 0 ? mat->rmap->N : 0,0,1,&isrow);
1317:     ISCreateStride(PetscObjectComm((PetscObject)mat),rank == 0 ? mat->cmap->N : 0,0,1,&iscol);
1318:     MatCreateSubMatrix(mat,isrow,iscol,MAT_INITIAL_MATRIX,&A);
1319:     MatMPIAIJGetSeqAIJ(A,&Av,NULL,NULL);
1320: /*  The commented-out code below uses MatCreateSubMatrices() instead */
1321: /*
1322:     Mat *AA, A = NULL, Av;
1323:     IS  isrow,iscol;

1325:     ISCreateStride(PetscObjectComm((PetscObject)mat),rank == 0 ? mat->rmap->N : 0,0,1,&isrow);
1326:     ISCreateStride(PetscObjectComm((PetscObject)mat),rank == 0 ? mat->cmap->N : 0,0,1,&iscol);
1327:     MatCreateSubMatrices(mat,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&AA);
1328:     if (rank == 0) {
1329:        PetscObjectReference((PetscObject)AA[0]);
1330:        A    = AA[0];
1331:        Av   = AA[0];
1332:     }
1333:     MatDestroySubMatrices(1,&AA);
1334: */
1335:     ISDestroy(&iscol);
1336:     ISDestroy(&isrow);
1337:     /*
1338:        Every process has to participate in drawing the matrix since the graphics waits are
1339:        synchronized across all processes that share the PetscDraw object
1340:     */
1341:     PetscViewerGetSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
1342:     if (rank == 0) {
1343:       if (((PetscObject)mat)->name) {
1344:         PetscObjectSetName((PetscObject)Av,((PetscObject)mat)->name);
1345:       }
1346:       MatView_SeqAIJ(Av,sviewer);
1347:     }
1348:     PetscViewerRestoreSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
1349:     PetscViewerFlush(viewer);
1350:     MatDestroy(&A);
1351:   }
1352:   return 0;
1353: }

1355: PetscErrorCode MatView_MPIAIJ(Mat mat,PetscViewer viewer)
1356: {
1357:   PetscBool      iascii,isdraw,issocket,isbinary;

1359:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1360:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
1361:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1362:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSOCKET,&issocket);
1363:   if (iascii || isdraw || isbinary || issocket) {
1364:     MatView_MPIAIJ_ASCIIorDraworSocket(mat,viewer);
1365:   }
1366:   return 0;
1367: }

1369: PetscErrorCode MatSOR_MPIAIJ(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
1370: {
1371:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1372:   Vec            bb1 = NULL;
1373:   PetscBool      hasop;

1375:   if (flag == SOR_APPLY_UPPER) {
1376:     (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1377:     return 0;
1378:   }

1380:   if (its > 1 || ~flag & SOR_ZERO_INITIAL_GUESS || flag & SOR_EISENSTAT) {
1381:     VecDuplicate(bb,&bb1);
1382:   }

1384:   if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP) {
1385:     if (flag & SOR_ZERO_INITIAL_GUESS) {
1386:       (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1387:       its--;
1388:     }

1390:     while (its--) {
1391:       VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1392:       VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);

1394:       /* update rhs: bb1 = bb - B*x */
1395:       VecScale(mat->lvec,-1.0);
1396:       (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);

1398:       /* local sweep */
1399:       (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,1,xx);
1400:     }
1401:   } else if (flag & SOR_LOCAL_FORWARD_SWEEP) {
1402:     if (flag & SOR_ZERO_INITIAL_GUESS) {
1403:       (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1404:       its--;
1405:     }
1406:     while (its--) {
1407:       VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1408:       VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);

1410:       /* update rhs: bb1 = bb - B*x */
1411:       VecScale(mat->lvec,-1.0);
1412:       (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);

1414:       /* local sweep */
1415:       (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_FORWARD_SWEEP,fshift,lits,1,xx);
1416:     }
1417:   } else if (flag & SOR_LOCAL_BACKWARD_SWEEP) {
1418:     if (flag & SOR_ZERO_INITIAL_GUESS) {
1419:       (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1420:       its--;
1421:     }
1422:     while (its--) {
1423:       VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1424:       VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);

1426:       /* update rhs: bb1 = bb - B*x */
1427:       VecScale(mat->lvec,-1.0);
1428:       (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);

1430:       /* local sweep */
1431:       (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_BACKWARD_SWEEP,fshift,lits,1,xx);
1432:     }
1433:   } else if (flag & SOR_EISENSTAT) {
1434:     Vec xx1;

1436:     VecDuplicate(bb,&xx1);
1437:     (*mat->A->ops->sor)(mat->A,bb,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_BACKWARD_SWEEP),fshift,lits,1,xx);

1439:     VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1440:     VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1441:     if (!mat->diag) {
1442:       MatCreateVecs(matin,&mat->diag,NULL);
1443:       MatGetDiagonal(matin,mat->diag);
1444:     }
1445:     MatHasOperation(matin,MATOP_MULT_DIAGONAL_BLOCK,&hasop);
1446:     if (hasop) {
1447:       MatMultDiagonalBlock(matin,xx,bb1);
1448:     } else {
1449:       VecPointwiseMult(bb1,mat->diag,xx);
1450:     }
1451:     VecAYPX(bb1,(omega-2.0)/omega,bb);

1453:     MatMultAdd(mat->B,mat->lvec,bb1,bb1);

1455:     /* local sweep */
1456:     (*mat->A->ops->sor)(mat->A,bb1,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_FORWARD_SWEEP),fshift,lits,1,xx1);
1457:     VecAXPY(xx,1.0,xx1);
1458:     VecDestroy(&xx1);
1459:   } else SETERRQ(PetscObjectComm((PetscObject)matin),PETSC_ERR_SUP,"Parallel SOR not supported");

1461:   VecDestroy(&bb1);

1463:   matin->factorerrortype = mat->A->factorerrortype;
1464:   return 0;
1465: }
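/* A minimal usage sketch: in practice this routine is reached through PCSOR, which applies the
   process-local SOR sweeps implemented above.  The KSP object `ksp` is assumed to exist already
   and error checking is omitted.

     PC pc;
     KSPGetPC(ksp,&pc);
     PCSetType(pc,PCSOR);
     PCSORSetSymmetric(pc,SOR_LOCAL_SYMMETRIC_SWEEP);   selects the local symmetric sweep branch above
     PCSORSetOmega(pc,1.2);
     PCSORSetIterations(pc,2,1);                        its=2 outer iterations, lits=1 local sweeps

   or, from the options database,
     -pc_type sor -pc_sor_local_symmetric -pc_sor_omega 1.2 -pc_sor_its 2 -pc_sor_lits 1
*/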

1467: PetscErrorCode MatPermute_MPIAIJ(Mat A,IS rowp,IS colp,Mat *B)
1468: {
1469:   Mat            aA,aB,Aperm;
1470:   const PetscInt *rwant,*cwant,*gcols,*ai,*bi,*aj,*bj;
1471:   PetscScalar    *aa,*ba;
1472:   PetscInt       i,j,m,n,ng,anz,bnz,*dnnz,*onnz,*tdnnz,*tonnz,*rdest,*cdest,*work,*gcdest;
1473:   PetscSF        rowsf,sf;
1474:   IS             parcolp = NULL;
1475:   PetscBool      done;

1477:   MatGetLocalSize(A,&m,&n);
1478:   ISGetIndices(rowp,&rwant);
1479:   ISGetIndices(colp,&cwant);
1480:   PetscMalloc3(PetscMax(m,n),&work,m,&rdest,n,&cdest);

1482:   /* Invert row permutation to find out where my rows should go */
1483:   PetscSFCreate(PetscObjectComm((PetscObject)A),&rowsf);
1484:   PetscSFSetGraphLayout(rowsf,A->rmap,A->rmap->n,NULL,PETSC_OWN_POINTER,rwant);
1485:   PetscSFSetFromOptions(rowsf);
1486:   for (i=0; i<m; i++) work[i] = A->rmap->rstart + i;
1487:   PetscSFReduceBegin(rowsf,MPIU_INT,work,rdest,MPI_REPLACE);
1488:   PetscSFReduceEnd(rowsf,MPIU_INT,work,rdest,MPI_REPLACE);

1490:   /* Invert column permutation to find out where my columns should go */
1491:   PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
1492:   PetscSFSetGraphLayout(sf,A->cmap,A->cmap->n,NULL,PETSC_OWN_POINTER,cwant);
1493:   PetscSFSetFromOptions(sf);
1494:   for (i=0; i<n; i++) work[i] = A->cmap->rstart + i;
1495:   PetscSFReduceBegin(sf,MPIU_INT,work,cdest,MPI_REPLACE);
1496:   PetscSFReduceEnd(sf,MPIU_INT,work,cdest,MPI_REPLACE);
1497:   PetscSFDestroy(&sf);

1499:   ISRestoreIndices(rowp,&rwant);
1500:   ISRestoreIndices(colp,&cwant);
1501:   MatMPIAIJGetSeqAIJ(A,&aA,&aB,&gcols);

1503:   /* Find out where my gcols should go */
1504:   MatGetSize(aB,NULL,&ng);
1505:   PetscMalloc1(ng,&gcdest);
1506:   PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
1507:   PetscSFSetGraphLayout(sf,A->cmap,ng,NULL,PETSC_OWN_POINTER,gcols);
1508:   PetscSFSetFromOptions(sf);
1509:   PetscSFBcastBegin(sf,MPIU_INT,cdest,gcdest,MPI_REPLACE);
1510:   PetscSFBcastEnd(sf,MPIU_INT,cdest,gcdest,MPI_REPLACE);
1511:   PetscSFDestroy(&sf);

1513:   PetscCalloc4(m,&dnnz,m,&onnz,m,&tdnnz,m,&tonnz);
1514:   MatGetRowIJ(aA,0,PETSC_FALSE,PETSC_FALSE,&anz,&ai,&aj,&done);
1515:   MatGetRowIJ(aB,0,PETSC_FALSE,PETSC_FALSE,&bnz,&bi,&bj,&done);
1516:   for (i=0; i<m; i++) {
1517:     PetscInt    row = rdest[i];
1518:     PetscMPIInt rowner;
1519:     PetscLayoutFindOwner(A->rmap,row,&rowner);
1520:     for (j=ai[i]; j<ai[i+1]; j++) {
1521:       PetscInt    col = cdest[aj[j]];
1522:       PetscMPIInt cowner;
1523:       PetscLayoutFindOwner(A->cmap,col,&cowner); /* Could build an index for the columns to eliminate this search */
1524:       if (rowner == cowner) dnnz[i]++;
1525:       else onnz[i]++;
1526:     }
1527:     for (j=bi[i]; j<bi[i+1]; j++) {
1528:       PetscInt    col = gcdest[bj[j]];
1529:       PetscMPIInt cowner;
1530:       PetscLayoutFindOwner(A->cmap,col,&cowner);
1531:       if (rowner == cowner) dnnz[i]++;
1532:       else onnz[i]++;
1533:     }
1534:   }
1535:   PetscSFBcastBegin(rowsf,MPIU_INT,dnnz,tdnnz,MPI_REPLACE);
1536:   PetscSFBcastEnd(rowsf,MPIU_INT,dnnz,tdnnz,MPI_REPLACE);
1537:   PetscSFBcastBegin(rowsf,MPIU_INT,onnz,tonnz,MPI_REPLACE);
1538:   PetscSFBcastEnd(rowsf,MPIU_INT,onnz,tonnz,MPI_REPLACE);
1539:   PetscSFDestroy(&rowsf);

1541:   MatCreateAIJ(PetscObjectComm((PetscObject)A),A->rmap->n,A->cmap->n,A->rmap->N,A->cmap->N,0,tdnnz,0,tonnz,&Aperm);
1542:   MatSeqAIJGetArray(aA,&aa);
1543:   MatSeqAIJGetArray(aB,&ba);
1544:   for (i=0; i<m; i++) {
1545:     PetscInt *acols = dnnz,*bcols = onnz; /* Repurpose now-unneeded arrays */
1546:     PetscInt j0,rowlen;
1547:     rowlen = ai[i+1] - ai[i];
1548:     for (j0=j=0; j<rowlen; j0=j) { /* rowlen could be larger than the number of local rows m (the size of the scratch arrays), so insert in batches */
1549:       for (; j<PetscMin(rowlen,j0+m); j++) acols[j-j0] = cdest[aj[ai[i]+j]];
1550:       MatSetValues(Aperm,1,&rdest[i],j-j0,acols,aa+ai[i]+j0,INSERT_VALUES);
1551:     }
1552:     rowlen = bi[i+1] - bi[i];
1553:     for (j0=j=0; j<rowlen; j0=j) {
1554:       for (; j<PetscMin(rowlen,j0+m); j++) bcols[j-j0] = gcdest[bj[bi[i]+j]];
1555:       MatSetValues(Aperm,1,&rdest[i],j-j0,bcols,ba+bi[i]+j0,INSERT_VALUES);
1556:     }
1557:   }
1558:   MatAssemblyBegin(Aperm,MAT_FINAL_ASSEMBLY);
1559:   MatAssemblyEnd(Aperm,MAT_FINAL_ASSEMBLY);
1560:   MatRestoreRowIJ(aA,0,PETSC_FALSE,PETSC_FALSE,&anz,&ai,&aj,&done);
1561:   MatRestoreRowIJ(aB,0,PETSC_FALSE,PETSC_FALSE,&bnz,&bi,&bj,&done);
1562:   MatSeqAIJRestoreArray(aA,&aa);
1563:   MatSeqAIJRestoreArray(aB,&ba);
1564:   PetscFree4(dnnz,onnz,tdnnz,tonnz);
1565:   PetscFree3(work,rdest,cdest);
1566:   PetscFree(gcdest);
1567:   if (parcolp) ISDestroy(&colp);
1568:   *B = Aperm;
1569:   return 0;
1570: }

1572: PetscErrorCode  MatGetGhosts_MPIAIJ(Mat mat,PetscInt *nghosts,const PetscInt *ghosts[])
1573: {
1574:   Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;

1576:   MatGetSize(aij->B,NULL,nghosts);
1577:   if (ghosts) *ghosts = aij->garray;
1578:   return 0;
1579: }

1581: PetscErrorCode MatGetInfo_MPIAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1582: {
1583:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1584:   Mat            A    = mat->A,B = mat->B;
1585:   PetscLogDouble isend[5],irecv[5];

1587:   info->block_size = 1.0;
1588:   MatGetInfo(A,MAT_LOCAL,info);

1590:   isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1591:   isend[3] = info->memory;  isend[4] = info->mallocs;

1593:   MatGetInfo(B,MAT_LOCAL,info);

1595:   isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1596:   isend[3] += info->memory;  isend[4] += info->mallocs;
1597:   if (flag == MAT_LOCAL) {
1598:     info->nz_used      = isend[0];
1599:     info->nz_allocated = isend[1];
1600:     info->nz_unneeded  = isend[2];
1601:     info->memory       = isend[3];
1602:     info->mallocs      = isend[4];
1603:   } else if (flag == MAT_GLOBAL_MAX) {
1604:     MPIU_Allreduce(isend,irecv,5,MPIU_PETSCLOGDOUBLE,MPI_MAX,PetscObjectComm((PetscObject)matin));

1606:     info->nz_used      = irecv[0];
1607:     info->nz_allocated = irecv[1];
1608:     info->nz_unneeded  = irecv[2];
1609:     info->memory       = irecv[3];
1610:     info->mallocs      = irecv[4];
1611:   } else if (flag == MAT_GLOBAL_SUM) {
1612:     MPIU_Allreduce(isend,irecv,5,MPIU_PETSCLOGDOUBLE,MPI_SUM,PetscObjectComm((PetscObject)matin));

1614:     info->nz_used      = irecv[0];
1615:     info->nz_allocated = irecv[1];
1616:     info->nz_unneeded  = irecv[2];
1617:     info->memory       = irecv[3];
1618:     info->mallocs      = irecv[4];
1619:   }
1620:   info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
1621:   info->fill_ratio_needed = 0;
1622:   info->factor_mallocs    = 0;
1623:   return 0;
1624: }
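/* A minimal usage sketch: this routine is reached through MatGetInfo().  For example, to report
   the globally summed nonzero counts (MatInfo fields are PetscLogDouble, hence the %g format):

     MatInfo info;
     MatGetInfo(A,MAT_GLOBAL_SUM,&info);
     PetscPrintf(PETSC_COMM_WORLD,"nz_used %g  nz_allocated %g  mallocs %g\n",
                 info.nz_used,info.nz_allocated,info.mallocs);

   MAT_LOCAL and MAT_GLOBAL_MAX select the other two branches above. */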

1626: PetscErrorCode MatSetOption_MPIAIJ(Mat A,MatOption op,PetscBool flg)
1627: {
1628:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1630:   switch (op) {
1631:   case MAT_NEW_NONZERO_LOCATIONS:
1632:   case MAT_NEW_NONZERO_ALLOCATION_ERR:
1633:   case MAT_UNUSED_NONZERO_LOCATION_ERR:
1634:   case MAT_KEEP_NONZERO_PATTERN:
1635:   case MAT_NEW_NONZERO_LOCATION_ERR:
1636:   case MAT_USE_INODES:
1637:   case MAT_IGNORE_ZERO_ENTRIES:
1638:   case MAT_FORM_EXPLICIT_TRANSPOSE:
1639:     MatCheckPreallocated(A,1);
1640:     MatSetOption(a->A,op,flg);
1641:     MatSetOption(a->B,op,flg);
1642:     break;
1643:   case MAT_ROW_ORIENTED:
1644:     MatCheckPreallocated(A,1);
1645:     a->roworiented = flg;

1647:     MatSetOption(a->A,op,flg);
1648:     MatSetOption(a->B,op,flg);
1649:     break;
1650:   case MAT_FORCE_DIAGONAL_ENTRIES:
1651:   case MAT_SORTED_FULL:
1652:     PetscInfo(A,"Option %s ignored\n",MatOptions[op]);
1653:     break;
1654:   case MAT_IGNORE_OFF_PROC_ENTRIES:
1655:     a->donotstash = flg;
1656:     break;
1657:   /* Symmetry flags are handled directly by MatSetOption() and they don't affect preallocation */
1658:   case MAT_SPD:
1659:   case MAT_SYMMETRIC:
1660:   case MAT_STRUCTURALLY_SYMMETRIC:
1661:   case MAT_HERMITIAN:
1662:   case MAT_SYMMETRY_ETERNAL:
1663:     break;
1664:   case MAT_SUBMAT_SINGLEIS:
1665:     A->submat_singleis = flg;
1666:     break;
1667:   case MAT_STRUCTURE_ONLY:
1668:     /* The option is handled directly by MatSetOption() */
1669:     break;
1670:   default:
1671:     SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"unknown option %d",op);
1672:   }
1673:   return 0;
1674: }

1676: PetscErrorCode MatGetRow_MPIAIJ(Mat matin,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1677: {
1678:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1679:   PetscScalar    *vworkA,*vworkB,**pvA,**pvB,*v_p;
1680:   PetscInt       i,*cworkA,*cworkB,**pcA,**pcB,cstart = matin->cmap->rstart;
1681:   PetscInt       nztot,nzA,nzB,lrow,rstart = matin->rmap->rstart,rend = matin->rmap->rend;
1682:   PetscInt       *cmap,*idx_p;

1685:   mat->getrowactive = PETSC_TRUE;

1687:   if (!mat->rowvalues && (idx || v)) {
1688:     /*
1689:         allocate enough space to hold information from the longest row.
1690:     */
1691:     Mat_SeqAIJ *Aa = (Mat_SeqAIJ*)mat->A->data,*Ba = (Mat_SeqAIJ*)mat->B->data;
1692:     PetscInt   max = 1,tmp;
1693:     for (i=0; i<matin->rmap->n; i++) {
1694:       tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i];
1695:       if (max < tmp) max = tmp;
1696:     }
1697:     PetscMalloc2(max,&mat->rowvalues,max,&mat->rowindices);
1698:   }

1701:   lrow = row - rstart;

1703:   pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
1704:   if (!v)   {pvA = NULL; pvB = NULL;}
1705:   if (!idx) {pcA = NULL; if (!v) pcB = NULL;}
1706:   (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);
1707:   (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);
1708:   nztot = nzA + nzB;

1710:   cmap = mat->garray;
1711:   if (v  || idx) {
1712:     if (nztot) {
1713:       /* Sort by increasing column numbers, assuming A and B already sorted */
1714:       PetscInt imark = -1;
1715:       if (v) {
1716:         *v = v_p = mat->rowvalues;
1717:         for (i=0; i<nzB; i++) {
1718:           if (cmap[cworkB[i]] < cstart) v_p[i] = vworkB[i];
1719:           else break;
1720:         }
1721:         imark = i;
1722:         for (i=0; i<nzA; i++)     v_p[imark+i] = vworkA[i];
1723:         for (i=imark; i<nzB; i++) v_p[nzA+i]   = vworkB[i];
1724:       }
1725:       if (idx) {
1726:         *idx = idx_p = mat->rowindices;
1727:         if (imark > -1) {
1728:           for (i=0; i<imark; i++) {
1729:             idx_p[i] = cmap[cworkB[i]];
1730:           }
1731:         } else {
1732:           for (i=0; i<nzB; i++) {
1733:             if (cmap[cworkB[i]] < cstart) idx_p[i] = cmap[cworkB[i]];
1734:             else break;
1735:           }
1736:           imark = i;
1737:         }
1738:         for (i=0; i<nzA; i++)     idx_p[imark+i] = cstart + cworkA[i];
1739:         for (i=imark; i<nzB; i++) idx_p[nzA+i]   = cmap[cworkB[i]];
1740:       }
1741:     } else {
1742:       if (idx) *idx = NULL;
1743:       if (v)   *v   = NULL;
1744:     }
1745:   }
1746:   *nz  = nztot;
1747:   (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);
1748:   (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);
1749:   return 0;
1750: }

1752: PetscErrorCode MatRestoreRow_MPIAIJ(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1753: {
1754:   Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;

1757:   aij->getrowactive = PETSC_FALSE;
1758:   return 0;
1759: }

1761: PetscErrorCode MatNorm_MPIAIJ(Mat mat,NormType type,PetscReal *norm)
1762: {
1763:   Mat_MPIAIJ      *aij  = (Mat_MPIAIJ*)mat->data;
1764:   Mat_SeqAIJ      *amat = (Mat_SeqAIJ*)aij->A->data,*bmat = (Mat_SeqAIJ*)aij->B->data;
1765:   PetscInt        i,j,cstart = mat->cmap->rstart;
1766:   PetscReal       sum = 0.0;
1767:   const MatScalar *v,*amata,*bmata;

1769:   if (aij->size == 1) {
1770:     MatNorm(aij->A,type,norm);
1771:   } else {
1772:     MatSeqAIJGetArrayRead(aij->A,&amata);
1773:     MatSeqAIJGetArrayRead(aij->B,&bmata);
1774:     if (type == NORM_FROBENIUS) {
1775:       v = amata;
1776:       for (i=0; i<amat->nz; i++) {
1777:         sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
1778:       }
1779:       v = bmata;
1780:       for (i=0; i<bmat->nz; i++) {
1781:         sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
1782:       }
1783:       MPIU_Allreduce(&sum,norm,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));
1784:       *norm = PetscSqrtReal(*norm);
1785:       PetscLogFlops(2.0*amat->nz+2.0*bmat->nz);
1786:     } else if (type == NORM_1) { /* max column norm */
1787:       PetscReal *tmp,*tmp2;
1788:       PetscInt  *jj,*garray = aij->garray;
1789:       PetscCalloc1(mat->cmap->N+1,&tmp);
1790:       PetscMalloc1(mat->cmap->N+1,&tmp2);
1791:       *norm = 0.0;
1792:       v     = amata; jj = amat->j;
1793:       for (j=0; j<amat->nz; j++) {
1794:         tmp[cstart + *jj++] += PetscAbsScalar(*v);  v++;
1795:       }
1796:       v = bmata; jj = bmat->j;
1797:       for (j=0; j<bmat->nz; j++) {
1798:         tmp[garray[*jj++]] += PetscAbsScalar(*v); v++;
1799:       }
1800:       MPIU_Allreduce(tmp,tmp2,mat->cmap->N,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));
1801:       for (j=0; j<mat->cmap->N; j++) {
1802:         if (tmp2[j] > *norm) *norm = tmp2[j];
1803:       }
1804:       PetscFree(tmp);
1805:       PetscFree(tmp2);
1806:       PetscLogFlops(PetscMax(amat->nz+bmat->nz-1,0));
1807:     } else if (type == NORM_INFINITY) { /* max row norm */
1808:       PetscReal ntemp = 0.0;
1809:       for (j=0; j<aij->A->rmap->n; j++) {
1810:         v   = amata + amat->i[j];
1811:         sum = 0.0;
1812:         for (i=0; i<amat->i[j+1]-amat->i[j]; i++) {
1813:           sum += PetscAbsScalar(*v); v++;
1814:         }
1815:         v = bmata + bmat->i[j];
1816:         for (i=0; i<bmat->i[j+1]-bmat->i[j]; i++) {
1817:           sum += PetscAbsScalar(*v); v++;
1818:         }
1819:         if (sum > ntemp) ntemp = sum;
1820:       }
1821:       MPIU_Allreduce(&ntemp,norm,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)mat));
1822:       PetscLogFlops(PetscMax(amat->nz+bmat->nz-1,0));
1823:     } else SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"No support for two norm");
1824:     MatSeqAIJRestoreArrayRead(aij->A,&amata);
1825:     MatSeqAIJRestoreArrayRead(aij->B,&bmata);
1826:   }
1827:   return 0;
1828: }

1830: PetscErrorCode MatTranspose_MPIAIJ(Mat A,MatReuse reuse,Mat *matout)
1831: {
1832:   Mat_MPIAIJ      *a    =(Mat_MPIAIJ*)A->data,*b;
1833:   Mat_SeqAIJ      *Aloc =(Mat_SeqAIJ*)a->A->data,*Bloc=(Mat_SeqAIJ*)a->B->data,*sub_B_diag;
1834:   PetscInt        M     = A->rmap->N,N=A->cmap->N,ma,na,mb,nb,row,*cols,*cols_tmp,*B_diag_ilen,i,ncol,A_diag_ncol;
1835:   const PetscInt  *ai,*aj,*bi,*bj,*B_diag_i;
1836:   Mat             B,A_diag,*B_diag;
1837:   const MatScalar *pbv,*bv;

1839:   ma = A->rmap->n; na = A->cmap->n; mb = a->B->rmap->n; nb = a->B->cmap->n;
1840:   ai = Aloc->i; aj = Aloc->j;
1841:   bi = Bloc->i; bj = Bloc->j;
1842:   if (reuse == MAT_INITIAL_MATRIX || *matout == A) {
1843:     PetscInt             *d_nnz,*g_nnz,*o_nnz;
1844:     PetscSFNode          *oloc;
1845:     PETSC_UNUSED PetscSF sf;

1847:     PetscMalloc4(na,&d_nnz,na,&o_nnz,nb,&g_nnz,nb,&oloc);
1848:     /* compute d_nnz for preallocation */
1849:     PetscArrayzero(d_nnz,na);
1850:     for (i=0; i<ai[ma]; i++) d_nnz[aj[i]]++;
1851:     /* compute local off-diagonal contributions */
1852:     PetscArrayzero(g_nnz,nb);
1853:     for (i=0; i<bi[ma]; i++) g_nnz[bj[i]]++;
1854:     /* map those to global */
1855:     PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
1856:     PetscSFSetGraphLayout(sf,A->cmap,nb,NULL,PETSC_USE_POINTER,a->garray);
1857:     PetscSFSetFromOptions(sf);
1858:     PetscArrayzero(o_nnz,na);
1859:     PetscSFReduceBegin(sf,MPIU_INT,g_nnz,o_nnz,MPIU_SUM);
1860:     PetscSFReduceEnd(sf,MPIU_INT,g_nnz,o_nnz,MPIU_SUM);
1861:     PetscSFDestroy(&sf);

1863:     MatCreate(PetscObjectComm((PetscObject)A),&B);
1864:     MatSetSizes(B,A->cmap->n,A->rmap->n,N,M);
1865:     MatSetBlockSizes(B,PetscAbs(A->cmap->bs),PetscAbs(A->rmap->bs));
1866:     MatSetType(B,((PetscObject)A)->type_name);
1867:     MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);
1868:     PetscFree4(d_nnz,o_nnz,g_nnz,oloc);
1869:   } else {
1870:     B    = *matout;
1871:     MatSetOption(B,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);
1872:   }

1874:   b           = (Mat_MPIAIJ*)B->data;
1875:   A_diag      = a->A;
1876:   B_diag      = &b->A;
1877:   sub_B_diag  = (Mat_SeqAIJ*)(*B_diag)->data;
1878:   A_diag_ncol = A_diag->cmap->N;
1879:   B_diag_ilen = sub_B_diag->ilen;
1880:   B_diag_i    = sub_B_diag->i;

1882:   /* Set ilen for diagonal of B */
1883:   for (i=0; i<A_diag_ncol; i++) {
1884:     B_diag_ilen[i] = B_diag_i[i+1] - B_diag_i[i];
1885:   }

1887:   /* Transpose the diagonal part of the matrix. In contrast to the off-diagonal part, this can be done
1888:   very quickly (without using MatSetValues()), because all writes are local. */
1889:   MatTranspose(A_diag,MAT_REUSE_MATRIX,B_diag);

1891:   /* copy over the B part */
1892:   PetscMalloc1(bi[mb],&cols);
1893:   MatSeqAIJGetArrayRead(a->B,&bv);
1894:   pbv  = bv;
1895:   row  = A->rmap->rstart;
1896:   for (i=0; i<bi[mb]; i++) cols[i] = a->garray[bj[i]];
1897:   cols_tmp = cols;
1898:   for (i=0; i<mb; i++) {
1899:     ncol = bi[i+1]-bi[i];
1900:     MatSetValues(B,ncol,cols_tmp,1,&row,pbv,INSERT_VALUES);
1901:     row++;
1902:     pbv += ncol; cols_tmp += ncol;
1903:   }
1904:   PetscFree(cols);
1905:   MatSeqAIJRestoreArrayRead(a->B,&bv);

1907:   MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
1908:   MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
1909:   if (reuse == MAT_INITIAL_MATRIX || reuse == MAT_REUSE_MATRIX) {
1910:     *matout = B;
1911:   } else {
1912:     MatHeaderMerge(A,&B);
1913:   }
1914:   return 0;
1915: }
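/* A minimal usage sketch of the three MatReuse modes handled above:

     Mat At;
     MatTranspose(A,MAT_INITIAL_MATRIX,&At);    builds At with fresh preallocation
     MatTranspose(A,MAT_REUSE_MATRIX,&At);      refills an At that has the same nonzero pattern
     MatTranspose(A,MAT_INPLACE_MATRIX,&A);     replaces A by its transpose (the MatHeaderMerge() branch)
*/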

1917: PetscErrorCode MatDiagonalScale_MPIAIJ(Mat mat,Vec ll,Vec rr)
1918: {
1919:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
1920:   Mat            a    = aij->A,b = aij->B;
1921:   PetscInt       s1,s2,s3;

1923:   MatGetLocalSize(mat,&s2,&s3);
1924:   if (rr) {
1925:     VecGetLocalSize(rr,&s1);
1927:     /* Overlap communication with computation. */
1928:     VecScatterBegin(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);
1929:   }
1930:   if (ll) {
1931:     VecGetLocalSize(ll,&s1);
1933:     (*b->ops->diagonalscale)(b,ll,NULL);
1934:   }
1935:   /* scale the diagonal block */
1936:   (*a->ops->diagonalscale)(a,ll,rr);

1938:   if (rr) {
1939:     /* Do a scatter end and then right scale the off-diagonal block */
1940:     VecScatterEnd(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);
1941:     (*b->ops->diagonalscale)(b,NULL,aij->lvec);
1942:   }
1943:   return 0;
1944: }

1946: PetscErrorCode MatSetUnfactored_MPIAIJ(Mat A)
1947: {
1948:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1950:   MatSetUnfactored(a->A);
1951:   return 0;
1952: }

1954: PetscErrorCode MatEqual_MPIAIJ(Mat A,Mat B,PetscBool  *flag)
1955: {
1956:   Mat_MPIAIJ     *matB = (Mat_MPIAIJ*)B->data,*matA = (Mat_MPIAIJ*)A->data;
1957:   Mat            a,b,c,d;
1958:   PetscBool      flg;

1960:   a = matA->A; b = matA->B;
1961:   c = matB->A; d = matB->B;

1963:   MatEqual(a,c,&flg);
1964:   if (flg) {
1965:     MatEqual(b,d,&flg);
1966:   }
1967:   MPIU_Allreduce(&flg,flag,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)A));
1968:   return 0;
1969: }

1971: PetscErrorCode MatCopy_MPIAIJ(Mat A,Mat B,MatStructure str)
1972: {
1973:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
1974:   Mat_MPIAIJ     *b = (Mat_MPIAIJ*)B->data;

1976:   /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */
1977:   if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) {
1978:     /* because of the column compression in the off-processor part of the matrix a->B,
1979:        the number of columns in a->B and b->B may be different, hence we cannot call
1980:        the MatCopy() directly on the two parts. If need be, we can provide a more
1981:        efficient copy than the MatCopy_Basic() by first uncompressing the a->B matrices
1982:        then copying the submatrices */
1983:     MatCopy_Basic(A,B,str);
1984:   } else {
1985:     MatCopy(a->A,b->A,str);
1986:     MatCopy(a->B,b->B,str);
1987:   }
1988:   PetscObjectStateIncrease((PetscObject)B);
1989:   return 0;
1990: }

1992: PetscErrorCode MatSetUp_MPIAIJ(Mat A)
1993: {
1994:   MatMPIAIJSetPreallocation(A,PETSC_DEFAULT,NULL,PETSC_DEFAULT,NULL);
1995:   return 0;
1996: }

1998: /*
1999:    Computes the number of nonzeros per row needed for preallocation when X and Y
2000:    have different nonzero structure.
2001: */
2002: PetscErrorCode MatAXPYGetPreallocation_MPIX_private(PetscInt m,const PetscInt *xi,const PetscInt *xj,const PetscInt *xltog,const PetscInt *yi,const PetscInt *yj,const PetscInt *yltog,PetscInt *nnz)
2003: {
2004:   PetscInt       i,j,k,nzx,nzy;

2006:   /* Set the number of nonzeros in the new matrix */
2007:   for (i=0; i<m; i++) {
2008:     const PetscInt *xjj = xj+xi[i],*yjj = yj+yi[i];
2009:     nzx = xi[i+1] - xi[i];
2010:     nzy = yi[i+1] - yi[i];
2011:     nnz[i] = 0;
2012:     for (j=0,k=0; j<nzx; j++) {                   /* Point in X */
2013:       for (; k<nzy && yltog[yjj[k]]<xltog[xjj[j]]; k++) nnz[i]++; /* Catch up to X */
2014:       if (k<nzy && yltog[yjj[k]]==xltog[xjj[j]]) k++;             /* Skip duplicate */
2015:       nnz[i]++;
2016:     }
2017:     for (; k<nzy; k++) nnz[i]++;
2018:   }
2019:   return 0;
2020: }
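/* A small worked example of the merge above: for one row with global X columns {1,4,7} and
   global Y columns {2,4,9}, the inner loops count 1, 2, 4 (duplicate counted once), 7 and then
   the trailing 9, so nnz[i] = 5, i.e. the size of the union of the two column sets. */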

2022: /* This is the same as MatAXPYGetPreallocation_SeqAIJ, except that the local-to-global map is provided */
2023: static PetscErrorCode MatAXPYGetPreallocation_MPIAIJ(Mat Y,const PetscInt *yltog,Mat X,const PetscInt *xltog,PetscInt *nnz)
2024: {
2025:   PetscInt       m = Y->rmap->N;
2026:   Mat_SeqAIJ     *x = (Mat_SeqAIJ*)X->data;
2027:   Mat_SeqAIJ     *y = (Mat_SeqAIJ*)Y->data;

2029:   MatAXPYGetPreallocation_MPIX_private(m,x->i,x->j,xltog,y->i,y->j,yltog,nnz);
2030:   return 0;
2031: }

2033: PetscErrorCode MatAXPY_MPIAIJ(Mat Y,PetscScalar a,Mat X,MatStructure str)
2034: {
2035:   Mat_MPIAIJ     *xx = (Mat_MPIAIJ*)X->data,*yy = (Mat_MPIAIJ*)Y->data;

2037:   if (str == SAME_NONZERO_PATTERN) {
2038:     MatAXPY(yy->A,a,xx->A,str);
2039:     MatAXPY(yy->B,a,xx->B,str);
2040:   } else if (str == SUBSET_NONZERO_PATTERN) { /* the nonzeros of X are a subset of Y's */
2041:     MatAXPY_Basic(Y,a,X,str);
2042:   } else {
2043:     Mat      B;
2044:     PetscInt *nnz_d,*nnz_o;

2046:     PetscMalloc1(yy->A->rmap->N,&nnz_d);
2047:     PetscMalloc1(yy->B->rmap->N,&nnz_o);
2048:     MatCreate(PetscObjectComm((PetscObject)Y),&B);
2049:     PetscObjectSetName((PetscObject)B,((PetscObject)Y)->name);
2050:     MatSetLayouts(B,Y->rmap,Y->cmap);
2051:     MatSetType(B,((PetscObject)Y)->type_name);
2052:     MatAXPYGetPreallocation_SeqAIJ(yy->A,xx->A,nnz_d);
2053:     MatAXPYGetPreallocation_MPIAIJ(yy->B,yy->garray,xx->B,xx->garray,nnz_o);
2054:     MatMPIAIJSetPreallocation(B,0,nnz_d,0,nnz_o);
2055:     MatAXPY_BasicWithPreallocation(B,Y,a,X,str);
2056:     MatHeaderMerge(Y,&B);
2057:     PetscFree(nnz_d);
2058:     PetscFree(nnz_o);
2059:   }
2060:   return 0;
2061: }

2063: PETSC_INTERN PetscErrorCode MatConjugate_SeqAIJ(Mat);

2065: PetscErrorCode MatConjugate_MPIAIJ(Mat mat)
2066: {
2067:   if (PetscDefined(USE_COMPLEX)) {
2068:     Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;

2070:     MatConjugate_SeqAIJ(aij->A);
2071:     MatConjugate_SeqAIJ(aij->B);
2072:   }
2073:   return 0;
2074: }

2076: PetscErrorCode MatRealPart_MPIAIJ(Mat A)
2077: {
2078:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2080:   MatRealPart(a->A);
2081:   MatRealPart(a->B);
2082:   return 0;
2083: }

2085: PetscErrorCode MatImaginaryPart_MPIAIJ(Mat A)
2086: {
2087:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2089:   MatImaginaryPart(a->A);
2090:   MatImaginaryPart(a->B);
2091:   return 0;
2092: }

2094: PetscErrorCode MatGetRowMaxAbs_MPIAIJ(Mat A,Vec v,PetscInt idx[])
2095: {
2096:   Mat_MPIAIJ        *a = (Mat_MPIAIJ*)A->data;
2097:   PetscInt          i,*idxb = NULL,m = A->rmap->n;
2098:   PetscScalar       *va,*vv;
2099:   Vec               vB,vA;
2100:   const PetscScalar *vb;

2102:   VecCreateSeq(PETSC_COMM_SELF,m,&vA);
2103:   MatGetRowMaxAbs(a->A,vA,idx);

2105:   VecGetArrayWrite(vA,&va);
2106:   if (idx) {
2107:     for (i=0; i<m; i++) {
2108:       if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
2109:     }
2110:   }

2112:   VecCreateSeq(PETSC_COMM_SELF,m,&vB);
2113:   PetscMalloc1(m,&idxb);
2114:   MatGetRowMaxAbs(a->B,vB,idxb);

2116:   VecGetArrayWrite(v,&vv);
2117:   VecGetArrayRead(vB,&vb);
2118:   for (i=0; i<m; i++) {
2119:     if (PetscAbsScalar(va[i]) < PetscAbsScalar(vb[i])) {
2120:       vv[i] = vb[i];
2121:       if (idx) idx[i] = a->garray[idxb[i]];
2122:     } else {
2123:       vv[i] = va[i];
2124:       if (idx && PetscAbsScalar(va[i]) == PetscAbsScalar(vb[i]) && idxb[i] != -1 && idx[i] > a->garray[idxb[i]])
2125:         idx[i] = a->garray[idxb[i]];
2126:     }
2127:   }
2128:   VecRestoreArrayWrite(v,&vv);   /* vv was obtained from v, not vA */
2129:   VecRestoreArrayWrite(vA,&va);
2130:   VecRestoreArrayRead(vB,&vb);
2131:   PetscFree(idxb);
2132:   VecDestroy(&vA);
2133:   VecDestroy(&vB);
2134:   return 0;
2135: }

2137: PetscErrorCode MatGetRowMinAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2138: {
2139:   Mat_MPIAIJ        *mat   = (Mat_MPIAIJ*) A->data;
2140:   PetscInt          m = A->rmap->n,n = A->cmap->n;
2141:   PetscInt          cstart = A->cmap->rstart,cend = A->cmap->rend;
2142:   PetscInt          *cmap  = mat->garray;
2143:   PetscInt          *diagIdx, *offdiagIdx;
2144:   Vec               diagV, offdiagV;
2145:   PetscScalar       *a, *diagA, *offdiagA;
2146:   const PetscScalar *ba,*bav;
2147:   PetscInt          r,j,col,ncols,*bi,*bj;
2148:   Mat               B = mat->B;
2149:   Mat_SeqAIJ        *b = (Mat_SeqAIJ*)B->data;

2151:   /* When a single process holds the entire A and the other processes have no entries */
2152:   if (A->cmap->N == n) {
2153:     VecGetArrayWrite(v,&diagA);
2154:     VecCreateSeqWithArray(PETSC_COMM_SELF,1,m,diagA,&diagV);
2155:     MatGetRowMinAbs(mat->A,diagV,idx);
2156:     VecDestroy(&diagV);
2157:     VecRestoreArrayWrite(v,&diagA);
2158:     return 0;
2159:   } else if (n == 0) {
2160:     if (m) {
2161:       VecGetArrayWrite(v,&a);
2162:       for (r = 0; r < m; r++) {a[r] = 0.0; if (idx) idx[r] = -1;}
2163:       VecRestoreArrayWrite(v,&a);
2164:     }
2165:     return 0;
2166:   }

2168:   PetscMalloc2(m,&diagIdx,m,&offdiagIdx);
2169:   VecCreateSeq(PETSC_COMM_SELF, m, &diagV);
2170:   VecCreateSeq(PETSC_COMM_SELF, m, &offdiagV);
2171:   MatGetRowMinAbs(mat->A, diagV, diagIdx);

2173:   /* Get offdiagIdx[] for implicit 0.0 */
2174:   MatSeqAIJGetArrayRead(B,&bav);
2175:   ba   = bav;
2176:   bi   = b->i;
2177:   bj   = b->j;
2178:   VecGetArrayWrite(offdiagV, &offdiagA);
2179:   for (r = 0; r < m; r++) {
2180:     ncols = bi[r+1] - bi[r];
2181:     if (ncols == A->cmap->N - n) { /* Brow is dense */
2182:       offdiagA[r] = *ba; offdiagIdx[r] = cmap[0];
2183:     } else { /* Brow is sparse, so the implicit zeros mean the off-diagonal row minimum in absolute value is 0.0 */
2184:       offdiagA[r] = 0.0;

2186:       /* Find first hole in the cmap */
2187:       for (j=0; j<ncols; j++) {
2188:         col = cmap[bj[j]]; /* global column number = cmap[B column number] */
2189:         if (col > j && j < cstart) {
2190:           offdiagIdx[r] = j; /* global column number of first implicit 0.0 */
2191:           break;
2192:         } else if (col > j + n && j >= cstart) {
2193:           offdiagIdx[r] = j + n; /* global column number of first implicit 0.0 */
2194:           break;
2195:         }
2196:       }
2197:       if (j == ncols && ncols < A->cmap->N - n) {
2198:         /* a hole is outside compressed Bcols */
2199:         if (ncols == 0) {
2200:           if (cstart) {
2201:             offdiagIdx[r] = 0;
2202:           } else offdiagIdx[r] = cend;
2203:         } else { /* ncols > 0 */
2204:           offdiagIdx[r] = cmap[ncols-1] + 1;
2205:           if (offdiagIdx[r] == cstart) offdiagIdx[r] += n;
2206:         }
2207:       }
2208:     }

2210:     for (j=0; j<ncols; j++) {
2211:       if (PetscAbsScalar(offdiagA[r]) > PetscAbsScalar(*ba)) {offdiagA[r] = *ba; offdiagIdx[r] = cmap[*bj];}
2212:       ba++; bj++;
2213:     }
2214:   }

2216:   VecGetArrayWrite(v, &a);
2217:   VecGetArrayRead(diagV, (const PetscScalar**)&diagA);
2218:   for (r = 0; r < m; ++r) {
2219:     if (PetscAbsScalar(diagA[r]) < PetscAbsScalar(offdiagA[r])) {
2220:       a[r]   = diagA[r];
2221:       if (idx) idx[r] = cstart + diagIdx[r];
2222:     } else if (PetscAbsScalar(diagA[r]) == PetscAbsScalar(offdiagA[r])) {
2223:       a[r] = diagA[r];
2224:       if (idx) {
2225:         if (cstart + diagIdx[r] <= offdiagIdx[r]) {
2226:           idx[r] = cstart + diagIdx[r];
2227:         } else idx[r] = offdiagIdx[r];
2228:       }
2229:     } else {
2230:       a[r]   = offdiagA[r];
2231:       if (idx) idx[r] = offdiagIdx[r];
2232:     }
2233:   }
2234:   MatSeqAIJRestoreArrayRead(B,&bav);
2235:   VecRestoreArrayWrite(v, &a);
2236:   VecRestoreArrayRead(diagV, (const PetscScalar**)&diagA);
2237:   VecRestoreArrayWrite(offdiagV, &offdiagA);
2238:   VecDestroy(&diagV);
2239:   VecDestroy(&offdiagV);
2240:   PetscFree2(diagIdx, offdiagIdx);
2241:   return 0;
2242: }

2244: PetscErrorCode MatGetRowMin_MPIAIJ(Mat A,Vec v,PetscInt idx[])
2245: {
2246:   Mat_MPIAIJ        *mat = (Mat_MPIAIJ*) A->data;
2247:   PetscInt          m = A->rmap->n,n = A->cmap->n;
2248:   PetscInt          cstart = A->cmap->rstart,cend = A->cmap->rend;
2249:   PetscInt          *cmap  = mat->garray;
2250:   PetscInt          *diagIdx, *offdiagIdx;
2251:   Vec               diagV, offdiagV;
2252:   PetscScalar       *a, *diagA, *offdiagA;
2253:   const PetscScalar *ba,*bav;
2254:   PetscInt          r,j,col,ncols,*bi,*bj;
2255:   Mat               B = mat->B;
2256:   Mat_SeqAIJ        *b = (Mat_SeqAIJ*)B->data;

2258:   /* When a single process holds the entire A and the other processes have no entries */
2259:   if (A->cmap->N == n) {
2260:     VecGetArrayWrite(v,&diagA);
2261:     VecCreateSeqWithArray(PETSC_COMM_SELF,1,m,diagA,&diagV);
2262:     MatGetRowMin(mat->A,diagV,idx);
2263:     VecDestroy(&diagV);
2264:     VecRestoreArrayWrite(v,&diagA);
2265:     return 0;
2266:   } else if (n == 0) {
2267:     if (m) {
2268:       VecGetArrayWrite(v,&a);
2269:       for (r = 0; r < m; r++) {a[r] = PETSC_MAX_REAL; if (idx) idx[r] = -1;}
2270:       VecRestoreArrayWrite(v,&a);
2271:     }
2272:     return 0;
2273:   }

2275:   PetscCalloc2(m,&diagIdx,m,&offdiagIdx);
2276:   VecCreateSeq(PETSC_COMM_SELF, m, &diagV);
2277:   VecCreateSeq(PETSC_COMM_SELF, m, &offdiagV);
2278:   MatGetRowMin(mat->A, diagV, diagIdx);

2280:   /* Get offdiagIdx[] for implicit 0.0 */
2281:   MatSeqAIJGetArrayRead(B,&bav);
2282:   ba   = bav;
2283:   bi   = b->i;
2284:   bj   = b->j;
2285:   VecGetArrayWrite(offdiagV, &offdiagA);
2286:   for (r = 0; r < m; r++) {
2287:     ncols = bi[r+1] - bi[r];
2288:     if (ncols == A->cmap->N - n) { /* Brow is dense */
2289:       offdiagA[r] = *ba; offdiagIdx[r] = cmap[0];
2290:     } else { /* Brow is sparse, so the implicit zeros mean the off-diagonal row minimum is 0.0 or lower */
2291:       offdiagA[r] = 0.0;

2293:       /* Find first hole in the cmap */
2294:       for (j=0; j<ncols; j++) {
2295:         col = cmap[bj[j]]; /* global column number = cmap[B column number] */
2296:         if (col > j && j < cstart) {
2297:           offdiagIdx[r] = j; /* global column number of first implicit 0.0 */
2298:           break;
2299:         } else if (col > j + n && j >= cstart) {
2300:           offdiagIdx[r] = j + n; /* global column number of first implicit 0.0 */
2301:           break;
2302:         }
2303:       }
2304:       if (j == ncols && ncols < A->cmap->N - n) {
2305:         /* a hole is outside compressed Bcols */
2306:         if (ncols == 0) {
2307:           if (cstart) {
2308:             offdiagIdx[r] = 0;
2309:           } else offdiagIdx[r] = cend;
2310:         } else { /* ncols > 0 */
2311:           offdiagIdx[r] = cmap[ncols-1] + 1;
2312:           if (offdiagIdx[r] == cstart) offdiagIdx[r] += n;
2313:         }
2314:       }
2315:     }

2317:     for (j=0; j<ncols; j++) {
2318:       if (PetscRealPart(offdiagA[r]) > PetscRealPart(*ba)) {offdiagA[r] = *ba; offdiagIdx[r] = cmap[*bj];}
2319:       ba++; bj++;
2320:     }
2321:   }

2323:   VecGetArrayWrite(v, &a);
2324:   VecGetArrayRead(diagV, (const PetscScalar**)&diagA);
2325:   for (r = 0; r < m; ++r) {
2326:     if (PetscRealPart(diagA[r]) < PetscRealPart(offdiagA[r])) {
2327:       a[r]   = diagA[r];
2328:       if (idx) idx[r] = cstart + diagIdx[r];
2329:     } else if (PetscRealPart(diagA[r]) == PetscRealPart(offdiagA[r])) {
2330:       a[r] = diagA[r];
2331:       if (idx) {
2332:         if (cstart + diagIdx[r] <= offdiagIdx[r]) {
2333:           idx[r] = cstart + diagIdx[r];
2334:         } else idx[r] = offdiagIdx[r];
2335:       }
2336:     } else {
2337:       a[r]   = offdiagA[r];
2338:       if (idx) idx[r] = offdiagIdx[r];
2339:     }
2340:   }
2341:   MatSeqAIJRestoreArrayRead(B,&bav);
2342:   VecRestoreArrayWrite(v, &a);
2343:   VecRestoreArrayRead(diagV, (const PetscScalar**)&diagA);
2344:   VecRestoreArrayWrite(offdiagV, &offdiagA);
2345:   VecDestroy(&diagV);
2346:   VecDestroy(&offdiagV);
2347:   PetscFree2(diagIdx, offdiagIdx);
2348:   return 0;
2349: }

2351: PetscErrorCode MatGetRowMax_MPIAIJ(Mat A,Vec v,PetscInt idx[])
2352: {
2353:   Mat_MPIAIJ        *mat = (Mat_MPIAIJ*)A->data;
2354:   PetscInt          m = A->rmap->n,n = A->cmap->n;
2355:   PetscInt          cstart = A->cmap->rstart,cend = A->cmap->rend;
2356:   PetscInt          *cmap  = mat->garray;
2357:   PetscInt          *diagIdx, *offdiagIdx;
2358:   Vec               diagV, offdiagV;
2359:   PetscScalar       *a, *diagA, *offdiagA;
2360:   const PetscScalar *ba,*bav;
2361:   PetscInt          r,j,col,ncols,*bi,*bj;
2362:   Mat               B = mat->B;
2363:   Mat_SeqAIJ        *b = (Mat_SeqAIJ*)B->data;

2365:   /* When a single process holds the entire A and the other processes have no entries */
2366:   if (A->cmap->N == n) {
2367:     VecGetArrayWrite(v,&diagA);
2368:     VecCreateSeqWithArray(PETSC_COMM_SELF,1,m,diagA,&diagV);
2369:     MatGetRowMax(mat->A,diagV,idx);
2370:     VecDestroy(&diagV);
2371:     VecRestoreArrayWrite(v,&diagA);
2372:     return 0;
2373:   } else if (n == 0) {
2374:     if (m) {
2375:       VecGetArrayWrite(v,&a);
2376:       for (r = 0; r < m; r++) {a[r] = PETSC_MIN_REAL; if (idx) idx[r] = -1;}
2377:       VecRestoreArrayWrite(v,&a);
2378:     }
2379:     return 0;
2380:   }

2382:   PetscMalloc2(m,&diagIdx,m,&offdiagIdx);
2383:   VecCreateSeq(PETSC_COMM_SELF, m, &diagV);
2384:   VecCreateSeq(PETSC_COMM_SELF, m, &offdiagV);
2385:   MatGetRowMax(mat->A, diagV, diagIdx);

2387:   /* Get offdiagIdx[] for implicit 0.0 */
2388:   MatSeqAIJGetArrayRead(B,&bav);
2389:   ba   = bav;
2390:   bi   = b->i;
2391:   bj   = b->j;
2392:   VecGetArrayWrite(offdiagV, &offdiagA);
2393:   for (r = 0; r < m; r++) {
2394:     ncols = bi[r+1] - bi[r];
2395:     if (ncols == A->cmap->N - n) { /* Brow is dense */
2396:       offdiagA[r] = *ba; offdiagIdx[r] = cmap[0];
2397:     } else { /* Brow is sparse, so the implicit zeros mean the off-diagonal row maximum is 0.0 or higher */
2398:       offdiagA[r] = 0.0;

2400:       /* Find first hole in the cmap */
2401:       for (j=0; j<ncols; j++) {
2402:         col = cmap[bj[j]]; /* global column number = cmap[B column number] */
2403:         if (col > j && j < cstart) {
2404:           offdiagIdx[r] = j; /* global column number of first implicit 0.0 */
2405:           break;
2406:         } else if (col > j + n && j >= cstart) {
2407:           offdiagIdx[r] = j + n; /* global column number of first implicit 0.0 */
2408:           break;
2409:         }
2410:       }
2411:       if (j == ncols && ncols < A->cmap->N - n) {
2412:         /* a hole is outside compressed Bcols */
2413:         if (ncols == 0) {
2414:           if (cstart) {
2415:             offdiagIdx[r] = 0;
2416:           } else offdiagIdx[r] = cend;
2417:         } else { /* ncols > 0 */
2418:           offdiagIdx[r] = cmap[ncols-1] + 1;
2419:           if (offdiagIdx[r] == cstart) offdiagIdx[r] += n;
2420:         }
2421:       }
2422:     }

2424:     for (j=0; j<ncols; j++) {
2425:       if (PetscRealPart(offdiagA[r]) < PetscRealPart(*ba)) {offdiagA[r] = *ba; offdiagIdx[r] = cmap[*bj];}
2426:       ba++; bj++;
2427:     }
2428:   }

2430:   VecGetArrayWrite(v,    &a);
2431:   VecGetArrayRead(diagV,(const PetscScalar**)&diagA);
2432:   for (r = 0; r < m; ++r) {
2433:     if (PetscRealPart(diagA[r]) > PetscRealPart(offdiagA[r])) {
2434:       a[r] = diagA[r];
2435:       if (idx) idx[r] = cstart + diagIdx[r];
2436:     } else if (PetscRealPart(diagA[r]) == PetscRealPart(offdiagA[r])) {
2437:       a[r] = diagA[r];
2438:       if (idx) {
2439:         if (cstart + diagIdx[r] <= offdiagIdx[r]) {
2440:           idx[r] = cstart + diagIdx[r];
2441:         } else idx[r] = offdiagIdx[r];
2442:       }
2443:     } else {
2444:       a[r] = offdiagA[r];
2445:       if (idx) idx[r] = offdiagIdx[r];
2446:     }
2447:   }
2448:   MatSeqAIJRestoreArrayRead(B,&bav);
2449:   VecRestoreArrayWrite(v,       &a);
2450:   VecRestoreArrayRead(diagV,   (const PetscScalar**)&diagA);
2451:   VecRestoreArrayWrite(offdiagV,&offdiagA);
2452:   VecDestroy(&diagV);
2453:   VecDestroy(&offdiagV);
2454:   PetscFree2(diagIdx, offdiagIdx);
2455:   return 0;
2456: }

2458: PetscErrorCode MatGetSeqNonzeroStructure_MPIAIJ(Mat mat,Mat *newmat)
2459: {
2460:   Mat            *dummy;

2462:   MatCreateSubMatrix_MPIAIJ_All(mat,MAT_DO_NOT_GET_VALUES,MAT_INITIAL_MATRIX,&dummy);
2463:   *newmat = *dummy;
2464:   PetscFree(dummy);
2465:   return 0;
2466: }

2468: PetscErrorCode  MatInvertBlockDiagonal_MPIAIJ(Mat A,const PetscScalar **values)
2469: {
2470:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*) A->data;

2472:   MatInvertBlockDiagonal(a->A,values);
2473:   A->factorerrortype = a->A->factorerrortype;
2474:   return 0;
2475: }

2477: static PetscErrorCode  MatSetRandom_MPIAIJ(Mat x,PetscRandom rctx)
2478: {
2479:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)x->data;

2482:   MatSetRandom(aij->A,rctx);
2483:   if (x->assembled) {
2484:     MatSetRandom(aij->B,rctx);
2485:   } else {
2486:     MatSetRandomSkipColumnRange_SeqAIJ_Private(aij->B,x->cmap->rstart,x->cmap->rend,rctx);
2487:   }
2488:   MatAssemblyBegin(x,MAT_FINAL_ASSEMBLY);
2489:   MatAssemblyEnd(x,MAT_FINAL_ASSEMBLY);
2490:   return 0;
2491: }

2493: PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap_MPIAIJ(Mat A,PetscBool sc)
2494: {
2495:   if (sc) A->ops->increaseoverlap = MatIncreaseOverlap_MPIAIJ_Scalable;
2496:   else A->ops->increaseoverlap    = MatIncreaseOverlap_MPIAIJ;
2497:   return 0;
2498: }

2500: /*@
2501:    MatMPIAIJSetUseScalableIncreaseOverlap - Specify whether the matrix uses a scalable algorithm to compute the overlap in MatIncreaseOverlap()

2503:    Collective on Mat

2505:    Input Parameters:
2506: +    A - the matrix
2507: -    sc - PETSC_TRUE indicates that the scalable algorithm should be used (the default is PETSC_FALSE, i.e. the non-scalable algorithm)

2509:  Level: advanced

2511: @*/
2512: PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap(Mat A,PetscBool sc)
2513: {
2514:   PetscTryMethod(A,"MatMPIAIJSetUseScalableIncreaseOverlap_C",(Mat,PetscBool),(A,sc));
2515:   return 0;
2516: }
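/* A minimal usage sketch:

     MatMPIAIJSetUseScalableIncreaseOverlap(A,PETSC_TRUE);

   or, from the options database, -mat_increase_overlap_scalable 1 (see MatSetFromOptions_MPIAIJ()
   below).  Subsequent MatIncreaseOverlap() calls, e.g. from PCASM, then dispatch to
   MatIncreaseOverlap_MPIAIJ_Scalable(). */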

2518: PetscErrorCode MatSetFromOptions_MPIAIJ(PetscOptionItems *PetscOptionsObject,Mat A)
2519: {
2520:   PetscBool            sc = PETSC_FALSE,flg;

2522:   PetscOptionsHead(PetscOptionsObject,"MPIAIJ options");
2523:   if (A->ops->increaseoverlap == MatIncreaseOverlap_MPIAIJ_Scalable) sc = PETSC_TRUE;
2524:   PetscOptionsBool("-mat_increase_overlap_scalable","Use a scalable algorithm to compute the overlap","MatIncreaseOverlap",sc,&sc,&flg);
2525:   if (flg) {
2526:     MatMPIAIJSetUseScalableIncreaseOverlap(A,sc);
2527:   }
2528:   PetscOptionsTail();
2529:   return 0;
2530: }

2532: PetscErrorCode MatShift_MPIAIJ(Mat Y,PetscScalar a)
2533: {
2534:   Mat_MPIAIJ     *maij = (Mat_MPIAIJ*)Y->data;
2535:   Mat_SeqAIJ     *aij = (Mat_SeqAIJ*)maij->A->data;

2537:   if (!Y->preallocated) {
2538:     MatMPIAIJSetPreallocation(Y,1,NULL,0,NULL);
2539:   } else if (!aij->nz) {
2540:     PetscInt nonew = aij->nonew;
2541:     MatSeqAIJSetPreallocation(maij->A,1,NULL);
2542:     aij->nonew = nonew;
2543:   }
2544:   MatShift_Basic(Y,a);
2545:   return 0;
2546: }

2548: PetscErrorCode MatMissingDiagonal_MPIAIJ(Mat A,PetscBool  *missing,PetscInt *d)
2549: {
2550:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2553:   MatMissingDiagonal(a->A,missing,d);
2554:   if (d) {
2555:     PetscInt rstart;
2556:     MatGetOwnershipRange(A,&rstart,NULL);
2557:     *d += rstart;

2559:   }
2560:   return 0;
2561: }

2563: PetscErrorCode MatInvertVariableBlockDiagonal_MPIAIJ(Mat A,PetscInt nblocks,const PetscInt *bsizes,PetscScalar *diag)
2564: {
2565:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2567:   MatInvertVariableBlockDiagonal(a->A,nblocks,bsizes,diag);
2568:   return 0;
2569: }

2571: /* -------------------------------------------------------------------*/
2572: static struct _MatOps MatOps_Values = {MatSetValues_MPIAIJ,
2573:                                        MatGetRow_MPIAIJ,
2574:                                        MatRestoreRow_MPIAIJ,
2575:                                        MatMult_MPIAIJ,
2576:                                 /* 4*/ MatMultAdd_MPIAIJ,
2577:                                        MatMultTranspose_MPIAIJ,
2578:                                        MatMultTransposeAdd_MPIAIJ,
2579:                                        NULL,
2580:                                        NULL,
2581:                                        NULL,
2582:                                 /*10*/ NULL,
2583:                                        NULL,
2584:                                        NULL,
2585:                                        MatSOR_MPIAIJ,
2586:                                        MatTranspose_MPIAIJ,
2587:                                 /*15*/ MatGetInfo_MPIAIJ,
2588:                                        MatEqual_MPIAIJ,
2589:                                        MatGetDiagonal_MPIAIJ,
2590:                                        MatDiagonalScale_MPIAIJ,
2591:                                        MatNorm_MPIAIJ,
2592:                                 /*20*/ MatAssemblyBegin_MPIAIJ,
2593:                                        MatAssemblyEnd_MPIAIJ,
2594:                                        MatSetOption_MPIAIJ,
2595:                                        MatZeroEntries_MPIAIJ,
2596:                                 /*24*/ MatZeroRows_MPIAIJ,
2597:                                        NULL,
2598:                                        NULL,
2599:                                        NULL,
2600:                                        NULL,
2601:                                 /*29*/ MatSetUp_MPIAIJ,
2602:                                        NULL,
2603:                                        NULL,
2604:                                        MatGetDiagonalBlock_MPIAIJ,
2605:                                        NULL,
2606:                                 /*34*/ MatDuplicate_MPIAIJ,
2607:                                        NULL,
2608:                                        NULL,
2609:                                        NULL,
2610:                                        NULL,
2611:                                 /*39*/ MatAXPY_MPIAIJ,
2612:                                        MatCreateSubMatrices_MPIAIJ,
2613:                                        MatIncreaseOverlap_MPIAIJ,
2614:                                        MatGetValues_MPIAIJ,
2615:                                        MatCopy_MPIAIJ,
2616:                                 /*44*/ MatGetRowMax_MPIAIJ,
2617:                                        MatScale_MPIAIJ,
2618:                                        MatShift_MPIAIJ,
2619:                                        MatDiagonalSet_MPIAIJ,
2620:                                        MatZeroRowsColumns_MPIAIJ,
2621:                                 /*49*/ MatSetRandom_MPIAIJ,
2622:                                        NULL,
2623:                                        NULL,
2624:                                        NULL,
2625:                                        NULL,
2626:                                 /*54*/ MatFDColoringCreate_MPIXAIJ,
2627:                                        NULL,
2628:                                        MatSetUnfactored_MPIAIJ,
2629:                                        MatPermute_MPIAIJ,
2630:                                        NULL,
2631:                                 /*59*/ MatCreateSubMatrix_MPIAIJ,
2632:                                        MatDestroy_MPIAIJ,
2633:                                        MatView_MPIAIJ,
2634:                                        NULL,
2635:                                        NULL,
2636:                                 /*64*/ NULL,
2637:                                        MatMatMatMultNumeric_MPIAIJ_MPIAIJ_MPIAIJ,
2638:                                        NULL,
2639:                                        NULL,
2640:                                        NULL,
2641:                                 /*69*/ MatGetRowMaxAbs_MPIAIJ,
2642:                                        MatGetRowMinAbs_MPIAIJ,
2643:                                        NULL,
2644:                                        NULL,
2645:                                        NULL,
2646:                                        NULL,
2647:                                 /*75*/ MatFDColoringApply_AIJ,
2648:                                        MatSetFromOptions_MPIAIJ,
2649:                                        NULL,
2650:                                        NULL,
2651:                                        MatFindZeroDiagonals_MPIAIJ,
2652:                                 /*80*/ NULL,
2653:                                        NULL,
2654:                                        NULL,
2655:                                 /*83*/ MatLoad_MPIAIJ,
2656:                                        MatIsSymmetric_MPIAIJ,
2657:                                        NULL,
2658:                                        NULL,
2659:                                        NULL,
2660:                                        NULL,
2661:                                 /*89*/ NULL,
2662:                                        NULL,
2663:                                        MatMatMultNumeric_MPIAIJ_MPIAIJ,
2664:                                        NULL,
2665:                                        NULL,
2666:                                 /*94*/ MatPtAPNumeric_MPIAIJ_MPIAIJ,
2667:                                        NULL,
2668:                                        NULL,
2669:                                        NULL,
2670:                                        MatBindToCPU_MPIAIJ,
2671:                                 /*99*/ MatProductSetFromOptions_MPIAIJ,
2672:                                        NULL,
2673:                                        NULL,
2674:                                        MatConjugate_MPIAIJ,
2675:                                        NULL,
2676:                                 /*104*/MatSetValuesRow_MPIAIJ,
2677:                                        MatRealPart_MPIAIJ,
2678:                                        MatImaginaryPart_MPIAIJ,
2679:                                        NULL,
2680:                                        NULL,
2681:                                 /*109*/NULL,
2682:                                        NULL,
2683:                                        MatGetRowMin_MPIAIJ,
2684:                                        NULL,
2685:                                        MatMissingDiagonal_MPIAIJ,
2686:                                 /*114*/MatGetSeqNonzeroStructure_MPIAIJ,
2687:                                        NULL,
2688:                                        MatGetGhosts_MPIAIJ,
2689:                                        NULL,
2690:                                        NULL,
2691:                                 /*119*/MatMultDiagonalBlock_MPIAIJ,
2692:                                        NULL,
2693:                                        NULL,
2694:                                        NULL,
2695:                                        MatGetMultiProcBlock_MPIAIJ,
2696:                                 /*124*/MatFindNonzeroRows_MPIAIJ,
2697:                                        MatGetColumnReductions_MPIAIJ,
2698:                                        MatInvertBlockDiagonal_MPIAIJ,
2699:                                        MatInvertVariableBlockDiagonal_MPIAIJ,
2700:                                        MatCreateSubMatricesMPI_MPIAIJ,
2701:                                 /*129*/NULL,
2702:                                        NULL,
2703:                                        NULL,
2704:                                        MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ,
2705:                                        NULL,
2706:                                 /*134*/NULL,
2707:                                        NULL,
2708:                                        NULL,
2709:                                        NULL,
2710:                                        NULL,
2711:                                 /*139*/MatSetBlockSizes_MPIAIJ,
2712:                                        NULL,
2713:                                        NULL,
2714:                                        MatFDColoringSetUp_MPIXAIJ,
2715:                                        MatFindOffBlockDiagonalEntries_MPIAIJ,
2716:                                        MatCreateMPIMatConcatenateSeqMat_MPIAIJ,
2717:                                 /*145*/NULL,
2718:                                        NULL,
2719:                                        NULL
2720: };

2722: /* ----------------------------------------------------------------------------------------*/

2724: PetscErrorCode  MatStoreValues_MPIAIJ(Mat mat)
2725: {
2726:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;

2728:   MatStoreValues(aij->A);
2729:   MatStoreValues(aij->B);
2730:   return 0;
2731: }

2733: PetscErrorCode  MatRetrieveValues_MPIAIJ(Mat mat)
2734: {
2735:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;

2737:   MatRetrieveValues(aij->A);
2738:   MatRetrieveValues(aij->B);
2739:   return 0;
2740: }

2742: PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJ(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
2743: {
2744:   Mat_MPIAIJ     *b;
2745:   PetscMPIInt    size;

2747:   PetscLayoutSetUp(B->rmap);
2748:   PetscLayoutSetUp(B->cmap);
2749:   b = (Mat_MPIAIJ*)B->data;

2751: #if defined(PETSC_USE_CTABLE)
2752:   PetscTableDestroy(&b->colmap);
2753: #else
2754:   PetscFree(b->colmap);
2755: #endif
2756:   PetscFree(b->garray);
2757:   VecDestroy(&b->lvec);
2758:   VecScatterDestroy(&b->Mvctx);

2760:   /* Because the off-diagonal matrix b->B may have been resized, we simply destroy it and create a new one each time */
2761:   MPI_Comm_size(PetscObjectComm((PetscObject)B),&size);
2762:   MatDestroy(&b->B);
2763:   MatCreate(PETSC_COMM_SELF,&b->B);
2764:   MatSetSizes(b->B,B->rmap->n,size > 1 ? B->cmap->N : 0,B->rmap->n,size > 1 ? B->cmap->N : 0);
2765:   MatSetBlockSizesFromMats(b->B,B,B);
2766:   MatSetType(b->B,MATSEQAIJ);
2767:   PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);

2769:   if (!B->preallocated) {
2770:     MatCreate(PETSC_COMM_SELF,&b->A);
2771:     MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);
2772:     MatSetBlockSizesFromMats(b->A,B,B);
2773:     MatSetType(b->A,MATSEQAIJ);
2774:     PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);
2775:   }

2777:   MatSeqAIJSetPreallocation(b->A,d_nz,d_nnz);
2778:   MatSeqAIJSetPreallocation(b->B,o_nz,o_nnz);
2779:   B->preallocated  = PETSC_TRUE;
2780:   B->was_assembled = PETSC_FALSE;
2781:   B->assembled     = PETSC_FALSE;
2782:   return 0;
2783: }
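/* A minimal usage sketch: this routine is reached through MatMPIAIJSetPreallocation().  The example
   below reserves at most 5 nonzeros per row in the diagonal block and 2 in the off-diagonal block;
   M and N stand for the global sizes and error checking is omitted.

     Mat A;
     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,N);
     MatSetType(A,MATAIJ);
     MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);
     MatSeqAIJSetPreallocation(A,5,NULL);        covers the single-process case; a no-op otherwise

   Exact per-row counts can be passed through the d_nnz[] and o_nnz[] arrays instead of d_nz/o_nz. */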

2785: PetscErrorCode MatResetPreallocation_MPIAIJ(Mat B)
2786: {
2787:   Mat_MPIAIJ     *b;

2790:   PetscLayoutSetUp(B->rmap);
2791:   PetscLayoutSetUp(B->cmap);
2792:   b = (Mat_MPIAIJ*)B->data;

2794: #if defined(PETSC_USE_CTABLE)
2795:   PetscTableDestroy(&b->colmap);
2796: #else
2797:   PetscFree(b->colmap);
2798: #endif
2799:   PetscFree(b->garray);
2800:   VecDestroy(&b->lvec);
2801:   VecScatterDestroy(&b->Mvctx);

2803:   MatResetPreallocation(b->A);
2804:   MatResetPreallocation(b->B);
2805:   B->preallocated  = PETSC_TRUE;
2806:   B->was_assembled = PETSC_FALSE;
2807:   B->assembled = PETSC_FALSE;
2808:   return 0;
2809: }

2811: PetscErrorCode MatDuplicate_MPIAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
2812: {
2813:   Mat            mat;
2814:   Mat_MPIAIJ     *a,*oldmat = (Mat_MPIAIJ*)matin->data;

2816:   *newmat = NULL;
2817:   MatCreate(PetscObjectComm((PetscObject)matin),&mat);
2818:   MatSetSizes(mat,matin->rmap->n,matin->cmap->n,matin->rmap->N,matin->cmap->N);
2819:   MatSetBlockSizesFromMats(mat,matin,matin);
2820:   MatSetType(mat,((PetscObject)matin)->type_name);
2821:   a       = (Mat_MPIAIJ*)mat->data;

2823:   mat->factortype   = matin->factortype;
2824:   mat->assembled    = matin->assembled;
2825:   mat->insertmode   = NOT_SET_VALUES;
2826:   mat->preallocated = matin->preallocated;

2828:   a->size         = oldmat->size;
2829:   a->rank         = oldmat->rank;
2830:   a->donotstash   = oldmat->donotstash;
2831:   a->roworiented  = oldmat->roworiented;
2832:   a->rowindices   = NULL;
2833:   a->rowvalues    = NULL;
2834:   a->getrowactive = PETSC_FALSE;

2836:   PetscLayoutReference(matin->rmap,&mat->rmap);
2837:   PetscLayoutReference(matin->cmap,&mat->cmap);

2839:   if (oldmat->colmap) {
2840: #if defined(PETSC_USE_CTABLE)
2841:     PetscTableCreateCopy(oldmat->colmap,&a->colmap);
2842: #else
2843:     PetscMalloc1(mat->cmap->N,&a->colmap);
2844:     PetscLogObjectMemory((PetscObject)mat,(mat->cmap->N)*sizeof(PetscInt));
2845:     PetscArraycpy(a->colmap,oldmat->colmap,mat->cmap->N);
2846: #endif
2847:   } else a->colmap = NULL;
2848:   if (oldmat->garray) {
2849:     PetscInt len;
2850:     len  = oldmat->B->cmap->n;
2851:     PetscMalloc1(len+1,&a->garray);
2852:     PetscLogObjectMemory((PetscObject)mat,len*sizeof(PetscInt));
2853:     if (len) PetscArraycpy(a->garray,oldmat->garray,len);
2854:   } else a->garray = NULL;

2856:   /* It may happen that MatDuplicate() is called with a non-assembled matrix;
2857:      MatDuplicate() only requires the matrix to be preallocated.
2858:      This can happen, for example, inside a DMCreateMatrix_Shell */
2859:   if (oldmat->lvec) {
2860:     VecDuplicate(oldmat->lvec,&a->lvec);
2861:     PetscLogObjectParent((PetscObject)mat,(PetscObject)a->lvec);
2862:   }
2863:   if (oldmat->Mvctx) {
2864:     VecScatterCopy(oldmat->Mvctx,&a->Mvctx);
2865:     PetscLogObjectParent((PetscObject)mat,(PetscObject)a->Mvctx);
2866:   }
2867:   MatDuplicate(oldmat->A,cpvalues,&a->A);
2868:   PetscLogObjectParent((PetscObject)mat,(PetscObject)a->A);
2869:   MatDuplicate(oldmat->B,cpvalues,&a->B);
2870:   PetscLogObjectParent((PetscObject)mat,(PetscObject)a->B);
2871:   PetscFunctionListDuplicate(((PetscObject)matin)->qlist,&((PetscObject)mat)->qlist);
2872:   *newmat = mat;
2873:   return 0;
2874: }
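
/* A minimal usage sketch of MatDuplicate(), which dispatches to the routine above for MPIAIJ
   matrices; A is assumed to be a preallocated (not necessarily assembled) MPIAIJ matrix.

     Mat B;
     MatDuplicate(A,MAT_COPY_VALUES,&B);     or MAT_DO_NOT_COPY_VALUES to copy only the structure
     ...
     MatDestroy(&B);
*/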

2876: PetscErrorCode MatLoad_MPIAIJ(Mat newMat, PetscViewer viewer)
2877: {
2878:   PetscBool      isbinary, ishdf5;

2882:   /* force binary viewer to load .info file if it has not yet done so */
2883:   PetscViewerSetUp(viewer);
2884:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
2885:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERHDF5,  &ishdf5);
2886:   if (isbinary) {
2887:     MatLoad_MPIAIJ_Binary(newMat,viewer);
2888:   } else if (ishdf5) {
2889: #if defined(PETSC_HAVE_HDF5)
2890:     MatLoad_AIJ_HDF5(newMat,viewer);
2891: #else
2892:     SETERRQ(PetscObjectComm((PetscObject)newMat),PETSC_ERR_SUP,"HDF5 not supported in this build.\nPlease reconfigure using --download-hdf5");
2893: #endif
2894:   } else {
2895:     SETERRQ(PetscObjectComm((PetscObject)newMat),PETSC_ERR_SUP,"Viewer type %s not yet supported for reading %s matrices",((PetscObject)viewer)->type_name,((PetscObject)newMat)->type_name);
2896:   }
2897:   return 0;
2898: }
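
/* A minimal sketch of how the loader above is typically reached through MatLoad(); the file
   name "matrix.dat" is illustrative only.

     Mat         A;
     PetscViewer viewer;
     PetscViewerBinaryOpen(PETSC_COMM_WORLD,"matrix.dat",FILE_MODE_READ,&viewer);
     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetType(A,MATMPIAIJ);
     MatLoad(A,viewer);
     PetscViewerDestroy(&viewer);
*/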

2900: PetscErrorCode MatLoad_MPIAIJ_Binary(Mat mat, PetscViewer viewer)
2901: {
2902:   PetscInt       header[4],M,N,m,nz,rows,cols,sum,i;
2903:   PetscInt       *rowidxs,*colidxs;
2904:   PetscScalar    *matvals;

2906:   PetscViewerSetUp(viewer);

2908:   /* read in matrix header */
2909:   PetscViewerBinaryRead(viewer,header,4,NULL,PETSC_INT);
2911:   M  = header[1]; N = header[2]; nz = header[3];

2916:   /* set block sizes from the viewer's .info file */
2917:   MatLoad_Binary_BlockSizes(mat,viewer);
2918:   /* set global sizes if not set already */
2919:   if (mat->rmap->N < 0) mat->rmap->N = M;
2920:   if (mat->cmap->N < 0) mat->cmap->N = N;
2921:   PetscLayoutSetUp(mat->rmap);
2922:   PetscLayoutSetUp(mat->cmap);

2924:   /* check if the matrix sizes are correct */
2925:   MatGetSize(mat,&rows,&cols);

2928:   /* read in row lengths and build row indices */
2929:   MatGetLocalSize(mat,&m,NULL);
2930:   PetscMalloc1(m+1,&rowidxs);
2931:   PetscViewerBinaryReadAll(viewer,rowidxs+1,m,PETSC_DECIDE,M,PETSC_INT);
2932:   rowidxs[0] = 0; for (i=0; i<m; i++) rowidxs[i+1] += rowidxs[i];
2933:   MPIU_Allreduce(&rowidxs[m],&sum,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)viewer));
2935:   /* read in column indices and matrix values */
2936:   PetscMalloc2(rowidxs[m],&colidxs,rowidxs[m],&matvals);
2937:   PetscViewerBinaryReadAll(viewer,colidxs,rowidxs[m],PETSC_DETERMINE,PETSC_DETERMINE,PETSC_INT);
2938:   PetscViewerBinaryReadAll(viewer,matvals,rowidxs[m],PETSC_DETERMINE,PETSC_DETERMINE,PETSC_SCALAR);
2939:   /* store matrix indices and values */
2940:   MatMPIAIJSetPreallocationCSR(mat,rowidxs,colidxs,matvals);
2941:   PetscFree(rowidxs);
2942:   PetscFree2(colidxs,matvals);
2943:   return 0;
2944: }

2946: /* Not scalable because of ISAllGather() unless getting all columns. */
2947: PetscErrorCode ISGetSeqIS_Private(Mat mat,IS iscol,IS *isseq)
2948: {
2949:   IS             iscol_local;
2950:   PetscBool      isstride;
2951:   PetscMPIInt    lisstride=0,gisstride;

2953:   /* check if we are grabbing all columns */
2954:   PetscObjectTypeCompare((PetscObject)iscol,ISSTRIDE,&isstride);

2956:   if (isstride) {
2957:     PetscInt  start,len,mstart,mlen;
2958:     ISStrideGetInfo(iscol,&start,NULL);
2959:     ISGetLocalSize(iscol,&len);
2960:     MatGetOwnershipRangeColumn(mat,&mstart,&mlen);
2961:     if (mstart == start && mlen-mstart == len) lisstride = 1;
2962:   }

2964:   MPIU_Allreduce(&lisstride,&gisstride,1,MPI_INT,MPI_MIN,PetscObjectComm((PetscObject)mat));
2965:   if (gisstride) {
2966:     PetscInt N;
2967:     MatGetSize(mat,NULL,&N);
2968:     ISCreateStride(PETSC_COMM_SELF,N,0,1,&iscol_local);
2969:     ISSetIdentity(iscol_local);
2970:     PetscInfo(mat,"Optimizing for obtaining all columns of the matrix; skipping ISAllGather()\n");
2971:   } else {
2972:     PetscInt cbs;
2973:     ISGetBlockSize(iscol,&cbs);
2974:     ISAllGather(iscol,&iscol_local);
2975:     ISSetBlockSize(iscol_local,cbs);
2976:   }

2978:   *isseq = iscol_local;
2979:   return 0;
2980: }

2982: /*
2983:  Used by MatCreateSubMatrix_MPIAIJ_SameRowColDist() to avoid ISAllGather() and global size of iscol_local
2984:  (see MatCreateSubMatrix_MPIAIJ_nonscalable)

2986:  Input Parameters:
2987:    mat - matrix
2988:    isrow - parallel row index set; its local indices are a subset of the local rows of mat,
2989:            i.e., mat->rstart <= isrow[i] < mat->rend
2990:    iscol - parallel column index set; its local indices are a subset of local columns of mat,
2991:            i.e., mat->cstart <= iscol[i] < mat->cend
2992:  Output Parameters:
2993:    isrow_d,iscol_d - sequential row and column index sets for retrieving mat->A
2994:    iscol_o - sequential column index set for retrieving mat->B
2995:    garray - column map; garray[i] indicates global location of iscol_o[i] in iscol
2996:  */
2997: PetscErrorCode ISGetSeqIS_SameColDist_Private(Mat mat,IS isrow,IS iscol,IS *isrow_d,IS *iscol_d,IS *iscol_o,const PetscInt *garray[])
2998: {
2999:   Vec            x,cmap;
3000:   const PetscInt *is_idx;
3001:   PetscScalar    *xarray,*cmaparray;
3002:   PetscInt       ncols,isstart,*idx,m,rstart,*cmap1,count;
3003:   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)mat->data;
3004:   Mat            B=a->B;
3005:   Vec            lvec=a->lvec,lcmap;
3006:   PetscInt       i,cstart,cend,Bn=B->cmap->N;
3007:   MPI_Comm       comm;
3008:   VecScatter     Mvctx=a->Mvctx;

3010:   PetscObjectGetComm((PetscObject)mat,&comm);
3011:   ISGetLocalSize(iscol,&ncols);

3013:   /* (1) iscol is a sub-column vector of mat, pad it with '-1.' to form a full vector x */
3014:   MatCreateVecs(mat,&x,NULL);
3015:   VecSet(x,-1.0);
3016:   VecDuplicate(x,&cmap);
3017:   VecSet(cmap,-1.0);

3019:   /* Get start indices */
3020:   MPI_Scan(&ncols,&isstart,1,MPIU_INT,MPI_SUM,comm);
3021:   isstart -= ncols;
3022:   MatGetOwnershipRangeColumn(mat,&cstart,&cend);

3024:   ISGetIndices(iscol,&is_idx);
3025:   VecGetArray(x,&xarray);
3026:   VecGetArray(cmap,&cmaparray);
3027:   PetscMalloc1(ncols,&idx);
3028:   for (i=0; i<ncols; i++) {
3029:     xarray[is_idx[i]-cstart]    = (PetscScalar)is_idx[i];
3030:     cmaparray[is_idx[i]-cstart] = i + isstart;      /* global index of iscol[i] */
3031:     idx[i]                      = is_idx[i]-cstart; /* local index of iscol[i]  */
3032:   }
3033:   VecRestoreArray(x,&xarray);
3034:   VecRestoreArray(cmap,&cmaparray);
3035:   ISRestoreIndices(iscol,&is_idx);

3037:   /* Get iscol_d */
3038:   ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,iscol_d);
3039:   ISGetBlockSize(iscol,&i);
3040:   ISSetBlockSize(*iscol_d,i);

3042:   /* Get isrow_d */
3043:   ISGetLocalSize(isrow,&m);
3044:   rstart = mat->rmap->rstart;
3045:   PetscMalloc1(m,&idx);
3046:   ISGetIndices(isrow,&is_idx);
3047:   for (i=0; i<m; i++) idx[i] = is_idx[i]-rstart;
3048:   ISRestoreIndices(isrow,&is_idx);

3050:   ISCreateGeneral(PETSC_COMM_SELF,m,idx,PETSC_OWN_POINTER,isrow_d);
3051:   ISGetBlockSize(isrow,&i);
3052:   ISSetBlockSize(*isrow_d,i);

3054:   /* (2) Scatter x and cmap using aij->Mvctx to get their off-process portions (see MatMult_MPIAIJ) */
3055:   VecScatterBegin(Mvctx,x,lvec,INSERT_VALUES,SCATTER_FORWARD);
3056:   VecScatterEnd(Mvctx,x,lvec,INSERT_VALUES,SCATTER_FORWARD);

3058:   VecDuplicate(lvec,&lcmap);

3060:   VecScatterBegin(Mvctx,cmap,lcmap,INSERT_VALUES,SCATTER_FORWARD);
3061:   VecScatterEnd(Mvctx,cmap,lcmap,INSERT_VALUES,SCATTER_FORWARD);

3063:   /* (3) create sequential iscol_o (a subset of iscol) and isgarray */
3064:   /* off-process column indices */
3065:   count = 0;
3066:   PetscMalloc1(Bn,&idx);
3067:   PetscMalloc1(Bn,&cmap1);

3069:   VecGetArray(lvec,&xarray);
3070:   VecGetArray(lcmap,&cmaparray);
3071:   for (i=0; i<Bn; i++) {
3072:     if (PetscRealPart(xarray[i]) > -1.0) {
3073:       idx[count]     = i;                   /* local column index in off-diagonal part B */
3074:       cmap1[count] = (PetscInt)PetscRealPart(cmaparray[i]);  /* column index in submat */
3075:       count++;
3076:     }
3077:   }
3078:   VecRestoreArray(lvec,&xarray);
3079:   VecRestoreArray(lcmap,&cmaparray);

3081:   ISCreateGeneral(PETSC_COMM_SELF,count,idx,PETSC_COPY_VALUES,iscol_o);
3082:   /* cannot ensure iscol_o has same blocksize as iscol! */

3084:   PetscFree(idx);
3085:   *garray = cmap1;

3087:   VecDestroy(&x);
3088:   VecDestroy(&cmap);
3089:   VecDestroy(&lcmap);
3090:   return 0;
3091: }

3093: /* isrow and iscol have same processor distribution as mat, output *submat is a submatrix of local mat */
3094: PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowColDist(Mat mat,IS isrow,IS iscol,MatReuse call,Mat *submat)
3095: {
3096:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)mat->data,*asub;
3097:   Mat            M = NULL;
3098:   MPI_Comm       comm;
3099:   IS             iscol_d,isrow_d,iscol_o;
3100:   Mat            Asub = NULL,Bsub = NULL;
3101:   PetscInt       n;

3103:   PetscObjectGetComm((PetscObject)mat,&comm);

3105:   if (call == MAT_REUSE_MATRIX) {
3106:     /* Retrieve isrow_d, iscol_d and iscol_o from submat */
3107:     PetscObjectQuery((PetscObject)*submat,"isrow_d",(PetscObject*)&isrow_d);

3110:     PetscObjectQuery((PetscObject)*submat,"iscol_d",(PetscObject*)&iscol_d);

3113:     PetscObjectQuery((PetscObject)*submat,"iscol_o",(PetscObject*)&iscol_o);

3116:     /* Update diagonal and off-diagonal portions of submat */
3117:     asub = (Mat_MPIAIJ*)(*submat)->data;
3118:     MatCreateSubMatrix_SeqAIJ(a->A,isrow_d,iscol_d,PETSC_DECIDE,MAT_REUSE_MATRIX,&asub->A);
3119:     ISGetLocalSize(iscol_o,&n);
3120:     if (n) {
3121:       MatCreateSubMatrix_SeqAIJ(a->B,isrow_d,iscol_o,PETSC_DECIDE,MAT_REUSE_MATRIX,&asub->B);
3122:     }
3123:     MatAssemblyBegin(*submat,MAT_FINAL_ASSEMBLY);
3124:     MatAssemblyEnd(*submat,MAT_FINAL_ASSEMBLY);

3126:   } else { /* call == MAT_INITIAL_MATRIX) */
3127:     const PetscInt *garray;
3128:     PetscInt        BsubN;

3130:     /* Create isrow_d, iscol_d, iscol_o and isgarray (replace isgarray with array?) */
3131:     ISGetSeqIS_SameColDist_Private(mat,isrow,iscol,&isrow_d,&iscol_d,&iscol_o,&garray);

3133:     /* Create local submatrices Asub and Bsub */
3134:     MatCreateSubMatrix_SeqAIJ(a->A,isrow_d,iscol_d,PETSC_DECIDE,MAT_INITIAL_MATRIX,&Asub);
3135:     MatCreateSubMatrix_SeqAIJ(a->B,isrow_d,iscol_o,PETSC_DECIDE,MAT_INITIAL_MATRIX,&Bsub);

3137:     /* Create submatrix M */
3138:     MatCreateMPIAIJWithSeqAIJ(comm,Asub,Bsub,garray,&M);

3140:     /* If Bsub has empty columns, compress iscol_o such that it will retrieve condensed Bsub from a->B during reuse */
3141:     asub = (Mat_MPIAIJ*)M->data;

3143:     ISGetLocalSize(iscol_o,&BsubN);
3144:     n = asub->B->cmap->N;
3145:     if (BsubN > n) {
3146:       /* This case can be tested using ~petsc/src/tao/bound/tutorials/runplate2_3 */
3147:       const PetscInt *idx;
3148:       PetscInt       i,j,*idx_new,*subgarray = asub->garray;
3149:       PetscInfo(M,"submatrix Bn %" PetscInt_FMT " != BsubN %" PetscInt_FMT ", update iscol_o\n",n,BsubN);

3151:       PetscMalloc1(n,&idx_new);
3152:       j = 0;
3153:       ISGetIndices(iscol_o,&idx);
3154:       for (i=0; i<n; i++) {
3155:         if (j >= BsubN) break;
3156:         while (subgarray[i] > garray[j]) j++;

3158:         if (subgarray[i] == garray[j]) {
3159:           idx_new[i] = idx[j++];
3160:         } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"subgarray[%" PetscInt_FMT "]=%" PetscInt_FMT " cannot be smaller than garray[%" PetscInt_FMT "]=%" PetscInt_FMT,i,subgarray[i],j,garray[j]);
3161:       }
3162:       ISRestoreIndices(iscol_o,&idx);

3164:       ISDestroy(&iscol_o);
3165:       ISCreateGeneral(PETSC_COMM_SELF,n,idx_new,PETSC_OWN_POINTER,&iscol_o);

3167:     } else if (BsubN < n) {
3168:       SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Columns of Bsub (%" PetscInt_FMT ") cannot be smaller than B's (%" PetscInt_FMT ")",BsubN,asub->B->cmap->N);
3169:     }

3171:     PetscFree(garray);
3172:     *submat = M;

3174:     /* Save isrow_d, iscol_d and iscol_o used in processor for next request */
3175:     PetscObjectCompose((PetscObject)M,"isrow_d",(PetscObject)isrow_d);
3176:     ISDestroy(&isrow_d);

3178:     PetscObjectCompose((PetscObject)M,"iscol_d",(PetscObject)iscol_d);
3179:     ISDestroy(&iscol_d);

3181:     PetscObjectCompose((PetscObject)M,"iscol_o",(PetscObject)iscol_o);
3182:     ISDestroy(&iscol_o);
3183:   }
3184:   return 0;
3185: }

3187: PetscErrorCode MatCreateSubMatrix_MPIAIJ(Mat mat,IS isrow,IS iscol,MatReuse call,Mat *newmat)
3188: {
3189:   IS             iscol_local=NULL,isrow_d;
3190:   PetscInt       csize;
3191:   PetscInt       n,i,j,start,end;
3192:   PetscBool      sameRowDist=PETSC_FALSE,sameDist[2],tsameDist[2];
3193:   MPI_Comm       comm;

3195:   /* If isrow has same processor distribution as mat,
3196:      call MatCreateSubMatrix_MPIAIJ_SameRowDist() to avoid using a hash table with global size of iscol */
3197:   if (call == MAT_REUSE_MATRIX) {
3198:     PetscObjectQuery((PetscObject)*newmat,"isrow_d",(PetscObject*)&isrow_d);
3199:     if (isrow_d) {
3200:       sameRowDist  = PETSC_TRUE;
3201:       tsameDist[1] = PETSC_TRUE; /* sameColDist */
3202:     } else {
3203:       PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_local);
3204:       if (iscol_local) {
3205:         sameRowDist  = PETSC_TRUE;
3206:         tsameDist[1] = PETSC_FALSE; /* !sameColDist */
3207:       }
3208:     }
3209:   } else {
3210:     /* Check if isrow has same processor distribution as mat */
3211:     sameDist[0] = PETSC_FALSE;
3212:     ISGetLocalSize(isrow,&n);
3213:     if (!n) {
3214:       sameDist[0] = PETSC_TRUE;
3215:     } else {
3216:       ISGetMinMax(isrow,&i,&j);
3217:       MatGetOwnershipRange(mat,&start,&end);
3218:       if (i >= start && j < end) {
3219:         sameDist[0] = PETSC_TRUE;
3220:       }
3221:     }

3223:     /* Check if iscol has same processor distribution as mat */
3224:     sameDist[1] = PETSC_FALSE;
3225:     ISGetLocalSize(iscol,&n);
3226:     if (!n) {
3227:       sameDist[1] = PETSC_TRUE;
3228:     } else {
3229:       ISGetMinMax(iscol,&i,&j);
3230:       MatGetOwnershipRangeColumn(mat,&start,&end);
3231:       if (i >= start && j < end) sameDist[1] = PETSC_TRUE;
3232:     }

3234:     PetscObjectGetComm((PetscObject)mat,&comm);
3235:     MPIU_Allreduce(&sameDist,&tsameDist,2,MPIU_BOOL,MPI_LAND,comm);
3236:     sameRowDist = tsameDist[0];
3237:   }

3239:   if (sameRowDist) {
3240:     if (tsameDist[1]) { /* sameRowDist & sameColDist */
3241:       /* isrow and iscol have same processor distribution as mat */
3242:       MatCreateSubMatrix_MPIAIJ_SameRowColDist(mat,isrow,iscol,call,newmat);
3243:       return 0;
3244:     } else { /* sameRowDist */
3245:       /* isrow has same processor distribution as mat */
3246:       if (call == MAT_INITIAL_MATRIX) {
3247:         PetscBool sorted;
3248:         ISGetSeqIS_Private(mat,iscol,&iscol_local);
3249:         ISGetLocalSize(iscol_local,&n); /* local size of iscol_local = global columns of newmat */
3250:         ISGetSize(iscol,&i);

3253:         ISSorted(iscol_local,&sorted);
3254:         if (sorted) {
3255:           /* MatCreateSubMatrix_MPIAIJ_SameRowDist() requires iscol_local be sorted; it can have duplicate indices */
3256:           MatCreateSubMatrix_MPIAIJ_SameRowDist(mat,isrow,iscol,iscol_local,MAT_INITIAL_MATRIX,newmat);
3257:           return 0;
3258:         }
3259:       } else { /* call == MAT_REUSE_MATRIX */
3260:         IS iscol_sub;
3261:         PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_sub);
3262:         if (iscol_sub) {
3263:           MatCreateSubMatrix_MPIAIJ_SameRowDist(mat,isrow,iscol,NULL,call,newmat);
3264:           return 0;
3265:         }
3266:       }
3267:     }
3268:   }

3270:   /* General case: iscol -> iscol_local which has global size of iscol */
3271:   if (call == MAT_REUSE_MATRIX) {
3272:     PetscObjectQuery((PetscObject)*newmat,"ISAllGather",(PetscObject*)&iscol_local);
3274:   } else {
3275:     if (!iscol_local) {
3276:       ISGetSeqIS_Private(mat,iscol,&iscol_local);
3277:     }
3278:   }

3280:   ISGetLocalSize(iscol,&csize);
3281:   MatCreateSubMatrix_MPIAIJ_nonscalable(mat,isrow,iscol_local,csize,call,newmat);

3283:   if (call == MAT_INITIAL_MATRIX) {
3284:     PetscObjectCompose((PetscObject)*newmat,"ISAllGather",(PetscObject)iscol_local);
3285:     ISDestroy(&iscol_local);
3286:   }
3287:   return 0;
3288: }
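
/* A minimal sketch of the public MatCreateSubMatrix() call that dispatches to the routine
   above; isrow and iscol are assumed to be parallel index sets selecting the wanted global
   rows and columns.

     Mat S;
     MatCreateSubMatrix(A,isrow,iscol,MAT_INITIAL_MATRIX,&S);
     ...                                       later, with the same index sets and sparsity
     MatCreateSubMatrix(A,isrow,iscol,MAT_REUSE_MATRIX,&S);
     MatDestroy(&S);
*/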

3290: /*@C
3291:      MatCreateMPIAIJWithSeqAIJ - creates an MPIAIJ matrix using SeqAIJ matrices that contain the "diagonal"
3292:          and "off-diagonal" part of the matrix in CSR format.

3294:    Collective

3296:    Input Parameters:
3297: +  comm - MPI communicator
3298: .  A - "diagonal" portion of matrix
3299: .  B - "off-diagonal" portion of matrix, may have empty columns, will be destroyed by this routine
3300: -  garray - global index of B columns

3302:    Output Parameter:
3303: .   mat - the matrix, with input A as its local diagonal matrix
3304:    Level: advanced

3306:    Notes:
3307:        See MatCreateAIJ() for the definition of "diagonal" and "off-diagonal" portion of the matrix.
3308:        A becomes part of output mat, B is destroyed by this routine. The user cannot use A and B anymore.
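
       A rough sketch of a call sequence (the local sizes and garray values below are
     illustrative only):
.vb
      Mat      A,B,C;
      PetscInt garray[2] = {3,7};    global column indices of B's two (compacted) local columns
      ... create and fill sequential AIJ matrices A (the m x n diagonal block) and B (m x 2) ...
      MatCreateMPIAIJWithSeqAIJ(PETSC_COMM_WORLD,A,B,garray,&C);    A and B now belong to C
.ve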

3310: .seealso: MatCreateMPIAIJWithSplitArrays()
3311: @*/
3312: PetscErrorCode MatCreateMPIAIJWithSeqAIJ(MPI_Comm comm,Mat A,Mat B,const PetscInt garray[],Mat *mat)
3313: {
3314:   Mat_MPIAIJ        *maij;
3315:   Mat_SeqAIJ        *b=(Mat_SeqAIJ*)B->data,*bnew;
3316:   PetscInt          *oi=b->i,*oj=b->j,i,nz,col;
3317:   const PetscScalar *oa;
3318:   Mat               Bnew;
3319:   PetscInt          m,n,N;

3321:   MatCreate(comm,mat);
3322:   MatGetSize(A,&m,&n);
3325:   /* The block-size consistency check was removed here: when B is created using iscol_o from ISGetSeqIS_SameColDist_Private(), its bs may not be the same as A's */

3328:   /* Get global columns of mat */
3329:   MPIU_Allreduce(&n,&N,1,MPIU_INT,MPI_SUM,comm);

3331:   MatSetSizes(*mat,m,n,PETSC_DECIDE,N);
3332:   MatSetType(*mat,MATMPIAIJ);
3333:   MatSetBlockSizes(*mat,A->rmap->bs,A->cmap->bs);
3334:   maij = (Mat_MPIAIJ*)(*mat)->data;

3336:   (*mat)->preallocated = PETSC_TRUE;

3338:   PetscLayoutSetUp((*mat)->rmap);
3339:   PetscLayoutSetUp((*mat)->cmap);

3341:   /* Set A as diagonal portion of *mat */
3342:   maij->A = A;

3344:   nz = oi[m];
3345:   for (i=0; i<nz; i++) {
3346:     col   = oj[i];
3347:     oj[i] = garray[col];
3348:   }

3350:   /* Set Bnew as off-diagonal portion of *mat */
3351:   MatSeqAIJGetArrayRead(B,&oa);
3352:   MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,N,oi,oj,(PetscScalar*)oa,&Bnew);
3353:   MatSeqAIJRestoreArrayRead(B,&oa);
3354:   bnew        = (Mat_SeqAIJ*)Bnew->data;
3355:   bnew->maxnz = b->maxnz; /* allocated nonzeros of B */
3356:   maij->B     = Bnew;


3360:   b->singlemalloc = PETSC_FALSE; /* B arrays are shared by Bnew */
3361:   b->free_a       = PETSC_FALSE;
3362:   b->free_ij      = PETSC_FALSE;
3363:   MatDestroy(&B);

3365:   bnew->singlemalloc = PETSC_TRUE; /* arrays will be freed by MatDestroy(&Bnew) */
3366:   bnew->free_a       = PETSC_TRUE;
3367:   bnew->free_ij      = PETSC_TRUE;

3369:   /* condense columns of maij->B */
3370:   MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
3371:   MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);
3372:   MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);
3373:   MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_FALSE);
3374:   MatSetOption(*mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
3375:   return 0;
3376: }

3378: extern PetscErrorCode MatCreateSubMatrices_MPIAIJ_SingleIS_Local(Mat,PetscInt,const IS[],const IS[],MatReuse,PetscBool,Mat*);

3380: PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowDist(Mat mat,IS isrow,IS iscol,IS iscol_local,MatReuse call,Mat *newmat)
3381: {
3382:   PetscInt       i,m,n,rstart,row,rend,nz,j,bs,cbs;
3383:   PetscInt       *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal;
3384:   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)mat->data;
3385:   Mat            M,Msub,B=a->B;
3386:   MatScalar      *aa;
3387:   Mat_SeqAIJ     *aij;
3388:   PetscInt       *garray = a->garray,*colsub,Ncols;
3389:   PetscInt       count,Bn=B->cmap->N,cstart=mat->cmap->rstart,cend=mat->cmap->rend;
3390:   IS             iscol_sub,iscmap;
3391:   const PetscInt *is_idx,*cmap;
3392:   PetscBool      allcolumns=PETSC_FALSE;
3393:   MPI_Comm       comm;

3395:   PetscObjectGetComm((PetscObject)mat,&comm);
3396:   if (call == MAT_REUSE_MATRIX) {
3397:     PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_sub);
3399:     ISGetLocalSize(iscol_sub,&count);

3401:     PetscObjectQuery((PetscObject)*newmat,"Subcmap",(PetscObject*)&iscmap);

3404:     PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject*)&Msub);

3407:     MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol_sub,MAT_REUSE_MATRIX,PETSC_FALSE,&Msub);

3409:   } else { /* call == MAT_INITIAL_MATRIX) */
3410:     PetscBool flg;

3412:     ISGetLocalSize(iscol,&n);
3413:     ISGetSize(iscol,&Ncols);

3415:     /* (1) iscol -> nonscalable iscol_local */
3416:     /* Check for special case: each processor gets entire matrix columns */
3417:     ISIdentity(iscol_local,&flg);
3418:     if (flg && n == mat->cmap->N) allcolumns = PETSC_TRUE;
3419:     MPIU_Allreduce(MPI_IN_PLACE,&allcolumns,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)mat));
3420:     if (allcolumns) {
3421:       iscol_sub = iscol_local;
3422:       PetscObjectReference((PetscObject)iscol_local);
3423:       ISCreateStride(PETSC_COMM_SELF,n,0,1,&iscmap);

3425:     } else {
3426:       /* (2) iscol_local -> iscol_sub and iscmap. Implementation below requires iscol_local be sorted, it can have duplicate indices */
3427:       PetscInt *idx,*cmap1,k;
3428:       PetscMalloc1(Ncols,&idx);
3429:       PetscMalloc1(Ncols,&cmap1);
3430:       ISGetIndices(iscol_local,&is_idx);
3431:       count = 0;
3432:       k     = 0;
3433:       for (i=0; i<Ncols; i++) {
3434:         j = is_idx[i];
3435:         if (j >= cstart && j < cend) {
3436:           /* diagonal part of mat */
3437:           idx[count]     = j;
3438:           cmap1[count++] = i; /* column index in submat */
3439:         } else if (Bn) {
3440:           /* off-diagonal part of mat */
3441:           if (j == garray[k]) {
3442:             idx[count]     = j;
3443:             cmap1[count++] = i;  /* column index in submat */
3444:           } else if (j > garray[k]) {
3445:             while (j > garray[k] && k < Bn-1) k++;
3446:             if (j == garray[k]) {
3447:               idx[count]     = j;
3448:               cmap1[count++] = i; /* column index in submat */
3449:             }
3450:           }
3451:         }
3452:       }
3453:       ISRestoreIndices(iscol_local,&is_idx);

3455:       ISCreateGeneral(PETSC_COMM_SELF,count,idx,PETSC_OWN_POINTER,&iscol_sub);
3456:       ISGetBlockSize(iscol,&cbs);
3457:       ISSetBlockSize(iscol_sub,cbs);

3459:       ISCreateGeneral(PetscObjectComm((PetscObject)iscol_local),count,cmap1,PETSC_OWN_POINTER,&iscmap);
3460:     }

3462:     /* (3) Create sequential Msub */
3463:     MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol_sub,MAT_INITIAL_MATRIX,allcolumns,&Msub);
3464:   }

3466:   ISGetLocalSize(iscol_sub,&count);
3467:   aij  = (Mat_SeqAIJ*)(Msub)->data;
3468:   ii   = aij->i;
3469:   ISGetIndices(iscmap,&cmap);

3471:   /*
3472:       m - number of local rows
3473:       Ncols - number of columns (same on all processors)
3474:       rstart - first row in new global matrix generated
3475:   */
3476:   MatGetSize(Msub,&m,NULL);

3478:   if (call == MAT_INITIAL_MATRIX) {
3479:     /* (4) Create parallel newmat */
3480:     PetscMPIInt    rank,size;
3481:     PetscInt       csize;

3483:     MPI_Comm_size(comm,&size);
3484:     MPI_Comm_rank(comm,&rank);

3486:     /*
3487:         Determine the number of non-zeros in the diagonal and off-diagonal
3488:         portions of the matrix in order to do correct preallocation
3489:     */

3491:     /* first get start and end of "diagonal" columns */
3492:     ISGetLocalSize(iscol,&csize);
3493:     if (csize == PETSC_DECIDE) {
3494:       ISGetSize(isrow,&mglobal);
3495:       if (mglobal == Ncols) { /* square matrix */
3496:         nlocal = m;
3497:       } else {
3498:         nlocal = Ncols/size + ((Ncols % size) > rank);
3499:       }
3500:     } else {
3501:       nlocal = csize;
3502:     }
3503:     MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);
3504:     rstart = rend - nlocal;

3507:     /* next, compute all the lengths */
3508:     jj    = aij->j;
3509:     PetscMalloc1(2*m+1,&dlens);
3510:     olens = dlens + m;
3511:     for (i=0; i<m; i++) {
3512:       jend = ii[i+1] - ii[i];
3513:       olen = 0;
3514:       dlen = 0;
3515:       for (j=0; j<jend; j++) {
3516:         if (cmap[*jj] < rstart || cmap[*jj] >= rend) olen++;
3517:         else dlen++;
3518:         jj++;
3519:       }
3520:       olens[i] = olen;
3521:       dlens[i] = dlen;
3522:     }

3524:     ISGetBlockSize(isrow,&bs);
3525:     ISGetBlockSize(iscol,&cbs);

3527:     MatCreate(comm,&M);
3528:     MatSetSizes(M,m,nlocal,PETSC_DECIDE,Ncols);
3529:     MatSetBlockSizes(M,bs,cbs);
3530:     MatSetType(M,((PetscObject)mat)->type_name);
3531:     MatMPIAIJSetPreallocation(M,0,dlens,0,olens);
3532:     PetscFree(dlens);

3534:   } else { /* call == MAT_REUSE_MATRIX */
3535:     M    = *newmat;
3536:     MatGetLocalSize(M,&i,NULL);
3538:     MatZeroEntries(M);
3539:     /*
3540:          The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3541:        rather than the slower MatSetValues().
3542:     */
3543:     M->was_assembled = PETSC_TRUE;
3544:     M->assembled     = PETSC_FALSE;
3545:   }

3547:   /* (5) Set values of Msub to *newmat */
3548:   PetscMalloc1(count,&colsub);
3549:   MatGetOwnershipRange(M,&rstart,NULL);

3551:   jj   = aij->j;
3552:   MatSeqAIJGetArrayRead(Msub,(const PetscScalar**)&aa);
3553:   for (i=0; i<m; i++) {
3554:     row = rstart + i;
3555:     nz  = ii[i+1] - ii[i];
3556:     for (j=0; j<nz; j++) colsub[j] = cmap[jj[j]];
3557:     MatSetValues_MPIAIJ(M,1,&row,nz,colsub,aa,INSERT_VALUES);
3558:     jj += nz; aa += nz;
3559:   }
3560:   MatSeqAIJRestoreArrayRead(Msub,(const PetscScalar**)&aa);
3561:   ISRestoreIndices(iscmap,&cmap);

3563:   MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);
3564:   MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);

3566:   PetscFree(colsub);

3568:   /* save Msub, iscol_sub and iscmap used in processor for next request */
3569:   if (call == MAT_INITIAL_MATRIX) {
3570:     *newmat = M;
3571:     PetscObjectCompose((PetscObject)(*newmat),"SubMatrix",(PetscObject)Msub);
3572:     MatDestroy(&Msub);

3574:     PetscObjectCompose((PetscObject)(*newmat),"SubIScol",(PetscObject)iscol_sub);
3575:     ISDestroy(&iscol_sub);

3577:     PetscObjectCompose((PetscObject)(*newmat),"Subcmap",(PetscObject)iscmap);
3578:     ISDestroy(&iscmap);

3580:     if (iscol_local) {
3581:       PetscObjectCompose((PetscObject)(*newmat),"ISAllGather",(PetscObject)iscol_local);
3582:       ISDestroy(&iscol_local);
3583:     }
3584:   }
3585:   return 0;
3586: }

3588: /*
3589:     Not great since it makes two copies of the submatrix: first a SeqAIJ matrix
3590:   on each process and then the end result by concatenating the local matrices.
3591:   Writing it directly would be much like MatCreateSubMatrices_MPIAIJ()

3593:   Note: This requires a sequential iscol with all indices.
3594: */
3595: PetscErrorCode MatCreateSubMatrix_MPIAIJ_nonscalable(Mat mat,IS isrow,IS iscol,PetscInt csize,MatReuse call,Mat *newmat)
3596: {
3597:   PetscMPIInt    rank,size;
3598:   PetscInt       i,m,n,rstart,row,rend,nz,*cwork,j,bs,cbs;
3599:   PetscInt       *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal;
3600:   Mat            M,Mreuse;
3601:   MatScalar      *aa,*vwork;
3602:   MPI_Comm       comm;
3603:   Mat_SeqAIJ     *aij;
3604:   PetscBool      colflag,allcolumns=PETSC_FALSE;

3606:   PetscObjectGetComm((PetscObject)mat,&comm);
3607:   MPI_Comm_rank(comm,&rank);
3608:   MPI_Comm_size(comm,&size);

3610:   /* Check for special case: each processor gets entire matrix columns */
3611:   ISIdentity(iscol,&colflag);
3612:   ISGetLocalSize(iscol,&n);
3613:   if (colflag && n == mat->cmap->N) allcolumns = PETSC_TRUE;
3614:   MPIU_Allreduce(MPI_IN_PLACE,&allcolumns,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)mat));

3616:   if (call ==  MAT_REUSE_MATRIX) {
3617:     PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject*)&Mreuse);
3619:     MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol,MAT_REUSE_MATRIX,allcolumns,&Mreuse);
3620:   } else {
3621:     MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol,MAT_INITIAL_MATRIX,allcolumns,&Mreuse);
3622:   }

3624:   /*
3625:       m - number of local rows
3626:       n - number of columns (same on all processors)
3627:       rstart - first row in new global matrix generated
3628:   */
3629:   MatGetSize(Mreuse,&m,&n);
3630:   MatGetBlockSizes(Mreuse,&bs,&cbs);
3631:   if (call == MAT_INITIAL_MATRIX) {
3632:     aij = (Mat_SeqAIJ*)(Mreuse)->data;
3633:     ii  = aij->i;
3634:     jj  = aij->j;

3636:     /*
3637:         Determine the number of non-zeros in the diagonal and off-diagonal
3638:         portions of the matrix in order to do correct preallocation
3639:     */

3641:     /* first get start and end of "diagonal" columns */
3642:     if (csize == PETSC_DECIDE) {
3643:       ISGetSize(isrow,&mglobal);
3644:       if (mglobal == n) { /* square matrix */
3645:         nlocal = m;
3646:       } else {
3647:         nlocal = n/size + ((n % size) > rank);
3648:       }
3649:     } else {
3650:       nlocal = csize;
3651:     }
3652:     MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);
3653:     rstart = rend - nlocal;

3656:     /* next, compute all the lengths */
3657:     PetscMalloc1(2*m+1,&dlens);
3658:     olens = dlens + m;
3659:     for (i=0; i<m; i++) {
3660:       jend = ii[i+1] - ii[i];
3661:       olen = 0;
3662:       dlen = 0;
3663:       for (j=0; j<jend; j++) {
3664:         if (*jj < rstart || *jj >= rend) olen++;
3665:         else dlen++;
3666:         jj++;
3667:       }
3668:       olens[i] = olen;
3669:       dlens[i] = dlen;
3670:     }
3671:     MatCreate(comm,&M);
3672:     MatSetSizes(M,m,nlocal,PETSC_DECIDE,n);
3673:     MatSetBlockSizes(M,bs,cbs);
3674:     MatSetType(M,((PetscObject)mat)->type_name);
3675:     MatMPIAIJSetPreallocation(M,0,dlens,0,olens);
3676:     PetscFree(dlens);
3677:   } else {
3678:     PetscInt ml,nl;

3680:     M    = *newmat;
3681:     MatGetLocalSize(M,&ml,&nl);
3683:     MatZeroEntries(M);
3684:     /*
3685:          The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3686:        rather than the slower MatSetValues().
3687:     */
3688:     M->was_assembled = PETSC_TRUE;
3689:     M->assembled     = PETSC_FALSE;
3690:   }
3691:   MatGetOwnershipRange(M,&rstart,&rend);
3692:   aij  = (Mat_SeqAIJ*)(Mreuse)->data;
3693:   ii   = aij->i;
3694:   jj   = aij->j;

3696:   /* trigger copy to CPU if needed */
3697:   MatSeqAIJGetArrayRead(Mreuse,(const PetscScalar**)&aa);
3698:   for (i=0; i<m; i++) {
3699:     row   = rstart + i;
3700:     nz    = ii[i+1] - ii[i];
3701:     cwork = jj; jj += nz;
3702:     vwork = aa; aa += nz;
3703:     MatSetValues_MPIAIJ(M,1,&row,nz,cwork,vwork,INSERT_VALUES);
3704:   }
3705:   MatSeqAIJRestoreArrayRead(Mreuse,(const PetscScalar**)&aa);

3707:   MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);
3708:   MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);
3709:   *newmat = M;

3711:   /* save submatrix used in processor for next request */
3712:   if (call ==  MAT_INITIAL_MATRIX) {
3713:     PetscObjectCompose((PetscObject)M,"SubMatrix",(PetscObject)Mreuse);
3714:     MatDestroy(&Mreuse);
3715:   }
3716:   return 0;
3717: }

3719: PetscErrorCode MatMPIAIJSetPreallocationCSR_MPIAIJ(Mat B,const PetscInt Ii[],const PetscInt J[],const PetscScalar v[])
3720: {
3721:   PetscInt       m,cstart, cend,j,nnz,i,d;
3722:   PetscInt       *d_nnz,*o_nnz,nnz_max = 0,rstart,ii;
3723:   const PetscInt *JJ;
3724:   PetscBool      nooffprocentries;


3728:   PetscLayoutSetUp(B->rmap);
3729:   PetscLayoutSetUp(B->cmap);
3730:   m      = B->rmap->n;
3731:   cstart = B->cmap->rstart;
3732:   cend   = B->cmap->rend;
3733:   rstart = B->rmap->rstart;

3735:   PetscCalloc2(m,&d_nnz,m,&o_nnz);

3737:   if (PetscDefined(USE_DEBUG)) {
3738:     for (i=0; i<m; i++) {
3739:       nnz = Ii[i+1]- Ii[i];
3740:       JJ  = J + Ii[i];
3744:     }
3745:   }

3747:   for (i=0; i<m; i++) {
3748:     nnz     = Ii[i+1]- Ii[i];
3749:     JJ      = J + Ii[i];
3750:     nnz_max = PetscMax(nnz_max,nnz);
3751:     d       = 0;
3752:     for (j=0; j<nnz; j++) {
3753:       if (cstart <= JJ[j] && JJ[j] < cend) d++;
3754:     }
3755:     d_nnz[i] = d;
3756:     o_nnz[i] = nnz - d;
3757:   }
3758:   MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);
3759:   PetscFree2(d_nnz,o_nnz);

3761:   for (i=0; i<m; i++) {
3762:     ii   = i + rstart;
3763:     MatSetValues_MPIAIJ(B,1,&ii,Ii[i+1] - Ii[i],J+Ii[i], v ? v + Ii[i] : NULL,INSERT_VALUES);
3764:   }
3765:   nooffprocentries    = B->nooffprocentries;
3766:   B->nooffprocentries = PETSC_TRUE;
3767:   MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
3768:   MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
3769:   B->nooffprocentries = nooffprocentries;

3771:   MatSetOption(B,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
3772:   return 0;
3773: }

3775: /*@
3776:    MatMPIAIJSetPreallocationCSR - Allocates memory for a sparse parallel matrix in AIJ format
3777:    (the default parallel PETSc format).

3779:    Collective

3781:    Input Parameters:
3782: +  B - the matrix
3783: .  i - the indices into j for the start of each local row (starts with zero)
3784: .  j - the column indices for each local row (starts with zero)
3785: -  v - optional values in the matrix

3787:    Level: developer

3789:    Notes:
3790:        The i, j, and v arrays ARE copied by this routine into the internal format used by PETSc;
3791:      thus you CANNOT change the matrix entries by changing the values of v[] after you have
3792:      called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.

3794:        The i and j indices are 0 based, and the i indices are offsets into the local j array.

3796:        The format used for the sparse matrix input is equivalent to a
3797:     row-major ordering, i.e., for the following matrix the input data expected is
3798:     as shown

3800: $        1 0 0
3801: $        2 0 3     P0
3802: $       -------
3803: $        4 5 6     P1
3804: $
3805: $     Process0 [P0]: rows_owned=[0,1]
3806: $        i =  {0,1,3}  [size = nrow+1  = 2+1]
3807: $        j =  {0,0,2}  [size = 3]
3808: $        v =  {1,2,3}  [size = 3]
3809: $
3810: $     Process1 [P1]: rows_owned=[2]
3811: $        i =  {0,3}    [size = nrow+1  = 1+1]
3812: $        j =  {0,1,2}  [size = 3]
3813: $        v =  {4,5,6}  [size = 3]
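
       A minimal sketch, assuming the two-process layout above, of the calls process 0 would
     make (the variable names are illustrative):
.vb
      Mat         B;
      PetscInt    i[] = {0,1,3},j[] = {0,0,2};
      PetscScalar v[] = {1,2,3};
      MatCreate(PETSC_COMM_WORLD,&B);
      MatSetSizes(B,2,PETSC_DECIDE,3,3);    2 local rows on process 0
      MatSetType(B,MATMPIAIJ);
      MatMPIAIJSetPreallocationCSR(B,i,j,v);
.ve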

3815: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatCreateAIJ(), MATMPIAIJ,
3816:           MatCreateSeqAIJWithArrays(), MatCreateMPIAIJWithSplitArrays()
3817: @*/
3818: PetscErrorCode  MatMPIAIJSetPreallocationCSR(Mat B,const PetscInt i[],const PetscInt j[], const PetscScalar v[])
3819: {
3820:   PetscTryMethod(B,"MatMPIAIJSetPreallocationCSR_C",(Mat,const PetscInt[],const PetscInt[],const PetscScalar[]),(B,i,j,v));
3821:   return 0;
3822: }

3824: /*@C
3825:    MatMPIAIJSetPreallocation - Preallocates memory for a sparse parallel matrix in AIJ format
3826:    (the default parallel PETSc format).  For good matrix assembly performance
3827:    the user should preallocate the matrix storage by setting the parameters
3828:    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
3829:    performance can be increased by more than a factor of 50.

3831:    Collective

3833:    Input Parameters:
3834: +  B - the matrix
3835: .  d_nz  - number of nonzeros per row in DIAGONAL portion of local submatrix
3836:            (same value is used for all local rows)
3837: .  d_nnz - array containing the number of nonzeros in the various rows of the
3838:            DIAGONAL portion of the local submatrix (possibly different for each row)
3839:            or NULL (PETSC_NULL_INTEGER in Fortran), if d_nz is used to specify the nonzero structure.
3840:            The size of this array is equal to the number of local rows, i.e 'm'.
3841:            For matrices that will be factored, you must leave room for (and set)
3842:            the diagonal entry even if it is zero.
3843: .  o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of local
3844:            submatrix (same value is used for all local rows).
3845: -  o_nnz - array containing the number of nonzeros in the various rows of the
3846:            OFF-DIAGONAL portion of the local submatrix (possibly different for
3847:            each row) or NULL (PETSC_NULL_INTEGER in Fortran), if o_nz is used to specify the nonzero
3848:            structure. The size of this array is equal to the number
3849:            of local rows, i.e 'm'.

3851:    If the *_nnz parameter is given then the *_nz parameter is ignored

3853:    The AIJ format (also called the Yale sparse matrix format or
3854:    compressed row storage (CSR)), is fully compatible with standard Fortran 77
3855:    storage.  The stored row and column indices begin with zero.
3856:    See Users-Manual: ch_mat for details.

3858:    The parallel matrix is partitioned such that the first m0 rows belong to
3859:    process 0, the next m1 rows belong to process 1, the next m2 rows belong
3860:    to process 2 etc.. where m0,m1,m2... are the input parameter 'm'.

3862:    The DIAGONAL portion of the local submatrix of a processor can be defined
3863:    as the submatrix which is obtained by extracting the part corresponding to
3864:    the rows r1-r2 and columns c1-c2 of the global matrix, where r1 is the
3865:    first row that belongs to the processor, r2 is the last row belonging to
3866:    this processor, and c1-c2 is the range of indices of the local part of a
3867:    vector suitable for applying the matrix to.  This is an mxn matrix.  In the
3868:    common case of a square matrix, the row and column ranges are the same and
3869:    the DIAGONAL part is also square. The remaining portion of the local
3870:    submatrix (m x (N-n)) constitutes the OFF-DIAGONAL portion.

3872:    If o_nnz, d_nnz are specified, then o_nz, and d_nz are ignored.

3874:    You can call MatGetInfo() to get information on how effective the preallocation was;
3875:    for example the fields mallocs,nz_allocated,nz_used,nz_unneeded;
3876:    You can also run with the option -info and look for messages with the string
3877:    malloc in them to see if additional memory allocation was needed.

3879:    Example usage:

3881:    Consider the following 8x8 matrix with 34 non-zero values that is
3882:    assembled across 3 processors. Let's assume that proc0 owns 3 rows,
3883:    proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
3884:    as follows:

3886: .vb
3887:             1  2  0  |  0  3  0  |  0  4
3888:     Proc0   0  5  6  |  7  0  0  |  8  0
3889:             9  0 10  | 11  0  0  | 12  0
3890:     -------------------------------------
3891:            13  0 14  | 15 16 17  |  0  0
3892:     Proc1   0 18  0  | 19 20 21  |  0  0
3893:             0  0  0  | 22 23  0  | 24  0
3894:     -------------------------------------
3895:     Proc2  25 26 27  |  0  0 28  | 29  0
3896:            30  0  0  | 31 32 33  |  0 34
3897: .ve

3899:    This can be represented as a collection of submatrices as:

3901: .vb
3902:       A B C
3903:       D E F
3904:       G H I
3905: .ve

3907:    Where the submatrices A,B,C are owned by proc0, D,E,F are
3908:    owned by proc1, G,H,I are owned by proc2.

3910:    The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3911:    The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3912:    The 'M','N' parameters are 8,8, and have the same values on all procs.

3914:    The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
3915:    submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
3916:    corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
3917:    Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
3918:    part as SeqAIJ matrices. For example, proc1 will store [E] as a SeqAIJ
3919:    matrix, and [DF] as another SeqAIJ matrix.

3921:    When d_nz, o_nz parameters are specified, d_nz storage elements are
3922:    allocated for every row of the local diagonal submatrix, and o_nz
3923:    storage locations are allocated for every row of the OFF-DIAGONAL submat.
3924:    One way to choose d_nz and o_nz is to use the maximum number of nonzeros per
3925:    local row in the local DIAGONAL and OFF-DIAGONAL submatrices, respectively.
3926:    In this case, the values of d_nz,o_nz are:
3927: .vb
3928:      proc0 : dnz = 2, o_nz = 2
3929:      proc1 : dnz = 3, o_nz = 2
3930:      proc2 : dnz = 1, o_nz = 4
3931: .ve
3932:    We are allocating m*(d_nz+o_nz) storage locations for every proc. This
3933:    translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
3934:    for proc2, i.e., we are using 12+15+10=37 storage locations to store
3935:    34 values.

3937:    When d_nnz, o_nnz parameters are specified, the storage is specified
3938:    for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
3939:    In the above case the values for d_nnz,o_nnz are:
3940: .vb
3941:      proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
3942:      proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
3943:      proc2: d_nnz = [1,1]   and o_nnz = [4,4]
3944: .ve
3945:    Here the space allocated is the sum of all the above values, i.e., 34, and
3946:    hence the preallocation is perfect.
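
   A minimal sketch, using the d_nnz/o_nnz values above, of the call process 0 would make
   (the variable names are illustrative):
.vb
     Mat      B;
     PetscInt d_nnz[3] = {2,2,2},o_nnz[3] = {2,2,2};
     MatCreate(PETSC_COMM_WORLD,&B);
     MatSetSizes(B,3,3,8,8);               3 local rows and columns on process 0
     MatSetType(B,MATMPIAIJ);
     MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);
.ve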

3948:    Level: intermediate

3950: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatCreateAIJ(), MatMPIAIJSetPreallocationCSR(),
3951:           MATMPIAIJ, MatGetInfo(), PetscSplitOwnership()
3952: @*/
3953: PetscErrorCode MatMPIAIJSetPreallocation(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
3954: {
3957:   PetscTryMethod(B,"MatMPIAIJSetPreallocation_C",(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[]),(B,d_nz,d_nnz,o_nz,o_nnz));
3958:   return 0;
3959: }

3961: /*@
3962:      MatCreateMPIAIJWithArrays - creates an MPI AIJ matrix using arrays that contain the local
3963:          rows in standard CSR format.

3965:    Collective

3967:    Input Parameters:
3968: +  comm - MPI communicator
3969: .  m - number of local rows (Cannot be PETSC_DECIDE)
3970: .  n - This value should be the same as the local size used in creating the
3971:        x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
3972:        calculated if N is given) For square matrices n is almost always m.
3973: .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
3974: .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
3975: .   i - row indices; that is i[0] = 0, i[row] = i[row-1] + number of elements in that row of the matrix
3976: .   j - column indices
3977: -   a - matrix values

3979:    Output Parameter:
3980: .   mat - the matrix

3982:    Level: intermediate

3984:    Notes:
3985:        The i, j, and a arrays ARE copied by this routine into the internal format used by PETSc;
3986:      thus you CANNOT change the matrix entries by changing the values of a[] after you have
3987:      called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.

3989:        The i and j indices are 0 based, and the i indices are offsets into the local j array.

3991:        The format used for the sparse matrix input is equivalent to a
3992:     row-major ordering, i.e., for the following matrix the input data expected is
3993:     as shown

3995:        Once you have created the matrix you can update it with new numerical values using MatUpdateMPIAIJWithArrays().

3997: $        1 0 0
3998: $        2 0 3     P0
3999: $       -------
4000: $        4 5 6     P1
4001: $
4002: $     Process0 [P0]: rows_owned=[0,1]
4003: $        i =  {0,1,3}  [size = nrow+1  = 2+1]
4004: $        j =  {0,0,2}  [size = 3]
4005: $        v =  {1,2,3}  [size = 3]
4006: $
4007: $     Process1 [P1]: rows_owned=[2]
4008: $        i =  {0,3}    [size = nrow+1  = 1+1]
4009: $        j =  {0,1,2}  [size = 3]
4010: $        v =  {4,5,6}  [size = 3]
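
       A minimal sketch, assuming the two-process layout above, of the call process 0 would
     make (the variable names are illustrative):
.vb
      Mat         A;
      PetscInt    i[] = {0,1,3},j[] = {0,0,2};
      PetscScalar v[] = {1,2,3};
      MatCreateMPIAIJWithArrays(PETSC_COMM_WORLD,2,PETSC_DECIDE,3,3,i,j,v,&A);
.ve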

4012: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4013:           MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithSplitArrays(), MatUpdateMPIAIJWithArrays()
4014: @*/
4015: PetscErrorCode MatCreateMPIAIJWithArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt i[],const PetscInt j[],const PetscScalar a[],Mat *mat)
4016: {
4019:   MatCreate(comm,mat);
4020:   MatSetSizes(*mat,m,n,M,N);
4021:   /* MatSetBlockSizes(M,bs,cbs); */
4022:   MatSetType(*mat,MATMPIAIJ);
4023:   MatMPIAIJSetPreallocationCSR(*mat,i,j,a);
4024:   return 0;
4025: }

4027: /*@
4028:      MatUpdateMPIAIJWithArrays - updates an MPI AIJ matrix using arrays that contain the local
4029:          rows in standard CSR format. Only the numerical values are updated; the other arrays must be identical to those used to create the matrix.

4031:    Collective

4033:    Input Parameters:
4034: +  mat - the matrix
4035: .  m - number of local rows (Cannot be PETSC_DECIDE)
4036: .  n - This value should be the same as the local size used in creating the
4037:        x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
4038:        calculated if N is given) For square matrices n is almost always m.
4039: .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
4040: .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
4041: .  Ii - row indices; that is Ii[0] = 0, Ii[row] = Ii[row-1] + number of elements in that row of the matrix
4042: .  J - column indices
4043: -  v - matrix values
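
   A minimal sketch (names illustrative); the arrays must have exactly the same layout as the
   ones the matrix was created with, only the numerical values in v may differ:
.vb
     MatCreateMPIAIJWithArrays(PETSC_COMM_WORLD,m,n,M,N,i,j,v,&A);
     ... compute new numerical values vnew with the same sparsity pattern ...
     MatUpdateMPIAIJWithArrays(A,m,n,M,N,i,j,vnew);
.ve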

4045:    Level: intermediate

4047: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4048:           MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithSplitArrays(), MatUpdateMPIAIJWithArrays()
4049: @*/
4050: PetscErrorCode MatUpdateMPIAIJWithArrays(Mat mat,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt Ii[],const PetscInt J[],const PetscScalar v[])
4051: {
4052:   PetscInt       cstart,nnz,i,j;
4053:   PetscInt       *ld;
4054:   PetscBool      nooffprocentries;
4055:   Mat_MPIAIJ     *Aij = (Mat_MPIAIJ*)mat->data;
4056:   Mat_SeqAIJ     *Ad  = (Mat_SeqAIJ*)Aij->A->data;
4057:   PetscScalar    *ad,*ao;
4058:   const PetscInt *Adi = Ad->i;
4059:   PetscInt       ldi,Iii,md;


4066:   MatSeqAIJGetArrayWrite(Aij->A,&ad);
4067:   MatSeqAIJGetArrayWrite(Aij->B,&ao);
4068:   cstart = mat->cmap->rstart;
4069:   if (!Aij->ld) {
4070:     /* count number of entries below block diagonal */
4071:     PetscCalloc1(m,&ld);
4072:     Aij->ld = ld;
4073:     for (i=0; i<m; i++) {
4074:       nnz  = Ii[i+1]- Ii[i];
4075:       j     = 0;
4076:       while (j < nnz && J[j] < cstart) {j++;}
4077:       J    += nnz;
4078:       ld[i] = j;
4079:     }
4080:   } else {
4081:     ld = Aij->ld;
4082:   }

4084:   for (i=0; i<m; i++) {
4085:     nnz  = Ii[i+1]- Ii[i];
4086:     Iii  = Ii[i];
4087:     ldi  = ld[i];
4088:     md   = Adi[i+1]-Adi[i];
4089:     PetscArraycpy(ao,v + Iii,ldi);
4090:     PetscArraycpy(ad,v + Iii + ldi,md);
4091:     PetscArraycpy(ao + ldi,v + Iii + ldi + md,nnz - ldi - md);
4092:     ad  += md;
4093:     ao  += nnz - md;
4094:   }
4095:   nooffprocentries      = mat->nooffprocentries;
4096:   mat->nooffprocentries = PETSC_TRUE;
4097:   MatSeqAIJRestoreArrayWrite(Aij->A,&ad);
4098:   MatSeqAIJRestoreArrayWrite(Aij->B,&ao);
4099:   PetscObjectStateIncrease((PetscObject)Aij->A);
4100:   PetscObjectStateIncrease((PetscObject)Aij->B);
4101:   PetscObjectStateIncrease((PetscObject)mat);
4102:   MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
4103:   MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
4104:   mat->nooffprocentries = nooffprocentries;
4105:   return 0;
4106: }

4108: /*@C
4109:    MatCreateAIJ - Creates a sparse parallel matrix in AIJ format
4110:    (the default parallel PETSc format).  For good matrix assembly performance
4111:    the user should preallocate the matrix storage by setting the parameters
4112:    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
4113:    performance can be increased by more than a factor of 50.

4115:    Collective

4117:    Input Parameters:
4118: +  comm - MPI communicator
4119: .  m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
4120:            This value should be the same as the local size used in creating the
4121:            y vector for the matrix-vector product y = Ax.
4122: .  n - This value should be the same as the local size used in creating the
4123:        x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
4124:        calculated if N is given) For square matrices n is almost always m.
4125: .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
4126: .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
4127: .  d_nz  - number of nonzeros per row in DIAGONAL portion of local submatrix
4128:            (same value is used for all local rows)
4129: .  d_nnz - array containing the number of nonzeros in the various rows of the
4130:            DIAGONAL portion of the local submatrix (possibly different for each row)
4131:            or NULL, if d_nz is used to specify the nonzero structure.
4132:            The size of this array is equal to the number of local rows, i.e 'm'.
4133: .  o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of local
4134:            submatrix (same value is used for all local rows).
4135: -  o_nnz - array containing the number of nonzeros in the various rows of the
4136:            OFF-DIAGONAL portion of the local submatrix (possibly different for
4137:            each row) or NULL, if o_nz is used to specify the nonzero
4138:            structure. The size of this array is equal to the number
4139:            of local rows, i.e 'm'.

4141:    Output Parameter:
4142: .  A - the matrix

4144:    It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
4145:    MatXXXXSetPreallocation() paradigm instead of this routine directly.
4146:    [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]

4148:    Notes:
4149:    If the *_nnz parameter is given then the *_nz parameter is ignored

4151:    m,n,M,N parameters specify the size of the matrix, and its partitioning across
4152:    processors, while d_nz,d_nnz,o_nz,o_nnz parameters specify the approximate
4153:    storage requirements for this matrix.

4155:    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one
4156:    processor then it must be used on all processors that share the object for
4157:    that argument.

4159:    The user MUST specify either the local or global matrix dimensions
4160:    (possibly both).

4162:    The parallel matrix is partitioned across processors such that the
4163:    first m0 rows belong to process 0, the next m1 rows belong to
4164:    process 1, the next m2 rows belong to process 2 etc.. where
4165:    m0,m1,m2,.. are the input parameter 'm', i.e., each processor stores
4166:    values corresponding to an [m x N] submatrix.

4168:    The columns are logically partitioned with the n0 columns belonging
4169:    to 0th partition, the next n1 columns belonging to the next
4170:    partition etc.. where n0,n1,n2... are the input parameter 'n'.

4172:    The DIAGONAL portion of the local submatrix on any given processor
4173:    is the submatrix corresponding to the rows and columns m,n
4174:    corresponding to the given processor, i.e., the diagonal matrix on
4175:    process 0 is [m0 x n0], the diagonal matrix on process 1 is [m1 x n1],
4176:    etc. The remaining portion of the local submatrix [m x (N-n)]
4177:    constitutes the OFF-DIAGONAL portion. The example below better
4178:    illustrates this concept.

4180:    For a square global matrix we define each processor's diagonal portion
4181:    to be its local rows and the corresponding columns (a square submatrix);
4182:    each processor's off-diagonal portion encompasses the remainder of the
4183:    local matrix (a rectangular submatrix).

4185:    If o_nnz, d_nnz are specified, then o_nz, and d_nz are ignored.

4187:    When calling this routine with a single process communicator, a matrix of
4188:    type SEQAIJ is returned.  If a matrix of type MPIAIJ is desired for this
4189:    type of communicator, use the construction mechanism
4190: .vb
4191:      MatCreate(...,&A); MatSetType(A,MATMPIAIJ); MatSetSizes(A, m,n,M,N); MatMPIAIJSetPreallocation(A,...);
4192: .ve


4199:    By default, this format uses inodes (identical nodes) when possible.
4200:    We search for consecutive rows with the same nonzero structure, thereby
4201:    reusing matrix information to achieve increased efficiency.

4203:    Options Database Keys:
4204: +  -mat_no_inode  - Do not use inodes
4205: .  -mat_inode_limit <limit> - Sets inode limit (max limit=5)
4206: -  -matmult_vecscatter_view <viewer> - View the vecscatter (i.e., communication pattern) used in MatMult() of sparse parallel matrices.
4207:         See viewer types in the manual page of MatView(). Of them, ascii_matlab, draw or binary cause the vecscatter to be viewed as a matrix.
4208:         Entry (i,j) is the size of the message (in bytes) that rank i sends to rank j in one MatMult() call.

4210:    Example usage:

4212:    Consider the following 8x8 matrix with 34 non-zero values that is
4213:    assembled across 3 processors. Let's assume that proc0 owns 3 rows,
4214:    proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
4215:    as follows

4217: .vb
4218:             1  2  0  |  0  3  0  |  0  4
4219:     Proc0   0  5  6  |  7  0  0  |  8  0
4220:             9  0 10  | 11  0  0  | 12  0
4221:     -------------------------------------
4222:            13  0 14  | 15 16 17  |  0  0
4223:     Proc1   0 18  0  | 19 20 21  |  0  0
4224:             0  0  0  | 22 23  0  | 24  0
4225:     -------------------------------------
4226:     Proc2  25 26 27  |  0  0 28  | 29  0
4227:            30  0  0  | 31 32 33  |  0 34
4228: .ve

4230:    This can be represented as a collection of submatrices as

4232: .vb
4233:       A B C
4234:       D E F
4235:       G H I
4236: .ve

4238:    where the submatrices A,B,C are owned by proc0, D,E,F are
4239:    owned by proc1, and G,H,I are owned by proc2.

4241:    The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4242:    The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4243:    The 'M','N' parameters are 8,8, and have the same values on all procs.

4245:    The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
4246:    submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
4247:    corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
4248:    Internally, each processor stores the DIAGONAL part and the OFF-DIAGONAL
4249:    part as SeqAIJ matrices; e.g., proc1 will store [E] as one SeqAIJ
4250:    matrix and [DF] as another SeqAIJ matrix.

4252:    When the d_nz, o_nz parameters are specified, d_nz storage elements are
4253:    allocated for every row of the local DIAGONAL submatrix, and o_nz
4254:    storage locations are allocated for every row of the OFF-DIAGONAL submatrix.
4255:    One way to choose d_nz and o_nz is to use the maximum number of nonzeros
4256:    per local row in the local DIAGONAL and OFF-DIAGONAL submatrices, respectively.
4257:    In this case, the values of d_nz,o_nz are
4258: .vb
4259:      proc0 : d_nz = 2, o_nz = 2
4260:      proc1 : d_nz = 3, o_nz = 2
4261:      proc2 : d_nz = 1, o_nz = 4
4262: .ve
4263:    We are allocating m*(d_nz+o_nz) storage locations for every proc. This
4264:    translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
4265:    for proc2. That is, we are using 12+15+10=37 storage locations to store
4266:    34 values.
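
   A minimal sketch of the corresponding call on proc1 (the communicator and the
   Mat variable A are illustrative; each process passes its own values):
.vb
     MatCreateAIJ(PETSC_COMM_WORLD,3,3,8,8,3,NULL,2,NULL,&A);
.ve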

4268:    When d_nnz, o_nnz parameters are specified, the storage is specified
4269:    for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
4270:    In the above case the values for d_nnz,o_nnz are
4271: .vb
4272:      proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
4273:      proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
4274:      proc2: d_nnz = [1,1]   and o_nnz = [4,4]
4275: .ve
4276:    Here the space allocated is the sum of all the above values, i.e. 34, and
4277:    hence the preallocation is perfect.
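
   With these per-row counts, a minimal sketch of the call on proc1 (again the
   communicator and the Mat variable A are illustrative) is
.vb
     PetscInt d_nnz[3] = {3,3,2}, o_nnz[3] = {2,1,1};
     MatCreateAIJ(PETSC_COMM_WORLD,3,3,8,8,0,d_nnz,0,o_nnz,&A);
.ve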

4279:    Level: intermediate

4281: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4282:           MATMPIAIJ, MatCreateMPIAIJWithArrays()
4283: @*/
4284: PetscErrorCode  MatCreateAIJ(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
4285: {
4286:   PetscMPIInt    size;

4288:   MatCreate(comm,A);
4289:   MatSetSizes(*A,m,n,M,N);
4290:   MPI_Comm_size(comm,&size);
4291:   if (size > 1) {
4292:     MatSetType(*A,MATMPIAIJ);
4293:     MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);
4294:   } else {
4295:     MatSetType(*A,MATSEQAIJ);
4296:     MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);
4297:   }
4298:   return 0;
4299: }

4301: /*@C
4302:   MatMPIAIJGetSeqAIJ - Returns the local piece of this distributed matrix

4304:   Not collective

4306:   Input Parameter:
4307: . A - The MPIAIJ matrix

4309:   Output Parameters:
4310: + Ad - The local diagonal block as a SeqAIJ matrix
4311: . Ao - The local off-diagonal block as a SeqAIJ matrix
4312: - colmap - An array mapping local column numbers of Ao to global column numbers of the parallel matrix

4314:   Note: The rows in Ad and Ao are in [0, Nr), where Nr is the number of local rows on this process. The columns
4315:   in Ad are in [0, Nc) where Nc is the number of local columns. The columns in Ao are in [0, Nco), where Nco is
4316:   the number of nonzero columns in the local off-diagonal piece of the matrix A. The array colmap maps these
4317:   local column numbers to global column numbers in the original matrix.
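
  A minimal usage sketch (the variables below are illustrative):
.vb
    Mat            Ad,Ao;
    const PetscInt *colmap;
    MatMPIAIJGetSeqAIJ(A,&Ad,&Ao,&colmap);
    /* local column j of Ao corresponds to global column colmap[j] of A */
.ve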

4319:   Level: intermediate

4321: .seealso: MatMPIAIJGetLocalMat(), MatMPIAIJGetLocalMatCondensed(), MatCreateAIJ(), MATMPIAIJ, MATSEQAIJ
4322: @*/
4323: PetscErrorCode MatMPIAIJGetSeqAIJ(Mat A,Mat *Ad,Mat *Ao,const PetscInt *colmap[])
4324: {
4325:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
4326:   PetscBool      flg;

4328:   PetscStrbeginswith(((PetscObject)A)->type_name,MATMPIAIJ,&flg);
4330:   if (Ad)     *Ad     = a->A;
4331:   if (Ao)     *Ao     = a->B;
4332:   if (colmap) *colmap = a->garray;
4333:   return 0;
4334: }

4336: PetscErrorCode MatCreateMPIMatConcatenateSeqMat_MPIAIJ(MPI_Comm comm,Mat inmat,PetscInt n,MatReuse scall,Mat *outmat)
4337: {
4339:   PetscInt       m,N,i,rstart,nnz,Ii;
4340:   PetscInt       *indx;
4341:   PetscScalar    *values;
4342:   MatType        rootType;

4344:   MatGetSize(inmat,&m,&N);
4345:   if (scall == MAT_INITIAL_MATRIX) { /* symbolic phase */
4346:     PetscInt       *dnz,*onz,sum,bs,cbs;

4348:     if (n == PETSC_DECIDE) {
4349:       PetscSplitOwnership(comm,&n,&N);
4350:     }
4351:     /* Check sum(n) = N */
4352:     MPIU_Allreduce(&n,&sum,1,MPIU_INT,MPI_SUM,comm);

4355:     MPI_Scan(&m, &rstart,1,MPIU_INT,MPI_SUM,comm);
4356:     rstart -= m;

4358:     MatPreallocateInitialize(comm,m,n,dnz,onz);
4359:     for (i=0; i<m; i++) {
4360:       MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,NULL);
4361:       MatPreallocateSet(i+rstart,nnz,indx,dnz,onz);
4362:       MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,NULL);
4363:     }

4365:     MatCreate(comm,outmat);
4366:     MatSetSizes(*outmat,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
4367:     MatGetBlockSizes(inmat,&bs,&cbs);
4368:     MatSetBlockSizes(*outmat,bs,cbs);
4369:     MatGetRootType_Private(inmat,&rootType);
4370:     MatSetType(*outmat,rootType);
4371:     MatSeqAIJSetPreallocation(*outmat,0,dnz);
4372:     MatMPIAIJSetPreallocation(*outmat,0,dnz,0,onz);
4373:     MatPreallocateFinalize(dnz,onz);
4374:     MatSetOption(*outmat,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
4375:   }

4377:   /* numeric phase */
4378:   MatGetOwnershipRange(*outmat,&rstart,NULL);
4379:   for (i=0; i<m; i++) {
4380:     MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,&values);
4381:     Ii   = i + rstart;
4382:     MatSetValues(*outmat,1,&Ii,nnz,indx,values,INSERT_VALUES);
4383:     MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,&values);
4384:   }
4385:   MatAssemblyBegin(*outmat,MAT_FINAL_ASSEMBLY);
4386:   MatAssemblyEnd(*outmat,MAT_FINAL_ASSEMBLY);
4387:   return 0;
4388: }

4390: PetscErrorCode MatFileSplit(Mat A,char *outfile)
4391: {
4392:   PetscMPIInt       rank;
4393:   PetscInt          m,N,i,rstart,nnz;
4394:   size_t            len;
4395:   const PetscInt    *indx;
4396:   PetscViewer       out;
4397:   char              *name;
4398:   Mat               B;
4399:   const PetscScalar *values;

4401:   MatGetLocalSize(A,&m,NULL);
4402:   MatGetSize(A,NULL,&N);
4403:   /* Should this be the type of the diagonal block of A? */
4404:   MatCreate(PETSC_COMM_SELF,&B);
4405:   MatSetSizes(B,m,N,m,N);
4406:   MatSetBlockSizesFromMats(B,A,A);
4407:   MatSetType(B,MATSEQAIJ);
4408:   MatSeqAIJSetPreallocation(B,0,NULL);
4409:   MatGetOwnershipRange(A,&rstart,NULL);
4410:   for (i=0; i<m; i++) {
4411:     MatGetRow(A,i+rstart,&nnz,&indx,&values);
4412:     MatSetValues(B,1,&i,nnz,indx,values,INSERT_VALUES);
4413:     MatRestoreRow(A,i+rstart,&nnz,&indx,&values);
4414:   }
4415:   MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
4416:   MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);

4418:   MPI_Comm_rank(PetscObjectComm((PetscObject)A),&rank);
4419:   PetscStrlen(outfile,&len);
4420:   PetscMalloc1(len+6,&name);
4421:   PetscSNPrintf(name,len+6,"%s.%d",outfile,rank);
4422:   PetscViewerBinaryOpen(PETSC_COMM_SELF,name,FILE_MODE_APPEND,&out);
4423:   PetscFree(name);
4424:   MatView(B,out);
4425:   PetscViewerDestroy(&out);
4426:   MatDestroy(&B);
4427:   return 0;
4428: }

4430: static PetscErrorCode MatDestroy_MPIAIJ_SeqsToMPI(void *data)
4431: {
4432:   Mat_Merge_SeqsToMPI *merge = (Mat_Merge_SeqsToMPI *)data;

4434:   if (!merge) return 0;
4435:   PetscFree(merge->id_r);
4436:   PetscFree(merge->len_s);
4437:   PetscFree(merge->len_r);
4438:   PetscFree(merge->bi);
4439:   PetscFree(merge->bj);
4440:   PetscFree(merge->buf_ri[0]);
4441:   PetscFree(merge->buf_ri);
4442:   PetscFree(merge->buf_rj[0]);
4443:   PetscFree(merge->buf_rj);
4444:   PetscFree(merge->coi);
4445:   PetscFree(merge->coj);
4446:   PetscFree(merge->owners_co);
4447:   PetscLayoutDestroy(&merge->rowmap);
4448:   PetscFree(merge);
4449:   return 0;
4450: }

4452: #include <../src/mat/utils/freespace.h>
4453: #include <petscbt.h>

4455: PetscErrorCode MatCreateMPIAIJSumSeqAIJNumeric(Mat seqmat,Mat mpimat)
4456: {
4457:   MPI_Comm            comm;
4458:   Mat_SeqAIJ          *a  =(Mat_SeqAIJ*)seqmat->data;
4459:   PetscMPIInt         size,rank,taga,*len_s;
4460:   PetscInt            N=mpimat->cmap->N,i,j,*owners,*ai=a->i,*aj;
4461:   PetscInt            proc,m;
4462:   PetscInt            **buf_ri,**buf_rj;
4463:   PetscInt            k,anzi,*bj_i,*bi,*bj,arow,bnzi,nextaj;
4464:   PetscInt            nrows,**buf_ri_k,**nextrow,**nextai;
4465:   MPI_Request         *s_waits,*r_waits;
4466:   MPI_Status          *status;
4467:   const MatScalar     *aa,*a_a;
4468:   MatScalar           **abuf_r,*ba_i;
4469:   Mat_Merge_SeqsToMPI *merge;
4470:   PetscContainer      container;

4472:   PetscObjectGetComm((PetscObject)mpimat,&comm);
4473:   PetscLogEventBegin(MAT_Seqstompinum,seqmat,0,0,0);

4475:   MPI_Comm_size(comm,&size);
4476:   MPI_Comm_rank(comm,&rank);

4478:   PetscObjectQuery((PetscObject)mpimat,"MatMergeSeqsToMPI",(PetscObject*)&container);
4480:   PetscContainerGetPointer(container,(void**)&merge);
4481:   MatSeqAIJGetArrayRead(seqmat,&a_a);
4482:   aa   = a_a;

4484:   bi     = merge->bi;
4485:   bj     = merge->bj;
4486:   buf_ri = merge->buf_ri;
4487:   buf_rj = merge->buf_rj;

4489:   PetscMalloc1(size,&status);
4490:   owners = merge->rowmap->range;
4491:   len_s  = merge->len_s;

4493:   /* send and recv matrix values */
4494:   /*-----------------------------*/
4495:   PetscObjectGetNewTag((PetscObject)mpimat,&taga);
4496:   PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);

4498:   PetscMalloc1(merge->nsend+1,&s_waits);
4499:   for (proc=0,k=0; proc<size; proc++) {
4500:     if (!len_s[proc]) continue;
4501:     i    = owners[proc];
4502:     MPI_Isend(aa+ai[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);
4503:     k++;
4504:   }

4506:   if (merge->nrecv) MPI_Waitall(merge->nrecv,r_waits,status);
4507:   if (merge->nsend) MPI_Waitall(merge->nsend,s_waits,status);
4508:   PetscFree(status);

4510:   PetscFree(s_waits);
4511:   PetscFree(r_waits);

4513:   /* insert mat values of mpimat */
4514:   /*----------------------------*/
4515:   PetscMalloc1(N,&ba_i);
4516:   PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextai);

4518:   for (k=0; k<merge->nrecv; k++) {
4519:     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
4520:     nrows       = *(buf_ri_k[k]);
4521:     nextrow[k]  = buf_ri_k[k]+1;  /* next row number of k-th recved i-structure */
4522:     nextai[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
4523:   }

4525:   /* set values of ba */
4526:   m    = merge->rowmap->n;
4527:   for (i=0; i<m; i++) {
4528:     arow = owners[rank] + i;
4529:     bj_i = bj+bi[i];  /* col indices of the i-th row of mpimat */
4530:     bnzi = bi[i+1] - bi[i];
4531:     PetscArrayzero(ba_i,bnzi);

4533:     /* add local non-zero vals of this proc's seqmat into ba */
4534:     anzi   = ai[arow+1] - ai[arow];
4535:     aj     = a->j + ai[arow];
4536:     aa     = a_a + ai[arow];
4537:     nextaj = 0;
4538:     for (j=0; nextaj<anzi; j++) {
4539:       if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */
4540:         ba_i[j] += aa[nextaj++];
4541:       }
4542:     }

4544:     /* add received vals into ba */
4545:     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
4546:       /* i-th row */
4547:       if (i == *nextrow[k]) {
4548:         anzi   = *(nextai[k]+1) - *nextai[k];
4549:         aj     = buf_rj[k] + *(nextai[k]);
4550:         aa     = abuf_r[k] + *(nextai[k]);
4551:         nextaj = 0;
4552:         for (j=0; nextaj<anzi; j++) {
4553:           if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */
4554:             ba_i[j] += aa[nextaj++];
4555:           }
4556:         }
4557:         nextrow[k]++; nextai[k]++;
4558:       }
4559:     }
4560:     MatSetValues(mpimat,1,&arow,bnzi,bj_i,ba_i,INSERT_VALUES);
4561:   }
4562:   MatSeqAIJRestoreArrayRead(seqmat,&a_a);
4563:   MatAssemblyBegin(mpimat,MAT_FINAL_ASSEMBLY);
4564:   MatAssemblyEnd(mpimat,MAT_FINAL_ASSEMBLY);

4566:   PetscFree(abuf_r[0]);
4567:   PetscFree(abuf_r);
4568:   PetscFree(ba_i);
4569:   PetscFree3(buf_ri_k,nextrow,nextai);
4570:   PetscLogEventEnd(MAT_Seqstompinum,seqmat,0,0,0);
4571:   return 0;
4572: }

4574: PetscErrorCode  MatCreateMPIAIJSumSeqAIJSymbolic(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,Mat *mpimat)
4575: {
4576:   PetscErrorCode      ierr;
4577:   Mat                 B_mpi;
4578:   Mat_SeqAIJ          *a=(Mat_SeqAIJ*)seqmat->data;
4579:   PetscMPIInt         size,rank,tagi,tagj,*len_s,*len_si,*len_ri;
4580:   PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
4581:   PetscInt            M=seqmat->rmap->n,N=seqmat->cmap->n,i,*owners,*ai=a->i,*aj=a->j;
4582:   PetscInt            len,proc,*dnz,*onz,bs,cbs;
4583:   PetscInt            k,anzi,*bi,*bj,*lnk,nlnk,arow,bnzi,nspacedouble=0;
4584:   PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextai;
4585:   MPI_Request         *si_waits,*sj_waits,*ri_waits,*rj_waits;
4586:   MPI_Status          *status;
4587:   PetscFreeSpaceList  free_space=NULL,current_space=NULL;
4588:   PetscBT             lnkbt;
4589:   Mat_Merge_SeqsToMPI *merge;
4590:   PetscContainer      container;

4592:   PetscLogEventBegin(MAT_Seqstompisym,seqmat,0,0,0);

4594:   /* make sure it is a PETSc comm */
4595:   PetscCommDuplicate(comm,&comm,NULL);
4596:   MPI_Comm_size(comm,&size);
4597:   MPI_Comm_rank(comm,&rank);

4599:   PetscNew(&merge);
4600:   PetscMalloc1(size,&status);

4602:   /* determine row ownership */
4603:   /*---------------------------------------------------------*/
4604:   PetscLayoutCreate(comm,&merge->rowmap);
4605:   PetscLayoutSetLocalSize(merge->rowmap,m);
4606:   PetscLayoutSetSize(merge->rowmap,M);
4607:   PetscLayoutSetBlockSize(merge->rowmap,1);
4608:   PetscLayoutSetUp(merge->rowmap);
4609:   PetscMalloc1(size,&len_si);
4610:   PetscMalloc1(size,&merge->len_s);

4612:   m      = merge->rowmap->n;
4613:   owners = merge->rowmap->range;

4615:   /* determine the number of messages to send, their lengths */
4616:   /*---------------------------------------------------------*/
4617:   len_s = merge->len_s;

4619:   len          = 0; /* length of buf_si[] */
4620:   merge->nsend = 0;
4621:   for (proc=0; proc<size; proc++) {
4622:     len_si[proc] = 0;
4623:     if (proc == rank) {
4624:       len_s[proc] = 0;
4625:     } else {
4626:       len_si[proc] = owners[proc+1] - owners[proc] + 1;
4627:       len_s[proc]  = ai[owners[proc+1]] - ai[owners[proc]]; /* num of rows to be sent to [proc] */
4628:     }
4629:     if (len_s[proc]) {
4630:       merge->nsend++;
4631:       nrows = 0;
4632:       for (i=owners[proc]; i<owners[proc+1]; i++) {
4633:         if (ai[i+1] > ai[i]) nrows++;
4634:       }
4635:       len_si[proc] = 2*(nrows+1);
4636:       len         += len_si[proc];
4637:     }
4638:   }

4640:   /* determine the number and length of messages to receive for ij-structure */
4641:   /*-------------------------------------------------------------------------*/
4642:   PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);
4643:   PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);

4645:   /* post the Irecv of j-structure */
4646:   /*-------------------------------*/
4647:   PetscCommGetNewTag(comm,&tagj);
4648:   PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rj_waits);

4650:   /* post the Isend of j-structure */
4651:   /*--------------------------------*/
4652:   PetscMalloc2(merge->nsend,&si_waits,merge->nsend,&sj_waits);

4654:   for (proc=0, k=0; proc<size; proc++) {
4655:     if (!len_s[proc]) continue;
4656:     i    = owners[proc];
4657:     MPI_Isend(aj+ai[i],len_s[proc],MPIU_INT,proc,tagj,comm,sj_waits+k);
4658:     k++;
4659:   }

4661:   /* receives and sends of j-structure are complete */
4662:   /*------------------------------------------------*/
4663:   if (merge->nrecv) MPI_Waitall(merge->nrecv,rj_waits,status);
4664:   if (merge->nsend) MPI_Waitall(merge->nsend,sj_waits,status);

4666:   /* send and recv i-structure */
4667:   /*---------------------------*/
4668:   PetscCommGetNewTag(comm,&tagi);
4669:   PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&ri_waits);

4671:   PetscMalloc1(len+1,&buf_s);
4672:   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
4673:   for (proc=0,k=0; proc<size; proc++) {
4674:     if (!len_s[proc]) continue;
4675:     /* form outgoing message for i-structure:
4676:          buf_si[0]:                 nrows to be sent
4677:                [1:nrows]:           row index (global)
4678:                [nrows+1:2*nrows+1]: i-structure index
4679:     */
4680:     /*-------------------------------------------*/
4681:     nrows       = len_si[proc]/2 - 1;
4682:     buf_si_i    = buf_si + nrows+1;
4683:     buf_si[0]   = nrows;
4684:     buf_si_i[0] = 0;
4685:     nrows       = 0;
4686:     for (i=owners[proc]; i<owners[proc+1]; i++) {
4687:       anzi = ai[i+1] - ai[i];
4688:       if (anzi) {
4689:         buf_si_i[nrows+1] = buf_si_i[nrows] + anzi; /* i-structure */
4690:         buf_si[nrows+1]   = i-owners[proc]; /* local row index */
4691:         nrows++;
4692:       }
4693:     }
4694:     MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,si_waits+k);
4695:     k++;
4696:     buf_si += len_si[proc];
4697:   }

4699:   if (merge->nrecv) MPI_Waitall(merge->nrecv,ri_waits,status);
4700:   if (merge->nsend) MPI_Waitall(merge->nsend,si_waits,status);

4702:   PetscInfo(seqmat,"nsend: %d, nrecv: %d\n",merge->nsend,merge->nrecv);
4703:   for (i=0; i<merge->nrecv; i++) {
4704:     PetscInfo(seqmat,"recv len_ri=%d, len_rj=%d from [%d]\n",len_ri[i],merge->len_r[i],merge->id_r[i]);
4705:   }

4707:   PetscFree(len_si);
4708:   PetscFree(len_ri);
4709:   PetscFree(rj_waits);
4710:   PetscFree2(si_waits,sj_waits);
4711:   PetscFree(ri_waits);
4712:   PetscFree(buf_s);
4713:   PetscFree(status);

4715:   /* compute a local seq matrix in each processor */
4716:   /*----------------------------------------------*/
4717:   /* allocate bi array and free space for accumulating nonzero column info */
4718:   PetscMalloc1(m+1,&bi);
4719:   bi[0] = 0;

4721:   /* create and initialize a linked list */
4722:   nlnk = N+1;
4723:   PetscLLCreate(N,N,nlnk,lnk,lnkbt);

4725:   /* initial FreeSpace size is 2*(num of local nnz(seqmat)) */
4726:   len  = ai[owners[rank+1]] - ai[owners[rank]];
4727:   PetscFreeSpaceGet(PetscIntMultTruncate(2,len)+1,&free_space);

4729:   current_space = free_space;

4731:   /* determine symbolic info for each local row */
4732:   PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextai);

4734:   for (k=0; k<merge->nrecv; k++) {
4735:     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
4736:     nrows       = *buf_ri_k[k];
4737:     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th recved i-structure */
4738:     nextai[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
4739:   }

4741:   MatPreallocateInitialize(comm,m,n,dnz,onz);
4742:   len  = 0;
4743:   for (i=0; i<m; i++) {
4744:     bnzi = 0;
4745:     /* add local non-zero cols of this proc's seqmat into lnk */
4746:     arow  = owners[rank] + i;
4747:     anzi  = ai[arow+1] - ai[arow];
4748:     aj    = a->j + ai[arow];
4749:     PetscLLAddSorted(anzi,aj,N,&nlnk,lnk,lnkbt);
4750:     bnzi += nlnk;
4751:     /* add received col data into lnk */
4752:     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
4753:       if (i == *nextrow[k]) { /* i-th row */
4754:         anzi  = *(nextai[k]+1) - *nextai[k];
4755:         aj    = buf_rj[k] + *nextai[k];
4756:         PetscLLAddSorted(anzi,aj,N,&nlnk,lnk,lnkbt);
4757:         bnzi += nlnk;
4758:         nextrow[k]++; nextai[k]++;
4759:       }
4760:     }
4761:     if (len < bnzi) len = bnzi;  /* =max(bnzi) */

4763:     /* if free space is not available, make more free space */
4764:     if (current_space->local_remaining<bnzi) {
4765:       PetscFreeSpaceGet(PetscIntSumTruncate(bnzi,current_space->total_array_size),&current_space);
4766:       nspacedouble++;
4767:     }
4768:     /* copy data into free space, then initialize lnk */
4769:     PetscLLClean(N,N,bnzi,lnk,current_space->array,lnkbt);
4770:     MatPreallocateSet(i+owners[rank],bnzi,current_space->array,dnz,onz);

4772:     current_space->array           += bnzi;
4773:     current_space->local_used      += bnzi;
4774:     current_space->local_remaining -= bnzi;

4776:     bi[i+1] = bi[i] + bnzi;
4777:   }

4779:   PetscFree3(buf_ri_k,nextrow,nextai);

4781:   PetscMalloc1(bi[m]+1,&bj);
4782:   PetscFreeSpaceContiguous(&free_space,bj);
4783:   PetscLLDestroy(lnk,lnkbt);

4785:   /* create symbolic parallel matrix B_mpi */
4786:   /*---------------------------------------*/
4787:   MatGetBlockSizes(seqmat,&bs,&cbs);
4788:   MatCreate(comm,&B_mpi);
4789:   if (n==PETSC_DECIDE) {
4790:     MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,N);
4791:   } else {
4792:     MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
4793:   }
4794:   MatSetBlockSizes(B_mpi,bs,cbs);
4795:   MatSetType(B_mpi,MATMPIAIJ);
4796:   MatMPIAIJSetPreallocation(B_mpi,0,dnz,0,onz);
4797:   MatPreallocateFinalize(dnz,onz);
4798:   MatSetOption(B_mpi,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE);

4800:   /* B_mpi is not ready for use - assembly will be done by MatCreateMPIAIJSumSeqAIJNumeric() */
4801:   B_mpi->assembled  = PETSC_FALSE;
4802:   merge->bi         = bi;
4803:   merge->bj         = bj;
4804:   merge->buf_ri     = buf_ri;
4805:   merge->buf_rj     = buf_rj;
4806:   merge->coi        = NULL;
4807:   merge->coj        = NULL;
4808:   merge->owners_co  = NULL;

4810:   PetscCommDestroy(&comm);

4812:   /* attach the supporting struct to B_mpi for reuse */
4813:   PetscContainerCreate(PETSC_COMM_SELF,&container);
4814:   PetscContainerSetPointer(container,merge);
4815:   PetscContainerSetUserDestroy(container,MatDestroy_MPIAIJ_SeqsToMPI);
4816:   PetscObjectCompose((PetscObject)B_mpi,"MatMergeSeqsToMPI",(PetscObject)container);
4817:   PetscContainerDestroy(&container);
4818:   *mpimat = B_mpi;

4820:   PetscLogEventEnd(MAT_Seqstompisym,seqmat,0,0,0);
4821:   return 0;
4822: }

4824: /*@C
4825:       MatCreateMPIAIJSumSeqAIJ - Creates a MATMPIAIJ matrix by adding sequential
4826:                  matrices from each processor

4828:     Collective

4830:    Input Parameters:
4831: +    comm - the communicator the parallel matrix will live on
4832: .    seqmat - the input sequential matrix (one per process)
4833: .    m - number of local rows (or PETSC_DECIDE)
4834: .    n - number of local columns (or PETSC_DECIDE)
4835: -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

4837:    Output Parameter:
4838: .    mpimat - the parallel matrix generated

4840:     Level: advanced

4842:    Notes:
4843:      The dimensions of the sequential matrix on each processor MUST be the same.
4844:      The input seqmat is included in the container "Mat_Merge_SeqsToMPI", and will be
4845:      destroyed when mpimat is destroyed. Call PetscObjectQuery() to access seqmat.
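
     A minimal calling sketch (error checking omitted): create the parallel matrix
     with MAT_INITIAL_MATRIX, then refresh its values with MAT_REUSE_MATRIX after
     the values of seqmat change (same nonzero pattern):
.vb
     MatCreateMPIAIJSumSeqAIJ(comm,seqmat,PETSC_DECIDE,PETSC_DECIDE,MAT_INITIAL_MATRIX,&mpimat);
     /* ... update the values of seqmat, keeping its nonzero pattern ... */
     MatCreateMPIAIJSumSeqAIJ(comm,seqmat,PETSC_DECIDE,PETSC_DECIDE,MAT_REUSE_MATRIX,&mpimat);
.ve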
4846: @*/
4847: PetscErrorCode MatCreateMPIAIJSumSeqAIJ(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,MatReuse scall,Mat *mpimat)
4848: {
4849:   PetscMPIInt    size;

4851:   MPI_Comm_size(comm,&size);
4852:   if (size == 1) {
4853:     PetscLogEventBegin(MAT_Seqstompi,seqmat,0,0,0);
4854:     if (scall == MAT_INITIAL_MATRIX) {
4855:       MatDuplicate(seqmat,MAT_COPY_VALUES,mpimat);
4856:     } else {
4857:       MatCopy(seqmat,*mpimat,SAME_NONZERO_PATTERN);
4858:     }
4859:     PetscLogEventEnd(MAT_Seqstompi,seqmat,0,0,0);
4860:     return 0;
4861:   }
4862:   PetscLogEventBegin(MAT_Seqstompi,seqmat,0,0,0);
4863:   if (scall == MAT_INITIAL_MATRIX) {
4864:     MatCreateMPIAIJSumSeqAIJSymbolic(comm,seqmat,m,n,mpimat);
4865:   }
4866:   MatCreateMPIAIJSumSeqAIJNumeric(seqmat,*mpimat);
4867:   PetscLogEventEnd(MAT_Seqstompi,seqmat,0,0,0);
4868:   return 0;
4869: }

4871: /*@
4872:      MatMPIAIJGetLocalMat - Creates a SeqAIJ from a MATMPIAIJ matrix by taking all its local rows and putting them into a sequential matrix with
4873:           mlocal rows and n columns, where mlocal is the row count obtained with MatGetLocalSize() and n is the global column count obtained
4874:           with MatGetSize().

4876:     Not Collective

4878:    Input Parameters:
4879: +    A - the matrix
4880: -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

4882:    Output Parameter:
4883: .    A_loc - the local sequential matrix generated

4885:     Level: developer

4887:    Notes:
4888:      When the communicator associated with A has size 1 and MAT_INITIAL_MATRIX is requested, the matrix returned is the diagonal part of A.
4889:      If MAT_REUSE_MATRIX is requested with comm size 1, MatCopy(Adiag,*A_loc,SAME_NONZERO_PATTERN) is called.
4890:      This means that one can preallocate the proper sequential matrix first and then call this routine with MAT_REUSE_MATRIX to safely
4891:      modify the values of the returned A_loc.
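
     A minimal sketch of this reuse pattern (error checking omitted):
.vb
     Mat A_loc;
     MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);
     /* ... the values of A change, its nonzero pattern stays the same ... */
     MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);
     MatDestroy(&A_loc);
.ve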

4893: .seealso: MatGetOwnershipRange(), MatMPIAIJGetLocalMatCondensed(), MatMPIAIJGetLocalMatMerge()
4894: @*/
4895: PetscErrorCode MatMPIAIJGetLocalMat(Mat A,MatReuse scall,Mat *A_loc)
4896: {
4897:   Mat_MPIAIJ        *mpimat=(Mat_MPIAIJ*)A->data;
4898:   Mat_SeqAIJ        *mat,*a,*b;
4899:   PetscInt          *ai,*aj,*bi,*bj,*cmap=mpimat->garray;
4900:   const PetscScalar *aa,*ba,*aav,*bav;
4901:   PetscScalar       *ca,*cam;
4902:   PetscMPIInt       size;
4903:   PetscInt          am=A->rmap->n,i,j,k,cstart=A->cmap->rstart;
4904:   PetscInt          *ci,*cj,col,ncols_d,ncols_o,jo;
4905:   PetscBool         match;

4907:   PetscStrbeginswith(((PetscObject)A)->type_name,MATMPIAIJ,&match);
4909:   MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);
4910:   if (size == 1) {
4911:     if (scall == MAT_INITIAL_MATRIX) {
4912:       PetscObjectReference((PetscObject)mpimat->A);
4913:       *A_loc = mpimat->A;
4914:     } else if (scall == MAT_REUSE_MATRIX) {
4915:       MatCopy(mpimat->A,*A_loc,SAME_NONZERO_PATTERN);
4916:     }
4917:     return 0;
4918:   }

4920:   PetscLogEventBegin(MAT_Getlocalmat,A,0,0,0);
4921:   a = (Mat_SeqAIJ*)(mpimat->A)->data;
4922:   b = (Mat_SeqAIJ*)(mpimat->B)->data;
4923:   ai = a->i; aj = a->j; bi = b->i; bj = b->j;
4924:   MatSeqAIJGetArrayRead(mpimat->A,&aav);
4925:   MatSeqAIJGetArrayRead(mpimat->B,&bav);
4926:   aa   = aav;
4927:   ba   = bav;
4928:   if (scall == MAT_INITIAL_MATRIX) {
4929:     PetscMalloc1(1+am,&ci);
4930:     ci[0] = 0;
4931:     for (i=0; i<am; i++) {
4932:       ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
4933:     }
4934:     PetscMalloc1(1+ci[am],&cj);
4935:     PetscMalloc1(1+ci[am],&ca);
4936:     k    = 0;
4937:     for (i=0; i<am; i++) {
4938:       ncols_o = bi[i+1] - bi[i];
4939:       ncols_d = ai[i+1] - ai[i];
4940:       /* off-diagonal portion of A */
4941:       for (jo=0; jo<ncols_o; jo++) {
4942:         col = cmap[*bj];
4943:         if (col >= cstart) break;
4944:         cj[k]   = col; bj++;
4945:         ca[k++] = *ba++;
4946:       }
4947:       /* diagonal portion of A */
4948:       for (j=0; j<ncols_d; j++) {
4949:         cj[k]   = cstart + *aj++;
4950:         ca[k++] = *aa++;
4951:       }
4952:       /* off-diagonal portion of A */
4953:       for (j=jo; j<ncols_o; j++) {
4954:         cj[k]   = cmap[*bj++];
4955:         ca[k++] = *ba++;
4956:       }
4957:     }
4958:     /* put together the new matrix */
4959:     MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,A->cmap->N,ci,cj,ca,A_loc);
4960:     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
4961:     /* Since these are PETSc arrays, change flags to free them as necessary. */
4962:     mat          = (Mat_SeqAIJ*)(*A_loc)->data;
4963:     mat->free_a  = PETSC_TRUE;
4964:     mat->free_ij = PETSC_TRUE;
4965:     mat->nonew   = 0;
4966:   } else if (scall == MAT_REUSE_MATRIX) {
4967:     mat  =(Mat_SeqAIJ*)(*A_loc)->data;
4968:     ci   = mat->i;
4969:     cj   = mat->j;
4970:     MatSeqAIJGetArrayWrite(*A_loc,&cam);
4971:     for (i=0; i<am; i++) {
4972:       /* off-diagonal portion of A */
4973:       ncols_o = bi[i+1] - bi[i];
4974:       for (jo=0; jo<ncols_o; jo++) {
4975:         col = cmap[*bj];
4976:         if (col >= cstart) break;
4977:         *cam++ = *ba++; bj++;
4978:       }
4979:       /* diagonal portion of A */
4980:       ncols_d = ai[i+1] - ai[i];
4981:       for (j=0; j<ncols_d; j++) *cam++ = *aa++;
4982:       /* off-diagonal portion of A */
4983:       for (j=jo; j<ncols_o; j++) {
4984:         *cam++ = *ba++; bj++;
4985:       }
4986:     }
4987:     MatSeqAIJRestoreArrayWrite(*A_loc,&cam);
4988:   } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
4989:   MatSeqAIJRestoreArrayRead(mpimat->A,&aav);
4990:   MatSeqAIJRestoreArrayRead(mpimat->B,&bav);
4991:   PetscLogEventEnd(MAT_Getlocalmat,A,0,0,0);
4992:   return 0;
4993: }

4995: /*@
4996:      MatMPIAIJGetLocalMatMerge - Creates a SeqAIJ from a MATMPIAIJ matrix by taking all its local rows and putting them into a sequential matrix with
4997:           mlocal rows and n columns, where n is the sum of the number of columns of the diagonal and off-diagonal parts

4999:     Not Collective

5001:    Input Parameters:
5002: +    A - the matrix
5003: -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

5005:    Output Parameters:
5006: +    glob - sequential IS with global indices associated with the columns of the local sequential matrix generated (can be NULL)
5007: -    A_loc - the local sequential matrix generated

5009:     Level: developer

5011:    Notes:
5012:      This is different from MatMPIAIJGetLocalMat() since the first columns in the returned matrix are those associated with the diagonal part, followed by those associated with the off-diagonal part (in its local ordering)
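
     A minimal sketch (illustrative variables; error checking omitted) showing how glob
     recovers the global column indices of the merged local matrix:
.vb
     Mat            A_loc;
     IS             glob;
     const PetscInt *gidx;
     MatMPIAIJGetLocalMatMerge(A,MAT_INITIAL_MATRIX,&glob,&A_loc);
     ISGetIndices(glob,&gidx);   /* gidx[j] is the global column of local column j of A_loc */
     ISRestoreIndices(glob,&gidx);
     ISDestroy(&glob);
     MatDestroy(&A_loc);
.ve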

5014: .seealso: MatGetOwnershipRange(), MatMPIAIJGetLocalMat(), MatMPIAIJGetLocalMatCondensed()

5016: @*/
5017: PetscErrorCode MatMPIAIJGetLocalMatMerge(Mat A,MatReuse scall,IS *glob,Mat *A_loc)
5018: {
5019:   Mat            Ao,Ad;
5020:   const PetscInt *cmap;
5021:   PetscMPIInt    size;
5022:   PetscErrorCode (*f)(Mat,MatReuse,IS*,Mat*);

5024:   MatMPIAIJGetSeqAIJ(A,&Ad,&Ao,&cmap);
5025:   MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);
5026:   if (size == 1) {
5027:     if (scall == MAT_INITIAL_MATRIX) {
5028:       PetscObjectReference((PetscObject)Ad);
5029:       *A_loc = Ad;
5030:     } else if (scall == MAT_REUSE_MATRIX) {
5031:       MatCopy(Ad,*A_loc,SAME_NONZERO_PATTERN);
5032:     }
5033:     if (glob) ISCreateStride(PetscObjectComm((PetscObject)Ad),Ad->cmap->n,Ad->cmap->rstart,1,glob);
5034:     return 0;
5035:   }
5036:   PetscObjectQueryFunction((PetscObject)A,"MatMPIAIJGetLocalMatMerge_C",&f);
5037:   PetscLogEventBegin(MAT_Getlocalmat,A,0,0,0);
5038:   if (f) {
5039:     (*f)(A,scall,glob,A_loc);
5040:   } else {
5041:     Mat_SeqAIJ        *a = (Mat_SeqAIJ*)Ad->data;
5042:     Mat_SeqAIJ        *b = (Mat_SeqAIJ*)Ao->data;
5043:     Mat_SeqAIJ        *c;
5044:     PetscInt          *ai = a->i, *aj = a->j;
5045:     PetscInt          *bi = b->i, *bj = b->j;
5046:     PetscInt          *ci,*cj;
5047:     const PetscScalar *aa,*ba;
5048:     PetscScalar       *ca;
5049:     PetscInt          i,j,am,dn,on;

5051:     MatGetLocalSize(Ad,&am,&dn);
5052:     MatGetLocalSize(Ao,NULL,&on);
5053:     MatSeqAIJGetArrayRead(Ad,&aa);
5054:     MatSeqAIJGetArrayRead(Ao,&ba);
5055:     if (scall == MAT_INITIAL_MATRIX) {
5056:       PetscInt k;
5057:       PetscMalloc1(1+am,&ci);
5058:       PetscMalloc1(ai[am]+bi[am],&cj);
5059:       PetscMalloc1(ai[am]+bi[am],&ca);
5060:       ci[0] = 0;
5061:       for (i=0,k=0; i<am; i++) {
5062:         const PetscInt ncols_o = bi[i+1] - bi[i];
5063:         const PetscInt ncols_d = ai[i+1] - ai[i];
5064:         ci[i+1] = ci[i] + ncols_o + ncols_d;
5065:         /* diagonal portion of A */
5066:         for (j=0; j<ncols_d; j++,k++) {
5067:           cj[k] = *aj++;
5068:           ca[k] = *aa++;
5069:         }
5070:         /* off-diagonal portion of A */
5071:         for (j=0; j<ncols_o; j++,k++) {
5072:           cj[k] = dn + *bj++;
5073:           ca[k] = *ba++;
5074:         }
5075:       }
5076:       /* put together the new matrix */
5077:       MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,dn+on,ci,cj,ca,A_loc);
5078:       /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
5079:       /* Since these are PETSc arrays, change flags to free them as necessary. */
5080:       c          = (Mat_SeqAIJ*)(*A_loc)->data;
5081:       c->free_a  = PETSC_TRUE;
5082:       c->free_ij = PETSC_TRUE;
5083:       c->nonew   = 0;
5084:       MatSetType(*A_loc,((PetscObject)Ad)->type_name);
5085:     } else if (scall == MAT_REUSE_MATRIX) {
5086:       MatSeqAIJGetArrayWrite(*A_loc,&ca);
5087:       for (i=0; i<am; i++) {
5088:         const PetscInt ncols_d = ai[i+1] - ai[i];
5089:         const PetscInt ncols_o = bi[i+1] - bi[i];
5090:         /* diagonal portion of A */
5091:         for (j=0; j<ncols_d; j++) *ca++ = *aa++;
5092:         /* off-diagonal portion of A */
5093:         for (j=0; j<ncols_o; j++) *ca++ = *ba++;
5094:       }
5095:       MatSeqAIJRestoreArrayWrite(*A_loc,&ca);
5096:     } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
5097:     MatSeqAIJRestoreArrayRead(Ad,&aa);
5098:     MatSeqAIJRestoreArrayRead(Ao,&ba);
5099:     if (glob) {
5100:       PetscInt cst, *gidx;

5102:       MatGetOwnershipRangeColumn(A,&cst,NULL);
5103:       PetscMalloc1(dn+on,&gidx);
5104:       for (i=0; i<dn; i++) gidx[i]    = cst + i;
5105:       for (i=0; i<on; i++) gidx[i+dn] = cmap[i];
5106:       ISCreateGeneral(PetscObjectComm((PetscObject)Ad),dn+on,gidx,PETSC_OWN_POINTER,glob);
5107:     }
5108:   }
5109:   PetscLogEventEnd(MAT_Getlocalmat,A,0,0,0);
5110:   return 0;
5111: }

5113: /*@C
5114:      MatMPIAIJGetLocalMatCondensed - Creates a SeqAIJ matrix from a MATMPIAIJ matrix by taking all its local rows and NON-ZERO columns

5116:     Not Collective

5118:    Input Parameters:
5119: +    A - the matrix
5120: .    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
5121: -    row, col - index sets of rows and columns to extract (or NULL)

5123:    Output Parameter:
5124: .    A_loc - the local sequential matrix generated
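
   A minimal sketch (error checking omitted) that extracts all local rows and only
   the nonzero columns:
.vb
     Mat A_loc;
     MatMPIAIJGetLocalMatCondensed(A,MAT_INITIAL_MATRIX,NULL,NULL,&A_loc);
     MatDestroy(&A_loc);
.ve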

5126:     Level: developer

5128: .seealso: MatGetOwnershipRange(), MatMPIAIJGetLocalMat()

5130: @*/
5131: PetscErrorCode MatMPIAIJGetLocalMatCondensed(Mat A,MatReuse scall,IS *row,IS *col,Mat *A_loc)
5132: {
5133:   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
5134:   PetscInt       i,start,end,ncols,nzA,nzB,*cmap,imark,*idx;
5135:   IS             isrowa,iscola;
5136:   Mat            *aloc;
5137:   PetscBool      match;

5139:   PetscObjectTypeCompare((PetscObject)A,MATMPIAIJ,&match);
5141:   PetscLogEventBegin(MAT_Getlocalmatcondensed,A,0,0,0);
5142:   if (!row) {
5143:     start = A->rmap->rstart; end = A->rmap->rend;
5144:     ISCreateStride(PETSC_COMM_SELF,end-start,start,1,&isrowa);
5145:   } else {
5146:     isrowa = *row;
5147:   }
5148:   if (!col) {
5149:     start = A->cmap->rstart;
5150:     cmap  = a->garray;
5151:     nzA   = a->A->cmap->n;
5152:     nzB   = a->B->cmap->n;
5153:     PetscMalloc1(nzA+nzB, &idx);
5154:     ncols = 0;
5155:     for (i=0; i<nzB; i++) {
5156:       if (cmap[i] < start) idx[ncols++] = cmap[i];
5157:       else break;
5158:     }
5159:     imark = i;
5160:     for (i=0; i<nzA; i++) idx[ncols++] = start + i;
5161:     for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i];
5162:     ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&iscola);
5163:   } else {
5164:     iscola = *col;
5165:   }
5166:   if (scall != MAT_INITIAL_MATRIX) {
5167:     PetscMalloc1(1,&aloc);
5168:     aloc[0] = *A_loc;
5169:   }
5170:   MatCreateSubMatrices(A,1,&isrowa,&iscola,scall,&aloc);
5171:   if (!col) { /* attach global id of condensed columns */
5172:     PetscObjectCompose((PetscObject)aloc[0],"_petsc_GetLocalMatCondensed_iscol",(PetscObject)iscola);
5173:   }
5174:   *A_loc = aloc[0];
5175:   PetscFree(aloc);
5176:   if (!row) {
5177:     ISDestroy(&isrowa);
5178:   }
5179:   if (!col) {
5180:     ISDestroy(&iscola);
5181:   }
5182:   PetscLogEventEnd(MAT_Getlocalmatcondensed,A,0,0,0);
5183:   return 0;
5184: }

5186: /*
5187:  * Create a sequential AIJ matrix based on row indices; an entire row (all of its columns) is extracted once the row is matched.
5188:  * Rows can be local or remote. The routine is designed to be scalable in memory, so that nothing is based
5189:  * on a global size.
5190:  * */
5191: PetscErrorCode MatCreateSeqSubMatrixWithRows_Private(Mat P,IS rows,Mat *P_oth)
5192: {
5193:   Mat_MPIAIJ               *p=(Mat_MPIAIJ*)P->data;
5194:   Mat_SeqAIJ               *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data,*p_oth;
5195:   PetscInt                 plocalsize,nrows,*ilocal,*oilocal,i,lidx,*nrcols,*nlcols,ncol;
5196:   PetscMPIInt              owner;
5197:   PetscSFNode              *iremote,*oiremote;
5198:   const PetscInt           *lrowindices;
5199:   PetscSF                  sf,osf;
5200:   PetscInt                 pcstart,*roffsets,*loffsets,*pnnz,j;
5201:   PetscInt                 ontotalcols,dntotalcols,ntotalcols,nout;
5202:   MPI_Comm                 comm;
5203:   ISLocalToGlobalMapping   mapping;
5204:   const PetscScalar        *pd_a,*po_a;

5206:   PetscObjectGetComm((PetscObject)P,&comm);
5207:   /* plocalsize is the number of roots
5208:    * nrows is the number of leaves
5209:    * */
5210:   MatGetLocalSize(P,&plocalsize,NULL);
5211:   ISGetLocalSize(rows,&nrows);
5212:   PetscCalloc1(nrows,&iremote);
5213:   ISGetIndices(rows,&lrowindices);
5214:   for (i=0;i<nrows;i++) {
5215:     /* Find a remote index and an owner for a row
5216:      * The row could be local or remote
5217:      * */
5218:     owner = 0;
5219:     lidx  = 0;
5220:     PetscLayoutFindOwnerIndex(P->rmap,lrowindices[i],&owner,&lidx);
5221:     iremote[i].index = lidx;
5222:     iremote[i].rank  = owner;
5223:   }
5224:   /* Create SF to communicate how many nonzero columns for each row */
5225:   PetscSFCreate(comm,&sf);
5226:   /* The SF will figure out the number of nonzero columns for each row, and their
5227:    * offsets
5228:    * */
5229:   PetscSFSetGraph(sf,plocalsize,nrows,NULL,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);
5230:   PetscSFSetFromOptions(sf);
5231:   PetscSFSetUp(sf);

5233:   PetscCalloc1(2*(plocalsize+1),&roffsets);
5234:   PetscCalloc1(2*plocalsize,&nrcols);
5235:   PetscCalloc1(nrows,&pnnz);
5236:   roffsets[0] = 0;
5237:   roffsets[1] = 0;
5238:   for (i=0;i<plocalsize;i++) {
5239:     /* diag */
5240:     nrcols[i*2+0] = pd->i[i+1] - pd->i[i];
5241:     /* off diag */
5242:     nrcols[i*2+1] = po->i[i+1] - po->i[i];
5243:     /* compute offsets so that we know the relative location of each row */
5244:     roffsets[(i+1)*2+0] = roffsets[i*2+0] + nrcols[i*2+0];
5245:     roffsets[(i+1)*2+1] = roffsets[i*2+1] + nrcols[i*2+1];
5246:   }
5247:   PetscCalloc1(2*nrows,&nlcols);
5248:   PetscCalloc1(2*nrows,&loffsets);
5249:   /* 'r' means root, and 'l' means leaf */
5250:   PetscSFBcastBegin(sf,MPIU_2INT,nrcols,nlcols,MPI_REPLACE);
5251:   PetscSFBcastBegin(sf,MPIU_2INT,roffsets,loffsets,MPI_REPLACE);
5252:   PetscSFBcastEnd(sf,MPIU_2INT,nrcols,nlcols,MPI_REPLACE);
5253:   PetscSFBcastEnd(sf,MPIU_2INT,roffsets,loffsets,MPI_REPLACE);
5254:   PetscSFDestroy(&sf);
5255:   PetscFree(roffsets);
5256:   PetscFree(nrcols);
5257:   dntotalcols = 0;
5258:   ontotalcols = 0;
5259:   ncol = 0;
5260:   for (i=0;i<nrows;i++) {
5261:     pnnz[i] = nlcols[i*2+0] + nlcols[i*2+1];
5262:     ncol = PetscMax(pnnz[i],ncol);
5263:     /* diag */
5264:     dntotalcols += nlcols[i*2+0];
5265:     /* off diag */
5266:     ontotalcols += nlcols[i*2+1];
5267:   }
5268:   /* We do not need to figure out the exact number of columns
5269:    * since all the calculations will be done by going through the raw data
5270:    * */
5271:   MatCreateSeqAIJ(PETSC_COMM_SELF,nrows,ncol,0,pnnz,P_oth);
5272:   MatSetUp(*P_oth);
5273:   PetscFree(pnnz);
5274:   p_oth = (Mat_SeqAIJ*) (*P_oth)->data;
5275:   /* diag */
5276:   PetscCalloc1(dntotalcols,&iremote);
5277:   /* off diag */
5278:   PetscCalloc1(ontotalcols,&oiremote);
5279:   /* diag */
5280:   PetscCalloc1(dntotalcols,&ilocal);
5281:   /* off diag */
5282:   PetscCalloc1(ontotalcols,&oilocal);
5283:   dntotalcols = 0;
5284:   ontotalcols = 0;
5285:   ntotalcols  = 0;
5286:   for (i=0;i<nrows;i++) {
5287:     owner = 0;
5288:     PetscLayoutFindOwnerIndex(P->rmap,lrowindices[i],&owner,NULL);
5289:     /* Set iremote for diag matrix */
5290:     for (j=0;j<nlcols[i*2+0];j++) {
5291:       iremote[dntotalcols].index   = loffsets[i*2+0] + j;
5292:       iremote[dntotalcols].rank    = owner;
5293:       /* P_oth is SeqAIJ, so ilocal needs to point to the first part of memory */
5294:       ilocal[dntotalcols++]        = ntotalcols++;
5295:     }
5296:     /* off diag */
5297:     for (j=0;j<nlcols[i*2+1];j++) {
5298:       oiremote[ontotalcols].index   = loffsets[i*2+1] + j;
5299:       oiremote[ontotalcols].rank    = owner;
5300:       oilocal[ontotalcols++]        = ntotalcols++;
5301:     }
5302:   }
5303:   ISRestoreIndices(rows,&lrowindices);
5304:   PetscFree(loffsets);
5305:   PetscFree(nlcols);
5306:   PetscSFCreate(comm,&sf);
5307:   /* P serves as the roots and P_oth serves as the leaves
5308:    * Diag matrix
5309:    * */
5310:   PetscSFSetGraph(sf,pd->i[plocalsize],dntotalcols,ilocal,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);
5311:   PetscSFSetFromOptions(sf);
5312:   PetscSFSetUp(sf);

5314:   PetscSFCreate(comm,&osf);
5315:   /* Off diag */
5316:   PetscSFSetGraph(osf,po->i[plocalsize],ontotalcols,oilocal,PETSC_OWN_POINTER,oiremote,PETSC_OWN_POINTER);
5317:   PetscSFSetFromOptions(osf);
5318:   PetscSFSetUp(osf);
5319:   MatSeqAIJGetArrayRead(p->A,&pd_a);
5320:   MatSeqAIJGetArrayRead(p->B,&po_a);
5321:   /* We operate on the matrix internal data for saving memory */
5322:   PetscSFBcastBegin(sf,MPIU_SCALAR,pd_a,p_oth->a,MPI_REPLACE);
5323:   PetscSFBcastBegin(osf,MPIU_SCALAR,po_a,p_oth->a,MPI_REPLACE);
5324:   MatGetOwnershipRangeColumn(P,&pcstart,NULL);
5325:   /* Convert to global indices for diag matrix */
5326:   for (i=0;i<pd->i[plocalsize];i++) pd->j[i] += pcstart;
5327:   PetscSFBcastBegin(sf,MPIU_INT,pd->j,p_oth->j,MPI_REPLACE);
5328:   /* We want P_oth to store global indices */
5329:   ISLocalToGlobalMappingCreate(comm,1,p->B->cmap->n,p->garray,PETSC_COPY_VALUES,&mapping);
5330:   /* Use memory scalable approach */
5331:   ISLocalToGlobalMappingSetType(mapping,ISLOCALTOGLOBALMAPPINGHASH);
5332:   ISLocalToGlobalMappingApply(mapping,po->i[plocalsize],po->j,po->j);
5333:   PetscSFBcastBegin(osf,MPIU_INT,po->j,p_oth->j,MPI_REPLACE);
5334:   PetscSFBcastEnd(sf,MPIU_INT,pd->j,p_oth->j,MPI_REPLACE);
5335:   /* Convert back to local indices */
5336:   for (i=0;i<pd->i[plocalsize];i++) pd->j[i] -= pcstart;
5337:   PetscSFBcastEnd(osf,MPIU_INT,po->j,p_oth->j,MPI_REPLACE);
5338:   nout = 0;
5339:   ISGlobalToLocalMappingApply(mapping,IS_GTOLM_DROP,po->i[plocalsize],po->j,&nout,po->j);
5341:   ISLocalToGlobalMappingDestroy(&mapping);
5342:   /* Exchange values */
5343:   PetscSFBcastEnd(sf,MPIU_SCALAR,pd_a,p_oth->a,MPI_REPLACE);
5344:   PetscSFBcastEnd(osf,MPIU_SCALAR,po_a,p_oth->a,MPI_REPLACE);
5345:   MatSeqAIJRestoreArrayRead(p->A,&pd_a);
5346:   MatSeqAIJRestoreArrayRead(p->B,&po_a);
5347:   /* Stop PETSc from shrinking memory */
5348:   for (i=0;i<nrows;i++) p_oth->ilen[i] = p_oth->imax[i];
5349:   MatAssemblyBegin(*P_oth,MAT_FINAL_ASSEMBLY);
5350:   MatAssemblyEnd(*P_oth,MAT_FINAL_ASSEMBLY);
5351:   /* Attach PetscSF objects to P_oth so that we can reuse it later */
5352:   PetscObjectCompose((PetscObject)*P_oth,"diagsf",(PetscObject)sf);
5353:   PetscObjectCompose((PetscObject)*P_oth,"offdiagsf",(PetscObject)osf);
5354:   PetscSFDestroy(&sf);
5355:   PetscSFDestroy(&osf);
5356:   return 0;
5357: }

5359: /*
5360:  * Creates a SeqAIJ matrix by taking the rows of B that correspond to the nonzero columns of the local A
5361:  * This supports MPIAIJ and MAIJ
5362:  * */
5363: PetscErrorCode MatGetBrowsOfAcols_MPIXAIJ(Mat A,Mat P,PetscInt dof,MatReuse reuse,Mat *P_oth)
5364: {
5365:   Mat_MPIAIJ            *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data;
5366:   Mat_SeqAIJ            *p_oth;
5367:   IS                    rows,map;
5368:   PetscHMapI            hamp;
5369:   PetscInt              i,htsize,*rowindices,off,*mapping,key,count;
5370:   MPI_Comm              comm;
5371:   PetscSF               sf,osf;
5372:   PetscBool             has;

5374:   PetscObjectGetComm((PetscObject)A,&comm);
5375:   PetscLogEventBegin(MAT_GetBrowsOfAocols,A,P,0,0);
5376:   /* If it is the first time, create an index set of off-diag nonzero columns of A,
5377:    *  and then create a submatrix (that often is an overlapping matrix)
5378:    * */
5379:   if (reuse == MAT_INITIAL_MATRIX) {
5380:     /* Use a hash table to figure out unique keys */
5381:     PetscHMapICreate(&hamp);
5382:     PetscHMapIResize(hamp,a->B->cmap->n);
5383:     PetscCalloc1(a->B->cmap->n,&mapping);
5384:     count = 0;
5385:     /* Assume that a->garray is sorted; otherwise the following does not make sense */
5386:     for (i=0;i<a->B->cmap->n;i++) {
5387:       key  = a->garray[i]/dof;
5388:       PetscHMapIHas(hamp,key,&has);
5389:       if (!has) {
5390:         mapping[i] = count;
5391:         PetscHMapISet(hamp,key,count++);
5392:       } else {
5393:         /* Current 'i' has the same value as in the previous step */
5394:         mapping[i] = count-1;
5395:       }
5396:     }
5397:     ISCreateGeneral(comm,a->B->cmap->n,mapping,PETSC_OWN_POINTER,&map);
5398:     PetscHMapIGetSize(hamp,&htsize);
5400:     PetscCalloc1(htsize,&rowindices);
5401:     off = 0;
5402:     PetscHMapIGetKeys(hamp,&off,rowindices);
5403:     PetscHMapIDestroy(&hamp);
5404:     PetscSortInt(htsize,rowindices);
5405:     ISCreateGeneral(comm,htsize,rowindices,PETSC_OWN_POINTER,&rows);
5406:     /* In case the matrix was already created and the user wants to recreate it */
5407:     MatDestroy(P_oth);
5408:     MatCreateSeqSubMatrixWithRows_Private(P,rows,P_oth);
5409:     PetscObjectCompose((PetscObject)*P_oth,"aoffdiagtopothmapping",(PetscObject)map);
5410:     ISDestroy(&map);
5411:     ISDestroy(&rows);
5412:   } else if (reuse == MAT_REUSE_MATRIX) {
5413:     /* If the matrix was already created, we simply update the values using the SF objects
5414:      * that were attached to the matrix earlier.
5415:      */
5416:     const PetscScalar *pd_a,*po_a;

5418:     PetscObjectQuery((PetscObject)*P_oth,"diagsf",(PetscObject*)&sf);
5419:     PetscObjectQuery((PetscObject)*P_oth,"offdiagsf",(PetscObject*)&osf);
5421:     p_oth = (Mat_SeqAIJ*) (*P_oth)->data;
5422:     /* Update values in place */
5423:     MatSeqAIJGetArrayRead(p->A,&pd_a);
5424:     MatSeqAIJGetArrayRead(p->B,&po_a);
5425:     PetscSFBcastBegin(sf,MPIU_SCALAR,pd_a,p_oth->a,MPI_REPLACE);
5426:     PetscSFBcastBegin(osf,MPIU_SCALAR,po_a,p_oth->a,MPI_REPLACE);
5427:     PetscSFBcastEnd(sf,MPIU_SCALAR,pd_a,p_oth->a,MPI_REPLACE);
5428:     PetscSFBcastEnd(osf,MPIU_SCALAR,po_a,p_oth->a,MPI_REPLACE);
5429:     MatSeqAIJRestoreArrayRead(p->A,&pd_a);
5430:     MatSeqAIJRestoreArrayRead(p->B,&po_a);
5431:   } else SETERRQ(comm,PETSC_ERR_ARG_UNKNOWN_TYPE,"Unknown reuse type");
5432:   PetscLogEventEnd(MAT_GetBrowsOfAocols,A,P,0,0);
5433:   return 0;
5434: }

5436: /*@C
5437:   MatGetBrowsOfAcols - Creates a SeqAIJ matrix by taking the rows of B that correspond to the nonzero columns of the local A

5439:   Collective on Mat

5441:   Input Parameters:
5442: + A - the first matrix in mpiaij format
5443: . B - the second matrix in mpiaij format
5444: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

5446:   Output Parameters:
5447: + rowb - On input, index set of rows of B to extract (or NULL); modified on output
5448: . colb - On input, index set of columns of B to extract (or NULL); modified on output
5449: - B_seq - the sequential matrix generated
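
  A minimal sketch (illustrative variables; error checking omitted), reusing the
  index sets and B_seq on a later call:
.vb
    IS  rowb = NULL,colb = NULL;
    Mat B_seq;
    MatGetBrowsOfAcols(A,B,MAT_INITIAL_MATRIX,&rowb,&colb,&B_seq);
    /* ... the values of B change, its nonzero pattern stays the same ... */
    MatGetBrowsOfAcols(A,B,MAT_REUSE_MATRIX,&rowb,&colb,&B_seq);
    ISDestroy(&rowb); ISDestroy(&colb); MatDestroy(&B_seq);
.ve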

5451:   Level: developer

5453: @*/
5454: PetscErrorCode MatGetBrowsOfAcols(Mat A,Mat B,MatReuse scall,IS *rowb,IS *colb,Mat *B_seq)
5455: {
5456:   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
5457:   PetscInt       *idx,i,start,ncols,nzA,nzB,*cmap,imark;
5458:   IS             isrowb,iscolb;
5459:   Mat            *bseq=NULL;

5461:   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
5462:     SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ",%" PetscInt_FMT ")",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
5463:   }
5464:   PetscLogEventBegin(MAT_GetBrowsOfAcols,A,B,0,0);

5466:   if (scall == MAT_INITIAL_MATRIX) {
5467:     start = A->cmap->rstart;
5468:     cmap  = a->garray;
5469:     nzA   = a->A->cmap->n;
5470:     nzB   = a->B->cmap->n;
5471:     PetscMalloc1(nzA+nzB, &idx);
5472:     ncols = 0;
5473:     for (i=0; i<nzB; i++) {  /* row < local row index */
5474:       if (cmap[i] < start) idx[ncols++] = cmap[i];
5475:       else break;
5476:     }
5477:     imark = i;
5478:     for (i=0; i<nzA; i++) idx[ncols++] = start + i;  /* local rows */
5479:     for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i]; /* row > local row index */
5480:     ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&isrowb);
5481:     ISCreateStride(PETSC_COMM_SELF,B->cmap->N,0,1,&iscolb);
5482:   } else {
5484:     isrowb  = *rowb; iscolb = *colb;
5485:     PetscMalloc1(1,&bseq);
5486:     bseq[0] = *B_seq;
5487:   }
5488:   MatCreateSubMatrices(B,1,&isrowb,&iscolb,scall,&bseq);
5489:   *B_seq = bseq[0];
5490:   PetscFree(bseq);
5491:   if (!rowb) {
5492:     ISDestroy(&isrowb);
5493:   } else {
5494:     *rowb = isrowb;
5495:   }
5496:   if (!colb) {
5497:     ISDestroy(&iscolb);
5498:   } else {
5499:     *colb = iscolb;
5500:   }
5501:   PetscLogEventEnd(MAT_GetBrowsOfAcols,A,B,0,0);
5502:   return 0;
5503: }

5505: /*
5506:     MatGetBrowsOfAoCols_MPIAIJ - Creates a SeqAIJ matrix by taking the rows of B that correspond to the nonzero columns
5507:     of the OFF-DIAGONAL portion of the local A

5509:     Collective on Mat

5511:    Input Parameters:
5512: +    A,B - the matrices in mpiaij format
5513: -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

5515:    Output Parameters:
5516: +    startsj_s - starting point in B's sending j-arrays, saved for MAT_REUSE (or NULL)
5517: .    startsj_r - starting point in B's receiving j-arrays, saved for MAT_REUSE (or NULL)
5518: .    bufa_ptr - array for sending matrix values, saved for MAT_REUSE (or NULL)
5519: -    B_oth - the sequential matrix generated with size aBn=a->B->cmap->n by B->cmap->N

5521:     Developer Notes: This directly accesses information inside the VecScatter associated with the matrix-vector product
5522:      for this matrix. This is not desirable.

5524:     Level: developer

5526: */
5527: PetscErrorCode MatGetBrowsOfAoCols_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscInt **startsj_s,PetscInt **startsj_r,MatScalar **bufa_ptr,Mat *B_oth)
5528: {
5529:   Mat_MPIAIJ             *a=(Mat_MPIAIJ*)A->data;
5530:   Mat_SeqAIJ             *b_oth;
5531:   VecScatter             ctx;
5532:   MPI_Comm               comm;
5533:   const PetscMPIInt      *rprocs,*sprocs;
5534:   const PetscInt         *srow,*rstarts,*sstarts;
5535:   PetscInt               *rowlen,*bufj,*bufJ,ncols = 0,aBn=a->B->cmap->n,row,*b_othi,*b_othj,*rvalues=NULL,*svalues=NULL,*cols,sbs,rbs;
5536:   PetscInt               i,j,k=0,l,ll,nrecvs,nsends,nrows,*rstartsj = NULL,*sstartsj,len;
5537:   PetscScalar            *b_otha,*bufa,*bufA,*vals = NULL;
5538:   MPI_Request            *reqs = NULL,*rwaits = NULL,*swaits = NULL;
5539:   PetscMPIInt            size,tag,rank,nreqs;

5541:   PetscObjectGetComm((PetscObject)A,&comm);
5542:   MPI_Comm_size(comm,&size);

5544:   if (PetscUnlikely(A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend)) {
5545:     SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ",%" PetscInt_FMT ")",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
5546:   }
5547:   PetscLogEventBegin(MAT_GetBrowsOfAocols,A,B,0,0);
5548:   MPI_Comm_rank(comm,&rank);

5550:   if (size == 1) {
5551:     startsj_s = NULL;
5552:     bufa_ptr  = NULL;
5553:     *B_oth    = NULL;
5554:     return 0;
5555:   }

5557:   ctx = a->Mvctx;
5558:   tag = ((PetscObject)ctx)->tag;

5560:   VecScatterGetRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&srow,&sprocs,&sbs);
5561:   /* rprocs[] must be ordered so that indices received from them are ordered in rvalues[], which is key to algorithms used in this subroutine */
5562:   VecScatterGetRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL/*indices not needed*/,&rprocs,&rbs);
5563:   PetscMPIIntCast(nsends+nrecvs,&nreqs);
5564:   PetscMalloc1(nreqs,&reqs);
5565:   rwaits = reqs;
5566:   swaits = reqs + nrecvs;

5568:   if (!startsj_s || !bufa_ptr) scall = MAT_INITIAL_MATRIX;
5569:   if (scall == MAT_INITIAL_MATRIX) {
5570:     /* i-array */
5571:     /*---------*/
5572:     /*  post receives */
5573:     if (nrecvs) PetscMalloc1(rbs*(rstarts[nrecvs] - rstarts[0]),&rvalues); /* rstarts can be NULL when nrecvs=0 */
5574:     for (i=0; i<nrecvs; i++) {
5575:       rowlen = rvalues + rstarts[i]*rbs;
5576:       nrows  = (rstarts[i+1]-rstarts[i])*rbs; /* num of indices to be received */
5577:       MPI_Irecv(rowlen,nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);
5578:     }

5580:     /* pack the outgoing message */
5581:     PetscMalloc2(nsends+1,&sstartsj,nrecvs+1,&rstartsj);

5583:     sstartsj[0] = 0;
5584:     rstartsj[0] = 0;
5585:     len         = 0; /* total length of j or a array to be sent */
5586:     if (nsends) {
5587:       k    = sstarts[0]; /* ATTENTION: sstarts[0] and rstarts[0] are not necessarily zero */
5588:       PetscMalloc1(sbs*(sstarts[nsends]-sstarts[0]),&svalues);
5589:     }
5590:     for (i=0; i<nsends; i++) {
5591:       rowlen = svalues + (sstarts[i]-sstarts[0])*sbs;
5592:       nrows  = sstarts[i+1]-sstarts[i]; /* num of block rows */
5593:       for (j=0; j<nrows; j++) {
5594:         row = srow[k] + B->rmap->range[rank]; /* global row idx */
5595:         for (l=0; l<sbs; l++) {
5596:           MatGetRow_MPIAIJ(B,row+l,&ncols,NULL,NULL); /* rowlength */

5598:           rowlen[j*sbs+l] = ncols;

5600:           len += ncols;
5601:           MatRestoreRow_MPIAIJ(B,row+l,&ncols,NULL,NULL);
5602:         }
5603:         k++;
5604:       }
5605:       MPI_Isend(rowlen,nrows*sbs,MPIU_INT,sprocs[i],tag,comm,swaits+i);

5607:       sstartsj[i+1] = len;  /* starting point of (i+1)-th outgoing msg in bufj and bufa */
5608:     }
5609:     /* recvs and sends of i-array are completed */
5610:     if (nreqs) MPI_Waitall(nreqs,reqs,MPI_STATUSES_IGNORE);
5611:     PetscFree(svalues);

5613:     /* allocate buffers for sending j and a arrays */
5614:     PetscMalloc1(len+1,&bufj);
5615:     PetscMalloc1(len+1,&bufa);

5617:     /* create i-array of B_oth */
5618:     PetscMalloc1(aBn+2,&b_othi);

5620:     b_othi[0] = 0;
5621:     len       = 0; /* total length of j or a array to be received */
5622:     k         = 0;
5623:     for (i=0; i<nrecvs; i++) {
5624:       rowlen = rvalues + (rstarts[i]-rstarts[0])*rbs;
5625:       nrows  = (rstarts[i+1]-rstarts[i])*rbs; /* num of rows to be received */
5626:       for (j=0; j<nrows; j++) {
5627:         b_othi[k+1] = b_othi[k] + rowlen[j];
5628:         PetscIntSumError(rowlen[j],len,&len);
5629:         k++;
5630:       }
5631:       rstartsj[i+1] = len; /* starting point of (i+1)-th incoming msg in bufj and bufa */
5632:     }
5633:     PetscFree(rvalues);

5635:     /* allocate space for j and a arrays of B_oth */
5636:     PetscMalloc1(b_othi[aBn]+1,&b_othj);
5637:     PetscMalloc1(b_othi[aBn]+1,&b_otha);

5639:     /* j-array */
5640:     /*---------*/
5641:     /*  post receives of j-array */
5642:     for (i=0; i<nrecvs; i++) {
5643:       nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
5644:       MPI_Irecv(b_othj+rstartsj[i],nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);
5645:     }

5647:     /* pack the outgoing message j-array */
5648:     if (nsends) k = sstarts[0];
5649:     for (i=0; i<nsends; i++) {
5650:       nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
5651:       bufJ  = bufj+sstartsj[i];
5652:       for (j=0; j<nrows; j++) {
5653:         row = srow[k++] + B->rmap->range[rank];  /* global row idx */
5654:         for (ll=0; ll<sbs; ll++) {
5655:           MatGetRow_MPIAIJ(B,row+ll,&ncols,&cols,NULL);
5656:           for (l=0; l<ncols; l++) {
5657:             *bufJ++ = cols[l];
5658:           }
5659:           MatRestoreRow_MPIAIJ(B,row+ll,&ncols,&cols,NULL);
5660:         }
5661:       }
5662:       MPI_Isend(bufj+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_INT,sprocs[i],tag,comm,swaits+i);
5663:     }

5665:     /* recvs and sends of j-array are completed */
5666:     if (nreqs) MPI_Waitall(nreqs,reqs,MPI_STATUSES_IGNORE);
5667:   } else if (scall == MAT_REUSE_MATRIX) {
5668:     sstartsj = *startsj_s;
5669:     rstartsj = *startsj_r;
5670:     bufa     = *bufa_ptr;
5671:     b_oth    = (Mat_SeqAIJ*)(*B_oth)->data;
5672:     MatSeqAIJGetArrayWrite(*B_oth,&b_otha);
5673:   } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Unsupported MatReuse value; must be MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX");

5675:   /* a-array */
5676:   /*---------*/
5677:   /*  post receives of a-array */
5678:   for (i=0; i<nrecvs; i++) {
5679:     nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
5680:     MPI_Irecv(b_otha+rstartsj[i],nrows,MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);
5681:   }

5683:   /* pack the outgoing message a-array */
5684:   if (nsends) k = sstarts[0];
5685:   for (i=0; i<nsends; i++) {
5686:     nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
5687:     bufA  = bufa+sstartsj[i];
5688:     for (j=0; j<nrows; j++) {
5689:       row = srow[k++] + B->rmap->range[rank];  /* global row idx */
5690:       for (ll=0; ll<sbs; ll++) {
5691:         MatGetRow_MPIAIJ(B,row+ll,&ncols,NULL,&vals);
5692:         for (l=0; l<ncols; l++) {
5693:           *bufA++ = vals[l];
5694:         }
5695:         MatRestoreRow_MPIAIJ(B,row+ll,&ncols,NULL,&vals);
5696:       }
5697:     }
5698:     MPI_Isend(bufa+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);
5699:   }
5700:   /* recvs and sends of a-array are completed */
5701:   if (nreqs) MPI_Waitall(nreqs,reqs,MPI_STATUSES_IGNORE);
5702:   PetscFree(reqs);

5704:   if (scall == MAT_INITIAL_MATRIX) {
5705:     /* put together the new matrix */
5706:     MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,aBn,B->cmap->N,b_othi,b_othj,b_otha,B_oth);

5708:     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
5709:     /* Since these are PETSc arrays, change flags to free them as necessary. */
5710:     b_oth          = (Mat_SeqAIJ*)(*B_oth)->data;
5711:     b_oth->free_a  = PETSC_TRUE;
5712:     b_oth->free_ij = PETSC_TRUE;
5713:     b_oth->nonew   = 0;

5715:     PetscFree(bufj);
5716:     if (!startsj_s || !bufa_ptr) {
5717:       PetscFree2(sstartsj,rstartsj);
5718:       PetscFree(bufa);
5719:     } else {
5720:       *startsj_s = sstartsj;
5721:       *startsj_r = rstartsj;
5722:       *bufa_ptr  = bufa;
5723:     }
5724:   } else if (scall == MAT_REUSE_MATRIX) {
5725:     MatSeqAIJRestoreArrayWrite(*B_oth,&b_otha);
5726:   }

5728:   VecScatterRestoreRemote_Private(ctx,PETSC_TRUE,&nsends,&sstarts,&srow,&sprocs,&sbs);
5729:   VecScatterRestoreRemoteOrdered_Private(ctx,PETSC_FALSE,&nrecvs,&rstarts,NULL,&rprocs,&rbs);
5730:   PetscLogEventEnd(MAT_GetBrowsOfAocols,A,B,0,0);
5731:   return 0;
5732: }
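
/* Illustrative note (not part of the library source): the i-array exchange above ships per-row
   nonzero counts, which the receiving side turns into the CSR row pointer b_othi[] by a running
   sum.  A tiny made-up example of that step:

       received rowlen[] = {2, 0, 3}        row lengths of three incoming rows
       b_othi[]          = {0, 2, 2, 5}     b_othi[k+1] = b_othi[k] + rowlen[k]

   Row k of B_oth then occupies positions [b_othi[k], b_othi[k+1]) of b_othj[]/b_otha[], and
   b_othi[3] = 5 column indices/values in total arrive in the j- and a-array phases. */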

5734: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJCRL(Mat,MatType,MatReuse,Mat*);
5735: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJPERM(Mat,MatType,MatReuse,Mat*);
5736: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJSELL(Mat,MatType,MatReuse,Mat*);
5737: #if defined(PETSC_HAVE_MKL_SPARSE)
5738: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJMKL(Mat,MatType,MatReuse,Mat*);
5739: #endif
5740: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIBAIJ(Mat,MatType,MatReuse,Mat*);
5741: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPISBAIJ(Mat,MatType,MatReuse,Mat*);
5742: #if defined(PETSC_HAVE_ELEMENTAL)
5743: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_Elemental(Mat,MatType,MatReuse,Mat*);
5744: #endif
5745: #if defined(PETSC_HAVE_SCALAPACK)
5746: PETSC_INTERN PetscErrorCode MatConvert_AIJ_ScaLAPACK(Mat,MatType,MatReuse,Mat*);
5747: #endif
5748: #if defined(PETSC_HAVE_HYPRE)
5749: PETSC_INTERN PetscErrorCode MatConvert_AIJ_HYPRE(Mat,MatType,MatReuse,Mat*);
5750: #endif
5751: #if defined(PETSC_HAVE_CUDA)
5752: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJCUSPARSE(Mat,MatType,MatReuse,Mat*);
5753: #endif
5754: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
5755: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJKokkos(Mat,MatType,MatReuse,Mat*);
5756: #endif
5757: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPISELL(Mat,MatType,MatReuse,Mat*);
5758: PETSC_INTERN PetscErrorCode MatConvert_XAIJ_IS(Mat,MatType,MatReuse,Mat*);
5759: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_IS_XAIJ(Mat);

5761: /*
5762:     Computes C = (B'*A')' since computing A*B directly (dense times sparse) is untenable

5764:                n                       p                          p
5765:         [             ]       [             ]         [                 ]
5766:       m [      A      ]  *  n [       B     ]   =   m [         C       ]
5767:         [             ]       [             ]         [                 ]

5769: */
5770: static PetscErrorCode MatMatMultNumeric_MPIDense_MPIAIJ(Mat A,Mat B,Mat C)
5771: {
5772:   Mat            At,Bt,Ct;

5774:   MatTranspose(A,MAT_INITIAL_MATRIX,&At);
5775:   MatTranspose(B,MAT_INITIAL_MATRIX,&Bt);
5776:   MatMatMult(Bt,At,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&Ct);
5777:   MatDestroy(&At);
5778:   MatDestroy(&Bt);
5779:   MatTranspose(Ct,MAT_REUSE_MATRIX,&C);
5780:   MatDestroy(&Ct);
5781:   return 0;
5782: }

5784: static PetscErrorCode MatMatMultSymbolic_MPIDense_MPIAIJ(Mat A,Mat B,PetscReal fill,Mat C)
5785: {
5786:   PetscBool      cisdense;

5789:   MatSetSizes(C,A->rmap->n,B->cmap->n,A->rmap->N,B->cmap->N);
5790:   MatSetBlockSizesFromMats(C,A,B);
5791:   PetscObjectTypeCompareAny((PetscObject)C,&cisdense,MATMPIDENSE,MATMPIDENSECUDA,"");
5792:   if (!cisdense) {
5793:     MatSetType(C,((PetscObject)A)->type_name);
5794:   }
5795:   MatSetUp(C);

5797:   C->ops->matmultnumeric = MatMatMultNumeric_MPIDense_MPIAIJ;
5798:   return 0;
5799: }

5801: /* ----------------------------------------------------------------*/
5802: static PetscErrorCode MatProductSetFromOptions_MPIDense_MPIAIJ_AB(Mat C)
5803: {
5804:   Mat_Product *product = C->product;
5805:   Mat         A = product->A,B=product->B;

5807:   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend)
5808:     SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ",%" PetscInt_FMT ")",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);

5810:   C->ops->matmultsymbolic = MatMatMultSymbolic_MPIDense_MPIAIJ;
5811:   C->ops->productsymbolic = MatProductSymbolic_AB;
5812:   return 0;
5813: }

5815: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIDense_MPIAIJ(Mat C)
5816: {
5817:   Mat_Product    *product = C->product;

5819:   if (product->type == MATPRODUCT_AB) {
5820:     MatProductSetFromOptions_MPIDense_MPIAIJ_AB(C);
5821:   }
5822:   return 0;
5823: }
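
/* A minimal usage sketch for an MPIDENSE*MPIAIJ product of the kind the routines above support.
   Assumptions: the global sizes (8x16 and 16x4) and the preallocation hints are made up, the
   entry-setting/assembly calls are elided, PetscCall()/error checking is omitted, and whether
   this exact fallback is selected depends on the product implementations available.

     Mat A,B,C;
     MatCreateDense(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,8,16,NULL,&A);         dense left factor
     MatCreateAIJ(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,16,4,2,NULL,2,NULL,&B);  sparse right factor
     ... set entries of A and B, then MatAssemblyBegin/End() on both ...
     MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);   C = A*B; the fallback above computes it as (B'*A')'
     MatDestroy(&C); MatDestroy(&B); MatDestroy(&A);
*/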

5825: /* std::upper_bound(): Given a sorted array, return index of the first element in range [first,last) whose value
5826:    is greater than value, or last if there is no such element.
5827: */
5828: static inline PetscErrorCode PetscSortedIntUpperBound(PetscInt *array,PetscCount first,PetscCount last,PetscInt value,PetscCount *upper)
5829: {
5830:   PetscCount  it,step,count = last - first;

5832:   while (count > 0) {
5833:     it   = first;
5834:     step = count / 2;
5835:     it  += step;
5836:     if (!(value < array[it])) {
5837:       first  = ++it;
5838:       count -= step + 1;
5839:     } else count = step;
5840:   }
5841:   *upper = first;
5842:   return 0;
5843: }
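
/* Illustrative traces (not part of the library source), assuming array[] = {1,3,3,7}, first = 0,
   last = 4:
     value = 3  ->  *upper = 3   (array[3] == 7 is the first element greater than 3)
     value = 0  ->  *upper = 0   (every element is greater than 0)
     value = 7  ->  *upper = 4   (== last; no element is greater than 7) */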

5845: /* Merge two sets of sorted nonzero entries and return a CSR for the merged (sequential) matrix

5847:   Input Parameters:

5849:     j1,rowBegin1,rowEnd1,perm1,jmap1: describe the first set of nonzeros (Set1)
5850:     j2,rowBegin2,rowEnd2,perm2,jmap2: describe the second set of nonzeros (Set2)

5852:     mat: both sets' entries are on m rows, where m is the number of local rows of the matrix mat

5854:     For Set1, j1[] contains column indices of the nonzeros.
5855:     For the k-th row (0<=k<m), [rowBegin1[k],rowEnd1[k]) index into j1[] and point to the begin/end nonzero in row k
5856:     respectively (note rowEnd1[k] is not necessarily equal to rowBegin1[k+1]). Indices in this range of j1[] are sorted,
5857:     but might have repeats. jmap1[t+1] - jmap1[t] is the number of repeats for the t-th unique nonzero in Set1.

5859:     Similar for Set2.

5861:     This routine merges the two sets of nonzeros row by row and removes repeats.

5863:   Output Parameters: (memories are allocated by the caller)

5865:     i[],j[]: the CSR of the merged matrix, which has m rows.
5866:     imap1[]: the k-th unique nonzero in Set1 (k=0,1,...) corresponds to imap1[k]-th unique nonzero in the merged matrix.
5867:     imap2[]: similar to imap1[], but for Set2.
5868:     Note we order nonzeros row-by-row and from left to right.
5869: */
5870: static PetscErrorCode MatMergeEntries_Internal(Mat mat,const PetscInt j1[],const PetscInt j2[],const PetscCount rowBegin1[],const PetscCount rowEnd1[],
5871:   const PetscCount rowBegin2[],const PetscCount rowEnd2[],const PetscCount jmap1[],const PetscCount jmap2[],
5872:   PetscCount imap1[],PetscCount imap2[],PetscInt i[],PetscInt j[])
5873: {
5874:   PetscInt       r,m; /* Row index of mat */
5875:   PetscCount     t,t1,t2,b1,e1,b2,e2;

5877:   MatGetLocalSize(mat,&m,NULL);
5878:   t1   = t2 = t = 0; /* Counters of unique nonzeros in Set1, Set2 and the merged set, respectively */
5879:   i[0] = 0;
5880:   for (r=0; r<m; r++) { /* Do row by row merging */
5881:     b1   = rowBegin1[r];
5882:     e1   = rowEnd1[r];
5883:     b2   = rowBegin2[r];
5884:     e2   = rowEnd2[r];
5885:     while (b1 < e1 && b2 < e2) {
5886:       if (j1[b1] == j2[b2]) { /* Same column index and hence same nonzero */
5887:         j[t]      = j1[b1];
5888:         imap1[t1] = t;
5889:         imap2[t2] = t;
5890:         b1       += jmap1[t1+1] - jmap1[t1]; /* Jump to the next unique nonzero of Set1 */
5891:         b2       += jmap2[t2+1] - jmap2[t2]; /* Jump to the next unique nonzero of Set2 */
5892:         t1++; t2++; t++;
5893:       } else if (j1[b1] < j2[b2]) {
5894:         j[t]      = j1[b1];
5895:         imap1[t1] = t;
5896:         b1       += jmap1[t1+1] - jmap1[t1];
5897:         t1++; t++;
5898:       } else {
5899:         j[t]      = j2[b2];
5900:         imap2[t2] = t;
5901:         b2       += jmap2[t2+1] - jmap2[t2];
5902:         t2++; t++;
5903:       }
5904:     }
5905:     /* Merge the remaining in either j1[] or j2[] */
5906:     while (b1 < e1) {
5907:       j[t]      = j1[b1];
5908:       imap1[t1] = t;
5909:       b1       += jmap1[t1+1] - jmap1[t1];
5910:       t1++; t++;
5911:     }
5912:     while (b2 < e2) {
5913:       j[t]      = j2[b2];
5914:       imap2[t2] = t;
5915:       b2       += jmap2[t2+1] - jmap2[t2];
5916:       t2++; t++;
5917:     }
5918:     i[r+1] = t;
5919:   }
5920:   return 0;
5921: }
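
/* Illustrative trace for a single row (not part of the library source).  Suppose for row r
   (taking i[r] = 0 for simplicity) Set1 contributes the sorted column indices {2,2,5} (uniques
   {2,5}, repeat counts {2,1}) and Set2 contributes {3,5,5,5} (uniques {3,5}, repeat counts {1,3}).
   The merged row is j[] = {2,3,5}, so i[r+1] = i[r] + 3, and the maps are imap1[] = {0,2} and
   imap2[] = {1,2}: for example, the unique column 5 of Set1 becomes entry 2 (0-based) of the
   merged row. */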

5923: /* Split a set/group of local entries into two subsets: those in the diagonal block and those in the off-diagonal block

5925:   Input Parameters:
5926:     mat: an MPI matrix that provides row and column layout information for splitting. Let's say its number of local rows is m.
5927:     n,i[],j[],perm[]: there are n input entries, belonging to m rows. Row/col indices of the entries are stored in i[] and j[]
5928:       respectively, along with a permutation array perm[]. Length of the i[],j[],perm[] arrays is n.

5930:       i[] is already sorted, but within a row, j[] is not sorted and might have repeats.
5931:       i[] might contain negative indices at the beginning, which means the corresponding entries should be ignored in the splitting.

5933:   Output Parameters:
5934:     j[],perm[]: the routine needs to sort j[] within each row along with perm[].
5935:     rowBegin[],rowMid[],rowEnd[]: of length m, and the memory is preallocated and zeroed by the caller.
5936:       They contain indices pointing to j[]. For 0<=r<m, [rowBegin[r],rowMid[r]) point to begin/end entries of row r of the diagonal block,
5937:       and [rowMid[r],rowEnd[r]) point to begin/end entries of row r of the off-diagonal block.

5939:     Aperm[],Ajmap[],Atot,Annz: Arrays are allocated by this routine.
5940:       Aperm[] has length Atot and stores values from perm[] for entries belonging to the diagonal block; Atot counts
5941:         repeats (i.e., entries with the same 'i,j' pair).
5942:       Ajmap[Annz+1] stores the number of repeats of each unique entry belonging to the diagonal block. More precisely, Ajmap[t+1] - Ajmap[t]
5943:         is the number of repeats for the t-th unique entry in the diagonal block. Ajmap[0] is always 0.

5945:       Atot: number of entries belonging to the diagonal block
5946:       Annz: number of unique nonzeros belonging to the diagonal block.

5948:     Bperm[], Bjmap[], Btot, Bnnz are similar but for the off-diagonal block.

5950:     Aperm[],Bperm[],Ajmap[],Bjmap[] are allocated by this routine with PetscMalloc4(). One has to free them with a single PetscFree4(), passing the pointers in this exact order.
5951: */
5952: static PetscErrorCode MatSplitEntries_Internal(Mat mat,PetscCount n,const PetscInt i[],PetscInt j[],
5953:   PetscCount perm[],PetscCount rowBegin[],PetscCount rowMid[],PetscCount rowEnd[],
5954:   PetscCount *Atot_,PetscCount **Aperm_,PetscCount *Annz_,PetscCount **Ajmap_,
5955:   PetscCount *Btot_,PetscCount **Bperm_,PetscCount *Bnnz_,PetscCount **Bjmap_)
5956: {
5957:   PetscInt          cstart,cend,rstart,rend,row,col;
5958:   PetscCount        Atot=0,Btot=0; /* Total number of nonzeros in the diagonal and off-diagonal blocks */
5959:   PetscCount        Annz=0,Bnnz=0; /* Number of unique nonzeros in the diagonal and off-diagonal blocks */
5960:   PetscCount        k,m,p,q,r,s,mid;
5961:   PetscCount        *Aperm,*Bperm,*Ajmap,*Bjmap;

5963:   PetscLayoutGetRange(mat->rmap,&rstart,&rend);
5964:   PetscLayoutGetRange(mat->cmap,&cstart,&cend);
5965:   m    = rend - rstart;

5967:   for (k=0; k<n; k++) {if (i[k]>=0) break;} /* Skip negative rows */

5969:   /* Process [k,n): sort and partition each local row into diag and offdiag portions,
5970:      fill rowBegin[], rowMid[], rowEnd[], and count Atot, Btot, Annz, Bnnz.
5971:   */
5972:   while (k<n) {
5973:     row = i[k];
5974:     /* Entries in [k,s) are in one row. Shift diagonal block col indices so that diag is ahead of offdiag after sorting the row */
5975:     for (s=k; s<n; s++) if (i[s] != row) break;
5976:     for (p=k; p<s; p++) {
5977:       if (j[p] >= cstart && j[p] < cend) j[p] -= PETSC_MAX_INT; /* Shift diag columns to range of [-PETSC_MAX_INT, -1]  */
5978:       else PetscAssert((j[p] >= 0) && (j[p] < mat->cmap->N),PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column index %" PetscInt_FMT " is out of range",j[p]);
5979:     }
5980:     PetscSortIntWithCountArray(s-k,j+k,perm+k);
5981:     PetscSortedIntUpperBound(j,k,s,-1,&mid); /* Separate [k,s) into [k,mid) for diag and [mid,s) for offdiag */
5982:     rowBegin[row-rstart] = k;
5983:     rowMid[row-rstart]   = mid;
5984:     rowEnd[row-rstart]   = s;

5986:     /* Count nonzeros of this diag/offdiag row, which might have repeats */
5987:     Atot += mid - k;
5988:     Btot += s - mid;

5990:     /* Count unique nonzeros of this diag/offdiag row */
5991:     for (p=k; p<mid;) {
5992:       col = j[p];
5993:       do {j[p] += PETSC_MAX_INT; p++;} while (p<mid && j[p] == col); /* Revert the modified diagonal indices */
5994:       Annz++;
5995:     }

5997:     for (p=mid; p<s;) {
5998:       col = j[p];
5999:       do {p++;} while (p<s && j[p] == col);
6000:       Bnnz++;
6001:     }
6002:     k = s;
6003:   }

6005:   /* Allocation according to Atot, Btot, Annz, Bnnz */
6006:   PetscMalloc4(Atot,&Aperm,Btot,&Bperm,Annz+1,&Ajmap,Bnnz+1,&Bjmap);

6008:   /* Re-scan indices and copy diag/offdiag permutation indices to Aperm, Bperm and also fill Ajmap and Bjmap */
6009:   Ajmap[0] = Bjmap[0] = Atot = Btot = Annz = Bnnz = 0;
6010:   for (r=0; r<m; r++) {
6011:     k     = rowBegin[r];
6012:     mid   = rowMid[r];
6013:     s     = rowEnd[r];
6014:     PetscArraycpy(Aperm+Atot,perm+k,  mid-k);
6015:     PetscArraycpy(Bperm+Btot,perm+mid,s-mid);
6016:     Atot += mid - k;
6017:     Btot += s - mid;

6019:     /* Scan column indices in this row and find out how many repeats each unique nonzero has */
6020:     for (p=k; p<mid;) {
6021:       col = j[p];
6022:       q   = p;
6023:       do {p++;} while (p<mid && j[p] == col);
6024:       Ajmap[Annz+1] = Ajmap[Annz] + (p - q);
6025:       Annz++;
6026:     }

6028:     for (p=mid; p<s;) {
6029:       col = j[p];
6030:       q   = p;
6031:       do {p++;} while (p<s && j[p] == col);
6032:       Bjmap[Bnnz+1] = Bjmap[Bnnz] + (p - q);
6033:       Bnnz++;
6034:     }
6035:   }
6036:   /* Output */
6037:   *Aperm_ = Aperm;
6038:   *Annz_  = Annz;
6039:   *Atot_  = Atot;
6040:   *Ajmap_ = Ajmap;
6041:   *Bperm_ = Bperm;
6042:   *Bnnz_  = Bnnz;
6043:   *Btot_  = Btot;
6044:   *Bjmap_ = Bjmap;
6045:   return 0;
6046: }
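
/* Illustrative trace for a single row (not part of the library source).  Suppose this rank owns
   rows [4,8) and columns [4,8), and one local row arrives with the (unsorted, repeated) column
   indices {9, 4, 9, 6, 4}.  After the shift, sort, and shift-back above, the row reads
   {4,4,6,9,9}, with [rowBegin,rowMid) covering the diagonal-block part {4,4,6} and
   [rowMid,rowEnd) covering the off-diagonal part {9,9}.  The row therefore contributes
   Atot += 3, Annz += 2 with Ajmap increments {2,1}, and Btot += 2, Bnnz += 1 with Bjmap
   increment {2}; perm[] is permuted along with j[], so Aperm[]/Bperm[] record which original
   entries landed where. */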

6048: PetscErrorCode MatSetPreallocationCOO_MPIAIJ(Mat mat, PetscCount coo_n, const PetscInt coo_i[], const PetscInt coo_j[])
6049: {
6050:   MPI_Comm                  comm;
6051:   PetscMPIInt               rank,size;
6052:   PetscInt                  m,n,M,N,rstart,rend,cstart,cend; /* Sizes, indices of row/col, therefore with type PetscInt */
6053:   PetscCount                k,p,q,rem; /* Loop variables over coo arrays */
6054:   Mat_MPIAIJ                *mpiaij = (Mat_MPIAIJ*)mat->data;

6056:   PetscFree(mpiaij->garray);
6057:   VecDestroy(&mpiaij->lvec);
6058: #if defined(PETSC_USE_CTABLE)
6059:   PetscTableDestroy(&mpiaij->colmap);
6060: #else
6061:   PetscFree(mpiaij->colmap);
6062: #endif
6063:   VecScatterDestroy(&mpiaij->Mvctx);
6064:   mat->assembled = PETSC_FALSE;
6065:   mat->was_assembled = PETSC_FALSE;
6066:   MatResetPreallocationCOO_MPIAIJ(mat);

6068:   PetscObjectGetComm((PetscObject)mat,&comm);
6069:   MPI_Comm_size(comm,&size);
6070:   MPI_Comm_rank(comm,&rank);
6071:   PetscLayoutSetUp(mat->rmap);
6072:   PetscLayoutSetUp(mat->cmap);
6073:   PetscLayoutGetRange(mat->rmap,&rstart,&rend);
6074:   PetscLayoutGetRange(mat->cmap,&cstart,&cend);
6075:   MatGetLocalSize(mat,&m,&n);
6076:   MatGetSize(mat,&M,&N);

6078:   /* ---------------------------------------------------------------------------*/
6079:   /* Sort (i,j) by row along with a permutation array, so that the to-be-ignored */
6080:   /* entries come first, then local rows, then remote rows.                     */
6081:   /* ---------------------------------------------------------------------------*/
6082:   PetscCount n1 = coo_n,*perm1;
6083:   PetscInt   *i1,*j1; /* Copies of input COOs along with a permutation array */
6084:   PetscMalloc3(n1,&i1,n1,&j1,n1,&perm1);
6085:   PetscArraycpy(i1,coo_i,n1); /* Make a copy since we'll modify it */
6086:   PetscArraycpy(j1,coo_j,n1);
6087:   for (k=0; k<n1; k++) perm1[k] = k;

6089:   /* Manipulate indices so that entries with negative row or col indices will have smallest
6090:      row indices, local entries will have greater but negative row indices, and remote entries
6091:      will have positive row indices.
6092:   */
6093:   for (k=0; k<n1; k++) {
6094:     if (i1[k] < 0 || j1[k] < 0) i1[k] = PETSC_MIN_INT; /* e.g., -2^31, minimal to move them ahead */
6095:     else if (i1[k] >= rstart && i1[k] < rend) i1[k] -= PETSC_MAX_INT; /* e.g., minus 2^31-1 to shift local rows to range of [-PETSC_MAX_INT, -1] */
6097:     else if (mpiaij->donotstash) i1[k] = PETSC_MIN_INT; /* Ignore offproc entries as if they had negative indices */
6098:   }

6100:   /* Sort by row; after that, [0,k) holds the ignored entries, [k,rem) holds local rows and [rem,n1) holds remote rows */
6101:   PetscSortIntWithIntCountArrayPair(n1,i1,j1,perm1);
6102:   for (k=0; k<n1; k++) {if (i1[k] > PETSC_MIN_INT) break;} /* Advance k to the first entry we need to take care of */
6103:   PetscSortedIntUpperBound(i1,k,n1,rend-1-PETSC_MAX_INT,&rem); /* rem is the upper bound of the last local row */
6104:   for (; k<rem; k++) i1[k] += PETSC_MAX_INT; /* Revert row indices of local rows */
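  /* Illustrative arithmetic (not part of the library source), assuming 32-bit PetscInt
     (PETSC_MAX_INT = 2147483647, PETSC_MIN_INT = -2147483648) and owned rows [100,200):
     a local row 150 is temporarily stored as 150 - 2147483647 = -2147483497, an entry with a
     negative row or column becomes -2147483648, and a remote row 250 stays at 250.  Sorting thus
     groups ignored entries first, then local rows (still in order, since the shift is uniform),
     then remote rows; the upper-bound search with value rend-1-PETSC_MAX_INT = -2147483448 finds
     the end of the local block, and adding PETSC_MAX_INT back restores the local row numbers. */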

6106:   /* ---------------------------------------------------------------------------*/
6107:   /*           Split local rows into diag/offdiag portions                      */
6108:   /* ---------------------------------------------------------------------------*/
6109:   PetscCount   *rowBegin1,*rowMid1,*rowEnd1;
6110:   PetscCount   *Ajmap1,*Aperm1,*Bjmap1,*Bperm1,*Cperm1;
6111:   PetscCount   Annz1,Bnnz1,Atot1,Btot1;

6113:   PetscCalloc3(m,&rowBegin1,m,&rowMid1,m,&rowEnd1);
6114:   PetscMalloc1(n1-rem,&Cperm1);
6115:   MatSplitEntries_Internal(mat,rem,i1,j1,perm1,rowBegin1,rowMid1,rowEnd1,&Atot1,&Aperm1,&Annz1,&Ajmap1,&Btot1,&Bperm1,&Bnnz1,&Bjmap1);

6117:   /* ---------------------------------------------------------------------------*/
6118:   /*           Send remote rows to their owner                                  */
6119:   /* ---------------------------------------------------------------------------*/
6120:   /* Find which rows should be sent to which remote ranks*/
6121:   PetscInt       nsend = 0; /* Number of MPI ranks to send data to */
6122:   PetscMPIInt    *sendto; /* [nsend], storing remote ranks */
6123:   PetscInt       *nentries; /* [nsend], storing number of entries sent to remote ranks; Assume PetscInt is big enough for this count, and error if not */
6124:   const PetscInt *ranges;
6125:   PetscInt       maxNsend = size >= 128? 128 : size; /* Assume max 128 neighbors; realloc when needed */

6127:   PetscLayoutGetRanges(mat->rmap,&ranges);
6128:   PetscMalloc2(maxNsend,&sendto,maxNsend,&nentries);
6129:   for (k=rem; k<n1;) {
6130:     PetscMPIInt  owner;
6131:     PetscInt     firstRow,lastRow;

6133:     /* Locate a row range */
6134:     firstRow = i1[k]; /* first row of this owner */
6135:     PetscLayoutFindOwner(mat->rmap,firstRow,&owner);
6136:     lastRow  = ranges[owner+1]-1; /* last row of this owner */

6138:     /* Find the first index 'p' in [k,n) with i[p] belonging to next owner */
6139:     PetscSortedIntUpperBound(i1,k,n1,lastRow,&p);

6141:     /* All entries in [k,p) belong to this remote owner */
6142:     if (nsend >= maxNsend) { /* Double the remote ranks arrays if not long enough */
6143:       PetscMPIInt *sendto2;
6144:       PetscInt    *nentries2;
6145:       PetscInt    maxNsend2 = (maxNsend <= size/2) ? maxNsend*2 : size;

6147:       PetscMalloc2(maxNsend2,&sendto2,maxNsend2,&nentries2);
6148:       PetscArraycpy(sendto2,sendto,maxNsend);
6149:       PetscArraycpy(nentries2,nentries,maxNsend);
6150:       PetscFree2(sendto,nentries);
6151:       sendto      = sendto2;
6152:       nentries    = nentries2;
6153:       maxNsend    = maxNsend2;
6154:     }
6155:     sendto[nsend]   = owner;
6156:     nentries[nsend] = p - k;
6157:     PetscCountCast(p-k,&nentries[nsend]);
6158:     nsend++;
6159:     k = p;
6160:   }

6162:   /* Build 1st SF to know offsets on remote to send data */
6163:   PetscSF     sf1;
6164:   PetscInt    nroots = 1,nroots2 = 0;
6165:   PetscInt    nleaves = nsend,nleaves2 = 0;
6166:   PetscInt    *offsets;
6167:   PetscSFNode *iremote;

6169:   PetscSFCreate(comm,&sf1);
6170:   PetscMalloc1(nsend,&iremote);
6171:   PetscMalloc1(nsend,&offsets);
6172:   for (k=0; k<nsend; k++) {
6173:     iremote[k].rank  = sendto[k];
6174:     iremote[k].index = 0;
6175:     nleaves2        += nentries[k];
6177:   }
6178:   PetscSFSetGraph(sf1,nroots,nleaves,NULL,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);
6179:   PetscSFFetchAndOpWithMemTypeBegin(sf1,MPIU_INT,PETSC_MEMTYPE_HOST,&nroots2/*rootdata*/,PETSC_MEMTYPE_HOST,nentries/*leafdata*/,PETSC_MEMTYPE_HOST,offsets/*leafupdate*/,MPI_SUM);
6180:   PetscSFFetchAndOpEnd(sf1,MPIU_INT,&nroots2,nentries,offsets,MPI_SUM); /* Would nroots2 overflow, we check offsets[] below */
6181:   PetscSFDestroy(&sf1);
6182:   PetscAssert(nleaves2 == n1-rem,PETSC_COMM_SELF,PETSC_ERR_PLIB,"nleaves2 %" PetscInt_FMT " != number of remote entries %" PetscCount_FMT,nleaves2,n1-rem);

6184:   /* Build 2nd SF to send remote COOs to their owner */
6185:   PetscSF sf2;
6186:   nroots  = nroots2;
6187:   nleaves = nleaves2;
6188:   PetscSFCreate(comm,&sf2);
6189:   PetscSFSetFromOptions(sf2);
6190:   PetscMalloc1(nleaves,&iremote);
6191:   p       = 0;
6192:   for (k=0; k<nsend; k++) {
6194:     for (q=0; q<nentries[k]; q++,p++) {
6195:       iremote[p].rank  = sendto[k];
6196:       iremote[p].index = offsets[k] + q;
6197:     }
6198:   }
6199:   PetscSFSetGraph(sf2,nroots,nleaves,NULL,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);

6201:   /* sf2 only sends contiguous leafdata to contiguous rootdata. We record the permutation which will be used to fill leafdata */
6202:   PetscArraycpy(Cperm1,perm1+rem,n1-rem);

6204:   /* Send the remote COOs to their owner */
6205:   PetscInt   n2 = nroots,*i2,*j2; /* Buffers for received COOs from other ranks, along with a permutation array */
6206:   PetscCount *perm2; /* Though PetscInt is enough for remote entries, we use PetscCount here as we want to reuse MatSplitEntries_Internal() */
6207:   PetscMalloc3(n2,&i2,n2,&j2,n2,&perm2);
6208:   PetscSFReduceWithMemTypeBegin(sf2,MPIU_INT,PETSC_MEMTYPE_HOST,i1+rem,PETSC_MEMTYPE_HOST,i2,MPI_REPLACE);
6209:   PetscSFReduceEnd(sf2,MPIU_INT,i1+rem,i2,MPI_REPLACE);
6210:   PetscSFReduceWithMemTypeBegin(sf2,MPIU_INT,PETSC_MEMTYPE_HOST,j1+rem,PETSC_MEMTYPE_HOST,j2,MPI_REPLACE);
6211:   PetscSFReduceEnd(sf2,MPIU_INT,j1+rem,j2,MPI_REPLACE);

6213:   PetscFree(offsets);
6214:   PetscFree2(sendto,nentries);

6216:   /* ---------------------------------------------------------------*/
6217:   /* Sort received COOs by row along with the permutation array     */
6218:   /* ---------------------------------------------------------------*/
6219:   for (k=0; k<n2; k++) perm2[k] = k;
6220:   PetscSortIntWithIntCountArrayPair(n2,i2,j2,perm2);

6222:   /* ---------------------------------------------------------------*/
6223:   /* Split received COOs into diag/offdiag portions                 */
6224:   /* ---------------------------------------------------------------*/
6225:   PetscCount  *rowBegin2,*rowMid2,*rowEnd2;
6226:   PetscCount  *Ajmap2,*Aperm2,*Bjmap2,*Bperm2;
6227:   PetscCount  Annz2,Bnnz2,Atot2,Btot2;

6229:   PetscCalloc3(m,&rowBegin2,m,&rowMid2,m,&rowEnd2);
6230:   MatSplitEntries_Internal(mat,n2,i2,j2,perm2,rowBegin2,rowMid2,rowEnd2,&Atot2,&Aperm2,&Annz2,&Ajmap2,&Btot2,&Bperm2,&Bnnz2,&Bjmap2);

6232:   /* --------------------------------------------------------------------------*/
6233:   /* Merge local COOs with received COOs: diag with diag, offdiag with offdiag */
6234:   /* --------------------------------------------------------------------------*/
6235:   PetscInt   *Ai,*Bi;
6236:   PetscInt   *Aj,*Bj;

6238:   PetscMalloc1(m+1,&Ai);
6239:   PetscMalloc1(m+1,&Bi);
6240:   PetscMalloc1(Annz1+Annz2,&Aj); /* Since local and remote entries might have dups, we might allocate excess memory */
6241:   PetscMalloc1(Bnnz1+Bnnz2,&Bj);

6243:   PetscCount *Aimap1,*Bimap1,*Aimap2,*Bimap2;
6244:   PetscMalloc4(Annz1,&Aimap1,Bnnz1,&Bimap1,Annz2,&Aimap2,Bnnz2,&Bimap2);

6246:   MatMergeEntries_Internal(mat,j1,j2,rowBegin1,rowMid1,rowBegin2,rowMid2,Ajmap1,Ajmap2,Aimap1,Aimap2,Ai,Aj);
6247:   MatMergeEntries_Internal(mat,j1,j2,rowMid1,  rowEnd1,rowMid2,  rowEnd2,Bjmap1,Bjmap2,Bimap1,Bimap2,Bi,Bj);
6248:   PetscFree3(rowBegin1,rowMid1,rowEnd1);
6249:   PetscFree3(rowBegin2,rowMid2,rowEnd2);
6250:   PetscFree3(i1,j1,perm1);
6251:   PetscFree3(i2,j2,perm2);

6253:   /* Reallocate Aj, Bj once we know actual numbers of unique nonzeros in A and B */
6254:   PetscInt Annz = Ai[m];
6255:   PetscInt Bnnz = Bi[m];
6256:   if (Annz < Annz1 + Annz2) {
6257:     PetscInt *Aj_new;
6258:     PetscMalloc1(Annz,&Aj_new);
6259:     PetscArraycpy(Aj_new,Aj,Annz);
6260:     PetscFree(Aj);
6261:     Aj   = Aj_new;
6262:   }

6264:   if (Bnnz < Bnnz1 + Bnnz2) {
6265:     PetscInt *Bj_new;
6266:     PetscMalloc1(Bnnz,&Bj_new);
6267:     PetscArraycpy(Bj_new,Bj,Bnnz);
6268:     PetscFree(Bj);
6269:     Bj   = Bj_new;
6270:   }

6272:   /* --------------------------------------------------------------------------------*/
6273:   /* Create new submatrices for on-process and off-process coupling                  */
6274:   /* --------------------------------------------------------------------------------*/
6275:   PetscScalar   *Aa,*Ba;
6276:   MatType       rtype;
6277:   Mat_SeqAIJ    *a,*b;
6278:   PetscCalloc1(Annz,&Aa); /* Zero-initialized matrix values */
6279:   PetscCalloc1(Bnnz,&Ba);
6280:   /* make Aj[] local, i.e, based off the start column of the diagonal portion */
6281:   if (cstart) {for (k=0; k<Annz; k++) Aj[k] -= cstart;}
6282:   MatDestroy(&mpiaij->A);
6283:   MatDestroy(&mpiaij->B);
6284:   MatGetRootType_Private(mat,&rtype);
6285:   MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,Ai,Aj,Aa,&mpiaij->A);
6286:   MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,mat->cmap->N,Bi,Bj,Ba,&mpiaij->B);
6287:   MatSetUpMultiply_MPIAIJ(mat);

6289:   a = (Mat_SeqAIJ*)mpiaij->A->data;
6290:   b = (Mat_SeqAIJ*)mpiaij->B->data;
6291:   a->singlemalloc = b->singlemalloc = PETSC_FALSE; /* Let newmat own Ai,Aj,Aa,Bi,Bj,Ba */
6292:   a->free_a       = b->free_a       = PETSC_TRUE;
6293:   a->free_ij      = b->free_ij      = PETSC_TRUE;

6295:   /* conversion must happen AFTER multiply setup */
6296:   MatConvert(mpiaij->A,rtype,MAT_INPLACE_MATRIX,&mpiaij->A);
6297:   MatConvert(mpiaij->B,rtype,MAT_INPLACE_MATRIX,&mpiaij->B);
6298:   VecDestroy(&mpiaij->lvec);
6299:   MatCreateVecs(mpiaij->B,&mpiaij->lvec,NULL);
6300:   PetscLogObjectParent((PetscObject)mat,(PetscObject)mpiaij->lvec);

6302:   mpiaij->coo_n   = coo_n;
6303:   mpiaij->coo_sf  = sf2;
6304:   mpiaij->sendlen = nleaves;
6305:   mpiaij->recvlen = nroots;

6307:   mpiaij->Annz1   = Annz1;
6308:   mpiaij->Annz2   = Annz2;
6309:   mpiaij->Bnnz1   = Bnnz1;
6310:   mpiaij->Bnnz2   = Bnnz2;

6312:   mpiaij->Atot1   = Atot1;
6313:   mpiaij->Atot2   = Atot2;
6314:   mpiaij->Btot1   = Btot1;
6315:   mpiaij->Btot2   = Btot2;

6317:   mpiaij->Aimap1  = Aimap1;
6318:   mpiaij->Aimap2  = Aimap2;
6319:   mpiaij->Bimap1  = Bimap1;
6320:   mpiaij->Bimap2  = Bimap2;

6322:   mpiaij->Ajmap1  = Ajmap1;
6323:   mpiaij->Ajmap2  = Ajmap2;
6324:   mpiaij->Bjmap1  = Bjmap1;
6325:   mpiaij->Bjmap2  = Bjmap2;

6327:   mpiaij->Aperm1  = Aperm1;
6328:   mpiaij->Aperm2  = Aperm2;
6329:   mpiaij->Bperm1  = Bperm1;
6330:   mpiaij->Bperm2  = Bperm2;

6332:   mpiaij->Cperm1  = Cperm1;

6334:   /* Allocate the send/recv buffers at preallocation time; if they end up unused, the cost on the host is negligible */
6335:   PetscMalloc2(mpiaij->sendlen,&mpiaij->sendbuf,mpiaij->recvlen,&mpiaij->recvbuf);
6336:   return 0;
6337: }

6339: static PetscErrorCode MatSetValuesCOO_MPIAIJ(Mat mat,const PetscScalar v[],InsertMode imode)
6340: {
6341:   Mat_MPIAIJ           *mpiaij = (Mat_MPIAIJ*)mat->data;
6342:   Mat                  A = mpiaij->A,B = mpiaij->B;
6343:   PetscCount           Annz1 = mpiaij->Annz1,Annz2 = mpiaij->Annz2,Bnnz1 = mpiaij->Bnnz1,Bnnz2 = mpiaij->Bnnz2;
6344:   PetscScalar          *Aa,*Ba;
6345:   PetscScalar          *sendbuf = mpiaij->sendbuf;
6346:   PetscScalar          *recvbuf = mpiaij->recvbuf;
6347:   const PetscCount     *Ajmap1 = mpiaij->Ajmap1,*Ajmap2 = mpiaij->Ajmap2,*Aimap1 = mpiaij->Aimap1,*Aimap2 = mpiaij->Aimap2;
6348:   const PetscCount     *Bjmap1 = mpiaij->Bjmap1,*Bjmap2 = mpiaij->Bjmap2,*Bimap1 = mpiaij->Bimap1,*Bimap2 = mpiaij->Bimap2;
6349:   const PetscCount     *Aperm1 = mpiaij->Aperm1,*Aperm2 = mpiaij->Aperm2,*Bperm1 = mpiaij->Bperm1,*Bperm2 = mpiaij->Bperm2;
6350:   const PetscCount     *Cperm1 = mpiaij->Cperm1;

6352:   MatSeqAIJGetArray(A,&Aa); /* Might read and write matrix values */
6353:   MatSeqAIJGetArray(B,&Ba);
6354:   if (imode == INSERT_VALUES) {
6355:     PetscMemzero(Aa,((Mat_SeqAIJ*)A->data)->nz*sizeof(PetscScalar));
6356:     PetscMemzero(Ba,((Mat_SeqAIJ*)B->data)->nz*sizeof(PetscScalar));
6357:   }

6359:   /* Pack entries to be sent to remote */
6360:   for (PetscCount i=0; i<mpiaij->sendlen; i++) sendbuf[i] = v[Cperm1[i]];

6362:   /* Send remote entries to their owner and overlap the communication with local computation */
6363:   PetscSFReduceWithMemTypeBegin(mpiaij->coo_sf,MPIU_SCALAR,PETSC_MEMTYPE_HOST,sendbuf,PETSC_MEMTYPE_HOST,recvbuf,MPI_REPLACE);
6364:   /* Add local entries to A and B */
6365:   for (PetscCount i=0; i<Annz1; i++) {
6366:     for (PetscCount k=Ajmap1[i]; k<Ajmap1[i+1]; k++) Aa[Aimap1[i]] += v[Aperm1[k]];
6367:   }
6368:   for (PetscCount i=0; i<Bnnz1; i++) {
6369:     for (PetscCount k=Bjmap1[i]; k<Bjmap1[i+1]; k++) Ba[Bimap1[i]] += v[Bperm1[k]];
6370:   }
6371:   PetscSFReduceEnd(mpiaij->coo_sf,MPIU_SCALAR,sendbuf,recvbuf,MPI_REPLACE);

6373:   /* Add received remote entries to A and B */
6374:   for (PetscCount i=0; i<Annz2; i++) {
6375:     for (PetscCount k=Ajmap2[i]; k<Ajmap2[i+1]; k++) Aa[Aimap2[i]] += recvbuf[Aperm2[k]];
6376:   }
6377:   for (PetscCount i=0; i<Bnnz2; i++) {
6378:     for (PetscCount k=Bjmap2[i]; k<Bjmap2[i+1]; k++) Ba[Bimap2[i]] += recvbuf[Bperm2[k]];
6379:   }
6380:   MatSeqAIJRestoreArray(A,&Aa);
6381:   MatSeqAIJRestoreArray(B,&Ba);
6382:   return 0;
6383: }
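
/* A minimal usage sketch of the COO assembly interface implemented above.  Assumptions: the 4x4
   global size, the indices and the values are made up, and PetscCall()/error checking is omitted.
   Repeated (i,j) pairs, as well as entries whose rows live on other processes, are handled by the
   maps and the PetscSF built in MatSetPreallocationCOO_MPIAIJ().

     Mat         A;
     PetscInt    coo_i[] = {0,0,1},  coo_j[] = {0,2,1};
     PetscScalar v[]     = {1.0,2.0,3.0};

     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,4,4);
     MatSetType(A,MATMPIAIJ);
     MatSetPreallocationCOO(A,3,coo_i,coo_j);   builds the Aimap/Ajmap/Aperm and SF data used above
     MatSetValuesCOO(A,v,INSERT_VALUES);        values only; the communication pattern is reused
     MatDestroy(&A);
*/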

6385: /* ----------------------------------------------------------------*/

6387: /*MC
6388:    MATMPIAIJ - MATMPIAIJ = "mpiaij" - A matrix type to be used for parallel sparse matrices.

6390:    Options Database Keys:
6391: . -mat_type mpiaij - sets the matrix type to "mpiaij" during a call to MatSetFromOptions()

6393:    Level: beginner

6395:    Notes:
6396:     MatSetValues() may be called for this matrix type with a NULL argument for the numerical values;
6397:     in this case the values associated with the rows and columns one passes in are set to zero
6398:     in the matrix.

6400:     MatSetOption(A,MAT_STRUCTURE_ONLY,PETSC_TRUE) may be called for this matrix type. In this case no
6401:     space is allocated for the nonzero entries and any entries passed with MatSetValues() are ignored.

6403: .seealso: MatCreateAIJ()
6404: M*/
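
/* A minimal creation sketch (not part of the library source).  Assumptions: the global size 100
   and the per-row preallocation hints 5/2 are made up, and PetscCall()/error checking is omitted.

     Mat A;
     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,100,100);
     MatSetFromOptions(A);                          picks up -mat_type mpiaij (or aij)
     MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);    rough per-row estimates for the diag/off-diag blocks
     ... MatSetValues(); MatAssemblyBegin/End(A,MAT_FINAL_ASSEMBLY) ...
     MatDestroy(&A);
*/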

6406: PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJ(Mat B)
6407: {
6408:   Mat_MPIAIJ     *b;
6409:   PetscMPIInt    size;

6411:   MPI_Comm_size(PetscObjectComm((PetscObject)B),&size);

6413:   PetscNewLog(B,&b);
6414:   B->data       = (void*)b;
6415:   PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));
6416:   B->assembled  = PETSC_FALSE;
6417:   B->insertmode = NOT_SET_VALUES;
6418:   b->size       = size;

6420:   MPI_Comm_rank(PetscObjectComm((PetscObject)B),&b->rank);

6422:   /* build cache for off array entries formed */
6423:   MatStashCreate_Private(PetscObjectComm((PetscObject)B),1,&B->stash);

6425:   b->donotstash  = PETSC_FALSE;
6426:   b->colmap      = NULL;
6427:   b->garray      = NULL;
6428:   b->roworiented = PETSC_TRUE;

6430:   /* stuff used for matrix vector multiply */
6431:   b->lvec  = NULL;
6432:   b->Mvctx = NULL;

6434:   /* stuff for MatGetRow() */
6435:   b->rowindices   = NULL;
6436:   b->rowvalues    = NULL;
6437:   b->getrowactive = PETSC_FALSE;

6439:   /* flexible pointer used in CUSPARSE classes */
6440:   b->spptr = NULL;

6442:   PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetUseScalableIncreaseOverlap_C",MatMPIAIJSetUseScalableIncreaseOverlap_MPIAIJ);
6443:   PetscObjectComposeFunction((PetscObject)B,"MatStoreValues_C",MatStoreValues_MPIAIJ);
6444:   PetscObjectComposeFunction((PetscObject)B,"MatRetrieveValues_C",MatRetrieveValues_MPIAIJ);
6445:   PetscObjectComposeFunction((PetscObject)B,"MatIsTranspose_C",MatIsTranspose_MPIAIJ);
6446:   PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetPreallocation_C",MatMPIAIJSetPreallocation_MPIAIJ);
6447:   PetscObjectComposeFunction((PetscObject)B,"MatResetPreallocation_C",MatResetPreallocation_MPIAIJ);
6448:   PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetPreallocationCSR_C",MatMPIAIJSetPreallocationCSR_MPIAIJ);
6449:   PetscObjectComposeFunction((PetscObject)B,"MatDiagonalScaleLocal_C",MatDiagonalScaleLocal_MPIAIJ);
6450:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijperm_C",MatConvert_MPIAIJ_MPIAIJPERM);
6451:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijsell_C",MatConvert_MPIAIJ_MPIAIJSELL);
6452: #if defined(PETSC_HAVE_CUDA)
6453:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijcusparse_C",MatConvert_MPIAIJ_MPIAIJCUSPARSE);
6454: #endif
6455: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
6456:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijkokkos_C",MatConvert_MPIAIJ_MPIAIJKokkos);
6457: #endif
6458: #if defined(PETSC_HAVE_MKL_SPARSE)
6459:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijmkl_C",MatConvert_MPIAIJ_MPIAIJMKL);
6460: #endif
6461:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijcrl_C",MatConvert_MPIAIJ_MPIAIJCRL);
6462:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpibaij_C",MatConvert_MPIAIJ_MPIBAIJ);
6463:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpisbaij_C",MatConvert_MPIAIJ_MPISBAIJ);
6464:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpidense_C",MatConvert_MPIAIJ_MPIDense);
6465: #if defined(PETSC_HAVE_ELEMENTAL)
6466:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_elemental_C",MatConvert_MPIAIJ_Elemental);
6467: #endif
6468: #if defined(PETSC_HAVE_SCALAPACK)
6469:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_scalapack_C",MatConvert_AIJ_ScaLAPACK);
6470: #endif
6471:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_is_C",MatConvert_XAIJ_IS);
6472:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpisell_C",MatConvert_MPIAIJ_MPISELL);
6473: #if defined(PETSC_HAVE_HYPRE)
6474:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_hypre_C",MatConvert_AIJ_HYPRE);
6475:   PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_transpose_mpiaij_mpiaij_C",MatProductSetFromOptions_Transpose_AIJ_AIJ);
6476: #endif
6477:   PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_is_mpiaij_C",MatProductSetFromOptions_IS_XAIJ);
6478:   PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_mpiaij_mpiaij_C",MatProductSetFromOptions_MPIAIJ);
6479:   PetscObjectComposeFunction((PetscObject)B,"MatSetPreallocationCOO_C",MatSetPreallocationCOO_MPIAIJ);
6480:   PetscObjectComposeFunction((PetscObject)B,"MatSetValuesCOO_C",MatSetValuesCOO_MPIAIJ);
6481:   PetscObjectChangeTypeName((PetscObject)B,MATMPIAIJ);
6482:   return 0;
6483: }

6485: /*@C
6486:      MatCreateMPIAIJWithSplitArrays - creates a MPI AIJ matrix using arrays that contain the "diagonal"
6487:          and "off-diagonal" part of the matrix in CSR format.

6489:    Collective

6491:    Input Parameters:
6492: +  comm - MPI communicator
6493: .  m - number of local rows (Cannot be PETSC_DECIDE)
6494: .  n - This value should be the same as the local size used in creating the
6495:        x vector for the matrix-vector product y = Ax (or PETSC_DECIDE to have it
6496:        calculated if N is given). For square matrices n is almost always m.
6497: .  M - number of global rows (or PETSC_DETERMINE to have it calculated if m is given)
6498: .  N - number of global columns (or PETSC_DETERMINE to have it calculated if n is given)
6499: .   i - row indices for "diagonal" portion of matrix; that is i[0] = 0, i[row] = i[row-1] + number of elements in that row of the matrix
6500: .   j - column indices, which must be local, i.e., based off the start column of the diagonal portion
6501: .   a - matrix values
6502: .   oi - row indices for "off-diagonal" portion of matrix; that is oi[0] = 0, oi[row] = oi[row-1] + number of elements in that row of the matrix
6503: .   oj - column indices, which must be global, representing global columns in the MPIAIJ matrix
6504: -   oa - matrix values

6506:    Output Parameter:
6507: .   mat - the matrix

6509:    Level: advanced

6511:    Notes:
6512:        The i, j, and a arrays ARE NOT copied by this routine into the internal format used by PETSc. The user
6513:        must free the arrays once the matrix has been destroyed and not before.

6515:        The i and j indices are 0 based

6517:        See MatCreateAIJ() for the definition of "diagonal" and "off-diagonal" portion of the matrix

6519:        This sets local rows and cannot be used to set off-processor values.

6521:        Use of this routine is discouraged because it is inflexible and cumbersome to use. It is extremely rare that a
6522:        legacy application natively assembles into exactly this split format. The code to do so is nontrivial and does
6523:        not easily support in-place reassembly. It is recommended to use MatSetValues() (or a variant thereof) because
6524:        the resulting assembly is easier to implement, will work with any matrix format, and the user does not have to
6525:        keep track of the underlying array. Use MatSetOption(A,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) to disable all
6526:        communication if it is known that only local entries will be set.

6528: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
6529:           MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithArrays()
6530: @*/
6531: PetscErrorCode MatCreateMPIAIJWithSplitArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt i[],PetscInt j[],PetscScalar a[],PetscInt oi[], PetscInt oj[],PetscScalar oa[],Mat *mat)
6532: {
6533:   Mat_MPIAIJ     *maij;

6538:   MatCreate(comm,mat);
6539:   MatSetSizes(*mat,m,n,M,N);
6540:   MatSetType(*mat,MATMPIAIJ);
6541:   maij = (Mat_MPIAIJ*) (*mat)->data;

6543:   (*mat)->preallocated = PETSC_TRUE;

6545:   PetscLayoutSetUp((*mat)->rmap);
6546:   PetscLayoutSetUp((*mat)->cmap);

6548:   MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,i,j,a,&maij->A);
6549:   MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,(*mat)->cmap->N,oi,oj,oa,&maij->B);

6551:   MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
6552:   MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);
6553:   MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);
6554:   MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_FALSE);
6555:   MatSetOption(*mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
6556:   return 0;
6557: }
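
/* Worked example (not part of the library source).  Assumptions: two MPI ranks and a made-up 4x4
   matrix with two rows and two columns owned per rank, so on rank 0 the "diagonal" block covers
   global columns 0-1 and the "off-diagonal" block covers global columns 2-3.  If rank 0 holds
   A(0,0)=1, A(0,2)=2, A(1,1)=3 and A(1,3)=4, its split CSR inputs are

     i[]  = {0,1,2};  j[]  = {0,1};  a[]  = {1.0,3.0};     local column indices
     oi[] = {0,1,2};  oj[] = {2,3};  oa[] = {2.0,4.0};     global column indices

   and the call is

     MatCreateMPIAIJWithSplitArrays(PETSC_COMM_WORLD,2,2,4,4,i,j,a,oi,oj,oa,&A);

   Rank 1 passes the analogous arrays for its rows 2-3 (with j[] based off its column start 2).
   The arrays must stay allocated until A is destroyed, as noted above. */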

6559: typedef struct {
6560:   Mat       *mp;    /* intermediate products */
6561:   PetscBool *mptmp; /* is the intermediate product temporary ? */
6562:   PetscInt  cp;     /* number of intermediate products */

6564:   /* support for MatGetBrowsOfAoCols_MPIAIJ for P_oth */
6565:   PetscInt    *startsj_s,*startsj_r;
6566:   PetscScalar *bufa;
6567:   Mat         P_oth;

6569:   /* may take advantage of merging product->B */
6570:   Mat Bloc; /* B-local by merging diag and off-diag */

6572:   /* cuSPARSE does not support splitting the symbolic and numeric phases.
6573:      When api_user is true, we do not need to update the numerical values
6574:      of the temporary storage */
6575:   PetscBool reusesym;

6577:   /* support for COO values insertion */
6578:   PetscScalar  *coo_v,*coo_w; /* store on-process and off-process COO scalars; also used as MPI recv/send buffers, respectively */
6579:   PetscInt     **own; /* own[i] points to address of on-process COO indices for Mat mp[i] */
6580:   PetscInt     **off; /* off[i] points to address of off-process COO indices for Mat mp[i] */
6581:   PetscBool    hasoffproc; /* if true, have off-process values insertion (i.e. AtB or PtAP) */
6582:   PetscSF      sf; /* used for non-local values insertion and memory malloc */
6583:   PetscMemType mtype;

6585:   /* customization */
6586:   PetscBool abmerge;
6587:   PetscBool P_oth_bind;
6588: } MatMatMPIAIJBACKEND;

6590: PetscErrorCode MatDestroy_MatMatMPIAIJBACKEND(void *data)
6591: {
6592:   MatMatMPIAIJBACKEND *mmdata = (MatMatMPIAIJBACKEND*)data;
6593:   PetscInt            i;

6595:   PetscFree2(mmdata->startsj_s,mmdata->startsj_r);
6596:   PetscFree(mmdata->bufa);
6597:   PetscSFFree(mmdata->sf,mmdata->mtype,mmdata->coo_v);
6598:   PetscSFFree(mmdata->sf,mmdata->mtype,mmdata->coo_w);
6599:   MatDestroy(&mmdata->P_oth);
6600:   MatDestroy(&mmdata->Bloc);
6601:   PetscSFDestroy(&mmdata->sf);
6602:   for (i = 0; i < mmdata->cp; i++) {
6603:     MatDestroy(&mmdata->mp[i]);
6604:   }
6605:   PetscFree2(mmdata->mp,mmdata->mptmp);
6606:   PetscFree(mmdata->own[0]);
6607:   PetscFree(mmdata->own);
6608:   PetscFree(mmdata->off[0]);
6609:   PetscFree(mmdata->off);
6610:   PetscFree(mmdata);
6611:   return 0;
6612: }

6614: /* Copy the n entries of A's value array selected by idx[] into v[].
6615:    If idx is NULL, copy the first n entries of A's value array into v[]
6616:  */
6617: static PetscErrorCode MatSeqAIJCopySubArray(Mat A, PetscInt n, const PetscInt idx[], PetscScalar v[])
6618: {
6619:   PetscErrorCode (*f)(Mat,PetscInt,const PetscInt[],PetscScalar[]);

6621:   PetscObjectQueryFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",&f);
6622:   if (f) {
6623:     (*f)(A,n,idx,v);
6624:   } else {
6625:     const PetscScalar *vv;

6627:     MatSeqAIJGetArrayRead(A,&vv);
6628:     if (n && idx) {
6629:       PetscScalar    *w = v;
6630:       const PetscInt *oi = idx;
6631:       PetscInt       j;

6633:       for (j = 0; j < n; j++) *w++ = vv[*oi++];
6634:     } else {
6635:       PetscArraycpy(v,vv,n);
6636:     }
6637:     MatSeqAIJRestoreArrayRead(A,&vv);
6638:   }
6639:   return 0;
6640: }
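
/* Illustrative behavior (not part of the library source): if A's value array is {10,20,30,40}
   and idx[] = {3,0}, then MatSeqAIJCopySubArray(A,2,idx,v) fills v[] = {40,10}; with idx == NULL
   it copies the first n values verbatim. */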

6642: static PetscErrorCode MatProductNumeric_MPIAIJBACKEND(Mat C)
6643: {
6644:   MatMatMPIAIJBACKEND *mmdata;
6645:   PetscInt            i,n_d,n_o;

6647:   MatCheckProduct(C,1);
6649:   mmdata = (MatMatMPIAIJBACKEND*)C->product->data;
6650:   if (!mmdata->reusesym) { /* update temporary matrices */
6651:     if (mmdata->P_oth) {
6652:       MatGetBrowsOfAoCols_MPIAIJ(C->product->A,C->product->B,MAT_REUSE_MATRIX,&mmdata->startsj_s,&mmdata->startsj_r,&mmdata->bufa,&mmdata->P_oth);
6653:     }
6654:     if (mmdata->Bloc) {
6655:       MatMPIAIJGetLocalMatMerge(C->product->B,MAT_REUSE_MATRIX,NULL,&mmdata->Bloc);
6656:     }
6657:   }
6658:   mmdata->reusesym = PETSC_FALSE;

6660:   for (i = 0; i < mmdata->cp; i++) {
6662:     (*mmdata->mp[i]->ops->productnumeric)(mmdata->mp[i]);
6663:   }
6664:   for (i = 0, n_d = 0, n_o = 0; i < mmdata->cp; i++) {
6665:     PetscInt noff = mmdata->off[i+1] - mmdata->off[i];

6667:     if (mmdata->mptmp[i]) continue;
6668:     if (noff) {
6669:       PetscInt nown = mmdata->own[i+1] - mmdata->own[i];

6671:       MatSeqAIJCopySubArray(mmdata->mp[i],noff,mmdata->off[i],mmdata->coo_w + n_o);
6672:       MatSeqAIJCopySubArray(mmdata->mp[i],nown,mmdata->own[i],mmdata->coo_v + n_d);
6673:       n_o += noff;
6674:       n_d += nown;
6675:     } else {
6676:       Mat_SeqAIJ *mm = (Mat_SeqAIJ*)mmdata->mp[i]->data;

6678:       MatSeqAIJCopySubArray(mmdata->mp[i],mm->nz,NULL,mmdata->coo_v + n_d);
6679:       n_d += mm->nz;
6680:     }
6681:   }
6682:   if (mmdata->hasoffproc) { /* offprocess insertion */
6683:     PetscSFGatherBegin(mmdata->sf,MPIU_SCALAR,mmdata->coo_w,mmdata->coo_v+n_d);
6684:     PetscSFGatherEnd(mmdata->sf,MPIU_SCALAR,mmdata->coo_w,mmdata->coo_v+n_d);
6685:   }
6686:   MatSetValuesCOO(C,mmdata->coo_v,INSERT_VALUES);
6687:   return 0;
6688: }

6690: /* Support for Pt * A, A * P, or Pt * A * P */
6691: #define MAX_NUMBER_INTERMEDIATE 4
6692: PetscErrorCode MatProductSymbolic_MPIAIJBACKEND(Mat C)
6693: {
6694:   Mat_Product            *product = C->product;
6695:   Mat                    A,P,mp[MAX_NUMBER_INTERMEDIATE]; /* A, P and a series of intermediate matrices */
6696:   Mat_MPIAIJ             *a,*p;
6697:   MatMatMPIAIJBACKEND    *mmdata;
6698:   ISLocalToGlobalMapping P_oth_l2g = NULL;
6699:   IS                     glob = NULL;
6700:   const char             *prefix;
6701:   char                   pprefix[256];
6702:   const PetscInt         *globidx,*P_oth_idx;
6703:   PetscInt               i,j,cp,m,n,M,N,*coo_i,*coo_j;
6704:   PetscCount             ncoo,ncoo_d,ncoo_o,ncoo_oown;
6705:   PetscInt               cmapt[MAX_NUMBER_INTERMEDIATE],rmapt[MAX_NUMBER_INTERMEDIATE]; /* col/row map type for each Mat in mp[]. */
6706:                                                                                         /* type-0: consecutive, start from 0; type-1: consecutive with */
6707:                                                                                         /* a base offset; type-2: sparse with a local to global map table */
6708:   const PetscInt         *cmapa[MAX_NUMBER_INTERMEDIATE],*rmapa[MAX_NUMBER_INTERMEDIATE]; /* col/row local to global map array (table) for type-2 map type */

6710:   MatProductType         ptype;
6711:   PetscBool              mptmp[MAX_NUMBER_INTERMEDIATE],hasoffproc = PETSC_FALSE,iscuda,iskokk;
6712:   PetscMPIInt            size;
6713:   PetscErrorCode         ierr;

6715:   MatCheckProduct(C,1);
6717:   ptype = product->type;
6718:   if (product->A->symmetric && ptype == MATPRODUCT_AtB) {
6719:     ptype = MATPRODUCT_AB;
6720:     product->symbolic_used_the_fact_A_is_symmetric = PETSC_TRUE;
6721:   }
6722:   switch (ptype) {
6723:   case MATPRODUCT_AB:
6724:     A = product->A;
6725:     P = product->B;
6726:     m = A->rmap->n;
6727:     n = P->cmap->n;
6728:     M = A->rmap->N;
6729:     N = P->cmap->N;
6730:     hasoffproc = PETSC_FALSE; /* will not scatter mat product values to other processes */
6731:     break;
6732:   case MATPRODUCT_AtB:
6733:     P = product->A;
6734:     A = product->B;
6735:     m = P->cmap->n;
6736:     n = A->cmap->n;
6737:     M = P->cmap->N;
6738:     N = A->cmap->N;
6739:     hasoffproc = PETSC_TRUE;
6740:     break;
6741:   case MATPRODUCT_PtAP:
6742:     A = product->A;
6743:     P = product->B;
6744:     m = P->cmap->n;
6745:     n = P->cmap->n;
6746:     M = P->cmap->N;
6747:     N = P->cmap->N;
6748:     hasoffproc = PETSC_TRUE;
6749:     break;
6750:   default:
6751:     SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for product type %s",MatProductTypes[ptype]);
6752:   }
6753:   MPI_Comm_size(PetscObjectComm((PetscObject)C),&size);
6754:   if (size == 1) hasoffproc = PETSC_FALSE;

6756:   /* defaults */
6757:   for (i=0;i<MAX_NUMBER_INTERMEDIATE;i++) {
6758:     mp[i]    = NULL;
6759:     mptmp[i] = PETSC_FALSE;
6760:     rmapt[i] = -1;
6761:     cmapt[i] = -1;
6762:     rmapa[i] = NULL;
6763:     cmapa[i] = NULL;
6764:   }

6766:   /* customization */
6767:   PetscNew(&mmdata);
6768:   mmdata->reusesym = product->api_user;
6769:   if (ptype == MATPRODUCT_AB) {
6770:     if (product->api_user) {
6771:       PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatMatMult","Mat");
6772:       PetscOptionsBool("-matmatmult_backend_mergeB","Merge product->B local matrices","MatMatMult",mmdata->abmerge,&mmdata->abmerge,NULL);
6773:       PetscOptionsBool("-matmatmult_backend_pothbind","Bind P_oth to CPU","MatBindToCPU",mmdata->P_oth_bind,&mmdata->P_oth_bind,NULL);
6774:       PetscOptionsEnd();
6775:     } else {
6776:       PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_AB","Mat");
6777:       PetscOptionsBool("-mat_product_algorithm_backend_mergeB","Merge product->B local matrices","MatMatMult",mmdata->abmerge,&mmdata->abmerge,NULL);
6778:       PetscOptionsBool("-mat_product_algorithm_backend_pothbind","Bind P_oth to CPU","MatBindToCPU",mmdata->P_oth_bind,&mmdata->P_oth_bind,NULL);
6779:       PetscOptionsEnd();
6780:     }
6781:   } else if (ptype == MATPRODUCT_PtAP) {
6782:     if (product->api_user) {
6783:       PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatPtAP","Mat");
6784:       PetscOptionsBool("-matptap_backend_pothbind","Bind P_oth to CPU","MatBindToCPU",mmdata->P_oth_bind,&mmdata->P_oth_bind,NULL);
6785:       PetscOptionsEnd();
6786:     } else {
6787:       PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_PtAP","Mat");
6788:       PetscOptionsBool("-mat_product_algorithm_backend_pothbind","Bind P_oth to CPU","MatBindToCPU",mmdata->P_oth_bind,&mmdata->P_oth_bind,NULL);
6789:       PetscOptionsEnd();
6790:     }
6791:   }
6792:   a = (Mat_MPIAIJ*)A->data;
6793:   p = (Mat_MPIAIJ*)P->data;
6794:   MatSetSizes(C,m,n,M,N);
6795:   PetscLayoutSetUp(C->rmap);
6796:   PetscLayoutSetUp(C->cmap);
6797:   MatSetType(C,((PetscObject)A)->type_name);
6798:   MatGetOptionsPrefix(C,&prefix);

6800:   cp   = 0;
6801:   switch (ptype) {
6802:   case MATPRODUCT_AB: /* A * P */
6803:     MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&mmdata->startsj_s,&mmdata->startsj_r,&mmdata->bufa,&mmdata->P_oth);

6805:     /* A_diag * P_local (merged or not) */
6806:     if (mmdata->abmerge) { /* P's diagonal and off-diag blocks are merged to one matrix, then multiplied by A_diag */
6807:       /* P is product->B */
6808:       MatMPIAIJGetLocalMatMerge(P,MAT_INITIAL_MATRIX,&glob,&mmdata->Bloc);
6809:       MatProductCreate(a->A,mmdata->Bloc,NULL,&mp[cp]);
6810:       MatProductSetType(mp[cp],MATPRODUCT_AB);
6811:       MatProductSetFill(mp[cp],product->fill);
6812:       PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%" PetscInt_FMT "_",cp);
6813:       MatSetOptionsPrefix(mp[cp],prefix);
6814:       MatAppendOptionsPrefix(mp[cp],pprefix);
6815:       mp[cp]->product->api_user = product->api_user;
6816:       MatProductSetFromOptions(mp[cp]);
6818:       (*mp[cp]->ops->productsymbolic)(mp[cp]);
6819:       ISGetIndices(glob,&globidx);
6820:       rmapt[cp] = 1;
6821:       cmapt[cp] = 2;
6822:       cmapa[cp] = globidx;
6823:       mptmp[cp] = PETSC_FALSE;
6824:       cp++;
6825:     } else { /* A_diag * P_diag and A_diag * P_off */
6826:       MatProductCreate(a->A,p->A,NULL,&mp[cp]);
6827:       MatProductSetType(mp[cp],MATPRODUCT_AB);
6828:       MatProductSetFill(mp[cp],product->fill);
6829:       PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%" PetscInt_FMT "_",cp);
6830:       MatSetOptionsPrefix(mp[cp],prefix);
6831:       MatAppendOptionsPrefix(mp[cp],pprefix);
6832:       mp[cp]->product->api_user = product->api_user;
6833:       MatProductSetFromOptions(mp[cp]);
6835:       (*mp[cp]->ops->productsymbolic)(mp[cp]);
6836:       rmapt[cp] = 1;
6837:       cmapt[cp] = 1;
6838:       mptmp[cp] = PETSC_FALSE;
6839:       cp++;
6840:       MatProductCreate(a->A,p->B,NULL,&mp[cp]);
6841:       MatProductSetType(mp[cp],MATPRODUCT_AB);
6842:       MatProductSetFill(mp[cp],product->fill);
6843:       PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%" PetscInt_FMT "_",cp);
6844:       MatSetOptionsPrefix(mp[cp],prefix);
6845:       MatAppendOptionsPrefix(mp[cp],pprefix);
6846:       mp[cp]->product->api_user = product->api_user;
6847:       MatProductSetFromOptions(mp[cp]);
6849:       (*mp[cp]->ops->productsymbolic)(mp[cp]);
6850:       rmapt[cp] = 1;
6851:       cmapt[cp] = 2;
6852:       cmapa[cp] = p->garray;
6853:       mptmp[cp] = PETSC_FALSE;
6854:       cp++;
6855:     }

6857:     /* A_off * P_other */
6858:     if (mmdata->P_oth) {
6859:       MatSeqAIJCompactOutExtraColumns_SeqAIJ(mmdata->P_oth,&P_oth_l2g); /* make P_oth use local col ids */
6860:       ISLocalToGlobalMappingGetIndices(P_oth_l2g,&P_oth_idx);
6861:       MatSetType(mmdata->P_oth,((PetscObject)(a->B))->type_name);
6862:       MatBindToCPU(mmdata->P_oth,mmdata->P_oth_bind);
6863:       MatProductCreate(a->B,mmdata->P_oth,NULL,&mp[cp]);
6864:       MatProductSetType(mp[cp],MATPRODUCT_AB);
6865:       MatProductSetFill(mp[cp],product->fill);
6866:       PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%" PetscInt_FMT "_",cp);
6867:       MatSetOptionsPrefix(mp[cp],prefix);
6868:       MatAppendOptionsPrefix(mp[cp],pprefix);
6869:       mp[cp]->product->api_user = product->api_user;
6870:       MatProductSetFromOptions(mp[cp]);
6872:       (*mp[cp]->ops->productsymbolic)(mp[cp]);
6873:       rmapt[cp] = 1;
6874:       cmapt[cp] = 2;
6875:       cmapa[cp] = P_oth_idx;
6876:       mptmp[cp] = PETSC_FALSE;
6877:       cp++;
6878:     }
6879:     break;

6881:   case MATPRODUCT_AtB: /* (P^t * A): P_diag^t * A_loc + P_off^t * A_loc */
6882:     /* A is product->B */
6883:     MatMPIAIJGetLocalMatMerge(A,MAT_INITIAL_MATRIX,&glob,&mmdata->Bloc);
6884:     if (A == P) { /* when A==P, we can take advantage of the already merged mmdata->Bloc */
6885:       MatProductCreate(mmdata->Bloc,mmdata->Bloc,NULL,&mp[cp]);
6886:       MatProductSetType(mp[cp],MATPRODUCT_AtB);
6887:       MatProductSetFill(mp[cp],product->fill);
6888:       PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%" PetscInt_FMT "_",cp);
6889:       MatSetOptionsPrefix(mp[cp],prefix);
6890:       MatAppendOptionsPrefix(mp[cp],pprefix);
6891:       mp[cp]->product->api_user = product->api_user;
6892:       MatProductSetFromOptions(mp[cp]);
6894:       (*mp[cp]->ops->productsymbolic)(mp[cp]);
6895:       ISGetIndices(glob,&globidx);
6896:       rmapt[cp] = 2;
6897:       rmapa[cp] = globidx;
6898:       cmapt[cp] = 2;
6899:       cmapa[cp] = globidx;
6900:       mptmp[cp] = PETSC_FALSE;
6901:       cp++;
6902:     } else {
6903:       MatProductCreate(p->A,mmdata->Bloc,NULL,&mp[cp]);
6904:       MatProductSetType(mp[cp],MATPRODUCT_AtB);
6905:       MatProductSetFill(mp[cp],product->fill);
6906:       PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%" PetscInt_FMT "_",cp);
6907:       MatSetOptionsPrefix(mp[cp],prefix);
6908:       MatAppendOptionsPrefix(mp[cp],pprefix);
6909:       mp[cp]->product->api_user = product->api_user;
6910:       MatProductSetFromOptions(mp[cp]);
6912:       (*mp[cp]->ops->productsymbolic)(mp[cp]);
6913:       ISGetIndices(glob,&globidx);
6914:       rmapt[cp] = 1;
6915:       cmapt[cp] = 2;
6916:       cmapa[cp] = globidx;
6917:       mptmp[cp] = PETSC_FALSE;
6918:       cp++;
6919:       MatProductCreate(p->B,mmdata->Bloc,NULL,&mp[cp]);
6920:       MatProductSetType(mp[cp],MATPRODUCT_AtB);
6921:       MatProductSetFill(mp[cp],product->fill);
6922:       PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%" PetscInt_FMT "_",cp);
6923:       MatSetOptionsPrefix(mp[cp],prefix);
6924:       MatAppendOptionsPrefix(mp[cp],pprefix);
6925:       mp[cp]->product->api_user = product->api_user;
6926:       MatProductSetFromOptions(mp[cp]);
6928:       (*mp[cp]->ops->productsymbolic)(mp[cp]);
6929:       rmapt[cp] = 2;
6930:       rmapa[cp] = p->garray;
6931:       cmapt[cp] = 2;
6932:       cmapa[cp] = globidx;
6933:       mptmp[cp] = PETSC_FALSE;
6934:       cp++;
6935:     }
6936:     break;
6937:   case MATPRODUCT_PtAP:
6938:     MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&mmdata->startsj_s,&mmdata->startsj_r,&mmdata->bufa,&mmdata->P_oth);
6939:     /* P is product->B */
6940:     MatMPIAIJGetLocalMatMerge(P,MAT_INITIAL_MATRIX,&glob,&mmdata->Bloc);
6941:     MatProductCreate(a->A,mmdata->Bloc,NULL,&mp[cp]);
6942:     MatProductSetType(mp[cp],MATPRODUCT_PtAP);
6943:     MatProductSetFill(mp[cp],product->fill);
6944:     PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%" PetscInt_FMT "_",cp);
6945:     MatSetOptionsPrefix(mp[cp],prefix);
6946:     MatAppendOptionsPrefix(mp[cp],pprefix);
6947:     mp[cp]->product->api_user = product->api_user;
6948:     MatProductSetFromOptions(mp[cp]);
6950:     (*mp[cp]->ops->productsymbolic)(mp[cp]);
6951:     ISGetIndices(glob,&globidx);
6952:     rmapt[cp] = 2;
6953:     rmapa[cp] = globidx;
6954:     cmapt[cp] = 2;
6955:     cmapa[cp] = globidx;
6956:     mptmp[cp] = PETSC_FALSE;
6957:     cp++;
6958:     if (mmdata->P_oth) {
6959:       MatSeqAIJCompactOutExtraColumns_SeqAIJ(mmdata->P_oth,&P_oth_l2g);
6960:       ISLocalToGlobalMappingGetIndices(P_oth_l2g,&P_oth_idx);
6961:       MatSetType(mmdata->P_oth,((PetscObject)(a->B))->type_name);
6962:       MatBindToCPU(mmdata->P_oth,mmdata->P_oth_bind);
6963:       MatProductCreate(a->B,mmdata->P_oth,NULL,&mp[cp]);
6964:       MatProductSetType(mp[cp],MATPRODUCT_AB);
6965:       MatProductSetFill(mp[cp],product->fill);
6966:       PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%" PetscInt_FMT "_",cp);
6967:       MatSetOptionsPrefix(mp[cp],prefix);
6968:       MatAppendOptionsPrefix(mp[cp],pprefix);
6969:       mp[cp]->product->api_user = product->api_user;
6970:       MatProductSetFromOptions(mp[cp]);
6972:       (*mp[cp]->ops->productsymbolic)(mp[cp]);
6973:       mptmp[cp] = PETSC_TRUE;
6974:       cp++;
6975:       MatProductCreate(mmdata->Bloc,mp[1],NULL,&mp[cp]);
6976:       MatProductSetType(mp[cp],MATPRODUCT_AtB);
6977:       MatProductSetFill(mp[cp],product->fill);
6978:       PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%" PetscInt_FMT "_",cp);
6979:       MatSetOptionsPrefix(mp[cp],prefix);
6980:       MatAppendOptionsPrefix(mp[cp],pprefix);
6981:       mp[cp]->product->api_user = product->api_user;
6982:       MatProductSetFromOptions(mp[cp]);
6984:       (*mp[cp]->ops->productsymbolic)(mp[cp]);
6985:       rmapt[cp] = 2;
6986:       rmapa[cp] = globidx;
6987:       cmapt[cp] = 2;
6988:       cmapa[cp] = P_oth_idx;
6989:       mptmp[cp] = PETSC_FALSE;
6990:       cp++;
6991:     }
6992:     break;
6993:   default:
6994:     SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for product type %s",MatProductTypes[ptype]);
6995:   }
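  /* Legend for the index maps recorded in the switch above and consumed below:
       rmapt[p]/cmapt[p] == 0 : row/column indices of mp[p] are already global indices of C
       rmapt[p]/cmapt[p] == 1 : indices are local and consecutive; globalize by adding C's row/column start
       rmapt[p]/cmapt[p] == 2 : indices are local and sparse; globalize through the rmapa[p]/cmapa[p] arrays
     mptmp[p] == PETSC_TRUE marks an intermediate product that only feeds another product and is never
     inserted into C directly. */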
6996:   /* sanity check */

6999:   PetscMalloc2(cp,&mmdata->mp,cp,&mmdata->mptmp);
7000:   for (i = 0; i < cp; i++) {
7001:     mmdata->mp[i]    = mp[i];
7002:     mmdata->mptmp[i] = mptmp[i];
7003:   }
7004:   mmdata->cp = cp;
7005:   C->product->data       = mmdata;
7006:   C->product->destroy    = MatDestroy_MatMatMPIAIJBACKEND;
7007:   C->ops->productnumeric = MatProductNumeric_MPIAIJBACKEND;

7009:   /* memory type */
7010:   mmdata->mtype = PETSC_MEMTYPE_HOST;
7011:   PetscObjectTypeCompareAny((PetscObject)C,&iscuda,MATSEQAIJCUSPARSE,MATMPIAIJCUSPARSE,"");
7012:   PetscObjectTypeCompareAny((PetscObject)C,&iskokk,MATSEQAIJKOKKOS,MATMPIAIJKOKKOS,"");
7013:   if (iscuda) mmdata->mtype = PETSC_MEMTYPE_CUDA;
7014:   else if (iskokk) mmdata->mtype = PETSC_MEMTYPE_KOKKOS;

7016:   /* prepare coo coordinates for values insertion */

7018:   /* count total nonzeros of the intermediate SeqAIJ matrices
7019:     ncoo_d:    # of nonzeros of matrices that do not have offproc entries
7020:     ncoo_o:    # of nonzeros (of matrices that might have offproc entries) that will be inserted on remote procs
7021:     ncoo_oown: # of nonzeros (of matrices that might have offproc entries) that will be inserted locally
7022:   */
7023:   for (cp = 0, ncoo_d = 0, ncoo_o = 0, ncoo_oown = 0; cp < mmdata->cp; cp++) {
7024:     Mat_SeqAIJ *mm = (Mat_SeqAIJ*)mp[cp]->data;
7025:     if (mptmp[cp]) continue;
7026:     if (rmapt[cp] == 2 && hasoffproc) { /* the rows need to be scattered to their owner processes (which might include self) */
7027:       const PetscInt *rmap = rmapa[cp];
7028:       const PetscInt mr = mp[cp]->rmap->n;
7029:       const PetscInt rs = C->rmap->rstart;
7030:       const PetscInt re = C->rmap->rend;
7031:       const PetscInt *ii  = mm->i;
7032:       for (i = 0; i < mr; i++) {
7033:         const PetscInt gr = rmap[i];
7034:         const PetscInt nz = ii[i+1] - ii[i];
7035:         if (gr < rs || gr >= re) ncoo_o += nz; /* this row is offproc */
7036:         else ncoo_oown += nz; /* this row is local */
7037:       }
7038:     } else ncoo_d += mm->nz;
7039:   }

7041:   /*
7042:     ncoo: total number of nonzeros (including those inserted by remote procs) belonging to this proc

7044:     ncoo = ncoo_d + ncoo_oown + ncoo2, where ncoo2 is the number of nonzeros inserted into this proc by other procs.

7046:     off[0] points to a big index array, which is shared by off[1,2,...]. Similarly for own[0].

7048:     off[p]: points to the segment for matrix mp[p], storing location of nonzeros that mp[p] will insert to others
7049:     own[p]: points to the segment for matrix mp[p], storing location of nonzeros that mp[p] will insert locally
7050:     so, off[p+1]-off[p] is the number of nonzeros that mp[p] will send to others.

7052:     coo_i/j/v[]: [ncoo] row/col/val of nonzeros belonging to this proc.
7053:     Ex. coo_i[]: the beginning part (of size ncoo_d + ncoo_oown) stores i of local nonzeros, and the remaining part stores i of nonzeros I will receive.
7054:   */
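  /* Illustrative sketch (made-up sizes): with cp == 2, if mp[0] has 3 off-proc and 2 locally owned
     nonzeros and mp[1] has 1 off-proc nonzero, then after the fill loop below
       off[0] -> {j00,j01,j02,j10}   off[1] = off[0]+3   off[2] = off[0]+4
       own[0] -> {k00,k01}           own[1] = own[0]+2   own[2] = own[0]+2
     where every stored entry is a position into the CSR arrays of the corresponding intermediate
     matrix, so the values to communicate can be picked up directly in the numeric phase. */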
7055:   PetscCalloc1(mmdata->cp+1,&mmdata->off); /* +1 to make a csr-like data structure */
7056:   PetscCalloc1(mmdata->cp+1,&mmdata->own);

7058:   /* gather (i,j) of nonzeros inserted by remote procs */
7059:   if (hasoffproc) {
7060:     PetscSF  msf;
7061:     PetscInt ncoo2,*coo_i2,*coo_j2;

7063:     PetscMalloc1(ncoo_o,&mmdata->off[0]);
7064:     PetscMalloc1(ncoo_oown,&mmdata->own[0]);
7065:     PetscMalloc2(ncoo_o,&coo_i,ncoo_o,&coo_j); /* to collect (i,j) of entries to be sent to others */

7067:     for (cp = 0, ncoo_o = 0; cp < mmdata->cp; cp++) {
7068:       Mat_SeqAIJ *mm = (Mat_SeqAIJ*)mp[cp]->data;
7069:       PetscInt   *idxoff = mmdata->off[cp];
7070:       PetscInt   *idxown = mmdata->own[cp];
7071:       if (!mptmp[cp] && rmapt[cp] == 2) { /* row map is sparse */
7072:         const PetscInt *rmap = rmapa[cp];
7073:         const PetscInt *cmap = cmapa[cp];
7074:         const PetscInt *ii  = mm->i;
7075:         PetscInt       *coi = coo_i + ncoo_o;
7076:         PetscInt       *coj = coo_j + ncoo_o;
7077:         const PetscInt mr = mp[cp]->rmap->n;
7078:         const PetscInt rs = C->rmap->rstart;
7079:         const PetscInt re = C->rmap->rend;
7080:         const PetscInt cs = C->cmap->rstart;
7081:         for (i = 0; i < mr; i++) {
7082:           const PetscInt *jj = mm->j + ii[i];
7083:           const PetscInt gr  = rmap[i];
7084:           const PetscInt nz  = ii[i+1] - ii[i];
7085:           if (gr < rs || gr >= re) { /* this is an offproc row */
7086:             for (j = ii[i]; j < ii[i+1]; j++) {
7087:               *coi++ = gr;
7088:               *idxoff++ = j;
7089:             }
7090:             if (!cmapt[cp]) { /* already global */
7091:               for (j = 0; j < nz; j++) *coj++ = jj[j];
7092:             } else if (cmapt[cp] == 1) { /* local to global for owned columns of C */
7093:               for (j = 0; j < nz; j++) *coj++ = jj[j] + cs;
7094:             } else { /* offdiag */
7095:               for (j = 0; j < nz; j++) *coj++ = cmap[jj[j]];
7096:             }
7097:             ncoo_o += nz;
7098:           } else { /* this is a local row */
7099:             for (j = ii[i]; j < ii[i+1]; j++) *idxown++ = j;
7100:           }
7101:         }
7102:       }
7103:       mmdata->off[cp + 1] = idxoff;
7104:       mmdata->own[cp + 1] = idxown;
7105:     }

7107:     PetscSFCreate(PetscObjectComm((PetscObject)C),&mmdata->sf);
7108:     PetscSFSetGraphLayout(mmdata->sf,C->rmap,ncoo_o/*nleaves*/,NULL/*ilocal*/,PETSC_OWN_POINTER,coo_i);
7109:     PetscSFGetMultiSF(mmdata->sf,&msf);
7110:     PetscSFGetGraph(msf,&ncoo2/*nroots*/,NULL,NULL,NULL);
7111:     ncoo = ncoo_d + ncoo_oown + ncoo2;
7112:     PetscMalloc2(ncoo,&coo_i2,ncoo,&coo_j2);
7113:     PetscSFGatherBegin(mmdata->sf,MPIU_INT,coo_i,coo_i2 + ncoo_d + ncoo_oown); /* put (i,j) of remote nonzeros at back */
7114:     PetscSFGatherEnd(mmdata->sf,MPIU_INT,coo_i,coo_i2 + ncoo_d + ncoo_oown);
7115:     PetscSFGatherBegin(mmdata->sf,MPIU_INT,coo_j,coo_j2 + ncoo_d + ncoo_oown);
7116:     PetscSFGatherEnd(mmdata->sf,MPIU_INT,coo_j,coo_j2 + ncoo_d + ncoo_oown);
7117:     PetscFree2(coo_i,coo_j);
7118:     /* allocate MPI send buffer to collect nonzero values to be sent to remote procs */
7119:     PetscSFMalloc(mmdata->sf,mmdata->mtype,ncoo_o*sizeof(PetscScalar),(void**)&mmdata->coo_w);
7120:     coo_i = coo_i2;
7121:     coo_j = coo_j2;
7122:   } else { /* no offproc values insertion */
7123:     ncoo = ncoo_d;
7124:     PetscMalloc2(ncoo,&coo_i,ncoo,&coo_j);

7126:     PetscSFCreate(PetscObjectComm((PetscObject)C),&mmdata->sf);
7127:     PetscSFSetGraph(mmdata->sf,0,0,NULL,PETSC_OWN_POINTER,NULL,PETSC_OWN_POINTER);
7128:     PetscSFSetUp(mmdata->sf);
7129:   }
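  /* At this point mmdata->sf describes the off-process insertion pattern: every remote nonzero is a
     leaf attached, through C->rmap, to the process owning its global row (an empty graph when there
     is nothing to send), so the same SF can later move the corresponding values staged in
     mmdata->coo_w exactly the way the (i,j) pairs were moved above. */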
7130:   mmdata->hasoffproc = hasoffproc;

7132:   /* gather (i,j) of nonzeros inserted locally */
7133:   for (cp = 0, ncoo_d = 0; cp < mmdata->cp; cp++) {
7134:     Mat_SeqAIJ     *mm = (Mat_SeqAIJ*)mp[cp]->data;
7135:     PetscInt       *coi = coo_i + ncoo_d;
7136:     PetscInt       *coj = coo_j + ncoo_d;
7137:     const PetscInt *jj  = mm->j;
7138:     const PetscInt *ii  = mm->i;
7139:     const PetscInt *cmap = cmapa[cp];
7140:     const PetscInt *rmap = rmapa[cp];
7141:     const PetscInt mr = mp[cp]->rmap->n;
7142:     const PetscInt rs = C->rmap->rstart;
7143:     const PetscInt re = C->rmap->rend;
7144:     const PetscInt cs = C->cmap->rstart;

7146:     if (mptmp[cp]) continue;
7147:     if (rmapt[cp] == 1) { /* consecutive rows */
7148:       /* fill coo_i */
7149:       for (i = 0; i < mr; i++) {
7150:         const PetscInt gr = i + rs;
7151:         for (j = ii[i]; j < ii[i+1]; j++) coi[j] = gr;
7152:       }
7153:       /* fill coo_j */
7154:       if (!cmapt[cp]) { /* type-0, already global */
7155:         PetscArraycpy(coj,jj,mm->nz);
7156:       } else if (cmapt[cp] == 1) { /* type-1, local to global for consecutive columns of C */
7157:         for (j = 0; j < mm->nz; j++) coj[j] = jj[j] + cs; /* lid + col start */
7158:       } else { /* type-2, local to global for sparse columns */
7159:         for (j = 0; j < mm->nz; j++) coj[j] = cmap[jj[j]];
7160:       }
7161:       ncoo_d += mm->nz;
7162:     } else if (rmapt[cp] == 2) { /* sparse rows */
7163:       for (i = 0; i < mr; i++) {
7164:         const PetscInt *jj = mm->j + ii[i];
7165:         const PetscInt gr  = rmap[i];
7166:         const PetscInt nz  = ii[i+1] - ii[i];
7167:         if (gr >= rs && gr < re) { /* local rows */
7168:           for (j = ii[i]; j < ii[i+1]; j++) *coi++ = gr;
7169:           if (!cmapt[cp]) { /* type-0, already global */
7170:             for (j = 0; j < nz; j++) *coj++ = jj[j];
7171:           } else if (cmapt[cp] == 1) { /* local to global for owned columns of C */
7172:             for (j = 0; j < nz; j++) *coj++ = jj[j] + cs;
7173:           } else { /* type-2, local to global for sparse columns */
7174:             for (j = 0; j < nz; j++) *coj++ = cmap[jj[j]];
7175:           }
7176:           ncoo_d += nz;
7177:         }
7178:       }
7179:     }
7180:   }
7181:   if (glob) {
7182:     ISRestoreIndices(glob,&globidx);
7183:   }
7184:   ISDestroy(&glob);
7185:   if (P_oth_l2g) {
7186:     ISLocalToGlobalMappingRestoreIndices(P_oth_l2g,&P_oth_idx);
7187:   }
7188:   ISLocalToGlobalMappingDestroy(&P_oth_l2g);
7189:   /* allocate an array to store all nonzeros (inserted locally or remotely) belonging to this proc */
7190:   PetscSFMalloc(mmdata->sf,mmdata->mtype,ncoo*sizeof(PetscScalar),(void**)&mmdata->coo_v);

7192:   /* preallocate with COO data */
7193:   MatSetPreallocationCOO(C,ncoo,coo_i,coo_j);
7194:   PetscFree2(coo_i,coo_j);
7195:   return 0;
7196: }
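/* A minimal sketch (illustrative only, kept out of the build) of the user-level COO assembly
   interface that the symbolic phase above targets: MatSetPreallocationCOO() fixes the (i,j)
   pattern once, after which every numeric phase only has to supply a value array in the same
   ordering through MatSetValuesCOO().  The function name and index/value data below are made up. */
#if 0
static PetscErrorCode ExampleCOOAssembly(Mat C)
{
  PetscInt    coo_i[] = {0,0,1};           /* global row indices of the nonzeros (sample data) */
  PetscInt    coo_j[] = {0,2,1};           /* global column indices, same ordering             */
  PetscScalar coo_v[] = {1.0,2.0,3.0};     /* values, same ordering                            */

  MatSetPreallocationCOO(C,3,coo_i,coo_j); /* symbolic: lock in the nonzero pattern            */
  MatSetValuesCOO(C,coo_v,INSERT_VALUES);  /* numeric: may be repeated with fresh values       */
  return 0;
}
#endif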

7198: PetscErrorCode MatProductSetFromOptions_MPIAIJBACKEND(Mat mat)
7199: {
7200:   Mat_Product *product = mat->product;
7201: #if defined(PETSC_HAVE_DEVICE)
7202:   PetscBool    match   = PETSC_FALSE;
7203:   PetscBool    usecpu  = PETSC_FALSE;
7204: #else
7205:   PetscBool    match   = PETSC_TRUE;
7206: #endif

7208:   MatCheckProduct(mat,1);
7209: #if defined(PETSC_HAVE_DEVICE)
7210:   if (!product->A->boundtocpu && !product->B->boundtocpu) {
7211:     PetscObjectTypeCompare((PetscObject)product->B,((PetscObject)product->A)->type_name,&match);
7212:   }
7213:   if (match) { /* we can always fall back to the CPU if requested */
7215:     switch (product->type) {
7216:     case MATPRODUCT_AB:
7217:       if (product->api_user) {
7218:         PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatMatMult","Mat");
7219:         PetscOptionsBool("-matmatmult_backend_cpu","Use CPU code","MatMatMult",usecpu,&usecpu,NULL);
7220:         PetscOptionsEnd();
7221:       } else {
7222:         PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatProduct_AB","Mat");
7223:         PetscOptionsBool("-mat_product_algorithm_backend_cpu","Use CPU code","MatMatMult",usecpu,&usecpu,NULL);
7224:         PetscOptionsEnd();
7225:       }
7226:       break;
7227:     case MATPRODUCT_AtB:
7228:       if (product->api_user) {
7229:         PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatTransposeMatMult","Mat");
7230:         PetscOptionsBool("-mattransposematmult_backend_cpu","Use CPU code","MatTransposeMatMult",usecpu,&usecpu,NULL);
7231:         PetscOptionsEnd();
7232:       } else {
7233:         PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatProduct_AtB","Mat");
7234:         PetscOptionsBool("-mat_product_algorithm_backend_cpu","Use CPU code","MatTransposeMatMult",usecpu,&usecpu,NULL);
7235:         PetscOptionsEnd();
7236:       }
7237:       break;
7238:     case MATPRODUCT_PtAP:
7239:       if (product->api_user) {
7240:         PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatPtAP","Mat");
7241:         PetscOptionsBool("-matptap_backend_cpu","Use CPU code","MatPtAP",usecpu,&usecpu,NULL);
7242:         PetscOptionsEnd();
7243:       } else {
7244:         PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatProduct_PtAP","Mat");
7245:         PetscOptionsBool("-mat_product_algorithm_backend_cpu","Use CPU code","MatPtAP",usecpu,&usecpu,NULL);
7246:         PetscOptionsEnd();
7247:       }
7248:       break;
7249:     default:
7250:       break;
7251:     }
7252:     match = (PetscBool)!usecpu;
7253:   }
7254: #endif
7255:   if (match) {
7256:     switch (product->type) {
7257:     case MATPRODUCT_AB:
7258:     case MATPRODUCT_AtB:
7259:     case MATPRODUCT_PtAP:
7260:       mat->ops->productsymbolic = MatProductSymbolic_MPIAIJBACKEND;
7261:       break;
7262:     default:
7263:       break;
7264:     }
7265:   }
7266:   /* fallback to MPIAIJ ops */
7267:   if (!mat->ops->productsymbolic) MatProductSetFromOptions_MPIAIJ(mat);
7268:   return 0;
7269: }
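/* Example (illustrative): the CPU fallback handled above can be requested at run time with the
   options parsed in this routine, e.g.
     -matmatmult_backend_cpu              for products created through MatMatMult()
     -mattransposematmult_backend_cpu     for products created through MatTransposeMatMult()
     -matptap_backend_cpu                 for products created through MatPtAP()
     -mat_product_algorithm_backend_cpu   for products driven through the MatProduct API directly */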

7271: /*
7272:     Special version for direct calls from Fortran
7273: */
7274: #include <petsc/private/fortranimpl.h>

7276: /* Change these macros so they can be used in a void function */
7277: /* Identical to PetscCallVoid, except it assigns to *_ierr */
7278: #undef  PetscCall
7279: #define PetscCall(...) do {                                                                    \
7280:     PetscErrorCode ierr_msv_mpiaij = __VA_ARGS__;                                              \
7281:     if (PetscUnlikely(ierr_msv_mpiaij)) {                                                      \
7282:       *_ierr = PetscError(PETSC_COMM_SELF,__LINE__,PETSC_FUNCTION_NAME,__FILE__,ierr_msv_mpiaij,PETSC_ERROR_REPEAT," "); \
7283:       return;                                                                                  \
7284:     }                                                                                          \
7285:   } while (0)

7287: #undef SETERRQ
7288: #define SETERRQ(comm,ierr,...) do {                                                            \
7289:     *_ierr = PetscError(comm,__LINE__,PETSC_FUNCTION_NAME,__FILE__,ierr,PETSC_ERROR_INITIAL,__VA_ARGS__); \
7290:     return;                                                                                    \
7291:   } while (0)

7293: #if defined(PETSC_HAVE_FORTRAN_CAPS)
7294: #define matsetvaluesmpiaij_ MATSETVALUESMPIAIJ
7295: #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
7296: #define matsetvaluesmpiaij_ matsetvaluesmpiaij
7297: #else
7298: #endif
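/* Fortran name mangling: compilers that upper-case symbols get MATSETVALUESMPIAIJ, compilers that add
   no underscore get matsetvaluesmpiaij, and the default single-trailing-underscore convention already
   matches the C symbol matsetvaluesmpiaij_ defined below, hence the empty #else branch. */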
7299: PETSC_EXTERN void matsetvaluesmpiaij_(Mat *mmat,PetscInt *mm,const PetscInt im[],PetscInt *mn,const PetscInt in[],const PetscScalar v[],InsertMode *maddv,PetscErrorCode *_ierr)
7300: {
7301:   Mat          mat  = *mmat;
7302:   PetscInt     m    = *mm, n = *mn;
7303:   InsertMode   addv = *maddv;
7304:   Mat_MPIAIJ  *aij  = (Mat_MPIAIJ*)mat->data;
7305:   PetscScalar  value;

7307:   MatCheckPreallocated(mat,1);
7308:   if (mat->insertmode == NOT_SET_VALUES) mat->insertmode = addv;
7310:   {
7311:     PetscInt  i,j,rstart  = mat->rmap->rstart,rend = mat->rmap->rend;
7312:     PetscInt  cstart      = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
7313:     PetscBool roworiented = aij->roworiented;

7315:     /* Some Variables required in the macro */
7316:     Mat        A                    = aij->A;
7317:     Mat_SeqAIJ *a                   = (Mat_SeqAIJ*)A->data;
7318:     PetscInt   *aimax               = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
7319:     MatScalar  *aa;
7320:     PetscBool  ignorezeroentries    = (((a->ignorezeroentries)&&(addv==ADD_VALUES)) ? PETSC_TRUE : PETSC_FALSE);
7321:     Mat        B                    = aij->B;
7322:     Mat_SeqAIJ *b                   = (Mat_SeqAIJ*)B->data;
7323:     PetscInt   *bimax               = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
7324:     MatScalar  *ba;
7325:     /* This variable below is only for the PETSC_HAVE_VIENNACL or PETSC_HAVE_CUDA cases, but we define it in all cases because we
7326:      * cannot use "#if defined" inside a macro. */
7327:     PETSC_UNUSED PetscBool inserted = PETSC_FALSE;

7329:     PetscInt  *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
7330:     PetscInt  nonew = a->nonew;
7331:     MatScalar *ap1,*ap2;

7333:     MatSeqAIJGetArray(A,&aa);
7334:     MatSeqAIJGetArray(B,&ba);
7335:     for (i=0; i<m; i++) {
7336:       if (im[i] < 0) continue;
7338:       if (im[i] >= rstart && im[i] < rend) {
7339:         row      = im[i] - rstart;
7340:         lastcol1 = -1;
7341:         rp1      = aj + ai[row];
7342:         ap1      = aa + ai[row];
7343:         rmax1    = aimax[row];
7344:         nrow1    = ailen[row];
7345:         low1     = 0;
7346:         high1    = nrow1;
7347:         lastcol2 = -1;
7348:         rp2      = bj + bi[row];
7349:         ap2      = ba + bi[row];
7350:         rmax2    = bimax[row];
7351:         nrow2    = bilen[row];
7352:         low2     = 0;
7353:         high2    = nrow2;

7355:         for (j=0; j<n; j++) {
7356:           if (roworiented) value = v[i*n+j];
7357:           else value = v[i+j*m];
7358:           if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && im[i] != in[j]) continue;
7359:           if (in[j] >= cstart && in[j] < cend) {
7360:             col = in[j] - cstart;
7361:             MatSetValues_SeqAIJ_A_Private(row,col,value,addv,im[i],in[j]);
7362:           } else if (in[j] < 0) continue;
7363:           else if (PetscUnlikelyDebug(in[j] >= mat->cmap->N)) {
7364:             /* extra brace on SETERRQ() is required for --with-errorchecking=0 - due to the next 'else' clause */
7365:             SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);
7366:           } else {
7367:             if (mat->was_assembled) {
7368:               if (!aij->colmap) {
7369:                 MatCreateColmap_MPIAIJ_Private(mat);
7370:               }
7371: #if defined(PETSC_USE_CTABLE)
7372:               PetscTableFind(aij->colmap,in[j]+1,&col);
7373:               col--;
7374: #else
7375:               col = aij->colmap[in[j]] - 1;
7376: #endif
7377:               if (col < 0 && !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
7378:                 MatDisAssemble_MPIAIJ(mat);
7379:                 col  =  in[j];
7380:                 /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
7381:                 B        = aij->B;
7382:                 b        = (Mat_SeqAIJ*)B->data;
7383:                 bimax    = b->imax; bi = b->i; bilen = b->ilen; bj = b->j;
7384:                 rp2      = bj + bi[row];
7385:                 ap2      = ba + bi[row];
7386:                 rmax2    = bimax[row];
7387:                 nrow2    = bilen[row];
7388:                 low2     = 0;
7389:                 high2    = nrow2;
7390:                 bm       = aij->B->rmap->n;
7391:                 ba       = b->a;
7392:                 inserted = PETSC_FALSE;
7393:               }
7394:             } else col = in[j];
7395:             MatSetValues_SeqAIJ_B_Private(row,col,value,addv,im[i],in[j]);
7396:           }
7397:         }
7398:       } else if (!aij->donotstash) {
7399:         if (roworiented) {
7400:           MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
7401:         } else {
7402:           MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
7403:         }
7404:       }
7405:     }
7406:     MatSeqAIJRestoreArray(A,&aa);
7407:     MatSeqAIJRestoreArray(B,&ba);
7408:   }
7409:   return;
7410: }
7411: /* Undefining these here since they were redefined from their original definition above! No
7412:  * other PETSc functions should be defined past this point, as it is impossible to recover the
7413:  * original definitions */
7414: #undef PetscCall
7415: #undef SETERRQ