Actual source code: mpisell.c

  1: #include <../src/mat/impls/aij/mpi/mpiaij.h>
  2: #include <../src/mat/impls/sell/mpi/mpisell.h>
  3: #include <petsc/private/vecimpl.h>
  4: #include <petsc/private/isimpl.h>
  5: #include <petscblaslapack.h>
  6: #include <petscsf.h>

  8: /*MC
  9:    MATSELL - MATSELL = "sell" - A matrix type to be used for sparse matrices.

 11:    This matrix type is identical to `MATSEQSELL` when constructed with a single process communicator,
 12:    and `MATMPISELL` otherwise.  As a result, for single process communicators,
 13:    `MatSeqSELLSetPreallocation()` is supported, and similarly `MatMPISELLSetPreallocation()` is supported
 14:    for communicators controlling multiple processes.  It is recommended that you call both of
 15:    the above preallocation routines for simplicity.

 17:    Options Database Keys:
 18: . -mat_type sell - sets the matrix type to `MATSELL` during a call to `MatSetFromOptions()`

 20:   Level: beginner

 22: .seealso: `Mat`, `MATAIJ`, `MATBAIJ`, `MATSBAIJ`, `MatCreateSELL()`, `MatCreateSeqSELL()`, `MATSEQSELL`, `MATMPISELL`
 23: M*/
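
A minimal usage sketch (not part of mpisell.c) following the recommendation above: create a MATSELL matrix and call both preallocation routines so the same code works for any communicator size. The row-length values 5 and 2 are illustrative only.

#include <petscmat.h>

/* Sketch: on one process the type resolves to MATSEQSELL, on several to
   MATMPISELL; only the preallocation call matching the actual type takes
   effect, so calling both is safe. */
static PetscErrorCode CreateSELLMatrix(MPI_Comm comm, PetscInt N, Mat *A)
{
  PetscFunctionBegin;
  PetscCall(MatCreate(comm, A));
  PetscCall(MatSetSizes(*A, PETSC_DECIDE, PETSC_DECIDE, N, N));
  PetscCall(MatSetType(*A, MATSELL));
  PetscCall(MatSeqSELLSetPreallocation(*A, 5, NULL));          /* single-process case */
  PetscCall(MatMPISELLSetPreallocation(*A, 5, NULL, 2, NULL)); /* multi-process case  */
  PetscFunctionReturn(PETSC_SUCCESS);
}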

 25: static PetscErrorCode MatDiagonalSet_MPISELL(Mat Y, Vec D, InsertMode is)
 26: {
 27:   Mat_MPISELL *sell = (Mat_MPISELL *)Y->data;

 29:   PetscFunctionBegin;
 30:   if (Y->assembled && Y->rmap->rstart == Y->cmap->rstart && Y->rmap->rend == Y->cmap->rend) {
 31:     PetscCall(MatDiagonalSet(sell->A, D, is));
 32:   } else {
 33:     PetscCall(MatDiagonalSet_Default(Y, D, is));
 34:   }
 35:   PetscFunctionReturn(PETSC_SUCCESS);
 36: }

 38: /*
 39:   Local utility routine that creates a mapping from the global column
 40: number to the local number in the off-diagonal part of the local
 41: storage of the matrix.  When PETSC_USE_CTABLE is used this is scalable at
 42: a slightly higher hash table cost; without it, it is not scalable (each processor
 43: has an order-N integer array) but is fast to access.
 44: */
 45: PetscErrorCode MatCreateColmap_MPISELL_Private(Mat mat)
 46: {
 47:   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
 48:   PetscInt     n    = sell->B->cmap->n, i;

 50:   PetscFunctionBegin;
 51:   PetscCheck(sell->garray, PETSC_COMM_SELF, PETSC_ERR_PLIB, "MPISELL Matrix was assembled but is missing garray");
 52: #if defined(PETSC_USE_CTABLE)
 53:   PetscCall(PetscHMapICreateWithSize(n, &sell->colmap));
 54:   for (i = 0; i < n; i++) PetscCall(PetscHMapISet(sell->colmap, sell->garray[i] + 1, i + 1));
 55: #else
 56:   PetscCall(PetscCalloc1(mat->cmap->N + 1, &sell->colmap));
 57:   for (i = 0; i < n; i++) sell->colmap[sell->garray[i]] = i + 1;
 58: #endif
 59:   PetscFunctionReturn(PETSC_SUCCESS);
 60: }
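
A sketch of the convention established here (not part of mpisell.c; the helper names are hypothetical): garray[] maps a local off-diagonal column to its global index, and colmap inverts that map, storing local+1 so that 0 can serve as the "not present" sentinel. This is exactly the lookup performed in MatSetValues_MPISELL() and MatGetValues_MPISELL() below.

/* Hypothetical helpers showing the two directions of the column map. */
static inline PetscInt OffDiagLocalToGlobal(Mat_MPISELL *sell, PetscInt lcol)
{
  return sell->garray[lcol]; /* local off-diagonal column -> global column */
}

static PetscErrorCode OffDiagGlobalToLocal(Mat_MPISELL *sell, PetscInt gcol, PetscInt *lcol)
{
  PetscFunctionBegin;
#if defined(PETSC_USE_CTABLE)
  PetscCall(PetscHMapIGetWithDefault(sell->colmap, gcol + 1, 0, lcol));
#else
  *lcol = sell->colmap[gcol];
#endif
  (*lcol)--; /* stored as local+1; a missing column (0) becomes -1 */
  PetscFunctionReturn(PETSC_SUCCESS);
}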

 62: #define MatSetValues_SeqSELL_A_Private(row, col, value, addv, orow, ocol) \
 63:   { \
 64:     if (col <= lastcol1) low1 = 0; \
 65:     else high1 = nrow1; \
 66:     lastcol1 = col; \
 67:     while (high1 - low1 > 5) { \
 68:       t = (low1 + high1) / 2; \
 69:       if (cp1[sliceheight * t] > col) high1 = t; \
 70:       else low1 = t; \
 71:     } \
 72:     for (_i = low1; _i < high1; _i++) { \
 73:       if (cp1[sliceheight * _i] > col) break; \
 74:       if (cp1[sliceheight * _i] == col) { \
 75:         if (addv == ADD_VALUES) vp1[sliceheight * _i] += value; \
 76:         else vp1[sliceheight * _i] = value; \
 77:         inserted = PETSC_TRUE; \
 78:         goto a_noinsert; \
 79:       } \
 80:     } \
 81:     if (value == 0.0 && ignorezeroentries) { \
 82:       low1  = 0; \
 83:       high1 = nrow1; \
 84:       goto a_noinsert; \
 85:     } \
 86:     if (nonew == 1) { \
 87:       low1  = 0; \
 88:       high1 = nrow1; \
 89:       goto a_noinsert; \
 90:     } \
 91:     PetscCheck(nonew != -1, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Inserting a new nonzero at global row/column (%" PetscInt_FMT ", %" PetscInt_FMT ") into matrix", orow, ocol); \
 92:     MatSeqXSELLReallocateSELL(A, am, 1, nrow1, a->sliidx, a->sliceheight, row / sliceheight, row, col, a->colidx, a->val, cp1, vp1, nonew, MatScalar); \
 93:     /* shift up all the later entries in this row */ \
 94:     for (ii = nrow1 - 1; ii >= _i; ii--) { \
 95:       cp1[sliceheight * (ii + 1)] = cp1[sliceheight * ii]; \
 96:       vp1[sliceheight * (ii + 1)] = vp1[sliceheight * ii]; \
 97:     } \
 98:     cp1[sliceheight * _i] = col; \
 99:     vp1[sliceheight * _i] = value; \
100:     a->nz++; \
101:     nrow1++; \
102:   a_noinsert:; \
103:     a->rlen[row] = nrow1; \
104:   }

106: #define MatSetValues_SeqSELL_B_Private(row, col, value, addv, orow, ocol) \
107:   { \
108:     if (col <= lastcol2) low2 = 0; \
109:     else high2 = nrow2; \
110:     lastcol2 = col; \
111:     while (high2 - low2 > 5) { \
112:       t = (low2 + high2) / 2; \
113:       if (cp2[sliceheight * t] > col) high2 = t; \
114:       else low2 = t; \
115:     } \
116:     for (_i = low2; _i < high2; _i++) { \
117:       if (cp2[sliceheight * _i] > col) break; \
118:       if (cp2[sliceheight * _i] == col) { \
119:         if (addv == ADD_VALUES) vp2[sliceheight * _i] += value; \
120:         else vp2[sliceheight * _i] = value; \
121:         inserted = PETSC_TRUE; \
122:         goto b_noinsert; \
123:       } \
124:     } \
125:     if (value == 0.0 && ignorezeroentries) { \
126:       low2  = 0; \
127:       high2 = nrow2; \
128:       goto b_noinsert; \
129:     } \
130:     if (nonew == 1) { \
131:       low2  = 0; \
132:       high2 = nrow2; \
133:       goto b_noinsert; \
134:     } \
135:     PetscCheck(nonew != -1, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Inserting a new nonzero at global row/column (%" PetscInt_FMT ", %" PetscInt_FMT ") into matrix", orow, ocol); \
136:     MatSeqXSELLReallocateSELL(B, bm, 1, nrow2, b->sliidx, b->sliceheight, row / sliceheight, row, col, b->colidx, b->val, cp2, vp2, nonew, MatScalar); \
137:     /* shift up all the later entries in this row */ \
138:     for (ii = nrow2 - 1; ii >= _i; ii--) { \
139:       cp2[sliceheight * (ii + 1)] = cp2[sliceheight * ii]; \
140:       vp2[sliceheight * (ii + 1)] = vp2[sliceheight * ii]; \
141:     } \
142:     cp2[sliceheight * _i] = col; \
143:     vp2[sliceheight * _i] = value; \
144:     b->nz++; \
145:     nrow2++; \
146:   b_noinsert:; \
147:     b->rlen[row] = nrow2; \
148:   }
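
Both macros rely on the sliced ELLPACK (SELL) layout: rows are grouped into slices of sliceheight rows, each slice is stored column-major, so consecutive entries of one row sit sliceheight apart. A sketch of the addressing they and MatSetValues_MPISELL() use (illustrative, not part of mpisell.c):

/* Offset of the k-th stored entry of 'row' in a Mat_SeqSELL matrix: the
   same arithmetic as shift1/shift2 and the cp1[sliceheight * k] indexing
   in the macros above. */
static inline PetscInt SELLEntryOffset(const Mat_SeqSELL *a, PetscInt row, PetscInt k)
{
  PetscInt slice = row / a->sliceheight;                    /* which slice the row lives in   */
  PetscInt base  = a->sliidx[slice] + row % a->sliceheight; /* first entry of the row         */
  return base + k * a->sliceheight;                         /* entries strided by sliceheight */
}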

150: static PetscErrorCode MatSetValues_MPISELL(Mat mat, PetscInt m, const PetscInt im[], PetscInt n, const PetscInt in[], const PetscScalar v[], InsertMode addv)
151: {
152:   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
153:   PetscScalar  value;
154:   PetscInt     i, j, rstart = mat->rmap->rstart, rend = mat->rmap->rend, shift1, shift2;
155:   PetscInt     cstart = mat->cmap->rstart, cend = mat->cmap->rend, row, col;
156:   PetscBool    roworiented = sell->roworiented;

 158:   /* Some variables required in the macros */
159:   Mat          A                 = sell->A;
160:   Mat_SeqSELL *a                 = (Mat_SeqSELL *)A->data;
161:   PetscBool    ignorezeroentries = a->ignorezeroentries, found;
162:   Mat          B                 = sell->B;
163:   Mat_SeqSELL *b                 = (Mat_SeqSELL *)B->data;
164:   PetscInt    *cp1, *cp2, ii, _i, nrow1, nrow2, low1, high1, low2, high2, t, lastcol1, lastcol2, sliceheight = a->sliceheight;
165:   MatScalar   *vp1, *vp2;

167:   PetscFunctionBegin;
168:   for (i = 0; i < m; i++) {
169:     if (im[i] < 0) continue;
170:     PetscCheck(im[i] < mat->rmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Row too large: row %" PetscInt_FMT " max %" PetscInt_FMT, im[i], mat->rmap->N - 1);
171:     if (im[i] >= rstart && im[i] < rend) {
172:       row      = im[i] - rstart;
173:       lastcol1 = -1;
174:       shift1   = a->sliidx[row / sliceheight] + (row % sliceheight); /* starting index of the row */
175:       cp1      = PetscSafePointerPlusOffset(a->colidx, shift1);
176:       vp1      = PetscSafePointerPlusOffset(a->val, shift1);
177:       nrow1    = a->rlen[row];
178:       low1     = 0;
179:       high1    = nrow1;
180:       lastcol2 = -1;
181:       shift2   = b->sliidx[row / sliceheight] + (row % sliceheight); /* starting index of the row */
182:       cp2      = PetscSafePointerPlusOffset(b->colidx, shift2);
183:       vp2      = PetscSafePointerPlusOffset(b->val, shift2);
184:       nrow2    = b->rlen[row];
185:       low2     = 0;
186:       high2    = nrow2;

188:       for (j = 0; j < n; j++) {
189:         if (roworiented) value = v[i * n + j];
190:         else value = v[i + j * m];
191:         if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES)) continue;
192:         if (in[j] >= cstart && in[j] < cend) {
193:           col = in[j] - cstart;
194:           MatSetValue_SeqSELL_Private(A, row, col, value, addv, im[i], in[j], cp1, vp1, lastcol1, low1, high1); /* set one value */
195: #if defined(PETSC_HAVE_CUDA)
196:           if (A->offloadmask != PETSC_OFFLOAD_UNALLOCATED && found) A->offloadmask = PETSC_OFFLOAD_CPU;
197: #endif
198:         } else if (in[j] < 0) {
199:           continue;
200:         } else {
201:           PetscCheck(in[j] < mat->cmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Column too large: col %" PetscInt_FMT " max %" PetscInt_FMT, in[j], mat->cmap->N - 1);
202:           if (mat->was_assembled) {
203:             if (!sell->colmap) PetscCall(MatCreateColmap_MPISELL_Private(mat));
204: #if defined(PETSC_USE_CTABLE)
205:             PetscCall(PetscHMapIGetWithDefault(sell->colmap, in[j] + 1, 0, &col));
206:             col--;
207: #else
208:             col = sell->colmap[in[j]] - 1;
209: #endif
210:             if (col < 0 && !((Mat_SeqSELL *)sell->B->data)->nonew) {
211:               PetscCall(MatDisAssemble_MPISELL(mat));
212:               col = in[j];
213:               /* Reinitialize the variables required by MatSetValues_SeqSELL_B_Private() */
214:               B      = sell->B;
215:               b      = (Mat_SeqSELL *)B->data;
216:               shift2 = b->sliidx[row / sliceheight] + (row % sliceheight); /* starting index of the row */
217:               cp2    = b->colidx + shift2;
218:               vp2    = b->val + shift2;
219:               nrow2  = b->rlen[row];
220:               low2   = 0;
221:               high2  = nrow2;
222:               found  = PETSC_FALSE;
223:             } else {
224:               PetscCheck(col >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Inserting a new nonzero at global row/column (%" PetscInt_FMT ", %" PetscInt_FMT ") into matrix", im[i], in[j]);
225:             }
226:           } else col = in[j];
227:           MatSetValue_SeqSELL_Private(B, row, col, value, addv, im[i], in[j], cp2, vp2, lastcol2, low2, high2); /* set one value */
228: #if defined(PETSC_HAVE_CUDA)
229:           if (B->offloadmask != PETSC_OFFLOAD_UNALLOCATED && found) B->offloadmask = PETSC_OFFLOAD_CPU;
230: #endif
231:         }
232:       }
233:     } else {
234:       PetscCheck(!mat->nooffprocentries, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Setting off process row %" PetscInt_FMT " even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set", im[i]);
235:       if (!sell->donotstash) {
236:         mat->assembled = PETSC_FALSE;
237:         if (roworiented) {
238:           PetscCall(MatStashValuesRow_Private(&mat->stash, im[i], n, in, v + i * n, (PetscBool)(ignorezeroentries && (addv == ADD_VALUES))));
239:         } else {
240:           PetscCall(MatStashValuesCol_Private(&mat->stash, im[i], n, in, v + i, m, (PetscBool)(ignorezeroentries && (addv == ADD_VALUES))));
241:         }
242:       }
243:     }
244:   }
245:   PetscFunctionReturn(PETSC_SUCCESS);
246: }
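
As the final branch above shows, entries destined for rows owned by other processes are stashed, not applied immediately; they reach their owner only during assembly. A caller-side sketch (illustrative values; A is assumed to be an existing MATMPISELL matrix):

PetscInt    row = 0, col = 0; /* row may be owned by another process */
PetscScalar v   = 1.0;
PetscCall(MatSetValues(A, 1, &row, 1, &col, &v, ADD_VALUES));
PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY)); /* sends stashed off-process entries */
PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));   /* receives and inserts them */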

248: static PetscErrorCode MatGetValues_MPISELL(Mat mat, PetscInt m, const PetscInt idxm[], PetscInt n, const PetscInt idxn[], PetscScalar v[])
249: {
250:   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
251:   PetscInt     i, j, rstart = mat->rmap->rstart, rend = mat->rmap->rend;
252:   PetscInt     cstart = mat->cmap->rstart, cend = mat->cmap->rend, row, col;

254:   PetscFunctionBegin;
255:   for (i = 0; i < m; i++) {
256:     if (idxm[i] < 0) continue; /* negative row */
257:     PetscCheck(idxm[i] < mat->rmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Row too large: row %" PetscInt_FMT " max %" PetscInt_FMT, idxm[i], mat->rmap->N - 1);
258:     if (idxm[i] >= rstart && idxm[i] < rend) {
259:       row = idxm[i] - rstart;
260:       for (j = 0; j < n; j++) {
261:         if (idxn[j] < 0) continue; /* negative column */
262:         PetscCheck(idxn[j] < mat->cmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Column too large: col %" PetscInt_FMT " max %" PetscInt_FMT, idxn[j], mat->cmap->N - 1);
263:         if (idxn[j] >= cstart && idxn[j] < cend) {
264:           col = idxn[j] - cstart;
265:           PetscCall(MatGetValues(sell->A, 1, &row, 1, &col, v + i * n + j));
266:         } else {
267:           if (!sell->colmap) PetscCall(MatCreateColmap_MPISELL_Private(mat));
268: #if defined(PETSC_USE_CTABLE)
269:           PetscCall(PetscHMapIGetWithDefault(sell->colmap, idxn[j] + 1, 0, &col));
270:           col--;
271: #else
272:           col = sell->colmap[idxn[j]] - 1;
273: #endif
274:           if ((col < 0) || (sell->garray[col] != idxn[j])) *(v + i * n + j) = 0.0;
275:           else PetscCall(MatGetValues(sell->B, 1, &row, 1, &col, v + i * n + j));
276:         }
277:       }
278:     } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "Only local values currently supported");
279:   }
280:   PetscFunctionReturn(PETSC_SUCCESS);
281: }
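
Note the SETERRQ above: unlike setting values, retrieving them is limited to locally owned rows (any column, including off-diagonal ones, is fine). A safe query pattern (sketch; A is assumed to be an existing matrix):

PetscInt    rstart, rend, row, col = 0;
PetscScalar val;
PetscCall(MatGetOwnershipRange(A, &rstart, &rend));
for (row = rstart; row < rend; row++) { /* only rows this process owns */
  PetscCall(MatGetValues(A, 1, &row, 1, &col, &val));
}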

283: static PetscErrorCode MatAssemblyBegin_MPISELL(Mat mat, MatAssemblyType mode)
284: {
285:   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
286:   PetscInt     nstash, reallocs;

288:   PetscFunctionBegin;
289:   if (sell->donotstash || mat->nooffprocentries) PetscFunctionReturn(PETSC_SUCCESS);

291:   PetscCall(MatStashScatterBegin_Private(mat, &mat->stash, mat->rmap->range));
292:   PetscCall(MatStashGetInfo_Private(&mat->stash, &nstash, &reallocs));
293:   PetscCall(PetscInfo(sell->A, "Stash has %" PetscInt_FMT " entries, uses %" PetscInt_FMT " mallocs.\n", nstash, reallocs));
294:   PetscFunctionReturn(PETSC_SUCCESS);
295: }

297: PetscErrorCode MatAssemblyEnd_MPISELL(Mat mat, MatAssemblyType mode)
298: {
299:   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
300:   PetscMPIInt  n;
301:   PetscInt     i, flg;
302:   PetscInt    *row, *col;
303:   PetscScalar *val;
304:   PetscBool    other_disassembled;
305:   /* do not use 'b = (Mat_SeqSELL*)sell->B->data' as B can be reset in disassembly */
306:   PetscFunctionBegin;
307:   if (!sell->donotstash && !mat->nooffprocentries) {
308:     while (1) {
309:       PetscCall(MatStashScatterGetMesg_Private(&mat->stash, &n, &row, &col, &val, &flg));
310:       if (!flg) break;

312:       for (i = 0; i < n; i++) { /* assemble one by one */
313:         PetscCall(MatSetValues_MPISELL(mat, 1, row + i, 1, col + i, val + i, mat->insertmode));
314:       }
315:     }
316:     PetscCall(MatStashScatterEnd_Private(&mat->stash));
317:   }
318: #if defined(PETSC_HAVE_CUDA)
319:   if (mat->offloadmask == PETSC_OFFLOAD_CPU) sell->A->offloadmask = PETSC_OFFLOAD_CPU;
320: #endif
321:   PetscCall(MatAssemblyBegin(sell->A, mode));
322:   PetscCall(MatAssemblyEnd(sell->A, mode));

 324:   /*
 325:      Determine whether any processor has disassembled; if so, we must
 326:      also disassemble ourselves so that we can reassemble.
 327:   */
 328:   /*
 329:      If the nonzero structure of submatrix B cannot change, then no
 330:      processor disassembled, and we can skip this step.
 331:   */
332:   if (!((Mat_SeqSELL *)sell->B->data)->nonew) {
333:     PetscCall(MPIU_Allreduce(&mat->was_assembled, &other_disassembled, 1, MPIU_BOOL, MPI_LAND, PetscObjectComm((PetscObject)mat)));
334:     if (mat->was_assembled && !other_disassembled) PetscCall(MatDisAssemble_MPISELL(mat));
335:   }
336:   if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) PetscCall(MatSetUpMultiply_MPISELL(mat));
337: #if defined(PETSC_HAVE_CUDA)
338:   if (mat->offloadmask == PETSC_OFFLOAD_CPU && sell->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) sell->B->offloadmask = PETSC_OFFLOAD_CPU;
339: #endif
340:   PetscCall(MatAssemblyBegin(sell->B, mode));
341:   PetscCall(MatAssemblyEnd(sell->B, mode));
342:   PetscCall(PetscFree2(sell->rowvalues, sell->rowindices));
343:   sell->rowvalues = NULL;
344:   PetscCall(VecDestroy(&sell->diag));

346:   /* if no new nonzero locations are allowed in matrix then only set the matrix state the first time through */
347:   if ((!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) || !((Mat_SeqSELL *)sell->A->data)->nonew) {
348:     PetscObjectState state = sell->A->nonzerostate + sell->B->nonzerostate;
349:     PetscCall(MPIU_Allreduce(&state, &mat->nonzerostate, 1, MPIU_INT64, MPI_SUM, PetscObjectComm((PetscObject)mat)));
350:   }
351: #if defined(PETSC_HAVE_CUDA)
352:   mat->offloadmask = PETSC_OFFLOAD_BOTH;
353: #endif
354:   PetscFunctionReturn(PETSC_SUCCESS);
355: }

357: static PetscErrorCode MatZeroEntries_MPISELL(Mat A)
358: {
359:   Mat_MPISELL *l = (Mat_MPISELL *)A->data;

361:   PetscFunctionBegin;
362:   PetscCall(MatZeroEntries(l->A));
363:   PetscCall(MatZeroEntries(l->B));
364:   PetscFunctionReturn(PETSC_SUCCESS);
365: }

367: static PetscErrorCode MatMult_MPISELL(Mat A, Vec xx, Vec yy)
368: {
369:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;
370:   PetscInt     nt;

372:   PetscFunctionBegin;
373:   PetscCall(VecGetLocalSize(xx, &nt));
374:   PetscCheck(nt == A->cmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Incompatible partition of A (%" PetscInt_FMT ") and xx (%" PetscInt_FMT ")", A->cmap->n, nt);
 375:   PetscCall(VecScatterBegin(a->Mvctx, xx, a->lvec, INSERT_VALUES, SCATTER_FORWARD)); /* start gathering ghost values of xx */
 376:   PetscCall((*a->A->ops->mult)(a->A, xx, yy));                                       /* overlap: diagonal-block product */
 377:   PetscCall(VecScatterEnd(a->Mvctx, xx, a->lvec, INSERT_VALUES, SCATTER_FORWARD));   /* finish gathering ghost values */
 378:   PetscCall((*a->B->ops->multadd)(a->B, a->lvec, yy, yy));                           /* add off-diagonal-block contribution */
379:   PetscFunctionReturn(PETSC_SUCCESS);
380: }
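
The routine computes y = A_d*x_local + B_o*x_ghost, hiding the ghost-value scatter behind the diagonal-block product. Callers never see this split (sketch; A is assumed to be an existing matrix):

Vec x, y;
PetscCall(MatCreateVecs(A, &x, &y)); /* x conforms to the columns, y to the rows */
PetscCall(VecSet(x, 1.0));
PetscCall(MatMult(A, x, y));         /* dispatches to MatMult_MPISELL() for MATMPISELL */
PetscCall(VecDestroy(&x));
PetscCall(VecDestroy(&y));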

382: static PetscErrorCode MatMultDiagonalBlock_MPISELL(Mat A, Vec bb, Vec xx)
383: {
384:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;

386:   PetscFunctionBegin;
387:   PetscCall(MatMultDiagonalBlock(a->A, bb, xx));
388:   PetscFunctionReturn(PETSC_SUCCESS);
389: }

391: static PetscErrorCode MatMultAdd_MPISELL(Mat A, Vec xx, Vec yy, Vec zz)
392: {
393:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;

395:   PetscFunctionBegin;
396:   PetscCall(VecScatterBegin(a->Mvctx, xx, a->lvec, INSERT_VALUES, SCATTER_FORWARD));
397:   PetscCall((*a->A->ops->multadd)(a->A, xx, yy, zz));
398:   PetscCall(VecScatterEnd(a->Mvctx, xx, a->lvec, INSERT_VALUES, SCATTER_FORWARD));
399:   PetscCall((*a->B->ops->multadd)(a->B, a->lvec, zz, zz));
400:   PetscFunctionReturn(PETSC_SUCCESS);
401: }

403: static PetscErrorCode MatMultTranspose_MPISELL(Mat A, Vec xx, Vec yy)
404: {
405:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;

407:   PetscFunctionBegin;
408:   /* do nondiagonal part */
409:   PetscCall((*a->B->ops->multtranspose)(a->B, xx, a->lvec));
410:   /* do local part */
411:   PetscCall((*a->A->ops->multtranspose)(a->A, xx, yy));
412:   /* add partial results together */
413:   PetscCall(VecScatterBegin(a->Mvctx, a->lvec, yy, ADD_VALUES, SCATTER_REVERSE));
414:   PetscCall(VecScatterEnd(a->Mvctx, a->lvec, yy, ADD_VALUES, SCATTER_REVERSE));
415:   PetscFunctionReturn(PETSC_SUCCESS);
416: }

418: static PetscErrorCode MatIsTranspose_MPISELL(Mat Amat, Mat Bmat, PetscReal tol, PetscBool *f)
419: {
420:   MPI_Comm     comm;
421:   Mat_MPISELL *Asell = (Mat_MPISELL *)Amat->data, *Bsell;
422:   Mat          Adia  = Asell->A, Bdia, Aoff, Boff, *Aoffs, *Boffs;
423:   IS           Me, Notme;
424:   PetscInt     M, N, first, last, *notme, i;
425:   PetscMPIInt  size;

427:   PetscFunctionBegin;
428:   /* Easy test: symmetric diagonal block */
429:   Bsell = (Mat_MPISELL *)Bmat->data;
430:   Bdia  = Bsell->A;
431:   PetscCall(MatIsTranspose(Adia, Bdia, tol, f));
432:   if (!*f) PetscFunctionReturn(PETSC_SUCCESS);
433:   PetscCall(PetscObjectGetComm((PetscObject)Amat, &comm));
434:   PetscCallMPI(MPI_Comm_size(comm, &size));
435:   if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);

 437:   /* Hard test: off-diagonal block. This requires a call to MatCreateSubMatrices(). */
438:   PetscCall(MatGetSize(Amat, &M, &N));
439:   PetscCall(MatGetOwnershipRange(Amat, &first, &last));
440:   PetscCall(PetscMalloc1(N - last + first, &notme));
441:   for (i = 0; i < first; i++) notme[i] = i;
442:   for (i = last; i < M; i++) notme[i - last + first] = i;
443:   PetscCall(ISCreateGeneral(MPI_COMM_SELF, N - last + first, notme, PETSC_COPY_VALUES, &Notme));
444:   PetscCall(ISCreateStride(MPI_COMM_SELF, last - first, first, 1, &Me));
445:   PetscCall(MatCreateSubMatrices(Amat, 1, &Me, &Notme, MAT_INITIAL_MATRIX, &Aoffs));
446:   Aoff = Aoffs[0];
447:   PetscCall(MatCreateSubMatrices(Bmat, 1, &Notme, &Me, MAT_INITIAL_MATRIX, &Boffs));
448:   Boff = Boffs[0];
449:   PetscCall(MatIsTranspose(Aoff, Boff, tol, f));
450:   PetscCall(MatDestroyMatrices(1, &Aoffs));
451:   PetscCall(MatDestroyMatrices(1, &Boffs));
452:   PetscCall(ISDestroy(&Me));
453:   PetscCall(ISDestroy(&Notme));
454:   PetscCall(PetscFree(notme));
455:   PetscFunctionReturn(PETSC_SUCCESS);
456: }

458: static PetscErrorCode MatMultTransposeAdd_MPISELL(Mat A, Vec xx, Vec yy, Vec zz)
459: {
460:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;

462:   PetscFunctionBegin;
463:   /* do nondiagonal part */
464:   PetscCall((*a->B->ops->multtranspose)(a->B, xx, a->lvec));
465:   /* do local part */
466:   PetscCall((*a->A->ops->multtransposeadd)(a->A, xx, yy, zz));
467:   /* add partial results together */
468:   PetscCall(VecScatterBegin(a->Mvctx, a->lvec, zz, ADD_VALUES, SCATTER_REVERSE));
469:   PetscCall(VecScatterEnd(a->Mvctx, a->lvec, zz, ADD_VALUES, SCATTER_REVERSE));
470:   PetscFunctionReturn(PETSC_SUCCESS);
471: }

 473: /*
 474:   This only works correctly for square matrices where the subblock A->A is the
 475:   diagonal block.
 476: */
477: static PetscErrorCode MatGetDiagonal_MPISELL(Mat A, Vec v)
478: {
479:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;

481:   PetscFunctionBegin;
482:   PetscCheck(A->rmap->N == A->cmap->N, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Supports only square matrix where A->A is diag block");
483:   PetscCheck(A->rmap->rstart == A->cmap->rstart && A->rmap->rend == A->cmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "row partition must equal col partition");
484:   PetscCall(MatGetDiagonal(a->A, v));
485:   PetscFunctionReturn(PETSC_SUCCESS);
486: }

488: static PetscErrorCode MatScale_MPISELL(Mat A, PetscScalar aa)
489: {
490:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;

492:   PetscFunctionBegin;
493:   PetscCall(MatScale(a->A, aa));
494:   PetscCall(MatScale(a->B, aa));
495:   PetscFunctionReturn(PETSC_SUCCESS);
496: }

498: PetscErrorCode MatDestroy_MPISELL(Mat mat)
499: {
500:   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;

502:   PetscFunctionBegin;
503:   PetscCall(PetscLogObjectState((PetscObject)mat, "Rows=%" PetscInt_FMT ", Cols=%" PetscInt_FMT, mat->rmap->N, mat->cmap->N));
504:   PetscCall(MatStashDestroy_Private(&mat->stash));
505:   PetscCall(VecDestroy(&sell->diag));
506:   PetscCall(MatDestroy(&sell->A));
507:   PetscCall(MatDestroy(&sell->B));
508: #if defined(PETSC_USE_CTABLE)
509:   PetscCall(PetscHMapIDestroy(&sell->colmap));
510: #else
511:   PetscCall(PetscFree(sell->colmap));
512: #endif
513:   PetscCall(PetscFree(sell->garray));
514:   PetscCall(VecDestroy(&sell->lvec));
515:   PetscCall(VecScatterDestroy(&sell->Mvctx));
516:   PetscCall(PetscFree2(sell->rowvalues, sell->rowindices));
517:   PetscCall(PetscFree(sell->ld));
518:   PetscCall(PetscFree(mat->data));

520:   PetscCall(PetscObjectChangeTypeName((PetscObject)mat, NULL));
521:   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatStoreValues_C", NULL));
522:   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatRetrieveValues_C", NULL));
523:   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatIsTranspose_C", NULL));
524:   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatMPISELLSetPreallocation_C", NULL));
525:   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpisell_mpiaij_C", NULL));
526: #if defined(PETSC_HAVE_CUDA)
527:   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpisell_mpisellcuda_C", NULL));
528: #endif
529:   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatDiagonalScaleLocal_C", NULL));
530:   PetscFunctionReturn(PETSC_SUCCESS);
531: }

533: #include <petscdraw.h>
534: static PetscErrorCode MatView_MPISELL_ASCIIorDraworSocket(Mat mat, PetscViewer viewer)
535: {
536:   Mat_MPISELL      *sell = (Mat_MPISELL *)mat->data;
537:   PetscMPIInt       rank = sell->rank, size = sell->size;
538:   PetscBool         isdraw, iascii, isbinary;
539:   PetscViewer       sviewer;
540:   PetscViewerFormat format;

542:   PetscFunctionBegin;
543:   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERDRAW, &isdraw));
544:   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &iascii));
545:   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERBINARY, &isbinary));
546:   if (iascii) {
547:     PetscCall(PetscViewerGetFormat(viewer, &format));
548:     if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
549:       MatInfo   info;
550:       PetscInt *inodes;

552:       PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)mat), &rank));
553:       PetscCall(MatGetInfo(mat, MAT_LOCAL, &info));
554:       PetscCall(MatInodeGetInodeSizes(sell->A, NULL, &inodes, NULL));
555:       PetscCall(PetscViewerASCIIPushSynchronized(viewer));
556:       if (!inodes) {
557:         PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "[%d] Local rows %" PetscInt_FMT " nz %" PetscInt_FMT " nz alloced %" PetscInt_FMT " mem %" PetscInt_FMT ", not using I-node routines\n", rank, mat->rmap->n, (PetscInt)info.nz_used,
558:                                                      (PetscInt)info.nz_allocated, (PetscInt)info.memory));
559:       } else {
560:         PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "[%d] Local rows %" PetscInt_FMT " nz %" PetscInt_FMT " nz alloced %" PetscInt_FMT " mem %" PetscInt_FMT ", using I-node routines\n", rank, mat->rmap->n, (PetscInt)info.nz_used,
561:                                                      (PetscInt)info.nz_allocated, (PetscInt)info.memory));
562:       }
563:       PetscCall(MatGetInfo(sell->A, MAT_LOCAL, &info));
564:       PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "[%d] on-diagonal part: nz %" PetscInt_FMT " \n", rank, (PetscInt)info.nz_used));
565:       PetscCall(MatGetInfo(sell->B, MAT_LOCAL, &info));
566:       PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "[%d] off-diagonal part: nz %" PetscInt_FMT " \n", rank, (PetscInt)info.nz_used));
567:       PetscCall(PetscViewerFlush(viewer));
568:       PetscCall(PetscViewerASCIIPopSynchronized(viewer));
569:       PetscCall(PetscViewerASCIIPrintf(viewer, "Information on VecScatter used in matrix-vector product: \n"));
570:       PetscCall(VecScatterView(sell->Mvctx, viewer));
571:       PetscFunctionReturn(PETSC_SUCCESS);
572:     } else if (format == PETSC_VIEWER_ASCII_INFO) {
573:       PetscInt inodecount, inodelimit, *inodes;
574:       PetscCall(MatInodeGetInodeSizes(sell->A, &inodecount, &inodes, &inodelimit));
575:       if (inodes) {
576:         PetscCall(PetscViewerASCIIPrintf(viewer, "using I-node (on process 0) routines: found %" PetscInt_FMT " nodes, limit used is %" PetscInt_FMT "\n", inodecount, inodelimit));
577:       } else {
578:         PetscCall(PetscViewerASCIIPrintf(viewer, "not using I-node (on process 0) routines\n"));
579:       }
580:       PetscFunctionReturn(PETSC_SUCCESS);
581:     } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
582:       PetscFunctionReturn(PETSC_SUCCESS);
583:     }
584:   } else if (isbinary) {
585:     if (size == 1) {
586:       PetscCall(PetscObjectSetName((PetscObject)sell->A, ((PetscObject)mat)->name));
587:       PetscCall(MatView(sell->A, viewer));
588:     } else {
589:       /* PetscCall(MatView_MPISELL_Binary(mat,viewer)); */
590:     }
591:     PetscFunctionReturn(PETSC_SUCCESS);
592:   } else if (isdraw) {
593:     PetscDraw draw;
594:     PetscBool isnull;
595:     PetscCall(PetscViewerDrawGetDraw(viewer, 0, &draw));
596:     PetscCall(PetscDrawIsNull(draw, &isnull));
597:     if (isnull) PetscFunctionReturn(PETSC_SUCCESS);
598:   }

600:   {
 601:     /* assemble the entire matrix onto the first processor */
602:     Mat          A;
603:     Mat_SeqSELL *Aloc;
604:     PetscInt     M = mat->rmap->N, N = mat->cmap->N, *acolidx, row, col, i, j;
605:     MatScalar   *aval;
606:     PetscBool    isnonzero;

608:     PetscCall(MatCreate(PetscObjectComm((PetscObject)mat), &A));
609:     if (rank == 0) {
610:       PetscCall(MatSetSizes(A, M, N, M, N));
611:     } else {
612:       PetscCall(MatSetSizes(A, 0, 0, M, N));
613:     }
614:     /* This is just a temporary matrix, so explicitly using MATMPISELL is probably best */
615:     PetscCall(MatSetType(A, MATMPISELL));
616:     PetscCall(MatMPISELLSetPreallocation(A, 0, NULL, 0, NULL));
617:     PetscCall(MatSetOption(A, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_FALSE));

619:     /* copy over the A part */
620:     Aloc    = (Mat_SeqSELL *)sell->A->data;
621:     acolidx = Aloc->colidx;
622:     aval    = Aloc->val;
623:     for (i = 0; i < Aloc->totalslices; i++) { /* loop over slices */
624:       for (j = Aloc->sliidx[i]; j < Aloc->sliidx[i + 1]; j++) {
625:         isnonzero = (PetscBool)((j - Aloc->sliidx[i]) / Aloc->sliceheight < Aloc->rlen[i * Aloc->sliceheight + j % Aloc->sliceheight]);
626:         if (isnonzero) { /* check the mask bit */
627:           row = i * Aloc->sliceheight + j % Aloc->sliceheight + mat->rmap->rstart;
 628:           col = *acolidx + mat->cmap->rstart; /* diagonal-block columns are offset by the column ownership start */
629:           PetscCall(MatSetValues(A, 1, &row, 1, &col, aval, INSERT_VALUES));
630:         }
631:         aval++;
632:         acolidx++;
633:       }
634:     }

636:     /* copy over the B part */
637:     Aloc    = (Mat_SeqSELL *)sell->B->data;
638:     acolidx = Aloc->colidx;
639:     aval    = Aloc->val;
640:     for (i = 0; i < Aloc->totalslices; i++) {
641:       for (j = Aloc->sliidx[i]; j < Aloc->sliidx[i + 1]; j++) {
642:         isnonzero = (PetscBool)((j - Aloc->sliidx[i]) / Aloc->sliceheight < Aloc->rlen[i * Aloc->sliceheight + j % Aloc->sliceheight]);
643:         if (isnonzero) {
644:           row = i * Aloc->sliceheight + j % Aloc->sliceheight + mat->rmap->rstart;
645:           col = sell->garray[*acolidx];
646:           PetscCall(MatSetValues(A, 1, &row, 1, &col, aval, INSERT_VALUES));
647:         }
648:         aval++;
649:         acolidx++;
650:       }
651:     }

653:     PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
654:     PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));
655:     /*
656:        Everyone has to call to draw the matrix since the graphics waits are
657:        synchronized across all processors that share the PetscDraw object
658:     */
659:     PetscCall(PetscViewerGetSubViewer(viewer, PETSC_COMM_SELF, &sviewer));
660:     if (rank == 0) {
661:       PetscCall(PetscObjectSetName((PetscObject)((Mat_MPISELL *)A->data)->A, ((PetscObject)mat)->name));
662:       PetscCall(MatView_SeqSELL(((Mat_MPISELL *)A->data)->A, sviewer));
663:     }
664:     PetscCall(PetscViewerRestoreSubViewer(viewer, PETSC_COMM_SELF, &sviewer));
665:     PetscCall(MatDestroy(&A));
666:   }
667:   PetscFunctionReturn(PETSC_SUCCESS);
668: }

670: static PetscErrorCode MatView_MPISELL(Mat mat, PetscViewer viewer)
671: {
672:   PetscBool iascii, isdraw, issocket, isbinary;

674:   PetscFunctionBegin;
675:   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &iascii));
676:   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERDRAW, &isdraw));
677:   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERBINARY, &isbinary));
678:   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERSOCKET, &issocket));
679:   if (iascii || isdraw || isbinary || issocket) PetscCall(MatView_MPISELL_ASCIIorDraworSocket(mat, viewer));
680:   PetscFunctionReturn(PETSC_SUCCESS);
681: }

683: static PetscErrorCode MatGetGhosts_MPISELL(Mat mat, PetscInt *nghosts, const PetscInt *ghosts[])
684: {
685:   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;

687:   PetscFunctionBegin;
688:   PetscCall(MatGetSize(sell->B, NULL, nghosts));
689:   if (ghosts) *ghosts = sell->garray;
690:   PetscFunctionReturn(PETSC_SUCCESS);
691: }

693: static PetscErrorCode MatGetInfo_MPISELL(Mat matin, MatInfoType flag, MatInfo *info)
694: {
695:   Mat_MPISELL   *mat = (Mat_MPISELL *)matin->data;
696:   Mat            A = mat->A, B = mat->B;
697:   PetscLogDouble isend[5], irecv[5];

699:   PetscFunctionBegin;
700:   info->block_size = 1.0;
701:   PetscCall(MatGetInfo(A, MAT_LOCAL, info));

703:   isend[0] = info->nz_used;
704:   isend[1] = info->nz_allocated;
705:   isend[2] = info->nz_unneeded;
706:   isend[3] = info->memory;
707:   isend[4] = info->mallocs;

709:   PetscCall(MatGetInfo(B, MAT_LOCAL, info));

711:   isend[0] += info->nz_used;
712:   isend[1] += info->nz_allocated;
713:   isend[2] += info->nz_unneeded;
714:   isend[3] += info->memory;
715:   isend[4] += info->mallocs;
716:   if (flag == MAT_LOCAL) {
717:     info->nz_used      = isend[0];
718:     info->nz_allocated = isend[1];
719:     info->nz_unneeded  = isend[2];
720:     info->memory       = isend[3];
721:     info->mallocs      = isend[4];
722:   } else if (flag == MAT_GLOBAL_MAX) {
723:     PetscCall(MPIU_Allreduce(isend, irecv, 5, MPIU_PETSCLOGDOUBLE, MPI_MAX, PetscObjectComm((PetscObject)matin)));

725:     info->nz_used      = irecv[0];
726:     info->nz_allocated = irecv[1];
727:     info->nz_unneeded  = irecv[2];
728:     info->memory       = irecv[3];
729:     info->mallocs      = irecv[4];
730:   } else if (flag == MAT_GLOBAL_SUM) {
731:     PetscCall(MPIU_Allreduce(isend, irecv, 5, MPIU_PETSCLOGDOUBLE, MPI_SUM, PetscObjectComm((PetscObject)matin)));

733:     info->nz_used      = irecv[0];
734:     info->nz_allocated = irecv[1];
735:     info->nz_unneeded  = irecv[2];
736:     info->memory       = irecv[3];
737:     info->mallocs      = irecv[4];
738:   }
739:   info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
740:   info->fill_ratio_needed = 0;
741:   info->factor_mallocs    = 0;
742:   PetscFunctionReturn(PETSC_SUCCESS);
743: }

745: static PetscErrorCode MatSetOption_MPISELL(Mat A, MatOption op, PetscBool flg)
746: {
747:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;

749:   PetscFunctionBegin;
750:   switch (op) {
751:   case MAT_NEW_NONZERO_LOCATIONS:
752:   case MAT_NEW_NONZERO_ALLOCATION_ERR:
753:   case MAT_UNUSED_NONZERO_LOCATION_ERR:
754:   case MAT_KEEP_NONZERO_PATTERN:
755:   case MAT_NEW_NONZERO_LOCATION_ERR:
756:   case MAT_USE_INODES:
757:   case MAT_IGNORE_ZERO_ENTRIES:
758:     MatCheckPreallocated(A, 1);
759:     PetscCall(MatSetOption(a->A, op, flg));
760:     PetscCall(MatSetOption(a->B, op, flg));
761:     break;
762:   case MAT_ROW_ORIENTED:
763:     MatCheckPreallocated(A, 1);
764:     a->roworiented = flg;

766:     PetscCall(MatSetOption(a->A, op, flg));
767:     PetscCall(MatSetOption(a->B, op, flg));
768:     break;
769:   case MAT_FORCE_DIAGONAL_ENTRIES:
770:   case MAT_SORTED_FULL:
771:     PetscCall(PetscInfo(A, "Option %s ignored\n", MatOptions[op]));
772:     break;
773:   case MAT_IGNORE_OFF_PROC_ENTRIES:
774:     a->donotstash = flg;
775:     break;
776:   case MAT_SPD:
777:   case MAT_SPD_ETERNAL:
778:     break;
779:   case MAT_SYMMETRIC:
780:     MatCheckPreallocated(A, 1);
781:     PetscCall(MatSetOption(a->A, op, flg));
782:     break;
783:   case MAT_STRUCTURALLY_SYMMETRIC:
784:     MatCheckPreallocated(A, 1);
785:     PetscCall(MatSetOption(a->A, op, flg));
786:     break;
787:   case MAT_HERMITIAN:
788:     MatCheckPreallocated(A, 1);
789:     PetscCall(MatSetOption(a->A, op, flg));
790:     break;
791:   case MAT_SYMMETRY_ETERNAL:
792:     MatCheckPreallocated(A, 1);
793:     PetscCall(MatSetOption(a->A, op, flg));
794:     break;
795:   case MAT_STRUCTURAL_SYMMETRY_ETERNAL:
796:     MatCheckPreallocated(A, 1);
797:     PetscCall(MatSetOption(a->A, op, flg));
798:     break;
799:   default:
800:     SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "unknown option %d", op);
801:   }
802:   PetscFunctionReturn(PETSC_SUCCESS);
803: }

805: static PetscErrorCode MatDiagonalScale_MPISELL(Mat mat, Vec ll, Vec rr)
806: {
807:   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
808:   Mat          a = sell->A, b = sell->B;
809:   PetscInt     s1, s2, s3;

811:   PetscFunctionBegin;
812:   PetscCall(MatGetLocalSize(mat, &s2, &s3));
813:   if (rr) {
814:     PetscCall(VecGetLocalSize(rr, &s1));
815:     PetscCheck(s1 == s3, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "right vector non-conforming local size");
816:     /* Overlap communication with computation. */
817:     PetscCall(VecScatterBegin(sell->Mvctx, rr, sell->lvec, INSERT_VALUES, SCATTER_FORWARD));
818:   }
819:   if (ll) {
820:     PetscCall(VecGetLocalSize(ll, &s1));
821:     PetscCheck(s1 == s2, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "left vector non-conforming local size");
822:     PetscUseTypeMethod(b, diagonalscale, ll, NULL);
823:   }
 824:   /* scale the diagonal block */
825:   PetscUseTypeMethod(a, diagonalscale, ll, rr);

827:   if (rr) {
828:     /* Do a scatter end and then right scale the off-diagonal block */
829:     PetscCall(VecScatterEnd(sell->Mvctx, rr, sell->lvec, INSERT_VALUES, SCATTER_FORWARD));
830:     PetscUseTypeMethod(b, diagonalscale, NULL, sell->lvec);
831:   }
832:   PetscFunctionReturn(PETSC_SUCCESS);
833: }
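
The left scaling needs only local values of ll, but right-scaling the off-diagonal block needs ghost values of rr, hence the scatter that is overlapped with scaling the diagonal block. From the caller's side (sketch; A is assumed to be an existing matrix):

Vec l, r;
PetscCall(MatCreateVecs(A, &r, &l)); /* r conforms to the columns, l to the rows */
PetscCall(VecSet(l, 2.0));
PetscCall(VecSet(r, 0.5));
PetscCall(MatDiagonalScale(A, l, r)); /* A <- diag(l) * A * diag(r) */
PetscCall(VecDestroy(&l));
PetscCall(VecDestroy(&r));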

835: static PetscErrorCode MatSetUnfactored_MPISELL(Mat A)
836: {
837:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;

839:   PetscFunctionBegin;
840:   PetscCall(MatSetUnfactored(a->A));
841:   PetscFunctionReturn(PETSC_SUCCESS);
842: }

844: static PetscErrorCode MatEqual_MPISELL(Mat A, Mat B, PetscBool *flag)
845: {
846:   Mat_MPISELL *matB = (Mat_MPISELL *)B->data, *matA = (Mat_MPISELL *)A->data;
847:   Mat          a, b, c, d;
848:   PetscBool    flg;

850:   PetscFunctionBegin;
851:   a = matA->A;
852:   b = matA->B;
853:   c = matB->A;
854:   d = matB->B;

856:   PetscCall(MatEqual(a, c, &flg));
857:   if (flg) PetscCall(MatEqual(b, d, &flg));
858:   PetscCall(MPIU_Allreduce(&flg, flag, 1, MPIU_BOOL, MPI_LAND, PetscObjectComm((PetscObject)A)));
859:   PetscFunctionReturn(PETSC_SUCCESS);
860: }

862: static PetscErrorCode MatCopy_MPISELL(Mat A, Mat B, MatStructure str)
863: {
864:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;
865:   Mat_MPISELL *b = (Mat_MPISELL *)B->data;

867:   PetscFunctionBegin;
868:   /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */
869:   if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) {
870:     /* because of the column compression in the off-processor part of the matrix a->B,
871:        the number of columns in a->B and b->B may be different, hence we cannot call
872:        the MatCopy() directly on the two parts. If need be, we can provide a more
873:        efficient copy than the MatCopy_Basic() by first uncompressing the a->B matrices
874:        then copying the submatrices */
875:     PetscCall(MatCopy_Basic(A, B, str));
876:   } else {
877:     PetscCall(MatCopy(a->A, b->A, str));
878:     PetscCall(MatCopy(a->B, b->B, str));
879:   }
880:   PetscFunctionReturn(PETSC_SUCCESS);
881: }

883: static PetscErrorCode MatSetUp_MPISELL(Mat A)
884: {
885:   PetscFunctionBegin;
886:   PetscCall(MatMPISELLSetPreallocation(A, PETSC_DEFAULT, NULL, PETSC_DEFAULT, NULL));
887:   PetscFunctionReturn(PETSC_SUCCESS);
888: }

890: static PetscErrorCode MatConjugate_MPISELL(Mat mat)
891: {
892:   PetscFunctionBegin;
893:   if (PetscDefined(USE_COMPLEX)) {
894:     Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;

896:     PetscCall(MatConjugate_SeqSELL(sell->A));
897:     PetscCall(MatConjugate_SeqSELL(sell->B));
898:   }
899:   PetscFunctionReturn(PETSC_SUCCESS);
900: }

902: static PetscErrorCode MatRealPart_MPISELL(Mat A)
903: {
904:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;

906:   PetscFunctionBegin;
907:   PetscCall(MatRealPart(a->A));
908:   PetscCall(MatRealPart(a->B));
909:   PetscFunctionReturn(PETSC_SUCCESS);
910: }

912: static PetscErrorCode MatImaginaryPart_MPISELL(Mat A)
913: {
914:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;

916:   PetscFunctionBegin;
917:   PetscCall(MatImaginaryPart(a->A));
918:   PetscCall(MatImaginaryPart(a->B));
919:   PetscFunctionReturn(PETSC_SUCCESS);
920: }

922: static PetscErrorCode MatInvertBlockDiagonal_MPISELL(Mat A, const PetscScalar **values)
923: {
924:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;

926:   PetscFunctionBegin;
927:   PetscCall(MatInvertBlockDiagonal(a->A, values));
928:   A->factorerrortype = a->A->factorerrortype;
929:   PetscFunctionReturn(PETSC_SUCCESS);
930: }

932: static PetscErrorCode MatSetRandom_MPISELL(Mat x, PetscRandom rctx)
933: {
934:   Mat_MPISELL *sell = (Mat_MPISELL *)x->data;

936:   PetscFunctionBegin;
937:   PetscCall(MatSetRandom(sell->A, rctx));
938:   PetscCall(MatSetRandom(sell->B, rctx));
939:   PetscCall(MatAssemblyBegin(x, MAT_FINAL_ASSEMBLY));
940:   PetscCall(MatAssemblyEnd(x, MAT_FINAL_ASSEMBLY));
941:   PetscFunctionReturn(PETSC_SUCCESS);
942: }

944: static PetscErrorCode MatSetFromOptions_MPISELL(Mat A, PetscOptionItems *PetscOptionsObject)
945: {
946:   PetscFunctionBegin;
947:   PetscOptionsHeadBegin(PetscOptionsObject, "MPISELL options");
948:   PetscOptionsHeadEnd();
949:   PetscFunctionReturn(PETSC_SUCCESS);
950: }

952: static PetscErrorCode MatShift_MPISELL(Mat Y, PetscScalar a)
953: {
954:   Mat_MPISELL *msell = (Mat_MPISELL *)Y->data;
955:   Mat_SeqSELL *sell  = (Mat_SeqSELL *)msell->A->data;

957:   PetscFunctionBegin;
958:   if (!Y->preallocated) {
959:     PetscCall(MatMPISELLSetPreallocation(Y, 1, NULL, 0, NULL));
960:   } else if (!sell->nz) {
961:     PetscInt nonew = sell->nonew;
962:     PetscCall(MatSeqSELLSetPreallocation(msell->A, 1, NULL));
963:     sell->nonew = nonew;
964:   }
965:   PetscCall(MatShift_Basic(Y, a));
966:   PetscFunctionReturn(PETSC_SUCCESS);
967: }

969: static PetscErrorCode MatMissingDiagonal_MPISELL(Mat A, PetscBool *missing, PetscInt *d)
970: {
971:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;

973:   PetscFunctionBegin;
974:   PetscCheck(A->rmap->n == A->cmap->n, PETSC_COMM_SELF, PETSC_ERR_SUP, "Only works for square matrices");
975:   PetscCall(MatMissingDiagonal(a->A, missing, d));
976:   if (d) {
977:     PetscInt rstart;
978:     PetscCall(MatGetOwnershipRange(A, &rstart, NULL));
979:     *d += rstart;
980:   }
981:   PetscFunctionReturn(PETSC_SUCCESS);
982: }

984: static PetscErrorCode MatGetDiagonalBlock_MPISELL(Mat A, Mat *a)
985: {
986:   PetscFunctionBegin;
987:   *a = ((Mat_MPISELL *)A->data)->A;
988:   PetscFunctionReturn(PETSC_SUCCESS);
989: }

991: static PetscErrorCode MatStoreValues_MPISELL(Mat mat)
992: {
993:   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;

995:   PetscFunctionBegin;
996:   PetscCall(MatStoreValues(sell->A));
997:   PetscCall(MatStoreValues(sell->B));
998:   PetscFunctionReturn(PETSC_SUCCESS);
999: }

1001: static PetscErrorCode MatRetrieveValues_MPISELL(Mat mat)
1002: {
1003:   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;

1005:   PetscFunctionBegin;
1006:   PetscCall(MatRetrieveValues(sell->A));
1007:   PetscCall(MatRetrieveValues(sell->B));
1008:   PetscFunctionReturn(PETSC_SUCCESS);
1009: }

1011: static PetscErrorCode MatMPISELLSetPreallocation_MPISELL(Mat B, PetscInt d_rlenmax, const PetscInt d_rlen[], PetscInt o_rlenmax, const PetscInt o_rlen[])
1012: {
1013:   Mat_MPISELL *b;

1015:   PetscFunctionBegin;
1016:   PetscCall(PetscLayoutSetUp(B->rmap));
1017:   PetscCall(PetscLayoutSetUp(B->cmap));
1018:   b = (Mat_MPISELL *)B->data;

1020:   if (!B->preallocated) {
1021:     /* Explicitly create 2 MATSEQSELL matrices. */
1022:     PetscCall(MatCreate(PETSC_COMM_SELF, &b->A));
1023:     PetscCall(MatSetSizes(b->A, B->rmap->n, B->cmap->n, B->rmap->n, B->cmap->n));
1024:     PetscCall(MatSetBlockSizesFromMats(b->A, B, B));
1025:     PetscCall(MatSetType(b->A, MATSEQSELL));
1026:     PetscCall(MatCreate(PETSC_COMM_SELF, &b->B));
1027:     PetscCall(MatSetSizes(b->B, B->rmap->n, B->cmap->N, B->rmap->n, B->cmap->N));
1028:     PetscCall(MatSetBlockSizesFromMats(b->B, B, B));
1029:     PetscCall(MatSetType(b->B, MATSEQSELL));
1030:   }

1032:   PetscCall(MatSeqSELLSetPreallocation(b->A, d_rlenmax, d_rlen));
1033:   PetscCall(MatSeqSELLSetPreallocation(b->B, o_rlenmax, o_rlen));
1034:   B->preallocated  = PETSC_TRUE;
1035:   B->was_assembled = PETSC_FALSE;
1036:   /*
1037:     This is critical for MatAssemblyEnd() to work correctly:
1038:     MatAssemblyBegin() checks it to set up was_assembled,
1039:     and MatAssemblyEnd() checks was_assembled to determine whether to build garray.
1040:   */
1041:   B->assembled = PETSC_FALSE;
1042:   PetscFunctionReturn(PETSC_SUCCESS);
1043: }

1045: static PetscErrorCode MatDuplicate_MPISELL(Mat matin, MatDuplicateOption cpvalues, Mat *newmat)
1046: {
1047:   Mat          mat;
1048:   Mat_MPISELL *a, *oldmat = (Mat_MPISELL *)matin->data;

1050:   PetscFunctionBegin;
1051:   *newmat = NULL;
1052:   PetscCall(MatCreate(PetscObjectComm((PetscObject)matin), &mat));
1053:   PetscCall(MatSetSizes(mat, matin->rmap->n, matin->cmap->n, matin->rmap->N, matin->cmap->N));
1054:   PetscCall(MatSetBlockSizesFromMats(mat, matin, matin));
1055:   PetscCall(MatSetType(mat, ((PetscObject)matin)->type_name));
1056:   a = (Mat_MPISELL *)mat->data;

1058:   mat->factortype   = matin->factortype;
1059:   mat->assembled    = PETSC_TRUE;
1060:   mat->insertmode   = NOT_SET_VALUES;
1061:   mat->preallocated = PETSC_TRUE;

1063:   a->size         = oldmat->size;
1064:   a->rank         = oldmat->rank;
1065:   a->donotstash   = oldmat->donotstash;
1066:   a->roworiented  = oldmat->roworiented;
1067:   a->rowindices   = NULL;
1068:   a->rowvalues    = NULL;
1069:   a->getrowactive = PETSC_FALSE;

1071:   PetscCall(PetscLayoutReference(matin->rmap, &mat->rmap));
1072:   PetscCall(PetscLayoutReference(matin->cmap, &mat->cmap));

1074:   if (oldmat->colmap) {
1075: #if defined(PETSC_USE_CTABLE)
1076:     PetscCall(PetscHMapIDuplicate(oldmat->colmap, &a->colmap));
1077: #else
1078:     PetscCall(PetscMalloc1(mat->cmap->N, &a->colmap));
1079:     PetscCall(PetscArraycpy(a->colmap, oldmat->colmap, mat->cmap->N));
1080: #endif
1081:   } else a->colmap = NULL;
1082:   if (oldmat->garray) {
1083:     PetscInt len;
1084:     len = oldmat->B->cmap->n;
1085:     PetscCall(PetscMalloc1(len + 1, &a->garray));
1086:     if (len) PetscCall(PetscArraycpy(a->garray, oldmat->garray, len));
1087:   } else a->garray = NULL;

1089:   PetscCall(VecDuplicate(oldmat->lvec, &a->lvec));
1090:   PetscCall(VecScatterCopy(oldmat->Mvctx, &a->Mvctx));
1091:   PetscCall(MatDuplicate(oldmat->A, cpvalues, &a->A));
1092:   PetscCall(MatDuplicate(oldmat->B, cpvalues, &a->B));
1093:   PetscCall(PetscFunctionListDuplicate(((PetscObject)matin)->qlist, &((PetscObject)mat)->qlist));
1094:   *newmat = mat;
1095:   PetscFunctionReturn(PETSC_SUCCESS);
1096: }

1098: static const struct _MatOps MatOps_Values = {MatSetValues_MPISELL,
1099:                                              NULL,
1100:                                              NULL,
1101:                                              MatMult_MPISELL,
1102:                                              /* 4*/ MatMultAdd_MPISELL,
1103:                                              MatMultTranspose_MPISELL,
1104:                                              MatMultTransposeAdd_MPISELL,
1105:                                              NULL,
1106:                                              NULL,
1107:                                              NULL,
1108:                                              /*10*/ NULL,
1109:                                              NULL,
1110:                                              NULL,
1111:                                              MatSOR_MPISELL,
1112:                                              NULL,
1113:                                              /*15*/ MatGetInfo_MPISELL,
1114:                                              MatEqual_MPISELL,
1115:                                              MatGetDiagonal_MPISELL,
1116:                                              MatDiagonalScale_MPISELL,
1117:                                              NULL,
1118:                                              /*20*/ MatAssemblyBegin_MPISELL,
1119:                                              MatAssemblyEnd_MPISELL,
1120:                                              MatSetOption_MPISELL,
1121:                                              MatZeroEntries_MPISELL,
1122:                                              /*24*/ NULL,
1123:                                              NULL,
1124:                                              NULL,
1125:                                              NULL,
1126:                                              NULL,
1127:                                              /*29*/ MatSetUp_MPISELL,
1128:                                              NULL,
1129:                                              NULL,
1130:                                              MatGetDiagonalBlock_MPISELL,
1131:                                              NULL,
1132:                                              /*34*/ MatDuplicate_MPISELL,
1133:                                              NULL,
1134:                                              NULL,
1135:                                              NULL,
1136:                                              NULL,
1137:                                              /*39*/ NULL,
1138:                                              NULL,
1139:                                              NULL,
1140:                                              MatGetValues_MPISELL,
1141:                                              MatCopy_MPISELL,
1142:                                              /*44*/ NULL,
1143:                                              MatScale_MPISELL,
1144:                                              MatShift_MPISELL,
1145:                                              MatDiagonalSet_MPISELL,
1146:                                              NULL,
1147:                                              /*49*/ MatSetRandom_MPISELL,
1148:                                              NULL,
1149:                                              NULL,
1150:                                              NULL,
1151:                                              NULL,
1152:                                              /*54*/ MatFDColoringCreate_MPIXAIJ,
1153:                                              NULL,
1154:                                              MatSetUnfactored_MPISELL,
1155:                                              NULL,
1156:                                              NULL,
1157:                                              /*59*/ NULL,
1158:                                              MatDestroy_MPISELL,
1159:                                              MatView_MPISELL,
1160:                                              NULL,
1161:                                              NULL,
1162:                                              /*64*/ NULL,
1163:                                              NULL,
1164:                                              NULL,
1165:                                              NULL,
1166:                                              NULL,
1167:                                              /*69*/ NULL,
1168:                                              NULL,
1169:                                              NULL,
1170:                                              NULL,
1171:                                              NULL,
1172:                                              NULL,
1173:                                              /*75*/ MatFDColoringApply_AIJ, /* reuse AIJ function */
1174:                                              MatSetFromOptions_MPISELL,
1175:                                              NULL,
1176:                                              NULL,
1177:                                              NULL,
1178:                                              /*80*/ NULL,
1179:                                              NULL,
1180:                                              NULL,
1181:                                              /*83*/ NULL,
1182:                                              NULL,
1183:                                              NULL,
1184:                                              NULL,
1185:                                              NULL,
1186:                                              NULL,
1187:                                              /*89*/ NULL,
1188:                                              NULL,
1189:                                              NULL,
1190:                                              NULL,
1191:                                              NULL,
1192:                                              /*94*/ NULL,
1193:                                              NULL,
1194:                                              NULL,
1195:                                              NULL,
1196:                                              NULL,
1197:                                              /*99*/ NULL,
1198:                                              NULL,
1199:                                              NULL,
1200:                                              MatConjugate_MPISELL,
1201:                                              NULL,
1202:                                              /*104*/ NULL,
1203:                                              MatRealPart_MPISELL,
1204:                                              MatImaginaryPart_MPISELL,
1205:                                              NULL,
1206:                                              NULL,
1207:                                              /*109*/ NULL,
1208:                                              NULL,
1209:                                              NULL,
1210:                                              NULL,
1211:                                              MatMissingDiagonal_MPISELL,
1212:                                              /*114*/ NULL,
1213:                                              NULL,
1214:                                              MatGetGhosts_MPISELL,
1215:                                              NULL,
1216:                                              NULL,
1217:                                              /*119*/ MatMultDiagonalBlock_MPISELL,
1218:                                              NULL,
1219:                                              NULL,
1220:                                              NULL,
1221:                                              NULL,
1222:                                              /*124*/ NULL,
1223:                                              NULL,
1224:                                              MatInvertBlockDiagonal_MPISELL,
1225:                                              NULL,
1226:                                              NULL,
1227:                                              /*129*/ NULL,
1228:                                              NULL,
1229:                                              NULL,
1230:                                              NULL,
1231:                                              NULL,
1232:                                              /*134*/ NULL,
1233:                                              NULL,
1234:                                              NULL,
1235:                                              NULL,
1236:                                              NULL,
1237:                                              /*139*/ NULL,
1238:                                              NULL,
1239:                                              NULL,
1240:                                              MatFDColoringSetUp_MPIXAIJ,
1241:                                              NULL,
1242:                                              /*144*/ NULL,
1243:                                              NULL,
1244:                                              NULL,
1245:                                              NULL,
1246:                                              NULL,
1247:                                              NULL,
1248:                                              /*150*/ NULL,
1249:                                              NULL,
1250:                                              NULL};

1252: /*@C
1253:   MatMPISELLSetPreallocation - Preallocates memory for a `MATMPISELL` sparse parallel matrix in SELL format.
1254:   For good matrix assembly performance the user should preallocate the matrix storage by
1255:   setting the parameters `d_nz` (or `d_nnz`) and `o_nz` (or `o_nnz`).

1257:   Collective

1259:   Input Parameters:
1260: + B     - the matrix
1261: . d_nz  - number of nonzeros per row in DIAGONAL portion of local submatrix
1262:            (same value is used for all local rows)
1263: . d_nnz - array containing the number of nonzeros in the various rows of the
1264:            DIAGONAL portion of the local submatrix (possibly different for each row)
1265:            or NULL (`PETSC_NULL_INTEGER` in Fortran), if `d_nz` is used to specify the nonzero structure.
1266:            The size of this array is equal to the number of local rows, i.e. `m`.
1267:            For matrices that will be factored, you must leave room for (and set)
1268:            the diagonal entry even if it is zero.
1269: . o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of local
1270:            submatrix (same value is used for all local rows).
1271: - o_nnz - array containing the number of nonzeros in the various rows of the
1272:            OFF-DIAGONAL portion of the local submatrix (possibly different for
1273:            each row) or NULL (`PETSC_NULL_INTEGER` in Fortran), if `o_nz` is used to specify the nonzero
1274:            structure. The size of this array is equal to the number
1275:            of local rows, i.e. `m`.

1277:   Example usage:
1278:   Consider the following 8x8 matrix with 34 non-zero values, that is
1279:   assembled across 3 processors. Let's assume that proc0 owns 3 rows,
1280:   proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
1281:   as follows

1283: .vb
1284:             1  2  0  |  0  3  0  |  0  4
1285:     Proc0   0  5  6  |  7  0  0  |  8  0
1286:             9  0 10  | 11  0  0  | 12  0
1287:     -------------------------------------
1288:            13  0 14  | 15 16 17  |  0  0
1289:     Proc1   0 18  0  | 19 20 21  |  0  0
1290:             0  0  0  | 22 23  0  | 24  0
1291:     -------------------------------------
1292:     Proc2  25 26 27  |  0  0 28  | 29  0
1293:            30  0  0  | 31 32 33  |  0 34
1294: .ve

1296:   This can be represented as a collection of submatrices as

1298: .vb
1299:       A B C
1300:       D E F
1301:       G H I
1302: .ve

1304:   The submatrices A,B,C are owned by proc0, D,E,F are
1305:   owned by proc1, and G,H,I are owned by proc2.

1307:   The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
1308:   The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
1309:   The 'M','N' parameters are 8,8, and have the same values on all procs.

1311:   The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
1312:   submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
1313:   corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
1314:   Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
1315:   part as `MATSEQSELL` matrices. For example, proc1 will store [E] as a `MATSEQSELL`
1316:   matrix, and [DF] as another `MATSEQSELL` matrix.

1318:   When the `d_nz`, `o_nz` parameters are specified, `d_nz` storage elements are
1319:   allocated for every row of the local diagonal submatrix, and `o_nz`
1320:   storage locations are allocated for every row of the OFF-DIAGONAL submatrix.
1321:   One way to choose `d_nz` and `o_nz` is to use the maximum number of nonzeros
1322:   per row in each of the local DIAGONAL and OFF-DIAGONAL submatrices.
1323:   In this case, the values of `d_nz`, `o_nz` are
1324: .vb
1325:      proc0  d_nz = 2, o_nz = 2
1326:      proc1  d_nz = 3, o_nz = 2
1327:      proc2  d_nz = 1, o_nz = 4
1328: .ve
1329:   We are allocating m*(d_nz+o_nz) storage locations for every proc. This
1330:   translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
1331:   for proc2, i.e. we are using 12+15+10=37 storage locations to store
1332:   34 values.

1334:   When `d_nnz`, `o_nnz` parameters are specified, the storage is specified
1335:   for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
1336:   In the above case the values for `d_nnz`, `o_nnz` are
1337: .vb
1338:      proc0 d_nnz = [2,2,2] and o_nnz = [2,2,2]
1339:      proc1 d_nnz = [3,3,2] and o_nnz = [2,1,1]
1340:      proc2 d_nnz = [1,1]   and o_nnz = [4,4]
1341: .ve
1342:   Here the space allocated is according to nz (or the maximum values in the nnz arrays,
1343:   if nnz is provided) for the DIAGONAL and OFF-DIAGONAL submatrices, i.e. (2+2)*3+(3+2)*3+(1+4)*2=37

1345:   Level: intermediate

1347:   Notes:
1348:   If the *_nnz parameter is given then the *_nz parameter is ignored

1350:   The stored row and column indices begin with zero.

1352:   The parallel matrix is partitioned such that the first m0 rows belong to
1353:   process 0, the next m1 rows belong to process 1, the next m2 rows belong
1354:   to process 2, etc., where m0,m1,m2,... are the input parameter `m`.

1356:   The DIAGONAL portion of the local submatrix of a processor can be defined
1357:   as the submatrix obtained by extracting the part corresponding to
1358:   the rows r1-r2 and columns c1-c2 of the global matrix, where r1 is the
1359:   first row that belongs to the processor, r2 is the last row belonging to
1360:   this processor, and c1-c2 is the range of indices of the local part of a
1361:   vector suitable for applying the matrix to. This is an m x n matrix. In the
1362:   common case of a square matrix, the row and column ranges are the same and
1363:   the DIAGONAL part is also square. The remaining portion of the local
1364:   submatrix (m x (N-n)) constitutes the OFF-DIAGONAL portion.

1366:   If `o_nnz`, `d_nnz` are specified, then `o_nz` and `d_nz` are ignored.

1368:   You can call `MatGetInfo()` to get information on how effective the preallocation was,
1369:   for example via the fields mallocs, nz_allocated, nz_used, and nz_unneeded.
1370:   You can also run with the option -info and look for messages with the string
1371:   malloc in them to see if additional memory allocation was needed.

1373: .seealso: `Mat`, `MatCreate()`, `MatCreateSeqSELL()`, `MatSetValues()`, `MatCreateSELL()`,
1374:           `MATMPISELL`, `MatGetInfo()`, `PetscSplitOwnership()`, `MATSELL`
1375: @*/
1376: PetscErrorCode MatMPISELLSetPreallocation(Mat B, PetscInt d_nz, const PetscInt d_nnz[], PetscInt o_nz, const PetscInt o_nnz[])
1377: {
1378:   PetscFunctionBegin;
1379:   PetscValidHeaderSpecific(B, MAT_CLASSID, 1);
1380:   PetscValidType(B, 1);
1381:   PetscTryMethod(B, "MatMPISELLSetPreallocation_C", (Mat, PetscInt, const PetscInt[], PetscInt, const PetscInt[]), (B, d_nz, d_nnz, o_nz, o_nnz));
1382:   PetscFunctionReturn(PETSC_SUCCESS);
1383: }
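
/*
   An illustrative sketch (not part of the original source): the preallocation call for the
   rank that owns the first three rows of the 8x8 example in the manual page above. Each rank
   passes its own local sizes and count arrays; the values here are taken from the worked example.
.vb
   Mat      A;
   PetscInt d_nnz[3] = {2, 2, 2}, o_nnz[3] = {2, 2, 2}; // counts for rank 0 of the example

   MatCreate(PETSC_COMM_WORLD, &A);
   MatSetSizes(A, 3, 3, 8, 8);
   MatSetType(A, MATMPISELL);
   MatMPISELLSetPreallocation(A, 0, d_nnz, 0, o_nnz); // d_nz/o_nz are ignored since the arrays are given
   // ... MatSetValues(), MatAssemblyBegin()/MatAssemblyEnd() ...
.ve
*/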

1385: /*MC
1386:    MATMPISELL - MATMPISELL = "mpisell" - A matrix type to be used for MPI sparse matrices,
1387:    based on the sliced Ellpack format

1389:    Options Database Key:
1390: . -mat_type mpisell - sets the matrix type to `MATMPISELL` during a call to `MatSetFromOptions()`

1392:    Level: beginner

1394: .seealso: `Mat`, `MatCreateSELL()`, `MATSEQSELL`, `MATSELL`, `MATSEQAIJ`, `MATAIJ`, `MATMPIAIJ`
1395: M*/

1397: /*@C
1398:   MatCreateSELL - Creates a sparse parallel matrix in `MATSELL` format.

1400:   Collective

1402:   Input Parameters:
1403: + comm      - MPI communicator
1404: . m         - number of local rows (or `PETSC_DECIDE` to have calculated if `M` is given).
1405:               This value should be the same as the local size used in creating the
1406:               y vector for the matrix-vector product y = Ax.
1407: . n         - This value should be the same as the local size used in creating the
1408:               x vector for the matrix-vector product y = Ax (or `PETSC_DECIDE` to have
1409:               calculated if `N` is given). For square matrices `n` is almost always `m`.
1410: . M         - number of global rows (or `PETSC_DETERMINE` to have calculated if `m` is given)
1411: . N         - number of global columns (or `PETSC_DETERMINE` to have calculated if `n` is given)
1412: . d_rlenmax - max number of nonzeros per row in DIAGONAL portion of local submatrix
1413:              (same value is used for all local rows)
1414: . d_rlen    - array containing the number of nonzeros in the various rows of the
1415:               DIAGONAL portion of the local submatrix (possibly different for each row)
1416:               or `NULL`, if `d_rlenmax` is used to specify the nonzero structure.
1417:               The size of this array is equal to the number of local rows, i.e `m`.
1418: . o_rlenmax - max number of nonzeros per row in the OFF-DIAGONAL portion of local
1419:               submatrix (same value is used for all local rows).
1420: - o_rlen    - array containing the number of nonzeros in the various rows of the
1421:               OFF-DIAGONAL portion of the local submatrix (possibly different for
1422:               each row) or `NULL`, if `o_rlenmax` is used to specify the nonzero
1423:               structure. The size of this array is equal to the number
1424:               of local rows, i.e `m`.

1426:   Output Parameter:
1427: . A - the matrix

1429:   Options Database Key:
1430: . -mat_sell_oneindex - Internally use indexing starting at 1
1431:         rather than 0.  When calling `MatSetValues()`,
1432:         the user still MUST index entries starting at 0!

1434:   Example:
1435:   Consider the following 8x8 matrix with 34 non-zero values, that is
1436:   assembled across 3 processors. Let's assume that proc0 owns 3 rows,
1437:   proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
1438:   as follows

1440: .vb
1441:             1  2  0  |  0  3  0  |  0  4
1442:     Proc0   0  5  6  |  7  0  0  |  8  0
1443:             9  0 10  | 11  0  0  | 12  0
1444:     -------------------------------------
1445:            13  0 14  | 15 16 17  |  0  0
1446:     Proc1   0 18  0  | 19 20 21  |  0  0
1447:             0  0  0  | 22 23  0  | 24  0
1448:     -------------------------------------
1449:     Proc2  25 26 27  |  0  0 28  | 29  0
1450:            30  0  0  | 31 32 33  |  0 34
1451: .ve

1453:   This can be represented as a collection of submatrices as
1454: .vb
1455:       A B C
1456:       D E F
1457:       G H I
1458: .ve

1460:   The submatrices A,B,C are owned by proc0, D,E,F are
1461:   owned by proc1, and G,H,I are owned by proc2.

1463:   The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
1464:   The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
1465:   The 'M','N' parameters are 8,8, and have the same values on all procs.

1467:   The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
1468:   submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
1469:   corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
1470:   Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
1471:   part as `MATSEQSELL` matrices. For example, proc1 will store [E] as a `MATSEQSELL`
1472:   matrix, and [DF] as another `MATSEQSELL` matrix.

1474:   When the `d_rlenmax`, `o_rlenmax` parameters are specified, `d_rlenmax` storage elements are
1475:   allocated for every row of the local diagonal submatrix, and `o_rlenmax`
1476:   storage locations are allocated for every row of the OFF-DIAGONAL submatrix.
1477:   One way to choose `d_rlenmax` and `o_rlenmax` is to use the maximum number of nonzeros
1478:   per row in each of the local DIAGONAL and OFF-DIAGONAL submatrices.
1479:   In this case, the values of `d_rlenmax`, `o_rlenmax` are
1480: .vb
1481:      proc0 - d_rlenmax = 2, o_rlenmax = 2
1482:      proc1 - d_rlenmax = 3, o_rlenmax = 2
1483:      proc2 - d_rlenmax = 1, o_rlenmax = 4
1484: .ve
1485:   We are allocating m*(d_rlenmax+o_rlenmax) storage locations for every proc. This
1486:   translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
1487:   for proc2, i.e. we are using 12+15+10=37 storage locations to store
1488:   34 values.

1490:   When `d_rlen`, `o_rlen` parameters are specified, the storage is specified
1491:   for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
1492:   In the above case the values for `d_rlen`, `o_rlen` are
1493: .vb
1494:      proc0 - d_rlen = [2,2,2] and o_rlen = [2,2,2]
1495:      proc1 - d_rlen = [3,3,2] and o_rlen = [2,1,1]
1496:      proc2 - d_rlen = [1,1]   and o_rlen = [4,4]
1497: .ve
1498:   Here the space allocated is still 37 even though there are only 34 nonzeros, because
1499:   the allocation is always done according to rlenmax.

1501:   Level: intermediate

1503:   Notes:
1504:   It is recommended that one use the `MatCreate()`, `MatSetType()` and/or `MatSetFromOptions()`,
1505:   MatXXXXSetPreallocation() paradigm instead of this routine directly.
1506:   [MatXXXXSetPreallocation() is, for example, `MatSeqSELLSetPreallocation()`]

1508:   If the *_rlen parameter is given then the *_rlenmax parameter is ignored

1510:   `m`, `n`, `M`, `N` parameters specify the size of the matrix, and its partitioning across
1511:   processors, while `d_rlenmax`, `d_rlen`, `o_rlenmax`, `o_rlen` parameters specify the approximate
1512:   storage requirements for this matrix.

1514:   If `PETSC_DECIDE` or `PETSC_DETERMINE` is used for a particular argument on one
1515:   processor then it must be used on all processors that share the object for
1516:   that argument.

1518:   The user MUST specify either the local or global matrix dimensions
1519:   (possibly both).

1521:   The parallel matrix is partitioned across processors such that the
1522:   first m0 rows belong to process 0, the next m1 rows belong to
1523:   process 1, the next m2 rows belong to process 2, etc., where
1524:   m0,m1,m2,... are the input parameter `m`, i.e. each processor stores
1525:   values corresponding to an [m x N] submatrix.

1527:   The columns are logically partitioned with the n0 columns belonging
1528:   to the 0th partition, the next n1 columns belonging to the next
1529:   partition, etc., where n0,n1,n2,... are the input parameter `n`.

1531:   The DIAGONAL portion of the local submatrix on any given processor
1532:   is the submatrix corresponding to the rows `m` and columns `n`
1533:   owned by the given processor, i.e. the diagonal matrix on
1534:   process 0 is [m0 x n0], the diagonal matrix on process 1 is [m1 x n1],
1535:   etc. The remaining portion of the local submatrix [m x (N-n)]
1536:   constitutes the OFF-DIAGONAL portion. The example above
1537:   illustrates this concept.

1539:   For a square global matrix we define each processor's diagonal portion
1540:   to be its local rows and the corresponding columns (a square submatrix);
1541:   each processor's off-diagonal portion encompasses the remainder of the
1542:   local matrix (a rectangular submatrix).

1544:   If `o_rlen`, `d_rlen` are specified, then `o_rlenmax` and `d_rlenmax` are ignored.

1546:   When calling this routine with a single process communicator, a matrix of
1547:   type `MATSEQSELL` is returned.  If a matrix of type `MATMPISELL` is desired for this
1548:   type of communicator, use the construction mechanism
1549: .vb
1550:    MatCreate(...,&A);
1551:    MatSetType(A,MATMPISELL);
1552:    MatSetSizes(A, m,n,M,N);
1553:    MatMPISELLSetPreallocation(A,...);
1554: .ve

1556: .seealso: `Mat`, `MATSELL`, `MatCreate()`, `MatCreateSeqSELL()`, `MatSetValues()`, `MatMPISELLSetPreallocation()`, `MATMPISELL`
1557: @*/
1558: PetscErrorCode MatCreateSELL(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt M, PetscInt N, PetscInt d_rlenmax, const PetscInt d_rlen[], PetscInt o_rlenmax, const PetscInt o_rlen[], Mat *A)
1559: {
1560:   PetscMPIInt size;

1562:   PetscFunctionBegin;
1563:   PetscCall(MatCreate(comm, A));
1564:   PetscCall(MatSetSizes(*A, m, n, M, N));
1565:   PetscCallMPI(MPI_Comm_size(comm, &size));
1566:   if (size > 1) {
1567:     PetscCall(MatSetType(*A, MATMPISELL));
1568:     PetscCall(MatMPISELLSetPreallocation(*A, d_rlenmax, d_rlen, o_rlenmax, o_rlen));
1569:   } else {
1570:     PetscCall(MatSetType(*A, MATSEQSELL));
1571:     PetscCall(MatSeqSELLSetPreallocation(*A, d_rlenmax, d_rlen));
1572:   }
1573:   PetscFunctionReturn(PETSC_SUCCESS);
1574: }
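
/*
   An illustrative sketch (not from the original source) of calling MatCreateSELL() directly
   with constant row lengths: passing NULL for d_rlen/o_rlen makes d_rlenmax/o_rlenmax apply
   to every local row. The local size mloc is a placeholder chosen by the caller.
.vb
   Mat      A;
   PetscInt mloc = 4; // local rows and columns on this rank (illustrative)

   MatCreateSELL(PETSC_COMM_WORLD, mloc, mloc, PETSC_DETERMINE, PETSC_DETERMINE, 3, NULL, 2, NULL, &A);
   // ... MatSetValues(), MatAssemblyBegin()/MatAssemblyEnd(), use A, MatDestroy(&A) ...
.ve
*/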

1576: /*@C
1577:   MatMPISELLGetSeqSELL - Returns the local pieces of this distributed matrix

1579:   Not Collective

1581:   Input Parameter:
1582: . A - the `MATMPISELL` matrix

1584:   Output Parameters:
1585: + Ad     - The diagonal portion of `A`
1586: . Ao     - The off-diagonal portion of `A`
1587: - colmap - An array mapping local column numbers of `Ao` to global column numbers of the parallel matrix

1589:   Level: advanced

1591: .seealso: `Mat`, `MATSEQSELL`, `MATMPISELL`
1592: @*/
1593: PetscErrorCode MatMPISELLGetSeqSELL(Mat A, Mat *Ad, Mat *Ao, const PetscInt *colmap[])
1594: {
1595:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;
1596:   PetscBool    flg;

1598:   PetscFunctionBegin;
1599:   PetscCall(PetscObjectTypeCompare((PetscObject)A, MATMPISELL, &flg));
1600:   PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "This function requires a MATMPISELL matrix as input");
1601:   if (Ad) *Ad = a->A;
1602:   if (Ao) *Ao = a->B;
1603:   if (colmap) *colmap = a->garray;
1604:   PetscFunctionReturn(PETSC_SUCCESS);
1605: }
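
/*
   An illustrative sketch (not from the original source) of how the returned pieces relate:
   local column j of Ao corresponds to global column colmap[j] of the parallel matrix.
.vb
   Mat             Ad, Ao;
   const PetscInt *colmap;
   PetscInt        nghost;

   MatMPISELLGetSeqSELL(A, &Ad, &Ao, &colmap);
   MatGetLocalSize(Ao, NULL, &nghost); // number of nonzero off-process columns
   for (PetscInt j = 0; j < nghost; j++) {
     PetscInt gcol = colmap[j]; // global column index of local column j of Ao
     // ... use gcol ...
   }
.ve
*/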

1607: /*@C
1608:   MatMPISELLGetLocalMatCondensed - Creates a `MATSEQSELL` matrix from a `MATMPISELL` matrix by
1609:   taking all of its local rows and NON-ZERO columns

1611:   Not Collective

1613:   Input Parameters:
1614: + A     - the matrix
1615: . scall - either `MAT_INITIAL_MATRIX` or `MAT_REUSE_MATRIX`
1616: . row   - index set of rows to extract (or `NULL`)
1617: - col   - index set of columns to extract (or `NULL`)

1619:   Output Parameter:
1620: . A_loc - the local sequential matrix generated

1622:   Level: advanced

1624: .seealso: `Mat`, `MATSEQSELL`, `MATMPISELL`, `MatGetOwnershipRange()`, `MatMPISELLGetLocalMat()`
1625: @*/
1626: PetscErrorCode MatMPISELLGetLocalMatCondensed(Mat A, MatReuse scall, IS *row, IS *col, Mat *A_loc)
1627: {
1628:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;
1629:   PetscInt     i, start, end, ncols, nzA, nzB, *cmap, imark, *idx;
1630:   IS           isrowa, iscola;
1631:   Mat         *aloc;
1632:   PetscBool    match;

1634:   PetscFunctionBegin;
1635:   PetscCall(PetscObjectTypeCompare((PetscObject)A, MATMPISELL, &match));
1636:   PetscCheck(match, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Requires MATMPISELL matrix as input");
1637:   PetscCall(PetscLogEventBegin(MAT_Getlocalmatcondensed, A, 0, 0, 0));
1638:   if (!row) {
1639:     start = A->rmap->rstart;
1640:     end   = A->rmap->rend;
1641:     PetscCall(ISCreateStride(PETSC_COMM_SELF, end - start, start, 1, &isrowa));
1642:   } else {
1643:     isrowa = *row;
1644:   }
1645:   if (!col) {
1646:     start = A->cmap->rstart;
1647:     cmap  = a->garray;
1648:     nzA   = a->A->cmap->n;
1649:     nzB   = a->B->cmap->n;
1650:     PetscCall(PetscMalloc1(nzA + nzB, &idx));
1651:     ncols = 0;
1652:     for (i = 0; i < nzB; i++) {
1653:       if (cmap[i] < start) idx[ncols++] = cmap[i];
1654:       else break;
1655:     }
1656:     imark = i;
1657:     for (i = 0; i < nzA; i++) idx[ncols++] = start + i;
1658:     for (i = imark; i < nzB; i++) idx[ncols++] = cmap[i];
1659:     PetscCall(ISCreateGeneral(PETSC_COMM_SELF, ncols, idx, PETSC_OWN_POINTER, &iscola));
1660:   } else {
1661:     iscola = *col;
1662:   }
1663:   if (scall != MAT_INITIAL_MATRIX) {
1664:     PetscCall(PetscMalloc1(1, &aloc));
1665:     aloc[0] = *A_loc;
1666:   }
1667:   PetscCall(MatCreateSubMatrices(A, 1, &isrowa, &iscola, scall, &aloc));
1668:   *A_loc = aloc[0];
1669:   PetscCall(PetscFree(aloc));
1670:   if (!row) PetscCall(ISDestroy(&isrowa));
1671:   if (!col) PetscCall(ISDestroy(&iscola));
1672:   PetscCall(PetscLogEventEnd(MAT_Getlocalmatcondensed, A, 0, 0, 0));
1673:   PetscFunctionReturn(PETSC_SUCCESS);
1674: }
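
/*
   An illustrative usage sketch (not from the original source): extract the condensed local
   matrix once with MAT_INITIAL_MATRIX, then refresh it with MAT_REUSE_MATRIX after the values
   of A change while its nonzero pattern stays fixed.
.vb
   Mat A_loc;

   MatMPISELLGetLocalMatCondensed(A, MAT_INITIAL_MATRIX, NULL, NULL, &A_loc);
   // ... change values of A and reassemble ...
   MatMPISELLGetLocalMatCondensed(A, MAT_REUSE_MATRIX, NULL, NULL, &A_loc);
   MatDestroy(&A_loc);
.ve
*/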

1676: #include <../src/mat/impls/aij/mpi/mpiaij.h>

1678: PetscErrorCode MatConvert_MPISELL_MPIAIJ(Mat A, MatType newtype, MatReuse reuse, Mat *newmat)
1679: {
1680:   Mat_MPISELL *a = (Mat_MPISELL *)A->data;
1681:   Mat          B;
1682:   Mat_MPIAIJ  *b;

1684:   PetscFunctionBegin;
1685:   PetscCheck(A->assembled, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Matrix must be assembled");

1687:   if (reuse == MAT_REUSE_MATRIX) {
1688:     B = *newmat;
1689:   } else {
1690:     PetscCall(MatCreate(PetscObjectComm((PetscObject)A), &B));
1691:     PetscCall(MatSetType(B, MATMPIAIJ));
1692:     PetscCall(MatSetSizes(B, A->rmap->n, A->cmap->n, A->rmap->N, A->cmap->N));
1693:     PetscCall(MatSetBlockSizes(B, A->rmap->bs, A->cmap->bs));
1694:     PetscCall(MatSeqAIJSetPreallocation(B, 0, NULL));
1695:     PetscCall(MatMPIAIJSetPreallocation(B, 0, NULL, 0, NULL));
1696:   }
1697:   b = (Mat_MPIAIJ *)B->data;

1699:   if (reuse == MAT_REUSE_MATRIX) {
1700:     PetscCall(MatConvert_SeqSELL_SeqAIJ(a->A, MATSEQAIJ, MAT_REUSE_MATRIX, &b->A));
1701:     PetscCall(MatConvert_SeqSELL_SeqAIJ(a->B, MATSEQAIJ, MAT_REUSE_MATRIX, &b->B));
1702:   } else {
1703:     PetscCall(MatDestroy(&b->A));
1704:     PetscCall(MatDestroy(&b->B));
1705:     PetscCall(MatDisAssemble_MPISELL(A));
1706:     PetscCall(MatConvert_SeqSELL_SeqAIJ(a->A, MATSEQAIJ, MAT_INITIAL_MATRIX, &b->A));
1707:     PetscCall(MatConvert_SeqSELL_SeqAIJ(a->B, MATSEQAIJ, MAT_INITIAL_MATRIX, &b->B));
1708:     PetscCall(MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY));
1709:     PetscCall(MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY));
1710:     PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
1711:     PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));
1712:   }

1714:   if (reuse == MAT_INPLACE_MATRIX) {
1715:     PetscCall(MatHeaderReplace(A, &B));
1716:   } else {
1717:     *newmat = B;
1718:   }
1719:   PetscFunctionReturn(PETSC_SUCCESS);
1720: }
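
/*
   This converter is normally reached through MatConvert(), which finds it via the composed
   "MatConvert_mpisell_mpiaij_C" method, rather than being called directly. A sketch:
.vb
   Mat B;

   MatConvert(A, MATMPIAIJ, MAT_INITIAL_MATRIX, &B); // A is a MATMPISELL matrix
   // or, replacing A itself:
   MatConvert(A, MATMPIAIJ, MAT_INPLACE_MATRIX, &A);
.ve
*/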

1722: PetscErrorCode MatConvert_MPIAIJ_MPISELL(Mat A, MatType newtype, MatReuse reuse, Mat *newmat)
1723: {
1724:   Mat_MPIAIJ  *a = (Mat_MPIAIJ *)A->data;
1725:   Mat          B;
1726:   Mat_MPISELL *b;

1728:   PetscFunctionBegin;
1729:   PetscCheck(A->assembled, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Matrix must be assembled");

1731:   if (reuse == MAT_REUSE_MATRIX) {
1732:     B = *newmat;
1733:   } else {
1734:     Mat_SeqAIJ *Aa = (Mat_SeqAIJ *)a->A->data, *Ba = (Mat_SeqAIJ *)a->B->data;
1735:     PetscInt    i, d_nz = 0, o_nz = 0, m = A->rmap->N, n = A->cmap->N, lm = A->rmap->n, ln = A->cmap->n;
1736:     PetscInt   *d_nnz, *o_nnz;
1737:     PetscCall(PetscMalloc2(lm, &d_nnz, lm, &o_nnz));
1738:     for (i = 0; i < lm; i++) {
1739:       d_nnz[i] = Aa->i[i + 1] - Aa->i[i];
1740:       o_nnz[i] = Ba->i[i + 1] - Ba->i[i];
1741:       if (d_nnz[i] > d_nz) d_nz = d_nnz[i];
1742:       if (o_nnz[i] > o_nz) o_nz = o_nnz[i];
1743:     }
1744:     PetscCall(MatCreate(PetscObjectComm((PetscObject)A), &B));
1745:     PetscCall(MatSetType(B, MATMPISELL));
1746:     PetscCall(MatSetSizes(B, lm, ln, m, n));
1747:     PetscCall(MatSetBlockSizes(B, A->rmap->bs, A->cmap->bs));
1748:     PetscCall(MatSeqSELLSetPreallocation(B, d_nz, d_nnz));
1749:     PetscCall(MatMPISELLSetPreallocation(B, d_nz, d_nnz, o_nz, o_nnz));
1750:     PetscCall(PetscFree2(d_nnz, o_nnz));
1751:   }
1752:   b = (Mat_MPISELL *)B->data;

1754:   if (reuse == MAT_REUSE_MATRIX) {
1755:     PetscCall(MatConvert_SeqAIJ_SeqSELL(a->A, MATSEQSELL, MAT_REUSE_MATRIX, &b->A));
1756:     PetscCall(MatConvert_SeqAIJ_SeqSELL(a->B, MATSEQSELL, MAT_REUSE_MATRIX, &b->B));
1757:   } else {
1758:     PetscCall(MatDestroy(&b->A));
1759:     PetscCall(MatDestroy(&b->B));
1760:     PetscCall(MatConvert_SeqAIJ_SeqSELL(a->A, MATSEQSELL, MAT_INITIAL_MATRIX, &b->A));
1761:     PetscCall(MatConvert_SeqAIJ_SeqSELL(a->B, MATSEQSELL, MAT_INITIAL_MATRIX, &b->B));
1762:     PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
1763:     PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));
1764:     PetscCall(MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY));
1765:     PetscCall(MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY));
1766:   }

1768:   if (reuse == MAT_INPLACE_MATRIX) {
1769:     PetscCall(MatHeaderReplace(A, &B));
1770:   } else {
1771:     *newmat = B;
1772:   }
1773:   PetscFunctionReturn(PETSC_SUCCESS);
1774: }
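
/*
   The reverse conversion is likewise driven through MatConvert(). Because the preallocation
   above copies the per-row counts of the AIJ matrix, the copy should proceed without mallocs.
   A sketch:
.vb
   Mat B;

   MatConvert(A, MATMPISELL, MAT_INITIAL_MATRIX, &B); // A is a MATMPIAIJ matrix
.ve
*/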

1776: PetscErrorCode MatSOR_MPISELL(Mat matin, Vec bb, PetscReal omega, MatSORType flag, PetscReal fshift, PetscInt its, PetscInt lits, Vec xx)
1777: {
1778:   Mat_MPISELL *mat = (Mat_MPISELL *)matin->data;
1779:   Vec          bb1 = NULL;

1781:   PetscFunctionBegin;
1782:   if (flag == SOR_APPLY_UPPER) {
1783:     PetscCall((*mat->A->ops->sor)(mat->A, bb, omega, flag, fshift, lits, 1, xx));
1784:     PetscFunctionReturn(PETSC_SUCCESS);
1785:   }

1787:   if (its > 1 || ~flag & SOR_ZERO_INITIAL_GUESS || flag & SOR_EISENSTAT) PetscCall(VecDuplicate(bb, &bb1));

1789:   if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP) {
1790:     if (flag & SOR_ZERO_INITIAL_GUESS) {
1791:       PetscCall((*mat->A->ops->sor)(mat->A, bb, omega, flag, fshift, lits, 1, xx));
1792:       its--;
1793:     }

1795:     while (its--) {
1796:       PetscCall(VecScatterBegin(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1797:       PetscCall(VecScatterEnd(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));

1799:       /* update rhs: bb1 = bb - B*x */
1800:       PetscCall(VecScale(mat->lvec, -1.0));
1801:       PetscCall((*mat->B->ops->multadd)(mat->B, mat->lvec, bb, bb1));

1803:       /* local sweep */
1804:       PetscCall((*mat->A->ops->sor)(mat->A, bb1, omega, SOR_SYMMETRIC_SWEEP, fshift, lits, 1, xx));
1805:     }
1806:   } else if (flag & SOR_LOCAL_FORWARD_SWEEP) {
1807:     if (flag & SOR_ZERO_INITIAL_GUESS) {
1808:       PetscCall((*mat->A->ops->sor)(mat->A, bb, omega, flag, fshift, lits, 1, xx));
1809:       its--;
1810:     }
1811:     while (its--) {
1812:       PetscCall(VecScatterBegin(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1813:       PetscCall(VecScatterEnd(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));

1815:       /* update rhs: bb1 = bb - B*x */
1816:       PetscCall(VecScale(mat->lvec, -1.0));
1817:       PetscCall((*mat->B->ops->multadd)(mat->B, mat->lvec, bb, bb1));

1819:       /* local sweep */
1820:       PetscCall((*mat->A->ops->sor)(mat->A, bb1, omega, SOR_FORWARD_SWEEP, fshift, lits, 1, xx));
1821:     }
1822:   } else if (flag & SOR_LOCAL_BACKWARD_SWEEP) {
1823:     if (flag & SOR_ZERO_INITIAL_GUESS) {
1824:       PetscCall((*mat->A->ops->sor)(mat->A, bb, omega, flag, fshift, lits, 1, xx));
1825:       its--;
1826:     }
1827:     while (its--) {
1828:       PetscCall(VecScatterBegin(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1829:       PetscCall(VecScatterEnd(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));

1831:       /* update rhs: bb1 = bb - B*x */
1832:       PetscCall(VecScale(mat->lvec, -1.0));
1833:       PetscCall((*mat->B->ops->multadd)(mat->B, mat->lvec, bb, bb1));

1835:       /* local sweep */
1836:       PetscCall((*mat->A->ops->sor)(mat->A, bb1, omega, SOR_BACKWARD_SWEEP, fshift, lits, 1, xx));
1837:     }
1838:   } else SETERRQ(PetscObjectComm((PetscObject)matin), PETSC_ERR_SUP, "Parallel SOR not supported");

1840:   PetscCall(VecDestroy(&bb1));

1842:   matin->factorerrortype = mat->A->factorerrortype;
1843:   PetscFunctionReturn(PETSC_SUCCESS);
1844: }
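
/*
   MatSOR() is usually exercised through PCSOR; only the local sweep variants handled above
   are supported in parallel, so the sweep type is set accordingly. A sketch, assuming a KSP
   ksp has already been created with a MATMPISELL operator:
.vb
   PC pc;

   KSPGetPC(ksp, &pc);
   PCSetType(pc, PCSOR);
   PCSORSetSymmetric(pc, SOR_LOCAL_SYMMETRIC_SWEEP); // a parallel-safe local sweep
.ve
*/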

1846: #if defined(PETSC_HAVE_CUDA)
1847: PETSC_INTERN PetscErrorCode MatConvert_MPISELL_MPISELLCUDA(Mat, MatType, MatReuse, Mat *);
1848: #endif

1850: /*MC
1851:    MATMPISELL - MATMPISELL = "mpisell" - A matrix type to be used for parallel sparse matrices.

1853:    Options Database Keys:
1854: . -mat_type mpisell - sets the matrix type to `MATMPISELL` during a call to `MatSetFromOptions()`

1856:   Level: beginner

1858: .seealso: `Mat`, `MATSELL`, `MATSEQSELL`, `MatCreateSELL()`
1859: M*/
1860: PETSC_EXTERN PetscErrorCode MatCreate_MPISELL(Mat B)
1861: {
1862:   Mat_MPISELL *b;
1863:   PetscMPIInt  size;

1865:   PetscFunctionBegin;
1866:   PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)B), &size));
1867:   PetscCall(PetscNew(&b));
1868:   B->data       = (void *)b;
1869:   B->ops[0]     = MatOps_Values;
1870:   B->assembled  = PETSC_FALSE;
1871:   B->insertmode = NOT_SET_VALUES;
1872:   b->size       = size;
1873:   PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)B), &b->rank));
1874:   /* build cache for off array entries formed */
1875:   PetscCall(MatStashCreate_Private(PetscObjectComm((PetscObject)B), 1, &B->stash));

1877:   b->donotstash  = PETSC_FALSE;
1878:   b->colmap      = NULL;
1879:   b->garray      = NULL;
1880:   b->roworiented = PETSC_TRUE;

1882:   /* stuff used for matrix vector multiply */
1883:   b->lvec  = NULL;
1884:   b->Mvctx = NULL;

1886:   /* stuff for MatGetRow() */
1887:   b->rowindices   = NULL;
1888:   b->rowvalues    = NULL;
1889:   b->getrowactive = PETSC_FALSE;

1891:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatStoreValues_C", MatStoreValues_MPISELL));
1892:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatRetrieveValues_C", MatRetrieveValues_MPISELL));
1893:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatIsTranspose_C", MatIsTranspose_MPISELL));
1894:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMPISELLSetPreallocation_C", MatMPISELLSetPreallocation_MPISELL));
1895:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpisell_mpiaij_C", MatConvert_MPISELL_MPIAIJ));
1896: #if defined(PETSC_HAVE_CUDA)
1897:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpisell_mpisellcuda_C", MatConvert_MPISELL_MPISELLCUDA));
1898: #endif
1899:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatDiagonalScaleLocal_C", MatDiagonalScaleLocal_MPISELL));
1900:   PetscCall(PetscObjectChangeTypeName((PetscObject)B, MATMPISELL));
1901:   PetscFunctionReturn(PETSC_SUCCESS);
1902: }
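
/*
   With the constructor above registered, the type can also be selected at runtime using the
   standard PETSc idiom (an illustrative sketch):
.vb
   MatCreate(PETSC_COMM_WORLD, &A);
   MatSetSizes(A, m, n, M, N);
   MatSetFromOptions(A); // choose the type with -mat_type mpisell (or -mat_type sell)
.ve
*/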