Actual source code: mpisell.c
1: #include <../src/mat/impls/aij/mpi/mpiaij.h>
2: #include <../src/mat/impls/sell/mpi/mpisell.h>
3: #include <petsc/private/vecimpl.h>
4: #include <petsc/private/isimpl.h>
5: #include <petscblaslapack.h>
6: #include <petscsf.h>
8: /*MC
9: MATSELL - MATSELL = "sell" - A matrix type to be used for sparse matrices.
11: This matrix type is identical to `MATSEQSELL` when constructed with a single process communicator,
12: and `MATMPISELL` otherwise. As a result, for single process communicators,
13: `MatSeqSELLSetPreallocation()` is supported, and similarly `MatMPISELLSetPreallocation()` is supported
14: for communicators controlling multiple processes. It is recommended that you call both of
15: the above preallocation routines for simplicity.
17: Options Database Keys:
18: . -mat_type sell - sets the matrix type to `MATSELL` during a call to `MatSetFromOptions()`
20: Level: beginner
22: .seealso: `Mat`, `MATAIJ`, `MATBAIJ`, `MATSBAIJ`, `MatCreateSELL()`, `MatCreateSeqSELL()`, `MATSEQSELL`, `MATMPISELL`
23: M*/
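A minimal usage sketch (illustrative only, not part of mpisell.c; the global size 100 and the row-length estimates 5 and 2 are placeholder assumptions) showing the recommended pattern of calling both preallocation routines so the same code works on one or many MPI processes:

   Mat A;
   PetscCall(MatCreate(PETSC_COMM_WORLD, &A));
   PetscCall(MatSetSizes(A, PETSC_DECIDE, PETSC_DECIDE, 100, 100)); /* placeholder global size */
   PetscCall(MatSetType(A, MATSELL));                               /* resolves to MATSEQSELL or MATMPISELL */
   PetscCall(MatSetFromOptions(A));                                 /* honors -mat_type sell */
   PetscCall(MatSeqSELLSetPreallocation(A, 5, NULL));               /* takes effect on a 1-process communicator */
   PetscCall(MatMPISELLSetPreallocation(A, 5, NULL, 2, NULL));      /* takes effect on multi-process communicators */
   /* ... MatSetValues(), MatAssemblyBegin()/MatAssemblyEnd(), use the matrix, MatDestroy(&A) ... */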
25: static PetscErrorCode MatDiagonalSet_MPISELL(Mat Y, Vec D, InsertMode is)
26: {
27: Mat_MPISELL *sell = (Mat_MPISELL *)Y->data;
29: PetscFunctionBegin;
30: if (Y->assembled && Y->rmap->rstart == Y->cmap->rstart && Y->rmap->rend == Y->cmap->rend) {
31: PetscCall(MatDiagonalSet(sell->A, D, is));
32: } else {
33: PetscCall(MatDiagonalSet_Default(Y, D, is));
34: }
35: PetscFunctionReturn(PETSC_SUCCESS);
36: }
38: /*
39: Local utility routine that creates a mapping from the global column
40: number to the local number in the off-diagonal part of the local
41: storage of the matrix. When PETSC_USE_CTABLE is used this is scalable at
42:   a slightly higher hash-table cost; without it, it is not scalable (each processor
43:   has an order-N integer array) but it is fast to access.
44: */
45: PetscErrorCode MatCreateColmap_MPISELL_Private(Mat mat)
46: {
47: Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
48: PetscInt n = sell->B->cmap->n, i;
50: PetscFunctionBegin;
51: PetscCheck(sell->garray, PETSC_COMM_SELF, PETSC_ERR_PLIB, "MPISELL Matrix was assembled but is missing garray");
52: #if defined(PETSC_USE_CTABLE)
53: PetscCall(PetscHMapICreateWithSize(n, &sell->colmap));
54: for (i = 0; i < n; i++) PetscCall(PetscHMapISet(sell->colmap, sell->garray[i] + 1, i + 1));
55: #else
56: PetscCall(PetscCalloc1(mat->cmap->N + 1, &sell->colmap));
57: for (i = 0; i < n; i++) sell->colmap[sell->garray[i]] = i + 1;
58: #endif
59: PetscFunctionReturn(PETSC_SUCCESS);
60: }
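For orientation only (a sketch assuming a routine that already holds `Mat_MPISELL *sell`; it is not code from this file): the map built above is later consulted as shown below, with the +1/-1 shift used so that 0 can mean "global column not present in the off-diagonal block"; `gcol` is a hypothetical global column index.

   PetscInt gcol = 42; /* hypothetical global column index owned by another process */
   PetscInt lcol;      /* local column index into the off-diagonal block B, or -1 if absent */
   #if defined(PETSC_USE_CTABLE)
   PetscCall(PetscHMapIGetWithDefault(sell->colmap, gcol + 1, 0, &lcol));
   lcol--; /* stored values are shifted by one, so a missing key maps to -1 here */
   #else
   lcol = sell->colmap[gcol] - 1;
   #endif
   if (lcol < 0) { /* gcol does not (yet) appear in this process's off-diagonal part */ }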
62: #define MatSetValues_SeqSELL_A_Private(row, col, value, addv, orow, ocol) \
63: { \
64: if (col <= lastcol1) low1 = 0; \
65: else high1 = nrow1; \
66: lastcol1 = col; \
67: while (high1 - low1 > 5) { \
68: t = (low1 + high1) / 2; \
69: if (cp1[sliceheight * t] > col) high1 = t; \
70: else low1 = t; \
71: } \
72: for (_i = low1; _i < high1; _i++) { \
73: if (cp1[sliceheight * _i] > col) break; \
74: if (cp1[sliceheight * _i] == col) { \
75: if (addv == ADD_VALUES) vp1[sliceheight * _i] += value; \
76: else vp1[sliceheight * _i] = value; \
77: inserted = PETSC_TRUE; \
78: goto a_noinsert; \
79: } \
80: } \
81: if (value == 0.0 && ignorezeroentries) { \
82: low1 = 0; \
83: high1 = nrow1; \
84: goto a_noinsert; \
85: } \
86: if (nonew == 1) { \
87: low1 = 0; \
88: high1 = nrow1; \
89: goto a_noinsert; \
90: } \
91: PetscCheck(nonew != -1, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Inserting a new nonzero at global row/column (%" PetscInt_FMT ", %" PetscInt_FMT ") into matrix", orow, ocol); \
92: MatSeqXSELLReallocateSELL(A, am, 1, nrow1, a->sliidx, a->sliceheight, row / sliceheight, row, col, a->colidx, a->val, cp1, vp1, nonew, MatScalar); \
93: /* shift up all the later entries in this row */ \
94: for (ii = nrow1 - 1; ii >= _i; ii--) { \
95: cp1[sliceheight * (ii + 1)] = cp1[sliceheight * ii]; \
96: vp1[sliceheight * (ii + 1)] = vp1[sliceheight * ii]; \
97: } \
98: cp1[sliceheight * _i] = col; \
99: vp1[sliceheight * _i] = value; \
100: a->nz++; \
101: nrow1++; \
102: a_noinsert:; \
103: a->rlen[row] = nrow1; \
104: }
106: #define MatSetValues_SeqSELL_B_Private(row, col, value, addv, orow, ocol) \
107: { \
108: if (col <= lastcol2) low2 = 0; \
109: else high2 = nrow2; \
110: lastcol2 = col; \
111: while (high2 - low2 > 5) { \
112: t = (low2 + high2) / 2; \
113: if (cp2[sliceheight * t] > col) high2 = t; \
114: else low2 = t; \
115: } \
116: for (_i = low2; _i < high2; _i++) { \
117: if (cp2[sliceheight * _i] > col) break; \
118: if (cp2[sliceheight * _i] == col) { \
119: if (addv == ADD_VALUES) vp2[sliceheight * _i] += value; \
120: else vp2[sliceheight * _i] = value; \
121: inserted = PETSC_TRUE; \
122: goto b_noinsert; \
123: } \
124: } \
125: if (value == 0.0 && ignorezeroentries) { \
126: low2 = 0; \
127: high2 = nrow2; \
128: goto b_noinsert; \
129: } \
130: if (nonew == 1) { \
131: low2 = 0; \
132: high2 = nrow2; \
133: goto b_noinsert; \
134: } \
135: PetscCheck(nonew != -1, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Inserting a new nonzero at global row/column (%" PetscInt_FMT ", %" PetscInt_FMT ") into matrix", orow, ocol); \
136: MatSeqXSELLReallocateSELL(B, bm, 1, nrow2, b->sliidx, b->sliceheight, row / sliceheight, row, col, b->colidx, b->val, cp2, vp2, nonew, MatScalar); \
137: /* shift up all the later entries in this row */ \
138: for (ii = nrow2 - 1; ii >= _i; ii--) { \
139: cp2[sliceheight * (ii + 1)] = cp2[sliceheight * ii]; \
140: vp2[sliceheight * (ii + 1)] = vp2[sliceheight * ii]; \
141: } \
142: cp2[sliceheight * _i] = col; \
143: vp2[sliceheight * _i] = value; \
144: b->nz++; \
145: nrow2++; \
146: b_noinsert:; \
147: b->rlen[row] = nrow2; \
148: }
150: static PetscErrorCode MatSetValues_MPISELL(Mat mat, PetscInt m, const PetscInt im[], PetscInt n, const PetscInt in[], const PetscScalar v[], InsertMode addv)
151: {
152: Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
153: PetscScalar value;
154: PetscInt i, j, rstart = mat->rmap->rstart, rend = mat->rmap->rend, shift1, shift2;
155: PetscInt cstart = mat->cmap->rstart, cend = mat->cmap->rend, row, col;
156: PetscBool roworiented = sell->roworiented;
158: /* Some Variables required in the macro */
159: Mat A = sell->A;
160: Mat_SeqSELL *a = (Mat_SeqSELL *)A->data;
161: PetscBool ignorezeroentries = a->ignorezeroentries, found;
162: Mat B = sell->B;
163: Mat_SeqSELL *b = (Mat_SeqSELL *)B->data;
164: PetscInt *cp1, *cp2, ii, _i, nrow1, nrow2, low1, high1, low2, high2, t, lastcol1, lastcol2, sliceheight = a->sliceheight;
165: MatScalar *vp1, *vp2;
167: PetscFunctionBegin;
168: for (i = 0; i < m; i++) {
169: if (im[i] < 0) continue;
170: PetscCheck(im[i] < mat->rmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Row too large: row %" PetscInt_FMT " max %" PetscInt_FMT, im[i], mat->rmap->N - 1);
171: if (im[i] >= rstart && im[i] < rend) {
172: row = im[i] - rstart;
173: lastcol1 = -1;
174: shift1 = a->sliidx[row / sliceheight] + (row % sliceheight); /* starting index of the row */
175: cp1 = PetscSafePointerPlusOffset(a->colidx, shift1);
176: vp1 = PetscSafePointerPlusOffset(a->val, shift1);
177: nrow1 = a->rlen[row];
178: low1 = 0;
179: high1 = nrow1;
180: lastcol2 = -1;
181: shift2 = b->sliidx[row / sliceheight] + (row % sliceheight); /* starting index of the row */
182: cp2 = PetscSafePointerPlusOffset(b->colidx, shift2);
183: vp2 = PetscSafePointerPlusOffset(b->val, shift2);
184: nrow2 = b->rlen[row];
185: low2 = 0;
186: high2 = nrow2;
188: for (j = 0; j < n; j++) {
189: if (roworiented) value = v[i * n + j];
190: else value = v[i + j * m];
191: if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES)) continue;
192: if (in[j] >= cstart && in[j] < cend) {
193: col = in[j] - cstart;
194: MatSetValue_SeqSELL_Private(A, row, col, value, addv, im[i], in[j], cp1, vp1, lastcol1, low1, high1); /* set one value */
195: #if defined(PETSC_HAVE_CUDA)
196: if (A->offloadmask != PETSC_OFFLOAD_UNALLOCATED && found) A->offloadmask = PETSC_OFFLOAD_CPU;
197: #endif
198: } else if (in[j] < 0) {
199: continue;
200: } else {
201: PetscCheck(in[j] < mat->cmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Column too large: col %" PetscInt_FMT " max %" PetscInt_FMT, in[j], mat->cmap->N - 1);
202: if (mat->was_assembled) {
203: if (!sell->colmap) PetscCall(MatCreateColmap_MPISELL_Private(mat));
204: #if defined(PETSC_USE_CTABLE)
205: PetscCall(PetscHMapIGetWithDefault(sell->colmap, in[j] + 1, 0, &col));
206: col--;
207: #else
208: col = sell->colmap[in[j]] - 1;
209: #endif
210: if (col < 0 && !((Mat_SeqSELL *)sell->B->data)->nonew) {
211: PetscCall(MatDisAssemble_MPISELL(mat));
212: col = in[j];
213: /* Reinitialize the variables required by MatSetValues_SeqSELL_B_Private() */
214: B = sell->B;
215: b = (Mat_SeqSELL *)B->data;
216: shift2 = b->sliidx[row / sliceheight] + (row % sliceheight); /* starting index of the row */
217: cp2 = b->colidx + shift2;
218: vp2 = b->val + shift2;
219: nrow2 = b->rlen[row];
220: low2 = 0;
221: high2 = nrow2;
222: found = PETSC_FALSE;
223: } else {
224: PetscCheck(col >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Inserting a new nonzero at global row/column (%" PetscInt_FMT ", %" PetscInt_FMT ") into matrix", im[i], in[j]);
225: }
226: } else col = in[j];
227: MatSetValue_SeqSELL_Private(B, row, col, value, addv, im[i], in[j], cp2, vp2, lastcol2, low2, high2); /* set one value */
228: #if defined(PETSC_HAVE_CUDA)
229: if (B->offloadmask != PETSC_OFFLOAD_UNALLOCATED && found) B->offloadmask = PETSC_OFFLOAD_CPU;
230: #endif
231: }
232: }
233: } else {
234: PetscCheck(!mat->nooffprocentries, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Setting off process row %" PetscInt_FMT " even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set", im[i]);
235: if (!sell->donotstash) {
236: mat->assembled = PETSC_FALSE;
237: if (roworiented) {
238: PetscCall(MatStashValuesRow_Private(&mat->stash, im[i], n, in, v + i * n, (PetscBool)(ignorezeroentries && (addv == ADD_VALUES))));
239: } else {
240: PetscCall(MatStashValuesCol_Private(&mat->stash, im[i], n, in, v + i, m, (PetscBool)(ignorezeroentries && (addv == ADD_VALUES))));
241: }
242: }
243: }
244: }
245: PetscFunctionReturn(PETSC_SUCCESS);
246: }
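The stash path above is what makes the following user-level pattern work (a hedged sketch with made-up indices, not taken from this file): a process may set an entry in a row it does not own, and the value is communicated during assembly.

   PetscInt    row = 5, col = 2; /* global indices; the row is assumed to be owned by another rank */
   PetscScalar v   = 3.0;
   PetscCall(MatSetValues(A, 1, &row, 1, &col, &v, ADD_VALUES)); /* stashed if the row is off-process */
   PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));           /* scatters stashed entries to their owners */
   PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));             /* owners insert them via MatSetValues_MPISELL() */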
248: static PetscErrorCode MatGetValues_MPISELL(Mat mat, PetscInt m, const PetscInt idxm[], PetscInt n, const PetscInt idxn[], PetscScalar v[])
249: {
250: Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
251: PetscInt i, j, rstart = mat->rmap->rstart, rend = mat->rmap->rend;
252: PetscInt cstart = mat->cmap->rstart, cend = mat->cmap->rend, row, col;
254: PetscFunctionBegin;
255: for (i = 0; i < m; i++) {
256: if (idxm[i] < 0) continue; /* negative row */
257: PetscCheck(idxm[i] < mat->rmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Row too large: row %" PetscInt_FMT " max %" PetscInt_FMT, idxm[i], mat->rmap->N - 1);
258: PetscCheck(idxm[i] >= rstart && idxm[i] < rend, PETSC_COMM_SELF, PETSC_ERR_SUP, "Only local values currently supported");
259: row = idxm[i] - rstart;
260: for (j = 0; j < n; j++) {
261: if (idxn[j] < 0) continue; /* negative column */
262: PetscCheck(idxn[j] < mat->cmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Column too large: col %" PetscInt_FMT " max %" PetscInt_FMT, idxn[j], mat->cmap->N - 1);
263: if (idxn[j] >= cstart && idxn[j] < cend) {
264: col = idxn[j] - cstart;
265: PetscCall(MatGetValues(sell->A, 1, &row, 1, &col, v + i * n + j));
266: } else {
267: if (!sell->colmap) PetscCall(MatCreateColmap_MPISELL_Private(mat));
268: #if defined(PETSC_USE_CTABLE)
269: PetscCall(PetscHMapIGetWithDefault(sell->colmap, idxn[j] + 1, 0, &col));
270: col--;
271: #else
272: col = sell->colmap[idxn[j]] - 1;
273: #endif
274: if (col < 0 || sell->garray[col] != idxn[j]) *(v + i * n + j) = 0.0;
275: else PetscCall(MatGetValues(sell->B, 1, &row, 1, &col, v + i * n + j));
276: }
277: }
278: }
279: PetscFunctionReturn(PETSC_SUCCESS);
280: }
282: static PetscErrorCode MatAssemblyBegin_MPISELL(Mat mat, MatAssemblyType mode)
283: {
284: Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
285: PetscInt nstash, reallocs;
287: PetscFunctionBegin;
288: if (sell->donotstash || mat->nooffprocentries) PetscFunctionReturn(PETSC_SUCCESS);
290: PetscCall(MatStashScatterBegin_Private(mat, &mat->stash, mat->rmap->range));
291: PetscCall(MatStashGetInfo_Private(&mat->stash, &nstash, &reallocs));
292: PetscCall(PetscInfo(sell->A, "Stash has %" PetscInt_FMT " entries, uses %" PetscInt_FMT " mallocs.\n", nstash, reallocs));
293: PetscFunctionReturn(PETSC_SUCCESS);
294: }
296: PetscErrorCode MatAssemblyEnd_MPISELL(Mat mat, MatAssemblyType mode)
297: {
298: Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
299: PetscMPIInt n;
300: PetscInt i, flg;
301: PetscInt *row, *col;
302: PetscScalar *val;
303: PetscBool all_assembled;
304: /* do not use 'b = (Mat_SeqSELL*)sell->B->data' as B can be reset in disassembly */
305: PetscFunctionBegin;
306: if (!sell->donotstash && !mat->nooffprocentries) {
307: while (1) {
308: PetscCall(MatStashScatterGetMesg_Private(&mat->stash, &n, &row, &col, &val, &flg));
309: if (!flg) break;
311: for (i = 0; i < n; i++) { /* assemble one by one */
312: PetscCall(MatSetValues_MPISELL(mat, 1, row + i, 1, col + i, val + i, mat->insertmode));
313: }
314: }
315: PetscCall(MatStashScatterEnd_Private(&mat->stash));
316: }
317: #if defined(PETSC_HAVE_CUDA)
318: if (mat->offloadmask == PETSC_OFFLOAD_CPU) sell->A->offloadmask = PETSC_OFFLOAD_CPU;
319: #endif
320: PetscCall(MatAssemblyBegin(sell->A, mode));
321: PetscCall(MatAssemblyEnd(sell->A, mode));
323: /*
324:      determine if any process has disassembled; if so, we must
325: also disassemble ourselves, in order that we may reassemble.
326: */
327: /*
328:      if the nonzero structure of submatrix B cannot change then we know that
329:      no process disassembled, and thus we can skip this check
330: */
331: if (!((Mat_SeqSELL *)sell->B->data)->nonew) {
332: PetscCallMPI(MPIU_Allreduce(&mat->was_assembled, &all_assembled, 1, MPI_C_BOOL, MPI_LAND, PetscObjectComm((PetscObject)mat)));
333: if (mat->was_assembled && !all_assembled) PetscCall(MatDisAssemble_MPISELL(mat));
334: }
335: if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) PetscCall(MatSetUpMultiply_MPISELL(mat));
336: #if defined(PETSC_HAVE_CUDA)
337: if (mat->offloadmask == PETSC_OFFLOAD_CPU && sell->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) sell->B->offloadmask = PETSC_OFFLOAD_CPU;
338: #endif
339: PetscCall(MatAssemblyBegin(sell->B, mode));
340: PetscCall(MatAssemblyEnd(sell->B, mode));
341: PetscCall(PetscFree2(sell->rowvalues, sell->rowindices));
342: sell->rowvalues = NULL;
343: PetscCall(VecDestroy(&sell->diag));
345: /* if no new nonzero locations are allowed in matrix then only set the matrix state the first time through */
346: if ((!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) || !((Mat_SeqSELL *)sell->A->data)->nonew) {
347: PetscObjectState state = sell->A->nonzerostate + sell->B->nonzerostate;
348: PetscCallMPI(MPIU_Allreduce(&state, &mat->nonzerostate, 1, MPIU_INT64, MPI_SUM, PetscObjectComm((PetscObject)mat)));
349: }
350: #if defined(PETSC_HAVE_CUDA)
351: mat->offloadmask = PETSC_OFFLOAD_BOTH;
352: #endif
353: PetscFunctionReturn(PETSC_SUCCESS);
354: }
356: static PetscErrorCode MatZeroEntries_MPISELL(Mat A)
357: {
358: Mat_MPISELL *l = (Mat_MPISELL *)A->data;
360: PetscFunctionBegin;
361: PetscCall(MatZeroEntries(l->A));
362: PetscCall(MatZeroEntries(l->B));
363: PetscFunctionReturn(PETSC_SUCCESS);
364: }
366: static PetscErrorCode MatMult_MPISELL(Mat A, Vec xx, Vec yy)
367: {
368: Mat_MPISELL *a = (Mat_MPISELL *)A->data;
369: PetscInt nt;
371: PetscFunctionBegin;
372: PetscCall(VecGetLocalSize(xx, &nt));
373: PetscCheck(nt == A->cmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Incompatible partition of A (%" PetscInt_FMT ") and xx (%" PetscInt_FMT ")", A->cmap->n, nt);
374: PetscCall(VecScatterBegin(a->Mvctx, xx, a->lvec, INSERT_VALUES, SCATTER_FORWARD));
375: PetscUseTypeMethod(a->A, mult, xx, yy);
376: PetscCall(VecScatterEnd(a->Mvctx, xx, a->lvec, INSERT_VALUES, SCATTER_FORWARD));
377: PetscUseTypeMethod(a->B, multadd, a->lvec, yy, yy);
378: PetscFunctionReturn(PETSC_SUCCESS);
379: }
381: static PetscErrorCode MatMultDiagonalBlock_MPISELL(Mat A, Vec bb, Vec xx)
382: {
383: Mat_MPISELL *a = (Mat_MPISELL *)A->data;
385: PetscFunctionBegin;
386: PetscCall(MatMultDiagonalBlock(a->A, bb, xx));
387: PetscFunctionReturn(PETSC_SUCCESS);
388: }
390: static PetscErrorCode MatMultAdd_MPISELL(Mat A, Vec xx, Vec yy, Vec zz)
391: {
392: Mat_MPISELL *a = (Mat_MPISELL *)A->data;
394: PetscFunctionBegin;
395: PetscCall(VecScatterBegin(a->Mvctx, xx, a->lvec, INSERT_VALUES, SCATTER_FORWARD));
396: PetscUseTypeMethod(a->A, multadd, xx, yy, zz);
397: PetscCall(VecScatterEnd(a->Mvctx, xx, a->lvec, INSERT_VALUES, SCATTER_FORWARD));
398: PetscUseTypeMethod(a->B, multadd, a->lvec, zz, zz);
399: PetscFunctionReturn(PETSC_SUCCESS);
400: }
402: static PetscErrorCode MatMultTranspose_MPISELL(Mat A, Vec xx, Vec yy)
403: {
404: Mat_MPISELL *a = (Mat_MPISELL *)A->data;
406: PetscFunctionBegin;
407: /* do nondiagonal part */
408: PetscUseTypeMethod(a->B, multtranspose, xx, a->lvec);
409: /* do local part */
410: PetscUseTypeMethod(a->A, multtranspose, xx, yy);
411: /* add partial results together */
412: PetscCall(VecScatterBegin(a->Mvctx, a->lvec, yy, ADD_VALUES, SCATTER_REVERSE));
413: PetscCall(VecScatterEnd(a->Mvctx, a->lvec, yy, ADD_VALUES, SCATTER_REVERSE));
414: PetscFunctionReturn(PETSC_SUCCESS);
415: }
417: static PetscErrorCode MatIsTranspose_MPISELL(Mat Amat, Mat Bmat, PetscReal tol, PetscBool *f)
418: {
419: MPI_Comm comm;
420: Mat_MPISELL *Asell = (Mat_MPISELL *)Amat->data, *Bsell;
421: Mat Adia = Asell->A, Bdia, Aoff, Boff, *Aoffs, *Boffs;
422: IS Me, Notme;
423: PetscInt M, N, first, last, *notme, i;
424: PetscMPIInt size;
426: PetscFunctionBegin;
427: /* Easy test: symmetric diagonal block */
428: Bsell = (Mat_MPISELL *)Bmat->data;
429: Bdia = Bsell->A;
430: PetscCall(MatIsTranspose(Adia, Bdia, tol, f));
431: if (!*f) PetscFunctionReturn(PETSC_SUCCESS);
432: PetscCall(PetscObjectGetComm((PetscObject)Amat, &comm));
433: PetscCallMPI(MPI_Comm_size(comm, &size));
434: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
436: /* Hard test: off-diagonal block. This takes a MatCreateSubMatrix. */
437: PetscCall(MatGetSize(Amat, &M, &N));
438: PetscCall(MatGetOwnershipRange(Amat, &first, &last));
439:   PetscCall(PetscMalloc1(N - last + first, &notme));
440: for (i = 0; i < first; i++) notme[i] = i;
441: for (i = last; i < M; i++) notme[i - last + first] = i;
442: PetscCall(ISCreateGeneral(MPI_COMM_SELF, N - last + first, notme, PETSC_COPY_VALUES, &Notme));
443: PetscCall(ISCreateStride(MPI_COMM_SELF, last - first, first, 1, &Me));
444: PetscCall(MatCreateSubMatrices(Amat, 1, &Me, &Notme, MAT_INITIAL_MATRIX, &Aoffs));
445: Aoff = Aoffs[0];
446: PetscCall(MatCreateSubMatrices(Bmat, 1, &Notme, &Me, MAT_INITIAL_MATRIX, &Boffs));
447: Boff = Boffs[0];
448: PetscCall(MatIsTranspose(Aoff, Boff, tol, f));
449: PetscCall(MatDestroyMatrices(1, &Aoffs));
450: PetscCall(MatDestroyMatrices(1, &Boffs));
451: PetscCall(ISDestroy(&Me));
452: PetscCall(ISDestroy(&Notme));
453: PetscCall(PetscFree(notme));
454: PetscFunctionReturn(PETSC_SUCCESS);
455: }
457: static PetscErrorCode MatMultTransposeAdd_MPISELL(Mat A, Vec xx, Vec yy, Vec zz)
458: {
459: Mat_MPISELL *a = (Mat_MPISELL *)A->data;
461: PetscFunctionBegin;
462: /* do nondiagonal part */
463: PetscUseTypeMethod(a->B, multtranspose, xx, a->lvec);
464: /* do local part */
465: PetscUseTypeMethod(a->A, multtransposeadd, xx, yy, zz);
466: /* add partial results together */
467: PetscCall(VecScatterBegin(a->Mvctx, a->lvec, zz, ADD_VALUES, SCATTER_REVERSE));
468: PetscCall(VecScatterEnd(a->Mvctx, a->lvec, zz, ADD_VALUES, SCATTER_REVERSE));
469: PetscFunctionReturn(PETSC_SUCCESS);
470: }
472: /*
473: This only works correctly for square matrices where the subblock A->A is the
474: diagonal block
475: */
476: static PetscErrorCode MatGetDiagonal_MPISELL(Mat A, Vec v)
477: {
478: Mat_MPISELL *a = (Mat_MPISELL *)A->data;
480: PetscFunctionBegin;
481: PetscCheck(A->rmap->N == A->cmap->N, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Supports only square matrix where A->A is diag block");
482: PetscCheck(A->rmap->rstart == A->cmap->rstart && A->rmap->rend == A->cmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "row partition must equal col partition");
483: PetscCall(MatGetDiagonal(a->A, v));
484: PetscFunctionReturn(PETSC_SUCCESS);
485: }
487: static PetscErrorCode MatScale_MPISELL(Mat A, PetscScalar aa)
488: {
489: Mat_MPISELL *a = (Mat_MPISELL *)A->data;
491: PetscFunctionBegin;
492: PetscCall(MatScale(a->A, aa));
493: PetscCall(MatScale(a->B, aa));
494: PetscFunctionReturn(PETSC_SUCCESS);
495: }
497: PetscErrorCode MatDestroy_MPISELL(Mat mat)
498: {
499: Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
501: PetscFunctionBegin;
502: PetscCall(PetscLogObjectState((PetscObject)mat, "Rows=%" PetscInt_FMT ", Cols=%" PetscInt_FMT, mat->rmap->N, mat->cmap->N));
503: PetscCall(MatStashDestroy_Private(&mat->stash));
504: PetscCall(VecDestroy(&sell->diag));
505: PetscCall(MatDestroy(&sell->A));
506: PetscCall(MatDestroy(&sell->B));
507: #if defined(PETSC_USE_CTABLE)
508: PetscCall(PetscHMapIDestroy(&sell->colmap));
509: #else
510: PetscCall(PetscFree(sell->colmap));
511: #endif
512: PetscCall(PetscFree(sell->garray));
513: PetscCall(VecDestroy(&sell->lvec));
514: PetscCall(VecScatterDestroy(&sell->Mvctx));
515: PetscCall(PetscFree2(sell->rowvalues, sell->rowindices));
516: PetscCall(PetscFree(sell->ld));
517: PetscCall(PetscFree(mat->data));
519: PetscCall(PetscObjectChangeTypeName((PetscObject)mat, NULL));
520: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatStoreValues_C", NULL));
521: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatRetrieveValues_C", NULL));
522: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatIsTranspose_C", NULL));
523: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatMPISELLSetPreallocation_C", NULL));
524: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpisell_mpiaij_C", NULL));
525: #if defined(PETSC_HAVE_CUDA)
526: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpisell_mpisellcuda_C", NULL));
527: #endif
528: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatDiagonalScaleLocal_C", NULL));
529: PetscFunctionReturn(PETSC_SUCCESS);
530: }
532: #include <petscdraw.h>
533: static PetscErrorCode MatView_MPISELL_ASCIIorDraworSocket(Mat mat, PetscViewer viewer)
534: {
535: Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
536: PetscMPIInt rank = sell->rank, size = sell->size;
537: PetscBool isdraw, isascii, isbinary;
538: PetscViewer sviewer;
539: PetscViewerFormat format;
541: PetscFunctionBegin;
542: PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERDRAW, &isdraw));
543: PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &isascii));
544: PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERBINARY, &isbinary));
545: if (isascii) {
546: PetscCall(PetscViewerGetFormat(viewer, &format));
547: if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
548: MatInfo info;
549: PetscInt *inodes;
551: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)mat), &rank));
552: PetscCall(MatGetInfo(mat, MAT_LOCAL, &info));
553: PetscCall(MatInodeGetInodeSizes(sell->A, NULL, &inodes, NULL));
554: PetscCall(PetscViewerASCIIPushSynchronized(viewer));
555: if (!inodes) {
556: PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "[%d] Local rows %" PetscInt_FMT " nz %" PetscInt_FMT " nz alloced %" PetscInt_FMT " mem %" PetscInt_FMT ", not using I-node routines\n", rank, mat->rmap->n, (PetscInt)info.nz_used,
557: (PetscInt)info.nz_allocated, (PetscInt)info.memory));
558: } else {
559: PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "[%d] Local rows %" PetscInt_FMT " nz %" PetscInt_FMT " nz alloced %" PetscInt_FMT " mem %" PetscInt_FMT ", using I-node routines\n", rank, mat->rmap->n, (PetscInt)info.nz_used,
560: (PetscInt)info.nz_allocated, (PetscInt)info.memory));
561: }
562: PetscCall(MatGetInfo(sell->A, MAT_LOCAL, &info));
563: PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "[%d] on-diagonal part: nz %" PetscInt_FMT " \n", rank, (PetscInt)info.nz_used));
564: PetscCall(MatGetInfo(sell->B, MAT_LOCAL, &info));
565: PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "[%d] off-diagonal part: nz %" PetscInt_FMT " \n", rank, (PetscInt)info.nz_used));
566: PetscCall(PetscViewerFlush(viewer));
567: PetscCall(PetscViewerASCIIPopSynchronized(viewer));
568: PetscCall(PetscViewerASCIIPrintf(viewer, "Information on VecScatter used in matrix-vector product: \n"));
569: PetscCall(VecScatterView(sell->Mvctx, viewer));
570: PetscFunctionReturn(PETSC_SUCCESS);
571: } else if (format == PETSC_VIEWER_ASCII_INFO) {
572: PetscInt inodecount, inodelimit, *inodes;
573: PetscCall(MatInodeGetInodeSizes(sell->A, &inodecount, &inodes, &inodelimit));
574: if (inodes) {
575: PetscCall(PetscViewerASCIIPrintf(viewer, "using I-node (on process 0) routines: found %" PetscInt_FMT " nodes, limit used is %" PetscInt_FMT "\n", inodecount, inodelimit));
576: } else {
577: PetscCall(PetscViewerASCIIPrintf(viewer, "not using I-node (on process 0) routines\n"));
578: }
579: PetscFunctionReturn(PETSC_SUCCESS);
580: } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
581: PetscFunctionReturn(PETSC_SUCCESS);
582: }
583: } else if (isbinary) {
584: if (size == 1) {
585: PetscCall(PetscObjectSetName((PetscObject)sell->A, ((PetscObject)mat)->name));
586: PetscCall(MatView(sell->A, viewer));
587: } else {
588: /* PetscCall(MatView_MPISELL_Binary(mat,viewer)); */
589: }
590: PetscFunctionReturn(PETSC_SUCCESS);
591: } else if (isdraw) {
592: PetscDraw draw;
593: PetscBool isnull;
594: PetscCall(PetscViewerDrawGetDraw(viewer, 0, &draw));
595: PetscCall(PetscDrawIsNull(draw, &isnull));
596: if (isnull) PetscFunctionReturn(PETSC_SUCCESS);
597: }
599: {
600: /* assemble the entire matrix onto first processor. */
601: Mat A;
602: Mat_SeqSELL *Aloc;
603: PetscInt M = mat->rmap->N, N = mat->cmap->N, *acolidx, row, col, i, j;
604: MatScalar *aval;
605: PetscBool isnonzero;
607: PetscCall(MatCreate(PetscObjectComm((PetscObject)mat), &A));
608: if (rank == 0) {
609: PetscCall(MatSetSizes(A, M, N, M, N));
610: } else {
611: PetscCall(MatSetSizes(A, 0, 0, M, N));
612: }
613: /* This is just a temporary matrix, so explicitly using MATMPISELL is probably best */
614: PetscCall(MatSetType(A, MATMPISELL));
615: PetscCall(MatMPISELLSetPreallocation(A, 0, NULL, 0, NULL));
616: PetscCall(MatSetOption(A, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_FALSE));
618: /* copy over the A part */
619: Aloc = (Mat_SeqSELL *)sell->A->data;
620: acolidx = Aloc->colidx;
621: aval = Aloc->val;
622: for (i = 0; i < Aloc->totalslices; i++) { /* loop over slices */
623: for (j = Aloc->sliidx[i]; j < Aloc->sliidx[i + 1]; j++) {
624: isnonzero = (PetscBool)((j - Aloc->sliidx[i]) / Aloc->sliceheight < Aloc->rlen[i * Aloc->sliceheight + j % Aloc->sliceheight]);
625: if (isnonzero) { /* check the mask bit */
626: row = i * Aloc->sliceheight + j % Aloc->sliceheight + mat->rmap->rstart;
627:           col       = *acolidx + mat->cmap->rstart;
628: PetscCall(MatSetValues(A, 1, &row, 1, &col, aval, INSERT_VALUES));
629: }
630: aval++;
631: acolidx++;
632: }
633: }
635: /* copy over the B part */
636: Aloc = (Mat_SeqSELL *)sell->B->data;
637: acolidx = Aloc->colidx;
638: aval = Aloc->val;
639: for (i = 0; i < Aloc->totalslices; i++) {
640: for (j = Aloc->sliidx[i]; j < Aloc->sliidx[i + 1]; j++) {
641: isnonzero = (PetscBool)((j - Aloc->sliidx[i]) / Aloc->sliceheight < Aloc->rlen[i * Aloc->sliceheight + j % Aloc->sliceheight]);
642: if (isnonzero) {
643: row = i * Aloc->sliceheight + j % Aloc->sliceheight + mat->rmap->rstart;
644: col = sell->garray[*acolidx];
645: PetscCall(MatSetValues(A, 1, &row, 1, &col, aval, INSERT_VALUES));
646: }
647: aval++;
648: acolidx++;
649: }
650: }
652: PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
653: PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));
654: /*
655: Everyone has to call to draw the matrix since the graphics waits are
656: synchronized across all processors that share the PetscDraw object
657: */
658: PetscCall(PetscViewerGetSubViewer(viewer, PETSC_COMM_SELF, &sviewer));
659: if (rank == 0) {
660: PetscCall(PetscObjectSetName((PetscObject)((Mat_MPISELL *)A->data)->A, ((PetscObject)mat)->name));
661: PetscCall(MatView_SeqSELL(((Mat_MPISELL *)A->data)->A, sviewer));
662: }
663: PetscCall(PetscViewerRestoreSubViewer(viewer, PETSC_COMM_SELF, &sviewer));
664: PetscCall(MatDestroy(&A));
665: }
666: PetscFunctionReturn(PETSC_SUCCESS);
667: }
669: static PetscErrorCode MatView_MPISELL(Mat mat, PetscViewer viewer)
670: {
671: PetscBool isascii, isdraw, issocket, isbinary;
673: PetscFunctionBegin;
674: PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &isascii));
675: PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERDRAW, &isdraw));
676: PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERBINARY, &isbinary));
677: PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERSOCKET, &issocket));
678: if (isascii || isdraw || isbinary || issocket) PetscCall(MatView_MPISELL_ASCIIorDraworSocket(mat, viewer));
679: PetscFunctionReturn(PETSC_SUCCESS);
680: }
682: static PetscErrorCode MatGetGhosts_MPISELL(Mat mat, PetscInt *nghosts, const PetscInt *ghosts[])
683: {
684: Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
686: PetscFunctionBegin;
687: PetscCall(MatGetSize(sell->B, NULL, nghosts));
688: if (ghosts) *ghosts = sell->garray;
689: PetscFunctionReturn(PETSC_SUCCESS);
690: }
692: static PetscErrorCode MatGetInfo_MPISELL(Mat matin, MatInfoType flag, MatInfo *info)
693: {
694: Mat_MPISELL *mat = (Mat_MPISELL *)matin->data;
695: Mat A = mat->A, B = mat->B;
696: PetscLogDouble isend[5], irecv[5];
698: PetscFunctionBegin;
699: info->block_size = 1.0;
700: PetscCall(MatGetInfo(A, MAT_LOCAL, info));
702: isend[0] = info->nz_used;
703: isend[1] = info->nz_allocated;
704: isend[2] = info->nz_unneeded;
705: isend[3] = info->memory;
706: isend[4] = info->mallocs;
708: PetscCall(MatGetInfo(B, MAT_LOCAL, info));
710: isend[0] += info->nz_used;
711: isend[1] += info->nz_allocated;
712: isend[2] += info->nz_unneeded;
713: isend[3] += info->memory;
714: isend[4] += info->mallocs;
715: if (flag == MAT_LOCAL) {
716: info->nz_used = isend[0];
717: info->nz_allocated = isend[1];
718: info->nz_unneeded = isend[2];
719: info->memory = isend[3];
720: info->mallocs = isend[4];
721: } else if (flag == MAT_GLOBAL_MAX) {
722: PetscCallMPI(MPIU_Allreduce(isend, irecv, 5, MPIU_PETSCLOGDOUBLE, MPI_MAX, PetscObjectComm((PetscObject)matin)));
724: info->nz_used = irecv[0];
725: info->nz_allocated = irecv[1];
726: info->nz_unneeded = irecv[2];
727: info->memory = irecv[3];
728: info->mallocs = irecv[4];
729: } else if (flag == MAT_GLOBAL_SUM) {
730: PetscCallMPI(MPIU_Allreduce(isend, irecv, 5, MPIU_PETSCLOGDOUBLE, MPI_SUM, PetscObjectComm((PetscObject)matin)));
732: info->nz_used = irecv[0];
733: info->nz_allocated = irecv[1];
734: info->nz_unneeded = irecv[2];
735: info->memory = irecv[3];
736: info->mallocs = irecv[4];
737: }
738: info->fill_ratio_given = 0; /* no parallel LU/ILU/Cholesky */
739: info->fill_ratio_needed = 0;
740: info->factor_mallocs = 0;
741: PetscFunctionReturn(PETSC_SUCCESS);
742: }
744: static PetscErrorCode MatSetOption_MPISELL(Mat A, MatOption op, PetscBool flg)
745: {
746: Mat_MPISELL *a = (Mat_MPISELL *)A->data;
748: PetscFunctionBegin;
749: switch (op) {
750: case MAT_NEW_NONZERO_LOCATIONS:
751: case MAT_NEW_NONZERO_ALLOCATION_ERR:
752: case MAT_UNUSED_NONZERO_LOCATION_ERR:
753: case MAT_KEEP_NONZERO_PATTERN:
754: case MAT_NEW_NONZERO_LOCATION_ERR:
755: case MAT_USE_INODES:
756: case MAT_IGNORE_ZERO_ENTRIES:
757: MatCheckPreallocated(A, 1);
758: PetscCall(MatSetOption(a->A, op, flg));
759: PetscCall(MatSetOption(a->B, op, flg));
760: break;
761: case MAT_ROW_ORIENTED:
762: MatCheckPreallocated(A, 1);
763: a->roworiented = flg;
765: PetscCall(MatSetOption(a->A, op, flg));
766: PetscCall(MatSetOption(a->B, op, flg));
767: break;
768: case MAT_IGNORE_OFF_PROC_ENTRIES:
769: a->donotstash = flg;
770: break;
771: case MAT_SYMMETRIC:
772: MatCheckPreallocated(A, 1);
773: PetscCall(MatSetOption(a->A, op, flg));
774: break;
775: case MAT_STRUCTURALLY_SYMMETRIC:
776: MatCheckPreallocated(A, 1);
777: PetscCall(MatSetOption(a->A, op, flg));
778: break;
779: case MAT_HERMITIAN:
780: MatCheckPreallocated(A, 1);
781: PetscCall(MatSetOption(a->A, op, flg));
782: break;
783: case MAT_SYMMETRY_ETERNAL:
784: MatCheckPreallocated(A, 1);
785: PetscCall(MatSetOption(a->A, op, flg));
786: break;
787: case MAT_STRUCTURAL_SYMMETRY_ETERNAL:
788: MatCheckPreallocated(A, 1);
789: PetscCall(MatSetOption(a->A, op, flg));
790: break;
791: default:
792: break;
793: }
794: PetscFunctionReturn(PETSC_SUCCESS);
795: }
797: static PetscErrorCode MatDiagonalScale_MPISELL(Mat mat, Vec ll, Vec rr)
798: {
799: Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
800: Mat a = sell->A, b = sell->B;
801: PetscInt s1, s2, s3;
803: PetscFunctionBegin;
804: PetscCall(MatGetLocalSize(mat, &s2, &s3));
805: if (rr) {
806: PetscCall(VecGetLocalSize(rr, &s1));
807: PetscCheck(s1 == s3, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "right vector non-conforming local size");
808: /* Overlap communication with computation. */
809: PetscCall(VecScatterBegin(sell->Mvctx, rr, sell->lvec, INSERT_VALUES, SCATTER_FORWARD));
810: }
811: if (ll) {
812: PetscCall(VecGetLocalSize(ll, &s1));
813: PetscCheck(s1 == s2, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "left vector non-conforming local size");
814: PetscUseTypeMethod(b, diagonalscale, ll, NULL);
815: }
816: /* scale the diagonal block */
817: PetscUseTypeMethod(a, diagonalscale, ll, rr);
819: if (rr) {
820: /* Do a scatter end and then right scale the off-diagonal block */
821: PetscCall(VecScatterEnd(sell->Mvctx, rr, sell->lvec, INSERT_VALUES, SCATTER_FORWARD));
822: PetscUseTypeMethod(b, diagonalscale, NULL, sell->lvec);
823: }
824: PetscFunctionReturn(PETSC_SUCCESS);
825: }
827: static PetscErrorCode MatSetUnfactored_MPISELL(Mat A)
828: {
829: Mat_MPISELL *a = (Mat_MPISELL *)A->data;
831: PetscFunctionBegin;
832: PetscCall(MatSetUnfactored(a->A));
833: PetscFunctionReturn(PETSC_SUCCESS);
834: }
836: static PetscErrorCode MatEqual_MPISELL(Mat A, Mat B, PetscBool *flag)
837: {
838: Mat_MPISELL *matB = (Mat_MPISELL *)B->data, *matA = (Mat_MPISELL *)A->data;
839: Mat a, b, c, d;
840: PetscBool flg;
842: PetscFunctionBegin;
843: a = matA->A;
844: b = matA->B;
845: c = matB->A;
846: d = matB->B;
848: PetscCall(MatEqual(a, c, &flg));
849: if (flg) PetscCall(MatEqual(b, d, &flg));
850: PetscCallMPI(MPIU_Allreduce(&flg, flag, 1, MPI_C_BOOL, MPI_LAND, PetscObjectComm((PetscObject)A)));
851: PetscFunctionReturn(PETSC_SUCCESS);
852: }
854: static PetscErrorCode MatCopy_MPISELL(Mat A, Mat B, MatStructure str)
855: {
856: Mat_MPISELL *a = (Mat_MPISELL *)A->data;
857: Mat_MPISELL *b = (Mat_MPISELL *)B->data;
859: PetscFunctionBegin;
860: /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */
861: if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) {
862: /* because of the column compression in the off-processor part of the matrix a->B,
863: the number of columns in a->B and b->B may be different, hence we cannot call
864:        MatCopy() directly on the two parts. If need be, we can provide a more
865:        efficient copy than MatCopy_Basic() by first uncompressing the a->B matrices
866:        and then copying the submatrices */
867: PetscCall(MatCopy_Basic(A, B, str));
868: } else {
869: PetscCall(MatCopy(a->A, b->A, str));
870: PetscCall(MatCopy(a->B, b->B, str));
871: }
872: PetscFunctionReturn(PETSC_SUCCESS);
873: }
875: static PetscErrorCode MatSetUp_MPISELL(Mat A)
876: {
877: PetscFunctionBegin;
878: PetscCall(MatMPISELLSetPreallocation(A, PETSC_DEFAULT, NULL, PETSC_DEFAULT, NULL));
879: PetscFunctionReturn(PETSC_SUCCESS);
880: }
882: static PetscErrorCode MatConjugate_MPISELL(Mat mat)
883: {
884: Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
886: PetscFunctionBegin;
887: PetscCall(MatConjugate_SeqSELL(sell->A));
888: PetscCall(MatConjugate_SeqSELL(sell->B));
889: PetscFunctionReturn(PETSC_SUCCESS);
890: }
892: static PetscErrorCode MatInvertBlockDiagonal_MPISELL(Mat A, const PetscScalar **values)
893: {
894: Mat_MPISELL *a = (Mat_MPISELL *)A->data;
896: PetscFunctionBegin;
897: PetscCall(MatInvertBlockDiagonal(a->A, values));
898: A->factorerrortype = a->A->factorerrortype;
899: PetscFunctionReturn(PETSC_SUCCESS);
900: }
902: static PetscErrorCode MatSetRandom_MPISELL(Mat x, PetscRandom rctx)
903: {
904: Mat_MPISELL *sell = (Mat_MPISELL *)x->data;
906: PetscFunctionBegin;
907: PetscCall(MatSetRandom(sell->A, rctx));
908: PetscCall(MatSetRandom(sell->B, rctx));
909: PetscCall(MatAssemblyBegin(x, MAT_FINAL_ASSEMBLY));
910: PetscCall(MatAssemblyEnd(x, MAT_FINAL_ASSEMBLY));
911: PetscFunctionReturn(PETSC_SUCCESS);
912: }
914: static PetscErrorCode MatSetFromOptions_MPISELL(Mat A, PetscOptionItems PetscOptionsObject)
915: {
916: PetscFunctionBegin;
917: PetscOptionsHeadBegin(PetscOptionsObject, "MPISELL options");
918: PetscOptionsHeadEnd();
919: PetscFunctionReturn(PETSC_SUCCESS);
920: }
922: static PetscErrorCode MatShift_MPISELL(Mat Y, PetscScalar a)
923: {
924: Mat_MPISELL *msell = (Mat_MPISELL *)Y->data;
925: Mat_SeqSELL *sell = (Mat_SeqSELL *)msell->A->data;
927: PetscFunctionBegin;
928: if (!Y->preallocated) {
929: PetscCall(MatMPISELLSetPreallocation(Y, 1, NULL, 0, NULL));
930: } else if (!sell->nz) {
931: PetscInt nonew = sell->nonew;
932: PetscCall(MatSeqSELLSetPreallocation(msell->A, 1, NULL));
933: sell->nonew = nonew;
934: }
935: PetscCall(MatShift_Basic(Y, a));
936: PetscFunctionReturn(PETSC_SUCCESS);
937: }
939: static PetscErrorCode MatGetDiagonalBlock_MPISELL(Mat A, Mat *a)
940: {
941: PetscFunctionBegin;
942: *a = ((Mat_MPISELL *)A->data)->A;
943: PetscFunctionReturn(PETSC_SUCCESS);
944: }
946: static PetscErrorCode MatStoreValues_MPISELL(Mat mat)
947: {
948: Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
950: PetscFunctionBegin;
951: PetscCall(MatStoreValues(sell->A));
952: PetscCall(MatStoreValues(sell->B));
953: PetscFunctionReturn(PETSC_SUCCESS);
954: }
956: static PetscErrorCode MatRetrieveValues_MPISELL(Mat mat)
957: {
958: Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
960: PetscFunctionBegin;
961: PetscCall(MatRetrieveValues(sell->A));
962: PetscCall(MatRetrieveValues(sell->B));
963: PetscFunctionReturn(PETSC_SUCCESS);
964: }
966: static PetscErrorCode MatMPISELLSetPreallocation_MPISELL(Mat B, PetscInt d_rlenmax, const PetscInt d_rlen[], PetscInt o_rlenmax, const PetscInt o_rlen[])
967: {
968: Mat_MPISELL *b;
970: PetscFunctionBegin;
971: PetscCall(PetscLayoutSetUp(B->rmap));
972: PetscCall(PetscLayoutSetUp(B->cmap));
973: b = (Mat_MPISELL *)B->data;
975: if (!B->preallocated) {
976: /* Explicitly create 2 MATSEQSELL matrices. */
977: PetscCall(MatCreate(PETSC_COMM_SELF, &b->A));
978: PetscCall(MatSetSizes(b->A, B->rmap->n, B->cmap->n, B->rmap->n, B->cmap->n));
979: PetscCall(MatSetBlockSizesFromMats(b->A, B, B));
980: PetscCall(MatSetType(b->A, MATSEQSELL));
981: PetscCall(MatCreate(PETSC_COMM_SELF, &b->B));
982: PetscCall(MatSetSizes(b->B, B->rmap->n, B->cmap->N, B->rmap->n, B->cmap->N));
983: PetscCall(MatSetBlockSizesFromMats(b->B, B, B));
984: PetscCall(MatSetType(b->B, MATSEQSELL));
985: }
987: PetscCall(MatSeqSELLSetPreallocation(b->A, d_rlenmax, d_rlen));
988: PetscCall(MatSeqSELLSetPreallocation(b->B, o_rlenmax, o_rlen));
989: B->preallocated = PETSC_TRUE;
990: B->was_assembled = PETSC_FALSE;
991: /*
992: critical for MatAssemblyEnd to work.
993: MatAssemblyBegin checks it to set up was_assembled
994: and MatAssemblyEnd checks was_assembled to determine whether to build garray
995: */
996: B->assembled = PETSC_FALSE;
997: PetscFunctionReturn(PETSC_SUCCESS);
998: }
1000: static PetscErrorCode MatDuplicate_MPISELL(Mat matin, MatDuplicateOption cpvalues, Mat *newmat)
1001: {
1002: Mat mat;
1003: Mat_MPISELL *a, *oldmat = (Mat_MPISELL *)matin->data;
1005: PetscFunctionBegin;
1006: *newmat = NULL;
1007: PetscCall(MatCreate(PetscObjectComm((PetscObject)matin), &mat));
1008: PetscCall(MatSetSizes(mat, matin->rmap->n, matin->cmap->n, matin->rmap->N, matin->cmap->N));
1009: PetscCall(MatSetBlockSizesFromMats(mat, matin, matin));
1010: PetscCall(MatSetType(mat, ((PetscObject)matin)->type_name));
1011: a = (Mat_MPISELL *)mat->data;
1013: mat->factortype = matin->factortype;
1014: mat->assembled = PETSC_TRUE;
1015: mat->insertmode = NOT_SET_VALUES;
1016: mat->preallocated = PETSC_TRUE;
1018: a->size = oldmat->size;
1019: a->rank = oldmat->rank;
1020: a->donotstash = oldmat->donotstash;
1021: a->roworiented = oldmat->roworiented;
1022: a->rowindices = NULL;
1023: a->rowvalues = NULL;
1024: a->getrowactive = PETSC_FALSE;
1026: PetscCall(PetscLayoutReference(matin->rmap, &mat->rmap));
1027: PetscCall(PetscLayoutReference(matin->cmap, &mat->cmap));
1029: if (oldmat->colmap) {
1030: #if defined(PETSC_USE_CTABLE)
1031: PetscCall(PetscHMapIDuplicate(oldmat->colmap, &a->colmap));
1032: #else
1033: PetscCall(PetscMalloc1(mat->cmap->N, &a->colmap));
1034: PetscCall(PetscArraycpy(a->colmap, oldmat->colmap, mat->cmap->N));
1035: #endif
1036: } else a->colmap = NULL;
1037: if (oldmat->garray) {
1038: PetscInt len;
1039: len = oldmat->B->cmap->n;
1040: PetscCall(PetscMalloc1(len + 1, &a->garray));
1041: if (len) PetscCall(PetscArraycpy(a->garray, oldmat->garray, len));
1042: } else a->garray = NULL;
1044: PetscCall(VecDuplicate(oldmat->lvec, &a->lvec));
1045: PetscCall(VecScatterCopy(oldmat->Mvctx, &a->Mvctx));
1046: PetscCall(MatDuplicate(oldmat->A, cpvalues, &a->A));
1047: PetscCall(MatDuplicate(oldmat->B, cpvalues, &a->B));
1048: PetscCall(PetscFunctionListDuplicate(((PetscObject)matin)->qlist, &((PetscObject)mat)->qlist));
1049: *newmat = mat;
1050: PetscFunctionReturn(PETSC_SUCCESS);
1051: }
1053: static const struct _MatOps MatOps_Values = {MatSetValues_MPISELL,
1054: NULL,
1055: NULL,
1056: MatMult_MPISELL,
1057: /* 4*/ MatMultAdd_MPISELL,
1058: MatMultTranspose_MPISELL,
1059: MatMultTransposeAdd_MPISELL,
1060: NULL,
1061: NULL,
1062: NULL,
1063: /*10*/ NULL,
1064: NULL,
1065: NULL,
1066: MatSOR_MPISELL,
1067: NULL,
1068: /*15*/ MatGetInfo_MPISELL,
1069: MatEqual_MPISELL,
1070: MatGetDiagonal_MPISELL,
1071: MatDiagonalScale_MPISELL,
1072: NULL,
1073: /*20*/ MatAssemblyBegin_MPISELL,
1074: MatAssemblyEnd_MPISELL,
1075: MatSetOption_MPISELL,
1076: MatZeroEntries_MPISELL,
1077: /*24*/ NULL,
1078: NULL,
1079: NULL,
1080: NULL,
1081: NULL,
1082: /*29*/ MatSetUp_MPISELL,
1083: NULL,
1084: NULL,
1085: MatGetDiagonalBlock_MPISELL,
1086: NULL,
1087: /*34*/ MatDuplicate_MPISELL,
1088: NULL,
1089: NULL,
1090: NULL,
1091: NULL,
1092: /*39*/ NULL,
1093: NULL,
1094: NULL,
1095: MatGetValues_MPISELL,
1096: MatCopy_MPISELL,
1097: /*44*/ NULL,
1098: MatScale_MPISELL,
1099: MatShift_MPISELL,
1100: MatDiagonalSet_MPISELL,
1101: NULL,
1102: /*49*/ MatSetRandom_MPISELL,
1103: NULL,
1104: NULL,
1105: NULL,
1106: NULL,
1107: /*54*/ MatFDColoringCreate_MPIXAIJ,
1108: NULL,
1109: MatSetUnfactored_MPISELL,
1110: NULL,
1111: NULL,
1112: /*59*/ NULL,
1113: MatDestroy_MPISELL,
1114: MatView_MPISELL,
1115: NULL,
1116: NULL,
1117: /*64*/ NULL,
1118: NULL,
1119: NULL,
1120: NULL,
1121: NULL,
1122: /*69*/ NULL,
1123: NULL,
1124: NULL,
1125: MatFDColoringApply_AIJ, /* reuse AIJ function */
1126: MatSetFromOptions_MPISELL,
1127: NULL,
1128: /*75*/ NULL,
1129: NULL,
1130: NULL,
1131: NULL,
1132: NULL,
1133: /*80*/ NULL,
1134: NULL,
1135: NULL,
1136: /*83*/ NULL,
1137: NULL,
1138: NULL,
1139: NULL,
1140: NULL,
1141: NULL,
1142: /*89*/ NULL,
1143: NULL,
1144: NULL,
1145: NULL,
1146: MatConjugate_MPISELL,
1147: /*94*/ NULL,
1148: NULL,
1149: NULL,
1150: NULL,
1151: NULL,
1152: /*99*/ NULL,
1153: NULL,
1154: NULL,
1155: NULL,
1156: NULL,
1157: /*104*/ NULL,
1158: NULL,
1159: MatGetGhosts_MPISELL,
1160: NULL,
1161: NULL,
1162: /*109*/ MatMultDiagonalBlock_MPISELL,
1163: NULL,
1164: NULL,
1165: NULL,
1166: NULL,
1167: /*114*/ NULL,
1168: NULL,
1169: MatInvertBlockDiagonal_MPISELL,
1170: NULL,
1171: /*119*/ NULL,
1172: NULL,
1173: NULL,
1174: NULL,
1175: NULL,
1176: /*124*/ NULL,
1177: NULL,
1178: NULL,
1179: NULL,
1180: MatFDColoringSetUp_MPIXAIJ,
1181: /*129*/ NULL,
1182: NULL,
1183: NULL,
1184: NULL,
1185: NULL,
1186: /*134*/ NULL,
1187: NULL,
1188: NULL,
1189: NULL,
1190: NULL,
1191: /*139*/ NULL,
1192: NULL,
1193: NULL,
1194: NULL,
1195: NULL,
1196: MatADot_Default,
1197: /*144*/ MatANorm_Default,
1198: NULL,
1199: NULL};
1201: /*@C
1202: MatMPISELLSetPreallocation - Preallocates memory for a `MATMPISELL` sparse parallel matrix in sell format.
1203: For good matrix assembly performance the user should preallocate the matrix storage by
1204: setting the parameters `d_nz` (or `d_nnz`) and `o_nz` (or `o_nnz`).
1206: Collective
1208: Input Parameters:
1209: + B - the matrix
1210: . d_nz - number of nonzeros per row in DIAGONAL portion of local submatrix
1211: (same value is used for all local rows)
1212: . d_nnz - array containing the number of nonzeros in the various rows of the
1213: DIAGONAL portion of the local submatrix (possibly different for each row)
1214: or NULL (`PETSC_NULL_INTEGER` in Fortran), if `d_nz` is used to specify the nonzero structure.
1215: The size of this array is equal to the number of local rows, i.e 'm'.
1216: For matrices that will be factored, you must leave room for (and set)
1217: the diagonal entry even if it is zero.
1218: . o_nz - number of nonzeros per row in the OFF-DIAGONAL portion of local
1219: submatrix (same value is used for all local rows).
1220: - o_nnz - array containing the number of nonzeros in the various rows of the
1221: OFF-DIAGONAL portion of the local submatrix (possibly different for
1222: each row) or NULL (`PETSC_NULL_INTEGER` in Fortran), if `o_nz` is used to specify the nonzero
1223: structure. The size of this array is equal to the number
1224: of local rows, i.e 'm'.
1226: Example usage:
1227: Consider the following 8x8 matrix with 34 non-zero values, that is
1228:    assembled across 3 processors. Let's assume that proc0 owns 3 rows,
1229: proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
1230: as follows
1232: .vb
1233: 1 2 0 | 0 3 0 | 0 4
1234: Proc0 0 5 6 | 7 0 0 | 8 0
1235: 9 0 10 | 11 0 0 | 12 0
1236: -------------------------------------
1237: 13 0 14 | 15 16 17 | 0 0
1238: Proc1 0 18 0 | 19 20 21 | 0 0
1239: 0 0 0 | 22 23 0 | 24 0
1240: -------------------------------------
1241: Proc2 25 26 27 | 0 0 28 | 29 0
1242: 30 0 0 | 31 32 33 | 0 34
1243: .ve
1245: This can be represented as a collection of submatrices as
1247: .vb
1248: A B C
1249: D E F
1250: G H I
1251: .ve
1253: Where the submatrices A,B,C are owned by proc0, D,E,F are
1254: owned by proc1, G,H,I are owned by proc2.
1256: The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
1257: The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
1258: The 'M','N' parameters are 8,8, and have the same values on all procs.
1260: The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
1261: submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
1262: corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
1263: Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
1264: part as `MATSEQSELL` matrices. For example, proc1 will store [E] as a `MATSEQSELL`
1265: matrix, and [DF] as another SeqSELL matrix.
1267: When `d_nz`, `o_nz` parameters are specified, `d_nz` storage elements are
1268: allocated for every row of the local DIAGONAL submatrix, and o_nz
1269: storage locations are allocated for every row of the OFF-DIAGONAL submatrix.
1270: One way to choose `d_nz` and `o_nz` is to use the maximum number of nonzeros over
1271: the local rows for each of the local DIAGONAL, and the OFF-DIAGONAL submatrices.
1272: In this case, the values of d_nz,o_nz are
1273: .vb
1274:      proc0  d_nz = 2, o_nz = 2
1275:      proc1  d_nz = 3, o_nz = 2
1276:      proc2  d_nz = 1, o_nz = 4
1277: .ve
1278: We are allocating m*(d_nz+o_nz) storage locations for every proc. This
1279: translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
1280:    for proc2, i.e., we are using 12+15+10=37 storage locations to store
1281: 34 values.
1283: When `d_nnz`, `o_nnz` parameters are specified, the storage is specified
1284: for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
1285: In the above case the values for d_nnz,o_nnz are
1286: .vb
1287: proc0 d_nnz = [2,2,2] and o_nnz = [2,2,2]
1288: proc1 d_nnz = [3,3,2] and o_nnz = [2,1,1]
1289: proc2 d_nnz = [1,1] and o_nnz = [4,4]
1290: .ve
1291: Here the space allocated is according to nz (or maximum values in the nnz
1292:    if nnz is provided) for DIAGONAL and OFF-DIAGONAL submatrices, i.e., (2+2+3+2)*3+(1+4)*2=37
1294: Level: intermediate
1296: Notes:
1297: If the *_nnz parameter is given then the *_nz parameter is ignored
1299: The stored row and column indices begin with zero.
1301: The parallel matrix is partitioned such that the first m0 rows belong to
1302: process 0, the next m1 rows belong to process 1, the next m2 rows belong
1303: to process 2 etc.. where m0,m1,m2... are the input parameter 'm'.
1305: The DIAGONAL portion of the local submatrix of a processor can be defined
1306:    as the submatrix which is obtained by extracting the part corresponding to
1307:    the rows r1-r2 and columns c1-c2 of the global matrix, where r1 is the
1308:    first row that belongs to the processor, r2 is the last row belonging to
1309:    this processor, and c1-c2 is the range of indices of the local part of a
1310: vector suitable for applying the matrix to. This is an mxn matrix. In the
1311: common case of a square matrix, the row and column ranges are the same and
1312: the DIAGONAL part is also square. The remaining portion of the local
1313: submatrix (mxN) constitute the OFF-DIAGONAL portion.
1315: If `o_nnz`, `d_nnz` are specified, then `o_nz`, and `d_nz` are ignored.
1317: You can call `MatGetInfo()` to get information on how effective the preallocation was;
1318: for example the fields mallocs,nz_allocated,nz_used,nz_unneeded;
1319: You can also run with the option -info and look for messages with the string
1320: malloc in them to see if additional memory allocation was needed.
1322: .seealso: `Mat`, `MatCreate()`, `MatCreateSeqSELL()`, `MatSetValues()`, `MatCreateSELL()`,
1323: `MATMPISELL`, `MatGetInfo()`, `PetscSplitOwnership()`, `MATSELL`
1324: @*/
1325: PetscErrorCode MatMPISELLSetPreallocation(Mat B, PetscInt d_nz, const PetscInt d_nnz[], PetscInt o_nz, const PetscInt o_nnz[])
1326: {
1327: PetscFunctionBegin;
1330: PetscTryMethod(B, "MatMPISELLSetPreallocation_C", (Mat, PetscInt, const PetscInt[], PetscInt, const PetscInt[]), (B, d_nz, d_nnz, o_nz, o_nnz));
1331: PetscFunctionReturn(PETSC_SUCCESS);
1332: }
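As a usage sketch for the per-row variant (not part of the original file; the sizes and arrays are taken from the 8x8 example in the manual page above and show only what the rank owning rows 3-5, i.e. proc1, would pass; in a real run every rank supplies its own local sizes and arrays):

   Mat      B;
   PetscInt d_nnz[3] = {3, 3, 2}; /* diagonal-block row lengths for proc1's three local rows */
   PetscInt o_nnz[3] = {2, 1, 1}; /* off-diagonal-block row lengths for the same rows */
   PetscCall(MatCreate(PETSC_COMM_WORLD, &B));
   PetscCall(MatSetSizes(B, 3, 3, 8, 8)); /* 3 local rows/columns of the global 8x8 matrix */
   PetscCall(MatSetType(B, MATMPISELL));
   PetscCall(MatMPISELLSetPreallocation(B, 0, d_nnz, 0, o_nnz)); /* d_nz/o_nz ignored when arrays are given */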
1334: /*MC
1335: MATMPISELL - MATMPISELL = "mpisell" - A matrix type to be used for MPI sparse matrices,
1336: based on the sliced Ellpack format
1338: Options Database Key:
1339: . -mat_type mpisell - sets the matrix type to `MATMPISELL` during a call to `MatSetFromOptions()`
1341: Level: beginner
1343: .seealso: `Mat`, `MatCreateSELL()`, `MATSEQSELL`, `MATSELL`, `MATSEQAIJ`, `MATAIJ`, `MATMPIAIJ`
1344: M*/
1346: /*@C
1347: MatCreateSELL - Creates a sparse parallel matrix in `MATSELL` format.
1349: Collective
1351: Input Parameters:
1352: + comm - MPI communicator
1353: . m - number of local rows (or `PETSC_DECIDE` to have calculated if M is given)
1354: This value should be the same as the local size used in creating the
1355: y vector for the matrix-vector product y = Ax.
1356: . n - This value should be the same as the local size used in creating the
1357: x vector for the matrix-vector product y = Ax. (or `PETSC_DECIDE` to have
1358: calculated if `N` is given) For square matrices n is almost always `m`.
1359: . M - number of global rows (or `PETSC_DETERMINE` to have calculated if `m` is given)
1360: . N - number of global columns (or `PETSC_DETERMINE` to have calculated if `n` is given)
1361: . d_rlenmax - max number of nonzeros per row in DIAGONAL portion of local submatrix
1362: (same value is used for all local rows)
1363: . d_rlen - array containing the number of nonzeros in the various rows of the
1364: DIAGONAL portion of the local submatrix (possibly different for each row)
1365: or `NULL`, if d_rlenmax is used to specify the nonzero structure.
1366: The size of this array is equal to the number of local rows, i.e `m`.
1367: . o_rlenmax - max number of nonzeros per row in the OFF-DIAGONAL portion of local
1368: submatrix (same value is used for all local rows).
1369: - o_rlen - array containing the number of nonzeros in the various rows of the
1370: OFF-DIAGONAL portion of the local submatrix (possibly different for
1371: each row) or `NULL`, if `o_rlenmax` is used to specify the nonzero
1372: structure. The size of this array is equal to the number
1373: of local rows, i.e `m`.
1375: Output Parameter:
1376: . A - the matrix
1378: Options Database Key:
1379: . -mat_sell_oneindex - Internally use indexing starting at 1
1380: rather than 0. When calling `MatSetValues()`,
1381: the user still MUST index entries starting at 0!
1383: Example:
1384: Consider the following 8x8 matrix with 34 non-zero values, that is
1385:    assembled across 3 processors. Let's assume that proc0 owns 3 rows,
1386: proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
1387: as follows
1389: .vb
1390: 1 2 0 | 0 3 0 | 0 4
1391: Proc0 0 5 6 | 7 0 0 | 8 0
1392: 9 0 10 | 11 0 0 | 12 0
1393: -------------------------------------
1394: 13 0 14 | 15 16 17 | 0 0
1395: Proc1 0 18 0 | 19 20 21 | 0 0
1396: 0 0 0 | 22 23 0 | 24 0
1397: -------------------------------------
1398: Proc2 25 26 27 | 0 0 28 | 29 0
1399: 30 0 0 | 31 32 33 | 0 34
1400: .ve
1402: This can be represented as a collection of submatrices as
1403: .vb
1404: A B C
1405: D E F
1406: G H I
1407: .ve
1409: Where the submatrices A,B,C are owned by proc0, D,E,F are
1410: owned by proc1, G,H,I are owned by proc2.
1412: The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
1413: The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
1414: The 'M','N' parameters are 8,8, and have the same values on all procs.
1416: The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
1417: submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
1418: corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
1419: Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
1420: part as `MATSEQSELL` matrices. For example, proc1 will store [E] as a `MATSEQSELL`
1421: matrix, and [DF] as another `MATSEQSELL` matrix.
1423: When d_rlenmax, o_rlenmax parameters are specified, d_rlenmax storage elements are
1424: allocated for every row of the local DIAGONAL submatrix, and o_rlenmax
1425: storage locations are allocated for every row of the OFF-DIAGONAL submatrix.
1426: One way to choose `d_rlenmax` and `o_rlenmax` is to use the maximum number of nonzeros over
1427: the local rows for each of the local DIAGONAL, and the OFF-DIAGONAL submatrices.
1428: In this case, the values of d_rlenmax,o_rlenmax are
1429: .vb
1430: proc0 - d_rlenmax = 2, o_rlenmax = 2
1431: proc1 - d_rlenmax = 3, o_rlenmax = 2
1432: proc2 - d_rlenmax = 1, o_rlenmax = 4
1433: .ve
1434: We are allocating m*(d_rlenmax+o_rlenmax) storage locations for every proc. This
1435: translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
1436:    for proc2, i.e., we are using 12+15+10=37 storage locations to store
1437: 34 values.
1439: When `d_rlen`, `o_rlen` parameters are specified, the storage is specified
1440: for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
1441:    In the above case the values for `d_rlen`, `o_rlen` are
1442: .vb
1443:      proc0 - d_rlen = [2,2,2] and o_rlen = [2,2,2]
1444:      proc1 - d_rlen = [3,3,2] and o_rlen = [2,1,1]
1445:      proc2 - d_rlen = [1,1]   and o_rlen = [4,4]
1446: .ve
1447: Here the space allocated is still 37 though there are 34 nonzeros because
1448: the allocation is always done according to rlenmax.
1450: Level: intermediate
1452: Notes:
1453: It is recommended that one use the `MatCreate()`, `MatSetType()` and/or `MatSetFromOptions()`,
1454: MatXXXXSetPreallocation() paradigm instead of this routine directly.
1455: [MatXXXXSetPreallocation() is, for example, `MatSeqSELLSetPreallocation()`]
1457: If the *_rlen parameter is given then the *_rlenmax parameter is ignored
1459: `m`, `n`, `M`, `N` parameters specify the size of the matrix, and its partitioning across
1460: processors, while `d_rlenmax`, `d_rlen`, `o_rlenmax` , `o_rlen` parameters specify the approximate
1461: storage requirements for this matrix.
1463: If `PETSC_DECIDE` or `PETSC_DETERMINE` is used for a particular argument on one
1464: processor then it must be used on all processors that share the object for
1465: that argument.
1467: The user MUST specify either the local or global matrix dimensions
1468: (possibly both).
1470: The parallel matrix is partitioned across processors such that the
1471: first m0 rows belong to process 0, the next m1 rows belong to
1472: process 1, the next m2 rows belong to process 2, etc., where
1473: m0,m1,m2,... are the values of the input parameter `m` on each process, i.e. each processor
1474: stores values corresponding to an [`m` x `N`] submatrix.
1476: The columns are logically partitioned with the first n0 columns belonging
1477: to the 0th partition, the next n1 columns belonging to the next
1478: partition, etc., where n0,n1,n2,... are the values of the input parameter `n` on each process.
1480: The DIAGONAL portion of the local submatrix on any given processor
1481: is the submatrix corresponding to the `m` rows and `n` columns
1482: owned by that processor, i.e. the diagonal submatrix on
1483: process 0 is [m0 x n0], the diagonal submatrix on process 1 is [m1 x n1],
1484: etc. The remaining portion of the local submatrix [m x (N-n)]
1485: constitutes the OFF-DIAGONAL portion. The example above
1486: illustrates this concept.
1488: For a square global matrix we define each processor's diagonal portion
1489: to be its local rows and the corresponding columns (a square submatrix);
1490: each processor's off-diagonal portion encompasses the remainder of the
1491: local matrix (a rectangular submatrix).
1493: If `o_rlen`, `d_rlen` are specified, then `o_rlenmax` and `d_rlenmax` are ignored.
1495: When calling this routine with a single process communicator, a matrix of
1496: type `MATSEQSELL` is returned. If a matrix of type `MATMPISELL` is desired for this
1497: type of communicator, use the construction mechanism
1498: .vb
1499: MatCreate(...,&A);
1500: MatSetType(A,MATMPISELL);
1501: MatSetSizes(A, m,n,M,N);
1502: MatMPISELLSetPreallocation(A,...);
1503: .ve
1505: .seealso: `Mat`, `MATSELL`, `MatCreate()`, `MatCreateSeqSELL()`, `MatSetValues()`, `MatMPISELLSetPreallocation()`, `MATMPISELL`
1506: @*/
1507: PetscErrorCode MatCreateSELL(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt M, PetscInt N, PetscInt d_rlenmax, const PetscInt d_rlen[], PetscInt o_rlenmax, const PetscInt o_rlen[], Mat *A)
1508: {
1509: PetscMPIInt size;
1511: PetscFunctionBegin;
1512: PetscCall(MatCreate(comm, A));
1513: PetscCall(MatSetSizes(*A, m, n, M, N));
1514: PetscCallMPI(MPI_Comm_size(comm, &size));
1515: if (size > 1) {
1516: PetscCall(MatSetType(*A, MATMPISELL));
1517: PetscCall(MatMPISELLSetPreallocation(*A, d_rlenmax, d_rlen, o_rlenmax, o_rlen));
1518: } else {
1519: PetscCall(MatSetType(*A, MATSEQSELL));
1520: PetscCall(MatSeqSELLSetPreallocation(*A, d_rlenmax, d_rlen));
1521: }
1522: PetscFunctionReturn(PETSC_SUCCESS);
1523: }
1525: /*@C
1526: MatMPISELLGetSeqSELL - Returns the local pieces of this distributed matrix
1528: Not Collective
1530: Input Parameter:
1531: . A - the `MATMPISELL` matrix
1533: Output Parameters:
1534: + Ad - The diagonal portion of `A`
1535: . Ao - The off-diagonal portion of `A`
1536: - colmap - An array mapping local column numbers of `Ao` to global column numbers of the parallel matrix
1538: Level: advanced
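Example usage, a minimal sketch:
.vb
   Mat            Ad, Ao;
   const PetscInt *colmap;

   MatMPISELLGetSeqSELL(A, &Ad, &Ao, &colmap);
   /* local column j of Ao corresponds to global column colmap[j] of A */
.ve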
1540: .seealso: `Mat`, `MATSEQSELL`, `MATMPISELL`
1541: @*/
1542: PetscErrorCode MatMPISELLGetSeqSELL(Mat A, Mat *Ad, Mat *Ao, const PetscInt *colmap[])
1543: {
1544: Mat_MPISELL *a = (Mat_MPISELL *)A->data;
1545: PetscBool flg;
1547: PetscFunctionBegin;
1548: PetscCall(PetscObjectTypeCompare((PetscObject)A, MATMPISELL, &flg));
1549: PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "This function requires a MATMPISELL matrix as input");
1550: if (Ad) *Ad = a->A;
1551: if (Ao) *Ao = a->B;
1552: if (colmap) *colmap = a->garray;
1553: PetscFunctionReturn(PETSC_SUCCESS);
1554: }
1556: /*@C
1557: MatMPISELLGetLocalMatCondensed - Creates a `MATSEQSELL` matrix from a `MATMPISELL` matrix by
1558: taking all of its local rows and NON-ZERO columns
1560: Not Collective
1562: Input Parameters:
1563: + A - the matrix
1564: . scall - either `MAT_INITIAL_MATRIX` or `MAT_REUSE_MATRIX`
1565: . row - index set of rows to extract (or `NULL` for all local rows)
1566: - col - index set of columns to extract (or `NULL` for all nonzero columns)
1568: Output Parameter:
1569: . A_loc - the local sequential matrix generated
1571: Level: advanced
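Example usage, a minimal sketch (passing `NULL` for both index sets selects all local rows
and all nonzero columns):
.vb
   Mat A_loc;

   MatMPISELLGetLocalMatCondensed(A, MAT_INITIAL_MATRIX, NULL, NULL, &A_loc);
   /* ... work with the sequential matrix A_loc ... */
   MatDestroy(&A_loc);
.ve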
1573: .seealso: `Mat`, `MATSEQSELL`, `MATMPISELL`, `MatGetOwnershipRange()`, `MatMPISELLGetLocalMat()`
1574: @*/
1575: PetscErrorCode MatMPISELLGetLocalMatCondensed(Mat A, MatReuse scall, IS *row, IS *col, Mat *A_loc)
1576: {
1577: Mat_MPISELL *a = (Mat_MPISELL *)A->data;
1578: PetscInt i, start, end, ncols, nzA, nzB, *cmap, imark, *idx;
1579: IS isrowa, iscola;
1580: Mat *aloc;
1581: PetscBool match;
1583: PetscFunctionBegin;
1584: PetscCall(PetscObjectTypeCompare((PetscObject)A, MATMPISELL, &match));
1585: PetscCheck(match, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Requires MATMPISELL matrix as input");
1586: PetscCall(PetscLogEventBegin(MAT_Getlocalmatcondensed, A, 0, 0, 0));
1587: if (!row) {
1588: start = A->rmap->rstart;
1589: end = A->rmap->rend;
1590: PetscCall(ISCreateStride(PETSC_COMM_SELF, end - start, start, 1, &isrowa));
1591: } else {
1592: isrowa = *row;
1593: }
1594: if (!col) {
1595: start = A->cmap->rstart;
1596: cmap = a->garray;
1597: nzA = a->A->cmap->n;
1598: nzB = a->B->cmap->n;
1599: PetscCall(PetscMalloc1(nzA + nzB, &idx));
1600: ncols = 0;
1601: for (i = 0; i < nzB; i++) {
1602: if (cmap[i] < start) idx[ncols++] = cmap[i];
1603: else break;
1604: }
1605: imark = i;
1606: for (i = 0; i < nzA; i++) idx[ncols++] = start + i;
1607: for (i = imark; i < nzB; i++) idx[ncols++] = cmap[i];
1608: PetscCall(ISCreateGeneral(PETSC_COMM_SELF, ncols, idx, PETSC_OWN_POINTER, &iscola));
1609: } else {
1610: iscola = *col;
1611: }
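  /* for MAT_REUSE_MATRIX, MatCreateSubMatrices() expects an array holding the previously
     created submatrix, so wrap the existing local matrix in a length-one array */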
1612: if (scall != MAT_INITIAL_MATRIX) {
1613: PetscCall(PetscMalloc1(1, &aloc));
1614: aloc[0] = *A_loc;
1615: }
1616: PetscCall(MatCreateSubMatrices(A, 1, &isrowa, &iscola, scall, &aloc));
1617: *A_loc = aloc[0];
1618: PetscCall(PetscFree(aloc));
1619: if (!row) PetscCall(ISDestroy(&isrowa));
1620: if (!col) PetscCall(ISDestroy(&iscola));
1621: PetscCall(PetscLogEventEnd(MAT_Getlocalmatcondensed, A, 0, 0, 0));
1622: PetscFunctionReturn(PETSC_SUCCESS);
1623: }
1625: #include <../src/mat/impls/aij/mpi/mpiaij.h>
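/*
   The conversions below are normally reached through the generic MatConvert() interface;
   MatConvert_MPISELL_MPIAIJ is composed on the matrix in MatCreate_MPISELL() further down.
   A sketch of a user-level call (with Asell an assembled MATMPISELL matrix) would be

      Mat Aaij;
      MatConvert(Asell, MATMPIAIJ, MAT_INITIAL_MATRIX, &Aaij);
*/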
1627: PetscErrorCode MatConvert_MPISELL_MPIAIJ(Mat A, MatType newtype, MatReuse reuse, Mat *newmat)
1628: {
1629: Mat_MPISELL *a = (Mat_MPISELL *)A->data;
1630: Mat B;
1631: Mat_MPIAIJ *b;
1633: PetscFunctionBegin;
1634: PetscCheck(A->assembled, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Matrix must be assembled");
1636: if (reuse == MAT_REUSE_MATRIX) {
1637: B = *newmat;
1638: } else {
1639: PetscCall(MatCreate(PetscObjectComm((PetscObject)A), &B));
1640: PetscCall(MatSetType(B, MATMPIAIJ));
1641: PetscCall(MatSetSizes(B, A->rmap->n, A->cmap->n, A->rmap->N, A->cmap->N));
1642: PetscCall(MatSetBlockSizes(B, A->rmap->bs, A->cmap->bs));
1643: PetscCall(MatSeqAIJSetPreallocation(B, 0, NULL));
1644: PetscCall(MatMPIAIJSetPreallocation(B, 0, NULL, 0, NULL));
1645: }
1646: b = (Mat_MPIAIJ *)B->data;
1648: if (reuse == MAT_REUSE_MATRIX) {
1649: PetscCall(MatConvert_SeqSELL_SeqAIJ(a->A, MATSEQAIJ, MAT_REUSE_MATRIX, &b->A));
1650: PetscCall(MatConvert_SeqSELL_SeqAIJ(a->B, MATSEQAIJ, MAT_REUSE_MATRIX, &b->B));
1651: } else {
1652: PetscCall(MatDestroy(&b->A));
1653: PetscCall(MatDestroy(&b->B));
1654: PetscCall(MatDisAssemble_MPISELL(A));
1655: PetscCall(MatConvert_SeqSELL_SeqAIJ(a->A, MATSEQAIJ, MAT_INITIAL_MATRIX, &b->A));
1656: PetscCall(MatConvert_SeqSELL_SeqAIJ(a->B, MATSEQAIJ, MAT_INITIAL_MATRIX, &b->B));
1657: PetscCall(MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY));
1658: PetscCall(MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY));
1659: PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
1660: PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));
1661: }
1663: if (reuse == MAT_INPLACE_MATRIX) {
1664: PetscCall(MatHeaderReplace(A, &B));
1665: } else {
1666: *newmat = B;
1667: }
1668: PetscFunctionReturn(PETSC_SUCCESS);
1669: }
1671: PetscErrorCode MatConvert_MPIAIJ_MPISELL(Mat A, MatType newtype, MatReuse reuse, Mat *newmat)
1672: {
1673: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
1674: Mat B;
1675: Mat_MPISELL *b;
1677: PetscFunctionBegin;
1678: PetscCheck(A->assembled, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Matrix must be assembled");
1680: if (reuse == MAT_REUSE_MATRIX) {
1681: B = *newmat;
1682: } else {
1683: Mat_SeqAIJ *Aa = (Mat_SeqAIJ *)a->A->data, *Ba = (Mat_SeqAIJ *)a->B->data;
1684: PetscInt i, d_nz = 0, o_nz = 0, m = A->rmap->N, n = A->cmap->N, lm = A->rmap->n, ln = A->cmap->n;
1685: PetscInt *d_nnz, *o_nnz;
1686: PetscCall(PetscMalloc2(lm, &d_nnz, lm, &o_nnz));
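    /* count the nonzeros of each local row in the diagonal (A) and off-diagonal (B) blocks
       from their CSR row pointers, tracking the per-block maxima for the rlenmax arguments */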
1687: for (i = 0; i < lm; i++) {
1688: d_nnz[i] = Aa->i[i + 1] - Aa->i[i];
1689: o_nnz[i] = Ba->i[i + 1] - Ba->i[i];
1690: if (d_nnz[i] > d_nz) d_nz = d_nnz[i];
1691: if (o_nnz[i] > o_nz) o_nz = o_nnz[i];
1692: }
1693: PetscCall(MatCreate(PetscObjectComm((PetscObject)A), &B));
1694: PetscCall(MatSetType(B, MATMPISELL));
1695: PetscCall(MatSetSizes(B, lm, ln, m, n));
1696: PetscCall(MatSetBlockSizes(B, A->rmap->bs, A->cmap->bs));
1697: PetscCall(MatSeqSELLSetPreallocation(B, d_nz, d_nnz));
1698: PetscCall(MatMPISELLSetPreallocation(B, d_nz, d_nnz, o_nz, o_nnz));
1699: PetscCall(PetscFree2(d_nnz, o_nnz));
1700: }
1701: b = (Mat_MPISELL *)B->data;
1703: if (reuse == MAT_REUSE_MATRIX) {
1704: PetscCall(MatConvert_SeqAIJ_SeqSELL(a->A, MATSEQSELL, MAT_REUSE_MATRIX, &b->A));
1705: PetscCall(MatConvert_SeqAIJ_SeqSELL(a->B, MATSEQSELL, MAT_REUSE_MATRIX, &b->B));
1706: } else {
1707: PetscCall(MatDestroy(&b->A));
1708: PetscCall(MatDestroy(&b->B));
1709: PetscCall(MatConvert_SeqAIJ_SeqSELL(a->A, MATSEQSELL, MAT_INITIAL_MATRIX, &b->A));
1710: PetscCall(MatConvert_SeqAIJ_SeqSELL(a->B, MATSEQSELL, MAT_INITIAL_MATRIX, &b->B));
1711: PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
1712: PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));
1713: PetscCall(MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY));
1714: PetscCall(MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY));
1715: }
1717: if (reuse == MAT_INPLACE_MATRIX) {
1718: PetscCall(MatHeaderReplace(A, &B));
1719: } else {
1720: *newmat = B;
1721: }
1722: PetscFunctionReturn(PETSC_SUCCESS);
1723: }
1725: PetscErrorCode MatSOR_MPISELL(Mat matin, Vec bb, PetscReal omega, MatSORType flag, PetscReal fshift, PetscInt its, PetscInt lits, Vec xx)
1726: {
1727: Mat_MPISELL *mat = (Mat_MPISELL *)matin->data;
1728: Vec bb1 = NULL;
1730: PetscFunctionBegin;
1731: if (flag == SOR_APPLY_UPPER) {
1732: PetscUseTypeMethod(mat->A, sor, bb, omega, flag, fshift, lits, 1, xx);
1733: PetscFunctionReturn(PETSC_SUCCESS);
1734: }
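  /* bb1 holds the locally modified right-hand side bb - B*x; it is needed whenever the
     current iterate contributes to the sweep (more than one iteration, a nonzero initial
     guess, or Eisenstat) */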
1736: if (its > 1 || ~flag & SOR_ZERO_INITIAL_GUESS || flag & SOR_EISENSTAT) PetscCall(VecDuplicate(bb, &bb1));
1738: if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP) {
1739: if (flag & SOR_ZERO_INITIAL_GUESS) {
1740: PetscUseTypeMethod(mat->A, sor, bb, omega, flag, fshift, lits, 1, xx);
1741: its--;
1742: }
1744: while (its--) {
1745: PetscCall(VecScatterBegin(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1746: PetscCall(VecScatterEnd(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1748: /* update rhs: bb1 = bb - B*x */
1749: PetscCall(VecScale(mat->lvec, -1.0));
1750: PetscUseTypeMethod(mat->B, multadd, mat->lvec, bb, bb1);
1752: /* local sweep */
1753: PetscUseTypeMethod(mat->A, sor, bb1, omega, SOR_SYMMETRIC_SWEEP, fshift, lits, 1, xx);
1754: }
1755: } else if (flag & SOR_LOCAL_FORWARD_SWEEP) {
1756: if (flag & SOR_ZERO_INITIAL_GUESS) {
1757: PetscUseTypeMethod(mat->A, sor, bb, omega, flag, fshift, lits, 1, xx);
1758: its--;
1759: }
1760: while (its--) {
1761: PetscCall(VecScatterBegin(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1762: PetscCall(VecScatterEnd(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1764: /* update rhs: bb1 = bb - B*x */
1765: PetscCall(VecScale(mat->lvec, -1.0));
1766: PetscUseTypeMethod(mat->B, multadd, mat->lvec, bb, bb1);
1768: /* local sweep */
1769: PetscUseTypeMethod(mat->A, sor, bb1, omega, SOR_FORWARD_SWEEP, fshift, lits, 1, xx);
1770: }
1771: } else if (flag & SOR_LOCAL_BACKWARD_SWEEP) {
1772: if (flag & SOR_ZERO_INITIAL_GUESS) {
1773: PetscUseTypeMethod(mat->A, sor, bb, omega, flag, fshift, lits, 1, xx);
1774: its--;
1775: }
1776: while (its--) {
1777: PetscCall(VecScatterBegin(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1778: PetscCall(VecScatterEnd(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1780: /* update rhs: bb1 = bb - B*x */
1781: PetscCall(VecScale(mat->lvec, -1.0));
1782: PetscUseTypeMethod(mat->B, multadd, mat->lvec, bb, bb1);
1784: /* local sweep */
1785: PetscUseTypeMethod(mat->A, sor, bb1, omega, SOR_BACKWARD_SWEEP, fshift, lits, 1, xx);
1786: }
1787: } else SETERRQ(PetscObjectComm((PetscObject)matin), PETSC_ERR_SUP, "Parallel SOR not supported");
1789: PetscCall(VecDestroy(&bb1));
1791: matin->factorerrortype = mat->A->factorerrortype;
1792: PetscFunctionReturn(PETSC_SUCCESS);
1793: }
1795: #if defined(PETSC_HAVE_CUDA)
1796: PETSC_INTERN PetscErrorCode MatConvert_MPISELL_MPISELLCUDA(Mat, MatType, MatReuse, Mat *);
1797: #endif
1799: /*MC
1800: MATMPISELL - MATMPISELL = "mpisell" - A matrix type to be used for parallel sparse matrices.
1802: Options Database Keys:
1803: . -mat_type mpisell - sets the matrix type to `MATMPISELL` during a call to `MatSetFromOptions()`
1805: Level: beginner
1807: .seealso: `Mat`, `MATSELL`, `MATSEQSELL`, `MatCreateSELL()`
1808: M*/
1809: PETSC_EXTERN PetscErrorCode MatCreate_MPISELL(Mat B)
1810: {
1811: Mat_MPISELL *b;
1812: PetscMPIInt size;
1814: PetscFunctionBegin;
1815: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)B), &size));
1816: PetscCall(PetscNew(&b));
1817: B->data = (void *)b;
1818: B->ops[0] = MatOps_Values;
1819: B->assembled = PETSC_FALSE;
1820: B->insertmode = NOT_SET_VALUES;
1821: b->size = size;
1822: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)B), &b->rank));
1823: /* build cache for off array entries formed */
1824: PetscCall(MatStashCreate_Private(PetscObjectComm((PetscObject)B), 1, &B->stash));
1826: b->donotstash = PETSC_FALSE;
1827: b->colmap = NULL;
1828: b->garray = NULL;
1829: b->roworiented = PETSC_TRUE;
1831: /* stuff used for matrix vector multiply */
1832: b->lvec = NULL;
1833: b->Mvctx = NULL;
1835: /* stuff for MatGetRow() */
1836: b->rowindices = NULL;
1837: b->rowvalues = NULL;
1838: b->getrowactive = PETSC_FALSE;
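  /* compose the type-specific implementations that generic interfaces such as MatStoreValues(),
     MatConvert() and MatMPISELLSetPreallocation() dispatch to by name */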
1840: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatStoreValues_C", MatStoreValues_MPISELL));
1841: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatRetrieveValues_C", MatRetrieveValues_MPISELL));
1842: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatIsTranspose_C", MatIsTranspose_MPISELL));
1843: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMPISELLSetPreallocation_C", MatMPISELLSetPreallocation_MPISELL));
1844: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpisell_mpiaij_C", MatConvert_MPISELL_MPIAIJ));
1845: #if defined(PETSC_HAVE_CUDA)
1846: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpisell_mpisellcuda_C", MatConvert_MPISELL_MPISELLCUDA));
1847: #endif
1848: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatDiagonalScaleLocal_C", MatDiagonalScaleLocal_MPISELL));
1849: PetscCall(PetscObjectChangeTypeName((PetscObject)B, MATMPISELL));
1850: PetscFunctionReturn(PETSC_SUCCESS);
1851: }