Actual source code: mpiaij.c
1: #include <../src/mat/impls/aij/mpi/mpiaij.h>
2: #include <petsc/private/vecimpl.h>
3: #include <petsc/private/sfimpl.h>
4: #include <petsc/private/isimpl.h>
5: #include <petscblaslapack.h>
6: #include <petscsf.h>
7: #include <petsc/private/hashmapi.h>
9: /* defines MatSetValues_MPI_Hash(), MatAssemblyBegin_MPI_Hash(), and MatAssemblyEnd_MPI_Hash() */
10: #define TYPE AIJ
11: #define TYPE_AIJ
12: #include "../src/mat/impls/aij/mpi/mpihashmat.h"
13: #undef TYPE
14: #undef TYPE_AIJ
16: static PetscErrorCode MatReset_MPIAIJ(Mat mat)
17: {
18: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
20: PetscFunctionBegin;
21: PetscCall(PetscLogObjectState((PetscObject)mat, "Rows=%" PetscInt_FMT ", Cols=%" PetscInt_FMT, mat->rmap->N, mat->cmap->N));
22: PetscCall(MatStashDestroy_Private(&mat->stash));
23: PetscCall(VecDestroy(&aij->diag));
24: PetscCall(MatDestroy(&aij->A));
25: PetscCall(MatDestroy(&aij->B));
26: #if defined(PETSC_USE_CTABLE)
27: PetscCall(PetscHMapIDestroy(&aij->colmap));
28: #else
29: PetscCall(PetscFree(aij->colmap));
30: #endif
31: PetscCall(PetscFree(aij->garray));
32: PetscCall(VecDestroy(&aij->lvec));
33: PetscCall(VecScatterDestroy(&aij->Mvctx));
34: PetscCall(PetscFree2(aij->rowvalues, aij->rowindices));
35: PetscCall(PetscFree(aij->ld));
36: PetscFunctionReturn(PETSC_SUCCESS);
37: }
39: static PetscErrorCode MatResetHash_MPIAIJ(Mat mat)
40: {
41: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
42: /* Save the nonzero states of the component matrices because those are what are used to determine
43: the nonzero state of mat */
44: PetscObjectState Astate = aij->A->nonzerostate, Bstate = aij->B->nonzerostate;
46: PetscFunctionBegin;
47: PetscCall(MatReset_MPIAIJ(mat));
48: PetscCall(MatSetUp_MPI_Hash(mat));
49: aij->A->nonzerostate = ++Astate, aij->B->nonzerostate = ++Bstate;
50: PetscFunctionReturn(PETSC_SUCCESS);
51: }
53: PetscErrorCode MatDestroy_MPIAIJ(Mat mat)
54: {
55: PetscFunctionBegin;
56: PetscCall(MatReset_MPIAIJ(mat));
58: PetscCall(PetscFree(mat->data));
60: /* may be created by MatCreateMPIAIJSumSeqAIJSymbolic */
61: PetscCall(PetscObjectCompose((PetscObject)mat, "MatMergeSeqsToMPI", NULL));
63: PetscCall(PetscObjectChangeTypeName((PetscObject)mat, NULL));
64: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatStoreValues_C", NULL));
65: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatRetrieveValues_C", NULL));
66: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatIsTranspose_C", NULL));
67: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatMPIAIJSetPreallocation_C", NULL));
68: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatResetPreallocation_C", NULL));
69: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatResetHash_C", NULL));
70: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatMPIAIJSetPreallocationCSR_C", NULL));
71: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatDiagonalScaleLocal_C", NULL));
72: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpibaij_C", NULL));
73: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpisbaij_C", NULL));
74: #if defined(PETSC_HAVE_CUDA)
75: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpiaijcusparse_C", NULL));
76: #endif
77: #if defined(PETSC_HAVE_HIP)
78: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpiaijhipsparse_C", NULL));
79: #endif
80: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
81: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpiaijkokkos_C", NULL));
82: #endif
83: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpidense_C", NULL));
84: #if defined(PETSC_HAVE_ELEMENTAL)
85: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_elemental_C", NULL));
86: #endif
87: #if defined(PETSC_HAVE_SCALAPACK) && (defined(PETSC_USE_REAL_SINGLE) || defined(PETSC_USE_REAL_DOUBLE))
88: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_scalapack_C", NULL));
89: #endif
90: #if defined(PETSC_HAVE_HYPRE)
91: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_hypre_C", NULL));
92: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatProductSetFromOptions_transpose_mpiaij_mpiaij_C", NULL));
93: #endif
94: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_is_C", NULL));
95: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatProductSetFromOptions_is_mpiaij_C", NULL));
96: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatProductSetFromOptions_mpiaij_mpiaij_C", NULL));
97: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatMPIAIJSetUseScalableIncreaseOverlap_C", NULL));
98: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpiaijperm_C", NULL));
99: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpiaijsell_C", NULL));
100: #if defined(PETSC_HAVE_MKL_SPARSE)
101: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpiaijmkl_C", NULL));
102: #endif
103: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpiaijcrl_C", NULL));
104: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_is_C", NULL));
105: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpisell_C", NULL));
106: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatSetPreallocationCOO_C", NULL));
107: PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatSetValuesCOO_C", NULL));
108: PetscFunctionReturn(PETSC_SUCCESS);
109: }
111: static PetscErrorCode MatGetRowIJ_MPIAIJ(Mat A, PetscInt oshift, PetscBool symmetric, PetscBool inodecompressed, PetscInt *m, const PetscInt *ia[], const PetscInt *ja[], PetscBool *done)
112: {
113: Mat B;
115: PetscFunctionBegin;
116: PetscCall(MatMPIAIJGetLocalMat(A, MAT_INITIAL_MATRIX, &B));
117: PetscCall(PetscObjectCompose((PetscObject)A, "MatGetRowIJ_MPIAIJ", (PetscObject)B));
118: PetscCall(MatGetRowIJ(B, oshift, symmetric, inodecompressed, m, ia, ja, done));
119: PetscCall(MatDestroy(&B));
120: PetscFunctionReturn(PETSC_SUCCESS);
121: }
123: static PetscErrorCode MatRestoreRowIJ_MPIAIJ(Mat A, PetscInt oshift, PetscBool symmetric, PetscBool inodecompressed, PetscInt *m, const PetscInt *ia[], const PetscInt *ja[], PetscBool *done)
124: {
125: Mat B;
127: PetscFunctionBegin;
128: PetscCall(PetscObjectQuery((PetscObject)A, "MatGetRowIJ_MPIAIJ", (PetscObject *)&B));
129: PetscCall(MatRestoreRowIJ(B, oshift, symmetric, inodecompressed, m, ia, ja, done));
130: PetscCall(PetscObjectCompose((PetscObject)A, "MatGetRowIJ_MPIAIJ", NULL));
131: PetscFunctionReturn(PETSC_SUCCESS);
132: }
134: /*MC
135: MATAIJ - MATAIJ = "aij" - A matrix type to be used for sparse matrices.
137: This matrix type is identical to `MATSEQAIJ` when constructed with a single process communicator,
138: and `MATMPIAIJ` otherwise. As a result, for single process communicators,
139: `MatSeqAIJSetPreallocation()` is supported, and similarly `MatMPIAIJSetPreallocation()` is supported
140: for communicators controlling multiple processes. It is recommended that you call both of
141: the above preallocation routines for simplicity.
143: Options Database Key:
144: . -mat_type aij - sets the matrix type to `MATAIJ` during a call to `MatSetFromOptions()`
146: Developer Note:
147: Level: beginner
149: Subclasses include `MATAIJCUSPARSE`, `MATAIJPERM`, `MATAIJSELL`, `MATAIJMKL`, `MATAIJCRL`, and `MATAIJKOKKOS`; the type also automatically switches over to use inodes when
150: enough of them exist.
152: .seealso: [](ch_matrices), `Mat`, `MATMPIAIJ`, `MATSEQAIJ`, `MatCreateAIJ()`, `MatCreateSeqAIJ()`, `MATBAIJ`
153: M*/
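/*
   A minimal usage sketch (not part of this file) of the recommendation above: create a `MATAIJ`
   matrix and call both preallocation routines so the same code runs on one or on many MPI processes.
   The global size of 100 and the per-row nonzero counts (5 diagonal, 2 off-diagonal) are illustrative
   assumptions only.

      Mat A;
      PetscCall(MatCreate(PETSC_COMM_WORLD, &A));
      PetscCall(MatSetSizes(A, PETSC_DECIDE, PETSC_DECIDE, 100, 100));
      PetscCall(MatSetType(A, MATAIJ));
      PetscCall(MatSeqAIJSetPreallocation(A, 5, NULL));           used when the communicator has a single process
      PetscCall(MatMPIAIJSetPreallocation(A, 5, NULL, 2, NULL));  used when the communicator has several processes
      ... MatSetValues(), MatAssemblyBegin(), MatAssemblyEnd() ...
      PetscCall(MatDestroy(&A));
*/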
155: /*MC
156: MATAIJCRL - MATAIJCRL = "aijcrl" - A matrix type to be used for sparse matrices.
158: This matrix type is identical to `MATSEQAIJCRL` when constructed with a single process communicator,
159: and `MATMPIAIJCRL` otherwise. As a result, for single process communicators,
160: `MatSeqAIJSetPreallocation()` is supported, and similarly `MatMPIAIJSetPreallocation()` is supported
161: for communicators controlling multiple processes. It is recommended that you call both of
162: the above preallocation routines for simplicity.
164: Options Database Key:
165: . -mat_type aijcrl - sets the matrix type to `MATMPIAIJCRL` during a call to `MatSetFromOptions()`
167: Level: beginner
169: .seealso: [](ch_matrices), `Mat`, `MatCreateMPIAIJCRL`, `MATSEQAIJCRL`, `MATMPIAIJCRL`, `MATSEQAIJ`, `MATMPIAIJ`, `MATAIJ`
170: M*/
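/*
   A minimal sketch (not part of this file) of selecting this type at run time: call
   `MatSetFromOptions()` and launch the program with -mat_type aijcrl. The matrix size is an
   illustrative assumption only.

      Mat A;
      PetscCall(MatCreate(PETSC_COMM_WORLD, &A));
      PetscCall(MatSetSizes(A, PETSC_DECIDE, PETSC_DECIDE, 100, 100));
      PetscCall(MatSetFromOptions(A));   picks up -mat_type aijcrl from the options database
      PetscCall(MatSetUp(A));
*/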
172: static PetscErrorCode MatBindToCPU_MPIAIJ(Mat A, PetscBool flg)
173: {
174: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
176: PetscFunctionBegin;
177: #if defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_HIP) || defined(PETSC_HAVE_VIENNACL)
178: A->boundtocpu = flg;
179: #endif
180: if (a->A) PetscCall(MatBindToCPU(a->A, flg));
181: if (a->B) PetscCall(MatBindToCPU(a->B, flg));
183: /* In addition to binding the diagonal and off-diagonal matrices, bind the local vectors used for matrix-vector products.
184: * This may seem like an odd thing for a MatBindToCPU() call to do, but it makes no sense for the binding of these vectors
185: * to differ from that of the parent matrix. */
186: if (a->lvec) PetscCall(VecBindToCPU(a->lvec, flg));
187: if (a->diag) PetscCall(VecBindToCPU(a->diag, flg));
188: PetscFunctionReturn(PETSC_SUCCESS);
189: }
191: static PetscErrorCode MatSetBlockSizes_MPIAIJ(Mat M, PetscInt rbs, PetscInt cbs)
192: {
193: Mat_MPIAIJ *mat = (Mat_MPIAIJ *)M->data;
195: PetscFunctionBegin;
196: if (mat->A) {
197: PetscCall(MatSetBlockSizes(mat->A, rbs, cbs));
198: PetscCall(MatSetBlockSizes(mat->B, rbs, 1));
199: }
200: PetscFunctionReturn(PETSC_SUCCESS);
201: }
203: static PetscErrorCode MatFindNonzeroRows_MPIAIJ(Mat M, IS *keptrows)
204: {
205: Mat_MPIAIJ *mat = (Mat_MPIAIJ *)M->data;
206: Mat_SeqAIJ *a = (Mat_SeqAIJ *)mat->A->data;
207: Mat_SeqAIJ *b = (Mat_SeqAIJ *)mat->B->data;
208: const PetscInt *ia, *ib;
209: const MatScalar *aa, *bb, *aav, *bav;
210: PetscInt na, nb, i, j, *rows, cnt = 0, n0rows;
211: PetscInt m = M->rmap->n, rstart = M->rmap->rstart;
213: PetscFunctionBegin;
214: *keptrows = NULL;
216: ia = a->i;
217: ib = b->i;
218: PetscCall(MatSeqAIJGetArrayRead(mat->A, &aav));
219: PetscCall(MatSeqAIJGetArrayRead(mat->B, &bav));
220: for (i = 0; i < m; i++) {
221: na = ia[i + 1] - ia[i];
222: nb = ib[i + 1] - ib[i];
223: if (!na && !nb) {
224: cnt++;
225: goto ok1;
226: }
227: aa = aav + ia[i];
228: for (j = 0; j < na; j++) {
229: if (aa[j] != 0.0) goto ok1;
230: }
231: bb = PetscSafePointerPlusOffset(bav, ib[i]);
232: for (j = 0; j < nb; j++) {
233: if (bb[j] != 0.0) goto ok1;
234: }
235: cnt++;
236: ok1:;
237: }
238: PetscCallMPI(MPIU_Allreduce(&cnt, &n0rows, 1, MPIU_INT, MPI_SUM, PetscObjectComm((PetscObject)M)));
239: if (!n0rows) {
240: PetscCall(MatSeqAIJRestoreArrayRead(mat->A, &aav));
241: PetscCall(MatSeqAIJRestoreArrayRead(mat->B, &bav));
242: PetscFunctionReturn(PETSC_SUCCESS);
243: }
244: PetscCall(PetscMalloc1(M->rmap->n - cnt, &rows));
245: cnt = 0;
246: for (i = 0; i < m; i++) {
247: na = ia[i + 1] - ia[i];
248: nb = ib[i + 1] - ib[i];
249: if (!na && !nb) continue;
250: aa = aav + ia[i];
251: for (j = 0; j < na; j++) {
252: if (aa[j] != 0.0) {
253: rows[cnt++] = rstart + i;
254: goto ok2;
255: }
256: }
257: bb = PetscSafePointerPlusOffset(bav, ib[i]);
258: for (j = 0; j < nb; j++) {
259: if (bb[j] != 0.0) {
260: rows[cnt++] = rstart + i;
261: goto ok2;
262: }
263: }
264: ok2:;
265: }
266: PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)M), cnt, rows, PETSC_OWN_POINTER, keptrows));
267: PetscCall(MatSeqAIJRestoreArrayRead(mat->A, &aav));
268: PetscCall(MatSeqAIJRestoreArrayRead(mat->B, &bav));
269: PetscFunctionReturn(PETSC_SUCCESS);
270: }
272: static PetscErrorCode MatDiagonalSet_MPIAIJ(Mat Y, Vec D, InsertMode is)
273: {
274: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)Y->data;
275: PetscBool cong;
277: PetscFunctionBegin;
278: PetscCall(MatHasCongruentLayouts(Y, &cong));
279: if (Y->assembled && cong) PetscCall(MatDiagonalSet(aij->A, D, is));
280: else PetscCall(MatDiagonalSet_Default(Y, D, is));
281: PetscFunctionReturn(PETSC_SUCCESS);
282: }
284: static PetscErrorCode MatFindZeroDiagonals_MPIAIJ(Mat M, IS *zrows)
285: {
286: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)M->data;
287: PetscInt i, rstart, nrows, *rows;
289: PetscFunctionBegin;
290: *zrows = NULL;
291: PetscCall(MatFindZeroDiagonals_SeqAIJ_Private(aij->A, &nrows, &rows));
292: PetscCall(MatGetOwnershipRange(M, &rstart, NULL));
293: for (i = 0; i < nrows; i++) rows[i] += rstart;
294: PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)M), nrows, rows, PETSC_OWN_POINTER, zrows));
295: PetscFunctionReturn(PETSC_SUCCESS);
296: }
298: static PetscErrorCode MatGetColumnReductions_MPIAIJ(Mat A, PetscInt type, PetscReal *reductions)
299: {
300: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)A->data;
301: PetscInt i, m, n, *garray = aij->garray;
302: Mat_SeqAIJ *a_aij = (Mat_SeqAIJ *)aij->A->data;
303: Mat_SeqAIJ *b_aij = (Mat_SeqAIJ *)aij->B->data;
304: PetscReal *work;
305: const PetscScalar *dummy;
307: PetscFunctionBegin;
308: PetscCall(MatGetSize(A, &m, &n));
309: PetscCall(PetscCalloc1(n, &work));
310: PetscCall(MatSeqAIJGetArrayRead(aij->A, &dummy));
311: PetscCall(MatSeqAIJRestoreArrayRead(aij->A, &dummy));
312: PetscCall(MatSeqAIJGetArrayRead(aij->B, &dummy));
313: PetscCall(MatSeqAIJRestoreArrayRead(aij->B, &dummy));
314: if (type == NORM_2) {
315: for (i = 0; i < a_aij->i[aij->A->rmap->n]; i++) work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i] * a_aij->a[i]);
316: for (i = 0; i < b_aij->i[aij->B->rmap->n]; i++) work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i] * b_aij->a[i]);
317: } else if (type == NORM_1) {
318: for (i = 0; i < a_aij->i[aij->A->rmap->n]; i++) work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i]);
319: for (i = 0; i < b_aij->i[aij->B->rmap->n]; i++) work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i]);
320: } else if (type == NORM_INFINITY) {
321: for (i = 0; i < a_aij->i[aij->A->rmap->n]; i++) work[A->cmap->rstart + a_aij->j[i]] = PetscMax(PetscAbsScalar(a_aij->a[i]), work[A->cmap->rstart + a_aij->j[i]]);
322: for (i = 0; i < b_aij->i[aij->B->rmap->n]; i++) work[garray[b_aij->j[i]]] = PetscMax(PetscAbsScalar(b_aij->a[i]), work[garray[b_aij->j[i]]]);
323: } else if (type == REDUCTION_SUM_REALPART || type == REDUCTION_MEAN_REALPART) {
324: for (i = 0; i < a_aij->i[aij->A->rmap->n]; i++) work[A->cmap->rstart + a_aij->j[i]] += PetscRealPart(a_aij->a[i]);
325: for (i = 0; i < b_aij->i[aij->B->rmap->n]; i++) work[garray[b_aij->j[i]]] += PetscRealPart(b_aij->a[i]);
326: } else if (type == REDUCTION_SUM_IMAGINARYPART || type == REDUCTION_MEAN_IMAGINARYPART) {
327: for (i = 0; i < a_aij->i[aij->A->rmap->n]; i++) work[A->cmap->rstart + a_aij->j[i]] += PetscImaginaryPart(a_aij->a[i]);
328: for (i = 0; i < b_aij->i[aij->B->rmap->n]; i++) work[garray[b_aij->j[i]]] += PetscImaginaryPart(b_aij->a[i]);
329: } else SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_ARG_WRONG, "Unknown reduction type");
330: if (type == NORM_INFINITY) {
331: PetscCallMPI(MPIU_Allreduce(work, reductions, n, MPIU_REAL, MPIU_MAX, PetscObjectComm((PetscObject)A)));
332: } else {
333: PetscCallMPI(MPIU_Allreduce(work, reductions, n, MPIU_REAL, MPIU_SUM, PetscObjectComm((PetscObject)A)));
334: }
335: PetscCall(PetscFree(work));
336: if (type == NORM_2) {
337: for (i = 0; i < n; i++) reductions[i] = PetscSqrtReal(reductions[i]);
338: } else if (type == REDUCTION_MEAN_REALPART || type == REDUCTION_MEAN_IMAGINARYPART) {
339: for (i = 0; i < n; i++) reductions[i] /= m;
340: }
341: PetscFunctionReturn(PETSC_SUCCESS);
342: }
344: static PetscErrorCode MatFindOffBlockDiagonalEntries_MPIAIJ(Mat A, IS *is)
345: {
346: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
347: IS sis, gis;
348: const PetscInt *isis, *igis;
349: PetscInt n, *iis, nsis, ngis, rstart, i;
351: PetscFunctionBegin;
352: PetscCall(MatFindOffBlockDiagonalEntries(a->A, &sis));
353: PetscCall(MatFindNonzeroRows(a->B, &gis));
354: PetscCall(ISGetSize(gis, &ngis));
355: PetscCall(ISGetSize(sis, &nsis));
356: PetscCall(ISGetIndices(sis, &isis));
357: PetscCall(ISGetIndices(gis, &igis));
359: PetscCall(PetscMalloc1(ngis + nsis, &iis));
360: PetscCall(PetscArraycpy(iis, igis, ngis));
361: PetscCall(PetscArraycpy(iis + ngis, isis, nsis));
362: n = ngis + nsis;
363: PetscCall(PetscSortRemoveDupsInt(&n, iis));
364: PetscCall(MatGetOwnershipRange(A, &rstart, NULL));
365: for (i = 0; i < n; i++) iis[i] += rstart;
366: PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)A), n, iis, PETSC_OWN_POINTER, is));
368: PetscCall(ISRestoreIndices(sis, &isis));
369: PetscCall(ISRestoreIndices(gis, &igis));
370: PetscCall(ISDestroy(&sis));
371: PetscCall(ISDestroy(&gis));
372: PetscFunctionReturn(PETSC_SUCCESS);
373: }
375: /*
376: Local utility routine that creates a mapping from the global column
377: number to the local number in the off-diagonal part of the local
378: storage of the matrix. When PETSC_USE_CTABLE is defined this is scalable, at
379: a slightly higher hash-table lookup cost; without it, it is not scalable (each process
380: stores an order-N integer array) but is fast to access.
381: */
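/*
   Sketch of how the map is queried elsewhere in this file (e.g. in MatSetValues_MPIAIJ()): entries
   are stored shifted by one so that "absent" can be distinguished from local index 0, hence the
   result is decremented after the lookup, and a negative value means the global column gcol does
   not appear in the off-diagonal block B.

      PetscInt lcol;
      #if defined(PETSC_USE_CTABLE)
        PetscCall(PetscHMapIGetWithDefault(aij->colmap, gcol + 1, 0, &lcol));
        lcol--;
      #else
        lcol = aij->colmap[gcol] - 1;
      #endif
*/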
382: PetscErrorCode MatCreateColmap_MPIAIJ_Private(Mat mat)
383: {
384: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
385: PetscInt n = aij->B->cmap->n, i;
387: PetscFunctionBegin;
388: PetscCheck(!n || aij->garray, PETSC_COMM_SELF, PETSC_ERR_PLIB, "MPIAIJ Matrix was assembled but is missing garray");
389: #if defined(PETSC_USE_CTABLE)
390: PetscCall(PetscHMapICreateWithSize(n, &aij->colmap));
391: for (i = 0; i < n; i++) PetscCall(PetscHMapISet(aij->colmap, aij->garray[i] + 1, i + 1));
392: #else
393: PetscCall(PetscCalloc1(mat->cmap->N + 1, &aij->colmap));
394: for (i = 0; i < n; i++) aij->colmap[aij->garray[i]] = i + 1;
395: #endif
396: PetscFunctionReturn(PETSC_SUCCESS);
397: }
399: #define MatSetValues_SeqAIJ_A_Private(row, col, value, addv, orow, ocol) \
400: do { \
401: if (col <= lastcol1) low1 = 0; \
402: else high1 = nrow1; \
403: lastcol1 = col; \
404: while (high1 - low1 > 5) { \
405: t = (low1 + high1) / 2; \
406: if (rp1[t] > col) high1 = t; \
407: else low1 = t; \
408: } \
409: for (_i = low1; _i < high1; _i++) { \
410: if (rp1[_i] > col) break; \
411: if (rp1[_i] == col) { \
412: if (addv == ADD_VALUES) { \
413: ap1[_i] += value; \
414: /* Not sure whether LogFlops will slow down the code or not */ \
415: (void)PetscLogFlops(1.0); \
416: } else ap1[_i] = value; \
417: goto a_noinsert; \
418: } \
419: } \
420: if (value == 0.0 && ignorezeroentries && row != col) { \
421: low1 = 0; \
422: high1 = nrow1; \
423: goto a_noinsert; \
424: } \
425: if (nonew == 1) { \
426: low1 = 0; \
427: high1 = nrow1; \
428: goto a_noinsert; \
429: } \
430: PetscCheck(nonew != -1, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Inserting a new nonzero at global row/column (%" PetscInt_FMT ", %" PetscInt_FMT ") into matrix", orow, ocol); \
431: MatSeqXAIJReallocateAIJ(A, am, 1, nrow1, row, col, rmax1, aa, ai, aj, rp1, ap1, aimax, nonew, MatScalar); \
432: N = nrow1++ - 1; \
433: a->nz++; \
434: high1++; \
435: /* shift up all the later entries in this row */ \
436: PetscCall(PetscArraymove(rp1 + _i + 1, rp1 + _i, N - _i + 1)); \
437: PetscCall(PetscArraymove(ap1 + _i + 1, ap1 + _i, N - _i + 1)); \
438: rp1[_i] = col; \
439: ap1[_i] = value; \
440: a_noinsert:; \
441: ailen[row] = nrow1; \
442: } while (0)
444: #define MatSetValues_SeqAIJ_B_Private(row, col, value, addv, orow, ocol) \
445: do { \
446: if (col <= lastcol2) low2 = 0; \
447: else high2 = nrow2; \
448: lastcol2 = col; \
449: while (high2 - low2 > 5) { \
450: t = (low2 + high2) / 2; \
451: if (rp2[t] > col) high2 = t; \
452: else low2 = t; \
453: } \
454: for (_i = low2; _i < high2; _i++) { \
455: if (rp2[_i] > col) break; \
456: if (rp2[_i] == col) { \
457: if (addv == ADD_VALUES) { \
458: ap2[_i] += value; \
459: (void)PetscLogFlops(1.0); \
460: } else ap2[_i] = value; \
461: goto b_noinsert; \
462: } \
463: } \
464: if (value == 0.0 && ignorezeroentries) { \
465: low2 = 0; \
466: high2 = nrow2; \
467: goto b_noinsert; \
468: } \
469: if (nonew == 1) { \
470: low2 = 0; \
471: high2 = nrow2; \
472: goto b_noinsert; \
473: } \
474: PetscCheck(nonew != -1, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Inserting a new nonzero at global row/column (%" PetscInt_FMT ", %" PetscInt_FMT ") into matrix", orow, ocol); \
475: MatSeqXAIJReallocateAIJ(B, bm, 1, nrow2, row, col, rmax2, ba, bi, bj, rp2, ap2, bimax, nonew, MatScalar); \
476: N = nrow2++ - 1; \
477: b->nz++; \
478: high2++; \
479: /* shift up all the later entries in this row */ \
480: PetscCall(PetscArraymove(rp2 + _i + 1, rp2 + _i, N - _i + 1)); \
481: PetscCall(PetscArraymove(ap2 + _i + 1, ap2 + _i, N - _i + 1)); \
482: rp2[_i] = col; \
483: ap2[_i] = value; \
484: b_noinsert:; \
485: bilen[row] = nrow2; \
486: } while (0)
488: static PetscErrorCode MatSetValuesRow_MPIAIJ(Mat A, PetscInt row, const PetscScalar v[])
489: {
490: Mat_MPIAIJ *mat = (Mat_MPIAIJ *)A->data;
491: Mat_SeqAIJ *a = (Mat_SeqAIJ *)mat->A->data, *b = (Mat_SeqAIJ *)mat->B->data;
492: PetscInt l, *garray = mat->garray, diag;
493: PetscScalar *aa, *ba;
495: PetscFunctionBegin;
496: /* code only works for square matrices A */
498: /* find size of row to the left of the diagonal part */
499: PetscCall(MatGetOwnershipRange(A, &diag, NULL));
500: row = row - diag;
501: for (l = 0; l < b->i[row + 1] - b->i[row]; l++) {
502: if (garray[b->j[b->i[row] + l]] > diag) break;
503: }
504: if (l) {
505: PetscCall(MatSeqAIJGetArray(mat->B, &ba));
506: PetscCall(PetscArraycpy(ba + b->i[row], v, l));
507: PetscCall(MatSeqAIJRestoreArray(mat->B, &ba));
508: }
510: /* diagonal part */
511: if (a->i[row + 1] - a->i[row]) {
512: PetscCall(MatSeqAIJGetArray(mat->A, &aa));
513: PetscCall(PetscArraycpy(aa + a->i[row], v + l, a->i[row + 1] - a->i[row]));
514: PetscCall(MatSeqAIJRestoreArray(mat->A, &aa));
515: }
517: /* right of diagonal part */
518: if (b->i[row + 1] - b->i[row] - l) {
519: PetscCall(MatSeqAIJGetArray(mat->B, &ba));
520: PetscCall(PetscArraycpy(ba + b->i[row] + l, v + l + a->i[row + 1] - a->i[row], b->i[row + 1] - b->i[row] - l));
521: PetscCall(MatSeqAIJRestoreArray(mat->B, &ba));
522: }
523: PetscFunctionReturn(PETSC_SUCCESS);
524: }
526: PetscErrorCode MatSetValues_MPIAIJ(Mat mat, PetscInt m, const PetscInt im[], PetscInt n, const PetscInt in[], const PetscScalar v[], InsertMode addv)
527: {
528: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
529: PetscScalar value = 0.0;
530: PetscInt i, j, rstart = mat->rmap->rstart, rend = mat->rmap->rend;
531: PetscInt cstart = mat->cmap->rstart, cend = mat->cmap->rend, row, col;
532: PetscBool roworiented = aij->roworiented;
534: /* Some Variables required in the macro */
535: Mat A = aij->A;
536: Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
537: PetscInt *aimax = a->imax, *ai = a->i, *ailen = a->ilen, *aj = a->j;
538: PetscBool ignorezeroentries = a->ignorezeroentries;
539: Mat B = aij->B;
540: Mat_SeqAIJ *b = (Mat_SeqAIJ *)B->data;
541: PetscInt *bimax = b->imax, *bi = b->i, *bilen = b->ilen, *bj = b->j, bm = aij->B->rmap->n, am = aij->A->rmap->n;
542: MatScalar *aa, *ba;
543: PetscInt *rp1, *rp2, ii, nrow1, nrow2, _i, rmax1, rmax2, N, low1, high1, low2, high2, t, lastcol1, lastcol2;
544: PetscInt nonew;
545: MatScalar *ap1, *ap2;
547: PetscFunctionBegin;
548: PetscCall(MatSeqAIJGetArray(A, &aa));
549: PetscCall(MatSeqAIJGetArray(B, &ba));
550: for (i = 0; i < m; i++) {
551: if (im[i] < 0) continue;
552: PetscCheck(im[i] < mat->rmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Row too large: row %" PetscInt_FMT " max %" PetscInt_FMT, im[i], mat->rmap->N - 1);
553: if (im[i] >= rstart && im[i] < rend) {
554: row = im[i] - rstart;
555: lastcol1 = -1;
556: rp1 = PetscSafePointerPlusOffset(aj, ai[row]);
557: ap1 = PetscSafePointerPlusOffset(aa, ai[row]);
558: rmax1 = aimax[row];
559: nrow1 = ailen[row];
560: low1 = 0;
561: high1 = nrow1;
562: lastcol2 = -1;
563: rp2 = PetscSafePointerPlusOffset(bj, bi[row]);
564: ap2 = PetscSafePointerPlusOffset(ba, bi[row]);
565: rmax2 = bimax[row];
566: nrow2 = bilen[row];
567: low2 = 0;
568: high2 = nrow2;
570: for (j = 0; j < n; j++) {
571: if (v) value = roworiented ? v[i * n + j] : v[i + j * m];
572: if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && im[i] != in[j]) continue;
573: if (in[j] >= cstart && in[j] < cend) {
574: col = in[j] - cstart;
575: nonew = a->nonew;
576: MatSetValues_SeqAIJ_A_Private(row, col, value, addv, im[i], in[j]);
577: } else if (in[j] < 0) {
578: continue;
579: } else {
580: PetscCheck(in[j] < mat->cmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Column too large: col %" PetscInt_FMT " max %" PetscInt_FMT, in[j], mat->cmap->N - 1);
581: if (mat->was_assembled) {
582: if (!aij->colmap) PetscCall(MatCreateColmap_MPIAIJ_Private(mat));
583: #if defined(PETSC_USE_CTABLE)
584: PetscCall(PetscHMapIGetWithDefault(aij->colmap, in[j] + 1, 0, &col)); /* map global col ids to local ones */
585: col--;
586: #else
587: col = aij->colmap[in[j]] - 1;
588: #endif
589: if (col < 0 && !((Mat_SeqAIJ *)aij->B->data)->nonew) { /* col < 0 means in[j] is a new col for B */
590: PetscCall(MatDisAssemble_MPIAIJ(mat, PETSC_FALSE)); /* Change aij->B from reduced/local format to expanded/global format */
591: col = in[j];
592: /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
593: B = aij->B;
594: b = (Mat_SeqAIJ *)B->data;
595: bimax = b->imax;
596: bi = b->i;
597: bilen = b->ilen;
598: bj = b->j;
599: ba = b->a;
600: rp2 = PetscSafePointerPlusOffset(bj, bi[row]);
601: ap2 = PetscSafePointerPlusOffset(ba, bi[row]);
602: rmax2 = bimax[row];
603: nrow2 = bilen[row];
604: low2 = 0;
605: high2 = nrow2;
606: bm = aij->B->rmap->n;
607: ba = b->a;
608: } else if (col < 0 && !(ignorezeroentries && value == 0.0)) {
609: PetscCheck(1 == ((Mat_SeqAIJ *)aij->B->data)->nonew, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Inserting a new nonzero at global row/column (%" PetscInt_FMT ", %" PetscInt_FMT ") into matrix", im[i], in[j]);
610: PetscCall(PetscInfo(mat, "Skipping of insertion of new nonzero location in off-diagonal portion of matrix %g(%" PetscInt_FMT ",%" PetscInt_FMT ")\n", (double)PetscRealPart(value), im[i], in[j]));
611: }
612: } else col = in[j];
613: nonew = b->nonew;
614: MatSetValues_SeqAIJ_B_Private(row, col, value, addv, im[i], in[j]);
615: }
616: }
617: } else {
618: PetscCheck(!mat->nooffprocentries, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Setting off process row %" PetscInt_FMT " even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set", im[i]);
619: if (!aij->donotstash) {
620: mat->assembled = PETSC_FALSE;
621: if (roworiented) {
622: PetscCall(MatStashValuesRow_Private(&mat->stash, im[i], n, in, PetscSafePointerPlusOffset(v, i * n), (PetscBool)(ignorezeroentries && (addv == ADD_VALUES))));
623: } else {
624: PetscCall(MatStashValuesCol_Private(&mat->stash, im[i], n, in, PetscSafePointerPlusOffset(v, i), m, (PetscBool)(ignorezeroentries && (addv == ADD_VALUES))));
625: }
626: }
627: }
628: }
629: PetscCall(MatSeqAIJRestoreArray(A, &aa)); /* aa, ba might have been freed due to reallocation above, but we don't access them here */
630: PetscCall(MatSeqAIJRestoreArray(B, &ba));
631: PetscFunctionReturn(PETSC_SUCCESS);
632: }
634: /*
635: This function sets the j and ilen arrays (of the diagonal and off-diagonal part) of an MPIAIJ-matrix.
636: The values in mat_i have to be sorted and the values in mat_j have to be sorted for each row (CSR-like).
637: No off-process parts of the matrix are allowed here, and mat->was_assembled has to be PETSC_FALSE.
638: */
639: PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(Mat mat, const PetscInt mat_j[], const PetscInt mat_i[])
640: {
641: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
642: Mat A = aij->A; /* diagonal part of the matrix */
643: Mat B = aij->B; /* off-diagonal part of the matrix */
644: Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
645: Mat_SeqAIJ *b = (Mat_SeqAIJ *)B->data;
646: PetscInt cstart = mat->cmap->rstart, cend = mat->cmap->rend, col;
647: PetscInt *ailen = a->ilen, *aj = a->j;
648: PetscInt *bilen = b->ilen, *bj = b->j;
649: PetscInt am = aij->A->rmap->n, j;
650: PetscInt diag_so_far = 0, dnz;
651: PetscInt offd_so_far = 0, onz;
653: PetscFunctionBegin;
654: /* Iterate over all rows of the matrix */
655: for (j = 0; j < am; j++) {
656: dnz = onz = 0;
657: /* Iterate over all non-zero columns of the current row */
658: for (col = mat_i[j]; col < mat_i[j + 1]; col++) {
659: /* If column is in the diagonal */
660: if (mat_j[col] >= cstart && mat_j[col] < cend) {
661: aj[diag_so_far++] = mat_j[col] - cstart;
662: dnz++;
663: } else { /* off-diagonal entries */
664: bj[offd_so_far++] = mat_j[col];
665: onz++;
666: }
667: }
668: ailen[j] = dnz;
669: bilen[j] = onz;
670: }
671: PetscFunctionReturn(PETSC_SUCCESS);
672: }
674: /*
675: This function sets the local j, a and ilen arrays (of the diagonal and off-diagonal part) of an MPIAIJ-matrix.
676: The values in mat_i have to be sorted and the values in mat_j have to be sorted for each row (CSR-like).
677: No off-process parts of the matrix are allowed here; they are set at a later point by MatSetValues_MPIAIJ.
678: Also, mat->was_assembled has to be false, otherwise the statement aj[rowstart_diag+dnz_row] = mat_j[col] - cstart;
679: would not hold and the more complex MatSetValues_MPIAIJ would have to be used.
680: */
681: PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat(Mat mat, const PetscInt mat_j[], const PetscInt mat_i[], const PetscScalar mat_a[])
682: {
683: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
684: Mat A = aij->A; /* diagonal part of the matrix */
685: Mat B = aij->B; /* off-diagonal part of the matrix */
686: Mat_SeqAIJ *aijd = (Mat_SeqAIJ *)aij->A->data, *aijo = (Mat_SeqAIJ *)aij->B->data;
687: Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
688: Mat_SeqAIJ *b = (Mat_SeqAIJ *)B->data;
689: PetscInt cstart = mat->cmap->rstart, cend = mat->cmap->rend;
690: PetscInt *ailen = a->ilen, *aj = a->j;
691: PetscInt *bilen = b->ilen, *bj = b->j;
692: PetscInt am = aij->A->rmap->n, j;
693: PetscInt *full_diag_i = aijd->i, *full_offd_i = aijo->i; /* These variables can also include non-local elements, which are set at a later point. */
694: PetscInt col, dnz_row, onz_row, rowstart_diag, rowstart_offd;
695: PetscScalar *aa = a->a, *ba = b->a;
697: PetscFunctionBegin;
698: /* Iterate over all rows of the matrix */
699: for (j = 0; j < am; j++) {
700: dnz_row = onz_row = 0;
701: rowstart_offd = full_offd_i[j];
702: rowstart_diag = full_diag_i[j];
703: /* Iterate over all non-zero columns of the current row */
704: for (col = mat_i[j]; col < mat_i[j + 1]; col++) {
705: /* If column is in the diagonal */
706: if (mat_j[col] >= cstart && mat_j[col] < cend) {
707: aj[rowstart_diag + dnz_row] = mat_j[col] - cstart;
708: aa[rowstart_diag + dnz_row] = mat_a[col];
709: dnz_row++;
710: } else { /* off-diagonal entries */
711: bj[rowstart_offd + onz_row] = mat_j[col];
712: ba[rowstart_offd + onz_row] = mat_a[col];
713: onz_row++;
714: }
715: }
716: ailen[j] = dnz_row;
717: bilen[j] = onz_row;
718: }
719: PetscFunctionReturn(PETSC_SUCCESS);
720: }
722: static PetscErrorCode MatGetValues_MPIAIJ(Mat mat, PetscInt m, const PetscInt idxm[], PetscInt n, const PetscInt idxn[], PetscScalar v[])
723: {
724: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
725: PetscInt i, j, rstart = mat->rmap->rstart, rend = mat->rmap->rend;
726: PetscInt cstart = mat->cmap->rstart, cend = mat->cmap->rend, row, col;
728: PetscFunctionBegin;
729: for (i = 0; i < m; i++) {
730: if (idxm[i] < 0) continue; /* negative row */
731: PetscCheck(idxm[i] < mat->rmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Row too large: row %" PetscInt_FMT " max %" PetscInt_FMT, idxm[i], mat->rmap->N - 1);
732: PetscCheck(idxm[i] >= rstart && idxm[i] < rend, PETSC_COMM_SELF, PETSC_ERR_SUP, "Only local values currently supported, row requested %" PetscInt_FMT " range [%" PetscInt_FMT " %" PetscInt_FMT ")", idxm[i], rstart, rend);
733: row = idxm[i] - rstart;
734: for (j = 0; j < n; j++) {
735: if (idxn[j] < 0) continue; /* negative column */
736: PetscCheck(idxn[j] < mat->cmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Column too large: col %" PetscInt_FMT " max %" PetscInt_FMT, idxn[j], mat->cmap->N - 1);
737: if (idxn[j] >= cstart && idxn[j] < cend) {
738: col = idxn[j] - cstart;
739: PetscCall(MatGetValues(aij->A, 1, &row, 1, &col, v + i * n + j));
740: } else {
741: if (!aij->colmap) PetscCall(MatCreateColmap_MPIAIJ_Private(mat));
742: #if defined(PETSC_USE_CTABLE)
743: PetscCall(PetscHMapIGetWithDefault(aij->colmap, idxn[j] + 1, 0, &col));
744: col--;
745: #else
746: col = aij->colmap[idxn[j]] - 1;
747: #endif
748: if ((col < 0) || (aij->garray[col] != idxn[j])) *(v + i * n + j) = 0.0;
749: else PetscCall(MatGetValues(aij->B, 1, &row, 1, &col, v + i * n + j));
750: }
751: }
752: }
753: PetscFunctionReturn(PETSC_SUCCESS);
754: }
756: static PetscErrorCode MatAssemblyBegin_MPIAIJ(Mat mat, MatAssemblyType mode)
757: {
758: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
759: PetscInt nstash, reallocs;
761: PetscFunctionBegin;
762: if (aij->donotstash || mat->nooffprocentries) PetscFunctionReturn(PETSC_SUCCESS);
764: PetscCall(MatStashScatterBegin_Private(mat, &mat->stash, mat->rmap->range));
765: PetscCall(MatStashGetInfo_Private(&mat->stash, &nstash, &reallocs));
766: PetscCall(PetscInfo(mat, "Stash has %" PetscInt_FMT " entries, uses %" PetscInt_FMT " mallocs.\n", nstash, reallocs));
767: PetscFunctionReturn(PETSC_SUCCESS);
768: }
770: PetscErrorCode MatAssemblyEnd_MPIAIJ(Mat mat, MatAssemblyType mode)
771: {
772: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
773: PetscMPIInt n;
774: PetscInt i, j, rstart, ncols, flg;
775: PetscInt *row, *col;
776: PetscBool all_assembled;
777: PetscScalar *val;
779: /* do not use 'b = (Mat_SeqAIJ*)aij->B->data' as B can be reset in disassembly */
781: PetscFunctionBegin;
782: if (!aij->donotstash && !mat->nooffprocentries) {
783: while (1) {
784: PetscCall(MatStashScatterGetMesg_Private(&mat->stash, &n, &row, &col, &val, &flg));
785: if (!flg) break;
787: for (i = 0; i < n;) {
788: /* Now identify the consecutive vals belonging to the same row */
789: for (j = i, rstart = row[j]; j < n; j++) {
790: if (row[j] != rstart) break;
791: }
792: if (j < n) ncols = j - i;
793: else ncols = n - i;
794: /* Now assemble all these values with a single function call */
795: PetscCall(MatSetValues_MPIAIJ(mat, 1, row + i, ncols, col + i, val + i, mat->insertmode));
796: i = j;
797: }
798: }
799: PetscCall(MatStashScatterEnd_Private(&mat->stash));
800: }
801: #if defined(PETSC_HAVE_DEVICE)
802: if (mat->offloadmask == PETSC_OFFLOAD_CPU) aij->A->offloadmask = PETSC_OFFLOAD_CPU;
803: /* We call MatBindToCPU() on aij->A and aij->B here, because if MatBindToCPU_MPIAIJ() is called before assembly, it cannot bind these. */
804: if (mat->boundtocpu) {
805: PetscCall(MatBindToCPU(aij->A, PETSC_TRUE));
806: PetscCall(MatBindToCPU(aij->B, PETSC_TRUE));
807: }
808: #endif
809: PetscCall(MatAssemblyBegin(aij->A, mode));
810: PetscCall(MatAssemblyEnd(aij->A, mode));
812: /* determine if any process has disassembled; if so, we must
813: also disassemble ourselves, so that we may reassemble. */
814: /*
815: if the nonzero structure of submatrix B cannot change, then we know that
816: no process disassembled and thus we can skip this step
817: */
818: if (!((Mat_SeqAIJ *)aij->B->data)->nonew) {
819: PetscCallMPI(MPIU_Allreduce(&mat->was_assembled, &all_assembled, 1, MPI_C_BOOL, MPI_LAND, PetscObjectComm((PetscObject)mat)));
820: if (mat->was_assembled && !all_assembled) { /* mat on this rank has reduced off-diag B with local col ids, but globally it does not */
821: PetscCall(MatDisAssemble_MPIAIJ(mat, PETSC_FALSE));
822: }
823: }
824: if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) PetscCall(MatSetUpMultiply_MPIAIJ(mat));
825: PetscCall(MatSetOption(aij->B, MAT_USE_INODES, PETSC_FALSE));
826: #if defined(PETSC_HAVE_DEVICE)
827: if (mat->offloadmask == PETSC_OFFLOAD_CPU && aij->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) aij->B->offloadmask = PETSC_OFFLOAD_CPU;
828: #endif
829: PetscCall(MatAssemblyBegin(aij->B, mode));
830: PetscCall(MatAssemblyEnd(aij->B, mode));
832: PetscCall(PetscFree2(aij->rowvalues, aij->rowindices));
834: aij->rowvalues = NULL;
836: PetscCall(VecDestroy(&aij->diag));
838: /* if no new nonzero locations are allowed in the matrix, then only set the matrix state the first time through */
839: if ((!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) || !((Mat_SeqAIJ *)aij->A->data)->nonew) {
840: PetscObjectState state = aij->A->nonzerostate + aij->B->nonzerostate;
841: PetscCallMPI(MPIU_Allreduce(&state, &mat->nonzerostate, 1, MPIU_INT64, MPI_SUM, PetscObjectComm((PetscObject)mat)));
842: }
843: #if defined(PETSC_HAVE_DEVICE)
844: mat->offloadmask = PETSC_OFFLOAD_BOTH;
845: #endif
846: PetscFunctionReturn(PETSC_SUCCESS);
847: }
849: static PetscErrorCode MatZeroEntries_MPIAIJ(Mat A)
850: {
851: Mat_MPIAIJ *l = (Mat_MPIAIJ *)A->data;
853: PetscFunctionBegin;
854: PetscCall(MatZeroEntries(l->A));
855: PetscCall(MatZeroEntries(l->B));
856: PetscFunctionReturn(PETSC_SUCCESS);
857: }
859: static PetscErrorCode MatZeroRows_MPIAIJ(Mat A, PetscInt N, const PetscInt rows[], PetscScalar diag, Vec x, Vec b)
860: {
861: Mat_MPIAIJ *mat = (Mat_MPIAIJ *)A->data;
862: PetscInt *lrows;
863: PetscInt r, len;
864: PetscBool cong;
866: PetscFunctionBegin;
867: /* get locally owned rows */
868: PetscCall(MatZeroRowsMapLocal_Private(A, N, rows, &len, &lrows));
869: PetscCall(MatHasCongruentLayouts(A, &cong));
870: /* fix right-hand side if needed */
871: if (x && b) {
872: const PetscScalar *xx;
873: PetscScalar *bb;
875: PetscCheck(cong, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Need matching row/col layout");
876: PetscCall(VecGetArrayRead(x, &xx));
877: PetscCall(VecGetArray(b, &bb));
878: for (r = 0; r < len; ++r) bb[lrows[r]] = diag * xx[lrows[r]];
879: PetscCall(VecRestoreArrayRead(x, &xx));
880: PetscCall(VecRestoreArray(b, &bb));
881: }
883: if (diag != 0.0 && cong) {
884: PetscCall(MatZeroRows(mat->A, len, lrows, diag, NULL, NULL));
885: PetscCall(MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL));
886: } else if (diag != 0.0) { /* non-square or non-congruent layouts -> if keepnonzeropattern is false, we allow for new insertion */
887: Mat_SeqAIJ *aijA = (Mat_SeqAIJ *)mat->A->data;
888: Mat_SeqAIJ *aijB = (Mat_SeqAIJ *)mat->B->data;
889: PetscInt nnwA, nnwB;
890: PetscBool nnzA, nnzB;
892: nnwA = aijA->nonew;
893: nnwB = aijB->nonew;
894: nnzA = aijA->keepnonzeropattern;
895: nnzB = aijB->keepnonzeropattern;
896: if (!nnzA) {
897: PetscCall(PetscInfo(mat->A, "Requested to not keep the pattern and add a nonzero diagonal; may encounter reallocations on diagonal block.\n"));
898: aijA->nonew = 0;
899: }
900: if (!nnzB) {
901: PetscCall(PetscInfo(mat->B, "Requested to not keep the pattern and add a nonzero diagonal; may encounter reallocations on off-diagonal block.\n"));
902: aijB->nonew = 0;
903: }
904: /* Must zero here before the next loop */
905: PetscCall(MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL));
906: PetscCall(MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL));
907: for (r = 0; r < len; ++r) {
908: const PetscInt row = lrows[r] + A->rmap->rstart;
909: if (row >= A->cmap->N) continue;
910: PetscCall(MatSetValues(A, 1, &row, 1, &row, &diag, INSERT_VALUES));
911: }
912: aijA->nonew = nnwA;
913: aijB->nonew = nnwB;
914: } else {
915: PetscCall(MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL));
916: PetscCall(MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL));
917: }
918: PetscCall(PetscFree(lrows));
919: PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
920: PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));
922: /* only change matrix nonzero state if pattern was allowed to be changed */
923: if (!((Mat_SeqAIJ *)mat->A->data)->keepnonzeropattern || !((Mat_SeqAIJ *)mat->A->data)->nonew) {
924: PetscObjectState state = mat->A->nonzerostate + mat->B->nonzerostate;
925: PetscCallMPI(MPIU_Allreduce(&state, &A->nonzerostate, 1, MPIU_INT64, MPI_SUM, PetscObjectComm((PetscObject)A)));
926: }
927: PetscFunctionReturn(PETSC_SUCCESS);
928: }
930: static PetscErrorCode MatZeroRowsColumns_MPIAIJ(Mat A, PetscInt N, const PetscInt rows[], PetscScalar diag, Vec x, Vec b)
931: {
932: Mat_MPIAIJ *l = (Mat_MPIAIJ *)A->data;
933: PetscInt n = A->rmap->n;
934: PetscInt i, j, r, m, len = 0;
935: PetscInt *lrows, *owners = A->rmap->range;
936: PetscMPIInt p = 0;
937: PetscSFNode *rrows;
938: PetscSF sf;
939: const PetscScalar *xx;
940: PetscScalar *bb, *mask, *aij_a;
941: Vec xmask, lmask;
942: Mat_SeqAIJ *aij = (Mat_SeqAIJ *)l->B->data;
943: const PetscInt *aj, *ii, *ridx;
944: PetscScalar *aa;
946: PetscFunctionBegin;
947: /* Create SF where leaves are input rows and roots are owned rows */
948: PetscCall(PetscMalloc1(n, &lrows));
949: for (r = 0; r < n; ++r) lrows[r] = -1;
950: PetscCall(PetscMalloc1(N, &rrows));
951: for (r = 0; r < N; ++r) {
952: const PetscInt idx = rows[r];
953: PetscCheck(idx >= 0 && A->rmap->N > idx, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Row %" PetscInt_FMT " out of range [0,%" PetscInt_FMT ")", idx, A->rmap->N);
954: if (idx < owners[p] || owners[p + 1] <= idx) { /* short-circuit the search if the last p owns this row too */
955: PetscCall(PetscLayoutFindOwner(A->rmap, idx, &p));
956: }
957: rrows[r].rank = p;
958: rrows[r].index = rows[r] - owners[p];
959: }
960: PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)A), &sf));
961: PetscCall(PetscSFSetGraph(sf, n, N, NULL, PETSC_OWN_POINTER, rrows, PETSC_OWN_POINTER));
962: /* Collect flags for rows to be zeroed */
963: PetscCall(PetscSFReduceBegin(sf, MPIU_INT, (PetscInt *)rows, lrows, MPI_LOR));
964: PetscCall(PetscSFReduceEnd(sf, MPIU_INT, (PetscInt *)rows, lrows, MPI_LOR));
965: PetscCall(PetscSFDestroy(&sf));
966: /* Compress and put in row numbers */
967: for (r = 0; r < n; ++r)
968: if (lrows[r] >= 0) lrows[len++] = r;
969: /* zero diagonal part of matrix */
970: PetscCall(MatZeroRowsColumns(l->A, len, lrows, diag, x, b));
971: /* handle off-diagonal part of matrix */
972: PetscCall(MatCreateVecs(A, &xmask, NULL));
973: PetscCall(VecDuplicate(l->lvec, &lmask));
974: PetscCall(VecGetArray(xmask, &bb));
975: for (i = 0; i < len; i++) bb[lrows[i]] = 1;
976: PetscCall(VecRestoreArray(xmask, &bb));
977: PetscCall(VecScatterBegin(l->Mvctx, xmask, lmask, ADD_VALUES, SCATTER_FORWARD));
978: PetscCall(VecScatterEnd(l->Mvctx, xmask, lmask, ADD_VALUES, SCATTER_FORWARD));
979: PetscCall(VecDestroy(&xmask));
980: if (x && b) { /* this code is buggy when the row and column layouts don't match */
981: PetscBool cong;
983: PetscCall(MatHasCongruentLayouts(A, &cong));
984: PetscCheck(cong, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Need matching row/col layout");
985: PetscCall(VecScatterBegin(l->Mvctx, x, l->lvec, INSERT_VALUES, SCATTER_FORWARD));
986: PetscCall(VecScatterEnd(l->Mvctx, x, l->lvec, INSERT_VALUES, SCATTER_FORWARD));
987: PetscCall(VecGetArrayRead(l->lvec, &xx));
988: PetscCall(VecGetArray(b, &bb));
989: }
990: PetscCall(VecGetArray(lmask, &mask));
991: /* remove zeroed rows of off-diagonal matrix */
992: PetscCall(MatSeqAIJGetArray(l->B, &aij_a));
993: ii = aij->i;
994: for (i = 0; i < len; i++) PetscCall(PetscArrayzero(PetscSafePointerPlusOffset(aij_a, ii[lrows[i]]), ii[lrows[i] + 1] - ii[lrows[i]]));
995: /* loop over all elements of the off-process part of the matrix, zeroing removed columns */
996: if (aij->compressedrow.use) {
997: m = aij->compressedrow.nrows;
998: ii = aij->compressedrow.i;
999: ridx = aij->compressedrow.rindex;
1000: for (i = 0; i < m; i++) {
1001: n = ii[i + 1] - ii[i];
1002: aj = aij->j + ii[i];
1003: aa = aij_a + ii[i];
1005: for (j = 0; j < n; j++) {
1006: if (PetscAbsScalar(mask[*aj])) {
1007: if (b) bb[*ridx] -= *aa * xx[*aj];
1008: *aa = 0.0;
1009: }
1010: aa++;
1011: aj++;
1012: }
1013: ridx++;
1014: }
1015: } else { /* do not use compressed row format */
1016: m = l->B->rmap->n;
1017: for (i = 0; i < m; i++) {
1018: n = ii[i + 1] - ii[i];
1019: aj = aij->j + ii[i];
1020: aa = aij_a + ii[i];
1021: for (j = 0; j < n; j++) {
1022: if (PetscAbsScalar(mask[*aj])) {
1023: if (b) bb[i] -= *aa * xx[*aj];
1024: *aa = 0.0;
1025: }
1026: aa++;
1027: aj++;
1028: }
1029: }
1030: }
1031: if (x && b) {
1032: PetscCall(VecRestoreArray(b, &bb));
1033: PetscCall(VecRestoreArrayRead(l->lvec, &xx));
1034: }
1035: PetscCall(MatSeqAIJRestoreArray(l->B, &aij_a));
1036: PetscCall(VecRestoreArray(lmask, &mask));
1037: PetscCall(VecDestroy(&lmask));
1038: PetscCall(PetscFree(lrows));
1040: /* only change matrix nonzero state if pattern was allowed to be changed */
1041: if (!((Mat_SeqAIJ *)l->A->data)->nonew) {
1042: PetscObjectState state = l->A->nonzerostate + l->B->nonzerostate;
1043: PetscCallMPI(MPIU_Allreduce(&state, &A->nonzerostate, 1, MPIU_INT64, MPI_SUM, PetscObjectComm((PetscObject)A)));
1044: }
1045: PetscFunctionReturn(PETSC_SUCCESS);
1046: }
1048: static PetscErrorCode MatMult_MPIAIJ(Mat A, Vec xx, Vec yy)
1049: {
1050: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
1051: PetscInt nt;
1052: VecScatter Mvctx = a->Mvctx;
1054: PetscFunctionBegin;
1055: PetscCall(VecGetLocalSize(xx, &nt));
1056: PetscCheck(nt == A->cmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Incompatible partition of A (%" PetscInt_FMT ") and xx (%" PetscInt_FMT ")", A->cmap->n, nt);
1057: PetscCall(VecScatterBegin(Mvctx, xx, a->lvec, INSERT_VALUES, SCATTER_FORWARD));
1058: PetscUseTypeMethod(a->A, mult, xx, yy);
1059: PetscCall(VecScatterEnd(Mvctx, xx, a->lvec, INSERT_VALUES, SCATTER_FORWARD));
1060: PetscUseTypeMethod(a->B, multadd, a->lvec, yy, yy);
1061: PetscFunctionReturn(PETSC_SUCCESS);
1062: }
1064: static PetscErrorCode MatMultDiagonalBlock_MPIAIJ(Mat A, Vec bb, Vec xx)
1065: {
1066: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
1068: PetscFunctionBegin;
1069: PetscCall(MatMultDiagonalBlock(a->A, bb, xx));
1070: PetscFunctionReturn(PETSC_SUCCESS);
1071: }
1073: static PetscErrorCode MatMultAdd_MPIAIJ(Mat A, Vec xx, Vec yy, Vec zz)
1074: {
1075: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
1076: VecScatter Mvctx = a->Mvctx;
1078: PetscFunctionBegin;
1079: PetscCall(VecScatterBegin(Mvctx, xx, a->lvec, INSERT_VALUES, SCATTER_FORWARD));
1080: PetscUseTypeMethod(a->A, multadd, xx, yy, zz);
1081: PetscCall(VecScatterEnd(Mvctx, xx, a->lvec, INSERT_VALUES, SCATTER_FORWARD));
1082: PetscUseTypeMethod(a->B, multadd, a->lvec, zz, zz);
1083: PetscFunctionReturn(PETSC_SUCCESS);
1084: }
1086: static PetscErrorCode MatMultTranspose_MPIAIJ(Mat A, Vec xx, Vec yy)
1087: {
1088: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
1090: PetscFunctionBegin;
1091: /* do nondiagonal part */
1092: PetscUseTypeMethod(a->B, multtranspose, xx, a->lvec);
1093: /* do local part */
1094: PetscUseTypeMethod(a->A, multtranspose, xx, yy);
1095: /* add partial results together */
1096: PetscCall(VecScatterBegin(a->Mvctx, a->lvec, yy, ADD_VALUES, SCATTER_REVERSE));
1097: PetscCall(VecScatterEnd(a->Mvctx, a->lvec, yy, ADD_VALUES, SCATTER_REVERSE));
1098: PetscFunctionReturn(PETSC_SUCCESS);
1099: }
1101: static PetscErrorCode MatIsTranspose_MPIAIJ(Mat Amat, Mat Bmat, PetscReal tol, PetscBool *f)
1102: {
1103: MPI_Comm comm;
1104: Mat_MPIAIJ *Aij = (Mat_MPIAIJ *)Amat->data, *Bij = (Mat_MPIAIJ *)Bmat->data;
1105: Mat Adia = Aij->A, Bdia = Bij->A, Aoff, Boff, *Aoffs, *Boffs;
1106: IS Me, Notme;
1107: PetscInt M, N, first, last, *notme, i;
1108: PetscBool lf;
1109: PetscMPIInt size;
1111: PetscFunctionBegin;
1112: /* Easy test: symmetric diagonal block */
1113: PetscCall(MatIsTranspose(Adia, Bdia, tol, &lf));
1114: PetscCallMPI(MPIU_Allreduce(&lf, f, 1, MPI_C_BOOL, MPI_LAND, PetscObjectComm((PetscObject)Amat)));
1115: if (!*f) PetscFunctionReturn(PETSC_SUCCESS);
1116: PetscCall(PetscObjectGetComm((PetscObject)Amat, &comm));
1117: PetscCallMPI(MPI_Comm_size(comm, &size));
1118: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
1120: /* Hard test: off-diagonal block. This takes a MatCreateSubMatrix. */
1121: PetscCall(MatGetSize(Amat, &M, &N));
1122: PetscCall(MatGetOwnershipRange(Amat, &first, &last));
1123: PetscCall(PetscMalloc1(N - last + first, ¬me));
1124: for (i = 0; i < first; i++) notme[i] = i;
1125: for (i = last; i < M; i++) notme[i - last + first] = i;
1126: PetscCall(ISCreateGeneral(MPI_COMM_SELF, N - last + first, notme, PETSC_COPY_VALUES, &Notme));
1127: PetscCall(ISCreateStride(MPI_COMM_SELF, last - first, first, 1, &Me));
1128: PetscCall(MatCreateSubMatrices(Amat, 1, &Me, &Notme, MAT_INITIAL_MATRIX, &Aoffs));
1129: Aoff = Aoffs[0];
1130: PetscCall(MatCreateSubMatrices(Bmat, 1, &Notme, &Me, MAT_INITIAL_MATRIX, &Boffs));
1131: Boff = Boffs[0];
1132: PetscCall(MatIsTranspose(Aoff, Boff, tol, f));
1133: PetscCall(MatDestroyMatrices(1, &Aoffs));
1134: PetscCall(MatDestroyMatrices(1, &Boffs));
1135: PetscCall(ISDestroy(&Me));
1136: PetscCall(ISDestroy(&Notme));
1137: PetscCall(PetscFree(notme));
1138: PetscFunctionReturn(PETSC_SUCCESS);
1139: }
1141: static PetscErrorCode MatMultTransposeAdd_MPIAIJ(Mat A, Vec xx, Vec yy, Vec zz)
1142: {
1143: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
1145: PetscFunctionBegin;
1146: /* do nondiagonal part */
1147: PetscUseTypeMethod(a->B, multtranspose, xx, a->lvec);
1148: /* do local part */
1149: PetscUseTypeMethod(a->A, multtransposeadd, xx, yy, zz);
1150: /* add partial results together */
1151: PetscCall(VecScatterBegin(a->Mvctx, a->lvec, zz, ADD_VALUES, SCATTER_REVERSE));
1152: PetscCall(VecScatterEnd(a->Mvctx, a->lvec, zz, ADD_VALUES, SCATTER_REVERSE));
1153: PetscFunctionReturn(PETSC_SUCCESS);
1154: }
1156: /*
1157: This only works correctly for square matrices where the subblock A->A is the
1158: diagonal block
1159: */
1160: static PetscErrorCode MatGetDiagonal_MPIAIJ(Mat A, Vec v)
1161: {
1162: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
1164: PetscFunctionBegin;
1165: PetscCheck(A->rmap->N == A->cmap->N, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Supports only square matrix where A->A is diag block");
1166: PetscCheck(A->rmap->rstart == A->cmap->rstart && A->rmap->rend == A->cmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "row partition must equal col partition");
1167: PetscCall(MatGetDiagonal(a->A, v));
1168: PetscFunctionReturn(PETSC_SUCCESS);
1169: }
1171: static PetscErrorCode MatScale_MPIAIJ(Mat A, PetscScalar aa)
1172: {
1173: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
1175: PetscFunctionBegin;
1176: PetscCall(MatScale(a->A, aa));
1177: PetscCall(MatScale(a->B, aa));
1178: PetscFunctionReturn(PETSC_SUCCESS);
1179: }
1181: static PetscErrorCode MatView_MPIAIJ_Binary(Mat mat, PetscViewer viewer)
1182: {
1183: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
1184: Mat_SeqAIJ *A = (Mat_SeqAIJ *)aij->A->data;
1185: Mat_SeqAIJ *B = (Mat_SeqAIJ *)aij->B->data;
1186: const PetscInt *garray = aij->garray;
1187: const PetscScalar *aa, *ba;
1188: PetscInt header[4], M, N, m, rs, cs, cnt, i, ja, jb;
1189: PetscInt64 nz, hnz;
1190: PetscInt *rowlens;
1191: PetscInt *colidxs;
1192: PetscScalar *matvals;
1193: PetscMPIInt rank;
1195: PetscFunctionBegin;
1196: PetscCall(PetscViewerSetUp(viewer));
1198: M = mat->rmap->N;
1199: N = mat->cmap->N;
1200: m = mat->rmap->n;
1201: rs = mat->rmap->rstart;
1202: cs = mat->cmap->rstart;
1203: nz = A->nz + B->nz;
1205: /* write matrix header */
1206: header[0] = MAT_FILE_CLASSID;
1207: header[1] = M;
1208: header[2] = N;
1209: PetscCallMPI(MPI_Reduce(&nz, &hnz, 1, MPIU_INT64, MPI_SUM, 0, PetscObjectComm((PetscObject)mat)));
1210: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)mat), &rank));
1211: if (rank == 0) PetscCall(PetscIntCast(hnz, &header[3]));
1212: PetscCall(PetscViewerBinaryWrite(viewer, header, 4, PETSC_INT));
1214: /* fill in and store row lengths */
1215: PetscCall(PetscMalloc1(m, &rowlens));
1216: for (i = 0; i < m; i++) rowlens[i] = A->i[i + 1] - A->i[i] + B->i[i + 1] - B->i[i];
1217: PetscCall(PetscViewerBinaryWriteAll(viewer, rowlens, m, rs, M, PETSC_INT));
1218: PetscCall(PetscFree(rowlens));
1220: /* fill in and store column indices */
1221: PetscCall(PetscMalloc1(nz, &colidxs));
1222: for (cnt = 0, i = 0; i < m; i++) {
1223: for (jb = B->i[i]; jb < B->i[i + 1]; jb++) {
1224: if (garray[B->j[jb]] > cs) break;
1225: colidxs[cnt++] = garray[B->j[jb]];
1226: }
1227: for (ja = A->i[i]; ja < A->i[i + 1]; ja++) colidxs[cnt++] = A->j[ja] + cs;
1228: for (; jb < B->i[i + 1]; jb++) colidxs[cnt++] = garray[B->j[jb]];
1229: }
1230: PetscCheck(cnt == nz, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Internal PETSc error: cnt = %" PetscInt_FMT " nz = %" PetscInt64_FMT, cnt, nz);
1231: PetscCall(PetscViewerBinaryWriteAll(viewer, colidxs, nz, PETSC_DETERMINE, PETSC_DETERMINE, PETSC_INT));
1232: PetscCall(PetscFree(colidxs));
1234: /* fill in and store nonzero values */
1235: PetscCall(MatSeqAIJGetArrayRead(aij->A, &aa));
1236: PetscCall(MatSeqAIJGetArrayRead(aij->B, &ba));
1237: PetscCall(PetscMalloc1(nz, &matvals));
1238: for (cnt = 0, i = 0; i < m; i++) {
1239: for (jb = B->i[i]; jb < B->i[i + 1]; jb++) {
1240: if (garray[B->j[jb]] > cs) break;
1241: matvals[cnt++] = ba[jb];
1242: }
1243: for (ja = A->i[i]; ja < A->i[i + 1]; ja++) matvals[cnt++] = aa[ja];
1244: for (; jb < B->i[i + 1]; jb++) matvals[cnt++] = ba[jb];
1245: }
1246: PetscCall(MatSeqAIJRestoreArrayRead(aij->A, &aa));
1247: PetscCall(MatSeqAIJRestoreArrayRead(aij->B, &ba));
1248: PetscCheck(cnt == nz, PETSC_COMM_SELF, PETSC_ERR_LIB, "Internal PETSc error: cnt = %" PetscInt_FMT " nz = %" PetscInt64_FMT, cnt, nz);
1249: PetscCall(PetscViewerBinaryWriteAll(viewer, matvals, nz, PETSC_DETERMINE, PETSC_DETERMINE, PETSC_SCALAR));
1250: PetscCall(PetscFree(matvals));
1252: /* write block size option to the viewer's .info file */
1253: PetscCall(MatView_Binary_BlockSizes(mat, viewer));
1254: PetscFunctionReturn(PETSC_SUCCESS);
1255: }
1257: #include <petscdraw.h>
1258: static PetscErrorCode MatView_MPIAIJ_ASCIIorDraworSocket(Mat mat, PetscViewer viewer)
1259: {
1260: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
1261: PetscMPIInt rank = aij->rank, size = aij->size;
1262: PetscBool isdraw, isascii, isbinary;
1263: PetscViewer sviewer;
1264: PetscViewerFormat format;
1266: PetscFunctionBegin;
1267: PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERDRAW, &isdraw));
1268: PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &isascii));
1269: PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERBINARY, &isbinary));
1270: if (isascii) {
1271: PetscCall(PetscViewerGetFormat(viewer, &format));
1272: if (format == PETSC_VIEWER_LOAD_BALANCE) {
1273: PetscInt i, nmax = 0, nmin = PETSC_INT_MAX, navg = 0, *nz, nzlocal = ((Mat_SeqAIJ *)aij->A->data)->nz + ((Mat_SeqAIJ *)aij->B->data)->nz;
1274: PetscCall(PetscMalloc1(size, &nz));
1275: PetscCallMPI(MPI_Allgather(&nzlocal, 1, MPIU_INT, nz, 1, MPIU_INT, PetscObjectComm((PetscObject)mat)));
1276: for (i = 0; i < size; i++) {
1277: nmax = PetscMax(nmax, nz[i]);
1278: nmin = PetscMin(nmin, nz[i]);
1279: navg += nz[i];
1280: }
1281: PetscCall(PetscFree(nz));
1282: navg = navg / size;
1283: PetscCall(PetscViewerASCIIPrintf(viewer, "Load Balance - Nonzeros: Min %" PetscInt_FMT " avg %" PetscInt_FMT " max %" PetscInt_FMT "\n", nmin, navg, nmax));
1284: PetscFunctionReturn(PETSC_SUCCESS);
1285: }
1286: PetscCall(PetscViewerGetFormat(viewer, &format));
1287: if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
1288: MatInfo info;
1289: PetscInt *inodes = NULL;
1291: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)mat), &rank));
1292: PetscCall(MatGetInfo(mat, MAT_LOCAL, &info));
1293: PetscCall(MatInodeGetInodeSizes(aij->A, NULL, &inodes, NULL));
1294: PetscCall(PetscViewerASCIIPushSynchronized(viewer));
1295: if (!inodes) {
1296: PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "[%d] Local rows %" PetscInt_FMT " nz %" PetscInt_FMT " nz alloced %" PetscInt_FMT " mem %g, not using I-node routines\n", rank, mat->rmap->n, (PetscInt)info.nz_used, (PetscInt)info.nz_allocated,
1297: info.memory));
1298: } else {
1299: PetscCall(
1300: PetscViewerASCIISynchronizedPrintf(viewer, "[%d] Local rows %" PetscInt_FMT " nz %" PetscInt_FMT " nz alloced %" PetscInt_FMT " mem %g, using I-node routines\n", rank, mat->rmap->n, (PetscInt)info.nz_used, (PetscInt)info.nz_allocated, info.memory));
1301: }
1302: PetscCall(MatGetInfo(aij->A, MAT_LOCAL, &info));
1303: PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "[%d] on-diagonal part: nz %" PetscInt_FMT " \n", rank, (PetscInt)info.nz_used));
1304: PetscCall(MatGetInfo(aij->B, MAT_LOCAL, &info));
1305: PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "[%d] off-diagonal part: nz %" PetscInt_FMT " \n", rank, (PetscInt)info.nz_used));
1306: PetscCall(PetscViewerFlush(viewer));
1307: PetscCall(PetscViewerASCIIPopSynchronized(viewer));
1308: PetscCall(PetscViewerASCIIPrintf(viewer, "Information on VecScatter used in matrix-vector product: \n"));
1309: PetscCall(VecScatterView(aij->Mvctx, viewer));
1310: PetscFunctionReturn(PETSC_SUCCESS);
1311: } else if (format == PETSC_VIEWER_ASCII_INFO) {
1312: PetscInt inodecount, inodelimit, *inodes;
1313: PetscCall(MatInodeGetInodeSizes(aij->A, &inodecount, &inodes, &inodelimit));
1314: if (inodes) {
1315: PetscCall(PetscViewerASCIIPrintf(viewer, "using I-node (on process 0) routines: found %" PetscInt_FMT " nodes, limit used is %" PetscInt_FMT "\n", inodecount, inodelimit));
1316: } else {
1317: PetscCall(PetscViewerASCIIPrintf(viewer, "not using I-node (on process 0) routines\n"));
1318: }
1319: PetscFunctionReturn(PETSC_SUCCESS);
1320: } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
1321: PetscFunctionReturn(PETSC_SUCCESS);
1322: }
1323: } else if (isbinary) {
1324: if (size == 1) {
1325: PetscCall(PetscObjectSetName((PetscObject)aij->A, ((PetscObject)mat)->name));
1326: PetscCall(MatView(aij->A, viewer));
1327: } else {
1328: PetscCall(MatView_MPIAIJ_Binary(mat, viewer));
1329: }
1330: PetscFunctionReturn(PETSC_SUCCESS);
1331: } else if (isascii && size == 1) {
1332: PetscCall(PetscObjectSetName((PetscObject)aij->A, ((PetscObject)mat)->name));
1333: PetscCall(MatView(aij->A, viewer));
1334: PetscFunctionReturn(PETSC_SUCCESS);
1335: } else if (isdraw) {
1336: PetscDraw draw;
1337: PetscBool isnull;
1338: PetscCall(PetscViewerDrawGetDraw(viewer, 0, &draw));
1339: PetscCall(PetscDrawIsNull(draw, &isnull));
1340: if (isnull) PetscFunctionReturn(PETSC_SUCCESS);
1341: }
1343: { /* assemble the entire matrix onto the first process */
1344: Mat A = NULL, Av;
1345: IS isrow, iscol;
1347: PetscCall(ISCreateStride(PetscObjectComm((PetscObject)mat), rank == 0 ? mat->rmap->N : 0, 0, 1, &isrow));
1348: PetscCall(ISCreateStride(PetscObjectComm((PetscObject)mat), rank == 0 ? mat->cmap->N : 0, 0, 1, &iscol));
1349: PetscCall(MatCreateSubMatrix(mat, isrow, iscol, MAT_INITIAL_MATRIX, &A));
1350: PetscCall(MatMPIAIJGetSeqAIJ(A, &Av, NULL, NULL));
1351: /* The commented-out code below shows an alternative that uses MatCreateSubMatrices() instead */
1352: /*
1353: Mat *AA, A = NULL, Av;
1354: IS isrow,iscol;
1356: PetscCall(ISCreateStride(PetscObjectComm((PetscObject)mat),rank == 0 ? mat->rmap->N : 0,0,1,&isrow));
1357: PetscCall(ISCreateStride(PetscObjectComm((PetscObject)mat),rank == 0 ? mat->cmap->N : 0,0,1,&iscol));
1358: PetscCall(MatCreateSubMatrices(mat,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&AA));
1359: if (rank == 0) {
1360: PetscCall(PetscObjectReference((PetscObject)AA[0]));
1361: A = AA[0];
1362: Av = AA[0];
1363: }
1364: PetscCall(MatDestroySubMatrices(1,&AA));
1365: */
1366: PetscCall(ISDestroy(&iscol));
1367: PetscCall(ISDestroy(&isrow));
1368: /*
1369: Every process must participate in the viewing call, since the graphics waits are
1370: synchronized across all processes that share the PetscDraw object
1371: */
1372: PetscCall(PetscViewerGetSubViewer(viewer, PETSC_COMM_SELF, &sviewer));
1373: if (rank == 0) {
1374: if (((PetscObject)mat)->name) PetscCall(PetscObjectSetName((PetscObject)Av, ((PetscObject)mat)->name));
1375: PetscCall(MatView_SeqAIJ(Av, sviewer));
1376: }
1377: PetscCall(PetscViewerRestoreSubViewer(viewer, PETSC_COMM_SELF, &sviewer));
1378: PetscCall(MatDestroy(&A));
1379: }
1380: PetscFunctionReturn(PETSC_SUCCESS);
1381: }
1383: PetscErrorCode MatView_MPIAIJ(Mat mat, PetscViewer viewer)
1384: {
1385: PetscBool isascii, isdraw, issocket, isbinary;
1387: PetscFunctionBegin;
1388: PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &isascii));
1389: PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERDRAW, &isdraw));
1390: PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERBINARY, &isbinary));
1391: PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERSOCKET, &issocket));
1392: if (isascii || isdraw || isbinary || issocket) PetscCall(MatView_MPIAIJ_ASCIIorDraworSocket(mat, viewer));
1393: PetscFunctionReturn(PETSC_SUCCESS);
1394: }
1396: static PetscErrorCode MatSOR_MPIAIJ(Mat matin, Vec bb, PetscReal omega, MatSORType flag, PetscReal fshift, PetscInt its, PetscInt lits, Vec xx)
1397: {
1398: Mat_MPIAIJ *mat = (Mat_MPIAIJ *)matin->data;
1399: Vec bb1 = NULL;
1400: PetscBool hasop;
1402: PetscFunctionBegin;
1403: if (flag == SOR_APPLY_UPPER) {
1404: PetscUseTypeMethod(mat->A, sor, bb, omega, flag, fshift, lits, 1, xx);
1405: PetscFunctionReturn(PETSC_SUCCESS);
1406: }
1408: if (its > 1 || ~flag & SOR_ZERO_INITIAL_GUESS || flag & SOR_EISENSTAT) PetscCall(VecDuplicate(bb, &bb1));
1410: if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP) {
1411: if (flag & SOR_ZERO_INITIAL_GUESS) {
1412: PetscUseTypeMethod(mat->A, sor, bb, omega, flag, fshift, lits, 1, xx);
1413: its--;
1414: }
1416: while (its--) {
1417: PetscCall(VecScatterBegin(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1418: PetscCall(VecScatterEnd(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1420: /* update rhs: bb1 = bb - B*x */
1421: PetscCall(VecScale(mat->lvec, -1.0));
1422: PetscUseTypeMethod(mat->B, multadd, mat->lvec, bb, bb1);
1424: /* local sweep */
1425: PetscUseTypeMethod(mat->A, sor, bb1, omega, SOR_SYMMETRIC_SWEEP, fshift, lits, 1, xx);
1426: }
1427: } else if (flag & SOR_LOCAL_FORWARD_SWEEP) {
1428: if (flag & SOR_ZERO_INITIAL_GUESS) {
1429: PetscUseTypeMethod(mat->A, sor, bb, omega, flag, fshift, lits, 1, xx);
1430: its--;
1431: }
1432: while (its--) {
1433: PetscCall(VecScatterBegin(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1434: PetscCall(VecScatterEnd(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1436: /* update rhs: bb1 = bb - B*x */
1437: PetscCall(VecScale(mat->lvec, -1.0));
1438: PetscUseTypeMethod(mat->B, multadd, mat->lvec, bb, bb1);
1440: /* local sweep */
1441: PetscUseTypeMethod(mat->A, sor, bb1, omega, SOR_FORWARD_SWEEP, fshift, lits, 1, xx);
1442: }
1443: } else if (flag & SOR_LOCAL_BACKWARD_SWEEP) {
1444: if (flag & SOR_ZERO_INITIAL_GUESS) {
1445: PetscUseTypeMethod(mat->A, sor, bb, omega, flag, fshift, lits, 1, xx);
1446: its--;
1447: }
1448: while (its--) {
1449: PetscCall(VecScatterBegin(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1450: PetscCall(VecScatterEnd(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1452: /* update rhs: bb1 = bb - B*x */
1453: PetscCall(VecScale(mat->lvec, -1.0));
1454: PetscUseTypeMethod(mat->B, multadd, mat->lvec, bb, bb1);
1456: /* local sweep */
1457: PetscUseTypeMethod(mat->A, sor, bb1, omega, SOR_BACKWARD_SWEEP, fshift, lits, 1, xx);
1458: }
1459: } else if (flag & SOR_EISENSTAT) {
1460: Vec xx1;
1462: PetscCall(VecDuplicate(bb, &xx1));
1463: PetscUseTypeMethod(mat->A, sor, bb, omega, (MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_BACKWARD_SWEEP), fshift, lits, 1, xx);
1465: PetscCall(VecScatterBegin(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1466: PetscCall(VecScatterEnd(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1467: if (!mat->diag) {
1468: PetscCall(MatCreateVecs(matin, &mat->diag, NULL));
1469: PetscCall(MatGetDiagonal(matin, mat->diag));
1470: }
1471: PetscCall(MatHasOperation(matin, MATOP_MULT_DIAGONAL_BLOCK, &hasop));
1472: if (hasop) PetscCall(MatMultDiagonalBlock(matin, xx, bb1));
1473: else PetscCall(VecPointwiseMult(bb1, mat->diag, xx));
1474: PetscCall(VecAYPX(bb1, (omega - 2.0) / omega, bb));
1476: PetscCall(MatMultAdd(mat->B, mat->lvec, bb1, bb1));
1478: /* local sweep */
1479: PetscUseTypeMethod(mat->A, sor, bb1, omega, (MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_FORWARD_SWEEP), fshift, lits, 1, xx1);
1480: PetscCall(VecAXPY(xx, 1.0, xx1));
1481: PetscCall(VecDestroy(&xx1));
1482: } else SETERRQ(PetscObjectComm((PetscObject)matin), PETSC_ERR_SUP, "Parallel SOR not supported");
1484: PetscCall(VecDestroy(&bb1));
1486: matin->factorerrortype = mat->A->factorerrortype;
1487: PetscFunctionReturn(PETSC_SUCCESS);
1488: }
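/*
  A minimal sketch of how the local SOR path above is typically exercised (assuming petscksp.h is
  included and A, b, x are an assembled MATMPIAIJ matrix and conforming vectors). PCSOR calls
  MatSOR(), which for MATMPIAIJ scatters the ghost values of x, forms bb1 = bb - B*x_ghost, and
  then runs a sequential sweep on the diagonal block:

      KSP ksp;
      PC  pc;
      PetscCall(KSPCreate(PETSC_COMM_WORLD, &ksp));
      PetscCall(KSPSetOperators(ksp, A, A));
      PetscCall(KSPGetPC(ksp, &pc));
      PetscCall(PCSetType(pc, PCSOR));
      PetscCall(KSPSetFromOptions(ksp));
      PetscCall(KSPSolve(ksp, b, x));
      PetscCall(KSPDestroy(&ksp));
*/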
1490: static PetscErrorCode MatPermute_MPIAIJ(Mat A, IS rowp, IS colp, Mat *B)
1491: {
1492: Mat aA, aB, Aperm;
1493: const PetscInt *rwant, *cwant, *gcols, *ai, *bi, *aj, *bj;
1494: PetscScalar *aa, *ba;
1495: PetscInt i, j, m, n, ng, anz, bnz, *dnnz, *onnz, *tdnnz, *tonnz, *rdest, *cdest, *work, *gcdest;
1496: PetscSF rowsf, sf;
1497: IS parcolp = NULL;
1498: PetscBool done;
1500: PetscFunctionBegin;
1501: PetscCall(MatGetLocalSize(A, &m, &n));
1502: PetscCall(ISGetIndices(rowp, &rwant));
1503: PetscCall(ISGetIndices(colp, &cwant));
1504: PetscCall(PetscMalloc3(PetscMax(m, n), &work, m, &rdest, n, &cdest));
1506: /* Invert row permutation to find out where my rows should go */
1507: PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)A), &rowsf));
1508: PetscCall(PetscSFSetGraphLayout(rowsf, A->rmap, A->rmap->n, NULL, PETSC_OWN_POINTER, rwant));
1509: PetscCall(PetscSFSetFromOptions(rowsf));
1510: for (i = 0; i < m; i++) work[i] = A->rmap->rstart + i;
1511: PetscCall(PetscSFReduceBegin(rowsf, MPIU_INT, work, rdest, MPI_REPLACE));
1512: PetscCall(PetscSFReduceEnd(rowsf, MPIU_INT, work, rdest, MPI_REPLACE));
1514: /* Invert column permutation to find out where my columns should go */
1515: PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)A), &sf));
1516: PetscCall(PetscSFSetGraphLayout(sf, A->cmap, A->cmap->n, NULL, PETSC_OWN_POINTER, cwant));
1517: PetscCall(PetscSFSetFromOptions(sf));
1518: for (i = 0; i < n; i++) work[i] = A->cmap->rstart + i;
1519: PetscCall(PetscSFReduceBegin(sf, MPIU_INT, work, cdest, MPI_REPLACE));
1520: PetscCall(PetscSFReduceEnd(sf, MPIU_INT, work, cdest, MPI_REPLACE));
1521: PetscCall(PetscSFDestroy(&sf));
1523: PetscCall(ISRestoreIndices(rowp, &rwant));
1524: PetscCall(ISRestoreIndices(colp, &cwant));
1525: PetscCall(MatMPIAIJGetSeqAIJ(A, &aA, &aB, &gcols));
1527: /* Find out where my gcols should go */
1528: PetscCall(MatGetSize(aB, NULL, &ng));
1529: PetscCall(PetscMalloc1(ng, &gcdest));
1530: PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)A), &sf));
1531: PetscCall(PetscSFSetGraphLayout(sf, A->cmap, ng, NULL, PETSC_OWN_POINTER, gcols));
1532: PetscCall(PetscSFSetFromOptions(sf));
1533: PetscCall(PetscSFBcastBegin(sf, MPIU_INT, cdest, gcdest, MPI_REPLACE));
1534: PetscCall(PetscSFBcastEnd(sf, MPIU_INT, cdest, gcdest, MPI_REPLACE));
1535: PetscCall(PetscSFDestroy(&sf));
1537: PetscCall(PetscCalloc4(m, &dnnz, m, &onnz, m, &tdnnz, m, &tonnz));
1538: PetscCall(MatGetRowIJ(aA, 0, PETSC_FALSE, PETSC_FALSE, &anz, &ai, &aj, &done));
1539: PetscCall(MatGetRowIJ(aB, 0, PETSC_FALSE, PETSC_FALSE, &bnz, &bi, &bj, &done));
1540: for (i = 0; i < m; i++) {
1541: PetscInt row = rdest[i];
1542: PetscMPIInt rowner;
1543: PetscCall(PetscLayoutFindOwner(A->rmap, row, &rowner));
1544: for (j = ai[i]; j < ai[i + 1]; j++) {
1545: PetscInt col = cdest[aj[j]];
1546: PetscMPIInt cowner;
1547: PetscCall(PetscLayoutFindOwner(A->cmap, col, &cowner)); /* Could build an index for the columns to eliminate this search */
1548: if (rowner == cowner) dnnz[i]++;
1549: else onnz[i]++;
1550: }
1551: for (j = bi[i]; j < bi[i + 1]; j++) {
1552: PetscInt col = gcdest[bj[j]];
1553: PetscMPIInt cowner;
1554: PetscCall(PetscLayoutFindOwner(A->cmap, col, &cowner));
1555: if (rowner == cowner) dnnz[i]++;
1556: else onnz[i]++;
1557: }
1558: }
1559: PetscCall(PetscSFBcastBegin(rowsf, MPIU_INT, dnnz, tdnnz, MPI_REPLACE));
1560: PetscCall(PetscSFBcastEnd(rowsf, MPIU_INT, dnnz, tdnnz, MPI_REPLACE));
1561: PetscCall(PetscSFBcastBegin(rowsf, MPIU_INT, onnz, tonnz, MPI_REPLACE));
1562: PetscCall(PetscSFBcastEnd(rowsf, MPIU_INT, onnz, tonnz, MPI_REPLACE));
1563: PetscCall(PetscSFDestroy(&rowsf));
1565: PetscCall(MatCreateAIJ(PetscObjectComm((PetscObject)A), A->rmap->n, A->cmap->n, A->rmap->N, A->cmap->N, 0, tdnnz, 0, tonnz, &Aperm));
1566: PetscCall(MatSeqAIJGetArray(aA, &aa));
1567: PetscCall(MatSeqAIJGetArray(aB, &ba));
1568: for (i = 0; i < m; i++) {
1569: PetscInt *acols = dnnz, *bcols = onnz; /* Repurpose now-unneeded arrays */
1570: PetscInt rowlen;
1571: rowlen = ai[i + 1] - ai[i];
1572: for (PetscInt j0 = j = 0; j < rowlen; j0 = j) { /* rowlen could be larger than m (the length of the scratch arrays), so insert the row in batches of at most m entries */
1573: for (; j < PetscMin(rowlen, j0 + m); j++) acols[j - j0] = cdest[aj[ai[i] + j]];
1574: PetscCall(MatSetValues(Aperm, 1, &rdest[i], j - j0, acols, aa + ai[i] + j0, INSERT_VALUES));
1575: }
1576: rowlen = bi[i + 1] - bi[i];
1577: for (PetscInt j0 = j = 0; j < rowlen; j0 = j) {
1578: for (; j < PetscMin(rowlen, j0 + m); j++) bcols[j - j0] = gcdest[bj[bi[i] + j]];
1579: PetscCall(MatSetValues(Aperm, 1, &rdest[i], j - j0, bcols, ba + bi[i] + j0, INSERT_VALUES));
1580: }
1581: }
1582: PetscCall(MatAssemblyBegin(Aperm, MAT_FINAL_ASSEMBLY));
1583: PetscCall(MatAssemblyEnd(Aperm, MAT_FINAL_ASSEMBLY));
1584: PetscCall(MatRestoreRowIJ(aA, 0, PETSC_FALSE, PETSC_FALSE, &anz, &ai, &aj, &done));
1585: PetscCall(MatRestoreRowIJ(aB, 0, PETSC_FALSE, PETSC_FALSE, &bnz, &bi, &bj, &done));
1586: PetscCall(MatSeqAIJRestoreArray(aA, &aa));
1587: PetscCall(MatSeqAIJRestoreArray(aB, &ba));
1588: PetscCall(PetscFree4(dnnz, onnz, tdnnz, tonnz));
1589: PetscCall(PetscFree3(work, rdest, cdest));
1590: PetscCall(PetscFree(gcdest));
1591: if (parcolp) PetscCall(ISDestroy(&colp));
1592: *B = Aperm;
1593: PetscFunctionReturn(PETSC_SUCCESS);
1594: }
1596: static PetscErrorCode MatGetGhosts_MPIAIJ(Mat mat, PetscInt *nghosts, const PetscInt *ghosts[])
1597: {
1598: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
1600: PetscFunctionBegin;
1601: PetscCall(MatGetSize(aij->B, NULL, nghosts));
1602: if (ghosts) *ghosts = aij->garray;
1603: PetscFunctionReturn(PETSC_SUCCESS);
1604: }
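/*
  A minimal usage sketch for MatGetGhosts() (assuming A is an assembled MATMPIAIJ matrix); the
  returned array is the garray of the off-diagonal block, owned by the matrix, and must not be freed:

      PetscInt        nghost;
      const PetscInt *ghosts;
      PetscCall(MatGetGhosts(A, &nghost, &ghosts));
      for (PetscInt g = 0; g < nghost; g++) PetscCall(PetscPrintf(PETSC_COMM_SELF, "ghost column %" PetscInt_FMT "\n", ghosts[g]));
*/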
1606: static PetscErrorCode MatGetInfo_MPIAIJ(Mat matin, MatInfoType flag, MatInfo *info)
1607: {
1608: Mat_MPIAIJ *mat = (Mat_MPIAIJ *)matin->data;
1609: Mat A = mat->A, B = mat->B;
1610: PetscLogDouble isend[5], irecv[5];
1612: PetscFunctionBegin;
1613: info->block_size = 1.0;
1614: PetscCall(MatGetInfo(A, MAT_LOCAL, info));
1616: isend[0] = info->nz_used;
1617: isend[1] = info->nz_allocated;
1618: isend[2] = info->nz_unneeded;
1619: isend[3] = info->memory;
1620: isend[4] = info->mallocs;
1622: PetscCall(MatGetInfo(B, MAT_LOCAL, info));
1624: isend[0] += info->nz_used;
1625: isend[1] += info->nz_allocated;
1626: isend[2] += info->nz_unneeded;
1627: isend[3] += info->memory;
1628: isend[4] += info->mallocs;
1629: if (flag == MAT_LOCAL) {
1630: info->nz_used = isend[0];
1631: info->nz_allocated = isend[1];
1632: info->nz_unneeded = isend[2];
1633: info->memory = isend[3];
1634: info->mallocs = isend[4];
1635: } else if (flag == MAT_GLOBAL_MAX) {
1636: PetscCallMPI(MPIU_Allreduce(isend, irecv, 5, MPIU_PETSCLOGDOUBLE, MPI_MAX, PetscObjectComm((PetscObject)matin)));
1638: info->nz_used = irecv[0];
1639: info->nz_allocated = irecv[1];
1640: info->nz_unneeded = irecv[2];
1641: info->memory = irecv[3];
1642: info->mallocs = irecv[4];
1643: } else if (flag == MAT_GLOBAL_SUM) {
1644: PetscCallMPI(MPIU_Allreduce(isend, irecv, 5, MPIU_PETSCLOGDOUBLE, MPI_SUM, PetscObjectComm((PetscObject)matin)));
1646: info->nz_used = irecv[0];
1647: info->nz_allocated = irecv[1];
1648: info->nz_unneeded = irecv[2];
1649: info->memory = irecv[3];
1650: info->mallocs = irecv[4];
1651: }
1652: info->fill_ratio_given = 0; /* no parallel LU/ILU/Cholesky */
1653: info->fill_ratio_needed = 0;
1654: info->factor_mallocs = 0;
1655: PetscFunctionReturn(PETSC_SUCCESS);
1656: }
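/*
  A minimal usage sketch for the statistics gathered above (assuming A is an assembled matrix);
  MAT_LOCAL returns the per-rank sums over the two blocks, while MAT_GLOBAL_MAX and MAT_GLOBAL_SUM
  reduce them over the communicator:

      MatInfo info;
      PetscCall(MatGetInfo(A, MAT_GLOBAL_SUM, &info));
      PetscCall(PetscPrintf(PETSC_COMM_WORLD, "nz used %g, nz allocated %g, mallocs %g\n", info.nz_used, info.nz_allocated, info.mallocs));
*/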
1658: PetscErrorCode MatSetOption_MPIAIJ(Mat A, MatOption op, PetscBool flg)
1659: {
1660: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
1662: PetscFunctionBegin;
1663: switch (op) {
1664: case MAT_NEW_NONZERO_LOCATIONS:
1665: case MAT_NEW_NONZERO_ALLOCATION_ERR:
1666: case MAT_UNUSED_NONZERO_LOCATION_ERR:
1667: case MAT_KEEP_NONZERO_PATTERN:
1668: case MAT_NEW_NONZERO_LOCATION_ERR:
1669: case MAT_USE_INODES:
1670: case MAT_IGNORE_ZERO_ENTRIES:
1671: case MAT_FORM_EXPLICIT_TRANSPOSE:
1672: MatCheckPreallocated(A, 1);
1673: PetscCall(MatSetOption(a->A, op, flg));
1674: PetscCall(MatSetOption(a->B, op, flg));
1675: break;
1676: case MAT_ROW_ORIENTED:
1677: MatCheckPreallocated(A, 1);
1678: a->roworiented = flg;
1680: PetscCall(MatSetOption(a->A, op, flg));
1681: PetscCall(MatSetOption(a->B, op, flg));
1682: break;
1683: case MAT_IGNORE_OFF_PROC_ENTRIES:
1684: a->donotstash = flg;
1685: break;
1686: /* Symmetry flags are handled directly by MatSetOption() and they don't affect preallocation */
1687: case MAT_SPD:
1688: case MAT_SYMMETRIC:
1689: case MAT_STRUCTURALLY_SYMMETRIC:
1690: case MAT_HERMITIAN:
1691: case MAT_SYMMETRY_ETERNAL:
1692: case MAT_STRUCTURAL_SYMMETRY_ETERNAL:
1693: case MAT_SPD_ETERNAL:
1694: /* if the diagonal block is square it inherits some of the properties above */
1695: if (a->A && A->rmap->n == A->cmap->n) PetscCall(MatSetOption(a->A, op, flg));
1696: break;
1697: case MAT_SUBMAT_SINGLEIS:
1698: A->submat_singleis = flg;
1699: break;
1700: default:
1701: break;
1702: }
1703: PetscFunctionReturn(PETSC_SUCCESS);
1704: }
1706: PetscErrorCode MatGetRow_MPIAIJ(Mat matin, PetscInt row, PetscInt *nz, PetscInt **idx, PetscScalar **v)
1707: {
1708: Mat_MPIAIJ *mat = (Mat_MPIAIJ *)matin->data;
1709: PetscScalar *vworkA, *vworkB, **pvA, **pvB, *v_p;
1710: PetscInt i, *cworkA, *cworkB, **pcA, **pcB, cstart = matin->cmap->rstart;
1711: PetscInt nztot, nzA, nzB, lrow, rstart = matin->rmap->rstart, rend = matin->rmap->rend;
1712: PetscInt *cmap, *idx_p;
1714: PetscFunctionBegin;
1715: PetscCheck(!mat->getrowactive, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Already active");
1716: mat->getrowactive = PETSC_TRUE;
1718: if (!mat->rowvalues && (idx || v)) {
1719: /*
1720: allocate enough space to hold information from the longest row.
1721: */
1722: Mat_SeqAIJ *Aa = (Mat_SeqAIJ *)mat->A->data, *Ba = (Mat_SeqAIJ *)mat->B->data;
1723: PetscInt max = 1, tmp;
1724: for (i = 0; i < matin->rmap->n; i++) {
1725: tmp = Aa->i[i + 1] - Aa->i[i] + Ba->i[i + 1] - Ba->i[i];
1726: if (max < tmp) max = tmp;
1727: }
1728: PetscCall(PetscMalloc2(max, &mat->rowvalues, max, &mat->rowindices));
1729: }
1731: PetscCheck(row >= rstart && row < rend, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Only local rows");
1732: lrow = row - rstart;
1734: pvA = &vworkA;
1735: pcA = &cworkA;
1736: pvB = &vworkB;
1737: pcB = &cworkB;
1738: if (!v) {
1739: pvA = NULL;
1740: pvB = NULL;
1741: }
1742: if (!idx) {
1743: pcA = NULL;
1744: if (!v) pcB = NULL;
1745: }
1746: PetscUseTypeMethod(mat->A, getrow, lrow, &nzA, pcA, pvA);
1747: PetscUseTypeMethod(mat->B, getrow, lrow, &nzB, pcB, pvB);
1748: nztot = nzA + nzB;
1750: cmap = mat->garray;
1751: if (v || idx) {
1752: if (nztot) {
1753: /* Sort by increasing column numbers, assuming A and B already sorted */
1754: PetscInt imark = -1;
1755: if (v) {
1756: *v = v_p = mat->rowvalues;
1757: for (i = 0; i < nzB; i++) {
1758: if (cmap[cworkB[i]] < cstart) v_p[i] = vworkB[i];
1759: else break;
1760: }
1761: imark = i;
1762: for (i = 0; i < nzA; i++) v_p[imark + i] = vworkA[i];
1763: for (i = imark; i < nzB; i++) v_p[nzA + i] = vworkB[i];
1764: }
1765: if (idx) {
1766: *idx = idx_p = mat->rowindices;
1767: if (imark > -1) {
1768: for (i = 0; i < imark; i++) idx_p[i] = cmap[cworkB[i]];
1769: } else {
1770: for (i = 0; i < nzB; i++) {
1771: if (cmap[cworkB[i]] < cstart) idx_p[i] = cmap[cworkB[i]];
1772: else break;
1773: }
1774: imark = i;
1775: }
1776: for (i = 0; i < nzA; i++) idx_p[imark + i] = cstart + cworkA[i];
1777: for (i = imark; i < nzB; i++) idx_p[nzA + i] = cmap[cworkB[i]];
1778: }
1779: } else {
1780: if (idx) *idx = NULL;
1781: if (v) *v = NULL;
1782: }
1783: }
1784: *nz = nztot;
1785: PetscUseTypeMethod(mat->A, restorerow, lrow, &nzA, pcA, pvA);
1786: PetscUseTypeMethod(mat->B, restorerow, lrow, &nzB, pcB, pvB);
1787: PetscFunctionReturn(PETSC_SUCCESS);
1788: }
1790: PetscErrorCode MatRestoreRow_MPIAIJ(Mat mat, PetscInt row, PetscInt *nz, PetscInt **idx, PetscScalar **v)
1791: {
1792: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
1794: PetscFunctionBegin;
1795: PetscCheck(aij->getrowactive, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "MatGetRow() must be called first");
1796: aij->getrowactive = PETSC_FALSE;
1797: PetscFunctionReturn(PETSC_SUCCESS);
1798: }
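/*
  A minimal usage sketch of the row access routines above (assuming A is an assembled matrix).
  Only locally owned rows may be requested, the merged row is returned sorted by global column,
  and every MatGetRow() must be paired with a MatRestoreRow():

      PetscInt           rstart, rend, ncols;
      const PetscInt    *cols;
      const PetscScalar *vals;
      PetscCall(MatGetOwnershipRange(A, &rstart, &rend));
      for (PetscInt row = rstart; row < rend; row++) {
        PetscCall(MatGetRow(A, row, &ncols, &cols, &vals));
        (use cols[0..ncols-1] and vals[0..ncols-1] here)
        PetscCall(MatRestoreRow(A, row, &ncols, &cols, &vals));
      }
*/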
1800: static PetscErrorCode MatNorm_MPIAIJ(Mat mat, NormType type, PetscReal *norm)
1801: {
1802: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
1803: Mat_SeqAIJ *amat = (Mat_SeqAIJ *)aij->A->data, *bmat = (Mat_SeqAIJ *)aij->B->data;
1804: PetscInt i, j;
1805: PetscReal sum = 0.0;
1806: const MatScalar *v, *amata, *bmata;
1808: PetscFunctionBegin;
1809: if (aij->size == 1) {
1810: PetscCall(MatNorm(aij->A, type, norm));
1811: } else {
1812: PetscCall(MatSeqAIJGetArrayRead(aij->A, &amata));
1813: PetscCall(MatSeqAIJGetArrayRead(aij->B, &bmata));
1814: if (type == NORM_FROBENIUS) {
1815: v = amata;
1816: for (i = 0; i < amat->nz; i++) {
1817: sum += PetscRealPart(PetscConj(*v) * (*v));
1818: v++;
1819: }
1820: v = bmata;
1821: for (i = 0; i < bmat->nz; i++) {
1822: sum += PetscRealPart(PetscConj(*v) * (*v));
1823: v++;
1824: }
1825: PetscCallMPI(MPIU_Allreduce(&sum, norm, 1, MPIU_REAL, MPIU_SUM, PetscObjectComm((PetscObject)mat)));
1826: *norm = PetscSqrtReal(*norm);
1827: PetscCall(PetscLogFlops(2.0 * amat->nz + 2.0 * bmat->nz));
1828: } else if (type == NORM_1) { /* max column norm */
1829: Vec col, bcol;
1830: PetscScalar *array;
1831: PetscInt *jj, *garray = aij->garray;
1833: PetscCall(MatCreateVecs(mat, &col, NULL));
1834: PetscCall(VecGetArrayWrite(col, &array));
1835: v = amata;
1836: jj = amat->j;
1837: for (j = 0; j < amat->nz; j++) array[*jj++] += PetscAbsScalar(*v++);
1838: PetscCall(VecRestoreArrayWrite(col, &array));
1839: PetscCall(MatCreateVecs(aij->B, &bcol, NULL));
1840: PetscCall(VecGetArrayWrite(bcol, &array));
1841: v = bmata;
1842: jj = bmat->j;
1843: for (j = 0; j < bmat->nz; j++) array[*jj++] += PetscAbsScalar(*v++);
1844: PetscCall(VecSetValues(col, aij->B->cmap->n, garray, array, ADD_VALUES));
1845: PetscCall(VecRestoreArrayWrite(bcol, &array));
1846: PetscCall(VecDestroy(&bcol));
1847: PetscCall(VecAssemblyBegin(col));
1848: PetscCall(VecAssemblyEnd(col));
1849: PetscCall(VecNorm(col, NORM_INFINITY, norm));
1850: PetscCall(VecDestroy(&col));
1851: } else if (type == NORM_INFINITY) { /* max row norm */
1852: PetscReal ntemp = 0.0;
1853: for (j = 0; j < aij->A->rmap->n; j++) {
1854: v = PetscSafePointerPlusOffset(amata, amat->i[j]);
1855: sum = 0.0;
1856: for (i = 0; i < amat->i[j + 1] - amat->i[j]; i++) {
1857: sum += PetscAbsScalar(*v);
1858: v++;
1859: }
1860: v = PetscSafePointerPlusOffset(bmata, bmat->i[j]);
1861: for (i = 0; i < bmat->i[j + 1] - bmat->i[j]; i++) {
1862: sum += PetscAbsScalar(*v);
1863: v++;
1864: }
1865: if (sum > ntemp) ntemp = sum;
1866: }
1867: PetscCallMPI(MPIU_Allreduce(&ntemp, norm, 1, MPIU_REAL, MPIU_MAX, PetscObjectComm((PetscObject)mat)));
1868: PetscCall(PetscLogFlops(PetscMax(amat->nz + bmat->nz - 1, 0)));
1869: } else SETERRQ(PetscObjectComm((PetscObject)mat), PETSC_ERR_SUP, "No support for two norm");
1870: PetscCall(MatSeqAIJRestoreArrayRead(aij->A, &amata));
1871: PetscCall(MatSeqAIJRestoreArrayRead(aij->B, &bmata));
1872: }
1873: PetscFunctionReturn(PETSC_SUCCESS);
1874: }
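/*
  A minimal usage sketch for MatNorm() on a MATMPIAIJ matrix (assuming A is assembled); only
  NORM_1, NORM_FROBENIUS, and NORM_INFINITY are supported above, and NORM_2 raises an error:

      PetscReal nrm1, nrmf, nrminf;
      PetscCall(MatNorm(A, NORM_1, &nrm1));
      PetscCall(MatNorm(A, NORM_FROBENIUS, &nrmf));
      PetscCall(MatNorm(A, NORM_INFINITY, &nrminf));
*/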
1876: static PetscErrorCode MatTranspose_MPIAIJ(Mat A, MatReuse reuse, Mat *matout)
1877: {
1878: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data, *b;
1879: Mat_SeqAIJ *Aloc = (Mat_SeqAIJ *)a->A->data, *Bloc = (Mat_SeqAIJ *)a->B->data, *sub_B_diag;
1880: PetscInt M = A->rmap->N, N = A->cmap->N, ma, na, mb, nb, row, *cols, *cols_tmp, *B_diag_ilen, i, ncol, A_diag_ncol;
1881: const PetscInt *ai, *aj, *bi, *bj, *B_diag_i;
1882: Mat B, A_diag, *B_diag;
1883: const MatScalar *pbv, *bv;
1885: PetscFunctionBegin;
1886: if (reuse == MAT_REUSE_MATRIX) PetscCall(MatTransposeCheckNonzeroState_Private(A, *matout));
1887: ma = A->rmap->n;
1888: na = A->cmap->n;
1889: mb = a->B->rmap->n;
1890: nb = a->B->cmap->n;
1891: ai = Aloc->i;
1892: aj = Aloc->j;
1893: bi = Bloc->i;
1894: bj = Bloc->j;
1895: if (reuse == MAT_INITIAL_MATRIX || *matout == A) {
1896: PetscInt *d_nnz, *g_nnz, *o_nnz;
1897: PetscSFNode *oloc;
1898: PETSC_UNUSED PetscSF sf;
1900: PetscCall(PetscMalloc4(na, &d_nnz, na, &o_nnz, nb, &g_nnz, nb, &oloc));
1901: /* compute d_nnz for preallocation */
1902: PetscCall(PetscArrayzero(d_nnz, na));
1903: for (i = 0; i < ai[ma]; i++) d_nnz[aj[i]]++;
1904: /* compute local off-diagonal contributions */
1905: PetscCall(PetscArrayzero(g_nnz, nb));
1906: for (i = 0; i < bi[ma]; i++) g_nnz[bj[i]]++;
1907: /* map those to global */
1908: PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)A), &sf));
1909: PetscCall(PetscSFSetGraphLayout(sf, A->cmap, nb, NULL, PETSC_USE_POINTER, a->garray));
1910: PetscCall(PetscSFSetFromOptions(sf));
1911: PetscCall(PetscArrayzero(o_nnz, na));
1912: PetscCall(PetscSFReduceBegin(sf, MPIU_INT, g_nnz, o_nnz, MPI_SUM));
1913: PetscCall(PetscSFReduceEnd(sf, MPIU_INT, g_nnz, o_nnz, MPI_SUM));
1914: PetscCall(PetscSFDestroy(&sf));
1916: PetscCall(MatCreate(PetscObjectComm((PetscObject)A), &B));
1917: PetscCall(MatSetSizes(B, A->cmap->n, A->rmap->n, N, M));
1918: PetscCall(MatSetBlockSizes(B, A->cmap->bs, A->rmap->bs));
1919: PetscCall(MatSetType(B, ((PetscObject)A)->type_name));
1920: PetscCall(MatMPIAIJSetPreallocation(B, 0, d_nnz, 0, o_nnz));
1921: PetscCall(PetscFree4(d_nnz, o_nnz, g_nnz, oloc));
1922: } else {
1923: B = *matout;
1924: PetscCall(MatSetOption(B, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_TRUE));
1925: }
1927: b = (Mat_MPIAIJ *)B->data;
1928: A_diag = a->A;
1929: B_diag = &b->A;
1930: sub_B_diag = (Mat_SeqAIJ *)(*B_diag)->data;
1931: A_diag_ncol = A_diag->cmap->N;
1932: B_diag_ilen = sub_B_diag->ilen;
1933: B_diag_i = sub_B_diag->i;
1935: /* Set ilen for diagonal of B */
1936: for (i = 0; i < A_diag_ncol; i++) B_diag_ilen[i] = B_diag_i[i + 1] - B_diag_i[i];
1938: /* Transpose the diagonal part of the matrix. In contrast to the off-diagonal part, this can be done
1939: very quickly (that is, without using MatSetValues()), because all writes are local. */
1940: PetscCall(MatTransposeSetPrecursor(A_diag, *B_diag));
1941: PetscCall(MatTranspose(A_diag, MAT_REUSE_MATRIX, B_diag));
1943: /* copy over the B part */
1944: PetscCall(PetscMalloc1(bi[mb], &cols));
1945: PetscCall(MatSeqAIJGetArrayRead(a->B, &bv));
1946: pbv = bv;
1947: row = A->rmap->rstart;
1948: for (i = 0; i < bi[mb]; i++) cols[i] = a->garray[bj[i]];
1949: cols_tmp = cols;
1950: for (i = 0; i < mb; i++) {
1951: ncol = bi[i + 1] - bi[i];
1952: PetscCall(MatSetValues(B, ncol, cols_tmp, 1, &row, pbv, INSERT_VALUES));
1953: row++;
1954: if (pbv) pbv += ncol;
1955: if (cols_tmp) cols_tmp += ncol;
1956: }
1957: PetscCall(PetscFree(cols));
1958: PetscCall(MatSeqAIJRestoreArrayRead(a->B, &bv));
1960: PetscCall(MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY));
1961: PetscCall(MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY));
1962: if (reuse == MAT_INITIAL_MATRIX || reuse == MAT_REUSE_MATRIX) {
1963: *matout = B;
1964: } else {
1965: PetscCall(MatHeaderMerge(A, &B));
1966: }
1967: PetscFunctionReturn(PETSC_SUCCESS);
1968: }
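/*
  A minimal usage sketch for the transpose above (assuming A is an assembled MATMPIAIJ matrix).
  The MAT_REUSE_MATRIX call requires that the nonzero structure of A has not changed since the
  MAT_INITIAL_MATRIX call that created At:

      Mat At;
      PetscCall(MatTranspose(A, MAT_INITIAL_MATRIX, &At));
      (change the values, but not the nonzero pattern, of A)
      PetscCall(MatTranspose(A, MAT_REUSE_MATRIX, &At));
      PetscCall(MatDestroy(&At));
*/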
1970: static PetscErrorCode MatDiagonalScale_MPIAIJ(Mat mat, Vec ll, Vec rr)
1971: {
1972: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
1973: Mat a = aij->A, b = aij->B;
1974: PetscInt s1, s2, s3;
1976: PetscFunctionBegin;
1977: PetscCall(MatGetLocalSize(mat, &s2, &s3));
1978: if (rr) {
1979: PetscCall(VecGetLocalSize(rr, &s1));
1980: PetscCheck(s1 == s3, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "right vector non-conforming local size");
1981: /* Overlap communication with computation. */
1982: PetscCall(VecScatterBegin(aij->Mvctx, rr, aij->lvec, INSERT_VALUES, SCATTER_FORWARD));
1983: }
1984: if (ll) {
1985: PetscCall(VecGetLocalSize(ll, &s1));
1986: PetscCheck(s1 == s2, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "left vector non-conforming local size");
1987: PetscUseTypeMethod(b, diagonalscale, ll, NULL);
1988: }
1989: /* scale the diagonal block */
1990: PetscUseTypeMethod(a, diagonalscale, ll, rr);
1992: if (rr) {
1993: /* Do a scatter end and then right scale the off-diagonal block */
1994: PetscCall(VecScatterEnd(aij->Mvctx, rr, aij->lvec, INSERT_VALUES, SCATTER_FORWARD));
1995: PetscUseTypeMethod(b, diagonalscale, NULL, aij->lvec);
1996: }
1997: PetscFunctionReturn(PETSC_SUCCESS);
1998: }
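/*
  A minimal usage sketch for MatDiagonalScale() (assuming A is an assembled MATMPIAIJ matrix); the
  left vector conforms to the row layout, the right vector to the column layout, and the call
  computes A = diag(l) * A * diag(r):

      Vec l, r;
      PetscCall(MatCreateVecs(A, &r, &l));
      PetscCall(VecSet(l, 2.0));
      PetscCall(VecSet(r, 0.5));
      PetscCall(MatDiagonalScale(A, l, r));
      PetscCall(VecDestroy(&l));
      PetscCall(VecDestroy(&r));
*/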
2000: static PetscErrorCode MatSetUnfactored_MPIAIJ(Mat A)
2001: {
2002: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
2004: PetscFunctionBegin;
2005: PetscCall(MatSetUnfactored(a->A));
2006: PetscFunctionReturn(PETSC_SUCCESS);
2007: }
2009: static PetscErrorCode MatEqual_MPIAIJ(Mat A, Mat B, PetscBool *flag)
2010: {
2011: Mat_MPIAIJ *matB = (Mat_MPIAIJ *)B->data, *matA = (Mat_MPIAIJ *)A->data;
2012: Mat a, b, c, d;
2013: PetscBool flg;
2015: PetscFunctionBegin;
2016: a = matA->A;
2017: b = matA->B;
2018: c = matB->A;
2019: d = matB->B;
2021: PetscCall(MatEqual(a, c, &flg));
2022: if (flg) PetscCall(MatEqual(b, d, &flg));
2023: PetscCallMPI(MPIU_Allreduce(&flg, flag, 1, MPI_C_BOOL, MPI_LAND, PetscObjectComm((PetscObject)A)));
2024: PetscFunctionReturn(PETSC_SUCCESS);
2025: }
2027: static PetscErrorCode MatCopy_MPIAIJ(Mat A, Mat B, MatStructure str)
2028: {
2029: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
2030: Mat_MPIAIJ *b = (Mat_MPIAIJ *)B->data;
2032: PetscFunctionBegin;
2033: /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */
2034: if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) {
2035: /* Because of the column compression in the off-process part of the matrix a->B,
2036: the number of columns in a->B and b->B may differ, hence we cannot call
2037: MatCopy() directly on the two parts. If need be, a copy more efficient than
2038: MatCopy_Basic() could be provided by first uncompressing the a->B matrices and
2039: then copying the submatrices */
2040: PetscCall(MatCopy_Basic(A, B, str));
2041: } else {
2042: PetscCall(MatCopy(a->A, b->A, str));
2043: PetscCall(MatCopy(a->B, b->B, str));
2044: }
2045: PetscCall(PetscObjectStateIncrease((PetscObject)B));
2046: PetscFunctionReturn(PETSC_SUCCESS);
2047: }
2049: /*
2050: Computes the number of nonzeros per row needed for preallocation when X and Y
2051: have different nonzero structure.
2052: */
2053: PetscErrorCode MatAXPYGetPreallocation_MPIX_private(PetscInt m, const PetscInt *xi, const PetscInt *xj, const PetscInt *xltog, const PetscInt *yi, const PetscInt *yj, const PetscInt *yltog, PetscInt *nnz)
2054: {
2055: PetscInt i, j, k, nzx, nzy;
2057: PetscFunctionBegin;
2058: /* Set the number of nonzeros in the new matrix */
2059: for (i = 0; i < m; i++) {
2060: const PetscInt *xjj = PetscSafePointerPlusOffset(xj, xi[i]), *yjj = PetscSafePointerPlusOffset(yj, yi[i]);
2061: nzx = xi[i + 1] - xi[i];
2062: nzy = yi[i + 1] - yi[i];
2063: nnz[i] = 0;
2064: for (j = 0, k = 0; j < nzx; j++) { /* Point in X */
2065: for (; k < nzy && yltog[yjj[k]] < xltog[xjj[j]]; k++) nnz[i]++; /* Catch up to X */
2066: if (k < nzy && yltog[yjj[k]] == xltog[xjj[j]]) k++; /* Skip duplicate */
2067: nnz[i]++;
2068: }
2069: for (; k < nzy; k++) nnz[i]++;
2070: }
2071: PetscFunctionReturn(PETSC_SUCCESS);
2072: }
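/*
  Worked example for the count above: if a row of X has global columns {0, 3, 7} and the same row
  of Y has global columns {3, 5}, the merge of the two sorted lists visits 0, 3 (the duplicate is
  counted once), 5, and 7, so nnz[i] = 4, the number of nonzeros that row will have in Y + a*X.
*/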
2074: /* This is the same as MatAXPYGetPreallocation_SeqAIJ, except that the local-to-global map is provided */
2075: static PetscErrorCode MatAXPYGetPreallocation_MPIAIJ(Mat Y, const PetscInt *yltog, Mat X, const PetscInt *xltog, PetscInt *nnz)
2076: {
2077: PetscInt m = Y->rmap->N;
2078: Mat_SeqAIJ *x = (Mat_SeqAIJ *)X->data;
2079: Mat_SeqAIJ *y = (Mat_SeqAIJ *)Y->data;
2081: PetscFunctionBegin;
2082: PetscCall(MatAXPYGetPreallocation_MPIX_private(m, x->i, x->j, xltog, y->i, y->j, yltog, nnz));
2083: PetscFunctionReturn(PETSC_SUCCESS);
2084: }
2086: static PetscErrorCode MatAXPY_MPIAIJ(Mat Y, PetscScalar a, Mat X, MatStructure str)
2087: {
2088: Mat_MPIAIJ *xx = (Mat_MPIAIJ *)X->data, *yy = (Mat_MPIAIJ *)Y->data;
2090: PetscFunctionBegin;
2091: if (str == SAME_NONZERO_PATTERN) {
2092: PetscCall(MatAXPY(yy->A, a, xx->A, str));
2093: PetscCall(MatAXPY(yy->B, a, xx->B, str));
2094: } else if (str == SUBSET_NONZERO_PATTERN) { /* nonzeros of X is a subset of Y's */
2095: PetscCall(MatAXPY_Basic(Y, a, X, str));
2096: } else {
2097: Mat B;
2098: PetscInt *nnz_d, *nnz_o;
2100: PetscCall(PetscMalloc1(yy->A->rmap->N, &nnz_d));
2101: PetscCall(PetscMalloc1(yy->B->rmap->N, &nnz_o));
2102: PetscCall(MatCreate(PetscObjectComm((PetscObject)Y), &B));
2103: PetscCall(PetscObjectSetName((PetscObject)B, ((PetscObject)Y)->name));
2104: PetscCall(MatSetLayouts(B, Y->rmap, Y->cmap));
2105: PetscCall(MatSetType(B, ((PetscObject)Y)->type_name));
2106: PetscCall(MatAXPYGetPreallocation_SeqAIJ(yy->A, xx->A, nnz_d));
2107: PetscCall(MatAXPYGetPreallocation_MPIAIJ(yy->B, yy->garray, xx->B, xx->garray, nnz_o));
2108: PetscCall(MatMPIAIJSetPreallocation(B, 0, nnz_d, 0, nnz_o));
2109: PetscCall(MatAXPY_BasicWithPreallocation(B, Y, a, X, str));
2110: PetscCall(MatHeaderMerge(Y, &B));
2111: PetscCall(PetscFree(nnz_d));
2112: PetscCall(PetscFree(nnz_o));
2113: }
2114: PetscFunctionReturn(PETSC_SUCCESS);
2115: }
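/*
  A minimal usage sketch for MatAXPY() on MATMPIAIJ matrices (assuming X and Y are assembled and
  have the same layouts). When the patterns differ, the code above builds a new matrix with the
  merged pattern and swaps it into Y:

      PetscCall(MatAXPY(Y, 2.0, X, DIFFERENT_NONZERO_PATTERN));

  or, when Y and X are known to have identical nonzero patterns, the much cheaper form:

      PetscCall(MatAXPY(Y, -1.0, X, SAME_NONZERO_PATTERN));
*/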
2117: PETSC_INTERN PetscErrorCode MatConjugate_SeqAIJ(Mat);
2119: static PetscErrorCode MatConjugate_MPIAIJ(Mat mat)
2120: {
2121: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
2123: PetscFunctionBegin;
2124: PetscCall(MatConjugate_SeqAIJ(aij->A));
2125: PetscCall(MatConjugate_SeqAIJ(aij->B));
2126: PetscFunctionReturn(PETSC_SUCCESS);
2127: }
2129: static PetscErrorCode MatRealPart_MPIAIJ(Mat A)
2130: {
2131: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
2133: PetscFunctionBegin;
2134: PetscCall(MatRealPart(a->A));
2135: PetscCall(MatRealPart(a->B));
2136: PetscFunctionReturn(PETSC_SUCCESS);
2137: }
2139: static PetscErrorCode MatImaginaryPart_MPIAIJ(Mat A)
2140: {
2141: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
2143: PetscFunctionBegin;
2144: PetscCall(MatImaginaryPart(a->A));
2145: PetscCall(MatImaginaryPart(a->B));
2146: PetscFunctionReturn(PETSC_SUCCESS);
2147: }
2149: static PetscErrorCode MatGetRowMaxAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2150: {
2151: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
2152: PetscInt i, *idxb = NULL, m = A->rmap->n;
2153: PetscScalar *vv;
2154: Vec vB, vA;
2155: const PetscScalar *va, *vb;
2157: PetscFunctionBegin;
2158: PetscCall(MatCreateVecs(a->A, NULL, &vA));
2159: PetscCall(MatGetRowMaxAbs(a->A, vA, idx));
2161: PetscCall(VecGetArrayRead(vA, &va));
2162: if (idx) {
2163: for (i = 0; i < m; i++) {
2164: if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
2165: }
2166: }
2168: PetscCall(MatCreateVecs(a->B, NULL, &vB));
2169: PetscCall(PetscMalloc1(m, &idxb));
2170: PetscCall(MatGetRowMaxAbs(a->B, vB, idxb));
2172: PetscCall(VecGetArrayWrite(v, &vv));
2173: PetscCall(VecGetArrayRead(vB, &vb));
2174: for (i = 0; i < m; i++) {
2175: if (PetscAbsScalar(va[i]) < PetscAbsScalar(vb[i])) {
2176: vv[i] = vb[i];
2177: if (idx) idx[i] = a->garray[idxb[i]];
2178: } else {
2179: vv[i] = va[i];
2180: if (idx && PetscAbsScalar(va[i]) == PetscAbsScalar(vb[i]) && idxb[i] != -1 && idx[i] > a->garray[idxb[i]]) idx[i] = a->garray[idxb[i]];
2181: }
2182: }
2183: PetscCall(VecRestoreArrayWrite(v, &vv));
2184: PetscCall(VecRestoreArrayRead(vA, &va));
2185: PetscCall(VecRestoreArrayRead(vB, &vb));
2186: PetscCall(PetscFree(idxb));
2187: PetscCall(VecDestroy(&vA));
2188: PetscCall(VecDestroy(&vB));
2189: PetscFunctionReturn(PETSC_SUCCESS);
2190: }
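/*
  A minimal usage sketch for MatGetRowMaxAbs() (assuming A is an assembled MATMPIAIJ matrix); the
  companion routines MatGetRowSumAbs(), MatGetRowMinAbs(), MatGetRowMin(), and MatGetRowMax() below
  follow the same pattern. The optional idx[] array, of length equal to the number of local rows,
  receives the global column at which each row attains its maximum absolute value:

      Vec       rowmax;
      PetscInt  m;
      PetscInt *loc;
      PetscCall(MatGetLocalSize(A, &m, NULL));
      PetscCall(PetscMalloc1(m, &loc));
      PetscCall(MatCreateVecs(A, NULL, &rowmax));
      PetscCall(MatGetRowMaxAbs(A, rowmax, loc));
      PetscCall(VecDestroy(&rowmax));
      PetscCall(PetscFree(loc));
*/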
2192: static PetscErrorCode MatGetRowSumAbs_MPIAIJ(Mat A, Vec v)
2193: {
2194: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
2195: Vec vB, vA;
2197: PetscFunctionBegin;
2198: PetscCall(MatCreateVecs(a->A, NULL, &vA));
2199: PetscCall(MatGetRowSumAbs(a->A, vA));
2200: PetscCall(MatCreateVecs(a->B, NULL, &vB));
2201: PetscCall(MatGetRowSumAbs(a->B, vB));
2202: PetscCall(VecAXPY(vA, 1.0, vB));
2203: PetscCall(VecDestroy(&vB));
2204: PetscCall(VecCopy(vA, v));
2205: PetscCall(VecDestroy(&vA));
2206: PetscFunctionReturn(PETSC_SUCCESS);
2207: }
2209: static PetscErrorCode MatGetRowMinAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2210: {
2211: Mat_MPIAIJ *mat = (Mat_MPIAIJ *)A->data;
2212: PetscInt m = A->rmap->n, n = A->cmap->n;
2213: PetscInt cstart = A->cmap->rstart, cend = A->cmap->rend;
2214: PetscInt *cmap = mat->garray;
2215: PetscInt *diagIdx, *offdiagIdx;
2216: Vec diagV, offdiagV;
2217: PetscScalar *a, *diagA, *offdiagA;
2218: const PetscScalar *ba, *bav;
2219: PetscInt r, j, col, ncols, *bi, *bj;
2220: Mat B = mat->B;
2221: Mat_SeqAIJ *b = (Mat_SeqAIJ *)B->data;
2223: PetscFunctionBegin;
2224: /* When one process holds the entire matrix A and the other processes have no entries */
2225: if (A->cmap->N == n) {
2226: PetscCall(VecGetArrayWrite(v, &diagA));
2227: PetscCall(VecCreateSeqWithArray(PETSC_COMM_SELF, 1, m, diagA, &diagV));
2228: PetscCall(MatGetRowMinAbs(mat->A, diagV, idx));
2229: PetscCall(VecDestroy(&diagV));
2230: PetscCall(VecRestoreArrayWrite(v, &diagA));
2231: PetscFunctionReturn(PETSC_SUCCESS);
2232: } else if (n == 0) {
2233: if (m) {
2234: PetscCall(VecGetArrayWrite(v, &a));
2235: for (r = 0; r < m; r++) {
2236: a[r] = 0.0;
2237: if (idx) idx[r] = -1;
2238: }
2239: PetscCall(VecRestoreArrayWrite(v, &a));
2240: }
2241: PetscFunctionReturn(PETSC_SUCCESS);
2242: }
2244: PetscCall(PetscMalloc2(m, &diagIdx, m, &offdiagIdx));
2245: PetscCall(VecCreateSeq(PETSC_COMM_SELF, m, &diagV));
2246: PetscCall(VecCreateSeq(PETSC_COMM_SELF, m, &offdiagV));
2247: PetscCall(MatGetRowMinAbs(mat->A, diagV, diagIdx));
2249: /* Get offdiagIdx[] for implicit 0.0 */
2250: PetscCall(MatSeqAIJGetArrayRead(B, &bav));
2251: ba = bav;
2252: bi = b->i;
2253: bj = b->j;
2254: PetscCall(VecGetArrayWrite(offdiagV, &offdiagA));
2255: for (r = 0; r < m; r++) {
2256: ncols = bi[r + 1] - bi[r];
2257: if (ncols == A->cmap->N - n) { /* Brow is dense */
2258: offdiagA[r] = *ba;
2259: offdiagIdx[r] = cmap[0];
2260: } else { /* Brow is sparse, so we already know the minimum absolute value is 0.0 (there is an implicit zero) */
2261: offdiagA[r] = 0.0;
2263: /* Find first hole in the cmap */
2264: for (j = 0; j < ncols; j++) {
2265: col = cmap[bj[j]]; /* global column number = cmap[B column number] */
2266: if (col > j && j < cstart) {
2267: offdiagIdx[r] = j; /* global column number of first implicit 0.0 */
2268: break;
2269: } else if (col > j + n && j >= cstart) {
2270: offdiagIdx[r] = j + n; /* global column number of first implicit 0.0 */
2271: break;
2272: }
2273: }
2274: if (j == ncols && ncols < A->cmap->N - n) {
2275: /* a hole is outside compressed Bcols */
2276: if (ncols == 0) {
2277: if (cstart) {
2278: offdiagIdx[r] = 0;
2279: } else offdiagIdx[r] = cend;
2280: } else { /* ncols > 0 */
2281: offdiagIdx[r] = cmap[ncols - 1] + 1;
2282: if (offdiagIdx[r] == cstart) offdiagIdx[r] += n;
2283: }
2284: }
2285: }
2287: for (j = 0; j < ncols; j++) {
2288: if (PetscAbsScalar(offdiagA[r]) > PetscAbsScalar(*ba)) {
2289: offdiagA[r] = *ba;
2290: offdiagIdx[r] = cmap[*bj];
2291: }
2292: ba++;
2293: bj++;
2294: }
2295: }
2297: PetscCall(VecGetArrayWrite(v, &a));
2298: PetscCall(VecGetArrayRead(diagV, (const PetscScalar **)&diagA));
2299: for (r = 0; r < m; ++r) {
2300: if (PetscAbsScalar(diagA[r]) < PetscAbsScalar(offdiagA[r])) {
2301: a[r] = diagA[r];
2302: if (idx) idx[r] = cstart + diagIdx[r];
2303: } else if (PetscAbsScalar(diagA[r]) == PetscAbsScalar(offdiagA[r])) {
2304: a[r] = diagA[r];
2305: if (idx) {
2306: if (cstart + diagIdx[r] <= offdiagIdx[r]) {
2307: idx[r] = cstart + diagIdx[r];
2308: } else idx[r] = offdiagIdx[r];
2309: }
2310: } else {
2311: a[r] = offdiagA[r];
2312: if (idx) idx[r] = offdiagIdx[r];
2313: }
2314: }
2315: PetscCall(MatSeqAIJRestoreArrayRead(B, &bav));
2316: PetscCall(VecRestoreArrayWrite(v, &a));
2317: PetscCall(VecRestoreArrayRead(diagV, (const PetscScalar **)&diagA));
2318: PetscCall(VecRestoreArrayWrite(offdiagV, &offdiagA));
2319: PetscCall(VecDestroy(&diagV));
2320: PetscCall(VecDestroy(&offdiagV));
2321: PetscCall(PetscFree2(diagIdx, offdiagIdx));
2322: PetscFunctionReturn(PETSC_SUCCESS);
2323: }
2325: static PetscErrorCode MatGetRowMin_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2326: {
2327: Mat_MPIAIJ *mat = (Mat_MPIAIJ *)A->data;
2328: PetscInt m = A->rmap->n, n = A->cmap->n;
2329: PetscInt cstart = A->cmap->rstart, cend = A->cmap->rend;
2330: PetscInt *cmap = mat->garray;
2331: PetscInt *diagIdx, *offdiagIdx;
2332: Vec diagV, offdiagV;
2333: PetscScalar *a, *diagA, *offdiagA;
2334: const PetscScalar *ba, *bav;
2335: PetscInt r, j, col, ncols, *bi, *bj;
2336: Mat B = mat->B;
2337: Mat_SeqAIJ *b = (Mat_SeqAIJ *)B->data;
2339: PetscFunctionBegin;
2340: /* When one process holds the entire matrix A and the other processes have no entries */
2341: if (A->cmap->N == n) {
2342: PetscCall(VecGetArrayWrite(v, &diagA));
2343: PetscCall(VecCreateSeqWithArray(PETSC_COMM_SELF, 1, m, diagA, &diagV));
2344: PetscCall(MatGetRowMin(mat->A, diagV, idx));
2345: PetscCall(VecDestroy(&diagV));
2346: PetscCall(VecRestoreArrayWrite(v, &diagA));
2347: PetscFunctionReturn(PETSC_SUCCESS);
2348: } else if (n == 0) {
2349: if (m) {
2350: PetscCall(VecGetArrayWrite(v, &a));
2351: for (r = 0; r < m; r++) {
2352: a[r] = PETSC_MAX_REAL;
2353: if (idx) idx[r] = -1;
2354: }
2355: PetscCall(VecRestoreArrayWrite(v, &a));
2356: }
2357: PetscFunctionReturn(PETSC_SUCCESS);
2358: }
2360: PetscCall(PetscCalloc2(m, &diagIdx, m, &offdiagIdx));
2361: PetscCall(VecCreateSeq(PETSC_COMM_SELF, m, &diagV));
2362: PetscCall(VecCreateSeq(PETSC_COMM_SELF, m, &offdiagV));
2363: PetscCall(MatGetRowMin(mat->A, diagV, diagIdx));
2365: /* Get offdiagIdx[] for implicit 0.0 */
2366: PetscCall(MatSeqAIJGetArrayRead(B, &bav));
2367: ba = bav;
2368: bi = b->i;
2369: bj = b->j;
2370: PetscCall(VecGetArrayWrite(offdiagV, &offdiagA));
2371: for (r = 0; r < m; r++) {
2372: ncols = bi[r + 1] - bi[r];
2373: if (ncols == A->cmap->N - n) { /* Brow is dense */
2374: offdiagA[r] = *ba;
2375: offdiagIdx[r] = cmap[0];
2376: } else { /* Brow is sparse, so we already know the minimum is 0.0 or lower (there is an implicit zero) */
2377: offdiagA[r] = 0.0;
2379: /* Find first hole in the cmap */
2380: for (j = 0; j < ncols; j++) {
2381: col = cmap[bj[j]]; /* global column number = cmap[B column number] */
2382: if (col > j && j < cstart) {
2383: offdiagIdx[r] = j; /* global column number of first implicit 0.0 */
2384: break;
2385: } else if (col > j + n && j >= cstart) {
2386: offdiagIdx[r] = j + n; /* global column number of first implicit 0.0 */
2387: break;
2388: }
2389: }
2390: if (j == ncols && ncols < A->cmap->N - n) {
2391: /* a hole is outside compressed Bcols */
2392: if (ncols == 0) {
2393: if (cstart) {
2394: offdiagIdx[r] = 0;
2395: } else offdiagIdx[r] = cend;
2396: } else { /* ncols > 0 */
2397: offdiagIdx[r] = cmap[ncols - 1] + 1;
2398: if (offdiagIdx[r] == cstart) offdiagIdx[r] += n;
2399: }
2400: }
2401: }
2403: for (j = 0; j < ncols; j++) {
2404: if (PetscRealPart(offdiagA[r]) > PetscRealPart(*ba)) {
2405: offdiagA[r] = *ba;
2406: offdiagIdx[r] = cmap[*bj];
2407: }
2408: ba++;
2409: bj++;
2410: }
2411: }
2413: PetscCall(VecGetArrayWrite(v, &a));
2414: PetscCall(VecGetArrayRead(diagV, (const PetscScalar **)&diagA));
2415: for (r = 0; r < m; ++r) {
2416: if (PetscRealPart(diagA[r]) < PetscRealPart(offdiagA[r])) {
2417: a[r] = diagA[r];
2418: if (idx) idx[r] = cstart + diagIdx[r];
2419: } else if (PetscRealPart(diagA[r]) == PetscRealPart(offdiagA[r])) {
2420: a[r] = diagA[r];
2421: if (idx) {
2422: if (cstart + diagIdx[r] <= offdiagIdx[r]) {
2423: idx[r] = cstart + diagIdx[r];
2424: } else idx[r] = offdiagIdx[r];
2425: }
2426: } else {
2427: a[r] = offdiagA[r];
2428: if (idx) idx[r] = offdiagIdx[r];
2429: }
2430: }
2431: PetscCall(MatSeqAIJRestoreArrayRead(B, &bav));
2432: PetscCall(VecRestoreArrayWrite(v, &a));
2433: PetscCall(VecRestoreArrayRead(diagV, (const PetscScalar **)&diagA));
2434: PetscCall(VecRestoreArrayWrite(offdiagV, &offdiagA));
2435: PetscCall(VecDestroy(&diagV));
2436: PetscCall(VecDestroy(&offdiagV));
2437: PetscCall(PetscFree2(diagIdx, offdiagIdx));
2438: PetscFunctionReturn(PETSC_SUCCESS);
2439: }
2441: static PetscErrorCode MatGetRowMax_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2442: {
2443: Mat_MPIAIJ *mat = (Mat_MPIAIJ *)A->data;
2444: PetscInt m = A->rmap->n, n = A->cmap->n;
2445: PetscInt cstart = A->cmap->rstart, cend = A->cmap->rend;
2446: PetscInt *cmap = mat->garray;
2447: PetscInt *diagIdx, *offdiagIdx;
2448: Vec diagV, offdiagV;
2449: PetscScalar *a, *diagA, *offdiagA;
2450: const PetscScalar *ba, *bav;
2451: PetscInt r, j, col, ncols, *bi, *bj;
2452: Mat B = mat->B;
2453: Mat_SeqAIJ *b = (Mat_SeqAIJ *)B->data;
2455: PetscFunctionBegin;
2456: /* When one process holds the entire matrix A and the other processes have no entries */
2457: if (A->cmap->N == n) {
2458: PetscCall(VecGetArrayWrite(v, &diagA));
2459: PetscCall(VecCreateSeqWithArray(PETSC_COMM_SELF, 1, m, diagA, &diagV));
2460: PetscCall(MatGetRowMax(mat->A, diagV, idx));
2461: PetscCall(VecDestroy(&diagV));
2462: PetscCall(VecRestoreArrayWrite(v, &diagA));
2463: PetscFunctionReturn(PETSC_SUCCESS);
2464: } else if (n == 0) {
2465: if (m) {
2466: PetscCall(VecGetArrayWrite(v, &a));
2467: for (r = 0; r < m; r++) {
2468: a[r] = PETSC_MIN_REAL;
2469: if (idx) idx[r] = -1;
2470: }
2471: PetscCall(VecRestoreArrayWrite(v, &a));
2472: }
2473: PetscFunctionReturn(PETSC_SUCCESS);
2474: }
2476: PetscCall(PetscMalloc2(m, &diagIdx, m, &offdiagIdx));
2477: PetscCall(VecCreateSeq(PETSC_COMM_SELF, m, &diagV));
2478: PetscCall(VecCreateSeq(PETSC_COMM_SELF, m, &offdiagV));
2479: PetscCall(MatGetRowMax(mat->A, diagV, diagIdx));
2481: /* Get offdiagIdx[] for implicit 0.0 */
2482: PetscCall(MatSeqAIJGetArrayRead(B, &bav));
2483: ba = bav;
2484: bi = b->i;
2485: bj = b->j;
2486: PetscCall(VecGetArrayWrite(offdiagV, &offdiagA));
2487: for (r = 0; r < m; r++) {
2488: ncols = bi[r + 1] - bi[r];
2489: if (ncols == A->cmap->N - n) { /* Brow is dense */
2490: offdiagA[r] = *ba;
2491: offdiagIdx[r] = cmap[0];
2492: } else { /* Brow is sparse, so we already know the maximum is 0.0 or higher (there is an implicit zero) */
2493: offdiagA[r] = 0.0;
2495: /* Find first hole in the cmap */
2496: for (j = 0; j < ncols; j++) {
2497: col = cmap[bj[j]]; /* global column number = cmap[B column number] */
2498: if (col > j && j < cstart) {
2499: offdiagIdx[r] = j; /* global column number of first implicit 0.0 */
2500: break;
2501: } else if (col > j + n && j >= cstart) {
2502: offdiagIdx[r] = j + n; /* global column number of first implicit 0.0 */
2503: break;
2504: }
2505: }
2506: if (j == ncols && ncols < A->cmap->N - n) {
2507: /* a hole is outside compressed Bcols */
2508: if (ncols == 0) {
2509: if (cstart) {
2510: offdiagIdx[r] = 0;
2511: } else offdiagIdx[r] = cend;
2512: } else { /* ncols > 0 */
2513: offdiagIdx[r] = cmap[ncols - 1] + 1;
2514: if (offdiagIdx[r] == cstart) offdiagIdx[r] += n;
2515: }
2516: }
2517: }
2519: for (j = 0; j < ncols; j++) {
2520: if (PetscRealPart(offdiagA[r]) < PetscRealPart(*ba)) {
2521: offdiagA[r] = *ba;
2522: offdiagIdx[r] = cmap[*bj];
2523: }
2524: ba++;
2525: bj++;
2526: }
2527: }
2529: PetscCall(VecGetArrayWrite(v, &a));
2530: PetscCall(VecGetArrayRead(diagV, (const PetscScalar **)&diagA));
2531: for (r = 0; r < m; ++r) {
2532: if (PetscRealPart(diagA[r]) > PetscRealPart(offdiagA[r])) {
2533: a[r] = diagA[r];
2534: if (idx) idx[r] = cstart + diagIdx[r];
2535: } else if (PetscRealPart(diagA[r]) == PetscRealPart(offdiagA[r])) {
2536: a[r] = diagA[r];
2537: if (idx) {
2538: if (cstart + diagIdx[r] <= offdiagIdx[r]) {
2539: idx[r] = cstart + diagIdx[r];
2540: } else idx[r] = offdiagIdx[r];
2541: }
2542: } else {
2543: a[r] = offdiagA[r];
2544: if (idx) idx[r] = offdiagIdx[r];
2545: }
2546: }
2547: PetscCall(MatSeqAIJRestoreArrayRead(B, &bav));
2548: PetscCall(VecRestoreArrayWrite(v, &a));
2549: PetscCall(VecRestoreArrayRead(diagV, (const PetscScalar **)&diagA));
2550: PetscCall(VecRestoreArrayWrite(offdiagV, &offdiagA));
2551: PetscCall(VecDestroy(&diagV));
2552: PetscCall(VecDestroy(&offdiagV));
2553: PetscCall(PetscFree2(diagIdx, offdiagIdx));
2554: PetscFunctionReturn(PETSC_SUCCESS);
2555: }
2557: PetscErrorCode MatGetSeqNonzeroStructure_MPIAIJ(Mat mat, Mat *newmat)
2558: {
2559: Mat *dummy;
2561: PetscFunctionBegin;
2562: PetscCall(MatCreateSubMatrix_MPIAIJ_All(mat, MAT_DO_NOT_GET_VALUES, MAT_INITIAL_MATRIX, &dummy));
2563: *newmat = *dummy;
2564: PetscCall(PetscFree(dummy));
2565: PetscFunctionReturn(PETSC_SUCCESS);
2566: }
2568: static PetscErrorCode MatInvertBlockDiagonal_MPIAIJ(Mat A, const PetscScalar **values)
2569: {
2570: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
2572: PetscFunctionBegin;
2573: PetscCall(MatInvertBlockDiagonal(a->A, values));
2574: A->factorerrortype = a->A->factorerrortype;
2575: PetscFunctionReturn(PETSC_SUCCESS);
2576: }
2578: static PetscErrorCode MatSetRandom_MPIAIJ(Mat x, PetscRandom rctx)
2579: {
2580: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)x->data;
2582: PetscFunctionBegin;
2583: PetscCheck(x->assembled || x->preallocated, PetscObjectComm((PetscObject)x), PETSC_ERR_ARG_WRONGSTATE, "MatSetRandom on an unassembled and unpreallocated MATMPIAIJ is not allowed");
2584: PetscCall(MatSetRandom(aij->A, rctx));
2585: if (x->assembled) {
2586: PetscCall(MatSetRandom(aij->B, rctx));
2587: } else {
2588: PetscCall(MatSetRandomSkipColumnRange_SeqAIJ_Private(aij->B, x->cmap->rstart, x->cmap->rend, rctx));
2589: }
2590: PetscCall(MatAssemblyBegin(x, MAT_FINAL_ASSEMBLY));
2591: PetscCall(MatAssemblyEnd(x, MAT_FINAL_ASSEMBLY));
2592: PetscFunctionReturn(PETSC_SUCCESS);
2593: }
2595: static PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap_MPIAIJ(Mat A, PetscBool sc)
2596: {
2597: PetscFunctionBegin;
2598: if (sc) A->ops->increaseoverlap = MatIncreaseOverlap_MPIAIJ_Scalable;
2599: else A->ops->increaseoverlap = MatIncreaseOverlap_MPIAIJ;
2600: PetscFunctionReturn(PETSC_SUCCESS);
2601: }
2603: /*@
2604: MatMPIAIJGetNumberNonzeros - gets the number of nonzeros in the matrix on this MPI rank
2606: Not Collective
2608: Input Parameter:
2609: . A - the matrix
2611: Output Parameter:
2612: . nz - the number of nonzeros
2614: Level: advanced
2616: .seealso: [](ch_matrices), `Mat`, `MATMPIAIJ`
2617: @*/
2618: PetscErrorCode MatMPIAIJGetNumberNonzeros(Mat A, PetscCount *nz)
2619: {
2620: Mat_MPIAIJ *maij = (Mat_MPIAIJ *)A->data;
2621: Mat_SeqAIJ *aaij = (Mat_SeqAIJ *)maij->A->data, *baij = (Mat_SeqAIJ *)maij->B->data;
2622: PetscBool isaij;
2624: PetscFunctionBegin;
2625: PetscCall(PetscObjectBaseTypeCompare((PetscObject)A, MATMPIAIJ, &isaij));
2626: PetscCheck(isaij, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Not for type %s", ((PetscObject)A)->type_name);
2627: *nz = aaij->i[A->rmap->n] + baij->i[A->rmap->n];
2628: PetscFunctionReturn(PETSC_SUCCESS);
2629: }
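/*
  A minimal usage sketch for the query above (assuming A is an assembled MATMPIAIJ matrix):

      PetscCount nz;
      PetscCall(MatMPIAIJGetNumberNonzeros(A, &nz));
      PetscCall(PetscPrintf(PETSC_COMM_SELF, "nonzeros on this rank: %" PetscInt64_FMT "\n", (PetscInt64)nz));
*/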
2631: /*@
2632: MatMPIAIJSetUseScalableIncreaseOverlap - Set whether the matrix uses a scalable algorithm to compute the overlap in `MatIncreaseOverlap()`
2634: Collective
2636: Input Parameters:
2637: + A - the matrix
2638: - sc - `PETSC_TRUE` indicates the scalable algorithm should be used (by default the scalable algorithm is not used)
2640: Level: advanced
2642: .seealso: [](ch_matrices), `Mat`, `MATMPIAIJ`
2643: @*/
2644: PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap(Mat A, PetscBool sc)
2645: {
2646: PetscFunctionBegin;
2647: PetscTryMethod(A, "MatMPIAIJSetUseScalableIncreaseOverlap_C", (Mat, PetscBool), (A, sc));
2648: PetscFunctionReturn(PETSC_SUCCESS);
2649: }
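/*
  A minimal usage sketch (assuming A is a MATMPIAIJ matrix): the scalable overlap algorithm can be
  requested programmatically before MatIncreaseOverlap() is invoked (for example by PCASM), or from
  the options database with -mat_increase_overlap_scalable, handled in MatSetFromOptions_MPIAIJ() below:

      PetscCall(MatMPIAIJSetUseScalableIncreaseOverlap(A, PETSC_TRUE));
*/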
2651: PetscErrorCode MatSetFromOptions_MPIAIJ(Mat A, PetscOptionItems PetscOptionsObject)
2652: {
2653: PetscBool sc = PETSC_FALSE, flg;
2655: PetscFunctionBegin;
2656: PetscOptionsHeadBegin(PetscOptionsObject, "MPIAIJ options");
2657: if (A->ops->increaseoverlap == MatIncreaseOverlap_MPIAIJ_Scalable) sc = PETSC_TRUE;
2658: PetscCall(PetscOptionsBool("-mat_increase_overlap_scalable", "Use a scalable algorithm to compute the overlap", "MatIncreaseOverlap", sc, &sc, &flg));
2659: if (flg) PetscCall(MatMPIAIJSetUseScalableIncreaseOverlap(A, sc));
2660: PetscOptionsHeadEnd();
2661: PetscFunctionReturn(PETSC_SUCCESS);
2662: }
2664: static PetscErrorCode MatShift_MPIAIJ(Mat Y, PetscScalar a)
2665: {
2666: Mat_MPIAIJ *maij = (Mat_MPIAIJ *)Y->data;
2667: Mat_SeqAIJ *aij = (Mat_SeqAIJ *)maij->A->data;
2669: PetscFunctionBegin;
2670: if (!Y->preallocated) {
2671: PetscCall(MatMPIAIJSetPreallocation(Y, 1, NULL, 0, NULL));
2672: } else if (!aij->nz) { /* It does not matter if the diagonal of Y only partially lies in maij->A. We just need an estimated preallocation. */
2673: PetscInt nonew = aij->nonew;
2674: PetscCall(MatSeqAIJSetPreallocation(maij->A, 1, NULL));
2675: aij->nonew = nonew;
2676: }
2677: PetscCall(MatShift_Basic(Y, a));
2678: PetscFunctionReturn(PETSC_SUCCESS);
2679: }
2681: static PetscErrorCode MatInvertVariableBlockDiagonal_MPIAIJ(Mat A, PetscInt nblocks, const PetscInt *bsizes, PetscScalar *diag)
2682: {
2683: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
2685: PetscFunctionBegin;
2686: PetscCall(MatInvertVariableBlockDiagonal(a->A, nblocks, bsizes, diag));
2687: PetscFunctionReturn(PETSC_SUCCESS);
2688: }
2690: static PetscErrorCode MatEliminateZeros_MPIAIJ(Mat A, PetscBool keep)
2691: {
2692: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
2694: PetscFunctionBegin;
2695: PetscCall(MatEliminateZeros_SeqAIJ(a->A, keep)); // possibly keep zero diagonal coefficients
2696: PetscCall(MatEliminateZeros_SeqAIJ(a->B, PETSC_FALSE)); // never keep zero diagonal coefficients
2697: PetscFunctionReturn(PETSC_SUCCESS);
2698: }
2700: static struct _MatOps MatOps_Values = {MatSetValues_MPIAIJ,
2701: MatGetRow_MPIAIJ,
2702: MatRestoreRow_MPIAIJ,
2703: MatMult_MPIAIJ,
2704: /* 4*/ MatMultAdd_MPIAIJ,
2705: MatMultTranspose_MPIAIJ,
2706: MatMultTransposeAdd_MPIAIJ,
2707: NULL,
2708: NULL,
2709: NULL,
2710: /*10*/ NULL,
2711: NULL,
2712: NULL,
2713: MatSOR_MPIAIJ,
2714: MatTranspose_MPIAIJ,
2715: /*15*/ MatGetInfo_MPIAIJ,
2716: MatEqual_MPIAIJ,
2717: MatGetDiagonal_MPIAIJ,
2718: MatDiagonalScale_MPIAIJ,
2719: MatNorm_MPIAIJ,
2720: /*20*/ MatAssemblyBegin_MPIAIJ,
2721: MatAssemblyEnd_MPIAIJ,
2722: MatSetOption_MPIAIJ,
2723: MatZeroEntries_MPIAIJ,
2724: /*24*/ MatZeroRows_MPIAIJ,
2725: NULL,
2726: NULL,
2727: NULL,
2728: NULL,
2729: /*29*/ MatSetUp_MPI_Hash,
2730: NULL,
2731: NULL,
2732: MatGetDiagonalBlock_MPIAIJ,
2733: NULL,
2734: /*34*/ MatDuplicate_MPIAIJ,
2735: NULL,
2736: NULL,
2737: NULL,
2738: NULL,
2739: /*39*/ MatAXPY_MPIAIJ,
2740: MatCreateSubMatrices_MPIAIJ,
2741: MatIncreaseOverlap_MPIAIJ,
2742: MatGetValues_MPIAIJ,
2743: MatCopy_MPIAIJ,
2744: /*44*/ MatGetRowMax_MPIAIJ,
2745: MatScale_MPIAIJ,
2746: MatShift_MPIAIJ,
2747: MatDiagonalSet_MPIAIJ,
2748: MatZeroRowsColumns_MPIAIJ,
2749: /*49*/ MatSetRandom_MPIAIJ,
2750: MatGetRowIJ_MPIAIJ,
2751: MatRestoreRowIJ_MPIAIJ,
2752: NULL,
2753: NULL,
2754: /*54*/ MatFDColoringCreate_MPIXAIJ,
2755: NULL,
2756: MatSetUnfactored_MPIAIJ,
2757: MatPermute_MPIAIJ,
2758: NULL,
2759: /*59*/ MatCreateSubMatrix_MPIAIJ,
2760: MatDestroy_MPIAIJ,
2761: MatView_MPIAIJ,
2762: NULL,
2763: NULL,
2764: /*64*/ MatMatMatMultNumeric_MPIAIJ_MPIAIJ_MPIAIJ,
2765: NULL,
2766: NULL,
2767: NULL,
2768: MatGetRowMaxAbs_MPIAIJ,
2769: /*69*/ MatGetRowMinAbs_MPIAIJ,
2770: NULL,
2771: NULL,
2772: MatFDColoringApply_AIJ,
2773: MatSetFromOptions_MPIAIJ,
2774: MatFindZeroDiagonals_MPIAIJ,
2775: /*75*/ NULL,
2776: NULL,
2777: NULL,
2778: MatLoad_MPIAIJ,
2779: NULL,
2780: /*80*/ NULL,
2781: NULL,
2782: NULL,
2783: /*83*/ NULL,
2784: NULL,
2785: MatMatMultNumeric_MPIAIJ_MPIAIJ,
2786: MatPtAPNumeric_MPIAIJ_MPIAIJ,
2787: NULL,
2788: NULL,
2789: /*89*/ MatBindToCPU_MPIAIJ,
2790: MatProductSetFromOptions_MPIAIJ,
2791: NULL,
2792: NULL,
2793: MatConjugate_MPIAIJ,
2794: /*94*/ NULL,
2795: MatSetValuesRow_MPIAIJ,
2796: MatRealPart_MPIAIJ,
2797: MatImaginaryPart_MPIAIJ,
2798: NULL,
2799: /*99*/ NULL,
2800: NULL,
2801: NULL,
2802: MatGetRowMin_MPIAIJ,
2803: NULL,
2804: /*104*/ MatGetSeqNonzeroStructure_MPIAIJ,
2805: NULL,
2806: MatGetGhosts_MPIAIJ,
2807: NULL,
2808: NULL,
2809: /*109*/ MatMultDiagonalBlock_MPIAIJ,
2810: NULL,
2811: NULL,
2812: NULL,
2813: MatGetMultiProcBlock_MPIAIJ,
2814: /*114*/ MatFindNonzeroRows_MPIAIJ,
2815: MatGetColumnReductions_MPIAIJ,
2816: MatInvertBlockDiagonal_MPIAIJ,
2817: MatInvertVariableBlockDiagonal_MPIAIJ,
2818: MatCreateSubMatricesMPI_MPIAIJ,
2819: /*119*/ NULL,
2820: MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ,
2821: NULL,
2822: NULL,
2823: NULL,
2824: /*124*/ NULL,
2825: MatSetBlockSizes_MPIAIJ,
2826: NULL,
2827: MatFDColoringSetUp_MPIXAIJ,
2828: MatFindOffBlockDiagonalEntries_MPIAIJ,
2829: /*129*/ MatCreateMPIMatConcatenateSeqMat_MPIAIJ,
2830: NULL,
2831: NULL,
2832: NULL,
2833: MatCreateGraph_Simple_AIJ,
2834: /*134*/ NULL,
2835: MatEliminateZeros_MPIAIJ,
2836: MatGetRowSumAbs_MPIAIJ,
2837: NULL,
2838: NULL,
2839: /*139*/ NULL,
2840: MatCopyHashToXAIJ_MPI_Hash,
2841: MatGetCurrentMemType_MPIAIJ,
2842: NULL,
2843: MatADot_Default,
2844: /*144*/ MatANorm_Default,
2845: NULL,
2846: NULL};
2848: static PetscErrorCode MatStoreValues_MPIAIJ(Mat mat)
2849: {
2850: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
2852: PetscFunctionBegin;
2853: PetscCall(MatStoreValues(aij->A));
2854: PetscCall(MatStoreValues(aij->B));
2855: PetscFunctionReturn(PETSC_SUCCESS);
2856: }
2858: static PetscErrorCode MatRetrieveValues_MPIAIJ(Mat mat)
2859: {
2860: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
2862: PetscFunctionBegin;
2863: PetscCall(MatRetrieveValues(aij->A));
2864: PetscCall(MatRetrieveValues(aij->B));
2865: PetscFunctionReturn(PETSC_SUCCESS);
2866: }
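/*
   A minimal usage sketch (illustrative only) of the store/retrieve cycle behind the two callbacks above;
   `mat` is assumed to be an already assembled MATMPIAIJ whose nonzero pattern does not change.

     PetscCall(MatSetOption(mat, MAT_NEW_NONZERO_LOCATIONS, PETSC_FALSE));
     PetscCall(MatStoreValues(mat));    // stash a copy of the current numerical values
     // ... overwrite entries with MatSetValues() and reassemble (same nonzero pattern) ...
     PetscCall(MatRetrieveValues(mat)); // restore the stashed values
*/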
2868: PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJ(Mat B, PetscInt d_nz, const PetscInt d_nnz[], PetscInt o_nz, const PetscInt o_nnz[])
2869: {
2870: Mat_MPIAIJ *b = (Mat_MPIAIJ *)B->data;
2871: PetscMPIInt size;
2873: PetscFunctionBegin;
2874: if (B->hash_active) {
2875: B->ops[0] = b->cops;
2876: B->hash_active = PETSC_FALSE;
2877: }
2878: PetscCall(PetscLayoutSetUp(B->rmap));
2879: PetscCall(PetscLayoutSetUp(B->cmap));
2881: #if defined(PETSC_USE_CTABLE)
2882: PetscCall(PetscHMapIDestroy(&b->colmap));
2883: #else
2884: PetscCall(PetscFree(b->colmap));
2885: #endif
2886: PetscCall(PetscFree(b->garray));
2887: PetscCall(VecDestroy(&b->lvec));
2888: PetscCall(VecScatterDestroy(&b->Mvctx));
2890: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)B), &size));
2892: MatSeqXAIJGetOptions_Private(b->B);
2893: PetscCall(MatDestroy(&b->B));
2894: PetscCall(MatCreate(PETSC_COMM_SELF, &b->B));
2895: PetscCall(MatSetSizes(b->B, B->rmap->n, size > 1 ? B->cmap->N : 0, B->rmap->n, size > 1 ? B->cmap->N : 0));
2896: PetscCall(MatSetBlockSizesFromMats(b->B, B, B));
2897: PetscCall(MatSetType(b->B, MATSEQAIJ));
2898: MatSeqXAIJRestoreOptions_Private(b->B);
2900: MatSeqXAIJGetOptions_Private(b->A);
2901: PetscCall(MatDestroy(&b->A));
2902: PetscCall(MatCreate(PETSC_COMM_SELF, &b->A));
2903: PetscCall(MatSetSizes(b->A, B->rmap->n, B->cmap->n, B->rmap->n, B->cmap->n));
2904: PetscCall(MatSetBlockSizesFromMats(b->A, B, B));
2905: PetscCall(MatSetType(b->A, MATSEQAIJ));
2906: MatSeqXAIJRestoreOptions_Private(b->A);
2908: PetscCall(MatSeqAIJSetPreallocation(b->A, d_nz, d_nnz));
2909: PetscCall(MatSeqAIJSetPreallocation(b->B, o_nz, o_nnz));
2910: B->preallocated = PETSC_TRUE;
2911: B->was_assembled = PETSC_FALSE;
2912: B->assembled = PETSC_FALSE;
2913: PetscFunctionReturn(PETSC_SUCCESS);
2914: }
2916: static PetscErrorCode MatResetPreallocation_MPIAIJ(Mat B)
2917: {
2918: Mat_MPIAIJ *b = (Mat_MPIAIJ *)B->data;
2919: PetscBool ondiagreset, offdiagreset, memoryreset;
2921: PetscFunctionBegin;
2923: PetscCheck(B->insertmode == NOT_SET_VALUES, PETSC_COMM_SELF, PETSC_ERR_SUP, "Cannot reset preallocation after setting some values but not yet calling MatAssemblyBegin()/MatAssemblyEnd()");
2924: if (B->num_ass == 0) PetscFunctionReturn(PETSC_SUCCESS);
2926: PetscCall(MatResetPreallocation_SeqAIJ_Private(b->A, &ondiagreset));
2927: PetscCall(MatResetPreallocation_SeqAIJ_Private(b->B, &offdiagreset));
2928: memoryreset = (PetscBool)(ondiagreset || offdiagreset);
2929: PetscCallMPI(MPIU_Allreduce(MPI_IN_PLACE, &memoryreset, 1, MPI_C_BOOL, MPI_LOR, PetscObjectComm((PetscObject)B)));
2930: if (!memoryreset) PetscFunctionReturn(PETSC_SUCCESS);
2932: PetscCall(PetscLayoutSetUp(B->rmap));
2933: PetscCall(PetscLayoutSetUp(B->cmap));
2934: PetscCheck(B->assembled || B->was_assembled, PetscObjectComm((PetscObject)B), PETSC_ERR_ARG_WRONGSTATE, "Should not need to reset preallocation if the matrix was never assembled");
2935: PetscCall(MatDisAssemble_MPIAIJ(B, PETSC_TRUE));
2936: PetscCall(VecScatterDestroy(&b->Mvctx));
2938: B->preallocated = PETSC_TRUE;
2939: B->was_assembled = PETSC_FALSE;
2940: B->assembled = PETSC_FALSE;
2941: /* Log that the state of this object has changed; this will help guarantee that preconditioners get re-setup */
2942: PetscCall(PetscObjectStateIncrease((PetscObject)B));
2943: PetscFunctionReturn(PETSC_SUCCESS);
2944: }
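/*
   A minimal usage sketch (illustrative only): after a matrix `A` has been assembled at least once,
   the public MatResetPreallocation() (which reaches the routine above for MATMPIAIJ) lets the caller
   start a fresh assembly that reuses the originally preallocated nonzero space.

     PetscCall(MatResetPreallocation(A));
     // ... MatSetValues() calls followed by MatAssemblyBegin()/MatAssemblyEnd(), as in the first assembly ...
*/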
2946: PetscErrorCode MatDuplicate_MPIAIJ(Mat matin, MatDuplicateOption cpvalues, Mat *newmat)
2947: {
2948: Mat mat;
2949: Mat_MPIAIJ *a, *oldmat = (Mat_MPIAIJ *)matin->data;
2951: PetscFunctionBegin;
2952: *newmat = NULL;
2953: PetscCall(MatCreate(PetscObjectComm((PetscObject)matin), &mat));
2954: PetscCall(MatSetSizes(mat, matin->rmap->n, matin->cmap->n, matin->rmap->N, matin->cmap->N));
2955: PetscCall(MatSetBlockSizesFromMats(mat, matin, matin));
2956: PetscCall(MatSetType(mat, ((PetscObject)matin)->type_name));
2957: a = (Mat_MPIAIJ *)mat->data;
2959: mat->factortype = matin->factortype;
2960: mat->assembled = matin->assembled;
2961: mat->insertmode = NOT_SET_VALUES;
2963: a->size = oldmat->size;
2964: a->rank = oldmat->rank;
2965: a->donotstash = oldmat->donotstash;
2966: a->roworiented = oldmat->roworiented;
2967: a->rowindices = NULL;
2968: a->rowvalues = NULL;
2969: a->getrowactive = PETSC_FALSE;
2971: PetscCall(PetscLayoutReference(matin->rmap, &mat->rmap));
2972: PetscCall(PetscLayoutReference(matin->cmap, &mat->cmap));
2973: if (matin->hash_active) PetscCall(MatSetUp(mat));
2974: else {
2975: mat->preallocated = matin->preallocated;
2976: if (oldmat->colmap) {
2977: #if defined(PETSC_USE_CTABLE)
2978: PetscCall(PetscHMapIDuplicate(oldmat->colmap, &a->colmap));
2979: #else
2980: PetscCall(PetscMalloc1(mat->cmap->N, &a->colmap));
2981: PetscCall(PetscArraycpy(a->colmap, oldmat->colmap, mat->cmap->N));
2982: #endif
2983: } else a->colmap = NULL;
2984: if (oldmat->garray) {
2985: PetscInt len;
2986: len = oldmat->B->cmap->n;
2987: PetscCall(PetscMalloc1(len, &a->garray));
2988: if (len) PetscCall(PetscArraycpy(a->garray, oldmat->garray, len));
2989: } else a->garray = NULL;
2991: /* MatDuplicate() may be called with a non-assembled matrix;
2992: in fact, MatDuplicate() only requires the matrix to be preallocated.
2993: This may happen inside a DMCreateMatrix_Shell */
2994: if (oldmat->lvec) PetscCall(VecDuplicate(oldmat->lvec, &a->lvec));
2995: if (oldmat->Mvctx) {
2996: a->Mvctx = oldmat->Mvctx;
2997: PetscCall(PetscObjectReference((PetscObject)oldmat->Mvctx));
2998: }
2999: PetscCall(MatDuplicate(oldmat->A, cpvalues, &a->A));
3000: PetscCall(MatDuplicate(oldmat->B, cpvalues, &a->B));
3001: }
3002: PetscCall(PetscFunctionListDuplicate(((PetscObject)matin)->qlist, &((PetscObject)mat)->qlist));
3003: *newmat = mat;
3004: PetscFunctionReturn(PETSC_SUCCESS);
3005: }
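/*
   A minimal usage sketch (illustrative only) of the public entry point that lands in the routine above
   for MATMPIAIJ; `A` is assumed to be an existing, preallocated (possibly assembled) matrix.

     Mat B;

     PetscCall(MatDuplicate(A, MAT_COPY_VALUES, &B)); // same layout and nonzero pattern, values copied
     // ... use B independently of A ...
     PetscCall(MatDestroy(&B));
*/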
3007: PetscErrorCode MatLoad_MPIAIJ(Mat newMat, PetscViewer viewer)
3008: {
3009: PetscBool isbinary, ishdf5;
3011: PetscFunctionBegin;
3014: /* force binary viewer to load .info file if it has not yet done so */
3015: PetscCall(PetscViewerSetUp(viewer));
3016: PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERBINARY, &isbinary));
3017: PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERHDF5, &ishdf5));
3018: if (isbinary) {
3019: PetscCall(MatLoad_MPIAIJ_Binary(newMat, viewer));
3020: } else if (ishdf5) {
3021: #if defined(PETSC_HAVE_HDF5)
3022: PetscCall(MatLoad_AIJ_HDF5(newMat, viewer));
3023: #else
3024: SETERRQ(PetscObjectComm((PetscObject)newMat), PETSC_ERR_SUP, "HDF5 not supported in this build.\nPlease reconfigure using --download-hdf5");
3025: #endif
3026: } else {
3027: SETERRQ(PetscObjectComm((PetscObject)newMat), PETSC_ERR_SUP, "Viewer type %s not yet supported for reading %s matrices", ((PetscObject)viewer)->type_name, ((PetscObject)newMat)->type_name);
3028: }
3029: PetscFunctionReturn(PETSC_SUCCESS);
3030: }
3032: PetscErrorCode MatLoad_MPIAIJ_Binary(Mat mat, PetscViewer viewer)
3033: {
3034: PetscInt header[4], M, N, m, nz, rows, cols, sum, i;
3035: PetscInt *rowidxs, *colidxs;
3036: PetscScalar *matvals;
3038: PetscFunctionBegin;
3039: PetscCall(PetscViewerSetUp(viewer));
3041: /* read in matrix header */
3042: PetscCall(PetscViewerBinaryRead(viewer, header, 4, NULL, PETSC_INT));
3043: PetscCheck(header[0] == MAT_FILE_CLASSID, PetscObjectComm((PetscObject)viewer), PETSC_ERR_FILE_UNEXPECTED, "Not a matrix object in file");
3044: M = header[1];
3045: N = header[2];
3046: nz = header[3];
3047: PetscCheck(M >= 0, PetscObjectComm((PetscObject)viewer), PETSC_ERR_FILE_UNEXPECTED, "Matrix row size (%" PetscInt_FMT ") in file is negative", M);
3048: PetscCheck(N >= 0, PetscObjectComm((PetscObject)viewer), PETSC_ERR_FILE_UNEXPECTED, "Matrix column size (%" PetscInt_FMT ") in file is negative", N);
3049: PetscCheck(nz >= 0, PETSC_COMM_SELF, PETSC_ERR_FILE_UNEXPECTED, "Matrix stored in special format on disk, cannot load as MPIAIJ");
3051: /* set block sizes from the viewer's .info file */
3052: PetscCall(MatLoad_Binary_BlockSizes(mat, viewer));
3053: /* set global sizes if not set already */
3054: if (mat->rmap->N < 0) mat->rmap->N = M;
3055: if (mat->cmap->N < 0) mat->cmap->N = N;
3056: PetscCall(PetscLayoutSetUp(mat->rmap));
3057: PetscCall(PetscLayoutSetUp(mat->cmap));
3059: /* check if the matrix sizes are correct */
3060: PetscCall(MatGetSize(mat, &rows, &cols));
3061: PetscCheck(M == rows && N == cols, PETSC_COMM_SELF, PETSC_ERR_FILE_UNEXPECTED, "Matrix in file of different sizes (%" PetscInt_FMT ", %" PetscInt_FMT ") than the input matrix (%" PetscInt_FMT ", %" PetscInt_FMT ")", M, N, rows, cols);
3063: /* read in row lengths and build row indices */
3064: PetscCall(MatGetLocalSize(mat, &m, NULL));
3065: PetscCall(PetscMalloc1(m + 1, &rowidxs));
3066: PetscCall(PetscViewerBinaryReadAll(viewer, rowidxs + 1, m, PETSC_DECIDE, M, PETSC_INT));
3067: rowidxs[0] = 0;
3068: for (i = 0; i < m; i++) rowidxs[i + 1] += rowidxs[i];
3069: if (nz != PETSC_INT_MAX) {
3070: PetscCallMPI(MPIU_Allreduce(&rowidxs[m], &sum, 1, MPIU_INT, MPI_SUM, PetscObjectComm((PetscObject)viewer)));
3071: PetscCheck(sum == nz, PetscObjectComm((PetscObject)viewer), PETSC_ERR_FILE_UNEXPECTED, "Inconsistent matrix data in file: nonzeros = %" PetscInt_FMT ", sum-row-lengths = %" PetscInt_FMT, nz, sum);
3072: }
3074: /* read in column indices and matrix values */
3075: PetscCall(PetscMalloc2(rowidxs[m], &colidxs, rowidxs[m], &matvals));
3076: PetscCall(PetscViewerBinaryReadAll(viewer, colidxs, rowidxs[m], PETSC_DETERMINE, PETSC_DETERMINE, PETSC_INT));
3077: PetscCall(PetscViewerBinaryReadAll(viewer, matvals, rowidxs[m], PETSC_DETERMINE, PETSC_DETERMINE, PETSC_SCALAR));
3078: /* store matrix indices and values */
3079: PetscCall(MatMPIAIJSetPreallocationCSR(mat, rowidxs, colidxs, matvals));
3080: PetscCall(PetscFree(rowidxs));
3081: PetscCall(PetscFree2(colidxs, matvals));
3082: PetscFunctionReturn(PETSC_SUCCESS);
3083: }
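/*
   A minimal usage sketch (illustrative only) of loading a MATMPIAIJ matrix through the binary path
   above; the file name "matrix.dat" is a placeholder.

     Mat         A;
     PetscViewer viewer;

     PetscCall(PetscViewerBinaryOpen(PETSC_COMM_WORLD, "matrix.dat", FILE_MODE_READ, &viewer));
     PetscCall(MatCreate(PETSC_COMM_WORLD, &A));
     PetscCall(MatSetType(A, MATMPIAIJ));
     PetscCall(MatLoad(A, viewer));
     PetscCall(PetscViewerDestroy(&viewer));
*/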
3085: /* Not scalable because of ISAllGather() unless getting all columns. */
3086: static PetscErrorCode ISGetSeqIS_Private(Mat mat, IS iscol, IS *isseq)
3087: {
3088: IS iscol_local;
3089: PetscBool isstride;
3090: PetscMPIInt gisstride = 0;
3092: PetscFunctionBegin;
3093: /* check if we are grabbing all columns */
3094: PetscCall(PetscObjectTypeCompare((PetscObject)iscol, ISSTRIDE, &isstride));
3096: if (isstride) {
3097: PetscInt start, len, mstart, mlen;
3098: PetscCall(ISStrideGetInfo(iscol, &start, NULL));
3099: PetscCall(ISGetLocalSize(iscol, &len));
3100: PetscCall(MatGetOwnershipRangeColumn(mat, &mstart, &mlen));
3101: if (mstart == start && mlen - mstart == len) gisstride = 1;
3102: }
3104: PetscCallMPI(MPIU_Allreduce(MPI_IN_PLACE, &gisstride, 1, MPI_INT, MPI_MIN, PetscObjectComm((PetscObject)mat)));
3105: if (gisstride) {
3106: PetscInt N;
3107: PetscCall(MatGetSize(mat, NULL, &N));
3108: PetscCall(ISCreateStride(PETSC_COMM_SELF, N, 0, 1, &iscol_local));
3109: PetscCall(ISSetIdentity(iscol_local));
3110: PetscCall(PetscInfo(mat, "Optimizing for obtaining all columns of the matrix; skipping ISAllGather()\n"));
3111: } else {
3112: PetscInt cbs;
3113: PetscCall(ISGetBlockSize(iscol, &cbs));
3114: PetscCall(ISAllGather(iscol, &iscol_local));
3115: PetscCall(ISSetBlockSize(iscol_local, cbs));
3116: }
3118: *isseq = iscol_local;
3119: PetscFunctionReturn(PETSC_SUCCESS);
3120: }
3122: /*
3123: Used by MatCreateSubMatrix_MPIAIJ_SameRowColDist() to avoid ISAllGather() and global size of iscol_local
3124: (see MatCreateSubMatrix_MPIAIJ_nonscalable)
3126: Input Parameters:
3127: + mat - matrix
3128: + isrow - parallel row index set; its local indices are a subset of the local rows of `mat`,
3129: i.e., mat->rstart <= isrow[i] < mat->rend
3130: - iscol - parallel column index set; its local indices are a subset of local columns of `mat`,
3131: i.e., mat->cstart <= iscol[i] < mat->cend
3133: Output Parameters:
3134: + isrow_d - sequential row index set for retrieving mat->A
3135: . iscol_d - sequential column index set for retrieving mat->A
3136: . iscol_o - sequential column index set for retrieving mat->B
3137: - garray - column map; garray[i] indicates global location of iscol_o[i] in `iscol`
3138: */
3139: static PetscErrorCode ISGetSeqIS_SameColDist_Private(Mat mat, IS isrow, IS iscol, IS *isrow_d, IS *iscol_d, IS *iscol_o, PetscInt *garray[])
3140: {
3141: Vec x, cmap;
3142: const PetscInt *is_idx;
3143: PetscScalar *xarray, *cmaparray;
3144: PetscInt ncols, isstart, *idx, m, rstart, *cmap1, count;
3145: Mat_MPIAIJ *a = (Mat_MPIAIJ *)mat->data;
3146: Mat B = a->B;
3147: Vec lvec = a->lvec, lcmap;
3148: PetscInt i, cstart, cend, Bn = B->cmap->N;
3149: MPI_Comm comm;
3150: VecScatter Mvctx = a->Mvctx;
3152: PetscFunctionBegin;
3153: PetscCall(PetscObjectGetComm((PetscObject)mat, &comm));
3154: PetscCall(ISGetLocalSize(iscol, &ncols));
3156: /* (1) iscol is a sub-column vector of mat, pad it with '-1.' to form a full vector x */
3157: PetscCall(MatCreateVecs(mat, &x, NULL));
3158: PetscCall(VecSet(x, -1.0));
3159: PetscCall(VecDuplicate(x, &cmap));
3160: PetscCall(VecSet(cmap, -1.0));
3162: /* Get start indices */
3163: PetscCallMPI(MPI_Scan(&ncols, &isstart, 1, MPIU_INT, MPI_SUM, comm));
3164: isstart -= ncols;
3165: PetscCall(MatGetOwnershipRangeColumn(mat, &cstart, &cend));
3167: PetscCall(ISGetIndices(iscol, &is_idx));
3168: PetscCall(VecGetArray(x, &xarray));
3169: PetscCall(VecGetArray(cmap, &cmaparray));
3170: PetscCall(PetscMalloc1(ncols, &idx));
3171: for (i = 0; i < ncols; i++) {
3172: xarray[is_idx[i] - cstart] = (PetscScalar)is_idx[i];
3173: cmaparray[is_idx[i] - cstart] = i + isstart; /* global index of iscol[i] */
3174: idx[i] = is_idx[i] - cstart; /* local index of iscol[i] */
3175: }
3176: PetscCall(VecRestoreArray(x, &xarray));
3177: PetscCall(VecRestoreArray(cmap, &cmaparray));
3178: PetscCall(ISRestoreIndices(iscol, &is_idx));
3180: /* Get iscol_d */
3181: PetscCall(ISCreateGeneral(PETSC_COMM_SELF, ncols, idx, PETSC_OWN_POINTER, iscol_d));
3182: PetscCall(ISGetBlockSize(iscol, &i));
3183: PetscCall(ISSetBlockSize(*iscol_d, i));
3185: /* Get isrow_d */
3186: PetscCall(ISGetLocalSize(isrow, &m));
3187: rstart = mat->rmap->rstart;
3188: PetscCall(PetscMalloc1(m, &idx));
3189: PetscCall(ISGetIndices(isrow, &is_idx));
3190: for (i = 0; i < m; i++) idx[i] = is_idx[i] - rstart;
3191: PetscCall(ISRestoreIndices(isrow, &is_idx));
3193: PetscCall(ISCreateGeneral(PETSC_COMM_SELF, m, idx, PETSC_OWN_POINTER, isrow_d));
3194: PetscCall(ISGetBlockSize(isrow, &i));
3195: PetscCall(ISSetBlockSize(*isrow_d, i));
3197: /* (2) Scatter x and cmap using aij->Mvctx to get their off-process portions (see MatMult_MPIAIJ) */
3198: PetscCall(VecScatterBegin(Mvctx, x, lvec, INSERT_VALUES, SCATTER_FORWARD));
3199: PetscCall(VecScatterEnd(Mvctx, x, lvec, INSERT_VALUES, SCATTER_FORWARD));
3201: PetscCall(VecDuplicate(lvec, &lcmap));
3203: PetscCall(VecScatterBegin(Mvctx, cmap, lcmap, INSERT_VALUES, SCATTER_FORWARD));
3204: PetscCall(VecScatterEnd(Mvctx, cmap, lcmap, INSERT_VALUES, SCATTER_FORWARD));
3206: /* (3) create sequential iscol_o (a subset of iscol) and isgarray */
3207: /* off-process column indices */
3208: count = 0;
3209: PetscCall(PetscMalloc1(Bn, &idx));
3210: PetscCall(PetscMalloc1(Bn, &cmap1));
3212: PetscCall(VecGetArray(lvec, &xarray));
3213: PetscCall(VecGetArray(lcmap, &cmaparray));
3214: for (i = 0; i < Bn; i++) {
3215: if (PetscRealPart(xarray[i]) > -1.0) {
3216: idx[count] = i; /* local column index in off-diagonal part B */
3217: cmap1[count] = (PetscInt)PetscRealPart(cmaparray[i]); /* column index in submat */
3218: count++;
3219: }
3220: }
3221: PetscCall(VecRestoreArray(lvec, &xarray));
3222: PetscCall(VecRestoreArray(lcmap, &cmaparray));
3224: PetscCall(ISCreateGeneral(PETSC_COMM_SELF, count, idx, PETSC_COPY_VALUES, iscol_o));
3225: /* cannot ensure iscol_o has same blocksize as iscol! */
3227: PetscCall(PetscFree(idx));
3228: *garray = cmap1;
3230: PetscCall(VecDestroy(&x));
3231: PetscCall(VecDestroy(&cmap));
3232: PetscCall(VecDestroy(&lcmap));
3233: PetscFunctionReturn(PETSC_SUCCESS);
3234: }
3236: /* isrow and iscol have same processor distribution as mat, output *submat is a submatrix of local mat */
3237: PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowColDist(Mat mat, IS isrow, IS iscol, MatReuse call, Mat *submat)
3238: {
3239: Mat_MPIAIJ *a = (Mat_MPIAIJ *)mat->data, *asub;
3240: Mat M = NULL;
3241: MPI_Comm comm;
3242: IS iscol_d, isrow_d, iscol_o;
3243: Mat Asub = NULL, Bsub = NULL;
3244: PetscInt n, count, M_size, N_size;
3246: PetscFunctionBegin;
3247: PetscCall(PetscObjectGetComm((PetscObject)mat, &comm));
3249: if (call == MAT_REUSE_MATRIX) {
3250: /* Retrieve isrow_d, iscol_d and iscol_o from submat */
3251: PetscCall(PetscObjectQuery((PetscObject)*submat, "isrow_d", (PetscObject *)&isrow_d));
3252: PetscCheck(isrow_d, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "isrow_d passed in was not used before, cannot reuse");
3254: PetscCall(PetscObjectQuery((PetscObject)*submat, "iscol_d", (PetscObject *)&iscol_d));
3255: PetscCheck(iscol_d, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "iscol_d passed in was not used before, cannot reuse");
3257: PetscCall(PetscObjectQuery((PetscObject)*submat, "iscol_o", (PetscObject *)&iscol_o));
3258: PetscCheck(iscol_o, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "iscol_o passed in was not used before, cannot reuse");
3260: /* Update diagonal and off-diagonal portions of submat */
3261: asub = (Mat_MPIAIJ *)(*submat)->data;
3262: PetscCall(MatCreateSubMatrix_SeqAIJ(a->A, isrow_d, iscol_d, PETSC_DECIDE, MAT_REUSE_MATRIX, &asub->A));
3263: PetscCall(ISGetLocalSize(iscol_o, &n));
3264: if (n) PetscCall(MatCreateSubMatrix_SeqAIJ(a->B, isrow_d, iscol_o, PETSC_DECIDE, MAT_REUSE_MATRIX, &asub->B));
3265: PetscCall(MatAssemblyBegin(*submat, MAT_FINAL_ASSEMBLY));
3266: PetscCall(MatAssemblyEnd(*submat, MAT_FINAL_ASSEMBLY));
3268: } else { /* call == MAT_INITIAL_MATRIX */
3269: PetscInt *garray, *garray_compact;
3270: PetscInt BsubN;
3272: /* Create isrow_d, iscol_d, iscol_o and isgarray (replace isgarray with array?) */
3273: PetscCall(ISGetSeqIS_SameColDist_Private(mat, isrow, iscol, &isrow_d, &iscol_d, &iscol_o, &garray));
3275: /* Create local submatrices Asub and Bsub */
3276: PetscCall(MatCreateSubMatrix_SeqAIJ(a->A, isrow_d, iscol_d, PETSC_DECIDE, MAT_INITIAL_MATRIX, &Asub));
3277: PetscCall(MatCreateSubMatrix_SeqAIJ(a->B, isrow_d, iscol_o, PETSC_DECIDE, MAT_INITIAL_MATRIX, &Bsub));
3279: // Compact garray so it is not of size Bn
3280: PetscCall(ISGetSize(iscol_o, &count));
3281: PetscCall(PetscMalloc1(count, &garray_compact));
3282: PetscCall(PetscArraycpy(garray_compact, garray, count));
3284: /* Create submatrix M */
3285: PetscCall(ISGetSize(isrow, &M_size));
3286: PetscCall(ISGetSize(iscol, &N_size));
3287: PetscCall(MatCreateMPIAIJWithSeqAIJ(comm, M_size, N_size, Asub, Bsub, garray_compact, &M));
3289: /* If Bsub has empty columns, compress iscol_o such that it will retrieve condensed Bsub from a->B during reuse */
3290: asub = (Mat_MPIAIJ *)M->data;
3292: PetscCall(ISGetLocalSize(iscol_o, &BsubN));
3293: n = asub->B->cmap->N;
3294: if (BsubN > n) {
3295: /* This case can be tested using ~petsc/src/tao/bound/tutorials/runplate2_3 */
3296: const PetscInt *idx;
3297: PetscInt i, j, *idx_new, *subgarray = asub->garray;
3298: PetscCall(PetscInfo(M, "submatrix Bn %" PetscInt_FMT " != BsubN %" PetscInt_FMT ", update iscol_o\n", n, BsubN));
3300: PetscCall(PetscMalloc1(n, &idx_new));
3301: j = 0;
3302: PetscCall(ISGetIndices(iscol_o, &idx));
3303: for (i = 0; i < n; i++) {
3304: if (j >= BsubN) break;
3305: while (subgarray[i] > garray[j]) j++;
3307: PetscCheck(subgarray[i] == garray[j], PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "subgarray[%" PetscInt_FMT "]=%" PetscInt_FMT " cannot be < garray[%" PetscInt_FMT "]=%" PetscInt_FMT, i, subgarray[i], j, garray[j]);
3308: idx_new[i] = idx[j++];
3309: }
3310: PetscCall(ISRestoreIndices(iscol_o, &idx));
3312: PetscCall(ISDestroy(&iscol_o));
3313: PetscCall(ISCreateGeneral(PETSC_COMM_SELF, n, idx_new, PETSC_OWN_POINTER, &iscol_o));
3315: } else PetscCheck(BsubN >= n, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Columns of Bsub (%" PetscInt_FMT ") cannot be smaller than B's (%" PetscInt_FMT ")", BsubN, asub->B->cmap->N);
3317: PetscCall(PetscFree(garray));
3318: *submat = M;
3320: /* Save isrow_d, iscol_d and iscol_o used in processor for next request */
3321: PetscCall(PetscObjectCompose((PetscObject)M, "isrow_d", (PetscObject)isrow_d));
3322: PetscCall(ISDestroy(&isrow_d));
3324: PetscCall(PetscObjectCompose((PetscObject)M, "iscol_d", (PetscObject)iscol_d));
3325: PetscCall(ISDestroy(&iscol_d));
3327: PetscCall(PetscObjectCompose((PetscObject)M, "iscol_o", (PetscObject)iscol_o));
3328: PetscCall(ISDestroy(&iscol_o));
3329: }
3330: PetscFunctionReturn(PETSC_SUCCESS);
3331: }
3333: PetscErrorCode MatCreateSubMatrix_MPIAIJ(Mat mat, IS isrow, IS iscol, MatReuse call, Mat *newmat)
3334: {
3335: IS iscol_local = NULL, isrow_d;
3336: PetscInt csize;
3337: PetscInt n, i, j, start, end;
3338: PetscBool sameRowDist = PETSC_FALSE, sameDist[2], tsameDist[2];
3339: MPI_Comm comm;
3341: PetscFunctionBegin;
3342: /* If isrow has same processor distribution as mat,
3343: call MatCreateSubMatrix_MPIAIJ_SameRowDist() to avoid using a hash table with global size of iscol */
3344: if (call == MAT_REUSE_MATRIX) {
3345: PetscCall(PetscObjectQuery((PetscObject)*newmat, "isrow_d", (PetscObject *)&isrow_d));
3346: if (isrow_d) {
3347: sameRowDist = PETSC_TRUE;
3348: tsameDist[1] = PETSC_TRUE; /* sameColDist */
3349: } else {
3350: PetscCall(PetscObjectQuery((PetscObject)*newmat, "SubIScol", (PetscObject *)&iscol_local));
3351: if (iscol_local) {
3352: sameRowDist = PETSC_TRUE;
3353: tsameDist[1] = PETSC_FALSE; /* !sameColDist */
3354: }
3355: }
3356: } else {
3357: /* Check if isrow has same processor distribution as mat */
3358: sameDist[0] = PETSC_FALSE;
3359: PetscCall(ISGetLocalSize(isrow, &n));
3360: if (!n) {
3361: sameDist[0] = PETSC_TRUE;
3362: } else {
3363: PetscCall(ISGetMinMax(isrow, &i, &j));
3364: PetscCall(MatGetOwnershipRange(mat, &start, &end));
3365: if (i >= start && j < end) sameDist[0] = PETSC_TRUE;
3366: }
3368: /* Check if iscol has same processor distribution as mat */
3369: sameDist[1] = PETSC_FALSE;
3370: PetscCall(ISGetLocalSize(iscol, &n));
3371: if (!n) {
3372: sameDist[1] = PETSC_TRUE;
3373: } else {
3374: PetscCall(ISGetMinMax(iscol, &i, &j));
3375: PetscCall(MatGetOwnershipRangeColumn(mat, &start, &end));
3376: if (i >= start && j < end) sameDist[1] = PETSC_TRUE;
3377: }
3379: PetscCall(PetscObjectGetComm((PetscObject)mat, &comm));
3380: PetscCallMPI(MPIU_Allreduce(&sameDist, &tsameDist, 2, MPI_C_BOOL, MPI_LAND, comm));
3381: sameRowDist = tsameDist[0];
3382: }
3384: if (sameRowDist) {
3385: if (tsameDist[1]) { /* sameRowDist & sameColDist */
3386: /* isrow and iscol have same processor distribution as mat */
3387: PetscCall(MatCreateSubMatrix_MPIAIJ_SameRowColDist(mat, isrow, iscol, call, newmat));
3388: PetscFunctionReturn(PETSC_SUCCESS);
3389: } else { /* sameRowDist */
3390: /* isrow has same processor distribution as mat */
3391: if (call == MAT_INITIAL_MATRIX) {
3392: PetscBool sorted;
3393: PetscCall(ISGetSeqIS_Private(mat, iscol, &iscol_local));
3394: PetscCall(ISGetLocalSize(iscol_local, &n)); /* local size of iscol_local = global columns of newmat */
3395: PetscCall(ISGetSize(iscol, &i));
3396: PetscCheck(n == i, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "n %" PetscInt_FMT " != size of iscol %" PetscInt_FMT, n, i);
3398: PetscCall(ISSorted(iscol_local, &sorted));
3399: if (sorted) {
3400: /* MatCreateSubMatrix_MPIAIJ_SameRowDist() requires iscol_local be sorted; it can have duplicate indices */
3401: PetscCall(MatCreateSubMatrix_MPIAIJ_SameRowDist(mat, isrow, iscol, iscol_local, MAT_INITIAL_MATRIX, newmat));
3402: PetscFunctionReturn(PETSC_SUCCESS);
3403: }
3404: } else { /* call == MAT_REUSE_MATRIX */
3405: IS iscol_sub;
3406: PetscCall(PetscObjectQuery((PetscObject)*newmat, "SubIScol", (PetscObject *)&iscol_sub));
3407: if (iscol_sub) {
3408: PetscCall(MatCreateSubMatrix_MPIAIJ_SameRowDist(mat, isrow, iscol, NULL, call, newmat));
3409: PetscFunctionReturn(PETSC_SUCCESS);
3410: }
3411: }
3412: }
3413: }
3415: /* General case: iscol -> iscol_local which has global size of iscol */
3416: if (call == MAT_REUSE_MATRIX) {
3417: PetscCall(PetscObjectQuery((PetscObject)*newmat, "ISAllGather", (PetscObject *)&iscol_local));
3418: PetscCheck(iscol_local, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Submatrix passed in was not used before, cannot reuse");
3419: } else {
3420: if (!iscol_local) PetscCall(ISGetSeqIS_Private(mat, iscol, &iscol_local));
3421: }
3423: PetscCall(ISGetLocalSize(iscol, &csize));
3424: PetscCall(MatCreateSubMatrix_MPIAIJ_nonscalable(mat, isrow, iscol_local, csize, call, newmat));
3426: if (call == MAT_INITIAL_MATRIX) {
3427: PetscCall(PetscObjectCompose((PetscObject)*newmat, "ISAllGather", (PetscObject)iscol_local));
3428: PetscCall(ISDestroy(&iscol_local));
3429: }
3430: PetscFunctionReturn(PETSC_SUCCESS);
3431: }
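/*
   A minimal usage sketch (illustrative only) of the public MatCreateSubMatrix() call dispatched to the
   routine above for MATMPIAIJ; choosing the locally owned rows and columns keeps the index sets
   distributed like `A`, so this should follow the SameRowColDist path.

     IS       isrow, iscol;
     Mat      sub;
     PetscInt rstart, rend, cstart, cend;

     PetscCall(MatGetOwnershipRange(A, &rstart, &rend));
     PetscCall(MatGetOwnershipRangeColumn(A, &cstart, &cend));
     PetscCall(ISCreateStride(PetscObjectComm((PetscObject)A), rend - rstart, rstart, 1, &isrow));
     PetscCall(ISCreateStride(PetscObjectComm((PetscObject)A), cend - cstart, cstart, 1, &iscol));
     PetscCall(MatCreateSubMatrix(A, isrow, iscol, MAT_INITIAL_MATRIX, &sub));
     // later, with the same index sets, refresh sub with the current values of A
     PetscCall(MatCreateSubMatrix(A, isrow, iscol, MAT_REUSE_MATRIX, &sub));
     PetscCall(ISDestroy(&isrow));
     PetscCall(ISDestroy(&iscol));
     PetscCall(MatDestroy(&sub));
*/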
3433: /*@C
3434: MatCreateMPIAIJWithSeqAIJ - creates a `MATMPIAIJ` matrix using `MATSEQAIJ` matrices that contain the "diagonal"
3435: and "off-diagonal" part of the matrix in CSR format.
3437: Collective
3439: Input Parameters:
3440: + comm - MPI communicator
3441: . M - the global row size
3442: . N - the global column size
3443: . A - "diagonal" portion of matrix
3444: . B - the "off-diagonal" portion of the matrix; if `garray` is `NULL`, `B` should use global column ids and have global column size `N`; if `garray` is not `NULL`, `B` should use local column ids and have as many columns as entries in `garray`
3445: - garray - either `NULL` or the global index of `B` columns. If not `NULL`, it should be allocated by `PetscMalloc1()` and will be owned by `mat` thereafter.
3447: Output Parameter:
3448: . mat - the matrix, with input `A` as its local diagonal matrix
3450: Level: advanced
3452: Notes:
3453: See `MatCreateAIJ()` for the definition of "diagonal" and "off-diagonal" portion of the matrix.
3455: `A` and `B` become part of the output `mat`; the user must not use `A` and `B` afterwards.
3457: If `garray` is `NULL`, `B` will be compacted to use local indices, so `B`'s sparsity pattern (nonzerostate) will be changed. If `B` is a device matrix, we also need to update
3458: `B`'s copy on the device. We do so by increasing `B`'s nonzerostate. When `B` is next used on the device, device matrix types should detect this change (ref. internal routines `MatSeqAIJCUSPARSECopyToGPU()` or
3459: `MatAssemblyEnd_SeqAIJKokkos()`) and will simply destroy and then recreate the device copy of `B`. This is not optimal, but it is easy to implement and less hacky. To avoid this overhead, compute `garray`
3460: yourself; see the algorithms in the private function `MatSetUpMultiply_MPIAIJ()`.
3462: The `NULL`-ness of `garray` need not be collective; in other words, `garray` can be `NULL` on some processes but not on others.
3464: .seealso: [](ch_matrices), `Mat`, `MATMPIAIJ`, `MATSEQAIJ`, `MatCreateMPIAIJWithSplitArrays()`
3465: @*/
3466: PetscErrorCode MatCreateMPIAIJWithSeqAIJ(MPI_Comm comm, PetscInt M, PetscInt N, Mat A, Mat B, PetscInt *garray, Mat *mat)
3467: {
3468: PetscInt m, n;
3469: MatType mpi_mat_type;
3470: Mat_MPIAIJ *mpiaij;
3471: Mat C;
3473: PetscFunctionBegin;
3474: PetscCall(MatCreate(comm, &C));
3475: PetscCall(MatGetSize(A, &m, &n));
3476: PetscCheck(m == B->rmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Am %" PetscInt_FMT " != Bm %" PetscInt_FMT, m, B->rmap->N);
3477: PetscCheck(A->rmap->bs == B->rmap->bs, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "A row bs %" PetscInt_FMT " != B row bs %" PetscInt_FMT, A->rmap->bs, B->rmap->bs);
3479: PetscCall(MatSetSizes(C, m, n, M, N));
3480: /* Determine the type of MPI matrix that should be created from the type of matrix A, which holds the "diagonal" portion. */
3481: PetscCall(MatGetMPIMatType_Private(A, &mpi_mat_type));
3482: PetscCall(MatSetType(C, mpi_mat_type));
3483: if (!garray) {
3484: const PetscScalar *ba;
3486: B->nonzerostate++;
3487: PetscCall(MatSeqAIJGetArrayRead(B, &ba)); /* Since we will destroy B's device copy, we need to make sure the host copy is up to date */
3488: PetscCall(MatSeqAIJRestoreArrayRead(B, &ba));
3489: }
3491: PetscCall(MatSetBlockSizes(C, A->rmap->bs, A->cmap->bs));
3492: PetscCall(PetscLayoutSetUp(C->rmap));
3493: PetscCall(PetscLayoutSetUp(C->cmap));
3495: mpiaij = (Mat_MPIAIJ *)C->data;
3496: mpiaij->A = A;
3497: mpiaij->B = B;
3498: mpiaij->garray = garray;
3499: C->preallocated = PETSC_TRUE;
3500: C->nooffprocentries = PETSC_TRUE; /* See MatAssemblyBegin_MPIAIJ. In effect, making MatAssemblyBegin a nop */
3502: PetscCall(MatSetOption(C, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
3503: PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
3504: /* MatAssemblyEnd is critical here. It sets mat->offloadmask according to A and B's, and
3505: also gets mpiaij->B compacted (if garray is NULL), with its col ids and size reduced
3506: */
3507: PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
3508: PetscCall(MatSetOption(C, MAT_NO_OFF_PROC_ENTRIES, PETSC_FALSE));
3509: PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
3510: *mat = C;
3511: PetscFunctionReturn(PETSC_SUCCESS);
3512: }
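/*
   A minimal usage sketch (illustrative only) of the routine above; `Ad` and `Ao` are assumed to be
   already assembled MATSEQAIJ matrices holding this rank's "diagonal" and "off-diagonal" blocks, with
   `Ao` using GLOBAL column indices (hence garray == NULL), and `M`, `N` the global sizes.

     Mat C;

     PetscCall(MatCreateMPIAIJWithSeqAIJ(PETSC_COMM_WORLD, M, N, Ad, Ao, NULL, &C));
     // Ad and Ao now belong to C; the caller must not use or destroy them afterwards
*/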
3514: extern PetscErrorCode MatCreateSubMatrices_MPIAIJ_SingleIS_Local(Mat, PetscInt, const IS[], const IS[], MatReuse, PetscBool, Mat *);
3516: PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowDist(Mat mat, IS isrow, IS iscol, IS iscol_local, MatReuse call, Mat *newmat)
3517: {
3518: PetscInt i, m, n, rstart, row, rend, nz, j, bs, cbs;
3519: PetscInt *ii, *jj, nlocal, *dlens, *olens, dlen, olen, jend, mglobal;
3520: Mat_MPIAIJ *a = (Mat_MPIAIJ *)mat->data;
3521: Mat M, Msub, B = a->B;
3522: MatScalar *aa;
3523: Mat_SeqAIJ *aij;
3524: PetscInt *garray = a->garray, *colsub, Ncols;
3525: PetscInt count, Bn = B->cmap->N, cstart = mat->cmap->rstart, cend = mat->cmap->rend;
3526: IS iscol_sub, iscmap;
3527: const PetscInt *is_idx, *cmap;
3528: PetscBool allcolumns = PETSC_FALSE;
3529: MPI_Comm comm;
3531: PetscFunctionBegin;
3532: PetscCall(PetscObjectGetComm((PetscObject)mat, &comm));
3533: if (call == MAT_REUSE_MATRIX) {
3534: PetscCall(PetscObjectQuery((PetscObject)*newmat, "SubIScol", (PetscObject *)&iscol_sub));
3535: PetscCheck(iscol_sub, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "SubIScol passed in was not used before, cannot reuse");
3536: PetscCall(ISGetLocalSize(iscol_sub, &count));
3538: PetscCall(PetscObjectQuery((PetscObject)*newmat, "Subcmap", (PetscObject *)&iscmap));
3539: PetscCheck(iscmap, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Subcmap passed in was not used before, cannot reuse");
3541: PetscCall(PetscObjectQuery((PetscObject)*newmat, "SubMatrix", (PetscObject *)&Msub));
3542: PetscCheck(Msub, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Submatrix passed in was not used before, cannot reuse");
3544: PetscCall(MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat, 1, &isrow, &iscol_sub, MAT_REUSE_MATRIX, PETSC_FALSE, &Msub));
3546: } else { /* call == MAT_INITIAL_MATRIX */
3547: PetscBool flg;
3549: PetscCall(ISGetLocalSize(iscol, &n));
3550: PetscCall(ISGetSize(iscol, &Ncols));
3552: /* (1) iscol -> nonscalable iscol_local */
3553: /* Check for special case: each processor gets entire matrix columns */
3554: PetscCall(ISIdentity(iscol_local, &flg));
3555: if (flg && n == mat->cmap->N) allcolumns = PETSC_TRUE;
3556: PetscCallMPI(MPIU_Allreduce(MPI_IN_PLACE, &allcolumns, 1, MPI_C_BOOL, MPI_LAND, PetscObjectComm((PetscObject)mat)));
3557: if (allcolumns) {
3558: iscol_sub = iscol_local;
3559: PetscCall(PetscObjectReference((PetscObject)iscol_local));
3560: PetscCall(ISCreateStride(PETSC_COMM_SELF, n, 0, 1, &iscmap));
3562: } else {
3563: /* (2) iscol_local -> iscol_sub and iscmap. Implementation below requires iscol_local be sorted, it can have duplicate indices */
3564: PetscInt *idx, *cmap1, k;
3565: PetscCall(PetscMalloc1(Ncols, &idx));
3566: PetscCall(PetscMalloc1(Ncols, &cmap1));
3567: PetscCall(ISGetIndices(iscol_local, &is_idx));
3568: count = 0;
3569: k = 0;
3570: for (i = 0; i < Ncols; i++) {
3571: j = is_idx[i];
3572: if (j >= cstart && j < cend) {
3573: /* diagonal part of mat */
3574: idx[count] = j;
3575: cmap1[count++] = i; /* column index in submat */
3576: } else if (Bn) {
3577: /* off-diagonal part of mat */
3578: if (j == garray[k]) {
3579: idx[count] = j;
3580: cmap1[count++] = i; /* column index in submat */
3581: } else if (j > garray[k]) {
3582: while (j > garray[k] && k < Bn - 1) k++;
3583: if (j == garray[k]) {
3584: idx[count] = j;
3585: cmap1[count++] = i; /* column index in submat */
3586: }
3587: }
3588: }
3589: }
3590: PetscCall(ISRestoreIndices(iscol_local, &is_idx));
3592: PetscCall(ISCreateGeneral(PETSC_COMM_SELF, count, idx, PETSC_OWN_POINTER, &iscol_sub));
3593: PetscCall(ISGetBlockSize(iscol, &cbs));
3594: PetscCall(ISSetBlockSize(iscol_sub, cbs));
3596: PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)iscol_local), count, cmap1, PETSC_OWN_POINTER, &iscmap));
3597: }
3599: /* (3) Create sequential Msub */
3600: PetscCall(MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat, 1, &isrow, &iscol_sub, MAT_INITIAL_MATRIX, allcolumns, &Msub));
3601: }
3603: PetscCall(ISGetLocalSize(iscol_sub, &count));
3604: aij = (Mat_SeqAIJ *)Msub->data;
3605: ii = aij->i;
3606: PetscCall(ISGetIndices(iscmap, &cmap));
3608: /*
3609: m - number of local rows
3610: Ncols - number of columns (same on all processors)
3611: rstart - first row in new global matrix generated
3612: */
3613: PetscCall(MatGetSize(Msub, &m, NULL));
3615: if (call == MAT_INITIAL_MATRIX) {
3616: /* (4) Create parallel newmat */
3617: PetscMPIInt rank, size;
3618: PetscInt csize;
3620: PetscCallMPI(MPI_Comm_size(comm, &size));
3621: PetscCallMPI(MPI_Comm_rank(comm, &rank));
3623: /*
3624: Determine the number of non-zeros in the diagonal and off-diagonal
3625: portions of the matrix in order to do correct preallocation
3626: */
3628: /* first get start and end of "diagonal" columns */
3629: PetscCall(ISGetLocalSize(iscol, &csize));
3630: if (csize == PETSC_DECIDE) {
3631: PetscCall(ISGetSize(isrow, &mglobal));
3632: if (mglobal == Ncols) { /* square matrix */
3633: nlocal = m;
3634: } else {
3635: nlocal = Ncols / size + ((Ncols % size) > rank);
3636: }
3637: } else {
3638: nlocal = csize;
3639: }
3640: PetscCallMPI(MPI_Scan(&nlocal, &rend, 1, MPIU_INT, MPI_SUM, comm));
3641: rstart = rend - nlocal;
3642: PetscCheck(rank != size - 1 || rend == Ncols, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Local column sizes %" PetscInt_FMT " do not add up to total number of columns %" PetscInt_FMT, rend, Ncols);
3644: /* next, compute all the lengths */
3645: jj = aij->j;
3646: PetscCall(PetscMalloc1(2 * m + 1, &dlens));
3647: olens = dlens + m;
3648: for (i = 0; i < m; i++) {
3649: jend = ii[i + 1] - ii[i];
3650: olen = 0;
3651: dlen = 0;
3652: for (j = 0; j < jend; j++) {
3653: if (cmap[*jj] < rstart || cmap[*jj] >= rend) olen++;
3654: else dlen++;
3655: jj++;
3656: }
3657: olens[i] = olen;
3658: dlens[i] = dlen;
3659: }
3661: PetscCall(ISGetBlockSize(isrow, &bs));
3662: PetscCall(ISGetBlockSize(iscol, &cbs));
3664: PetscCall(MatCreate(comm, &M));
3665: PetscCall(MatSetSizes(M, m, nlocal, PETSC_DECIDE, Ncols));
3666: PetscCall(MatSetBlockSizes(M, bs, cbs));
3667: PetscCall(MatSetType(M, ((PetscObject)mat)->type_name));
3668: PetscCall(MatMPIAIJSetPreallocation(M, 0, dlens, 0, olens));
3669: PetscCall(PetscFree(dlens));
3671: } else { /* call == MAT_REUSE_MATRIX */
3672: M = *newmat;
3673: PetscCall(MatGetLocalSize(M, &i, NULL));
3674: PetscCheck(i == m, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Previous matrix must be same size/layout as request");
3675: PetscCall(MatZeroEntries(M));
3676: /*
3677: The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3678: rather than the slower MatSetValues().
3679: */
3680: M->was_assembled = PETSC_TRUE;
3681: M->assembled = PETSC_FALSE;
3682: }
3684: /* (5) Set values of Msub to *newmat */
3685: PetscCall(PetscMalloc1(count, &colsub));
3686: PetscCall(MatGetOwnershipRange(M, &rstart, NULL));
3688: jj = aij->j;
3689: PetscCall(MatSeqAIJGetArrayRead(Msub, (const PetscScalar **)&aa));
3690: for (i = 0; i < m; i++) {
3691: row = rstart + i;
3692: nz = ii[i + 1] - ii[i];
3693: for (j = 0; j < nz; j++) colsub[j] = cmap[jj[j]];
3694: PetscCall(MatSetValues_MPIAIJ(M, 1, &row, nz, colsub, aa, INSERT_VALUES));
3695: jj += nz;
3696: aa += nz;
3697: }
3698: PetscCall(MatSeqAIJRestoreArrayRead(Msub, (const PetscScalar **)&aa));
3699: PetscCall(ISRestoreIndices(iscmap, &cmap));
3701: PetscCall(MatAssemblyBegin(M, MAT_FINAL_ASSEMBLY));
3702: PetscCall(MatAssemblyEnd(M, MAT_FINAL_ASSEMBLY));
3704: PetscCall(PetscFree(colsub));
3706: /* save Msub, iscol_sub and iscmap used in processor for next request */
3707: if (call == MAT_INITIAL_MATRIX) {
3708: *newmat = M;
3709: PetscCall(PetscObjectCompose((PetscObject)*newmat, "SubMatrix", (PetscObject)Msub));
3710: PetscCall(MatDestroy(&Msub));
3712: PetscCall(PetscObjectCompose((PetscObject)*newmat, "SubIScol", (PetscObject)iscol_sub));
3713: PetscCall(ISDestroy(&iscol_sub));
3715: PetscCall(PetscObjectCompose((PetscObject)*newmat, "Subcmap", (PetscObject)iscmap));
3716: PetscCall(ISDestroy(&iscmap));
3718: if (iscol_local) {
3719: PetscCall(PetscObjectCompose((PetscObject)*newmat, "ISAllGather", (PetscObject)iscol_local));
3720: PetscCall(ISDestroy(&iscol_local));
3721: }
3722: }
3723: PetscFunctionReturn(PETSC_SUCCESS);
3724: }
3726: /*
3727: Not great since it makes two copies of the submatrix: first a SeqAIJ
3728: locally, and then the final result by concatenating the local matrices.
3729: Writing it directly would be much like MatCreateSubMatrices_MPIAIJ()
3731: This requires a sequential iscol with all indices.
3732: */
3733: PetscErrorCode MatCreateSubMatrix_MPIAIJ_nonscalable(Mat mat, IS isrow, IS iscol, PetscInt csize, MatReuse call, Mat *newmat)
3734: {
3735: PetscMPIInt rank, size;
3736: PetscInt i, m, n, rstart, row, rend, nz, *cwork, j, bs, cbs;
3737: PetscInt *ii, *jj, nlocal, *dlens, *olens, dlen, olen, jend, mglobal;
3738: Mat M, Mreuse;
3739: MatScalar *aa, *vwork;
3740: MPI_Comm comm;
3741: Mat_SeqAIJ *aij;
3742: PetscBool colflag, allcolumns = PETSC_FALSE;
3744: PetscFunctionBegin;
3745: PetscCall(PetscObjectGetComm((PetscObject)mat, &comm));
3746: PetscCallMPI(MPI_Comm_rank(comm, &rank));
3747: PetscCallMPI(MPI_Comm_size(comm, &size));
3749: /* Check for special case: each processor gets entire matrix columns */
3750: PetscCall(ISIdentity(iscol, &colflag));
3751: PetscCall(ISGetLocalSize(iscol, &n));
3752: if (colflag && n == mat->cmap->N) allcolumns = PETSC_TRUE;
3753: PetscCallMPI(MPIU_Allreduce(MPI_IN_PLACE, &allcolumns, 1, MPI_C_BOOL, MPI_LAND, PetscObjectComm((PetscObject)mat)));
3755: if (call == MAT_REUSE_MATRIX) {
3756: PetscCall(PetscObjectQuery((PetscObject)*newmat, "SubMatrix", (PetscObject *)&Mreuse));
3757: PetscCheck(Mreuse, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Submatrix passed in was not used before, cannot reuse");
3758: PetscCall(MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat, 1, &isrow, &iscol, MAT_REUSE_MATRIX, allcolumns, &Mreuse));
3759: } else {
3760: PetscCall(MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat, 1, &isrow, &iscol, MAT_INITIAL_MATRIX, allcolumns, &Mreuse));
3761: }
3763: /*
3764: m - number of local rows
3765: n - number of columns (same on all processors)
3766: rstart - first row in new global matrix generated
3767: */
3768: PetscCall(MatGetSize(Mreuse, &m, &n));
3769: PetscCall(MatGetBlockSizes(Mreuse, &bs, &cbs));
3770: if (call == MAT_INITIAL_MATRIX) {
3771: aij = (Mat_SeqAIJ *)Mreuse->data;
3772: ii = aij->i;
3773: jj = aij->j;
3775: /*
3776: Determine the number of non-zeros in the diagonal and off-diagonal
3777: portions of the matrix in order to do correct preallocation
3778: */
3780: /* first get start and end of "diagonal" columns */
3781: if (csize == PETSC_DECIDE) {
3782: PetscCall(ISGetSize(isrow, &mglobal));
3783: if (mglobal == n) { /* square matrix */
3784: nlocal = m;
3785: } else {
3786: nlocal = n / size + ((n % size) > rank);
3787: }
3788: } else {
3789: nlocal = csize;
3790: }
3791: PetscCallMPI(MPI_Scan(&nlocal, &rend, 1, MPIU_INT, MPI_SUM, comm));
3792: rstart = rend - nlocal;
3793: PetscCheck(rank != size - 1 || rend == n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Local column sizes %" PetscInt_FMT " do not add up to total number of columns %" PetscInt_FMT, rend, n);
3795: /* next, compute all the lengths */
3796: PetscCall(PetscMalloc1(2 * m + 1, &dlens));
3797: olens = dlens + m;
3798: for (i = 0; i < m; i++) {
3799: jend = ii[i + 1] - ii[i];
3800: olen = 0;
3801: dlen = 0;
3802: for (j = 0; j < jend; j++) {
3803: if (*jj < rstart || *jj >= rend) olen++;
3804: else dlen++;
3805: jj++;
3806: }
3807: olens[i] = olen;
3808: dlens[i] = dlen;
3809: }
3810: PetscCall(MatCreate(comm, &M));
3811: PetscCall(MatSetSizes(M, m, nlocal, PETSC_DECIDE, n));
3812: PetscCall(MatSetBlockSizes(M, bs, cbs));
3813: PetscCall(MatSetType(M, ((PetscObject)mat)->type_name));
3814: PetscCall(MatMPIAIJSetPreallocation(M, 0, dlens, 0, olens));
3815: PetscCall(PetscFree(dlens));
3816: } else {
3817: PetscInt ml, nl;
3819: M = *newmat;
3820: PetscCall(MatGetLocalSize(M, &ml, &nl));
3821: PetscCheck(ml == m, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Previous matrix must be same size/layout as request");
3822: PetscCall(MatZeroEntries(M));
3823: /*
3824: The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3825: rather than the slower MatSetValues().
3826: */
3827: M->was_assembled = PETSC_TRUE;
3828: M->assembled = PETSC_FALSE;
3829: }
3830: PetscCall(MatGetOwnershipRange(M, &rstart, &rend));
3831: aij = (Mat_SeqAIJ *)Mreuse->data;
3832: ii = aij->i;
3833: jj = aij->j;
3835: /* trigger copy to CPU if needed */
3836: PetscCall(MatSeqAIJGetArrayRead(Mreuse, (const PetscScalar **)&aa));
3837: for (i = 0; i < m; i++) {
3838: row = rstart + i;
3839: nz = ii[i + 1] - ii[i];
3840: cwork = jj;
3841: jj = PetscSafePointerPlusOffset(jj, nz);
3842: vwork = aa;
3843: aa = PetscSafePointerPlusOffset(aa, nz);
3844: PetscCall(MatSetValues_MPIAIJ(M, 1, &row, nz, cwork, vwork, INSERT_VALUES));
3845: }
3846: PetscCall(MatSeqAIJRestoreArrayRead(Mreuse, (const PetscScalar **)&aa));
3848: PetscCall(MatAssemblyBegin(M, MAT_FINAL_ASSEMBLY));
3849: PetscCall(MatAssemblyEnd(M, MAT_FINAL_ASSEMBLY));
3850: *newmat = M;
3852: /* save submatrix used in processor for next request */
3853: if (call == MAT_INITIAL_MATRIX) {
3854: PetscCall(PetscObjectCompose((PetscObject)M, "SubMatrix", (PetscObject)Mreuse));
3855: PetscCall(MatDestroy(&Mreuse));
3856: }
3857: PetscFunctionReturn(PETSC_SUCCESS);
3858: }
3860: static PetscErrorCode MatMPIAIJSetPreallocationCSR_MPIAIJ(Mat B, const PetscInt Ii[], const PetscInt J[], const PetscScalar v[])
3861: {
3862: PetscInt m, cstart, cend, j, nnz, i, d, *ld;
3863: PetscInt *d_nnz, *o_nnz, nnz_max = 0, rstart, ii, irstart;
3864: const PetscInt *JJ;
3865: PetscBool nooffprocentries;
3866: Mat_MPIAIJ *Aij = (Mat_MPIAIJ *)B->data;
3868: PetscFunctionBegin;
3869: PetscCall(PetscLayoutSetUp(B->rmap));
3870: PetscCall(PetscLayoutSetUp(B->cmap));
3871: m = B->rmap->n;
3872: cstart = B->cmap->rstart;
3873: cend = B->cmap->rend;
3874: rstart = B->rmap->rstart;
3875: irstart = Ii[0];
3877: PetscCall(PetscCalloc2(m, &d_nnz, m, &o_nnz));
3879: if (PetscDefined(USE_DEBUG)) {
3880: for (i = 0; i < m; i++) {
3881: nnz = Ii[i + 1] - Ii[i];
3882: JJ = PetscSafePointerPlusOffset(J, Ii[i] - irstart);
3883: PetscCheck(nnz >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Local row %" PetscInt_FMT " has a negative %" PetscInt_FMT " number of columns", i, nnz);
3884: PetscCheck(!nnz || !(JJ[0] < 0), PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Row %" PetscInt_FMT " starts with negative column index %" PetscInt_FMT, i, JJ[0]);
3885: PetscCheck(!nnz || !(JJ[nnz - 1] >= B->cmap->N), PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Row %" PetscInt_FMT " ends with too large a column index %" PetscInt_FMT " (max allowed %" PetscInt_FMT ")", i, JJ[nnz - 1], B->cmap->N);
3886: }
3887: }
3889: for (i = 0; i < m; i++) {
3890: nnz = Ii[i + 1] - Ii[i];
3891: JJ = PetscSafePointerPlusOffset(J, Ii[i] - irstart);
3892: nnz_max = PetscMax(nnz_max, nnz);
3893: d = 0;
3894: for (j = 0; j < nnz; j++) {
3895: if (cstart <= JJ[j] && JJ[j] < cend) d++;
3896: }
3897: d_nnz[i] = d;
3898: o_nnz[i] = nnz - d;
3899: }
3900: PetscCall(MatMPIAIJSetPreallocation(B, 0, d_nnz, 0, o_nnz));
3901: PetscCall(PetscFree2(d_nnz, o_nnz));
3903: for (i = 0; i < m; i++) {
3904: ii = i + rstart;
3905: PetscCall(MatSetValues_MPIAIJ(B, 1, &ii, Ii[i + 1] - Ii[i], PetscSafePointerPlusOffset(J, Ii[i] - irstart), PetscSafePointerPlusOffset(v, Ii[i] - irstart), INSERT_VALUES));
3906: }
3907: nooffprocentries = B->nooffprocentries;
3908: B->nooffprocentries = PETSC_TRUE;
3909: PetscCall(MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY));
3910: PetscCall(MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY));
3911: B->nooffprocentries = nooffprocentries;
3913: /* count number of entries below block diagonal */
3914: PetscCall(PetscFree(Aij->ld));
3915: PetscCall(PetscCalloc1(m, &ld));
3916: Aij->ld = ld;
3917: for (i = 0; i < m; i++) {
3918: nnz = Ii[i + 1] - Ii[i];
3919: j = 0;
3920: while (j < nnz && J[j] < cstart) j++;
3921: ld[i] = j;
3922: if (J) J += nnz;
3923: }
3925: PetscCall(MatSetOption(B, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
3926: PetscFunctionReturn(PETSC_SUCCESS);
3927: }
3929: /*@
3930: MatMPIAIJSetPreallocationCSR - Allocates memory for a sparse parallel matrix in `MATAIJ` format
3931: (the default parallel PETSc format).
3933: Collective
3935: Input Parameters:
3936: + B - the matrix
3937: . i - the indices into `j` for the start of each local row (indices start with zero)
3938: . j - the column indices for each local row (indices start with zero)
3939: - v - optional values in the matrix
3941: Level: developer
3943: Notes:
3944: The `i`, `j`, and `v` arrays ARE copied by this routine into the internal format used by PETSc;
3945: thus you CANNOT change the matrix entries by changing the values of `v` after you have
3946: called this routine. Use `MatCreateMPIAIJWithSplitArrays()` to avoid needing to copy the arrays.
3948: The `i` and `j` indices are 0 based, and the `i` indices are indices into the local `j` array.
3950: A convenience routine for this functionality is `MatCreateMPIAIJWithArrays()`.
3952: You can update the matrix with new numerical values using `MatUpdateMPIAIJWithArrays()` after this call if the column indices in `j` are sorted.
3954: If you do **not** use `MatUpdateMPIAIJWithArrays()`, the column indices in `j` do not need to be sorted. If you will use
3955: `MatUpdateMPIAIJWithArrays()`, the column indices **must** be sorted.
3957: The format used for the sparse matrix input is equivalent to a
3958: row-major ordering, i.e., for the following matrix, the expected input data is
3959: as shown
3960: .vb
3961: 1 0 0
3962: 2 0 3 P0
3963: -------
3964: 4 5 6 P1
3966: Process0 [P0] rows_owned=[0,1]
3967: i = {0,1,3} [size = nrow+1 = 2+1]
3968: j = {0,0,2} [size = 3]
3969: v = {1,2,3} [size = 3]
3971: Process1 [P1] rows_owned=[2]
3972: i = {0,3} [size = nrow+1 = 1+1]
3973: j = {0,1,2} [size = 3]
3974: v = {4,5,6} [size = 3]
3975: .ve
3977: .seealso: [](ch_matrices), `Mat`, `MATMPIAIJ`, `MatCreate()`, `MatCreateSeqAIJ()`, `MatSetValues()`, `MatMPIAIJSetPreallocation()`, `MatCreateAIJ()`,
3978: `MatCreateSeqAIJWithArrays()`, `MatCreateMPIAIJWithSplitArrays()`, `MatCreateMPIAIJWithArrays()`, `MatSetPreallocationCOO()`, `MatSetValuesCOO()`
3979: @*/
3980: PetscErrorCode MatMPIAIJSetPreallocationCSR(Mat B, const PetscInt i[], const PetscInt j[], const PetscScalar v[])
3981: {
3982: PetscFunctionBegin;
3983: PetscTryMethod(B, "MatMPIAIJSetPreallocationCSR_C", (Mat, const PetscInt[], const PetscInt[], const PetscScalar[]), (B, i, j, v));
3984: PetscFunctionReturn(PETSC_SUCCESS);
3985: }
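/*
   A minimal usage sketch (illustrative only) using the rank-0 data of the 3x3 example in the manual
   page above; the other rank would pass its own i/j/v arrays.

     Mat         A;
     PetscInt    i0[] = {0, 1, 3}, j0[] = {0, 0, 2};
     PetscScalar v0[] = {1, 2, 3};

     PetscCall(MatCreate(PETSC_COMM_WORLD, &A));
     PetscCall(MatSetSizes(A, 2, PETSC_DECIDE, 3, 3)); // rank 0 owns rows 0 and 1
     PetscCall(MatSetType(A, MATMPIAIJ));
     PetscCall(MatMPIAIJSetPreallocationCSR(A, i0, j0, v0));
*/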
3987: /*@
3988: MatMPIAIJSetPreallocation - Preallocates memory for a sparse parallel matrix in `MATMPIAIJ` format
3989: (the default parallel PETSc format). For good matrix assembly performance
3990: the user should preallocate the matrix storage by setting the parameters
3991: `d_nz` (or `d_nnz`) and `o_nz` (or `o_nnz`).
3993: Collective
3995: Input Parameters:
3996: + B - the matrix
3997: . d_nz - number of nonzeros per row in DIAGONAL portion of local submatrix
3998: (same value is used for all local rows)
3999: . d_nnz - array containing the number of nonzeros in the various rows of the
4000: DIAGONAL portion of the local submatrix (possibly different for each row)
4001: or `NULL` (`PETSC_NULL_INTEGER` in Fortran), if `d_nz` is used to specify the nonzero structure.
4002: The size of this array is equal to the number of local rows, i.e., 'm'.
4003: For matrices that will be factored, you must leave room for (and set)
4004: the diagonal entry even if it is zero.
4005: . o_nz - number of nonzeros per row in the OFF-DIAGONAL portion of local
4006: submatrix (same value is used for all local rows).
4007: - o_nnz - array containing the number of nonzeros in the various rows of the
4008: OFF-DIAGONAL portion of the local submatrix (possibly different for
4009: each row) or `NULL` (`PETSC_NULL_INTEGER` in Fortran), if `o_nz` is used to specify the nonzero
4010: structure. The size of this array is equal to the number
4011: of local rows, i.e., 'm'.
4013: Example Usage:
4014: Consider the following 8x8 matrix with 34 non-zero values, that is
4015: assembled across 3 processors. Let's assume that proc0 owns 3 rows,
4016: proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
4017: as follows
4019: .vb
4020: 1 2 0 | 0 3 0 | 0 4
4021: Proc0 0 5 6 | 7 0 0 | 8 0
4022: 9 0 10 | 11 0 0 | 12 0
4023: -------------------------------------
4024: 13 0 14 | 15 16 17 | 0 0
4025: Proc1 0 18 0 | 19 20 21 | 0 0
4026: 0 0 0 | 22 23 0 | 24 0
4027: -------------------------------------
4028: Proc2 25 26 27 | 0 0 28 | 29 0
4029: 30 0 0 | 31 32 33 | 0 34
4030: .ve
4032: This can be represented as a collection of submatrices as
4033: .vb
4034: A B C
4035: D E F
4036: G H I
4037: .ve
4039: Where the submatrices A,B,C are owned by proc0, D,E,F are
4040: owned by proc1, G,H,I are owned by proc2.
4042: The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4043: The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4044: The 'M','N' parameters are 8,8, and have the same values on all procs.
4046: The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
4047: submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
4048: corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
4049: Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
4050: part as `MATSEQAIJ` matrices. For example, proc1 will store [E] as a `MATSEQAIJ`
4051: matrix, and [DF] as another `MATSEQAIJ` matrix.
4053: When `d_nz`, `o_nz` parameters are specified, `d_nz` storage elements are
4054: allocated for every row of the local DIAGONAL submatrix, and `o_nz`
4055: storage locations are allocated for every row of the OFF-DIAGONAL submatrix.
4056: One way to choose `d_nz` and `o_nz` is to use the maximum number of nonzeros over
4057: the local rows for each of the local DIAGONAL, and the OFF-DIAGONAL submatrices.
4058: In this case, the values of `d_nz`, `o_nz` are
4059: .vb
4060: proc0 d_nz = 2, o_nz = 2
4061: proc1 d_nz = 3, o_nz = 2
4062: proc2 d_nz = 1, o_nz = 4
4063: .ve
4064: We are allocating `m`*(`d_nz`+`o_nz`) storage locations for every proc. This
4065: translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
4066: for proc2, i.e., we are using 12+15+10=37 storage locations to store
4067: 34 values.
4069: When `d_nnz`, `o_nnz` parameters are specified, the storage is specified
4070: for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
4071: In the above case the values for `d_nnz`, `o_nnz` are
4072: .vb
4073: proc0 d_nnz = [2,2,2] and o_nnz = [2,2,2]
4074: proc1 d_nnz = [3,3,2] and o_nnz = [2,1,1]
4075: proc2 d_nnz = [1,1] and o_nnz = [4,4]
4076: .ve
4077: Here the space allocated is the sum of all the above values, i.e., 34, and
4078: hence the preallocation is perfect.
4080: Level: intermediate
4082: Notes:
4083: If the *_nnz parameter is given then the *_nz parameter is ignored
4085: The `MATAIJ` format, also called compressed row storage (CSR), is compatible with standard Fortran
4086: storage. The stored row and column indices begin with zero.
4087: See [Sparse Matrices](sec_matsparse) for details.
4089: The parallel matrix is partitioned such that the first m0 rows belong to
4090: process 0, the next m1 rows belong to process 1, the next m2 rows belong
4091: to process 2, etc., where m0,m1,m2,... are the input parameter 'm'.
4093: The DIAGONAL portion of the local submatrix of a processor can be defined
4094: as the submatrix which is obtained by extracting the part corresponding to
4095: the rows r1-r2 and columns c1-c2 of the global matrix, where r1 is the
4096: first row that belongs to the processor, r2 is the last row belonging to
4097: this processor, and c1-c2 is the range of indices of the local part of a
4098: vector suitable for applying the matrix to. This is an mxn matrix. In the
4099: common case of a square matrix, the row and column ranges are the same and
4100: the DIAGONAL part is also square. The remaining portion of the local
4101: submatrix (mxN) constitutes the OFF-DIAGONAL portion.
4103: If `o_nnz` and `d_nnz` are specified, then `o_nz` and `d_nz` are ignored.
4105: You can call `MatGetInfo()` to get information on how effective the preallocation was;
4106: for example, the fields mallocs, nz_allocated, nz_used, and nz_unneeded.
4107: You can also run with the option `-info` and look for messages with the string
4108: malloc in them to see if additional memory allocation was needed.
4110: .seealso: [](ch_matrices), `Mat`, [Sparse Matrices](sec_matsparse), `MATMPIAIJ`, `MATAIJ`, `MatCreate()`, `MatCreateSeqAIJ()`, `MatSetValues()`, `MatCreateAIJ()`, `MatMPIAIJSetPreallocationCSR()`,
4111: `MatGetInfo()`, `PetscSplitOwnership()`, `MatSetPreallocationCOO()`, `MatSetValuesCOO()`
4112: @*/
4113: PetscErrorCode MatMPIAIJSetPreallocation(Mat B, PetscInt d_nz, const PetscInt d_nnz[], PetscInt o_nz, const PetscInt o_nnz[])
4114: {
4115: PetscFunctionBegin;
4118: PetscTryMethod(B, "MatMPIAIJSetPreallocation_C", (Mat, PetscInt, const PetscInt[], PetscInt, const PetscInt[]), (B, d_nz, d_nnz, o_nz, o_nnz));
4119: PetscFunctionReturn(PETSC_SUCCESS);
4120: }
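/*
   A minimal usage sketch (illustrative only) using the proc0 values of the 8x8 example in the manual
   page above; each rank passes its own d_nnz/o_nnz arrays.

     Mat      A;
     PetscInt d_nnz[] = {2, 2, 2}, o_nnz[] = {2, 2, 2};

     PetscCall(MatCreate(PETSC_COMM_WORLD, &A));
     PetscCall(MatSetSizes(A, 3, 3, 8, 8)); // proc0 owns 3 rows and 3 "diagonal" columns
     PetscCall(MatSetType(A, MATMPIAIJ));
     PetscCall(MatMPIAIJSetPreallocation(A, 0, d_nnz, 0, o_nnz));
*/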
4122: /*@
4123: MatCreateMPIAIJWithArrays - creates a `MATMPIAIJ` matrix using arrays that contain the local rows in standard
4124: CSR format.
4126: Collective
4128: Input Parameters:
4129: + comm - MPI communicator
4130: . m - number of local rows (Cannot be `PETSC_DECIDE`)
4131: . n - This value should be the same as the local size used in creating the
4132: x vector for the matrix-vector product $y = Ax$ (or `PETSC_DECIDE` to have it
4133: calculated if `N` is given). For square matrices n is almost always `m`.
4134: . M - number of global rows (or `PETSC_DETERMINE` to have calculated if `m` is given)
4135: . N - number of global columns (or `PETSC_DETERMINE` to have calculated if `n` is given)
4136: . i - row indices (of length m+1); that is i[0] = 0, i[row] = i[row-1] + number of elements in that row of the matrix
4137: . j - global column indices
4138: - a - optional matrix values
4140: Output Parameter:
4141: . mat - the matrix
4143: Level: intermediate
4145: Notes:
4146: The `i`, `j`, and `a` arrays ARE copied by this routine into the internal format used by PETSc;
4147: thus you CANNOT change the matrix entries by changing the values of `a[]` after you have
4148: called this routine. Use `MatCreateMPIAIJWithSplitArrays()` to avoid needing to copy the arrays.
4150: The `i` and `j` indices are 0 based, and the `i` indices are indices into the local `j` array.
4152: Once you have created the matrix you can update it with new numerical values using `MatUpdateMPIAIJWithArray()`.
4154: If you do **not** use `MatUpdateMPIAIJWithArray()`, the column indices in `j` do not need to be sorted. If you will use
4155: `MatUpdateMPIAIJWithArrays()`, the column indices **must** be sorted.
4157: The format used for the sparse matrix input is equivalent to a
4158: row-major ordering, i.e., for the following matrix, the expected input data is
4159: as shown
4160: .vb
4161: 1 0 0
4162: 2 0 3 P0
4163: -------
4164: 4 5 6 P1
4166: Process0 [P0] rows_owned=[0,1]
4167: i = {0,1,3} [size = nrow+1 = 2+1]
4168: j = {0,0,2} [size = 3]
4169: v = {1,2,3} [size = 3]
4171: Process1 [P1] rows_owned=[2]
4172: i = {0,3} [size = nrow+1 = 1+1]
4173: j = {0,1,2} [size = 3]
4174: v = {4,5,6} [size = 3]
4175: .ve
4177: .seealso: [](ch_matrices), `Mat`, `MatCreate()`, `MatCreateSeqAIJ()`, `MatSetValues()`, `MatMPIAIJSetPreallocation()`, `MatMPIAIJSetPreallocationCSR()`,
4178: `MATMPIAIJ`, `MatCreateAIJ()`, `MatCreateMPIAIJWithSplitArrays()`, `MatUpdateMPIAIJWithArray()`, `MatSetPreallocationCOO()`, `MatSetValuesCOO()`
4179: @*/
4180: PetscErrorCode MatCreateMPIAIJWithArrays(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt M, PetscInt N, const PetscInt i[], const PetscInt j[], const PetscScalar a[], Mat *mat)
4181: {
4182: PetscFunctionBegin;
4183: PetscCheck(!i || !i[0], PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "i (row indices) must start with 0");
4184: PetscCheck(m >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "local number of rows (m) cannot be PETSC_DECIDE, or negative");
4185: PetscCall(MatCreate(comm, mat));
4186: PetscCall(MatSetSizes(*mat, m, n, M, N));
4187: /* PetscCall(MatSetBlockSizes(M,bs,cbs)); */
4188: PetscCall(MatSetType(*mat, MATMPIAIJ));
4189: PetscCall(MatMPIAIJSetPreallocationCSR(*mat, i, j, a));
4190: PetscFunctionReturn(PETSC_SUCCESS);
4191: }
4193: /*@
4194: MatUpdateMPIAIJWithArrays - updates a `MATMPIAIJ` matrix using arrays that contain the local rows in standard
4195: CSR format. Only the numerical values are updated; the other arrays must be identical to what was passed
4196: to `MatCreateMPIAIJWithArrays()`
4198: Deprecated: Use `MatUpdateMPIAIJWithArray()`
4200: Collective
4202: Input Parameters:
4203: + mat - the matrix
4204: . m - number of local rows (Cannot be `PETSC_DECIDE`)
4205: . n - This value should be the same as the local size used in creating the
4206: x vector for the matrix-vector product y = Ax (or `PETSC_DECIDE` to have it
4207: calculated if N is given). For square matrices n is almost always m.
4208: . M - number of global rows (or `PETSC_DETERMINE` to have it calculated if m is given)
4209: . N - number of global columns (or `PETSC_DETERMINE` to have it calculated if n is given)
4210: . Ii - row indices; that is Ii[0] = 0, Ii[row] = Ii[row-1] + number of elements in that row of the matrix
4211: . J - column indices
4212: - v - matrix values
4214: Level: deprecated
4216: .seealso: [](ch_matrices), `Mat`, `MATMPIAIJ`, `MatCreate()`, `MatCreateSeqAIJ()`, `MatSetValues()`, `MatMPIAIJSetPreallocation()`, `MatMPIAIJSetPreallocationCSR()`,
4217: `MatCreateAIJ()`, `MatCreateMPIAIJWithSplitArrays()`, `MatUpdateMPIAIJWithArray()`, `MatSetPreallocationCOO()`, `MatSetValuesCOO()`
4218: @*/
4219: PetscErrorCode MatUpdateMPIAIJWithArrays(Mat mat, PetscInt m, PetscInt n, PetscInt M, PetscInt N, const PetscInt Ii[], const PetscInt J[], const PetscScalar v[])
4220: {
4221: PetscInt nnz, i;
4222: PetscBool nooffprocentries;
4223: Mat_MPIAIJ *Aij = (Mat_MPIAIJ *)mat->data;
4224: Mat_SeqAIJ *Ad = (Mat_SeqAIJ *)Aij->A->data;
4225: PetscScalar *ad, *ao;
4226: PetscInt ldi, Iii, md;
4227: const PetscInt *Adi = Ad->i;
4228: PetscInt *ld = Aij->ld;
4230: PetscFunctionBegin;
4231: PetscCheck(Ii[0] == 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "i (row indices) must start with 0");
4232: PetscCheck(m >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "local number of rows (m) cannot be PETSC_DECIDE, or negative");
4233: PetscCheck(m == mat->rmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Local number of rows cannot change from call to MatUpdateMPIAIJWithArrays()");
4234: PetscCheck(n == mat->cmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Local number of columns cannot change from call to MatUpdateMPIAIJWithArrays()");
4236: PetscCall(MatSeqAIJGetArrayWrite(Aij->A, &ad));
4237: PetscCall(MatSeqAIJGetArrayWrite(Aij->B, &ao));
4239: for (i = 0; i < m; i++) {
4240: if (PetscDefined(USE_DEBUG)) {
4241: for (PetscInt j = Ii[i] + 1; j < Ii[i + 1]; ++j) {
4242: PetscCheck(J[j] >= J[j - 1], PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Column entry number %" PetscInt_FMT " (actual column %" PetscInt_FMT ") in row %" PetscInt_FMT " is not sorted", j - Ii[i], J[j], i);
4243: PetscCheck(J[j] != J[j - 1], PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Column entry number %" PetscInt_FMT " (actual column %" PetscInt_FMT ") in row %" PetscInt_FMT " is identical to previous entry", j - Ii[i], J[j], i);
4244: }
4245: }
4246: nnz = Ii[i + 1] - Ii[i];
4247: Iii = Ii[i];
4248: ldi = ld[i];
4249: md = Adi[i + 1] - Adi[i];
4250: PetscCall(PetscArraycpy(ao, v + Iii, ldi));
4251: PetscCall(PetscArraycpy(ad, v + Iii + ldi, md));
4252: PetscCall(PetscArraycpy(ao + ldi, v + Iii + ldi + md, nnz - ldi - md));
4253: ad += md;
4254: ao += nnz - md;
4255: }
4256: nooffprocentries = mat->nooffprocentries;
4257: mat->nooffprocentries = PETSC_TRUE;
4258: PetscCall(MatSeqAIJRestoreArrayWrite(Aij->A, &ad));
4259: PetscCall(MatSeqAIJRestoreArrayWrite(Aij->B, &ao));
4260: PetscCall(PetscObjectStateIncrease((PetscObject)Aij->A));
4261: PetscCall(PetscObjectStateIncrease((PetscObject)Aij->B));
4262: PetscCall(PetscObjectStateIncrease((PetscObject)mat));
4263: PetscCall(MatAssemblyBegin(mat, MAT_FINAL_ASSEMBLY));
4264: PetscCall(MatAssemblyEnd(mat, MAT_FINAL_ASSEMBLY));
4265: mat->nooffprocentries = nooffprocentries;
4266: PetscFunctionReturn(PETSC_SUCCESS);
4267: }
4269: /*@
4270: MatUpdateMPIAIJWithArray - updates a `MATMPIAIJ` matrix using an array that contains the nonzero values
4272: Collective
4274: Input Parameters:
4275: + mat - the matrix
4276: - v - matrix values, stored by row
4278: Level: intermediate
4280: Notes:
4281: The matrix must have been obtained with `MatCreateMPIAIJWithArrays()` or `MatMPIAIJSetPreallocationCSR()`
4283: The column indices in the call to `MatCreateMPIAIJWithArrays()` or `MatMPIAIJSetPreallocationCSR()` must have been sorted for this call to work correctly
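A minimal sketch of the intended create/update cycle (the arrays `i`, `j`, `v`, and the new values `vnew` are assumed to be set up as for `MatCreateMPIAIJWithArrays()`, with `j` sorted within each row) might be
.vb
MatCreateMPIAIJWithArrays(comm, m, n, M, N, i, j, v, &A);
// ... later, when only the numerical values have changed ...
MatUpdateMPIAIJWithArray(A, vnew);
.ve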
4285: .seealso: [](ch_matrices), `Mat`, `MatCreate()`, `MatCreateSeqAIJ()`, `MatSetValues()`, `MatMPIAIJSetPreallocation()`, `MatMPIAIJSetPreallocationCSR()`,
4286: `MATMPIAIJ`, `MatCreateAIJ()`, `MatCreateMPIAIJWithSplitArrays()`, `MatUpdateMPIAIJWithArrays()`, `MatSetPreallocationCOO()`, `MatSetValuesCOO()`
4287: @*/
4288: PetscErrorCode MatUpdateMPIAIJWithArray(Mat mat, const PetscScalar v[])
4289: {
4290: PetscInt nnz, i, m;
4291: PetscBool nooffprocentries;
4292: Mat_MPIAIJ *Aij = (Mat_MPIAIJ *)mat->data;
4293: Mat_SeqAIJ *Ad = (Mat_SeqAIJ *)Aij->A->data;
4294: Mat_SeqAIJ *Ao = (Mat_SeqAIJ *)Aij->B->data;
4295: PetscScalar *ad, *ao;
4296: const PetscInt *Adi = Ad->i, *Adj = Ao->i;
4297: PetscInt ldi, Iii, md;
4298: PetscInt *ld = Aij->ld;
4300: PetscFunctionBegin;
4301: m = mat->rmap->n;
4303: PetscCall(MatSeqAIJGetArrayWrite(Aij->A, &ad));
4304: PetscCall(MatSeqAIJGetArrayWrite(Aij->B, &ao));
4305: Iii = 0;
4306: for (i = 0; i < m; i++) {
4307: nnz = Adi[i + 1] - Adi[i] + Adj[i + 1] - Adj[i];
4308: ldi = ld[i];
4309: md = Adi[i + 1] - Adi[i];
4310: PetscCall(PetscArraycpy(ad, v + Iii + ldi, md));
4311: ad += md;
4312: if (ao) {
4313: PetscCall(PetscArraycpy(ao, v + Iii, ldi));
4314: PetscCall(PetscArraycpy(ao + ldi, v + Iii + ldi + md, nnz - ldi - md));
4315: ao += nnz - md;
4316: }
4317: Iii += nnz;
4318: }
4319: nooffprocentries = mat->nooffprocentries;
4320: mat->nooffprocentries = PETSC_TRUE;
4321: PetscCall(MatSeqAIJRestoreArrayWrite(Aij->A, &ad));
4322: PetscCall(MatSeqAIJRestoreArrayWrite(Aij->B, &ao));
4323: PetscCall(PetscObjectStateIncrease((PetscObject)Aij->A));
4324: PetscCall(PetscObjectStateIncrease((PetscObject)Aij->B));
4325: PetscCall(PetscObjectStateIncrease((PetscObject)mat));
4326: PetscCall(MatAssemblyBegin(mat, MAT_FINAL_ASSEMBLY));
4327: PetscCall(MatAssemblyEnd(mat, MAT_FINAL_ASSEMBLY));
4328: mat->nooffprocentries = nooffprocentries;
4329: PetscFunctionReturn(PETSC_SUCCESS);
4330: }
4332: /*@
4333: MatCreateAIJ - Creates a sparse parallel matrix in `MATAIJ` format
4334: (the default parallel PETSc format). For good matrix assembly performance
4335: the user should preallocate the matrix storage by setting the parameters
4336: `d_nz` (or `d_nnz`) and `o_nz` (or `o_nnz`).
4338: Collective
4340: Input Parameters:
4341: + comm - MPI communicator
4342: . m - number of local rows (or `PETSC_DECIDE` to have it calculated if M is given)
4343: This value should be the same as the local size used in creating the
4344: y vector for the matrix-vector product y = Ax.
4345: . n - This value should be the same as the local size used in creating the
4346: x vector for the matrix-vector product y = Ax (or `PETSC_DECIDE` to have it
4347: calculated if N is given). For square matrices n is almost always m.
4348: . M - number of global rows (or `PETSC_DETERMINE` to have it calculated if m is given)
4349: . N - number of global columns (or `PETSC_DETERMINE` to have it calculated if n is given)
4350: . d_nz - number of nonzeros per row in DIAGONAL portion of local submatrix
4351: (same value is used for all local rows)
4352: . d_nnz - array containing the number of nonzeros in the various rows of the
4353: DIAGONAL portion of the local submatrix (possibly different for each row)
4354: or `NULL`, if `d_nz` is used to specify the nonzero structure.
4355: The size of this array is equal to the number of local rows, i.e., 'm'.
4356: . o_nz - number of nonzeros per row in the OFF-DIAGONAL portion of local
4357: submatrix (same value is used for all local rows).
4358: - o_nnz - array containing the number of nonzeros in the various rows of the
4359: OFF-DIAGONAL portion of the local submatrix (possibly different for
4360: each row) or `NULL`, if `o_nz` is used to specify the nonzero
4361: structure. The size of this array is equal to the number
4362: of local rows, i.e., 'm'.
4364: Output Parameter:
4365: . A - the matrix
4367: Options Database Keys:
4368: + -mat_no_inode - Do not use inodes
4369: . -mat_inode_limit limit - Sets inode limit (max limit=5)
4370: - -matmult_vecscatter_view viewer - View the vecscatter (i.e., communication pattern) used in `MatMult()` of sparse parallel matrices.
4371: See viewer types in manual of `MatView()`. Of them, ascii_matlab, draw or binary cause the `VecScatter`
4372: to be viewed as a matrix. Entry (i,j) is the size of the message (in bytes) rank i sends to rank j in one `MatMult()` call.
4374: Level: intermediate
4376: Notes:
4377: It is recommended that one use `MatCreateFromOptions()` or the `MatCreate()`, `MatSetType()` and/or `MatSetFromOptions()`,
4378: MatXXXXSetPreallocation() paradigm instead of this routine directly.
4379: [MatXXXXSetPreallocation() is, for example, `MatSeqAIJSetPreallocation()`]
4381: If the *_nnz parameter is given then the *_nz parameter is ignored
4383: The `m`,`n`,`M`,`N` parameters specify the size of the matrix, and its partitioning across
4384: processors, while `d_nz`,`d_nnz`,`o_nz`,`o_nnz` parameters specify the approximate
4385: storage requirements for this matrix.
4387: If `PETSC_DECIDE` or `PETSC_DETERMINE` is used for a particular argument on one
4388: processor then it must be used on all processors that share the object for
4389: that argument.
4391: If `m` and `n` are not `PETSC_DECIDE`, then the values determine the `PetscLayout` of the matrix and the ranges returned by
4392: `MatGetOwnershipRange()`, `MatGetOwnershipRanges()`, `MatGetOwnershipRangeColumn()`, and `MatGetOwnershipRangesColumn()`.
4394: The user MUST specify either the local or global matrix dimensions
4395: (possibly both).
4397: The parallel matrix is partitioned across processors such that the
4398: first `m0` rows belong to process 0, the next `m1` rows belong to
4399: process 1, the next `m2` rows belong to process 2, etc., where
4400: `m0`, `m1`, `m2`... are the input parameter `m` on each MPI process. I.e., each MPI process stores
4401: values corresponding to an [m x N] submatrix.
4403: The columns are logically partitioned with the n0 columns belonging
4404: to the 0th partition, the next n1 columns belonging to the next
4405: partition, etc., where n0,n1,n2... are the input parameter 'n'.
4407: The DIAGONAL portion of the local submatrix on any given processor
4408: is the submatrix corresponding to the rows and columns m,n
4409: owned by the given processor, i.e., the diagonal submatrix on
4410: process 0 is [m0 x n0], on process 1 it is [m1 x n1],
4411: etc. The remaining portion of the local submatrix [m x (N-n)]
4412: constitutes the OFF-DIAGONAL portion. The example below better
4413: illustrates this concept. The two matrices, the DIAGONAL portion and
4414: the OFF-DIAGONAL portion, are each stored as `MATSEQAIJ` matrices.
4416: For a square global matrix we define each processor's diagonal portion
4417: to be its local rows and the corresponding columns (a square submatrix);
4418: each processor's off-diagonal portion encompasses the remainder of the
4419: local matrix (a rectangular submatrix).
4421: If `o_nnz`, `d_nnz` are specified, then `o_nz`, and `d_nz` are ignored.
4423: When calling this routine with a single process communicator, a matrix of
4424: type `MATSEQAIJ` is returned. If a matrix of type `MATMPIAIJ` is desired for this
4425: type of communicator, use the construction mechanism
4426: .vb
4427: MatCreate(..., &A);
4428: MatSetType(A, MATMPIAIJ);
4429: MatSetSizes(A, m, n, M, N);
4430: MatMPIAIJSetPreallocation(A, ...);
4431: .ve
4433: By default, this format uses inodes (identical nodes) when possible.
4434: We search for consecutive rows with the same nonzero structure, thereby
4435: reusing matrix information to achieve increased efficiency.
4437: Example Usage:
4438: Consider the following 8x8 matrix with 34 non-zero values that is
4439: assembled across 3 processors. Let's assume that proc0 owns 3 rows,
4440: proc1 owns 3 rows, and proc2 owns 2 rows. This division can be shown
4441: as follows
4443: .vb
4444: 1 2 0 | 0 3 0 | 0 4
4445: Proc0 0 5 6 | 7 0 0 | 8 0
4446: 9 0 10 | 11 0 0 | 12 0
4447: -------------------------------------
4448: 13 0 14 | 15 16 17 | 0 0
4449: Proc1 0 18 0 | 19 20 21 | 0 0
4450: 0 0 0 | 22 23 0 | 24 0
4451: -------------------------------------
4452: Proc2 25 26 27 | 0 0 28 | 29 0
4453: 30 0 0 | 31 32 33 | 0 34
4454: .ve
4456: This can be represented as a collection of submatrices as
4458: .vb
4459: A B C
4460: D E F
4461: G H I
4462: .ve
4464: where the submatrices A,B,C are owned by proc0, D,E,F are
4465: owned by proc1, and G,H,I are owned by proc2.
4467: The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4468: The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4469: The 'M','N' parameters are 8,8, and have the same values on all procs.
4471: The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
4472: submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
4473: corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
4474: Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
4475: part as `MATSEQAIJ` matrices. For example, proc1 will store [E] as a `MATSEQAIJ`
4476: matrix, and [DF] as another SeqAIJ matrix.
4478: When `d_nz`, `o_nz` parameters are specified, `d_nz` storage elements are
4479: allocated for every row of the local DIAGONAL submatrix, and `o_nz`
4480: storage locations are allocated for every row of the OFF-DIAGONAL submatrix.
4481: One way to choose `d_nz` and `o_nz` is to use the maximum number of nonzeros over
4482: the local rows for each of the local DIAGONAL, and the OFF-DIAGONAL submatrices.
4483: In this case, the values of `d_nz`,`o_nz` are
4484: .vb
4485: proc0 d_nz = 2, o_nz = 2
4486: proc1 d_nz = 3, o_nz = 2
4487: proc2 d_nz = 1, o_nz = 4
4488: .ve
4489: We are allocating m*(`d_nz`+`o_nz`) storage locations for every proc. This
4490: translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
4491: for proc2, i.e., we are using 12+15+10=37 storage locations to store
4492: 34 values.
4494: When `d_nnz`, `o_nnz` parameters are specified, the storage is specified
4495: for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
4496: In the above case the values for d_nnz,o_nnz are
4497: .vb
4498: proc0 d_nnz = [2,2,2] and o_nnz = [2,2,2]
4499: proc1 d_nnz = [3,3,2] and o_nnz = [2,1,1]
4500: proc2 d_nnz = [1,1] and o_nnz = [4,4]
4501: .ve
4502: Here the space allocated is the sum of all the above values, i.e., 34, and
4503: hence the preallocation is perfect.
4505: .seealso: [](ch_matrices), `Mat`, [Sparse Matrix Creation](sec_matsparse), `MatCreate()`, `MatCreateSeqAIJ()`, `MatSetValues()`, `MatMPIAIJSetPreallocation()`, `MatMPIAIJSetPreallocationCSR()`,
4506: `MATMPIAIJ`, `MatCreateMPIAIJWithArrays()`, `MatGetOwnershipRange()`, `MatGetOwnershipRanges()`, `MatGetOwnershipRangeColumn()`,
4507: `MatGetOwnershipRangesColumn()`, `PetscLayout`
4508: @*/
4509: PetscErrorCode MatCreateAIJ(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt M, PetscInt N, PetscInt d_nz, const PetscInt d_nnz[], PetscInt o_nz, const PetscInt o_nnz[], Mat *A)
4510: {
4511: PetscMPIInt size;
4513: PetscFunctionBegin;
4514: PetscCall(MatCreate(comm, A));
4515: PetscCall(MatSetSizes(*A, m, n, M, N));
4516: PetscCallMPI(MPI_Comm_size(comm, &size));
4517: if (size > 1) {
4518: PetscCall(MatSetType(*A, MATMPIAIJ));
4519: PetscCall(MatMPIAIJSetPreallocation(*A, d_nz, d_nnz, o_nz, o_nnz));
4520: } else {
4521: PetscCall(MatSetType(*A, MATSEQAIJ));
4522: PetscCall(MatSeqAIJSetPreallocation(*A, d_nz, d_nnz));
4523: }
4524: PetscFunctionReturn(PETSC_SUCCESS);
4525: }
4527: /*@C
4528: MatMPIAIJGetSeqAIJ - Returns the local pieces of this distributed matrix
4530: Not Collective
4532: Input Parameter:
4533: . A - The `MATMPIAIJ` matrix
4535: Output Parameters:
4536: + Ad - The local diagonal block as a `MATSEQAIJ` matrix
4537: . Ao - The local off-diagonal block as a `MATSEQAIJ` matrix
4538: - colmap - An array mapping local column numbers of `Ao` to global column numbers of the parallel matrix
4540: Level: intermediate
4542: Note:
4543: The rows in `Ad` and `Ao` are in [0, Nr), where Nr is the number of local rows on this process. The columns
4544: in `Ad` are in [0, Nc), where Nc is the number of local columns. The columns of `Ao` are in [0, Nco), where Nco is
4545: the number of nonzero columns in the local off-diagonal piece of the matrix `A`. The array colmap maps these
4546: local column numbers to global column numbers in the original matrix.
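A minimal sketch of typical usage (assuming `A` is an assembled `MATMPIAIJ` matrix) might be
.vb
Mat             Ad, Ao;
const PetscInt *colmap;

MatMPIAIJGetSeqAIJ(A, &Ad, &Ao, &colmap);
// local column c of Ao corresponds to global column colmap[c] of A
.ve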
4548: .seealso: [](ch_matrices), `Mat`, `MATMPIAIJ`, `MatMPIAIJGetLocalMat()`, `MatMPIAIJGetLocalMatCondensed()`, `MatCreateAIJ()`, `MATSEQAIJ`
4549: @*/
4550: PetscErrorCode MatMPIAIJGetSeqAIJ(Mat A, Mat *Ad, Mat *Ao, const PetscInt *colmap[])
4551: {
4552: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
4553: PetscBool flg;
4555: PetscFunctionBegin;
4556: PetscCall(PetscStrbeginswith(((PetscObject)A)->type_name, MATMPIAIJ, &flg));
4557: PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "This function requires a MATMPIAIJ matrix as input");
4558: if (Ad) *Ad = a->A;
4559: if (Ao) *Ao = a->B;
4560: if (colmap) *colmap = a->garray;
4561: PetscFunctionReturn(PETSC_SUCCESS);
4562: }
4564: PetscErrorCode MatCreateMPIMatConcatenateSeqMat_MPIAIJ(MPI_Comm comm, Mat inmat, PetscInt n, MatReuse scall, Mat *outmat)
4565: {
4566: PetscInt m, N, i, rstart, nnz, Ii;
4567: PetscInt *indx;
4568: PetscScalar *values;
4569: MatType rootType;
4571: PetscFunctionBegin;
4572: PetscCall(MatGetSize(inmat, &m, &N));
4573: if (scall == MAT_INITIAL_MATRIX) { /* symbolic phase */
4574: PetscInt *dnz, *onz, sum, bs, cbs;
4576: if (n == PETSC_DECIDE) PetscCall(PetscSplitOwnership(comm, &n, &N));
4577: /* Check sum(n) = N */
4578: PetscCallMPI(MPIU_Allreduce(&n, &sum, 1, MPIU_INT, MPI_SUM, comm));
4579: PetscCheck(sum == N, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Sum of local columns %" PetscInt_FMT " != global columns %" PetscInt_FMT, sum, N);
4581: PetscCallMPI(MPI_Scan(&m, &rstart, 1, MPIU_INT, MPI_SUM, comm));
4582: rstart -= m;
4584: MatPreallocateBegin(comm, m, n, dnz, onz);
4585: for (i = 0; i < m; i++) {
4586: PetscCall(MatGetRow_SeqAIJ(inmat, i, &nnz, &indx, NULL));
4587: PetscCall(MatPreallocateSet(i + rstart, nnz, indx, dnz, onz));
4588: PetscCall(MatRestoreRow_SeqAIJ(inmat, i, &nnz, &indx, NULL));
4589: }
4591: PetscCall(MatCreate(comm, outmat));
4592: PetscCall(MatSetSizes(*outmat, m, n, PETSC_DETERMINE, PETSC_DETERMINE));
4593: PetscCall(MatGetBlockSizes(inmat, &bs, &cbs));
4594: PetscCall(MatSetBlockSizes(*outmat, bs, cbs));
4595: PetscCall(MatGetRootType_Private(inmat, &rootType));
4596: PetscCall(MatSetType(*outmat, rootType));
4597: PetscCall(MatSeqAIJSetPreallocation(*outmat, 0, dnz));
4598: PetscCall(MatMPIAIJSetPreallocation(*outmat, 0, dnz, 0, onz));
4599: MatPreallocateEnd(dnz, onz);
4600: PetscCall(MatSetOption(*outmat, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
4601: }
4603: /* numeric phase */
4604: PetscCall(MatGetOwnershipRange(*outmat, &rstart, NULL));
4605: for (i = 0; i < m; i++) {
4606: PetscCall(MatGetRow_SeqAIJ(inmat, i, &nnz, &indx, &values));
4607: Ii = i + rstart;
4608: PetscCall(MatSetValues(*outmat, 1, &Ii, nnz, indx, values, INSERT_VALUES));
4609: PetscCall(MatRestoreRow_SeqAIJ(inmat, i, &nnz, &indx, &values));
4610: }
4611: PetscCall(MatAssemblyBegin(*outmat, MAT_FINAL_ASSEMBLY));
4612: PetscCall(MatAssemblyEnd(*outmat, MAT_FINAL_ASSEMBLY));
4613: PetscFunctionReturn(PETSC_SUCCESS);
4614: }
4616: static PetscErrorCode MatMergeSeqsToMPIDestroy(void **data)
4617: {
4618: MatMergeSeqsToMPI *merge = *(MatMergeSeqsToMPI **)data;
4620: PetscFunctionBegin;
4621: if (!merge) PetscFunctionReturn(PETSC_SUCCESS);
4622: PetscCall(PetscFree(merge->id_r));
4623: PetscCall(PetscFree(merge->len_s));
4624: PetscCall(PetscFree(merge->len_r));
4625: PetscCall(PetscFree(merge->bi));
4626: PetscCall(PetscFree(merge->bj));
4627: PetscCall(PetscFree(merge->buf_ri[0]));
4628: PetscCall(PetscFree(merge->buf_ri));
4629: PetscCall(PetscFree(merge->buf_rj[0]));
4630: PetscCall(PetscFree(merge->buf_rj));
4631: PetscCall(PetscFree(merge->coi));
4632: PetscCall(PetscFree(merge->coj));
4633: PetscCall(PetscFree(merge->owners_co));
4634: PetscCall(PetscLayoutDestroy(&merge->rowmap));
4635: PetscCall(PetscFree(merge));
4636: PetscFunctionReturn(PETSC_SUCCESS);
4637: }
4639: #include <../src/mat/utils/freespace.h>
4640: #include <petscbt.h>
4642: PetscErrorCode MatCreateMPIAIJSumSeqAIJNumeric(Mat seqmat, Mat mpimat)
4643: {
4644: MPI_Comm comm;
4645: Mat_SeqAIJ *a = (Mat_SeqAIJ *)seqmat->data;
4646: PetscMPIInt size, rank, taga, *len_s;
4647: PetscInt N = mpimat->cmap->N, i, j, *owners, *ai = a->i, *aj, m;
4648: PetscMPIInt proc, k;
4649: PetscInt **buf_ri, **buf_rj;
4650: PetscInt anzi, *bj_i, *bi, *bj, arow, bnzi, nextaj;
4651: PetscInt nrows, **buf_ri_k, **nextrow, **nextai;
4652: MPI_Request *s_waits, *r_waits;
4653: MPI_Status *status;
4654: const MatScalar *aa, *a_a;
4655: MatScalar **abuf_r, *ba_i;
4656: MatMergeSeqsToMPI *merge;
4657: PetscContainer container;
4659: PetscFunctionBegin;
4660: PetscCall(PetscObjectGetComm((PetscObject)mpimat, &comm));
4661: PetscCall(PetscLogEventBegin(MAT_Seqstompinum, seqmat, 0, 0, 0));
4663: PetscCallMPI(MPI_Comm_size(comm, &size));
4664: PetscCallMPI(MPI_Comm_rank(comm, &rank));
4666: PetscCall(PetscObjectQuery((PetscObject)mpimat, "MatMergeSeqsToMPI", (PetscObject *)&container));
4667: PetscCheck(container, PetscObjectComm((PetscObject)mpimat), PETSC_ERR_PLIB, "Mat not created from MatCreateMPIAIJSumSeqAIJSymbolic");
4668: PetscCall(PetscContainerGetPointer(container, &merge));
4669: PetscCall(MatSeqAIJGetArrayRead(seqmat, &a_a));
4670: aa = a_a;
4672: bi = merge->bi;
4673: bj = merge->bj;
4674: buf_ri = merge->buf_ri;
4675: buf_rj = merge->buf_rj;
4677: PetscCall(PetscMalloc1(size, &status));
4678: owners = merge->rowmap->range;
4679: len_s = merge->len_s;
4681: /* send and recv matrix values */
4682: PetscCall(PetscObjectGetNewTag((PetscObject)mpimat, &taga));
4683: PetscCall(PetscPostIrecvScalar(comm, taga, merge->nrecv, merge->id_r, merge->len_r, &abuf_r, &r_waits));
4685: PetscCall(PetscMalloc1(merge->nsend + 1, &s_waits));
4686: for (proc = 0, k = 0; proc < size; proc++) {
4687: if (!len_s[proc]) continue;
4688: i = owners[proc];
4689: PetscCallMPI(MPIU_Isend(aa + ai[i], len_s[proc], MPIU_MATSCALAR, proc, taga, comm, s_waits + k));
4690: k++;
4691: }
4693: if (merge->nrecv) PetscCallMPI(MPI_Waitall(merge->nrecv, r_waits, status));
4694: if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend, s_waits, status));
4695: PetscCall(PetscFree(status));
4697: PetscCall(PetscFree(s_waits));
4698: PetscCall(PetscFree(r_waits));
4700: /* insert mat values of mpimat */
4701: PetscCall(PetscMalloc1(N, &ba_i));
4702: PetscCall(PetscMalloc3(merge->nrecv, &buf_ri_k, merge->nrecv, &nextrow, merge->nrecv, &nextai));
4704: for (k = 0; k < merge->nrecv; k++) {
4705: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
4706: nrows = *buf_ri_k[k];
4707: nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th recved i-structure */
4708: nextai[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
4709: }
4711: /* set values of ba */
4712: m = merge->rowmap->n;
4713: for (i = 0; i < m; i++) {
4714: arow = owners[rank] + i;
4715: bj_i = bj + bi[i]; /* col indices of the i-th row of mpimat */
4716: bnzi = bi[i + 1] - bi[i];
4717: PetscCall(PetscArrayzero(ba_i, bnzi));
4719: /* add local non-zero vals of this proc's seqmat into ba */
4720: anzi = ai[arow + 1] - ai[arow];
4721: aj = a->j + ai[arow];
4722: aa = a_a + ai[arow];
4723: nextaj = 0;
4724: for (j = 0; nextaj < anzi; j++) {
4725: if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */
4726: ba_i[j] += aa[nextaj++];
4727: }
4728: }
4730: /* add received vals into ba */
4731: for (k = 0; k < merge->nrecv; k++) { /* k-th received message */
4732: /* i-th row */
4733: if (i == *nextrow[k]) {
4734: anzi = *(nextai[k] + 1) - *nextai[k];
4735: aj = buf_rj[k] + *nextai[k];
4736: aa = abuf_r[k] + *nextai[k];
4737: nextaj = 0;
4738: for (j = 0; nextaj < anzi; j++) {
4739: if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */
4740: ba_i[j] += aa[nextaj++];
4741: }
4742: }
4743: nextrow[k]++;
4744: nextai[k]++;
4745: }
4746: }
4747: PetscCall(MatSetValues(mpimat, 1, &arow, bnzi, bj_i, ba_i, INSERT_VALUES));
4748: }
4749: PetscCall(MatSeqAIJRestoreArrayRead(seqmat, &a_a));
4750: PetscCall(MatAssemblyBegin(mpimat, MAT_FINAL_ASSEMBLY));
4751: PetscCall(MatAssemblyEnd(mpimat, MAT_FINAL_ASSEMBLY));
4753: PetscCall(PetscFree(abuf_r[0]));
4754: PetscCall(PetscFree(abuf_r));
4755: PetscCall(PetscFree(ba_i));
4756: PetscCall(PetscFree3(buf_ri_k, nextrow, nextai));
4757: PetscCall(PetscLogEventEnd(MAT_Seqstompinum, seqmat, 0, 0, 0));
4758: PetscFunctionReturn(PETSC_SUCCESS);
4759: }
4761: PetscErrorCode MatCreateMPIAIJSumSeqAIJSymbolic(MPI_Comm comm, Mat seqmat, PetscInt m, PetscInt n, Mat *mpimat)
4762: {
4763: Mat B_mpi;
4764: Mat_SeqAIJ *a = (Mat_SeqAIJ *)seqmat->data;
4765: PetscMPIInt size, rank, tagi, tagj, *len_s, *len_si, *len_ri;
4766: PetscInt **buf_rj, **buf_ri, **buf_ri_k;
4767: PetscInt M = seqmat->rmap->n, N = seqmat->cmap->n, i, *owners, *ai = a->i, *aj = a->j;
4768: PetscInt len, *dnz, *onz, bs, cbs;
4769: PetscInt k, anzi, *bi, *bj, *lnk, nlnk, arow, bnzi;
4770: PetscInt nrows, *buf_s, *buf_si, *buf_si_i, **nextrow, **nextai;
4771: MPI_Request *si_waits, *sj_waits, *ri_waits, *rj_waits;
4772: MPI_Status *status;
4773: PetscFreeSpaceList free_space = NULL, current_space = NULL;
4774: PetscBT lnkbt;
4775: MatMergeSeqsToMPI *merge;
4776: PetscContainer container;
4778: PetscFunctionBegin;
4779: PetscCall(PetscLogEventBegin(MAT_Seqstompisym, seqmat, 0, 0, 0));
4781: /* make sure it is a PETSc comm */
4782: PetscCall(PetscCommDuplicate(comm, &comm, NULL));
4783: PetscCallMPI(MPI_Comm_size(comm, &size));
4784: PetscCallMPI(MPI_Comm_rank(comm, &rank));
4786: PetscCall(PetscNew(&merge));
4787: PetscCall(PetscMalloc1(size, &status));
4789: /* determine row ownership */
4790: PetscCall(PetscLayoutCreate(comm, &merge->rowmap));
4791: PetscCall(PetscLayoutSetLocalSize(merge->rowmap, m));
4792: PetscCall(PetscLayoutSetSize(merge->rowmap, M));
4793: PetscCall(PetscLayoutSetBlockSize(merge->rowmap, 1));
4794: PetscCall(PetscLayoutSetUp(merge->rowmap));
4795: PetscCall(PetscMalloc1(size, &len_si));
4796: PetscCall(PetscMalloc1(size, &merge->len_s));
4798: m = merge->rowmap->n;
4799: owners = merge->rowmap->range;
4801: /* determine the number of messages to send, their lengths */
4802: len_s = merge->len_s;
4804: len = 0; /* length of buf_si[] */
4805: merge->nsend = 0;
4806: for (PetscMPIInt proc = 0; proc < size; proc++) {
4807: len_si[proc] = 0;
4808: if (proc == rank) {
4809: len_s[proc] = 0;
4810: } else {
4811: PetscCall(PetscMPIIntCast(owners[proc + 1] - owners[proc] + 1, &len_si[proc]));
4812: PetscCall(PetscMPIIntCast(ai[owners[proc + 1]] - ai[owners[proc]], &len_s[proc])); /* num of rows to be sent to [proc] */
4813: }
4814: if (len_s[proc]) {
4815: merge->nsend++;
4816: nrows = 0;
4817: for (i = owners[proc]; i < owners[proc + 1]; i++) {
4818: if (ai[i + 1] > ai[i]) nrows++;
4819: }
4820: PetscCall(PetscMPIIntCast(2 * (nrows + 1), &len_si[proc]));
4821: len += len_si[proc];
4822: }
4823: }
4825: /* determine the number and length of messages to receive for ij-structure */
4826: PetscCall(PetscGatherNumberOfMessages(comm, NULL, len_s, &merge->nrecv));
4827: PetscCall(PetscGatherMessageLengths2(comm, merge->nsend, merge->nrecv, len_s, len_si, &merge->id_r, &merge->len_r, &len_ri));
4829: /* post the Irecv of j-structure */
4830: PetscCall(PetscCommGetNewTag(comm, &tagj));
4831: PetscCall(PetscPostIrecvInt(comm, tagj, merge->nrecv, merge->id_r, merge->len_r, &buf_rj, &rj_waits));
4833: /* post the Isend of j-structure */
4834: PetscCall(PetscMalloc2(merge->nsend, &si_waits, merge->nsend, &sj_waits));
4836: for (PetscMPIInt proc = 0, k = 0; proc < size; proc++) {
4837: if (!len_s[proc]) continue;
4838: i = owners[proc];
4839: PetscCallMPI(MPIU_Isend(aj + ai[i], len_s[proc], MPIU_INT, proc, tagj, comm, sj_waits + k));
4840: k++;
4841: }
4843: /* receives and sends of j-structure are complete */
4844: if (merge->nrecv) PetscCallMPI(MPI_Waitall(merge->nrecv, rj_waits, status));
4845: if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend, sj_waits, status));
4847: /* send and recv i-structure */
4848: PetscCall(PetscCommGetNewTag(comm, &tagi));
4849: PetscCall(PetscPostIrecvInt(comm, tagi, merge->nrecv, merge->id_r, len_ri, &buf_ri, &ri_waits));
4851: PetscCall(PetscMalloc1(len + 1, &buf_s));
4852: buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
4853: for (PetscMPIInt proc = 0, k = 0; proc < size; proc++) {
4854: if (!len_s[proc]) continue;
4855: /* form outgoing message for i-structure:
4856: buf_si[0]: nrows to be sent
4857: [1:nrows]: row index (global)
4858: [nrows+1:2*nrows+1]: i-structure index
4859: */
4860: nrows = len_si[proc] / 2 - 1;
4861: buf_si_i = buf_si + nrows + 1;
4862: buf_si[0] = nrows;
4863: buf_si_i[0] = 0;
4864: nrows = 0;
4865: for (i = owners[proc]; i < owners[proc + 1]; i++) {
4866: anzi = ai[i + 1] - ai[i];
4867: if (anzi) {
4868: buf_si_i[nrows + 1] = buf_si_i[nrows] + anzi; /* i-structure */
4869: buf_si[nrows + 1] = i - owners[proc]; /* local row index */
4870: nrows++;
4871: }
4872: }
4873: PetscCallMPI(MPIU_Isend(buf_si, len_si[proc], MPIU_INT, proc, tagi, comm, si_waits + k));
4874: k++;
4875: buf_si += len_si[proc];
4876: }
4878: if (merge->nrecv) PetscCallMPI(MPI_Waitall(merge->nrecv, ri_waits, status));
4879: if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend, si_waits, status));
4881: PetscCall(PetscInfo(seqmat, "nsend: %d, nrecv: %d\n", merge->nsend, merge->nrecv));
4882: for (i = 0; i < merge->nrecv; i++) PetscCall(PetscInfo(seqmat, "recv len_ri=%d, len_rj=%d from [%d]\n", len_ri[i], merge->len_r[i], merge->id_r[i]));
4884: PetscCall(PetscFree(len_si));
4885: PetscCall(PetscFree(len_ri));
4886: PetscCall(PetscFree(rj_waits));
4887: PetscCall(PetscFree2(si_waits, sj_waits));
4888: PetscCall(PetscFree(ri_waits));
4889: PetscCall(PetscFree(buf_s));
4890: PetscCall(PetscFree(status));
4892: /* compute a local seq matrix in each processor */
4893: /* allocate bi array and free space for accumulating nonzero column info */
4894: PetscCall(PetscMalloc1(m + 1, &bi));
4895: bi[0] = 0;
4897: /* create and initialize a linked list */
4898: nlnk = N + 1;
4899: PetscCall(PetscLLCreate(N, N, nlnk, lnk, lnkbt));
4901: /* initial FreeSpace size is 2*(num of local nnz(seqmat)) */
4902: len = ai[owners[rank + 1]] - ai[owners[rank]];
4903: PetscCall(PetscFreeSpaceGet(PetscIntMultTruncate(2, len) + 1, &free_space));
4905: current_space = free_space;
4907: /* determine symbolic info for each local row */
4908: PetscCall(PetscMalloc3(merge->nrecv, &buf_ri_k, merge->nrecv, &nextrow, merge->nrecv, &nextai));
4910: for (k = 0; k < merge->nrecv; k++) {
4911: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
4912: nrows = *buf_ri_k[k];
4913: nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th recved i-structure */
4914: nextai[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
4915: }
4917: MatPreallocateBegin(comm, m, n, dnz, onz);
4918: len = 0;
4919: for (i = 0; i < m; i++) {
4920: bnzi = 0;
4921: /* add local non-zero cols of this proc's seqmat into lnk */
4922: arow = owners[rank] + i;
4923: anzi = ai[arow + 1] - ai[arow];
4924: aj = a->j + ai[arow];
4925: PetscCall(PetscLLAddSorted(anzi, aj, N, &nlnk, lnk, lnkbt));
4926: bnzi += nlnk;
4927: /* add received col data into lnk */
4928: for (k = 0; k < merge->nrecv; k++) { /* k-th received message */
4929: if (i == *nextrow[k]) { /* i-th row */
4930: anzi = *(nextai[k] + 1) - *nextai[k];
4931: aj = buf_rj[k] + *nextai[k];
4932: PetscCall(PetscLLAddSorted(anzi, aj, N, &nlnk, lnk, lnkbt));
4933: bnzi += nlnk;
4934: nextrow[k]++;
4935: nextai[k]++;
4936: }
4937: }
4938: if (len < bnzi) len = bnzi; /* =max(bnzi) */
4940: /* if free space is not available, make more free space */
4941: if (current_space->local_remaining < bnzi) PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(bnzi, current_space->total_array_size), &current_space));
4942: /* copy data into free space, then initialize lnk */
4943: PetscCall(PetscLLClean(N, N, bnzi, lnk, current_space->array, lnkbt));
4944: PetscCall(MatPreallocateSet(i + owners[rank], bnzi, current_space->array, dnz, onz));
4946: current_space->array += bnzi;
4947: current_space->local_used += bnzi;
4948: current_space->local_remaining -= bnzi;
4950: bi[i + 1] = bi[i] + bnzi;
4951: }
4953: PetscCall(PetscFree3(buf_ri_k, nextrow, nextai));
4955: PetscCall(PetscMalloc1(bi[m], &bj));
4956: PetscCall(PetscFreeSpaceContiguous(&free_space, bj));
4957: PetscCall(PetscLLDestroy(lnk, lnkbt));
4959: /* create symbolic parallel matrix B_mpi */
4960: PetscCall(MatGetBlockSizes(seqmat, &bs, &cbs));
4961: PetscCall(MatCreate(comm, &B_mpi));
4962: if (n == PETSC_DECIDE) PetscCall(MatSetSizes(B_mpi, m, n, PETSC_DETERMINE, N));
4963: else PetscCall(MatSetSizes(B_mpi, m, n, PETSC_DETERMINE, PETSC_DETERMINE));
4964: PetscCall(MatSetBlockSizes(B_mpi, bs, cbs));
4965: PetscCall(MatSetType(B_mpi, MATMPIAIJ));
4966: PetscCall(MatMPIAIJSetPreallocation(B_mpi, 0, dnz, 0, onz));
4967: MatPreallocateEnd(dnz, onz);
4968: PetscCall(MatSetOption(B_mpi, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE));
4970: /* B_mpi is not ready for use - assembly will be done by MatCreateMPIAIJSumSeqAIJNumeric() */
4971: B_mpi->assembled = PETSC_FALSE;
4972: merge->bi = bi;
4973: merge->bj = bj;
4974: merge->buf_ri = buf_ri;
4975: merge->buf_rj = buf_rj;
4976: merge->coi = NULL;
4977: merge->coj = NULL;
4978: merge->owners_co = NULL;
4980: PetscCall(PetscCommDestroy(&comm));
4982: /* attach the supporting struct to B_mpi for reuse */
4983: PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
4984: PetscCall(PetscContainerSetPointer(container, merge));
4985: PetscCall(PetscContainerSetCtxDestroy(container, MatMergeSeqsToMPIDestroy));
4986: PetscCall(PetscObjectCompose((PetscObject)B_mpi, "MatMergeSeqsToMPI", (PetscObject)container));
4987: PetscCall(PetscContainerDestroy(&container));
4988: *mpimat = B_mpi;
4990: PetscCall(PetscLogEventEnd(MAT_Seqstompisym, seqmat, 0, 0, 0));
4991: PetscFunctionReturn(PETSC_SUCCESS);
4992: }
4994: /*@
4995: MatCreateMPIAIJSumSeqAIJ - Creates a `MATMPIAIJ` matrix by adding sequential
4996: matrices from each processor
4998: Collective
5000: Input Parameters:
5001: + comm - the communicators the parallel matrix will live on
5002: . seqmat - the input sequential matrices
5003: . m - number of local rows (or `PETSC_DECIDE`)
5004: . n - number of local columns (or `PETSC_DECIDE`)
5005: - scall - either `MAT_INITIAL_MATRIX` or `MAT_REUSE_MATRIX`
5007: Output Parameter:
5008: . mpimat - the parallel matrix generated
5010: Level: advanced
5012: Note:
5013: The dimensions of the sequential matrix in each processor MUST be the same.
5014: The input seqmat is included into the container "MatMergeSeqsToMPI" composed on `mpimat`, and will be
5015: destroyed when `mpimat` is destroyed. Call `PetscObjectQuery()` to access `seqmat`.
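A minimal sketch of typical usage (assuming each process holds a sequential matrix `seqmat` of identical dimensions) might be
.vb
MatCreateMPIAIJSumSeqAIJ(PETSC_COMM_WORLD, seqmat, PETSC_DECIDE, PETSC_DECIDE, MAT_INITIAL_MATRIX, &mpimat);
// ... later, after the values of seqmat change but not its nonzero pattern ...
MatCreateMPIAIJSumSeqAIJ(PETSC_COMM_WORLD, seqmat, PETSC_DECIDE, PETSC_DECIDE, MAT_REUSE_MATRIX, &mpimat);
.ve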
5017: .seealso: [](ch_matrices), `Mat`, `MatCreateAIJ()`
5018: @*/
5019: PetscErrorCode MatCreateMPIAIJSumSeqAIJ(MPI_Comm comm, Mat seqmat, PetscInt m, PetscInt n, MatReuse scall, Mat *mpimat)
5020: {
5021: PetscMPIInt size;
5023: PetscFunctionBegin;
5024: PetscCallMPI(MPI_Comm_size(comm, &size));
5025: if (size == 1) {
5026: PetscCall(PetscLogEventBegin(MAT_Seqstompi, seqmat, 0, 0, 0));
5027: if (scall == MAT_INITIAL_MATRIX) PetscCall(MatDuplicate(seqmat, MAT_COPY_VALUES, mpimat));
5028: else PetscCall(MatCopy(seqmat, *mpimat, SAME_NONZERO_PATTERN));
5029: PetscCall(PetscLogEventEnd(MAT_Seqstompi, seqmat, 0, 0, 0));
5030: PetscFunctionReturn(PETSC_SUCCESS);
5031: }
5032: PetscCall(PetscLogEventBegin(MAT_Seqstompi, seqmat, 0, 0, 0));
5033: if (scall == MAT_INITIAL_MATRIX) PetscCall(MatCreateMPIAIJSumSeqAIJSymbolic(comm, seqmat, m, n, mpimat));
5034: PetscCall(MatCreateMPIAIJSumSeqAIJNumeric(seqmat, *mpimat));
5035: PetscCall(PetscLogEventEnd(MAT_Seqstompi, seqmat, 0, 0, 0));
5036: PetscFunctionReturn(PETSC_SUCCESS);
5037: }
5039: /*@
5040: MatAIJGetLocalMat - Creates a `MATSEQAIJ` from a `MATAIJ` matrix.
5042: Not Collective
5044: Input Parameter:
5045: . A - the matrix
5047: Output Parameter:
5048: . A_loc - the local sequential matrix generated
5050: Level: developer
5052: Notes:
5053: The matrix is created by taking `A`'s local rows and putting them into a sequential matrix
5054: with `mlocal` rows and `n` columns, where `mlocal` is obtained with `MatGetLocalSize()` and
5055: `n` is the global column count obtained with `MatGetSize()`.
5057: In other words, it combines the two parts of a parallel `MATMPIAIJ` matrix on each process into a single matrix.
5059: For parallel matrices this creates an entirely new matrix. If the matrix is sequential it merely increases the reference count.
5061: Destroy the matrix with `MatDestroy()`
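A minimal sketch of typical usage (assuming `A` is an assembled `MATAIJ` matrix) might be
.vb
Mat A_loc;

MatAIJGetLocalMat(A, &A_loc);
// ... use A_loc ...
MatDestroy(&A_loc);
.ve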
5063: .seealso: [](ch_matrices), `Mat`, `MatMPIAIJGetLocalMat()`
5064: @*/
5065: PetscErrorCode MatAIJGetLocalMat(Mat A, Mat *A_loc)
5066: {
5067: PetscBool mpi;
5069: PetscFunctionBegin;
5070: PetscCall(PetscObjectTypeCompare((PetscObject)A, MATMPIAIJ, &mpi));
5071: if (mpi) PetscCall(MatMPIAIJGetLocalMat(A, MAT_INITIAL_MATRIX, A_loc));
5072: else {
5073: *A_loc = A;
5074: PetscCall(PetscObjectReference((PetscObject)*A_loc));
5075: }
5076: PetscFunctionReturn(PETSC_SUCCESS);
5077: }
5079: /*@
5080: MatMPIAIJGetLocalMat - Creates a `MATSEQAIJ` from a `MATMPIAIJ` matrix.
5082: Not Collective
5084: Input Parameters:
5085: + A - the matrix
5086: - scall - either `MAT_INITIAL_MATRIX` or `MAT_REUSE_MATRIX`
5088: Output Parameter:
5089: . A_loc - the local sequential matrix generated
5091: Level: developer
5093: Notes:
5094: The matrix is created by taking all `A`'s local rows and putting them into a sequential
5095: matrix with `mlocal` rows and `n` columns. `mlocal` is the row count obtained with
5096: `MatGetLocalSize()` and `n` is the global column count obtained with `MatGetSize()`.
5098: In other words, it combines the two parts of a parallel `MATMPIAIJ` matrix on each process into a single matrix.
5100: When `A` is sequential and `MAT_INITIAL_MATRIX` is requested, the matrix returned is the diagonal part of `A` (which contains the entire matrix),
5101: with its reference count increased by one. Hence changing values of `A_loc` changes `A`. If `MAT_REUSE_MATRIX` is requested on a sequential matrix
5102: then `MatCopy`(Adiag,*`A_loc`,`SAME_NONZERO_PATTERN`) is called to fill `A_loc`. Thus one can preallocate the appropriate sequential matrix `A_loc`
5103: and then call this routine with `MAT_REUSE_MATRIX`. In this case, one can modify the values of `A_loc` without affecting the original sequential matrix.
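A minimal sketch of the create/reuse pattern (assuming `A` is a `MATMPIAIJ` matrix whose values, but not nonzero pattern, change between the two calls) might be
.vb
Mat A_loc;

MatMPIAIJGetLocalMat(A, MAT_INITIAL_MATRIX, &A_loc);
// ... the numerical values of A change ...
MatMPIAIJGetLocalMat(A, MAT_REUSE_MATRIX, &A_loc);
MatDestroy(&A_loc);
.ve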
5105: .seealso: [](ch_matrices), `Mat`, `MATMPIAIJ`, `MatGetOwnershipRange()`, `MatMPIAIJGetLocalMatCondensed()`, `MatMPIAIJGetLocalMatMerge()`
5106: @*/
5107: PetscErrorCode MatMPIAIJGetLocalMat(Mat A, MatReuse scall, Mat *A_loc)
5108: {
5109: Mat_MPIAIJ *mpimat = (Mat_MPIAIJ *)A->data;
5110: Mat_SeqAIJ *mat, *a, *b;
5111: PetscInt *ai, *aj, *bi, *bj, *cmap = mpimat->garray;
5112: const PetscScalar *aa, *ba, *aav, *bav;
5113: PetscScalar *ca, *cam;
5114: PetscMPIInt size;
5115: PetscInt am = A->rmap->n, i, j, k, cstart = A->cmap->rstart;
5116: PetscInt *ci, *cj, col, ncols_d, ncols_o, jo;
5117: PetscBool match;
5119: PetscFunctionBegin;
5120: PetscCall(PetscStrbeginswith(((PetscObject)A)->type_name, MATMPIAIJ, &match));
5121: PetscCheck(match, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Requires MATMPIAIJ matrix as input");
5122: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)A), &size));
5123: if (size == 1) {
5124: if (scall == MAT_INITIAL_MATRIX) {
5125: PetscCall(PetscObjectReference((PetscObject)mpimat->A));
5126: *A_loc = mpimat->A;
5127: } else if (scall == MAT_REUSE_MATRIX) {
5128: PetscCall(MatCopy(mpimat->A, *A_loc, SAME_NONZERO_PATTERN));
5129: }
5130: PetscFunctionReturn(PETSC_SUCCESS);
5131: }
5133: PetscCall(PetscLogEventBegin(MAT_Getlocalmat, A, 0, 0, 0));
5134: a = (Mat_SeqAIJ *)mpimat->A->data;
5135: b = (Mat_SeqAIJ *)mpimat->B->data;
5136: ai = a->i;
5137: aj = a->j;
5138: bi = b->i;
5139: bj = b->j;
5140: PetscCall(MatSeqAIJGetArrayRead(mpimat->A, &aav));
5141: PetscCall(MatSeqAIJGetArrayRead(mpimat->B, &bav));
5142: aa = aav;
5143: ba = bav;
5144: if (scall == MAT_INITIAL_MATRIX) {
5145: PetscCall(PetscMalloc1(1 + am, &ci));
5146: ci[0] = 0;
5147: for (i = 0; i < am; i++) ci[i + 1] = ci[i] + (ai[i + 1] - ai[i]) + (bi[i + 1] - bi[i]);
5148: PetscCall(PetscMalloc1(1 + ci[am], &cj));
5149: PetscCall(PetscMalloc1(1 + ci[am], &ca));
5150: k = 0;
5151: for (i = 0; i < am; i++) {
5152: ncols_o = bi[i + 1] - bi[i];
5153: ncols_d = ai[i + 1] - ai[i];
5154: /* off-diagonal portion of A */
5155: for (jo = 0; jo < ncols_o; jo++) {
5156: col = cmap[*bj];
5157: if (col >= cstart) break;
5158: cj[k] = col;
5159: bj++;
5160: ca[k++] = *ba++;
5161: }
5162: /* diagonal portion of A */
5163: for (j = 0; j < ncols_d; j++) {
5164: cj[k] = cstart + *aj++;
5165: ca[k++] = *aa++;
5166: }
5167: /* off-diagonal portion of A */
5168: for (j = jo; j < ncols_o; j++) {
5169: cj[k] = cmap[*bj++];
5170: ca[k++] = *ba++;
5171: }
5172: }
5173: /* put together the new matrix */
5174: PetscCall(MatCreateSeqAIJWithArrays(PETSC_COMM_SELF, am, A->cmap->N, ci, cj, ca, A_loc));
5175: /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
5176: /* Since these are PETSc arrays, change flags to free them as necessary. */
5177: mat = (Mat_SeqAIJ *)(*A_loc)->data;
5178: mat->free_a = PETSC_TRUE;
5179: mat->free_ij = PETSC_TRUE;
5180: mat->nonew = 0;
5181: } else if (scall == MAT_REUSE_MATRIX) {
5182: mat = (Mat_SeqAIJ *)(*A_loc)->data;
5183: ci = mat->i;
5184: cj = mat->j;
5185: PetscCall(MatSeqAIJGetArrayWrite(*A_loc, &cam));
5186: for (i = 0; i < am; i++) {
5187: /* off-diagonal portion of A */
5188: ncols_o = bi[i + 1] - bi[i];
5189: for (jo = 0; jo < ncols_o; jo++) {
5190: col = cmap[*bj];
5191: if (col >= cstart) break;
5192: *cam++ = *ba++;
5193: bj++;
5194: }
5195: /* diagonal portion of A */
5196: ncols_d = ai[i + 1] - ai[i];
5197: for (j = 0; j < ncols_d; j++) *cam++ = *aa++;
5198: /* off-diagonal portion of A */
5199: for (j = jo; j < ncols_o; j++) {
5200: *cam++ = *ba++;
5201: bj++;
5202: }
5203: }
5204: PetscCall(MatSeqAIJRestoreArrayWrite(*A_loc, &cam));
5205: } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Invalid MatReuse %d", (int)scall);
5206: PetscCall(MatSeqAIJRestoreArrayRead(mpimat->A, &aav));
5207: PetscCall(MatSeqAIJRestoreArrayRead(mpimat->B, &bav));
5208: PetscCall(PetscLogEventEnd(MAT_Getlocalmat, A, 0, 0, 0));
5209: PetscFunctionReturn(PETSC_SUCCESS);
5210: }
5212: /*@
5213: MatMPIAIJGetLocalMatMerge - Creates a `MATSEQAIJ` from a `MATMPIAIJ` matrix by taking all its local rows and putting them into a sequential matrix with
5214: mlocal rows and n columns, where n is the sum of the number of columns of the diagonal and off-diagonal parts
5216: Not Collective
5218: Input Parameters:
5219: + A - the matrix
5220: - scall - either `MAT_INITIAL_MATRIX` or `MAT_REUSE_MATRIX`
5222: Output Parameters:
5223: + glob - sequential `IS` with global indices associated with the columns of the local sequential matrix generated (can be `NULL`)
5224: - A_loc - the local sequential matrix generated
5226: Level: developer
5228: Note:
5229: This is different from `MatMPIAIJGetLocalMat()` since the first columns of the returned matrix are those associated with the diagonal
5230: part, followed by those associated with the off-diagonal part (in its local ordering).
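A minimal sketch of typical usage (assuming `A` is an assembled `MATMPIAIJ` matrix) might be
.vb
Mat A_loc;
IS  glob;

MatMPIAIJGetLocalMatMerge(A, MAT_INITIAL_MATRIX, &glob, &A_loc);
// column c of A_loc corresponds to the global column of A stored in entry c of glob
ISDestroy(&glob);
MatDestroy(&A_loc);
.ve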
5232: .seealso: [](ch_matrices), `Mat`, `MATMPIAIJ`, `MatGetOwnershipRange()`, `MatMPIAIJGetLocalMat()`, `MatMPIAIJGetLocalMatCondensed()`
5233: @*/
5234: PetscErrorCode MatMPIAIJGetLocalMatMerge(Mat A, MatReuse scall, IS *glob, Mat *A_loc)
5235: {
5236: Mat Ao, Ad;
5237: const PetscInt *cmap;
5238: PetscMPIInt size;
5239: PetscErrorCode (*f)(Mat, MatReuse, IS *, Mat *);
5241: PetscFunctionBegin;
5242: PetscCall(MatMPIAIJGetSeqAIJ(A, &Ad, &Ao, &cmap));
5243: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)A), &size));
5244: if (size == 1) {
5245: if (scall == MAT_INITIAL_MATRIX) {
5246: PetscCall(PetscObjectReference((PetscObject)Ad));
5247: *A_loc = Ad;
5248: } else if (scall == MAT_REUSE_MATRIX) {
5249: PetscCall(MatCopy(Ad, *A_loc, SAME_NONZERO_PATTERN));
5250: }
5251: if (glob) PetscCall(ISCreateStride(PetscObjectComm((PetscObject)Ad), Ad->cmap->n, Ad->cmap->rstart, 1, glob));
5252: PetscFunctionReturn(PETSC_SUCCESS);
5253: }
5254: PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatMPIAIJGetLocalMatMerge_C", &f));
5255: PetscCall(PetscLogEventBegin(MAT_Getlocalmat, A, 0, 0, 0));
5256: if (f) PetscCall((*f)(A, scall, glob, A_loc));
5257: else {
5258: Mat_SeqAIJ *a = (Mat_SeqAIJ *)Ad->data;
5259: Mat_SeqAIJ *b = (Mat_SeqAIJ *)Ao->data;
5260: Mat_SeqAIJ *c;
5261: PetscInt *ai = a->i, *aj = a->j;
5262: PetscInt *bi = b->i, *bj = b->j;
5263: PetscInt *ci, *cj;
5264: const PetscScalar *aa, *ba;
5265: PetscScalar *ca;
5266: PetscInt i, j, am, dn, on;
5268: PetscCall(MatGetLocalSize(Ad, &am, &dn));
5269: PetscCall(MatGetLocalSize(Ao, NULL, &on));
5270: PetscCall(MatSeqAIJGetArrayRead(Ad, &aa));
5271: PetscCall(MatSeqAIJGetArrayRead(Ao, &ba));
5272: if (scall == MAT_INITIAL_MATRIX) {
5273: PetscInt k;
5274: PetscCall(PetscMalloc1(1 + am, &ci));
5275: PetscCall(PetscMalloc1(ai[am] + bi[am], &cj));
5276: PetscCall(PetscMalloc1(ai[am] + bi[am], &ca));
5277: ci[0] = 0;
5278: for (i = 0, k = 0; i < am; i++) {
5279: const PetscInt ncols_o = bi[i + 1] - bi[i];
5280: const PetscInt ncols_d = ai[i + 1] - ai[i];
5281: ci[i + 1] = ci[i] + ncols_o + ncols_d;
5282: /* diagonal portion of A */
5283: for (j = 0; j < ncols_d; j++, k++) {
5284: cj[k] = *aj++;
5285: ca[k] = *aa++;
5286: }
5287: /* off-diagonal portion of A */
5288: for (j = 0; j < ncols_o; j++, k++) {
5289: cj[k] = dn + *bj++;
5290: ca[k] = *ba++;
5291: }
5292: }
5293: /* put together the new matrix */
5294: PetscCall(MatCreateSeqAIJWithArrays(PETSC_COMM_SELF, am, dn + on, ci, cj, ca, A_loc));
5295: /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
5296: /* Since these are PETSc arrays, change flags to free them as necessary. */
5297: c = (Mat_SeqAIJ *)(*A_loc)->data;
5298: c->free_a = PETSC_TRUE;
5299: c->free_ij = PETSC_TRUE;
5300: c->nonew = 0;
5301: PetscCall(MatSetType(*A_loc, ((PetscObject)Ad)->type_name));
5302: } else if (scall == MAT_REUSE_MATRIX) {
5303: PetscCall(MatSeqAIJGetArrayWrite(*A_loc, &ca));
5304: for (i = 0; i < am; i++) {
5305: const PetscInt ncols_d = ai[i + 1] - ai[i];
5306: const PetscInt ncols_o = bi[i + 1] - bi[i];
5307: /* diagonal portion of A */
5308: for (j = 0; j < ncols_d; j++) *ca++ = *aa++;
5309: /* off-diagonal portion of A */
5310: for (j = 0; j < ncols_o; j++) *ca++ = *ba++;
5311: }
5312: PetscCall(MatSeqAIJRestoreArrayWrite(*A_loc, &ca));
5313: } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Invalid MatReuse %d", (int)scall);
5314: PetscCall(MatSeqAIJRestoreArrayRead(Ad, &aa));
5315: PetscCall(MatSeqAIJRestoreArrayRead(Ao, &ba));
5316: if (glob) {
5317: PetscInt cst, *gidx;
5319: PetscCall(MatGetOwnershipRangeColumn(A, &cst, NULL));
5320: PetscCall(PetscMalloc1(dn + on, &gidx));
5321: for (i = 0; i < dn; i++) gidx[i] = cst + i;
5322: for (i = 0; i < on; i++) gidx[i + dn] = cmap[i];
5323: PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)Ad), dn + on, gidx, PETSC_OWN_POINTER, glob));
5324: }
5325: }
5326: PetscCall(PetscLogEventEnd(MAT_Getlocalmat, A, 0, 0, 0));
5327: PetscFunctionReturn(PETSC_SUCCESS);
5328: }
5330: /*@C
5331: MatMPIAIJGetLocalMatCondensed - Creates a `MATSEQAIJ` matrix from a `MATMPIAIJ` matrix by taking all its local rows and NON-ZERO columns
5333: Not Collective
5335: Input Parameters:
5336: + A - the matrix
5337: . scall - either `MAT_INITIAL_MATRIX` or `MAT_REUSE_MATRIX`
5338: . row - index set of rows to extract (or `NULL`)
5339: - col - index set of columns to extract (or `NULL`)
5341: Output Parameter:
5342: . A_loc - the local sequential matrix generated
5344: Level: developer
5346: .seealso: [](ch_matrices), `Mat`, `MATMPIAIJ`, `MatGetOwnershipRange()`, `MatMPIAIJGetLocalMat()`
5347: @*/
5348: PetscErrorCode MatMPIAIJGetLocalMatCondensed(Mat A, MatReuse scall, IS *row, IS *col, Mat *A_loc)
5349: {
5350: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
5351: PetscInt i, start, end, ncols, nzA, nzB, *cmap, imark, *idx;
5352: IS isrowa, iscola;
5353: Mat *aloc;
5354: PetscBool match;
5356: PetscFunctionBegin;
5357: PetscCall(PetscObjectTypeCompare((PetscObject)A, MATMPIAIJ, &match));
5358: PetscCheck(match, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Requires MATMPIAIJ matrix as input");
5359: PetscCall(PetscLogEventBegin(MAT_Getlocalmatcondensed, A, 0, 0, 0));
5360: if (!row) {
5361: start = A->rmap->rstart;
5362: end = A->rmap->rend;
5363: PetscCall(ISCreateStride(PETSC_COMM_SELF, end - start, start, 1, &isrowa));
5364: } else {
5365: isrowa = *row;
5366: }
5367: if (!col) {
5368: start = A->cmap->rstart;
5369: cmap = a->garray;
5370: nzA = a->A->cmap->n;
5371: nzB = a->B->cmap->n;
5372: PetscCall(PetscMalloc1(nzA + nzB, &idx));
5373: ncols = 0;
5374: for (i = 0; i < nzB; i++) {
5375: if (cmap[i] < start) idx[ncols++] = cmap[i];
5376: else break;
5377: }
5378: imark = i;
5379: for (i = 0; i < nzA; i++) idx[ncols++] = start + i;
5380: for (i = imark; i < nzB; i++) idx[ncols++] = cmap[i];
5381: PetscCall(ISCreateGeneral(PETSC_COMM_SELF, ncols, idx, PETSC_OWN_POINTER, &iscola));
5382: } else {
5383: iscola = *col;
5384: }
5385: if (scall != MAT_INITIAL_MATRIX) {
5386: PetscCall(PetscMalloc1(1, &aloc));
5387: aloc[0] = *A_loc;
5388: }
5389: PetscCall(MatCreateSubMatrices(A, 1, &isrowa, &iscola, scall, &aloc));
5390: if (!col) { /* attach global id of condensed columns */
5391: PetscCall(PetscObjectCompose((PetscObject)aloc[0], "_petsc_GetLocalMatCondensed_iscol", (PetscObject)iscola));
5392: }
5393: *A_loc = aloc[0];
5394: PetscCall(PetscFree(aloc));
5395: if (!row) PetscCall(ISDestroy(&isrowa));
5396: if (!col) PetscCall(ISDestroy(&iscola));
5397: PetscCall(PetscLogEventEnd(MAT_Getlocalmatcondensed, A, 0, 0, 0));
5398: PetscFunctionReturn(PETSC_SUCCESS);
5399: }
5401: /*
5402: * Create a sequential AIJ matrix based on row indices. A whole column is extracted once a row is matched.
5403: * Rows could be local or remote. The routine is designed to be scalable in memory so that nothing is based
5404: * on a global size.
5405: * */
5406: static PetscErrorCode MatCreateSeqSubMatrixWithRows_Private(Mat P, IS rows, Mat *P_oth)
5407: {
5408: Mat_MPIAIJ *p = (Mat_MPIAIJ *)P->data;
5409: Mat_SeqAIJ *pd = (Mat_SeqAIJ *)p->A->data, *po = (Mat_SeqAIJ *)p->B->data, *p_oth;
5410: PetscInt plocalsize, nrows, *ilocal, *oilocal, i, lidx, *nrcols, *nlcols, ncol;
5411: PetscMPIInt owner;
5412: PetscSFNode *iremote, *oiremote;
5413: const PetscInt *lrowindices;
5414: PetscSF sf, osf;
5415: PetscInt pcstart, *roffsets, *loffsets, *pnnz, j;
5416: PetscInt ontotalcols, dntotalcols, ntotalcols, nout;
5417: MPI_Comm comm;
5418: ISLocalToGlobalMapping mapping;
5419: const PetscScalar *pd_a, *po_a;
5421: PetscFunctionBegin;
5422: PetscCall(PetscObjectGetComm((PetscObject)P, &comm));
5423: /* plocalsize is the number of roots
5424: * nrows is the number of leaves
5425: * */
5426: PetscCall(MatGetLocalSize(P, &plocalsize, NULL));
5427: PetscCall(ISGetLocalSize(rows, &nrows));
5428: PetscCall(PetscCalloc1(nrows, &iremote));
5429: PetscCall(ISGetIndices(rows, &lrowindices));
5430: for (i = 0; i < nrows; i++) {
5431: /* Find a remote index and an owner for a row
5432: * The row could be local or remote
5433: * */
5434: owner = 0;
5435: lidx = 0;
5436: PetscCall(PetscLayoutFindOwnerIndex(P->rmap, lrowindices[i], &owner, &lidx));
5437: iremote[i].index = lidx;
5438: iremote[i].rank = owner;
5439: }
5440: /* Create SF to communicate how many nonzero columns for each row */
5441: PetscCall(PetscSFCreate(comm, &sf));
5442: /* SF will figure out the number of nonzero columns for each row, and their
5443: * offsets
5444: * */
5445: PetscCall(PetscSFSetGraph(sf, plocalsize, nrows, NULL, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER));
5446: PetscCall(PetscSFSetFromOptions(sf));
5447: PetscCall(PetscSFSetUp(sf));
5449: PetscCall(PetscCalloc1(2 * (plocalsize + 1), &roffsets));
5450: PetscCall(PetscCalloc1(2 * plocalsize, &nrcols));
5451: PetscCall(PetscCalloc1(nrows, &pnnz));
5452: roffsets[0] = 0;
5453: roffsets[1] = 0;
5454: for (i = 0; i < plocalsize; i++) {
5455: /* diagonal */
5456: nrcols[i * 2 + 0] = pd->i[i + 1] - pd->i[i];
5457: /* off-diagonal */
5458: nrcols[i * 2 + 1] = po->i[i + 1] - po->i[i];
5459: /* compute offsets so that we know the relative location for each row */
5460: roffsets[(i + 1) * 2 + 0] = roffsets[i * 2 + 0] + nrcols[i * 2 + 0];
5461: roffsets[(i + 1) * 2 + 1] = roffsets[i * 2 + 1] + nrcols[i * 2 + 1];
5462: }
5463: PetscCall(PetscCalloc1(2 * nrows, &nlcols));
5464: PetscCall(PetscCalloc1(2 * nrows, &loffsets));
5465: /* 'r' means root, and 'l' means leaf */
5466: PetscCall(PetscSFBcastBegin(sf, MPIU_2INT, nrcols, nlcols, MPI_REPLACE));
5467: PetscCall(PetscSFBcastBegin(sf, MPIU_2INT, roffsets, loffsets, MPI_REPLACE));
5468: PetscCall(PetscSFBcastEnd(sf, MPIU_2INT, nrcols, nlcols, MPI_REPLACE));
5469: PetscCall(PetscSFBcastEnd(sf, MPIU_2INT, roffsets, loffsets, MPI_REPLACE));
5470: PetscCall(PetscSFDestroy(&sf));
5471: PetscCall(PetscFree(roffsets));
5472: PetscCall(PetscFree(nrcols));
5473: dntotalcols = 0;
5474: ontotalcols = 0;
5475: ncol = 0;
5476: for (i = 0; i < nrows; i++) {
5477: pnnz[i] = nlcols[i * 2 + 0] + nlcols[i * 2 + 1];
5478: ncol = PetscMax(pnnz[i], ncol);
5479: /* diagonal */
5480: dntotalcols += nlcols[i * 2 + 0];
5481: /* off-diagonal */
5482: ontotalcols += nlcols[i * 2 + 1];
5483: }
5484: /* We do not need to figure out the right number of columns
5485: * since all the calculations will be done by going through the raw data
5486: * */
5487: PetscCall(MatCreateSeqAIJ(PETSC_COMM_SELF, nrows, ncol, 0, pnnz, P_oth));
5488: PetscCall(MatSetUp(*P_oth));
5489: PetscCall(PetscFree(pnnz));
5490: p_oth = (Mat_SeqAIJ *)(*P_oth)->data;
5491: /* diagonal */
5492: PetscCall(PetscCalloc1(dntotalcols, &iremote));
5493: /* off-diagonal */
5494: PetscCall(PetscCalloc1(ontotalcols, &oiremote));
5495: /* diagonal */
5496: PetscCall(PetscCalloc1(dntotalcols, &ilocal));
5497: /* off-diagonal */
5498: PetscCall(PetscCalloc1(ontotalcols, &oilocal));
5499: dntotalcols = 0;
5500: ontotalcols = 0;
5501: ntotalcols = 0;
5502: for (i = 0; i < nrows; i++) {
5503: owner = 0;
5504: PetscCall(PetscLayoutFindOwnerIndex(P->rmap, lrowindices[i], &owner, NULL));
5505: /* Set iremote for diag matrix */
5506: for (j = 0; j < nlcols[i * 2 + 0]; j++) {
5507: iremote[dntotalcols].index = loffsets[i * 2 + 0] + j;
5508: iremote[dntotalcols].rank = owner;
5509: /* P_oth is seqAIJ so that ilocal need to point to the first part of memory */
5510: ilocal[dntotalcols++] = ntotalcols++;
5511: }
5512: /* off-diagonal */
5513: for (j = 0; j < nlcols[i * 2 + 1]; j++) {
5514: oiremote[ontotalcols].index = loffsets[i * 2 + 1] + j;
5515: oiremote[ontotalcols].rank = owner;
5516: oilocal[ontotalcols++] = ntotalcols++;
5517: }
5518: }
5519: PetscCall(ISRestoreIndices(rows, &lrowindices));
5520: PetscCall(PetscFree(loffsets));
5521: PetscCall(PetscFree(nlcols));
5522: PetscCall(PetscSFCreate(comm, &sf));
5523: /* P serves as roots and P_oth serves as leaves
5524: * Diagonal matrix
5525: * */
5526: PetscCall(PetscSFSetGraph(sf, pd->i[plocalsize], dntotalcols, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER));
5527: PetscCall(PetscSFSetFromOptions(sf));
5528: PetscCall(PetscSFSetUp(sf));
5530: PetscCall(PetscSFCreate(comm, &osf));
5531: /* off-diagonal */
5532: PetscCall(PetscSFSetGraph(osf, po->i[plocalsize], ontotalcols, oilocal, PETSC_OWN_POINTER, oiremote, PETSC_OWN_POINTER));
5533: PetscCall(PetscSFSetFromOptions(osf));
5534: PetscCall(PetscSFSetUp(osf));
5535: PetscCall(MatSeqAIJGetArrayRead(p->A, &pd_a));
5536: PetscCall(MatSeqAIJGetArrayRead(p->B, &po_a));
5537: /* operate on the matrix internal data to save memory */
5538: PetscCall(PetscSFBcastBegin(sf, MPIU_SCALAR, pd_a, p_oth->a, MPI_REPLACE));
5539: PetscCall(PetscSFBcastBegin(osf, MPIU_SCALAR, po_a, p_oth->a, MPI_REPLACE));
5540: PetscCall(MatGetOwnershipRangeColumn(P, &pcstart, NULL));
5541: /* Convert to global indices for diag matrix */
5542: for (i = 0; i < pd->i[plocalsize]; i++) pd->j[i] += pcstart;
5543: PetscCall(PetscSFBcastBegin(sf, MPIU_INT, pd->j, p_oth->j, MPI_REPLACE));
5544: /* We want P_oth to store global indices */
5545: PetscCall(ISLocalToGlobalMappingCreate(comm, 1, p->B->cmap->n, p->garray, PETSC_COPY_VALUES, &mapping));
5546: /* Use a memory-scalable approach */
5547: PetscCall(ISLocalToGlobalMappingSetType(mapping, ISLOCALTOGLOBALMAPPINGHASH));
5548: PetscCall(ISLocalToGlobalMappingApply(mapping, po->i[plocalsize], po->j, po->j));
5549: PetscCall(PetscSFBcastBegin(osf, MPIU_INT, po->j, p_oth->j, MPI_REPLACE));
5550: PetscCall(PetscSFBcastEnd(sf, MPIU_INT, pd->j, p_oth->j, MPI_REPLACE));
5551: /* Convert back to local indices */
5552: for (i = 0; i < pd->i[plocalsize]; i++) pd->j[i] -= pcstart;
5553: PetscCall(PetscSFBcastEnd(osf, MPIU_INT, po->j, p_oth->j, MPI_REPLACE));
5554: nout = 0;
5555: PetscCall(ISGlobalToLocalMappingApply(mapping, IS_GTOLM_DROP, po->i[plocalsize], po->j, &nout, po->j));
5556: PetscCheck(nout == po->i[plocalsize], comm, PETSC_ERR_ARG_INCOMP, "n %" PetscInt_FMT " does not equal nout %" PetscInt_FMT, po->i[plocalsize], nout);
5557: PetscCall(ISLocalToGlobalMappingDestroy(&mapping));
5558: /* Exchange values */
5559: PetscCall(PetscSFBcastEnd(sf, MPIU_SCALAR, pd_a, p_oth->a, MPI_REPLACE));
5560: PetscCall(PetscSFBcastEnd(osf, MPIU_SCALAR, po_a, p_oth->a, MPI_REPLACE));
5561: PetscCall(MatSeqAIJRestoreArrayRead(p->A, &pd_a));
5562: PetscCall(MatSeqAIJRestoreArrayRead(p->B, &po_a));
5563: /* Stop PETSc from shrinking memory */
5564: for (i = 0; i < nrows; i++) p_oth->ilen[i] = p_oth->imax[i];
5565: PetscCall(MatAssemblyBegin(*P_oth, MAT_FINAL_ASSEMBLY));
5566: PetscCall(MatAssemblyEnd(*P_oth, MAT_FINAL_ASSEMBLY));
5567: /* Attach PetscSF objects to P_oth so that we can reuse them later */
5568: PetscCall(PetscObjectCompose((PetscObject)*P_oth, "diagsf", (PetscObject)sf));
5569: PetscCall(PetscObjectCompose((PetscObject)*P_oth, "offdiagsf", (PetscObject)osf));
5570: PetscCall(PetscSFDestroy(&sf));
5571: PetscCall(PetscSFDestroy(&osf));
5572: PetscFunctionReturn(PETSC_SUCCESS);
5573: }
5575: /*
5576: * Creates a SeqAIJ matrix by taking the rows of P that correspond to the nonzero columns of local A
5577: * This supports MPIAIJ and MAIJ
5578: * */
5579: PetscErrorCode MatGetBrowsOfAcols_MPIXAIJ(Mat A, Mat P, PetscInt dof, MatReuse reuse, Mat *P_oth)
5580: {
5581: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data, *p = (Mat_MPIAIJ *)P->data;
5582: Mat_SeqAIJ *p_oth;
5583: IS rows, map;
5584: PetscHMapI hamp;
5585: PetscInt i, htsize, *rowindices, off, *mapping, key, count;
5586: MPI_Comm comm;
5587: PetscSF sf, osf;
5588: PetscBool has;
5590: PetscFunctionBegin;
5591: PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
5592: PetscCall(PetscLogEventBegin(MAT_GetBrowsOfAocols, A, P, 0, 0));
5593: /* If it is the first time, create an index set of off-diag nonzero columns of A,
5594: * and then create a submatrix (that often is an overlapping matrix)
5595: * */
5596: if (reuse == MAT_INITIAL_MATRIX) {
5597: /* Use a hash table to figure out unique keys */
5598: PetscCall(PetscHMapICreateWithSize(a->B->cmap->n, &hamp));
5599: PetscCall(PetscCalloc1(a->B->cmap->n, &mapping));
5600: count = 0;
5601: /* Assume that a->garray is sorted; otherwise the following does not make sense */
5602: for (i = 0; i < a->B->cmap->n; i++) {
5603: key = a->garray[i] / dof;
5604: PetscCall(PetscHMapIHas(hamp, key, &has));
5605: if (!has) {
5606: mapping[i] = count;
5607: PetscCall(PetscHMapISet(hamp, key, count++));
5608: } else {
5609: /* Current 'i' has the same key as the previous one */
5610: mapping[i] = count - 1;
5611: }
5612: }
5613: PetscCall(ISCreateGeneral(comm, a->B->cmap->n, mapping, PETSC_OWN_POINTER, &map));
5614: PetscCall(PetscHMapIGetSize(hamp, &htsize));
5615: PetscCheck(htsize == count, comm, PETSC_ERR_ARG_INCOMP, " Size of hash map %" PetscInt_FMT " is inconsistent with count %" PetscInt_FMT, htsize, count);
5616: PetscCall(PetscCalloc1(htsize, &rowindices));
5617: off = 0;
5618: PetscCall(PetscHMapIGetKeys(hamp, &off, rowindices));
5619: PetscCall(PetscHMapIDestroy(&hamp));
5620: PetscCall(PetscSortInt(htsize, rowindices));
5621: PetscCall(ISCreateGeneral(comm, htsize, rowindices, PETSC_OWN_POINTER, &rows));
5622: /* In case the matrix was already created but the user wants to recreate it */
5623: PetscCall(MatDestroy(P_oth));
5624: PetscCall(MatCreateSeqSubMatrixWithRows_Private(P, rows, P_oth));
5625: PetscCall(PetscObjectCompose((PetscObject)*P_oth, "aoffdiagtopothmapping", (PetscObject)map));
5626: PetscCall(ISDestroy(&map));
5627: PetscCall(ISDestroy(&rows));
5628: } else if (reuse == MAT_REUSE_MATRIX) {
5629: /* If the matrix was already created, we simply update the values using the SF objects
5630: * that were attached to the matrix earlier.
5631: */
5632: const PetscScalar *pd_a, *po_a;
5634: PetscCall(PetscObjectQuery((PetscObject)*P_oth, "diagsf", (PetscObject *)&sf));
5635: PetscCall(PetscObjectQuery((PetscObject)*P_oth, "offdiagsf", (PetscObject *)&osf));
5636: PetscCheck(sf && osf, comm, PETSC_ERR_ARG_NULL, "Matrix is not initialized yet");
5637: p_oth = (Mat_SeqAIJ *)(*P_oth)->data;
5638: /* Update values in place */
5639: PetscCall(MatSeqAIJGetArrayRead(p->A, &pd_a));
5640: PetscCall(MatSeqAIJGetArrayRead(p->B, &po_a));
5641: PetscCall(PetscSFBcastBegin(sf, MPIU_SCALAR, pd_a, p_oth->a, MPI_REPLACE));
5642: PetscCall(PetscSFBcastBegin(osf, MPIU_SCALAR, po_a, p_oth->a, MPI_REPLACE));
5643: PetscCall(PetscSFBcastEnd(sf, MPIU_SCALAR, pd_a, p_oth->a, MPI_REPLACE));
5644: PetscCall(PetscSFBcastEnd(osf, MPIU_SCALAR, po_a, p_oth->a, MPI_REPLACE));
5645: PetscCall(MatSeqAIJRestoreArrayRead(p->A, &pd_a));
5646: PetscCall(MatSeqAIJRestoreArrayRead(p->B, &po_a));
5647: } else SETERRQ(comm, PETSC_ERR_ARG_UNKNOWN_TYPE, "Unknown reuse type");
5648: PetscCall(PetscLogEventEnd(MAT_GetBrowsOfAocols, A, P, 0, 0));
5649: PetscFunctionReturn(PETSC_SUCCESS);
5650: }
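/*
  Illustrative call pattern (a sketch of assumed caller code, not taken from this file): P_oth is
  built once with MAT_INITIAL_MATRIX, which composes the "diagsf"/"offdiagsf" PetscSF objects onto
  it; later calls with MAT_REUSE_MATRIX only refresh the numerical values through those SFs.

    Mat P_oth = NULL;
    PetscCall(MatGetBrowsOfAcols_MPIXAIJ(A, P, 1, MAT_INITIAL_MATRIX, &P_oth)); // dof = 1 assumed
    // ... P's values change, its nonzero pattern does not ...
    PetscCall(MatGetBrowsOfAcols_MPIXAIJ(A, P, 1, MAT_REUSE_MATRIX, &P_oth));
    PetscCall(MatDestroy(&P_oth));
*/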
5652: /*@C
5653: MatGetBrowsOfAcols - Returns an `IS` containing the rows of `B` that correspond to the nonzero columns of the local `A`
5655: Collective
5657: Input Parameters:
5658: + A - the first matrix in `MATMPIAIJ` format
5659: . B - the second matrix in `MATMPIAIJ` format
5660: - scall - either `MAT_INITIAL_MATRIX` or `MAT_REUSE_MATRIX`
5662: Output Parameters:
5663: + rowb - On input, the index set of rows of B to extract (or `NULL`); modified on output
5664: . colb - On input, the index set of columns of B to extract (or `NULL`); modified on output
5665: - B_seq - the sequential matrix generated
5667: Level: developer
5669: .seealso: `Mat`, `MATMPIAIJ`, `IS`, `MatReuse`
5670: @*/
5671: PetscErrorCode MatGetBrowsOfAcols(Mat A, Mat B, MatReuse scall, IS *rowb, IS *colb, Mat *B_seq)
5672: {
5673: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
5674: PetscInt *idx, i, start, ncols, nzA, nzB, *cmap, imark;
5675: IS isrowb, iscolb;
5676: Mat *bseq = NULL;
5678: PetscFunctionBegin;
5679: PetscCheck(A->cmap->rstart == B->rmap->rstart && A->cmap->rend == B->rmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ",%" PetscInt_FMT ")",
5680: A->cmap->rstart, A->cmap->rend, B->rmap->rstart, B->rmap->rend);
5681: PetscCall(PetscLogEventBegin(MAT_GetBrowsOfAcols, A, B, 0, 0));
5683: if (scall == MAT_INITIAL_MATRIX) {
5684: start = A->cmap->rstart;
5685: cmap = a->garray;
5686: nzA = a->A->cmap->n;
5687: nzB = a->B->cmap->n;
5688: PetscCall(PetscMalloc1(nzA + nzB, &idx));
5689: ncols = 0;
5690: for (i = 0; i < nzB; i++) { /* row < local row index */
5691: if (cmap[i] < start) idx[ncols++] = cmap[i];
5692: else break;
5693: }
5694: imark = i;
5695: for (i = 0; i < nzA; i++) idx[ncols++] = start + i; /* local rows */
5696: for (i = imark; i < nzB; i++) idx[ncols++] = cmap[i]; /* row > local row index */
5697: PetscCall(ISCreateGeneral(PETSC_COMM_SELF, ncols, idx, PETSC_OWN_POINTER, &isrowb));
5698: PetscCall(ISCreateStride(PETSC_COMM_SELF, B->cmap->N, 0, 1, &iscolb));
5699: } else {
5700: PetscCheck(rowb && colb, PETSC_COMM_SELF, PETSC_ERR_SUP, "IS rowb and colb must be provided for MAT_REUSE_MATRIX");
5701: isrowb = *rowb;
5702: iscolb = *colb;
5703: PetscCall(PetscMalloc1(1, &bseq));
5704: bseq[0] = *B_seq;
5705: }
5706: PetscCall(MatCreateSubMatrices(B, 1, &isrowb, &iscolb, scall, &bseq));
5707: *B_seq = bseq[0];
5708: PetscCall(PetscFree(bseq));
5709: if (!rowb) {
5710: PetscCall(ISDestroy(&isrowb));
5711: } else {
5712: *rowb = isrowb;
5713: }
5714: if (!colb) {
5715: PetscCall(ISDestroy(&iscolb));
5716: } else {
5717: *colb = iscolb;
5718: }
5719: PetscCall(PetscLogEventEnd(MAT_GetBrowsOfAcols, A, B, 0, 0));
5720: PetscFunctionReturn(PETSC_SUCCESS);
5721: }
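/*
  Illustrative usage (assumed caller code, not from this file): the index sets and sequential matrix
  built by the MAT_INITIAL_MATRIX call can be handed back for MAT_REUSE_MATRIX so only values are refreshed.

    IS  rowb = NULL, colb = NULL;
    Mat B_seq = NULL;
    PetscCall(MatGetBrowsOfAcols(A, B, MAT_INITIAL_MATRIX, &rowb, &colb, &B_seq));
    // ... B's values change, sparsity pattern unchanged ...
    PetscCall(MatGetBrowsOfAcols(A, B, MAT_REUSE_MATRIX, &rowb, &colb, &B_seq));
    PetscCall(ISDestroy(&rowb));
    PetscCall(ISDestroy(&colb));
    PetscCall(MatDestroy(&B_seq));
*/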
5723: /*
5724: MatGetBrowsOfAoCols_MPIAIJ - Creates a `MATSEQAIJ` matrix by taking the rows of B that correspond to the nonzero columns
5725: of the OFF-DIAGONAL portion of the local A
5727: Collective
5729: Input Parameters:
5730: + A,B - the matrices in `MATMPIAIJ` format
5731: - scall - either `MAT_INITIAL_MATRIX` or `MAT_REUSE_MATRIX`
5733: Output Parameters:
5734: + startsj_s - starting point in B's sending j-arrays, saved for MAT_REUSE (or NULL)
5735: . startsj_r - starting point in B's receiving j-arrays, saved for MAT_REUSE (or NULL)
5736: . bufa_ptr - array for sending matrix values, saved for MAT_REUSE (or NULL)
5737: - B_oth - the sequential matrix generated with size aBn=a->B->cmap->n by B->cmap->N
5739: Developer Note:
5740: This directly accesses information inside the VecScatter associated with the matrix-vector product
5741: for this matrix. This is not desirable.
5743: Level: developer
5745: */
5747: PetscErrorCode MatGetBrowsOfAoCols_MPIAIJ(Mat A, Mat B, MatReuse scall, PetscInt **startsj_s, PetscInt **startsj_r, MatScalar **bufa_ptr, Mat *B_oth)
5748: {
5749: Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
5750: VecScatter ctx;
5751: MPI_Comm comm;
5752: const PetscMPIInt *rprocs, *sprocs;
5753: PetscMPIInt nrecvs, nsends;
5754: const PetscInt *srow, *rstarts, *sstarts;
5755: PetscInt *rowlen, *bufj, *bufJ, ncols = 0, aBn = a->B->cmap->n, row, *b_othi, *b_othj, *rvalues = NULL, *svalues = NULL, *cols, sbs, rbs;
5756: PetscInt i, j, k = 0, l, ll, nrows, *rstartsj = NULL, *sstartsj, len;
5757: PetscScalar *b_otha, *bufa, *bufA, *vals = NULL;
5758: MPI_Request *reqs = NULL, *rwaits = NULL, *swaits = NULL;
5759: PetscMPIInt size, tag, rank, nreqs;
5761: PetscFunctionBegin;
5762: PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
5763: PetscCallMPI(MPI_Comm_size(comm, &size));
5765: PetscCheck(A->cmap->rstart == B->rmap->rstart && A->cmap->rend == B->rmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ",%" PetscInt_FMT ")",
5766: A->cmap->rstart, A->cmap->rend, B->rmap->rstart, B->rmap->rend);
5767: PetscCall(PetscLogEventBegin(MAT_GetBrowsOfAocols, A, B, 0, 0));
5768: PetscCallMPI(MPI_Comm_rank(comm, &rank));
5770: if (size == 1) {
5771: startsj_s = NULL;
5772: bufa_ptr = NULL;
5773: *B_oth = NULL;
5774: PetscFunctionReturn(PETSC_SUCCESS);
5775: }
5777: ctx = a->Mvctx;
5778: tag = ((PetscObject)ctx)->tag;
5780: PetscCall(VecScatterGetRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &srow, &sprocs, &sbs));
5781: /* rprocs[] must be ordered so that indices received from them are ordered in rvalues[], which is key to algorithms used in this subroutine */
5782: PetscCall(VecScatterGetRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL /*indices not needed*/, &rprocs, &rbs));
5783: PetscCall(PetscMPIIntCast(nsends + nrecvs, &nreqs));
5784: PetscCall(PetscMalloc1(nreqs, &reqs));
5785: rwaits = reqs;
5786: swaits = PetscSafePointerPlusOffset(reqs, nrecvs);
5788: if (!startsj_s || !bufa_ptr) scall = MAT_INITIAL_MATRIX;
5789: if (scall == MAT_INITIAL_MATRIX) {
5790: /* i-array */
5791: /* post receives */
5792: if (nrecvs) PetscCall(PetscMalloc1(rbs * (rstarts[nrecvs] - rstarts[0]), &rvalues)); /* rstarts can be NULL when nrecvs=0 */
5793: for (i = 0; i < nrecvs; i++) {
5794: rowlen = rvalues + rstarts[i] * rbs;
5795: nrows = (rstarts[i + 1] - rstarts[i]) * rbs; /* num of indices to be received */
5796: PetscCallMPI(MPIU_Irecv(rowlen, nrows, MPIU_INT, rprocs[i], tag, comm, rwaits + i));
5797: }
5799: /* pack the outgoing message */
5800: PetscCall(PetscMalloc2(nsends + 1, &sstartsj, nrecvs + 1, &rstartsj));
5802: sstartsj[0] = 0;
5803: rstartsj[0] = 0;
5804: len = 0; /* total length of j or a array to be sent */
5805: if (nsends) {
5806: k = sstarts[0]; /* ATTENTION: sstarts[0] and rstarts[0] are not necessarily zero */
5807: PetscCall(PetscMalloc1(sbs * (sstarts[nsends] - sstarts[0]), &svalues));
5808: }
5809: for (i = 0; i < nsends; i++) {
5810: rowlen = svalues + (sstarts[i] - sstarts[0]) * sbs;
5811: nrows = sstarts[i + 1] - sstarts[i]; /* num of block rows */
5812: for (j = 0; j < nrows; j++) {
5813: row = srow[k] + B->rmap->range[rank]; /* global row idx */
5814: for (l = 0; l < sbs; l++) {
5815: PetscCall(MatGetRow_MPIAIJ(B, row + l, &ncols, NULL, NULL)); /* rowlength */
5817: rowlen[j * sbs + l] = ncols;
5819: len += ncols;
5820: PetscCall(MatRestoreRow_MPIAIJ(B, row + l, &ncols, NULL, NULL));
5821: }
5822: k++;
5823: }
5824: PetscCallMPI(MPIU_Isend(rowlen, nrows * sbs, MPIU_INT, sprocs[i], tag, comm, swaits + i));
5826: sstartsj[i + 1] = len; /* starting point of (i+1)-th outgoing msg in bufj and bufa */
5827: }
5828: /* recvs and sends of i-array are completed */
5829: if (nreqs) PetscCallMPI(MPI_Waitall(nreqs, reqs, MPI_STATUSES_IGNORE));
5830: PetscCall(PetscFree(svalues));
5832: /* allocate buffers for sending j and a arrays */
5833: PetscCall(PetscMalloc1(len, &bufj));
5834: PetscCall(PetscMalloc1(len, &bufa));
5836: /* create i-array of B_oth */
5837: PetscCall(PetscMalloc1(aBn + 1, &b_othi));
5839: b_othi[0] = 0;
5840: len = 0; /* total length of j or a array to be received */
5841: k = 0;
5842: for (i = 0; i < nrecvs; i++) {
5843: rowlen = rvalues + (rstarts[i] - rstarts[0]) * rbs;
5844: nrows = (rstarts[i + 1] - rstarts[i]) * rbs; /* num of rows to be received */
5845: for (j = 0; j < nrows; j++) {
5846: b_othi[k + 1] = b_othi[k] + rowlen[j];
5847: PetscCall(PetscIntSumError(rowlen[j], len, &len));
5848: k++;
5849: }
5850: rstartsj[i + 1] = len; /* starting point of (i+1)-th incoming msg in bufj and bufa */
5851: }
5852: PetscCall(PetscFree(rvalues));
5854: /* allocate space for j and a arrays of B_oth */
5855: PetscCall(PetscMalloc1(b_othi[aBn], &b_othj));
5856: PetscCall(PetscMalloc1(b_othi[aBn], &b_otha));
5858: /* j-array */
5859: /* post receives of j-array */
5860: for (i = 0; i < nrecvs; i++) {
5861: nrows = rstartsj[i + 1] - rstartsj[i]; /* length of the msg received */
5862: PetscCallMPI(MPIU_Irecv(PetscSafePointerPlusOffset(b_othj, rstartsj[i]), nrows, MPIU_INT, rprocs[i], tag, comm, rwaits + i));
5863: }
5865: /* pack the outgoing message j-array */
5866: if (nsends) k = sstarts[0];
5867: for (i = 0; i < nsends; i++) {
5868: nrows = sstarts[i + 1] - sstarts[i]; /* num of block rows */
5869: bufJ = PetscSafePointerPlusOffset(bufj, sstartsj[i]);
5870: for (j = 0; j < nrows; j++) {
5871: row = srow[k++] + B->rmap->range[rank]; /* global row idx */
5872: for (ll = 0; ll < sbs; ll++) {
5873: PetscCall(MatGetRow_MPIAIJ(B, row + ll, &ncols, &cols, NULL));
5874: for (l = 0; l < ncols; l++) *bufJ++ = cols[l];
5875: PetscCall(MatRestoreRow_MPIAIJ(B, row + ll, &ncols, &cols, NULL));
5876: }
5877: }
5878: PetscCallMPI(MPIU_Isend(PetscSafePointerPlusOffset(bufj, sstartsj[i]), sstartsj[i + 1] - sstartsj[i], MPIU_INT, sprocs[i], tag, comm, swaits + i));
5879: }
5881: /* recvs and sends of j-array are completed */
5882: if (nreqs) PetscCallMPI(MPI_Waitall(nreqs, reqs, MPI_STATUSES_IGNORE));
5883: } else if (scall == MAT_REUSE_MATRIX) {
5884: sstartsj = *startsj_s;
5885: rstartsj = *startsj_r;
5886: bufa = *bufa_ptr;
5887: PetscCall(MatSeqAIJGetArrayWrite(*B_oth, &b_otha));
5888: } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Unsupported MatReuse value");
5890: /* a-array */
5891: /* post receives of a-array */
5892: for (i = 0; i < nrecvs; i++) {
5893: nrows = rstartsj[i + 1] - rstartsj[i]; /* length of the msg received */
5894: PetscCallMPI(MPIU_Irecv(PetscSafePointerPlusOffset(b_otha, rstartsj[i]), nrows, MPIU_SCALAR, rprocs[i], tag, comm, rwaits + i));
5895: }
5897: /* pack the outgoing message a-array */
5898: if (nsends) k = sstarts[0];
5899: for (i = 0; i < nsends; i++) {
5900: nrows = sstarts[i + 1] - sstarts[i]; /* num of block rows */
5901: bufA = PetscSafePointerPlusOffset(bufa, sstartsj[i]);
5902: for (j = 0; j < nrows; j++) {
5903: row = srow[k++] + B->rmap->range[rank]; /* global row idx */
5904: for (ll = 0; ll < sbs; ll++) {
5905: PetscCall(MatGetRow_MPIAIJ(B, row + ll, &ncols, NULL, &vals));
5906: for (l = 0; l < ncols; l++) *bufA++ = vals[l];
5907: PetscCall(MatRestoreRow_MPIAIJ(B, row + ll, &ncols, NULL, &vals));
5908: }
5909: }
5910: PetscCallMPI(MPIU_Isend(PetscSafePointerPlusOffset(bufa, sstartsj[i]), sstartsj[i + 1] - sstartsj[i], MPIU_SCALAR, sprocs[i], tag, comm, swaits + i));
5911: }
5912: /* recvs and sends of a-array are completed */
5913: if (nreqs) PetscCallMPI(MPI_Waitall(nreqs, reqs, MPI_STATUSES_IGNORE));
5914: PetscCall(PetscFree(reqs));
5916: if (scall == MAT_INITIAL_MATRIX) {
5917: Mat_SeqAIJ *b_oth;
5919: /* put together the new matrix */
5920: PetscCall(MatCreateSeqAIJWithArrays(PETSC_COMM_SELF, aBn, B->cmap->N, b_othi, b_othj, b_otha, B_oth));
5922: /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
5923: /* Since these are PETSc arrays, change flags to free them as necessary. */
5924: b_oth = (Mat_SeqAIJ *)(*B_oth)->data;
5925: b_oth->free_a = PETSC_TRUE;
5926: b_oth->free_ij = PETSC_TRUE;
5927: b_oth->nonew = 0;
5929: PetscCall(PetscFree(bufj));
5930: if (!startsj_s || !bufa_ptr) {
5931: PetscCall(PetscFree2(sstartsj, rstartsj));
5932: PetscCall(PetscFree(bufa_ptr));
5933: } else {
5934: *startsj_s = sstartsj;
5935: *startsj_r = rstartsj;
5936: *bufa_ptr = bufa;
5937: }
5938: } else if (scall == MAT_REUSE_MATRIX) {
5939: PetscCall(MatSeqAIJRestoreArrayWrite(*B_oth, &b_otha));
5940: }
5942: PetscCall(VecScatterRestoreRemote_Private(ctx, PETSC_TRUE, &nsends, &sstarts, &srow, &sprocs, &sbs));
5943: PetscCall(VecScatterRestoreRemoteOrdered_Private(ctx, PETSC_FALSE, &nrecvs, &rstarts, NULL, &rprocs, &rbs));
5944: PetscCall(PetscLogEventEnd(MAT_GetBrowsOfAocols, A, B, 0, 0));
5945: PetscFunctionReturn(PETSC_SUCCESS);
5946: }
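/*
  Illustrative usage (assumed caller code, not from this file): startsj_s/startsj_r/bufa returned by
  the MAT_INITIAL_MATRIX call record the communication layout, so a MAT_REUSE_MATRIX call exchanges
  only the numerical values.

    PetscInt  *startsj_s = NULL, *startsj_r = NULL;
    MatScalar *bufa = NULL;
    Mat        B_oth = NULL;
    PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, B, MAT_INITIAL_MATRIX, &startsj_s, &startsj_r, &bufa, &B_oth));
    // ... B's values change, sparsity pattern unchanged ...
    PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, B, MAT_REUSE_MATRIX, &startsj_s, &startsj_r, &bufa, &B_oth));
    PetscCall(PetscFree2(startsj_s, startsj_r)); // matches the PetscMalloc2() used above; real callers may differ
    PetscCall(PetscFree(bufa));
    PetscCall(MatDestroy(&B_oth));
*/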
5948: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJCRL(Mat, MatType, MatReuse, Mat *);
5949: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJPERM(Mat, MatType, MatReuse, Mat *);
5950: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJSELL(Mat, MatType, MatReuse, Mat *);
5951: #if defined(PETSC_HAVE_MKL_SPARSE)
5952: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJMKL(Mat, MatType, MatReuse, Mat *);
5953: #endif
5954: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIBAIJ(Mat, MatType, MatReuse, Mat *);
5955: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPISBAIJ(Mat, MatType, MatReuse, Mat *);
5956: #if defined(PETSC_HAVE_ELEMENTAL)
5957: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_Elemental(Mat, MatType, MatReuse, Mat *);
5958: #endif
5959: #if defined(PETSC_HAVE_SCALAPACK) && (defined(PETSC_USE_REAL_SINGLE) || defined(PETSC_USE_REAL_DOUBLE))
5960: PETSC_INTERN PetscErrorCode MatConvert_AIJ_ScaLAPACK(Mat, MatType, MatReuse, Mat *);
5961: #endif
5962: #if defined(PETSC_HAVE_HYPRE)
5963: PETSC_INTERN PetscErrorCode MatConvert_AIJ_HYPRE(Mat, MatType, MatReuse, Mat *);
5964: #endif
5965: #if defined(PETSC_HAVE_CUDA)
5966: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJCUSPARSE(Mat, MatType, MatReuse, Mat *);
5967: #endif
5968: #if defined(PETSC_HAVE_HIP)
5969: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJHIPSPARSE(Mat, MatType, MatReuse, Mat *);
5970: #endif
5971: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
5972: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJKokkos(Mat, MatType, MatReuse, Mat *);
5973: #endif
5974: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPISELL(Mat, MatType, MatReuse, Mat *);
5975: PETSC_INTERN PetscErrorCode MatConvert_XAIJ_IS(Mat, MatType, MatReuse, Mat *);
5976: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_IS_XAIJ(Mat);
5978: /*
5979: Computes (B'*A')' since computing A*B directly is untenable
5981:                n                     p                     p
5982:         [             ]       [             ]       [             ]
5983:       m [      A      ]  *  n [      B      ]  =  m [      C      ]
5984:         [             ]       [             ]       [             ]
5986: */
5987: static PetscErrorCode MatMatMultNumeric_MPIDense_MPIAIJ(Mat A, Mat B, Mat C)
5988: {
5989: Mat At, Bt, Ct;
5991: PetscFunctionBegin;
5992: PetscCall(MatTranspose(A, MAT_INITIAL_MATRIX, &At));
5993: PetscCall(MatTranspose(B, MAT_INITIAL_MATRIX, &Bt));
5994: PetscCall(MatMatMult(Bt, At, MAT_INITIAL_MATRIX, PETSC_CURRENT, &Ct));
5995: PetscCall(MatDestroy(&At));
5996: PetscCall(MatDestroy(&Bt));
5997: PetscCall(MatTransposeSetPrecursor(Ct, C));
5998: PetscCall(MatTranspose(Ct, MAT_REUSE_MATRIX, &C));
5999: PetscCall(MatDestroy(&Ct));
6000: PetscFunctionReturn(PETSC_SUCCESS);
6001: }
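/*
  Note added for clarity: the routine above relies on the identity C = A*B = (B'*A')', so the
  dense-times-sparse product is obtained from the available sparse-times-dense kernel applied to the
  transposes, at the cost of the three explicit MatTranspose() calls shown.
*/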
6003: static PetscErrorCode MatMatMultSymbolic_MPIDense_MPIAIJ(Mat A, Mat B, PetscReal fill, Mat C)
6004: {
6005: PetscBool cisdense;
6007: PetscFunctionBegin;
6008: PetscCheck(A->cmap->n == B->rmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "A->cmap->n %" PetscInt_FMT " != B->rmap->n %" PetscInt_FMT, A->cmap->n, B->rmap->n);
6009: PetscCall(MatSetSizes(C, A->rmap->n, B->cmap->n, A->rmap->N, B->cmap->N));
6010: PetscCall(MatSetBlockSizesFromMats(C, A, B));
6011: PetscCall(PetscObjectTypeCompareAny((PetscObject)C, &cisdense, MATMPIDENSE, MATMPIDENSECUDA, MATMPIDENSEHIP, ""));
6012: if (!cisdense) PetscCall(MatSetType(C, ((PetscObject)A)->type_name));
6013: PetscCall(MatSetUp(C));
6015: C->ops->matmultnumeric = MatMatMultNumeric_MPIDense_MPIAIJ;
6016: PetscFunctionReturn(PETSC_SUCCESS);
6017: }
6019: static PetscErrorCode MatProductSetFromOptions_MPIDense_MPIAIJ_AB(Mat C)
6020: {
6021: Mat_Product *product = C->product;
6022: Mat A = product->A, B = product->B;
6024: PetscFunctionBegin;
6025: PetscCheck(A->cmap->rstart == B->rmap->rstart && A->cmap->rend == B->rmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ",%" PetscInt_FMT ")",
6026: A->cmap->rstart, A->cmap->rend, B->rmap->rstart, B->rmap->rend);
6027: C->ops->matmultsymbolic = MatMatMultSymbolic_MPIDense_MPIAIJ;
6028: C->ops->productsymbolic = MatProductSymbolic_AB;
6029: PetscFunctionReturn(PETSC_SUCCESS);
6030: }
6032: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIDense_MPIAIJ(Mat C)
6033: {
6034: Mat_Product *product = C->product;
6036: PetscFunctionBegin;
6037: if (product->type == MATPRODUCT_AB) PetscCall(MatProductSetFromOptions_MPIDense_MPIAIJ_AB(C));
6038: PetscFunctionReturn(PETSC_SUCCESS);
6039: }
6041: /*
6042: Merge two sets of sorted nonzeros and return a CSR for the merged (sequential) matrix
6044: Input Parameters:
6046: j1,rowBegin1,rowEnd1,jmap1: describe the first set of nonzeros (Set1)
6047: j2,rowBegin2,rowEnd2,jmap2: describe the second set of nonzeros (Set2)
6049: mat: both sets' nonzeros are on m rows, where m is the number of local rows of the matrix mat
6051: For Set1, j1[] contains column indices of the nonzeros.
6052: For the k-th row (0<=k<m), [rowBegin1[k],rowEnd1[k]) index into j1[] and point to the begin/end nonzero in row k
6053: respectively (note rowEnd1[k] is not necessarily equal to rowBegin1[k+1]). Indices in this range of j1[] are sorted,
6054: but might have repeats. jmap1[t+1] - jmap1[t] is the number of repeats for the t-th unique nonzero in Set1.
6056: Similar for Set2.
6058: This routine merges the two sets of nonzeros row by row and removes repeats.
6060: Output Parameters: (memory is allocated by the caller)
6062: i[],j[]: the CSR of the merged matrix, which has m rows.
6063: imap1[]: the k-th unique nonzero in Set1 (k=0,1,...) corresponds to imap1[k]-th unique nonzero in the merged matrix.
6064: imap2[]: similar to imap1[], but for Set2.
6065: Note we order nonzeros row-by-row and from left to right.
6066: */
6067: static PetscErrorCode MatMergeEntries_Internal(Mat mat, const PetscInt j1[], const PetscInt j2[], const PetscCount rowBegin1[], const PetscCount rowEnd1[], const PetscCount rowBegin2[], const PetscCount rowEnd2[], const PetscCount jmap1[], const PetscCount jmap2[], PetscCount imap1[], PetscCount imap2[], PetscInt i[], PetscInt j[])
6068: {
6069: PetscInt r, m; /* Row index of mat */
6070: PetscCount t, t1, t2, b1, e1, b2, e2;
6072: PetscFunctionBegin;
6073: PetscCall(MatGetLocalSize(mat, &m, NULL));
6074: t1 = t2 = t = 0; /* Count unique nonzeros in Set1, Set2 and the merged matrix, respectively */
6075: i[0] = 0;
6076: for (r = 0; r < m; r++) { /* Do row by row merging */
6077: b1 = rowBegin1[r];
6078: e1 = rowEnd1[r];
6079: b2 = rowBegin2[r];
6080: e2 = rowEnd2[r];
6081: while (b1 < e1 && b2 < e2) {
6082: if (j1[b1] == j2[b2]) { /* Same column index and hence same nonzero */
6083: j[t] = j1[b1];
6084: imap1[t1] = t;
6085: imap2[t2] = t;
6086: b1 += jmap1[t1 + 1] - jmap1[t1]; /* Jump to next unique local nonzero */
6087: b2 += jmap2[t2 + 1] - jmap2[t2]; /* Jump to next unique remote nonzero */
6088: t1++;
6089: t2++;
6090: t++;
6091: } else if (j1[b1] < j2[b2]) {
6092: j[t] = j1[b1];
6093: imap1[t1] = t;
6094: b1 += jmap1[t1 + 1] - jmap1[t1];
6095: t1++;
6096: t++;
6097: } else {
6098: j[t] = j2[b2];
6099: imap2[t2] = t;
6100: b2 += jmap2[t2 + 1] - jmap2[t2];
6101: t2++;
6102: t++;
6103: }
6104: }
6105: /* Merge the remaining in either j1[] or j2[] */
6106: while (b1 < e1) {
6107: j[t] = j1[b1];
6108: imap1[t1] = t;
6109: b1 += jmap1[t1 + 1] - jmap1[t1];
6110: t1++;
6111: t++;
6112: }
6113: while (b2 < e2) {
6114: j[t] = j2[b2];
6115: imap2[t2] = t;
6116: b2 += jmap2[t2 + 1] - jmap2[t2];
6117: t2++;
6118: t++;
6119: }
6120: PetscCall(PetscIntCast(t, i + r + 1));
6121: }
6122: PetscFunctionReturn(PETSC_SUCCESS);
6123: }
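/*
  Worked example for MatMergeEntries_Internal() (added for illustration, not from the original source),
  with a single local row (m = 1):
    Set1: j1 = [2,2,5], rowBegin1 = [0], rowEnd1 = [3], jmap1 = [0,2,3]  (column 2 twice, column 5 once)
    Set2: j2 = [3,5,5], rowBegin2 = [0], rowEnd2 = [3], jmap2 = [0,1,3]  (column 3 once, column 5 twice)
  The merge yields
    i = [0,3], j = [2,3,5], imap1 = [0,2], imap2 = [1,2]
  i.e., Set1's unique nonzeros become entries 0 and 2 of the merged row and Set2's become entries 1 and 2.
*/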
6125: /*
6126: Split nonzeros in a block of local rows into two subsets: those in the diagonal block and those in the off-diagonal block
6128: Input Parameters:
6129: mat: an MPI matrix that provides row and column layout information for splitting. Let's say its number of local rows is m.
6130: n,i[],j[],perm[]: there are n input entries, belonging to m rows. Row/col indices of the entries are stored in i[] and j[]
6131: respectively, along with a permutation array perm[]. Length of the i[],j[],perm[] arrays is n.
6133: i[] is already sorted, but within a row, j[] is not sorted and might have repeats.
6134: i[] might contain negative indices at the beginning, which means the corresponding entries should be ignored in the splitting.
6136: Output Parameters:
6137: j[],perm[]: the routine needs to sort j[] within each row along with perm[].
6138: rowBegin[],rowMid[],rowEnd[]: of length m, and the memory is preallocated and zeroed by the caller.
6139: They contain indices pointing to j[]. For 0<=r<m, [rowBegin[r],rowMid[r]) point to begin/end entries of row r of the diagonal block,
6140: and [rowMid[r],rowEnd[r]) point to begin/end entries of row r of the off-diagonal block.
6142: Aperm[],Ajmap[],Atot,Annz: Arrays are allocated by this routine.
6143: Atot: number of entries belonging to the diagonal block.
6144: Annz: number of unique nonzeros belonging to the diagonal block.
6145: Aperm[Atot] stores values from perm[] for entries belonging to the diagonal block. Length of Aperm[] is Atot, though it may also count
6146: repeats (i.e., same 'i,j' pair).
6147: Ajmap[Annz+1] stores the number of repeats of each unique entry belonging to the diagonal block. More precisely, Ajmap[t+1] - Ajmap[t]
6148: is the number of repeats for the t-th unique entry in the diagonal block. Ajmap[0] is always 0.
6153: Bperm[], Bjmap[], Btot, Bnnz are similar but for the off-diagonal block.
6155: Aperm[],Bperm[],Ajmap[] and Bjmap[] are allocated separately by this routine with PetscMalloc1().
6156: */
6157: static PetscErrorCode MatSplitEntries_Internal(Mat mat, PetscCount n, const PetscInt i[], PetscInt j[], PetscCount perm[], PetscCount rowBegin[], PetscCount rowMid[], PetscCount rowEnd[], PetscCount *Atot_, PetscCount **Aperm_, PetscCount *Annz_, PetscCount **Ajmap_, PetscCount *Btot_, PetscCount **Bperm_, PetscCount *Bnnz_, PetscCount **Bjmap_)
6158: {
6159: PetscInt cstart, cend, rstart, rend, row, col;
6160: PetscCount Atot = 0, Btot = 0; /* Total number of nonzeros in the diagonal and off-diagonal blocks */
6161: PetscCount Annz = 0, Bnnz = 0; /* Number of unique nonzeros in the diagonal and off-diagonal blocks */
6162: PetscCount k, m, p, q, r, s, mid;
6163: PetscCount *Aperm, *Bperm, *Ajmap, *Bjmap;
6165: PetscFunctionBegin;
6166: PetscCall(PetscLayoutGetRange(mat->rmap, &rstart, &rend));
6167: PetscCall(PetscLayoutGetRange(mat->cmap, &cstart, &cend));
6168: m = rend - rstart;
6170: /* Skip negative rows */
6171: for (k = 0; k < n; k++)
6172: if (i[k] >= 0) break;
6174: /* Process [k,n): sort and partition each local row into diag and offdiag portions,
6175: fill rowBegin[], rowMid[], rowEnd[], and count Atot, Btot, Annz, Bnnz.
6176: */
6177: while (k < n) {
6178: row = i[k];
6179: /* Entries in [k,s) are in one row. Shift diagonal block col indices so that diag is ahead of offdiag after sorting the row */
6180: for (s = k; s < n; s++)
6181: if (i[s] != row) break;
6183: /* Shift diag columns to range of [-PETSC_INT_MAX, -1] */
6184: for (p = k; p < s; p++) {
6185: if (j[p] >= cstart && j[p] < cend) j[p] -= PETSC_INT_MAX;
6186: }
6187: PetscCall(PetscSortIntWithCountArray(s - k, j + k, perm + k));
6188: PetscCall(PetscSortedIntUpperBound(j, k, s, -1, &mid)); /* Separate [k,s) into [k,mid) for diag and [mid,s) for offdiag */
6189: rowBegin[row - rstart] = k;
6190: rowMid[row - rstart] = mid;
6191: rowEnd[row - rstart] = s;
6192: PetscCheck(k == s || j[s - 1] < mat->cmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Column index %" PetscInt_FMT " is >= matrix column size %" PetscInt_FMT, j[s - 1], mat->cmap->N);
6194: /* Count nonzeros of this diag/offdiag row, which might have repeats */
6195: Atot += mid - k;
6196: Btot += s - mid;
6198: /* Count unique nonzeros of this diag row */
6199: for (p = k; p < mid;) {
6200: col = j[p];
6201: do {
6202: j[p] += PETSC_INT_MAX; /* Revert the modified diagonal indices */
6203: p++;
6204: } while (p < mid && j[p] == col);
6205: Annz++;
6206: }
6208: /* Count unique nonzeros of this offdiag row */
6209: for (p = mid; p < s;) {
6210: col = j[p];
6211: do {
6212: p++;
6213: } while (p < s && j[p] == col);
6214: Bnnz++;
6215: }
6216: k = s;
6217: }
6219: /* Allocation according to Atot, Btot, Annz, Bnnz */
6220: PetscCall(PetscMalloc1(Atot, &Aperm));
6221: PetscCall(PetscMalloc1(Btot, &Bperm));
6222: PetscCall(PetscMalloc1(Annz + 1, &Ajmap));
6223: PetscCall(PetscMalloc1(Bnnz + 1, &Bjmap));
6225: /* Re-scan indices and copy diag/offdiag permutation indices to Aperm, Bperm and also fill Ajmap and Bjmap */
6226: Ajmap[0] = Bjmap[0] = Atot = Btot = Annz = Bnnz = 0;
6227: for (r = 0; r < m; r++) {
6228: k = rowBegin[r];
6229: mid = rowMid[r];
6230: s = rowEnd[r];
6231: PetscCall(PetscArraycpy(PetscSafePointerPlusOffset(Aperm, Atot), PetscSafePointerPlusOffset(perm, k), mid - k));
6232: PetscCall(PetscArraycpy(PetscSafePointerPlusOffset(Bperm, Btot), PetscSafePointerPlusOffset(perm, mid), s - mid));
6233: Atot += mid - k;
6234: Btot += s - mid;
6236: /* Scan column indices in this row and find out how many repeats each unique nonzero has */
6237: for (p = k; p < mid;) {
6238: col = j[p];
6239: q = p;
6240: do {
6241: p++;
6242: } while (p < mid && j[p] == col);
6243: Ajmap[Annz + 1] = Ajmap[Annz] + (p - q);
6244: Annz++;
6245: }
6247: for (p = mid; p < s;) {
6248: col = j[p];
6249: q = p;
6250: do {
6251: p++;
6252: } while (p < s && j[p] == col);
6253: Bjmap[Bnnz + 1] = Bjmap[Bnnz] + (p - q);
6254: Bnnz++;
6255: }
6256: }
6257: /* Output */
6258: *Aperm_ = Aperm;
6259: *Annz_ = Annz;
6260: *Atot_ = Atot;
6261: *Ajmap_ = Ajmap;
6262: *Bperm_ = Bperm;
6263: *Bnnz_ = Bnnz;
6264: *Btot_ = Btot;
6265: *Bjmap_ = Bjmap;
6266: PetscFunctionReturn(PETSC_SUCCESS);
6267: }
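/*
  Worked example for MatSplitEntries_Internal() (added for illustration, not from the original source).
  Suppose the local row range is [0,2), the diagonal column range is [0,2), and the n = 5 entries
  (already sorted by row) are
    i = [0,0,0,1,1],  j = [3,0,0,1,3],  perm = [0,1,2,3,4].
  Row 0 has a repeated diagonal entry (column 0) and one off-diagonal entry (column 3); row 1 has one
  of each. The routine then fills
    rowBegin = [0,3], rowMid = [2,4], rowEnd = [3,5]
    Atot = 3, Annz = 2, Ajmap = [0,2,3]   (diagonal block: column 0 repeated twice, column 1 once)
    Btot = 2, Bnnz = 2, Bjmap = [0,1,2]   (off-diagonal block: column 3 once in each row)
  while Aperm[]/Bperm[] receive the perm[] values of the diagonal/off-diagonal entries in that order.
*/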
6269: /*
6270: Expand the jmap[] array to make a new one based on the nonzeros in the merged matrix
6272: Input Parameters:
6273: nnz1: number of unique nonzeros in a set that was used to produce imap[], jmap[]
6274: nnz: number of unique nonzeros in the merged matrix
6275: imap[nnz1]: i-th nonzero in the set is the imap[i]-th nonzero in the merged matrix
6276: jmap[nnz1+1]: i-th nonzero in the set has jmap[i+1] - jmap[i] repeats in the set
6278: Output Parameter: (memory is allocated by the caller)
6279: jmap_new[nnz+1]: i-th nonzero in the merged matrix has jmap_new[i+1] - jmap_new[i] repeats in the set
6281: Example:
6282: nnz1 = 4
6283: nnz = 6
6284: imap = [1,3,4,5]
6285: jmap = [0,3,5,6,7]
6286: then,
6287: jmap_new = [0,0,3,3,5,6,7]
6288: */
6289: static PetscErrorCode ExpandJmap_Internal(PetscCount nnz1, PetscCount nnz, const PetscCount imap[], const PetscCount jmap[], PetscCount jmap_new[])
6290: {
6291: PetscCount k, p;
6293: PetscFunctionBegin;
6294: jmap_new[0] = 0;
6295: p = nnz; /* p loops over jmap_new[] backwards */
6296: for (k = nnz1 - 1; k >= 0; k--) { /* k loops over imap[] */
6297: for (; p > imap[k]; p--) jmap_new[p] = jmap[k + 1];
6298: }
6299: for (; p >= 0; p--) jmap_new[p] = jmap[0];
6300: PetscFunctionReturn(PETSC_SUCCESS);
6301: }
6303: static PetscErrorCode MatCOOStructDestroy_MPIAIJ(void **data)
6304: {
6305: MatCOOStruct_MPIAIJ *coo = *(MatCOOStruct_MPIAIJ **)data;
6307: PetscFunctionBegin;
6308: PetscCall(PetscSFDestroy(&coo->sf));
6309: PetscCall(PetscFree(coo->Aperm1));
6310: PetscCall(PetscFree(coo->Bperm1));
6311: PetscCall(PetscFree(coo->Ajmap1));
6312: PetscCall(PetscFree(coo->Bjmap1));
6313: PetscCall(PetscFree(coo->Aimap2));
6314: PetscCall(PetscFree(coo->Bimap2));
6315: PetscCall(PetscFree(coo->Aperm2));
6316: PetscCall(PetscFree(coo->Bperm2));
6317: PetscCall(PetscFree(coo->Ajmap2));
6318: PetscCall(PetscFree(coo->Bjmap2));
6319: PetscCall(PetscFree(coo->Cperm1));
6320: PetscCall(PetscFree2(coo->sendbuf, coo->recvbuf));
6321: PetscCall(PetscFree(coo));
6322: PetscFunctionReturn(PETSC_SUCCESS);
6323: }
6325: PetscErrorCode MatSetPreallocationCOO_MPIAIJ(Mat mat, PetscCount coo_n, PetscInt coo_i[], PetscInt coo_j[])
6326: {
6327: MPI_Comm comm;
6328: PetscMPIInt rank, size;
6329: PetscInt m, n, M, N, rstart, rend, cstart, cend; /* Sizes, indices of row/col, therefore with type PetscInt */
6330: PetscCount k, p, q, rem; /* Loop variables over coo arrays */
6331: Mat_MPIAIJ *mpiaij = (Mat_MPIAIJ *)mat->data;
6332: PetscContainer container;
6333: MatCOOStruct_MPIAIJ *coo;
6335: PetscFunctionBegin;
6336: PetscCall(PetscFree(mpiaij->garray));
6337: PetscCall(VecDestroy(&mpiaij->lvec));
6338: #if defined(PETSC_USE_CTABLE)
6339: PetscCall(PetscHMapIDestroy(&mpiaij->colmap));
6340: #else
6341: PetscCall(PetscFree(mpiaij->colmap));
6342: #endif
6343: PetscCall(VecScatterDestroy(&mpiaij->Mvctx));
6344: mat->assembled = PETSC_FALSE;
6345: mat->was_assembled = PETSC_FALSE;
6347: PetscCall(PetscObjectGetComm((PetscObject)mat, &comm));
6348: PetscCallMPI(MPI_Comm_size(comm, &size));
6349: PetscCallMPI(MPI_Comm_rank(comm, &rank));
6350: PetscCall(PetscLayoutSetUp(mat->rmap));
6351: PetscCall(PetscLayoutSetUp(mat->cmap));
6352: PetscCall(PetscLayoutGetRange(mat->rmap, &rstart, &rend));
6353: PetscCall(PetscLayoutGetRange(mat->cmap, &cstart, &cend));
6354: PetscCall(MatGetLocalSize(mat, &m, &n));
6355: PetscCall(MatGetSize(mat, &M, &N));
6357: /* Sort (i,j) by row along with a permutation array, so that the to-be-ignored */
6358: /* entries come first, then local rows, then remote rows. */
6359: PetscCount n1 = coo_n, *perm1;
6360: PetscInt *i1 = coo_i, *j1 = coo_j;
6362: PetscCall(PetscMalloc1(n1, &perm1));
6363: for (k = 0; k < n1; k++) perm1[k] = k;
6365: /* Manipulate indices so that entries with negative row or col indices will have smallest
6366: row indices, local entries will have greater but negative row indices, and remote entries
6367: will have positive row indices.
6368: */
6369: for (k = 0; k < n1; k++) {
6370: if (i1[k] < 0 || j1[k] < 0) i1[k] = PETSC_INT_MIN; /* e.g., -2^31, minimal to move them ahead */
6371: else if (i1[k] >= rstart && i1[k] < rend) i1[k] -= PETSC_INT_MAX; /* e.g., minus 2^31-1 to shift local rows to range of [-PETSC_INT_MAX, -1] */
6372: else {
6373: PetscCheck(!mat->nooffprocentries, PETSC_COMM_SELF, PETSC_ERR_USER_INPUT, "MAT_NO_OFF_PROC_ENTRIES is set but entries are being inserted into remote rows");
6374: if (mpiaij->donotstash) i1[k] = PETSC_INT_MIN; /* Ignore offproc entries as if they had negative indices */
6375: }
6376: }
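/* Example of the shift above (added for illustration): with rstart = 10 and rend = 20, a local row 12
   becomes 12 - PETSC_INT_MAX (negative, but larger than PETSC_INT_MIN), an entry with a negative index
   becomes PETSC_INT_MIN, and a remote row 25 stays 25, so a single sort by i1[] orders the entries as:
   ignored, then local, then remote. */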
6378: /* Sort by row; after that, [0,k) have ignored entries, [k,rem) have local rows and [rem,n1) have remote rows */
6379: PetscCall(PetscSortIntWithIntCountArrayPair(n1, i1, j1, perm1));
6381: /* Advance k to the first entry we need to take care of */
6382: for (k = 0; k < n1; k++)
6383: if (i1[k] > PETSC_INT_MIN) break;
6384: PetscCount i1start = k;
6386: PetscCall(PetscSortedIntUpperBound(i1, k, n1, rend - 1 - PETSC_INT_MAX, &rem)); /* rem is upper bound of the last local row */
6387: for (; k < rem; k++) i1[k] += PETSC_INT_MAX; /* Revert row indices of local rows*/
6389: PetscCheck(n1 == 0 || i1[n1 - 1] < M, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "COO row index %" PetscInt_FMT " is >= the matrix row size %" PetscInt_FMT, i1[n1 - 1], M);
6391: /* Send remote rows to their owner */
6392: /* Find which rows should be sent to which remote ranks*/
6393: PetscInt nsend = 0; /* Number of MPI ranks to send data to */
6394: PetscMPIInt *sendto; /* [nsend], storing remote ranks */
6395: PetscInt *nentries; /* [nsend], storing number of entries sent to remote ranks; Assume PetscInt is big enough for this count, and error if not */
6396: const PetscInt *ranges;
6397: PetscInt maxNsend = size >= 128 ? 128 : size; /* Assume max 128 neighbors; realloc when needed */
6399: PetscCall(PetscLayoutGetRanges(mat->rmap, &ranges));
6400: PetscCall(PetscMalloc2(maxNsend, &sendto, maxNsend, &nentries));
6401: for (k = rem; k < n1;) {
6402: PetscMPIInt owner;
6403: PetscInt firstRow, lastRow;
6405: /* Locate a row range */
6406: firstRow = i1[k]; /* first row of this owner */
6407: PetscCall(PetscLayoutFindOwner(mat->rmap, firstRow, &owner));
6408: lastRow = ranges[owner + 1] - 1; /* last row of this owner */
6410: /* Find the first index 'p' in [k,n) with i1[p] belonging to next owner */
6411: PetscCall(PetscSortedIntUpperBound(i1, k, n1, lastRow, &p));
6413: /* All entries in [k,p) belong to this remote owner */
6414: if (nsend >= maxNsend) { /* Double the remote ranks arrays if not long enough */
6415: PetscMPIInt *sendto2;
6416: PetscInt *nentries2;
6417: PetscInt maxNsend2 = (maxNsend <= size / 2) ? maxNsend * 2 : size;
6419: PetscCall(PetscMalloc2(maxNsend2, &sendto2, maxNsend2, &nentries2));
6420: PetscCall(PetscArraycpy(sendto2, sendto, maxNsend));
6421: PetscCall(PetscArraycpy(nentries2, nentries, maxNsend));
6422: PetscCall(PetscFree2(sendto, nentries));
6423: sendto = sendto2;
6424: nentries = nentries2;
6425: maxNsend = maxNsend2;
6426: }
6427: sendto[nsend] = owner;
6428: PetscCall(PetscIntCast(p - k, &nentries[nsend]));
6429: nsend++;
6430: k = p;
6431: }
6433: /* Build 1st SF to know offsets on remote to send data */
6434: PetscSF sf1;
6435: PetscInt nroots = 1, nroots2 = 0;
6436: PetscInt nleaves = nsend, nleaves2 = 0;
6437: PetscInt *offsets;
6438: PetscSFNode *iremote;
6440: PetscCall(PetscSFCreate(comm, &sf1));
6441: PetscCall(PetscMalloc1(nsend, &iremote));
6442: PetscCall(PetscMalloc1(nsend, &offsets));
6443: for (k = 0; k < nsend; k++) {
6444: iremote[k].rank = sendto[k];
6445: iremote[k].index = 0;
6446: nleaves2 += nentries[k];
6447: PetscCheck(nleaves2 >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Number of SF leaves is too large for PetscInt");
6448: }
6449: PetscCall(PetscSFSetGraph(sf1, nroots, nleaves, NULL, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER));
6450: PetscCall(PetscSFFetchAndOpWithMemTypeBegin(sf1, MPIU_INT, PETSC_MEMTYPE_HOST, &nroots2 /*rootdata*/, PETSC_MEMTYPE_HOST, nentries /*leafdata*/, PETSC_MEMTYPE_HOST, offsets /*leafupdate*/, MPI_SUM));
6451: PetscCall(PetscSFFetchAndOpEnd(sf1, MPIU_INT, &nroots2, nentries, offsets, MPI_SUM)); /* Should nroots2 overflow, the offsets[] check below catches it */
6452: PetscCall(PetscSFDestroy(&sf1));
6453: PetscAssert(nleaves2 == n1 - rem, PETSC_COMM_SELF, PETSC_ERR_PLIB, "nleaves2 %" PetscInt_FMT " != number of remote entries %" PetscCount_FMT, nleaves2, n1 - rem);
6455: /* Build 2nd SF to send remote COOs to their owner */
6456: PetscSF sf2;
6457: nroots = nroots2;
6458: nleaves = nleaves2;
6459: PetscCall(PetscSFCreate(comm, &sf2));
6460: PetscCall(PetscSFSetFromOptions(sf2));
6461: PetscCall(PetscMalloc1(nleaves, &iremote));
6462: p = 0;
6463: for (k = 0; k < nsend; k++) {
6464: PetscCheck(offsets[k] >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Number of SF roots is too large for PetscInt");
6465: for (q = 0; q < nentries[k]; q++, p++) {
6466: iremote[p].rank = sendto[k];
6467: PetscCall(PetscIntCast(offsets[k] + q, &iremote[p].index));
6468: }
6469: }
6470: PetscCall(PetscSFSetGraph(sf2, nroots, nleaves, NULL, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER));
6472: /* Send the remote COOs to their owner */
6473: PetscInt n2 = nroots, *i2, *j2; /* Buffers for received COOs from other ranks, along with a permutation array */
6474: PetscCount *perm2; /* Though PetscInt is enough for remote entries, we use PetscCount here as we want to reuse MatSplitEntries_Internal() */
6475: PetscCall(PetscMalloc3(n2, &i2, n2, &j2, n2, &perm2));
6476: PetscAssert(rem == 0 || i1 != NULL, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Cannot add nonzero offset to null");
6477: PetscAssert(rem == 0 || j1 != NULL, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Cannot add nonzero offset to null");
6478: PetscInt *i1prem = PetscSafePointerPlusOffset(i1, rem);
6479: PetscInt *j1prem = PetscSafePointerPlusOffset(j1, rem);
6480: PetscCall(PetscSFReduceWithMemTypeBegin(sf2, MPIU_INT, PETSC_MEMTYPE_HOST, i1prem, PETSC_MEMTYPE_HOST, i2, MPI_REPLACE));
6481: PetscCall(PetscSFReduceEnd(sf2, MPIU_INT, i1prem, i2, MPI_REPLACE));
6482: PetscCall(PetscSFReduceWithMemTypeBegin(sf2, MPIU_INT, PETSC_MEMTYPE_HOST, j1prem, PETSC_MEMTYPE_HOST, j2, MPI_REPLACE));
6483: PetscCall(PetscSFReduceEnd(sf2, MPIU_INT, j1prem, j2, MPI_REPLACE));
6485: PetscCall(PetscFree(offsets));
6486: PetscCall(PetscFree2(sendto, nentries));
6488: /* Sort received COOs by row along with the permutation array */
6489: for (k = 0; k < n2; k++) perm2[k] = k;
6490: PetscCall(PetscSortIntWithIntCountArrayPair(n2, i2, j2, perm2));
6492: /* sf2 only sends contiguous leafdata to contiguous rootdata. We record the permutation which will be used to fill leafdata */
6493: PetscCount *Cperm1;
6494: PetscAssert(rem == 0 || perm1 != NULL, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Cannot add nonzero offset to null");
6495: PetscCount *perm1prem = PetscSafePointerPlusOffset(perm1, rem);
6496: PetscCall(PetscMalloc1(nleaves, &Cperm1));
6497: PetscCall(PetscArraycpy(Cperm1, perm1prem, nleaves));
6499: /* Support for HYPRE matrices, kind of a hack.
6500: Swap min column with diagonal so that diagonal values will go first */
6501: PetscBool hypre;
6502: PetscCall(PetscStrcmp("_internal_COO_mat_for_hypre", ((PetscObject)mat)->name, &hypre));
6503: if (hypre) {
6504: PetscInt *minj;
6505: PetscBT hasdiag;
6507: PetscCall(PetscBTCreate(m, &hasdiag));
6508: PetscCall(PetscMalloc1(m, &minj));
6509: for (k = 0; k < m; k++) minj[k] = PETSC_INT_MAX;
6510: for (k = i1start; k < rem; k++) {
6511: if (j1[k] < cstart || j1[k] >= cend) continue;
6512: const PetscInt rindex = i1[k] - rstart;
6513: if ((j1[k] - cstart) == rindex) PetscCall(PetscBTSet(hasdiag, rindex));
6514: minj[rindex] = PetscMin(minj[rindex], j1[k]);
6515: }
6516: for (k = 0; k < n2; k++) {
6517: if (j2[k] < cstart || j2[k] >= cend) continue;
6518: const PetscInt rindex = i2[k] - rstart;
6519: if ((j2[k] - cstart) == rindex) PetscCall(PetscBTSet(hasdiag, rindex));
6520: minj[rindex] = PetscMin(minj[rindex], j2[k]);
6521: }
6522: for (k = i1start; k < rem; k++) {
6523: const PetscInt rindex = i1[k] - rstart;
6524: if (j1[k] < cstart || j1[k] >= cend || !PetscBTLookup(hasdiag, rindex)) continue;
6525: if (j1[k] == minj[rindex]) j1[k] = i1[k] + (cstart - rstart);
6526: else if ((j1[k] - cstart) == rindex) j1[k] = minj[rindex];
6527: }
6528: for (k = 0; k < n2; k++) {
6529: const PetscInt rindex = i2[k] - rstart;
6530: if (j2[k] < cstart || j2[k] >= cend || !PetscBTLookup(hasdiag, rindex)) continue;
6531: if (j2[k] == minj[rindex]) j2[k] = i2[k] + (cstart - rstart);
6532: else if ((j2[k] - cstart) == rindex) j2[k] = minj[rindex];
6533: }
6534: PetscCall(PetscBTDestroy(&hasdiag));
6535: PetscCall(PetscFree(minj));
6536: }
6538: /* Split local COOs and received COOs into diag/offdiag portions */
6539: PetscCount *rowBegin1, *rowMid1, *rowEnd1;
6540: PetscCount *Ajmap1, *Aperm1, *Bjmap1, *Bperm1;
6541: PetscCount Annz1, Bnnz1, Atot1, Btot1;
6542: PetscCount *rowBegin2, *rowMid2, *rowEnd2;
6543: PetscCount *Ajmap2, *Aperm2, *Bjmap2, *Bperm2;
6544: PetscCount Annz2, Bnnz2, Atot2, Btot2;
6546: PetscCall(PetscCalloc3(m, &rowBegin1, m, &rowMid1, m, &rowEnd1));
6547: PetscCall(PetscCalloc3(m, &rowBegin2, m, &rowMid2, m, &rowEnd2));
6548: PetscCall(MatSplitEntries_Internal(mat, rem, i1, j1, perm1, rowBegin1, rowMid1, rowEnd1, &Atot1, &Aperm1, &Annz1, &Ajmap1, &Btot1, &Bperm1, &Bnnz1, &Bjmap1));
6549: PetscCall(MatSplitEntries_Internal(mat, n2, i2, j2, perm2, rowBegin2, rowMid2, rowEnd2, &Atot2, &Aperm2, &Annz2, &Ajmap2, &Btot2, &Bperm2, &Bnnz2, &Bjmap2));
6551: /* Merge local COOs with received COOs: diag with diag, offdiag with offdiag */
6552: PetscInt *Ai, *Bi;
6553: PetscInt *Aj, *Bj;
6555: PetscCall(PetscMalloc1(m + 1, &Ai));
6556: PetscCall(PetscMalloc1(m + 1, &Bi));
6557: PetscCall(PetscMalloc1(Annz1 + Annz2, &Aj)); /* Since local and remote entries might have dups, we might allocate excess memory */
6558: PetscCall(PetscMalloc1(Bnnz1 + Bnnz2, &Bj));
6560: PetscCount *Aimap1, *Bimap1, *Aimap2, *Bimap2;
6561: PetscCall(PetscMalloc1(Annz1, &Aimap1));
6562: PetscCall(PetscMalloc1(Bnnz1, &Bimap1));
6563: PetscCall(PetscMalloc1(Annz2, &Aimap2));
6564: PetscCall(PetscMalloc1(Bnnz2, &Bimap2));
6566: PetscCall(MatMergeEntries_Internal(mat, j1, j2, rowBegin1, rowMid1, rowBegin2, rowMid2, Ajmap1, Ajmap2, Aimap1, Aimap2, Ai, Aj));
6567: PetscCall(MatMergeEntries_Internal(mat, j1, j2, rowMid1, rowEnd1, rowMid2, rowEnd2, Bjmap1, Bjmap2, Bimap1, Bimap2, Bi, Bj));
6569: /* Expand Ajmap1/Bjmap1 to make them based on the nonzeros in A/B, since we */
6570: /* expect most nonzeros in A/B to have local contributing entries */
6571: PetscInt Annz = Ai[m];
6572: PetscInt Bnnz = Bi[m];
6573: PetscCount *Ajmap1_new, *Bjmap1_new;
6575: PetscCall(PetscMalloc1(Annz + 1, &Ajmap1_new));
6576: PetscCall(PetscMalloc1(Bnnz + 1, &Bjmap1_new));
6578: PetscCall(ExpandJmap_Internal(Annz1, Annz, Aimap1, Ajmap1, Ajmap1_new));
6579: PetscCall(ExpandJmap_Internal(Bnnz1, Bnnz, Bimap1, Bjmap1, Bjmap1_new));
6581: PetscCall(PetscFree(Aimap1));
6582: PetscCall(PetscFree(Ajmap1));
6583: PetscCall(PetscFree(Bimap1));
6584: PetscCall(PetscFree(Bjmap1));
6585: PetscCall(PetscFree3(rowBegin1, rowMid1, rowEnd1));
6586: PetscCall(PetscFree3(rowBegin2, rowMid2, rowEnd2));
6587: PetscCall(PetscFree(perm1));
6588: PetscCall(PetscFree3(i2, j2, perm2));
6590: Ajmap1 = Ajmap1_new;
6591: Bjmap1 = Bjmap1_new;
6593: /* Reallocate Aj, Bj once we know actual numbers of unique nonzeros in A and B */
6594: if (Annz < Annz1 + Annz2) {
6595: PetscInt *Aj_new;
6596: PetscCall(PetscMalloc1(Annz, &Aj_new));
6597: PetscCall(PetscArraycpy(Aj_new, Aj, Annz));
6598: PetscCall(PetscFree(Aj));
6599: Aj = Aj_new;
6600: }
6602: if (Bnnz < Bnnz1 + Bnnz2) {
6603: PetscInt *Bj_new;
6604: PetscCall(PetscMalloc1(Bnnz, &Bj_new));
6605: PetscCall(PetscArraycpy(Bj_new, Bj, Bnnz));
6606: PetscCall(PetscFree(Bj));
6607: Bj = Bj_new;
6608: }
6610: /* Create new submatrices for on-process and off-process coupling */
6611: PetscScalar *Aa, *Ba;
6612: MatType rtype;
6613: Mat_SeqAIJ *a, *b;
6614: PetscObjectState state;
6615: PetscCall(PetscCalloc1(Annz, &Aa)); /* Zero matrix on device */
6616: PetscCall(PetscCalloc1(Bnnz, &Ba));
6617: /* make Aj[] local, i.e., relative to the start column of the diagonal portion */
6618: if (cstart) {
6619: for (k = 0; k < Annz; k++) Aj[k] -= cstart;
6620: }
6622: PetscCall(MatGetRootType_Private(mat, &rtype));
6624: MatSeqXAIJGetOptions_Private(mpiaij->A);
6625: PetscCall(MatDestroy(&mpiaij->A));
6626: PetscCall(MatCreateSeqAIJWithArrays(PETSC_COMM_SELF, m, n, Ai, Aj, Aa, &mpiaij->A));
6627: PetscCall(MatSetBlockSizesFromMats(mpiaij->A, mat, mat));
6628: MatSeqXAIJRestoreOptions_Private(mpiaij->A);
6630: MatSeqXAIJGetOptions_Private(mpiaij->B);
6631: PetscCall(MatDestroy(&mpiaij->B));
6632: PetscCall(MatCreateSeqAIJWithArrays(PETSC_COMM_SELF, m, mat->cmap->N, Bi, Bj, Ba, &mpiaij->B));
6633: PetscCall(MatSetBlockSizesFromMats(mpiaij->B, mat, mat));
6634: MatSeqXAIJRestoreOptions_Private(mpiaij->B);
6636: PetscCall(MatSetUpMultiply_MPIAIJ(mat));
6637: mat->was_assembled = PETSC_TRUE; // was_assembled in effect means the Mvctx is built; doing so avoids redundant MatSetUpMultiply_MPIAIJ
6638: state = mpiaij->A->nonzerostate + mpiaij->B->nonzerostate;
6639: PetscCallMPI(MPIU_Allreduce(&state, &mat->nonzerostate, 1, MPIU_INT64, MPI_SUM, PetscObjectComm((PetscObject)mat)));
6641: a = (Mat_SeqAIJ *)mpiaij->A->data;
6642: b = (Mat_SeqAIJ *)mpiaij->B->data;
6643: a->free_a = PETSC_TRUE;
6644: a->free_ij = PETSC_TRUE;
6645: b->free_a = PETSC_TRUE;
6646: b->free_ij = PETSC_TRUE;
6647: a->maxnz = a->nz;
6648: b->maxnz = b->nz;
6650: /* conversion must happen AFTER multiply setup */
6651: PetscCall(MatConvert(mpiaij->A, rtype, MAT_INPLACE_MATRIX, &mpiaij->A));
6652: PetscCall(MatConvert(mpiaij->B, rtype, MAT_INPLACE_MATRIX, &mpiaij->B));
6653: PetscCall(VecDestroy(&mpiaij->lvec));
6654: PetscCall(MatCreateVecs(mpiaij->B, &mpiaij->lvec, NULL));
6656: // Put the COO struct in a container and then attach that to the matrix
6657: PetscCall(PetscMalloc1(1, &coo));
6658: coo->n = coo_n;
6659: coo->sf = sf2;
6660: coo->sendlen = nleaves;
6661: coo->recvlen = nroots;
6662: coo->Annz = Annz;
6663: coo->Bnnz = Bnnz;
6664: coo->Annz2 = Annz2;
6665: coo->Bnnz2 = Bnnz2;
6666: coo->Atot1 = Atot1;
6667: coo->Atot2 = Atot2;
6668: coo->Btot1 = Btot1;
6669: coo->Btot2 = Btot2;
6670: coo->Ajmap1 = Ajmap1;
6671: coo->Aperm1 = Aperm1;
6672: coo->Bjmap1 = Bjmap1;
6673: coo->Bperm1 = Bperm1;
6674: coo->Aimap2 = Aimap2;
6675: coo->Ajmap2 = Ajmap2;
6676: coo->Aperm2 = Aperm2;
6677: coo->Bimap2 = Bimap2;
6678: coo->Bjmap2 = Bjmap2;
6679: coo->Bperm2 = Bperm2;
6680: coo->Cperm1 = Cperm1;
6681: // Allocate in preallocation. If not used, it has zero cost on host
6682: PetscCall(PetscMalloc2(coo->sendlen, &coo->sendbuf, coo->recvlen, &coo->recvbuf));
6683: PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
6684: PetscCall(PetscContainerSetPointer(container, coo));
6685: PetscCall(PetscContainerSetCtxDestroy(container, MatCOOStructDestroy_MPIAIJ));
6686: PetscCall(PetscObjectCompose((PetscObject)mat, "__PETSc_MatCOOStruct_Host", (PetscObject)container));
6687: PetscCall(PetscContainerDestroy(&container));
6688: PetscFunctionReturn(PETSC_SUCCESS);
6689: }
6691: static PetscErrorCode MatSetValuesCOO_MPIAIJ(Mat mat, const PetscScalar v[], InsertMode imode)
6692: {
6693: Mat_MPIAIJ *mpiaij = (Mat_MPIAIJ *)mat->data;
6694: Mat A = mpiaij->A, B = mpiaij->B;
6695: PetscScalar *Aa, *Ba;
6696: PetscScalar *sendbuf, *recvbuf;
6697: const PetscCount *Ajmap1, *Ajmap2, *Aimap2;
6698: const PetscCount *Bjmap1, *Bjmap2, *Bimap2;
6699: const PetscCount *Aperm1, *Aperm2, *Bperm1, *Bperm2;
6700: const PetscCount *Cperm1;
6701: PetscContainer container;
6702: MatCOOStruct_MPIAIJ *coo;
6704: PetscFunctionBegin;
6705: PetscCall(PetscObjectQuery((PetscObject)mat, "__PETSc_MatCOOStruct_Host", (PetscObject *)&container));
6706: PetscCheck(container, PetscObjectComm((PetscObject)mat), PETSC_ERR_PLIB, "MatCOOStruct not found on this matrix");
6707: PetscCall(PetscContainerGetPointer(container, &coo));
6708: sendbuf = coo->sendbuf;
6709: recvbuf = coo->recvbuf;
6710: Ajmap1 = coo->Ajmap1;
6711: Ajmap2 = coo->Ajmap2;
6712: Aimap2 = coo->Aimap2;
6713: Bjmap1 = coo->Bjmap1;
6714: Bjmap2 = coo->Bjmap2;
6715: Bimap2 = coo->Bimap2;
6716: Aperm1 = coo->Aperm1;
6717: Aperm2 = coo->Aperm2;
6718: Bperm1 = coo->Bperm1;
6719: Bperm2 = coo->Bperm2;
6720: Cperm1 = coo->Cperm1;
6722: PetscCall(MatSeqAIJGetArray(A, &Aa)); /* Might read and write matrix values */
6723: PetscCall(MatSeqAIJGetArray(B, &Ba));
6725: /* Pack entries to be sent to remote */
6726: for (PetscCount i = 0; i < coo->sendlen; i++) sendbuf[i] = v[Cperm1[i]];
6728: /* Send remote entries to their owner and overlap the communication with local computation */
6729: PetscCall(PetscSFReduceWithMemTypeBegin(coo->sf, MPIU_SCALAR, PETSC_MEMTYPE_HOST, sendbuf, PETSC_MEMTYPE_HOST, recvbuf, MPI_REPLACE));
6730: /* Add local entries to A and B */
6731: for (PetscCount i = 0; i < coo->Annz; i++) { /* All nonzeros in A are either zero'ed or added with a value (i.e., initialized) */
6732: PetscScalar sum = 0.0; /* Do partial summation first to improve numerical stability */
6733: for (PetscCount k = Ajmap1[i]; k < Ajmap1[i + 1]; k++) sum += v[Aperm1[k]];
6734: Aa[i] = (imode == INSERT_VALUES ? 0.0 : Aa[i]) + sum;
6735: }
6736: for (PetscCount i = 0; i < coo->Bnnz; i++) {
6737: PetscScalar sum = 0.0;
6738: for (PetscCount k = Bjmap1[i]; k < Bjmap1[i + 1]; k++) sum += v[Bperm1[k]];
6739: Ba[i] = (imode == INSERT_VALUES ? 0.0 : Ba[i]) + sum;
6740: }
6741: PetscCall(PetscSFReduceEnd(coo->sf, MPIU_SCALAR, sendbuf, recvbuf, MPI_REPLACE));
6743: /* Add received remote entries to A and B */
6744: for (PetscCount i = 0; i < coo->Annz2; i++) {
6745: for (PetscCount k = Ajmap2[i]; k < Ajmap2[i + 1]; k++) Aa[Aimap2[i]] += recvbuf[Aperm2[k]];
6746: }
6747: for (PetscCount i = 0; i < coo->Bnnz2; i++) {
6748: for (PetscCount k = Bjmap2[i]; k < Bjmap2[i + 1]; k++) Ba[Bimap2[i]] += recvbuf[Bperm2[k]];
6749: }
6750: PetscCall(MatSeqAIJRestoreArray(A, &Aa));
6751: PetscCall(MatSeqAIJRestoreArray(B, &Ba));
6752: PetscFunctionReturn(PETSC_SUCCESS);
6753: }
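/*
  Illustrative end-to-end use of the COO assembly path implemented by the two routines above (a sketch
  of assumed user code, not from this file): the (i,j) pattern is declared once, then values can be set
  repeatedly while the cached MatCOOStruct_MPIAIJ (SF, permutations, jmaps) is reused.

    PetscInt    coo_i[] = {0, 0, 1};  // global row indices; repeats and off-process entries are allowed
    PetscInt    coo_j[] = {0, 2, 1};  // global column indices
    PetscScalar v[]     = {1.0, 2.0, 3.0};
    PetscCall(MatSetPreallocationCOO(A, 3, coo_i, coo_j)); // dispatches to MatSetPreallocationCOO_MPIAIJ() for MATMPIAIJ
    PetscCall(MatSetValuesCOO(A, v, INSERT_VALUES));       // dispatches to MatSetValuesCOO_MPIAIJ(); ADD_VALUES also works
*/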
6755: /*MC
6756: MATMPIAIJ - MATMPIAIJ = "mpiaij" - A matrix type to be used for parallel sparse matrices.
6758: Options Database Keys:
6759: . -mat_type mpiaij - sets the matrix type to `MATMPIAIJ` during a call to `MatSetFromOptions()`
6761: Level: beginner
6763: Notes:
6764: `MatSetValues()` may be called for this matrix type with a `NULL` argument for the numerical values;
6765: in this case the values associated with the rows and columns one passes in are set to zero
6766: in the matrix
6768: `MatSetOption`(,`MAT_STRUCTURE_ONLY`,`PETSC_TRUE`) may be called for this matrix type. In this case no
6769: space is allocated for the nonzero entries and any entries passed with `MatSetValues()` are ignored
6771: .seealso: [](ch_matrices), `Mat`, `MATSEQAIJ`, `MATAIJ`, `MatCreateAIJ()`
6772: M*/
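/*
  Minimal creation sketch for this matrix type (assumed user code with placeholder sizes M and N, not
  part of this file):

    Mat A;
    PetscCall(MatCreate(PETSC_COMM_WORLD, &A));
    PetscCall(MatSetSizes(A, PETSC_DECIDE, PETSC_DECIDE, M, N));
    PetscCall(MatSetType(A, MATMPIAIJ)); // or -mat_type mpiaij together with MatSetFromOptions()
    PetscCall(MatMPIAIJSetPreallocation(A, 5, NULL, 2, NULL));
    // ... MatSetValues(), MatAssemblyBegin()/MatAssemblyEnd() ...
    PetscCall(MatDestroy(&A));
*/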
6773: PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJ(Mat B)
6774: {
6775: Mat_MPIAIJ *b;
6776: PetscMPIInt size;
6778: PetscFunctionBegin;
6779: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)B), &size));
6781: PetscCall(PetscNew(&b));
6782: B->data = (void *)b;
6783: B->ops[0] = MatOps_Values;
6784: B->assembled = PETSC_FALSE;
6785: B->insertmode = NOT_SET_VALUES;
6786: b->size = size;
6788: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)B), &b->rank));
6790: /* build cache for off array entries formed */
6791: PetscCall(MatStashCreate_Private(PetscObjectComm((PetscObject)B), 1, &B->stash));
6793: b->donotstash = PETSC_FALSE;
6794: b->colmap = NULL;
6795: b->garray = NULL;
6796: b->roworiented = PETSC_TRUE;
6798: /* stuff used for matrix vector multiply */
6799: b->lvec = NULL;
6800: b->Mvctx = NULL;
6802: /* stuff for MatGetRow() */
6803: b->rowindices = NULL;
6804: b->rowvalues = NULL;
6805: b->getrowactive = PETSC_FALSE;
6807: /* flexible pointer used in CUSPARSE classes */
6808: b->spptr = NULL;
6810: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMPIAIJSetUseScalableIncreaseOverlap_C", MatMPIAIJSetUseScalableIncreaseOverlap_MPIAIJ));
6811: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatStoreValues_C", MatStoreValues_MPIAIJ));
6812: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatRetrieveValues_C", MatRetrieveValues_MPIAIJ));
6813: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatIsTranspose_C", MatIsTranspose_MPIAIJ));
6814: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMPIAIJSetPreallocation_C", MatMPIAIJSetPreallocation_MPIAIJ));
6815: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatResetPreallocation_C", MatResetPreallocation_MPIAIJ));
6816: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatResetHash_C", MatResetHash_MPIAIJ));
6817: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMPIAIJSetPreallocationCSR_C", MatMPIAIJSetPreallocationCSR_MPIAIJ));
6818: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatDiagonalScaleLocal_C", MatDiagonalScaleLocal_MPIAIJ));
6819: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpiaijperm_C", MatConvert_MPIAIJ_MPIAIJPERM));
6820: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpiaijsell_C", MatConvert_MPIAIJ_MPIAIJSELL));
6821: #if defined(PETSC_HAVE_CUDA)
6822: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpiaijcusparse_C", MatConvert_MPIAIJ_MPIAIJCUSPARSE));
6823: #endif
6824: #if defined(PETSC_HAVE_HIP)
6825: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpiaijhipsparse_C", MatConvert_MPIAIJ_MPIAIJHIPSPARSE));
6826: #endif
6827: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
6828: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpiaijkokkos_C", MatConvert_MPIAIJ_MPIAIJKokkos));
6829: #endif
6830: #if defined(PETSC_HAVE_MKL_SPARSE)
6831: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpiaijmkl_C", MatConvert_MPIAIJ_MPIAIJMKL));
6832: #endif
6833: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpiaijcrl_C", MatConvert_MPIAIJ_MPIAIJCRL));
6834: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpibaij_C", MatConvert_MPIAIJ_MPIBAIJ));
6835: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpisbaij_C", MatConvert_MPIAIJ_MPISBAIJ));
6836: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpidense_C", MatConvert_MPIAIJ_MPIDense));
6837: #if defined(PETSC_HAVE_ELEMENTAL)
6838: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_elemental_C", MatConvert_MPIAIJ_Elemental));
6839: #endif
6840: #if defined(PETSC_HAVE_SCALAPACK) && (defined(PETSC_USE_REAL_SINGLE) || defined(PETSC_USE_REAL_DOUBLE))
6841: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_scalapack_C", MatConvert_AIJ_ScaLAPACK));
6842: #endif
6843: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_is_C", MatConvert_XAIJ_IS));
6844: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpisell_C", MatConvert_MPIAIJ_MPISELL));
6845: #if defined(PETSC_HAVE_HYPRE)
6846: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_hypre_C", MatConvert_AIJ_HYPRE));
6847: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatProductSetFromOptions_transpose_mpiaij_mpiaij_C", MatProductSetFromOptions_Transpose_AIJ_AIJ));
6848: #endif
6849: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatProductSetFromOptions_is_mpiaij_C", MatProductSetFromOptions_IS_XAIJ));
6850: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatProductSetFromOptions_mpiaij_mpiaij_C", MatProductSetFromOptions_MPIAIJ));
6851: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatSetPreallocationCOO_C", MatSetPreallocationCOO_MPIAIJ));
6852: PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatSetValuesCOO_C", MatSetValuesCOO_MPIAIJ));
6853: PetscCall(PetscObjectChangeTypeName((PetscObject)B, MATMPIAIJ));
6854: PetscFunctionReturn(PETSC_SUCCESS);
6855: }
6857: /*@
6858: MatCreateMPIAIJWithSplitArrays - creates a `MATMPIAIJ` matrix using arrays that contain the "diagonal"
6859: and "off-diagonal" part of the matrix in CSR format.
6861: Collective
6863: Input Parameters:
6864: + comm - MPI communicator
6865: . m - number of local rows (Cannot be `PETSC_DECIDE`)
6866: . n - This value should be the same as the local size used in creating the
6867: x vector for the matrix-vector product $y = Ax$ (or `PETSC_DECIDE` to have it
6868: calculated if `N` is given). For square matrices `n` is almost always `m`.
6869: . M - number of global rows (or `PETSC_DETERMINE` to have it calculated if `m` is given)
6870: . N - number of global columns (or `PETSC_DETERMINE` to have it calculated if `n` is given)
6871: . i - row indices for "diagonal" portion of matrix; that is i[0] = 0, i[row] = i[row-1] + number of elements in that row of the matrix
6872: . j - column indices, which must be local, i.e., based off the start column of the diagonal portion
6873: . a - matrix values
6874: . oi - row indices for "off-diagonal" portion of matrix; that is oi[0] = 0, oi[row] = oi[row-1] + number of elements in that row of the matrix
6875: . oj - column indices, which must be global, representing global columns in the `MATMPIAIJ` matrix
6876: - oa - matrix values
6878: Output Parameter:
6879: . mat - the matrix
6881: Level: advanced
6883: Notes:
6884: The `i`, `j`, and `a` arrays ARE NOT copied by this routine into the internal format used by PETSc (even in Fortran). The user
6885: must free the arrays once the matrix has been destroyed and not before.
6887: The `i` and `j` indices are 0 based
6889: See `MatCreateAIJ()` for the definition of "diagonal" and "off-diagonal" portion of the matrix
6891: This sets local rows and cannot be used to set off-processor values.
6893: Use of this routine is discouraged because it is inflexible and cumbersome to use. It is extremely rare that a
6894: legacy application natively assembles into exactly this split format. The code to do so is nontrivial and does
6895: not easily support in-place reassembly. It is recommended to use MatSetValues() (or a variant thereof) because
6896: the resulting assembly is easier to implement, will work with any matrix format, and the user does not have to
6897: keep track of the underlying array. Use `MatSetOption`(A,`MAT_NO_OFF_PROC_ENTRIES`,`PETSC_TRUE`) to disable all
6898: communication if it is known that only local entries will be set.
6900: .seealso: [](ch_matrices), `Mat`, `MatCreate()`, `MatCreateSeqAIJ()`, `MatSetValues()`, `MatMPIAIJSetPreallocation()`, `MatMPIAIJSetPreallocationCSR()`,
6901: `MATMPIAIJ`, `MatCreateAIJ()`, `MatCreateMPIAIJWithArrays()`
6902: @*/
6903: PetscErrorCode MatCreateMPIAIJWithSplitArrays(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt M, PetscInt N, PetscInt i[], PetscInt j[], PetscScalar a[], PetscInt oi[], PetscInt oj[], PetscScalar oa[], Mat *mat)
6904: {
6905: Mat_MPIAIJ *maij;
6907: PetscFunctionBegin;
6908: PetscCheck(m >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "local number of rows (m) cannot be PETSC_DECIDE, or negative");
6909: PetscCheck(i[0] == 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "i (row indices) must start with 0");
6910: PetscCheck(oi[0] == 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "oi (row indices) must start with 0");
6911: PetscCall(MatCreate(comm, mat));
6912: PetscCall(MatSetSizes(*mat, m, n, M, N));
6913: PetscCall(MatSetType(*mat, MATMPIAIJ));
6914: maij = (Mat_MPIAIJ *)(*mat)->data;
6916: (*mat)->preallocated = PETSC_TRUE;
6918: PetscCall(PetscLayoutSetUp((*mat)->rmap));
6919: PetscCall(PetscLayoutSetUp((*mat)->cmap));
6921: PetscCall(MatCreateSeqAIJWithArrays(PETSC_COMM_SELF, m, n, i, j, a, &maij->A));
6922: PetscCall(MatCreateSeqAIJWithArrays(PETSC_COMM_SELF, m, (*mat)->cmap->N, oi, oj, oa, &maij->B));
6924: PetscCall(MatSetOption(*mat, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
6925: PetscCall(MatAssemblyBegin(*mat, MAT_FINAL_ASSEMBLY));
6926: PetscCall(MatAssemblyEnd(*mat, MAT_FINAL_ASSEMBLY));
6927: PetscCall(MatSetOption(*mat, MAT_NO_OFF_PROC_ENTRIES, PETSC_FALSE));
6928: PetscCall(MatSetOption(*mat, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
6929: PetscFunctionReturn(PETSC_SUCCESS);
6930: }
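/* Illustrative sketch, not part of the PETSc source: a call to MatCreateMPIAIJWithSplitArrays() on one
   rank that owns 2 rows; the array contents are made up. j[] uses local column numbering for the
   diagonal block, while oj[] uses global column numbering for the off-diagonal block.

     PetscInt    i[]  = {0, 1, 2}, j[]  = {0, 1};  PetscScalar a[]  = {4.0, 4.0};
     PetscInt    oi[] = {0, 1, 1}, oj[] = {5};     PetscScalar oa[] = {-1.0};
     Mat         A;

     PetscCall(MatCreateMPIAIJWithSplitArrays(comm, 2, 2, PETSC_DETERMINE, PETSC_DETERMINE, i, j, a, oi, oj, oa, &A));
*/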
6932: typedef struct {
6933: Mat *mp; /* intermediate products */
6934: PetscBool *mptmp; /* is the intermediate product temporary? */
6935: PetscInt cp; /* number of intermediate products */
6937: /* support for MatGetBrowsOfAoCols_MPIAIJ for P_oth */
6938: PetscInt *startsj_s, *startsj_r;
6939: PetscScalar *bufa;
6940: Mat P_oth;
6942: /* may take advantage of merging product->B */
6943: Mat Bloc; /* local part of B formed by merging its diag and off-diag blocks */
6945: /* cusparse does not support splitting the symbolic and numeric phases.
6946: When api_user is true, we do not need to update the numerical values
6947: of the temporary storage */
6948: PetscBool reusesym;
6950: /* support for COO values insertion */
6951: PetscScalar *coo_v, *coo_w; /* store on-process and off-process COO scalars, and are used as MPI recv/send buffers, respectively */
6952: PetscInt **own; /* own[i] points to address of on-process COO indices for Mat mp[i] */
6953: PetscInt **off; /* off[i] points to address of off-process COO indices for Mat mp[i] */
6954: PetscBool hasoffproc; /* if true, off-process value insertion occurs (i.e., AtB or PtAP) */
6955: PetscSF sf; /* used for non-local value insertion and memory allocation */
6956: PetscMemType mtype;
6958: /* customization */
6959: PetscBool abmerge;
6960: PetscBool P_oth_bind;
6961: } MatMatMPIAIJBACKEND;
6963: static PetscErrorCode MatProductCtxDestroy_MatMatMPIAIJBACKEND(void **data)
6964: {
6965: MatMatMPIAIJBACKEND *mmdata = *(MatMatMPIAIJBACKEND **)data;
6966: PetscInt i;
6968: PetscFunctionBegin;
6969: PetscCall(PetscFree2(mmdata->startsj_s, mmdata->startsj_r));
6970: PetscCall(PetscFree(mmdata->bufa));
6971: PetscCall(PetscSFFree(mmdata->sf, mmdata->mtype, mmdata->coo_v));
6972: PetscCall(PetscSFFree(mmdata->sf, mmdata->mtype, mmdata->coo_w));
6973: PetscCall(MatDestroy(&mmdata->P_oth));
6974: PetscCall(MatDestroy(&mmdata->Bloc));
6975: PetscCall(PetscSFDestroy(&mmdata->sf));
6976: for (i = 0; i < mmdata->cp; i++) PetscCall(MatDestroy(&mmdata->mp[i]));
6977: PetscCall(PetscFree2(mmdata->mp, mmdata->mptmp));
6978: PetscCall(PetscFree(mmdata->own[0]));
6979: PetscCall(PetscFree(mmdata->own));
6980: PetscCall(PetscFree(mmdata->off[0]));
6981: PetscCall(PetscFree(mmdata->off));
6982: PetscCall(PetscFree(mmdata));
6983: PetscFunctionReturn(PETSC_SUCCESS);
6984: }
6986: /* Copy n selected entries of A, with indices given in idx[], to v[].
6987: If idx is NULL, copy the whole data array of A to v[].
6988: */
6989: static PetscErrorCode MatSeqAIJCopySubArray(Mat A, PetscInt n, const PetscInt idx[], PetscScalar v[])
6990: {
6991: PetscErrorCode (*f)(Mat, PetscInt, const PetscInt[], PetscScalar[]);
6993: PetscFunctionBegin;
6994: PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatSeqAIJCopySubArray_C", &f));
6995: if (f) PetscCall((*f)(A, n, idx, v));
6996: else {
6997: const PetscScalar *vv;
6999: PetscCall(MatSeqAIJGetArrayRead(A, &vv));
7000: if (n && idx) {
7001: PetscScalar *w = v;
7002: const PetscInt *oi = idx;
7004: for (PetscInt j = 0; j < n; j++) *w++ = vv[*oi++];
7005: } else {
7006: PetscCall(PetscArraycpy(v, vv, n));
7007: }
7008: PetscCall(MatSeqAIJRestoreArrayRead(A, &vv));
7009: }
7010: PetscFunctionReturn(PETSC_SUCCESS);
7011: }
7013: static PetscErrorCode MatProductNumeric_MPIAIJBACKEND(Mat C)
7014: {
7015: MatMatMPIAIJBACKEND *mmdata;
7016: PetscInt i, n_d, n_o;
7018: PetscFunctionBegin;
7019: MatCheckProduct(C, 1);
7020: PetscCheck(C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Product data empty");
7021: mmdata = (MatMatMPIAIJBACKEND *)C->product->data;
7022: if (!mmdata->reusesym) { /* update temporary matrices */
7023: if (mmdata->P_oth) PetscCall(MatGetBrowsOfAoCols_MPIAIJ(C->product->A, C->product->B, MAT_REUSE_MATRIX, &mmdata->startsj_s, &mmdata->startsj_r, &mmdata->bufa, &mmdata->P_oth));
7024: if (mmdata->Bloc) PetscCall(MatMPIAIJGetLocalMatMerge(C->product->B, MAT_REUSE_MATRIX, NULL, &mmdata->Bloc));
7025: }
7026: mmdata->reusesym = PETSC_FALSE;
7028: for (i = 0; i < mmdata->cp; i++) {
7029: PetscCheck(mmdata->mp[i]->ops->productnumeric, PetscObjectComm((PetscObject)mmdata->mp[i]), PETSC_ERR_PLIB, "Missing numeric op for %s", MatProductTypes[mmdata->mp[i]->product->type]);
7030: PetscCall((*mmdata->mp[i]->ops->productnumeric)(mmdata->mp[i]));
7031: }
7032: for (i = 0, n_d = 0, n_o = 0; i < mmdata->cp; i++) {
7033: PetscInt noff;
7035: PetscCall(PetscIntCast(mmdata->off[i + 1] - mmdata->off[i], &noff));
7036: if (mmdata->mptmp[i]) continue;
7037: if (noff) {
7038: PetscInt nown;
7040: PetscCall(PetscIntCast(mmdata->own[i + 1] - mmdata->own[i], &nown));
7041: PetscCall(MatSeqAIJCopySubArray(mmdata->mp[i], noff, mmdata->off[i], mmdata->coo_w + n_o));
7042: PetscCall(MatSeqAIJCopySubArray(mmdata->mp[i], nown, mmdata->own[i], mmdata->coo_v + n_d));
7043: n_o += noff;
7044: n_d += nown;
7045: } else {
7046: Mat_SeqAIJ *mm = (Mat_SeqAIJ *)mmdata->mp[i]->data;
7048: PetscCall(MatSeqAIJCopySubArray(mmdata->mp[i], mm->nz, NULL, mmdata->coo_v + n_d));
7049: n_d += mm->nz;
7050: }
7051: }
7052: if (mmdata->hasoffproc) { /* offprocess insertion */
7053: PetscCall(PetscSFGatherBegin(mmdata->sf, MPIU_SCALAR, mmdata->coo_w, mmdata->coo_v + n_d));
7054: PetscCall(PetscSFGatherEnd(mmdata->sf, MPIU_SCALAR, mmdata->coo_w, mmdata->coo_v + n_d));
7055: }
7056: PetscCall(MatSetValuesCOO(C, mmdata->coo_v, INSERT_VALUES));
7057: PetscFunctionReturn(PETSC_SUCCESS);
7058: }
7060: /* Support for Pt * A, A * P, or Pt * A * P */
7061: #define MAX_NUMBER_INTERMEDIATE 4
7062: PetscErrorCode MatProductSymbolic_MPIAIJBACKEND(Mat C)
7063: {
7064: Mat_Product *product = C->product;
7065: Mat A, P, mp[MAX_NUMBER_INTERMEDIATE]; /* A, P and a series of intermediate matrices */
7066: Mat_MPIAIJ *a, *p;
7067: MatMatMPIAIJBACKEND *mmdata;
7068: ISLocalToGlobalMapping P_oth_l2g = NULL;
7069: IS glob = NULL;
7070: const char *prefix;
7071: char pprefix[256];
7072: const PetscInt *globidx, *P_oth_idx;
7073: PetscInt i, j, cp, m, n, M, N, *coo_i, *coo_j;
7074: PetscCount ncoo, ncoo_d, ncoo_o, ncoo_oown;
7075: PetscInt cmapt[MAX_NUMBER_INTERMEDIATE], rmapt[MAX_NUMBER_INTERMEDIATE]; /* col/row map type for each Mat in mp[]. */
7076: /* type-0: consecutive, start from 0; type-1: consecutive with */
7077: /* a base offset; type-2: sparse with a local to global map table */
7078: const PetscInt *cmapa[MAX_NUMBER_INTERMEDIATE], *rmapa[MAX_NUMBER_INTERMEDIATE]; /* col/row local to global map array (table) for type-2 map type */
7080: MatProductType ptype;
7081: PetscBool mptmp[MAX_NUMBER_INTERMEDIATE], hasoffproc = PETSC_FALSE, iscuda, iship, iskokk;
7082: PetscMPIInt size;
7084: PetscFunctionBegin;
7085: MatCheckProduct(C, 1);
7086: PetscCheck(!product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Product data not empty");
7087: ptype = product->type;
7088: if (product->A->symmetric == PETSC_BOOL3_TRUE && ptype == MATPRODUCT_AtB) {
7089: ptype = MATPRODUCT_AB;
7090: product->symbolic_used_the_fact_A_is_symmetric = PETSC_TRUE;
7091: }
7092: switch (ptype) {
7093: case MATPRODUCT_AB:
7094: A = product->A;
7095: P = product->B;
7096: m = A->rmap->n;
7097: n = P->cmap->n;
7098: M = A->rmap->N;
7099: N = P->cmap->N;
7100: hasoffproc = PETSC_FALSE; /* will not scatter mat product values to other processes */
7101: break;
7102: case MATPRODUCT_AtB:
7103: P = product->A;
7104: A = product->B;
7105: m = P->cmap->n;
7106: n = A->cmap->n;
7107: M = P->cmap->N;
7108: N = A->cmap->N;
7109: hasoffproc = PETSC_TRUE;
7110: break;
7111: case MATPRODUCT_PtAP:
7112: A = product->A;
7113: P = product->B;
7114: m = P->cmap->n;
7115: n = P->cmap->n;
7116: M = P->cmap->N;
7117: N = P->cmap->N;
7118: hasoffproc = PETSC_TRUE;
7119: break;
7120: default:
7121: SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Not for product type %s", MatProductTypes[ptype]);
7122: }
7123: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)C), &size));
7124: if (size == 1) hasoffproc = PETSC_FALSE;
7126: /* defaults */
7127: for (i = 0; i < MAX_NUMBER_INTERMEDIATE; i++) {
7128: mp[i] = NULL;
7129: mptmp[i] = PETSC_FALSE;
7130: rmapt[i] = -1;
7131: cmapt[i] = -1;
7132: rmapa[i] = NULL;
7133: cmapa[i] = NULL;
7134: }
7136: /* customization */
7137: PetscCall(PetscNew(&mmdata));
7138: mmdata->reusesym = product->api_user;
7139: if (ptype == MATPRODUCT_AB) {
7140: if (product->api_user) {
7141: PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatMatMult", "Mat");
7142: PetscCall(PetscOptionsBool("-matmatmult_backend_mergeB", "Merge product->B local matrices", "MatMatMult", mmdata->abmerge, &mmdata->abmerge, NULL));
7143: PetscCall(PetscOptionsBool("-matmatmult_backend_pothbind", "Bind P_oth to CPU", "MatBindToCPU", mmdata->P_oth_bind, &mmdata->P_oth_bind, NULL));
7144: PetscOptionsEnd();
7145: } else {
7146: PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_AB", "Mat");
7147: PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_mergeB", "Merge product->B local matrices", "MatMatMult", mmdata->abmerge, &mmdata->abmerge, NULL));
7148: PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_pothbind", "Bind P_oth to CPU", "MatBindToCPU", mmdata->P_oth_bind, &mmdata->P_oth_bind, NULL));
7149: PetscOptionsEnd();
7150: }
7151: } else if (ptype == MATPRODUCT_PtAP) {
7152: if (product->api_user) {
7153: PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatPtAP", "Mat");
7154: PetscCall(PetscOptionsBool("-matptap_backend_pothbind", "Bind P_oth to CPU", "MatBindToCPU", mmdata->P_oth_bind, &mmdata->P_oth_bind, NULL));
7155: PetscOptionsEnd();
7156: } else {
7157: PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_PtAP", "Mat");
7158: PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_pothbind", "Bind P_oth to CPU", "MatBindToCPU", mmdata->P_oth_bind, &mmdata->P_oth_bind, NULL));
7159: PetscOptionsEnd();
7160: }
7161: }
7162: a = (Mat_MPIAIJ *)A->data;
7163: p = (Mat_MPIAIJ *)P->data;
7164: PetscCall(MatSetSizes(C, m, n, M, N));
7165: PetscCall(PetscLayoutSetUp(C->rmap));
7166: PetscCall(PetscLayoutSetUp(C->cmap));
7167: PetscCall(MatSetType(C, ((PetscObject)A)->type_name));
7168: PetscCall(MatGetOptionsPrefix(C, &prefix));
7170: cp = 0;
7171: switch (ptype) {
7172: case MATPRODUCT_AB: /* A * P */
7173: PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_INITIAL_MATRIX, &mmdata->startsj_s, &mmdata->startsj_r, &mmdata->bufa, &mmdata->P_oth));
7175: /* A_diag * P_local (merged or not) */
7176: if (mmdata->abmerge) { /* P's diagonal and off-diag blocks are merged to one matrix, then multiplied by A_diag */
7177: /* P is product->B */
7178: PetscCall(MatMPIAIJGetLocalMatMerge(P, MAT_INITIAL_MATRIX, &glob, &mmdata->Bloc));
7179: PetscCall(MatProductCreate(a->A, mmdata->Bloc, NULL, &mp[cp]));
7180: PetscCall(MatProductSetType(mp[cp], MATPRODUCT_AB));
7181: PetscCall(MatProductSetFill(mp[cp], product->fill));
7182: PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
7183: PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
7184: PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
7185: mp[cp]->product->api_user = product->api_user;
7186: PetscCall(MatProductSetFromOptions(mp[cp]));
7187: PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
7188: PetscCall(ISGetIndices(glob, &globidx));
7189: rmapt[cp] = 1;
7190: cmapt[cp] = 2;
7191: cmapa[cp] = globidx;
7192: mptmp[cp] = PETSC_FALSE;
7193: cp++;
7194: } else { /* A_diag * P_diag and A_diag * P_off */
7195: PetscCall(MatProductCreate(a->A, p->A, NULL, &mp[cp]));
7196: PetscCall(MatProductSetType(mp[cp], MATPRODUCT_AB));
7197: PetscCall(MatProductSetFill(mp[cp], product->fill));
7198: PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
7199: PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
7200: PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
7201: mp[cp]->product->api_user = product->api_user;
7202: PetscCall(MatProductSetFromOptions(mp[cp]));
7203: PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
7204: rmapt[cp] = 1;
7205: cmapt[cp] = 1;
7206: mptmp[cp] = PETSC_FALSE;
7207: cp++;
7208: PetscCall(MatProductCreate(a->A, p->B, NULL, &mp[cp]));
7209: PetscCall(MatProductSetType(mp[cp], MATPRODUCT_AB));
7210: PetscCall(MatProductSetFill(mp[cp], product->fill));
7211: PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
7212: PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
7213: PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
7214: mp[cp]->product->api_user = product->api_user;
7215: PetscCall(MatProductSetFromOptions(mp[cp]));
7216: PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
7217: rmapt[cp] = 1;
7218: cmapt[cp] = 2;
7219: cmapa[cp] = p->garray;
7220: mptmp[cp] = PETSC_FALSE;
7221: cp++;
7222: }
7224: /* A_off * P_other */
7225: if (mmdata->P_oth) {
7226: PetscCall(MatSeqAIJCompactOutExtraColumns_SeqAIJ(mmdata->P_oth, &P_oth_l2g)); /* make P_oth use local col ids */
7227: PetscCall(ISLocalToGlobalMappingGetIndices(P_oth_l2g, &P_oth_idx));
7228: PetscCall(MatSetType(mmdata->P_oth, ((PetscObject)a->B)->type_name));
7229: PetscCall(MatBindToCPU(mmdata->P_oth, mmdata->P_oth_bind));
7230: PetscCall(MatProductCreate(a->B, mmdata->P_oth, NULL, &mp[cp]));
7231: PetscCall(MatProductSetType(mp[cp], MATPRODUCT_AB));
7232: PetscCall(MatProductSetFill(mp[cp], product->fill));
7233: PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
7234: PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
7235: PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
7236: mp[cp]->product->api_user = product->api_user;
7237: PetscCall(MatProductSetFromOptions(mp[cp]));
7238: PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
7239: rmapt[cp] = 1;
7240: cmapt[cp] = 2;
7241: cmapa[cp] = P_oth_idx;
7242: mptmp[cp] = PETSC_FALSE;
7243: cp++;
7244: }
7245: break;
7247: case MATPRODUCT_AtB: /* P^t * A: P_diag^t * A_loc + P_off^t * A_loc */
7248: /* A is product->B */
7249: PetscCall(MatMPIAIJGetLocalMatMerge(A, MAT_INITIAL_MATRIX, &glob, &mmdata->Bloc));
7250: if (A == P) { /* when A==P, we can take advantage of the already merged mmdata->Bloc */
7251: PetscCall(MatProductCreate(mmdata->Bloc, mmdata->Bloc, NULL, &mp[cp]));
7252: PetscCall(MatProductSetType(mp[cp], MATPRODUCT_AtB));
7253: PetscCall(MatProductSetFill(mp[cp], product->fill));
7254: PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
7255: PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
7256: PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
7257: mp[cp]->product->api_user = product->api_user;
7258: PetscCall(MatProductSetFromOptions(mp[cp]));
7259: PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
7260: PetscCall(ISGetIndices(glob, &globidx));
7261: rmapt[cp] = 2;
7262: rmapa[cp] = globidx;
7263: cmapt[cp] = 2;
7264: cmapa[cp] = globidx;
7265: mptmp[cp] = PETSC_FALSE;
7266: cp++;
7267: } else {
7268: PetscCall(MatProductCreate(p->A, mmdata->Bloc, NULL, &mp[cp]));
7269: PetscCall(MatProductSetType(mp[cp], MATPRODUCT_AtB));
7270: PetscCall(MatProductSetFill(mp[cp], product->fill));
7271: PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
7272: PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
7273: PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
7274: mp[cp]->product->api_user = product->api_user;
7275: PetscCall(MatProductSetFromOptions(mp[cp]));
7276: PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
7277: PetscCall(ISGetIndices(glob, &globidx));
7278: rmapt[cp] = 1;
7279: cmapt[cp] = 2;
7280: cmapa[cp] = globidx;
7281: mptmp[cp] = PETSC_FALSE;
7282: cp++;
7283: PetscCall(MatProductCreate(p->B, mmdata->Bloc, NULL, &mp[cp]));
7284: PetscCall(MatProductSetType(mp[cp], MATPRODUCT_AtB));
7285: PetscCall(MatProductSetFill(mp[cp], product->fill));
7286: PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
7287: PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
7288: PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
7289: mp[cp]->product->api_user = product->api_user;
7290: PetscCall(MatProductSetFromOptions(mp[cp]));
7291: PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
7292: rmapt[cp] = 2;
7293: rmapa[cp] = p->garray;
7294: cmapt[cp] = 2;
7295: cmapa[cp] = globidx;
7296: mptmp[cp] = PETSC_FALSE;
7297: cp++;
7298: }
7299: break;
7300: case MATPRODUCT_PtAP:
7301: PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_INITIAL_MATRIX, &mmdata->startsj_s, &mmdata->startsj_r, &mmdata->bufa, &mmdata->P_oth));
7302: /* P is product->B */
7303: PetscCall(MatMPIAIJGetLocalMatMerge(P, MAT_INITIAL_MATRIX, &glob, &mmdata->Bloc));
7304: PetscCall(MatProductCreate(a->A, mmdata->Bloc, NULL, &mp[cp]));
7305: PetscCall(MatProductSetType(mp[cp], MATPRODUCT_PtAP));
7306: PetscCall(MatProductSetFill(mp[cp], product->fill));
7307: PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
7308: PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
7309: PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
7310: mp[cp]->product->api_user = product->api_user;
7311: PetscCall(MatProductSetFromOptions(mp[cp]));
7312: PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
7313: PetscCall(ISGetIndices(glob, &globidx));
7314: rmapt[cp] = 2;
7315: rmapa[cp] = globidx;
7316: cmapt[cp] = 2;
7317: cmapa[cp] = globidx;
7318: mptmp[cp] = PETSC_FALSE;
7319: cp++;
7320: if (mmdata->P_oth) {
7321: PetscCall(MatSeqAIJCompactOutExtraColumns_SeqAIJ(mmdata->P_oth, &P_oth_l2g));
7322: PetscCall(ISLocalToGlobalMappingGetIndices(P_oth_l2g, &P_oth_idx));
7323: PetscCall(MatSetType(mmdata->P_oth, ((PetscObject)a->B)->type_name));
7324: PetscCall(MatBindToCPU(mmdata->P_oth, mmdata->P_oth_bind));
7325: PetscCall(MatProductCreate(a->B, mmdata->P_oth, NULL, &mp[cp]));
7326: PetscCall(MatProductSetType(mp[cp], MATPRODUCT_AB));
7327: PetscCall(MatProductSetFill(mp[cp], product->fill));
7328: PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
7329: PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
7330: PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
7331: mp[cp]->product->api_user = product->api_user;
7332: PetscCall(MatProductSetFromOptions(mp[cp]));
7333: PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
7334: mptmp[cp] = PETSC_TRUE;
7335: cp++;
7336: PetscCall(MatProductCreate(mmdata->Bloc, mp[1], NULL, &mp[cp]));
7337: PetscCall(MatProductSetType(mp[cp], MATPRODUCT_AtB));
7338: PetscCall(MatProductSetFill(mp[cp], product->fill));
7339: PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
7340: PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
7341: PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
7342: mp[cp]->product->api_user = product->api_user;
7343: PetscCall(MatProductSetFromOptions(mp[cp]));
7344: PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
7345: rmapt[cp] = 2;
7346: rmapa[cp] = globidx;
7347: cmapt[cp] = 2;
7348: cmapa[cp] = P_oth_idx;
7349: mptmp[cp] = PETSC_FALSE;
7350: cp++;
7351: }
7352: break;
7353: default:
7354: SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Not for product type %s", MatProductTypes[ptype]);
7355: }
7356: /* sanity check */
7357: if (size > 1)
7358: for (i = 0; i < cp; i++) PetscCheck(rmapt[i] != 2 || hasoffproc, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Unexpected offproc map type for product %" PetscInt_FMT, i);
7360: PetscCall(PetscMalloc2(cp, &mmdata->mp, cp, &mmdata->mptmp));
7361: for (i = 0; i < cp; i++) {
7362: mmdata->mp[i] = mp[i];
7363: mmdata->mptmp[i] = mptmp[i];
7364: }
7365: mmdata->cp = cp;
7366: C->product->data = mmdata;
7367: C->product->destroy = MatProductCtxDestroy_MatMatMPIAIJBACKEND;
7368: C->ops->productnumeric = MatProductNumeric_MPIAIJBACKEND;
7370: /* memory type */
7371: mmdata->mtype = PETSC_MEMTYPE_HOST;
7372: PetscCall(PetscObjectTypeCompareAny((PetscObject)C, &iscuda, MATSEQAIJCUSPARSE, MATMPIAIJCUSPARSE, ""));
7373: PetscCall(PetscObjectTypeCompareAny((PetscObject)C, &iship, MATSEQAIJHIPSPARSE, MATMPIAIJHIPSPARSE, ""));
7374: PetscCall(PetscObjectTypeCompareAny((PetscObject)C, &iskokk, MATSEQAIJKOKKOS, MATMPIAIJKOKKOS, ""));
7375: if (iscuda) mmdata->mtype = PETSC_MEMTYPE_CUDA;
7376: else if (iship) mmdata->mtype = PETSC_MEMTYPE_HIP;
7377: else if (iskokk) mmdata->mtype = PETSC_MEMTYPE_KOKKOS;
7379: /* prepare coo coordinates for values insertion */
7381: /* count total nonzeros of those intermediate seqaij Mats
7382: ncoo_d: # of nonzeros of matrices that do not have offproc entries
7383: ncoo_o: # of nonzeros (of matrices that might have offproc entries) that will be inserted to remote procs
7384: ncoo_oown: # of nonzeros (of matrices that might have offproc entries) that will be inserted locally
7385: */
7386: for (cp = 0, ncoo_d = 0, ncoo_o = 0, ncoo_oown = 0; cp < mmdata->cp; cp++) {
7387: Mat_SeqAIJ *mm = (Mat_SeqAIJ *)mp[cp]->data;
7388: if (mptmp[cp]) continue;
7389: if (rmapt[cp] == 2 && hasoffproc) { /* the rows need to be scattered to all processes (might include self) */
7390: const PetscInt *rmap = rmapa[cp];
7391: const PetscInt mr = mp[cp]->rmap->n;
7392: const PetscInt rs = C->rmap->rstart;
7393: const PetscInt re = C->rmap->rend;
7394: const PetscInt *ii = mm->i;
7395: for (i = 0; i < mr; i++) {
7396: const PetscInt gr = rmap[i];
7397: const PetscInt nz = ii[i + 1] - ii[i];
7398: if (gr < rs || gr >= re) ncoo_o += nz; /* this row is offproc */
7399: else ncoo_oown += nz; /* this row is local */
7400: }
7401: } else ncoo_d += mm->nz;
7402: }
7404: /*
7405: ncoo: total number of nonzeros (including those inserted by remote procs) belonging to this proc
7407: ncoo = ncoo_d + ncoo_oown + ncoo2, where ncoo2 is the number of nonzeros inserted into this proc by other procs.
7409: off[0] points to a big index array, which is shared by off[1,2,...]. Similarly for own[0].
7411: off[p]: points to the segment for matrix mp[p], storing locations of nonzeros that mp[p] will insert on other procs
7412: own[p]: points to the segment for matrix mp[p], storing locations of nonzeros that mp[p] will insert locally
7413: so off[p+1]-off[p] is the number of nonzeros that mp[p] will send to others.
7415: coo_i/j/v[]: [ncoo] row/col/val of nonzeros belonging to this proc.
7416: Ex. coo_i[]: the beginning part (of size ncoo_d + ncoo_oown) stores i of local nonzeros, and the remaining part stores i of nonzeros this proc will receive.
7417: */
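/* Illustrative example (added comment; numbers are made up): suppose cp = 2 kept products, both with type-2
   row maps, where mp[0] has 3 nonzeros destined for other procs and 5 local ones, and mp[1] has 0 and 4,
   respectively. Then ncoo_o = 3, ncoo_oown = 9, off[] = {p, p+3, p+3} and own[] = {q, q+5, q+9}, so
   off[cp+1]-off[cp] and own[cp+1]-own[cp] recover the per-product counts used below. */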
7418: PetscCall(PetscCalloc1(mmdata->cp + 1, &mmdata->off)); /* +1 to make a csr-like data structure */
7419: PetscCall(PetscCalloc1(mmdata->cp + 1, &mmdata->own));
7421: /* gather (i,j) of nonzeros inserted by remote procs */
7422: if (hasoffproc) {
7423: PetscSF msf;
7424: PetscInt ncoo2, *coo_i2, *coo_j2;
7426: PetscCall(PetscMalloc1(ncoo_o, &mmdata->off[0]));
7427: PetscCall(PetscMalloc1(ncoo_oown, &mmdata->own[0]));
7428: PetscCall(PetscMalloc2(ncoo_o, &coo_i, ncoo_o, &coo_j)); /* to collect (i,j) of entries to be sent to others */
7430: for (cp = 0, ncoo_o = 0; cp < mmdata->cp; cp++) {
7431: Mat_SeqAIJ *mm = (Mat_SeqAIJ *)mp[cp]->data;
7432: PetscInt *idxoff = mmdata->off[cp];
7433: PetscInt *idxown = mmdata->own[cp];
7434: if (!mptmp[cp] && rmapt[cp] == 2) { /* row map is sparse */
7435: const PetscInt *rmap = rmapa[cp];
7436: const PetscInt *cmap = cmapa[cp];
7437: const PetscInt *ii = mm->i;
7438: PetscInt *coi = coo_i + ncoo_o;
7439: PetscInt *coj = coo_j + ncoo_o;
7440: const PetscInt mr = mp[cp]->rmap->n;
7441: const PetscInt rs = C->rmap->rstart;
7442: const PetscInt re = C->rmap->rend;
7443: const PetscInt cs = C->cmap->rstart;
7444: for (i = 0; i < mr; i++) {
7445: const PetscInt *jj = mm->j + ii[i];
7446: const PetscInt gr = rmap[i];
7447: const PetscInt nz = ii[i + 1] - ii[i];
7448: if (gr < rs || gr >= re) { /* this is an offproc row */
7449: for (j = ii[i]; j < ii[i + 1]; j++) {
7450: *coi++ = gr;
7451: *idxoff++ = j;
7452: }
7453: if (!cmapt[cp]) { /* already global */
7454: for (j = 0; j < nz; j++) *coj++ = jj[j];
7455: } else if (cmapt[cp] == 1) { /* local to global for owned columns of C */
7456: for (j = 0; j < nz; j++) *coj++ = jj[j] + cs;
7457: } else { /* offdiag */
7458: for (j = 0; j < nz; j++) *coj++ = cmap[jj[j]];
7459: }
7460: ncoo_o += nz;
7461: } else { /* this is a local row */
7462: for (j = ii[i]; j < ii[i + 1]; j++) *idxown++ = j;
7463: }
7464: }
7465: }
7466: mmdata->off[cp + 1] = idxoff;
7467: mmdata->own[cp + 1] = idxown;
7468: }
7470: PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)C), &mmdata->sf));
7471: PetscInt incoo_o;
7472: PetscCall(PetscIntCast(ncoo_o, &incoo_o));
7473: PetscCall(PetscSFSetGraphLayout(mmdata->sf, C->rmap, incoo_o /*nleaves*/, NULL /*ilocal*/, PETSC_OWN_POINTER, coo_i));
7474: PetscCall(PetscSFGetMultiSF(mmdata->sf, &msf));
7475: PetscCall(PetscSFGetGraph(msf, &ncoo2 /*nroots*/, NULL, NULL, NULL));
7476: ncoo = ncoo_d + ncoo_oown + ncoo2;
7477: PetscCall(PetscMalloc2(ncoo, &coo_i2, ncoo, &coo_j2));
7478: PetscCall(PetscSFGatherBegin(mmdata->sf, MPIU_INT, coo_i, coo_i2 + ncoo_d + ncoo_oown)); /* put (i,j) of remote nonzeros at back */
7479: PetscCall(PetscSFGatherEnd(mmdata->sf, MPIU_INT, coo_i, coo_i2 + ncoo_d + ncoo_oown));
7480: PetscCall(PetscSFGatherBegin(mmdata->sf, MPIU_INT, coo_j, coo_j2 + ncoo_d + ncoo_oown));
7481: PetscCall(PetscSFGatherEnd(mmdata->sf, MPIU_INT, coo_j, coo_j2 + ncoo_d + ncoo_oown));
7482: PetscCall(PetscFree2(coo_i, coo_j));
7483: /* allocate MPI send buffer to collect nonzero values to be sent to remote procs */
7484: PetscCall(PetscSFMalloc(mmdata->sf, mmdata->mtype, ncoo_o * sizeof(PetscScalar), (void **)&mmdata->coo_w));
7485: coo_i = coo_i2;
7486: coo_j = coo_j2;
7487: } else { /* no offproc values insertion */
7488: ncoo = ncoo_d;
7489: PetscCall(PetscMalloc2(ncoo, &coo_i, ncoo, &coo_j));
7491: PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)C), &mmdata->sf));
7492: PetscCall(PetscSFSetGraph(mmdata->sf, 0, 0, NULL, PETSC_OWN_POINTER, NULL, PETSC_OWN_POINTER));
7493: PetscCall(PetscSFSetUp(mmdata->sf));
7494: }
7495: mmdata->hasoffproc = hasoffproc;
7497: /* gather (i,j) of nonzeros inserted locally */
7498: for (cp = 0, ncoo_d = 0; cp < mmdata->cp; cp++) {
7499: Mat_SeqAIJ *mm = (Mat_SeqAIJ *)mp[cp]->data;
7500: PetscInt *coi = coo_i + ncoo_d;
7501: PetscInt *coj = coo_j + ncoo_d;
7502: const PetscInt *jj = mm->j;
7503: const PetscInt *ii = mm->i;
7504: const PetscInt *cmap = cmapa[cp];
7505: const PetscInt *rmap = rmapa[cp];
7506: const PetscInt mr = mp[cp]->rmap->n;
7507: const PetscInt rs = C->rmap->rstart;
7508: const PetscInt re = C->rmap->rend;
7509: const PetscInt cs = C->cmap->rstart;
7511: if (mptmp[cp]) continue;
7512: if (rmapt[cp] == 1) { /* consecutive rows */
7513: /* fill coo_i */
7514: for (i = 0; i < mr; i++) {
7515: const PetscInt gr = i + rs;
7516: for (j = ii[i]; j < ii[i + 1]; j++) coi[j] = gr;
7517: }
7518: /* fill coo_j */
7519: if (!cmapt[cp]) { /* type-0, already global */
7520: PetscCall(PetscArraycpy(coj, jj, mm->nz));
7521: } else if (cmapt[cp] == 1) { /* type-1, local to global for consecutive columns of C */
7522: for (j = 0; j < mm->nz; j++) coj[j] = jj[j] + cs; /* lid + col start */
7523: } else { /* type-2, local to global for sparse columns */
7524: for (j = 0; j < mm->nz; j++) coj[j] = cmap[jj[j]];
7525: }
7526: ncoo_d += mm->nz;
7527: } else if (rmapt[cp] == 2) { /* sparse rows */
7528: for (i = 0; i < mr; i++) {
7529: const PetscInt *jj = mm->j + ii[i];
7530: const PetscInt gr = rmap[i];
7531: const PetscInt nz = ii[i + 1] - ii[i];
7532: if (gr >= rs && gr < re) { /* local rows */
7533: for (j = ii[i]; j < ii[i + 1]; j++) *coi++ = gr;
7534: if (!cmapt[cp]) { /* type-0, already global */
7535: for (j = 0; j < nz; j++) *coj++ = jj[j];
7536: } else if (cmapt[cp] == 1) { /* local to global for owned columns of C */
7537: for (j = 0; j < nz; j++) *coj++ = jj[j] + cs;
7538: } else { /* type-2, local to global for sparse columns */
7539: for (j = 0; j < nz; j++) *coj++ = cmap[jj[j]];
7540: }
7541: ncoo_d += nz;
7542: }
7543: }
7544: }
7545: }
7546: if (glob) PetscCall(ISRestoreIndices(glob, &globidx));
7547: PetscCall(ISDestroy(&glob));
7548: if (P_oth_l2g) PetscCall(ISLocalToGlobalMappingRestoreIndices(P_oth_l2g, &P_oth_idx));
7549: PetscCall(ISLocalToGlobalMappingDestroy(&P_oth_l2g));
7550: /* allocate an array to store all nonzeros (inserted locally or remotely) belonging to this proc */
7551: PetscCall(PetscSFMalloc(mmdata->sf, mmdata->mtype, ncoo * sizeof(PetscScalar), (void **)&mmdata->coo_v));
7553: /* set block sizes */
7554: A = product->A;
7555: P = product->B;
7556: switch (ptype) {
7557: case MATPRODUCT_PtAP:
7558: PetscCall(MatSetBlockSizes(C, P->cmap->bs, P->cmap->bs));
7559: break;
7560: case MATPRODUCT_RARt:
7561: PetscCall(MatSetBlockSizes(C, P->rmap->bs, P->rmap->bs));
7562: break;
7563: case MATPRODUCT_ABC:
7564: PetscCall(MatSetBlockSizesFromMats(C, A, product->C));
7565: break;
7566: case MATPRODUCT_AB:
7567: PetscCall(MatSetBlockSizesFromMats(C, A, P));
7568: break;
7569: case MATPRODUCT_AtB:
7570: PetscCall(MatSetBlockSizes(C, A->cmap->bs, P->cmap->bs));
7571: break;
7572: case MATPRODUCT_ABt:
7573: PetscCall(MatSetBlockSizes(C, A->rmap->bs, P->rmap->bs));
7574: break;
7575: default:
7576: SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Not for ProductType %s", MatProductTypes[ptype]);
7577: }
7579: /* preallocate with COO data */
7580: PetscCall(MatSetPreallocationCOO(C, ncoo, coo_i, coo_j));
7581: PetscCall(PetscFree2(coo_i, coo_j));
7582: PetscFunctionReturn(PETSC_SUCCESS);
7583: }
7585: PetscErrorCode MatProductSetFromOptions_MPIAIJBACKEND(Mat mat)
7586: {
7587: Mat_Product *product = mat->product;
7588: #if defined(PETSC_HAVE_DEVICE)
7589: PetscBool match = PETSC_FALSE;
7590: PetscBool usecpu = PETSC_FALSE;
7591: #else
7592: PetscBool match = PETSC_TRUE;
7593: #endif
7595: PetscFunctionBegin;
7596: MatCheckProduct(mat, 1);
7597: #if defined(PETSC_HAVE_DEVICE)
7598: if (!product->A->boundtocpu && !product->B->boundtocpu) PetscCall(PetscObjectTypeCompare((PetscObject)product->B, ((PetscObject)product->A)->type_name, &match));
7599: if (match) { /* we can always fallback to the CPU if requested */
7600: switch (product->type) {
7601: case MATPRODUCT_AB:
7602: if (product->api_user) {
7603: PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatMatMult", "Mat");
7604: PetscCall(PetscOptionsBool("-matmatmult_backend_cpu", "Use CPU code", "MatMatMult", usecpu, &usecpu, NULL));
7605: PetscOptionsEnd();
7606: } else {
7607: PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_AB", "Mat");
7608: PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatMatMult", usecpu, &usecpu, NULL));
7609: PetscOptionsEnd();
7610: }
7611: break;
7612: case MATPRODUCT_AtB:
7613: if (product->api_user) {
7614: PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatTransposeMatMult", "Mat");
7615: PetscCall(PetscOptionsBool("-mattransposematmult_backend_cpu", "Use CPU code", "MatTransposeMatMult", usecpu, &usecpu, NULL));
7616: PetscOptionsEnd();
7617: } else {
7618: PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_AtB", "Mat");
7619: PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatTransposeMatMult", usecpu, &usecpu, NULL));
7620: PetscOptionsEnd();
7621: }
7622: break;
7623: case MATPRODUCT_PtAP:
7624: if (product->api_user) {
7625: PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatPtAP", "Mat");
7626: PetscCall(PetscOptionsBool("-matptap_backend_cpu", "Use CPU code", "MatPtAP", usecpu, &usecpu, NULL));
7627: PetscOptionsEnd();
7628: } else {
7629: PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_PtAP", "Mat");
7630: PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatPtAP", usecpu, &usecpu, NULL));
7631: PetscOptionsEnd();
7632: }
7633: break;
7634: default:
7635: break;
7636: }
7637: match = (PetscBool)!usecpu;
7638: }
7639: #endif
7640: if (match) {
7641: switch (product->type) {
7642: case MATPRODUCT_AB:
7643: case MATPRODUCT_AtB:
7644: case MATPRODUCT_PtAP:
7645: mat->ops->productsymbolic = MatProductSymbolic_MPIAIJBACKEND;
7646: break;
7647: default:
7648: break;
7649: }
7650: }
7651: /* fallback to MPIAIJ ops */
7652: if (!mat->ops->productsymbolic) PetscCall(MatProductSetFromOptions_MPIAIJ(mat));
7653: PetscFunctionReturn(PETSC_SUCCESS);
7654: }
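/* Illustrative usage, not part of the PETSc source: for device matrices, the backend products above can be
   forced back onto the CPU from the options database, e.g.

     ./app -matmatmult_backend_cpu             (product created through the MatMatMult() API)
     ./app -mat_product_algorithm_backend_cpu  (product created through the MatProduct interface)
*/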
7656: /*
7657: Produces a set of block column indices of the matrix row, one for each block represented in the original row
7659: n - the number of block indices in cc[]
7660: cc - the block indices (must be large enough to contain the indices)
7661: */
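/* Illustrative example (added comment): with bs = 2 and a row whose column indices are {0, 1, 4, 5, 7},
   the collapsed block column indices returned in cc[] are {0, 2, 3} and *n is set to 3. */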
7662: static inline PetscErrorCode MatCollapseRow(Mat Amat, PetscInt row, PetscInt bs, PetscInt *n, PetscInt *cc)
7663: {
7664: PetscInt cnt = -1, nidx, j;
7665: const PetscInt *idx;
7667: PetscFunctionBegin;
7668: PetscCall(MatGetRow(Amat, row, &nidx, &idx, NULL));
7669: if (nidx) {
7670: cnt = 0;
7671: cc[cnt] = idx[0] / bs;
7672: for (j = 1; j < nidx; j++) {
7673: if (cc[cnt] < idx[j] / bs) cc[++cnt] = idx[j] / bs;
7674: }
7675: }
7676: PetscCall(MatRestoreRow(Amat, row, &nidx, &idx, NULL));
7677: *n = cnt + 1;
7678: PetscFunctionReturn(PETSC_SUCCESS);
7679: }
7681: /*
7682: Produces a set of block column indices of the matrix block row, one for each block represented in the original set of rows
7684: ncollapsed - the number of block indices
7685: collapsed - the block indices (must be large enough to contain the indices)
7686: */
7687: static inline PetscErrorCode MatCollapseRows(Mat Amat, PetscInt start, PetscInt bs, PetscInt *w0, PetscInt *w1, PetscInt *w2, PetscInt *ncollapsed, PetscInt **collapsed)
7688: {
7689: PetscInt i, nprev, *cprev = w0, ncur = 0, *ccur = w1, *merged = w2, *cprevtmp;
7691: PetscFunctionBegin;
7692: PetscCall(MatCollapseRow(Amat, start, bs, &nprev, cprev));
7693: for (i = start + 1; i < start + bs; i++) {
7694: PetscCall(MatCollapseRow(Amat, i, bs, &ncur, ccur));
7695: PetscCall(PetscMergeIntArray(nprev, cprev, ncur, ccur, &nprev, &merged));
7696: cprevtmp = cprev;
7697: cprev = merged;
7698: merged = cprevtmp;
7699: }
7700: *ncollapsed = nprev;
7701: if (collapsed) *collapsed = cprev;
7702: PetscFunctionReturn(PETSC_SUCCESS);
7703: }
7705: /*
7706: MatCreateGraph_Simple_AIJ - create simple scalar matrix (graph) from potentially blocked matrix
7708: Input Parameters:
7709: + Amat - matrix
7710: . symmetrize - make the result symmetric
7711: - scale - scale with diagonal
7713: Output Parameter:
7714: . a_Gmat - output scalar graph >= 0
7716: */
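/* Illustrative example (added comment): with bs = 3 and index_size = 0, a dense 3x3 block whose entries are
   all +1 or -1 collapses to a single scalar graph entry of value 9, the sum of the absolute values of the
   real parts of the block entries; when index_size > 0 only the selected (index,index) entries contribute. */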
7717: PETSC_INTERN PetscErrorCode MatCreateGraph_Simple_AIJ(Mat Amat, PetscBool symmetrize, PetscBool scale, PetscReal filter, PetscInt index_size, PetscInt index[], Mat *a_Gmat)
7718: {
7719: PetscInt Istart, Iend, Ii, jj, kk, ncols, nloc, NN, MM, bs;
7720: MPI_Comm comm;
7721: Mat Gmat;
7722: PetscBool ismpiaij, isseqaij;
7723: Mat a, b, c;
7724: MatType jtype;
7726: PetscFunctionBegin;
7727: PetscCall(PetscObjectGetComm((PetscObject)Amat, &comm));
7728: PetscCall(MatGetOwnershipRange(Amat, &Istart, &Iend));
7729: PetscCall(MatGetSize(Amat, &MM, &NN));
7730: PetscCall(MatGetBlockSize(Amat, &bs));
7731: nloc = (Iend - Istart) / bs;
7733: PetscCall(PetscObjectBaseTypeCompare((PetscObject)Amat, MATSEQAIJ, &isseqaij));
7734: PetscCall(PetscObjectBaseTypeCompare((PetscObject)Amat, MATMPIAIJ, &ismpiaij));
7735: PetscCheck(isseqaij || ismpiaij, comm, PETSC_ERR_USER, "Require (MPI)AIJ matrix type");
7737: /* TODO GPU: these calls are potentially expensive if matrices are large and we want to use the GPU */
7738: /* A solution would be to provide a new API, MatAIJGetCollapsedAIJ, so that each class can provide a fast
7739: implementation */
7740: if (bs > 1) {
7741: PetscCall(MatGetType(Amat, &jtype));
7742: PetscCall(MatCreate(comm, &Gmat));
7743: PetscCall(MatSetType(Gmat, jtype));
7744: PetscCall(MatSetSizes(Gmat, nloc, nloc, PETSC_DETERMINE, PETSC_DETERMINE));
7745: PetscCall(MatSetBlockSizes(Gmat, 1, 1));
7746: if (isseqaij || ((Mat_MPIAIJ *)Amat->data)->garray) {
7747: PetscInt *d_nnz, *o_nnz;
7748: MatScalar *aa, val, *AA;
7749: PetscInt *aj, *ai, *AJ, nc, nmax = 0;
7751: if (isseqaij) {
7752: a = Amat;
7753: b = NULL;
7754: } else {
7755: Mat_MPIAIJ *d = (Mat_MPIAIJ *)Amat->data;
7756: a = d->A;
7757: b = d->B;
7758: }
7759: PetscCall(PetscInfo(Amat, "New bs>1 Graph. nloc=%" PetscInt_FMT "\n", nloc));
7760: PetscCall(PetscMalloc2(nloc, &d_nnz, (isseqaij ? 0 : nloc), &o_nnz));
7761: for (c = a, kk = 0; c && kk < 2; c = b, kk++) {
7762: PetscInt *nnz = (c == a) ? d_nnz : o_nnz;
7763: const PetscInt *cols1, *cols2;
7765: for (PetscInt brow = 0, nc1, nc2, ok = 1; brow < nloc * bs; brow += bs) { // block rows
7766: PetscCall(MatGetRow(c, brow, &nc2, &cols2, NULL));
7767: nnz[brow / bs] = nc2 / bs;
7768: if (nc2 % bs) ok = 0;
7769: if (nnz[brow / bs] > nmax) nmax = nnz[brow / bs];
7770: for (PetscInt ii = 1; ii < bs; ii++) { // check for non-dense blocks
7771: PetscCall(MatGetRow(c, brow + ii, &nc1, &cols1, NULL));
7772: if (nc1 != nc2) ok = 0;
7773: else {
7774: for (PetscInt jj = 0; jj < nc1 && ok == 1; jj++) {
7775: if (cols1[jj] != cols2[jj]) ok = 0;
7776: if (cols1[jj] % bs != jj % bs) ok = 0;
7777: }
7778: }
7779: PetscCall(MatRestoreRow(c, brow + ii, &nc1, &cols1, NULL));
7780: }
7781: PetscCall(MatRestoreRow(c, brow, &nc2, &cols2, NULL));
7782: if (!ok) {
7783: PetscCall(PetscFree2(d_nnz, o_nnz));
7784: PetscCall(PetscInfo(Amat, "Found sparse blocks - revert to slow method\n"));
7785: goto old_bs;
7786: }
7787: }
7788: }
7789: PetscCall(MatSeqAIJSetPreallocation(Gmat, 0, d_nnz));
7790: PetscCall(MatMPIAIJSetPreallocation(Gmat, 0, d_nnz, 0, o_nnz));
7791: PetscCall(PetscFree2(d_nnz, o_nnz));
7792: PetscCall(PetscMalloc2(nmax, &AA, nmax, &AJ));
7793: // diag
7794: for (PetscInt brow = 0, n, grow; brow < nloc * bs; brow += bs) { // block rows
7795: Mat_SeqAIJ *aseq = (Mat_SeqAIJ *)a->data;
7797: ai = aseq->i;
7798: n = ai[brow + 1] - ai[brow];
7799: aj = aseq->j + ai[brow];
7800: for (PetscInt k = 0; k < n; k += bs) { // block columns
7801: AJ[k / bs] = aj[k] / bs + Istart / bs; // diag starts at (Istart,Istart)
7802: val = 0;
7803: if (index_size == 0) {
7804: for (PetscInt ii = 0; ii < bs; ii++) { // rows in block
7805: aa = aseq->a + ai[brow + ii] + k;
7806: for (PetscInt jj = 0; jj < bs; jj++) { // columns in block
7807: val += PetscAbs(PetscRealPart(aa[jj])); // a sort of norm
7808: }
7809: }
7810: } else { // use (index,index) value if provided
7811: for (PetscInt iii = 0; iii < index_size; iii++) { // rows in block
7812: PetscInt ii = index[iii];
7813: aa = aseq->a + ai[brow + ii] + k;
7814: for (PetscInt jjj = 0; jjj < index_size; jjj++) { // columns in block
7815: PetscInt jj = index[jjj];
7816: val += PetscAbs(PetscRealPart(aa[jj]));
7817: }
7818: }
7819: }
7820: PetscAssert(k / bs < nmax, comm, PETSC_ERR_USER, "k / bs (%" PetscInt_FMT ") >= nmax (%" PetscInt_FMT ")", k / bs, nmax);
7821: AA[k / bs] = val;
7822: }
7823: grow = Istart / bs + brow / bs;
7824: PetscCall(MatSetValues(Gmat, 1, &grow, n / bs, AJ, AA, ADD_VALUES));
7825: }
7826: // off-diag
7827: if (ismpiaij) {
7828: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)Amat->data;
7829: const PetscScalar *vals;
7830: const PetscInt *cols, *garray = aij->garray;
7832: PetscCheck(garray, PETSC_COMM_SELF, PETSC_ERR_USER, "No garray?");
7833: for (PetscInt brow = 0, grow; brow < nloc * bs; brow += bs) { // block rows
7834: PetscCall(MatGetRow(b, brow, &ncols, &cols, NULL));
7835: for (PetscInt k = 0, cidx = 0; k < ncols; k += bs, cidx++) {
7836: PetscAssert(k / bs < nmax, comm, PETSC_ERR_USER, "k / bs >= nmax");
7837: AA[k / bs] = 0;
7838: AJ[cidx] = garray[cols[k]] / bs;
7839: }
7840: nc = ncols / bs;
7841: PetscCall(MatRestoreRow(b, brow, &ncols, &cols, NULL));
7842: if (index_size == 0) {
7843: for (PetscInt ii = 0; ii < bs; ii++) { // rows in block
7844: PetscCall(MatGetRow(b, brow + ii, &ncols, &cols, &vals));
7845: for (PetscInt k = 0; k < ncols; k += bs) {
7846: for (PetscInt jj = 0; jj < bs; jj++) { // cols in block
7847: PetscAssert(k / bs < nmax, comm, PETSC_ERR_USER, "k / bs (%" PetscInt_FMT ") >= nmax (%" PetscInt_FMT ")", k / bs, nmax);
7848: AA[k / bs] += PetscAbs(PetscRealPart(vals[k + jj]));
7849: }
7850: }
7851: PetscCall(MatRestoreRow(b, brow + ii, &ncols, &cols, &vals));
7852: }
7853: } else { // use (index,index) value if provided
7854: for (PetscInt iii = 0; iii < index_size; iii++) { // rows in block
7855: PetscInt ii = index[iii];
7856: PetscCall(MatGetRow(b, brow + ii, &ncols, &cols, &vals));
7857: for (PetscInt k = 0; k < ncols; k += bs) {
7858: for (PetscInt jjj = 0; jjj < index_size; jjj++) { // cols in block
7859: PetscInt jj = index[jjj];
7860: AA[k / bs] += PetscAbs(PetscRealPart(vals[k + jj]));
7861: }
7862: }
7863: PetscCall(MatRestoreRow(b, brow + ii, &ncols, &cols, &vals));
7864: }
7865: }
7866: grow = Istart / bs + brow / bs;
7867: PetscCall(MatSetValues(Gmat, 1, &grow, nc, AJ, AA, ADD_VALUES));
7868: }
7869: }
7870: PetscCall(MatAssemblyBegin(Gmat, MAT_FINAL_ASSEMBLY));
7871: PetscCall(MatAssemblyEnd(Gmat, MAT_FINAL_ASSEMBLY));
7872: PetscCall(PetscFree2(AA, AJ));
7873: } else {
7874: const PetscScalar *vals;
7875: const PetscInt *idx;
7876: PetscInt *d_nnz, *o_nnz, *w0, *w1, *w2;
7877: old_bs:
7878: /*
7879: Determine the preallocation needed for the scalar matrix derived from the vector matrix.
7880: */
7881: PetscCall(PetscInfo(Amat, "OLD bs>1 CreateGraph\n"));
7882: PetscCall(PetscMalloc2(nloc, &d_nnz, (isseqaij ? 0 : nloc), &o_nnz));
7883: if (isseqaij) {
7884: PetscInt max_d_nnz;
7886: /*
7887: Determine exact preallocation count for (sequential) scalar matrix
7888: */
7889: PetscCall(MatSeqAIJGetMaxRowNonzeros(Amat, &max_d_nnz));
7890: max_d_nnz = PetscMin(nloc, bs * max_d_nnz);
7891: PetscCall(PetscMalloc3(max_d_nnz, &w0, max_d_nnz, &w1, max_d_nnz, &w2));
7892: for (Ii = 0, jj = 0; Ii < Iend; Ii += bs, jj++) PetscCall(MatCollapseRows(Amat, Ii, bs, w0, w1, w2, &d_nnz[jj], NULL));
7893: PetscCall(PetscFree3(w0, w1, w2));
7894: } else if (ismpiaij) {
7895: Mat Daij, Oaij;
7896: const PetscInt *garray;
7897: PetscInt max_d_nnz;
7899: PetscCall(MatMPIAIJGetSeqAIJ(Amat, &Daij, &Oaij, &garray));
7900: /*
7901: Determine exact preallocation count for diagonal block portion of scalar matrix
7902: */
7903: PetscCall(MatSeqAIJGetMaxRowNonzeros(Daij, &max_d_nnz));
7904: max_d_nnz = PetscMin(nloc, bs * max_d_nnz);
7905: PetscCall(PetscMalloc3(max_d_nnz, &w0, max_d_nnz, &w1, max_d_nnz, &w2));
7906: for (Ii = 0, jj = 0; Ii < Iend - Istart; Ii += bs, jj++) PetscCall(MatCollapseRows(Daij, Ii, bs, w0, w1, w2, &d_nnz[jj], NULL));
7907: PetscCall(PetscFree3(w0, w1, w2));
7908: /*
7909: Overestimate (usually grossly) the preallocation count for the off-diagonal portion of the scalar matrix
7910: */
7911: for (Ii = 0, jj = 0; Ii < Iend - Istart; Ii += bs, jj++) {
7912: o_nnz[jj] = 0;
7913: for (kk = 0; kk < bs; kk++) { /* rows that get collapsed to a single row */
7914: PetscCall(MatGetRow(Oaij, Ii + kk, &ncols, NULL, NULL));
7915: o_nnz[jj] += ncols;
7916: PetscCall(MatRestoreRow(Oaij, Ii + kk, &ncols, NULL, NULL));
7917: }
7918: if (o_nnz[jj] > (NN / bs - nloc)) o_nnz[jj] = NN / bs - nloc;
7919: }
7920: } else SETERRQ(comm, PETSC_ERR_USER, "Require AIJ matrix type");
7921: /* get scalar copy (norms) of matrix */
7922: PetscCall(MatSeqAIJSetPreallocation(Gmat, 0, d_nnz));
7923: PetscCall(MatMPIAIJSetPreallocation(Gmat, 0, d_nnz, 0, o_nnz));
7924: PetscCall(PetscFree2(d_nnz, o_nnz));
7925: for (Ii = Istart; Ii < Iend; Ii++) {
7926: PetscInt dest_row = Ii / bs;
7928: PetscCall(MatGetRow(Amat, Ii, &ncols, &idx, &vals));
7929: for (jj = 0; jj < ncols; jj++) {
7930: PetscInt dest_col = idx[jj] / bs;
7931: PetscScalar sv = PetscAbs(PetscRealPart(vals[jj]));
7933: PetscCall(MatSetValues(Gmat, 1, &dest_row, 1, &dest_col, &sv, ADD_VALUES));
7934: }
7935: PetscCall(MatRestoreRow(Amat, Ii, &ncols, &idx, &vals));
7936: }
7937: PetscCall(MatAssemblyBegin(Gmat, MAT_FINAL_ASSEMBLY));
7938: PetscCall(MatAssemblyEnd(Gmat, MAT_FINAL_ASSEMBLY));
7939: }
7940: } else {
7941: if (symmetrize || filter >= 0 || scale) PetscCall(MatDuplicate(Amat, MAT_COPY_VALUES, &Gmat));
7942: else {
7943: Gmat = Amat;
7944: PetscCall(PetscObjectReference((PetscObject)Gmat));
7945: }
7946: if (isseqaij) {
7947: a = Gmat;
7948: b = NULL;
7949: } else {
7950: Mat_MPIAIJ *d = (Mat_MPIAIJ *)Gmat->data;
7951: a = d->A;
7952: b = d->B;
7953: }
7954: if (filter >= 0 || scale) {
7955: /* take absolute value of each entry */
7956: for (c = a, kk = 0; c && kk < 2; c = b, kk++) {
7957: MatInfo info;
7958: PetscScalar *avals;
7960: PetscCall(MatGetInfo(c, MAT_LOCAL, &info));
7961: PetscCall(MatSeqAIJGetArray(c, &avals));
7962: for (int jj = 0; jj < info.nz_used; jj++) avals[jj] = PetscAbsScalar(avals[jj]);
7963: PetscCall(MatSeqAIJRestoreArray(c, &avals));
7964: }
7965: }
7966: }
7967: if (symmetrize) {
7968: PetscBool isset, issym;
7970: PetscCall(MatIsSymmetricKnown(Amat, &isset, &issym));
7971: if (!isset || !issym) {
7972: Mat matTrans;
7974: PetscCall(MatTranspose(Gmat, MAT_INITIAL_MATRIX, &matTrans));
7975: PetscCall(MatAXPY(Gmat, 1.0, matTrans, Gmat->structurally_symmetric == PETSC_BOOL3_TRUE ? SAME_NONZERO_PATTERN : DIFFERENT_NONZERO_PATTERN));
7976: PetscCall(MatDestroy(&matTrans));
7977: }
7978: PetscCall(MatSetOption(Gmat, MAT_SYMMETRIC, PETSC_TRUE));
7979: } else if (Amat != Gmat) PetscCall(MatPropagateSymmetryOptions(Amat, Gmat));
7980: if (scale) {
7981: /* scale Gmat so that all diagonal values are +1 or -1 */
7982: Vec diag;
7984: PetscCall(MatCreateVecs(Gmat, &diag, NULL));
7985: PetscCall(MatGetDiagonal(Gmat, diag));
7986: PetscCall(VecReciprocal(diag));
7987: PetscCall(VecSqrtAbs(diag));
7988: PetscCall(MatDiagonalScale(Gmat, diag, diag));
7989: PetscCall(VecDestroy(&diag));
7990: }
7991: PetscCall(MatViewFromOptions(Gmat, NULL, "-mat_graph_view"));
7992: if (filter >= 0) {
7993: PetscCall(MatFilter(Gmat, filter, PETSC_TRUE, PETSC_TRUE));
7994: PetscCall(MatViewFromOptions(Gmat, NULL, "-mat_filter_graph_view"));
7995: }
7996: *a_Gmat = Gmat;
7997: PetscFunctionReturn(PETSC_SUCCESS);
7998: }
8000: PETSC_INTERN PetscErrorCode MatGetCurrentMemType_MPIAIJ(Mat A, PetscMemType *memtype)
8001: {
8002: Mat_MPIAIJ *mpiaij = (Mat_MPIAIJ *)A->data;
8003: PetscMemType mD = PETSC_MEMTYPE_HOST, mO = PETSC_MEMTYPE_HOST;
8005: PetscFunctionBegin;
8006: if (mpiaij->A) PetscCall(MatGetCurrentMemType(mpiaij->A, &mD));
8007: if (mpiaij->B) PetscCall(MatGetCurrentMemType(mpiaij->B, &mO));
8008: *memtype = (mD == mO) ? mD : PETSC_MEMTYPE_HOST;
8009: PetscFunctionReturn(PETSC_SUCCESS);
8010: }
8012: /*
8013: Special version for direct calls from Fortran
8014: */
8016: /* Change these macros so they can be used in a void function */
8017: /* Identical to PetscCallVoid, except it assigns to *_ierr */
8018: #undef PetscCall
8019: #define PetscCall(...) \
8020: do { \
8021: PetscErrorCode ierr_msv_mpiaij = __VA_ARGS__; \
8022: if (PetscUnlikely(ierr_msv_mpiaij)) { \
8023: *_ierr = PetscError(PETSC_COMM_SELF, __LINE__, PETSC_FUNCTION_NAME, __FILE__, ierr_msv_mpiaij, PETSC_ERROR_REPEAT, " "); \
8024: return; \
8025: } \
8026: } while (0)
8028: #undef SETERRQ
8029: #define SETERRQ(comm, ierr, ...) \
8030: do { \
8031: *_ierr = PetscError(comm, __LINE__, PETSC_FUNCTION_NAME, __FILE__, ierr, PETSC_ERROR_INITIAL, __VA_ARGS__); \
8032: return; \
8033: } while (0)
8035: #if defined(PETSC_HAVE_FORTRAN_CAPS)
8036: #define matsetvaluesmpiaij_ MATSETVALUESMPIAIJ
8037: #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
8038: #define matsetvaluesmpiaij_ matsetvaluesmpiaij
8039: #else
8040: #endif
8041: PETSC_EXTERN void matsetvaluesmpiaij_(Mat *mmat, PetscInt *mm, const PetscInt im[], PetscInt *mn, const PetscInt in[], const PetscScalar v[], InsertMode *maddv, PetscErrorCode *_ierr)
8042: {
8043: Mat mat = *mmat;
8044: PetscInt m = *mm, n = *mn;
8045: InsertMode addv = *maddv;
8046: Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
8047: PetscScalar value;
8049: MatCheckPreallocated(mat, 1);
8050: if (mat->insertmode == NOT_SET_VALUES) mat->insertmode = addv;
8051: else PetscCheck(mat->insertmode == addv, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Cannot mix add values and insert values");
8052: {
8053: PetscInt i, j, rstart = mat->rmap->rstart, rend = mat->rmap->rend;
8054: PetscInt cstart = mat->cmap->rstart, cend = mat->cmap->rend, row, col;
8055: PetscBool roworiented = aij->roworiented;
8057: /* Some variables required by the MatSetValues_SeqAIJ_A_Private() and MatSetValues_SeqAIJ_B_Private() macros */
8058: Mat A = aij->A;
8059: Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
8060: PetscInt *aimax = a->imax, *ai = a->i, *ailen = a->ilen, *aj = a->j;
8061: MatScalar *aa;
8062: PetscBool ignorezeroentries = ((a->ignorezeroentries && (addv == ADD_VALUES)) ? PETSC_TRUE : PETSC_FALSE);
8063: Mat B = aij->B;
8064: Mat_SeqAIJ *b = (Mat_SeqAIJ *)B->data;
8065: PetscInt *bimax = b->imax, *bi = b->i, *bilen = b->ilen, *bj = b->j, bm = aij->B->rmap->n, am = aij->A->rmap->n;
8066: MatScalar *ba;
8067: /* The variable below is only needed for the PETSC_HAVE_VIENNACL or PETSC_HAVE_CUDA cases, but we define it in all cases because we
8068: * cannot use "#if defined" inside a macro. */
8069: PETSC_UNUSED PetscBool inserted = PETSC_FALSE;
8071: PetscInt *rp1, *rp2, ii, nrow1, nrow2, _i, rmax1, rmax2, N, low1, high1, low2, high2, t, lastcol1, lastcol2;
8072: PetscInt nonew = a->nonew;
8073: MatScalar *ap1, *ap2;
8075: PetscFunctionBegin;
8076: PetscCall(MatSeqAIJGetArray(A, &aa));
8077: PetscCall(MatSeqAIJGetArray(B, &ba));
8078: for (i = 0; i < m; i++) {
8079: if (im[i] < 0) continue;
8080: PetscCheck(im[i] < mat->rmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Row too large: row %" PetscInt_FMT " max %" PetscInt_FMT, im[i], mat->rmap->N - 1);
8081: if (im[i] >= rstart && im[i] < rend) {
8082: row = im[i] - rstart;
8083: lastcol1 = -1;
8084: rp1 = aj + ai[row];
8085: ap1 = aa + ai[row];
8086: rmax1 = aimax[row];
8087: nrow1 = ailen[row];
8088: low1 = 0;
8089: high1 = nrow1;
8090: lastcol2 = -1;
8091: rp2 = bj + bi[row];
8092: ap2 = ba + bi[row];
8093: rmax2 = bimax[row];
8094: nrow2 = bilen[row];
8095: low2 = 0;
8096: high2 = nrow2;
8098: for (j = 0; j < n; j++) {
8099: if (roworiented) value = v[i * n + j];
8100: else value = v[i + j * m];
8101: if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && im[i] != in[j]) continue;
8102: if (in[j] >= cstart && in[j] < cend) {
8103: col = in[j] - cstart;
8104: MatSetValues_SeqAIJ_A_Private(row, col, value, addv, im[i], in[j]);
8105: } else if (in[j] < 0) continue;
8106: else if (PetscUnlikelyDebug(in[j] >= mat->cmap->N)) {
8107: SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Column too large: col %" PetscInt_FMT " max %" PetscInt_FMT, in[j], mat->cmap->N - 1);
8108: } else {
8109: if (mat->was_assembled) {
8110: if (!aij->colmap) PetscCall(MatCreateColmap_MPIAIJ_Private(mat));
8111: #if defined(PETSC_USE_CTABLE)
8112: PetscCall(PetscHMapIGetWithDefault(aij->colmap, in[j] + 1, 0, &col));
8113: col--;
8114: #else
8115: col = aij->colmap[in[j]] - 1;
8116: #endif
8117: if (col < 0 && !((Mat_SeqAIJ *)aij->A->data)->nonew) {
8118: PetscCall(MatDisAssemble_MPIAIJ(mat, PETSC_FALSE));
8119: col = in[j];
8120: /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
8121: B = aij->B;
8122: b = (Mat_SeqAIJ *)B->data;
8123: bimax = b->imax;
8124: bi = b->i;
8125: bilen = b->ilen;
8126: bj = b->j;
8127: rp2 = bj + bi[row];
8128: ap2 = ba + bi[row];
8129: rmax2 = bimax[row];
8130: nrow2 = bilen[row];
8131: low2 = 0;
8132: high2 = nrow2;
8133: bm = aij->B->rmap->n;
8134: ba = b->a;
8135: inserted = PETSC_FALSE;
8136: }
8137: } else col = in[j];
8138: MatSetValues_SeqAIJ_B_Private(row, col, value, addv, im[i], in[j]);
8139: }
8140: }
8141: } else if (!aij->donotstash) {
8142: if (roworiented) {
8143: PetscCall(MatStashValuesRow_Private(&mat->stash, im[i], n, in, v + i * n, (PetscBool)(ignorezeroentries && (addv == ADD_VALUES))));
8144: } else {
8145: PetscCall(MatStashValuesCol_Private(&mat->stash, im[i], n, in, v + i, m, (PetscBool)(ignorezeroentries && (addv == ADD_VALUES))));
8146: }
8147: }
8148: }
8149: PetscCall(MatSeqAIJRestoreArray(A, &aa));
8150: PetscCall(MatSeqAIJRestoreArray(B, &ba));
8151: }
8152: PetscFunctionReturnVoid();
8153: }
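/*
  Editor's note (illustrative, not part of the original source): matsetvaluesmpiaij_() is the
  Fortran-callable twin of MatSetValues() specialized for MPIAIJ matrices; the equivalent call from
  C is simply

    PetscCall(MatSetValues(mat, m, im, n, in, v, addv));

  with the same row/column index arrays and insert mode.
*/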
8155: /* Undefine these macros here since they were redefined from their original definitions above! No
8156: * other PETSc functions should be defined past this point, as it is impossible to recover the
8157: * original definitions */
8158: #undef PetscCall
8159: #undef SETERRQ