Actual source code: mpiaij.h
#pragma once

#include <../src/mat/impls/aij/seq/aij.h>

typedef struct { /* used by MatCreateMPIAIJSumSeqAIJ() for reusing the merged matrix */
  PetscLayout  rowmap;
  PetscInt   **buf_ri, **buf_rj;
  PetscMPIInt *len_s, *len_r, *id_r; /* arrays of length comm->size; store send/recv matrix values */
  PetscMPIInt  nsend, nrecv;
  PetscInt    *bi, *bj;              /* i and j arrays of the local portion of mpi C (matrix product) - rename to ci, cj! */
  PetscInt    *owners_co, *coi, *coj; /* i and j arrays of (p->B)^T*A*P - used in the communication */
} Mat_Merge_SeqsToMPI;

typedef struct { /* used by MatPtAPXXX_MPIAIJ_MPIAIJ() and MatMatMultXXX_MPIAIJ_MPIAIJ() */
  PetscInt    *startsj_s, *startsj_r; /* used by MatGetBrowsOfAoCols_MPIAIJ */
  PetscScalar *bufa;                  /* used by MatGetBrowsOfAoCols_MPIAIJ */
  Mat          P_loc, P_oth;          /* partial B_seq -- intended to replace B_seq */
  PetscInt    *api, *apj;             /* symbolic i and j arrays of the local product A_loc*B_seq */
  PetscScalar *apv;
  MatReuse     reuse;                 /* flag to skip MatGetBrowsOfAoCols_MPIAIJ() and MatMPIAIJGetLocalMat() in the 1st call of MatPtAPNumeric_MPIAIJ_MPIAIJ() */
  PetscScalar *apa;                   /* tmp array to store a row of A*P, used in MatMatMult() */
  Mat          A_loc;                 /* used by MatTransposeMatMult(), contains api and apj */
  ISLocalToGlobalMapping ltog;        /* mapping from local column indices to global column indices for A_loc */
  Mat          Pt;                    /* used by MatTransposeMatMult(), Pt = P^T */
  Mat          Rd, Ro, AP_loc, C_loc, C_oth;
  PetscInt     algType;               /* implementation algorithm */
  PetscSF      sf;                    /* used to communicate the remote part of C */
  PetscInt    *c_othi, *c_rmti;

  Mat_Merge_SeqsToMPI *merge;
} Mat_APMPI;

#if defined(PETSC_USE_CTABLE)
  #define PETSCTABLE PetscHMapI
#else
  #define PETSCTABLE PetscInt *
#endif

// Shared by MPIAIJ, MPIBAIJ, MPISBAIJ so that we can access common fields in the same way.
#define MPIAIJHEADER \
  Mat         A, B; /* local submatrices: A (diag part), B (off-diag part) */ \
  PetscMPIInt size; /* size of communicator */ \
  PetscMPIInt rank; /* rank of proc in communicator */ \
\
  /* The following variables are used for matrix assembly */ \
  PetscBool    donotstash;        /* if true, off-processor entries are dropped */ \
  MPI_Request *send_waits;        /* array of send requests */ \
  MPI_Request *recv_waits;        /* array of receive requests */ \
  PetscInt     nsends, nrecvs;    /* numbers of sends and receives */ \
  MatScalar   *svalues, *rvalues; /* sending and receiving data */ \
  PetscInt     rmax;              /* maximum message length */ \
  PETSCTABLE   colmap;            /* local col number of off-diag col */ \
  PetscInt    *garray;            /* work array */ \
\
  /* The following variables are used for matrix-vector products */ \
  Vec        lvec;        /* local vector */ \
  VecScatter Mvctx;       /* scatter context for vector */ \
  PetscBool  roworiented; /* if true, row-oriented input; default true */ \
\
  /* The following variables are for MatGetRow() */ \
  PetscInt    *rowindices; /* column indices for row */ \
  PetscScalar *rowvalues;  /* nonzero values in row */ \
  PetscBool    getrowactive

typedef struct {
  MPIAIJHEADER;
  Vec       diag;
  PetscInt *ld; /* number of entries per row left of diagonal block */

  /* Used by device classes */
  void *spptr;

  struct _MatOps cops;
} Mat_MPIAIJ;
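
/* Illustrative sketch only, not part of PETSc: how the fields above fit together for a
   matrix-vector product.  The diagonal block A uses the local column numbering of the
   owned part of x, while the off-diagonal block B uses compressed column indices whose
   global columns are listed in garray[]; the matching ghost values of x are gathered
   into lvec by Mvctx, in the same compressed order.  The function name and the
   arguments `xa` (owned values of x) and `la` (array of lvec) are hypothetical. */
static inline PetscScalar MatMPIAIJRowDot_Sketch(const Mat_SeqAIJ *ad, const Mat_SeqAIJ *ao, const PetscScalar *xa, const PetscScalar *la, PetscInt i)
{
  PetscScalar sum = 0.0;
  for (PetscInt k = ad->i[i]; k < ad->i[i + 1]; k++) sum += ad->a[k] * xa[ad->j[k]]; /* diag block: local columns of owned x */
  for (PetscInt k = ao->i[i]; k < ao->i[i + 1]; k++) sum += ao->a[k] * la[ao->j[k]]; /* off-diag block: compressed columns index into lvec */
  return sum;
}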

typedef struct {
  PetscCount   n;                          /* Number of COOs passed to MatSetPreallocationCOO() */
  PetscSF      sf;                         /* SF to send/recv remote values in MatSetValuesCOO() */
  PetscCount   Annz, Bnnz;                 /* Number of entries in the diagonal block A and the off-diagonal block B */
  PetscCount   Annz2, Bnnz2;               /* Number of unique remote entries belonging to A and B */
  PetscCount   Atot1, Atot2, Btot1, Btot2; /* Total local (tot1) and remote (tot2) entries (which might contain repeats) belonging to A and B */
  PetscCount  *Ajmap1, *Aperm1;            /* Lengths: [Annz+1], [Atot1]. Local entries to diag */
  PetscCount  *Bjmap1, *Bperm1;            /* Lengths: [Bnnz+1], [Btot1]. Local entries to offdiag */
  PetscCount  *Aimap2, *Ajmap2, *Aperm2;   /* Lengths: [Annz2], [Annz2+1], [Atot2]. Remote entries to diag */
  PetscCount  *Bimap2, *Bjmap2, *Bperm2;   /* Lengths: [Bnnz2], [Bnnz2+1], [Btot2]. Remote entries to offdiag */
  PetscCount  *Cperm1;                     /* [sendlen] Permutation to fill the MPI send buffer. 'C' for communication */
  PetscScalar *sendbuf, *recvbuf;          /* Buffers for remote values in MatSetValuesCOO() */
  PetscInt     sendlen, recvlen;           /* Lengths (in units of PetscScalar) of sendbuf/recvbuf */
} MatCOOStruct_MPIAIJ;
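
/* Hedged sketch, not PETSc code: one way the jmap/perm arrays above can be consumed in
   MatSetValuesCOO(), assuming the encoding suggested by the comments: for each nonzero k
   of the diagonal block, Ajmap1[k]..Ajmap1[k+1] delimits the run of local COO entries
   that land on it, and Aperm1[] holds their positions in the caller's value array v.
   The function name and the arguments `Aa` (value array of the diagonal block) and
   `imode` are hypothetical. */
static inline void MatCOOAccumulateDiag_Sketch(const MatCOOStruct_MPIAIJ *coo, const PetscScalar *v, PetscScalar *Aa, InsertMode imode)
{
  for (PetscCount k = 0; k < coo->Annz; k++) {
    PetscScalar sum = 0.0;
    for (PetscCount p = coo->Ajmap1[k]; p < coo->Ajmap1[k + 1]; p++) sum += v[coo->Aperm1[p]]; /* fold repeated COO entries */
    Aa[k] = (imode == INSERT_VALUES) ? sum : Aa[k] + sum;
  }
}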

PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJ(Mat);

PETSC_INTERN PetscErrorCode MatAssemblyEnd_MPIAIJ(Mat, MatAssemblyType);

PETSC_INTERN PetscErrorCode MatSetUpMultiply_MPIAIJ(Mat);
PETSC_INTERN PetscErrorCode MatDisAssemble_MPIAIJ(Mat, PetscBool);
PETSC_INTERN PetscErrorCode MatDuplicate_MPIAIJ(Mat, MatDuplicateOption, Mat *);
PETSC_INTERN PetscErrorCode MatIncreaseOverlap_MPIAIJ(Mat, PetscInt, IS[], PetscInt);
PETSC_INTERN PetscErrorCode MatIncreaseOverlap_MPIAIJ_Scalable(Mat, PetscInt, IS[], PetscInt);
PETSC_INTERN PetscErrorCode MatFDColoringCreate_MPIXAIJ(Mat, ISColoring, MatFDColoring);
PETSC_INTERN PetscErrorCode MatFDColoringSetUp_MPIXAIJ(Mat, ISColoring, MatFDColoring);
PETSC_INTERN PetscErrorCode MatCreateSubMatrices_MPIAIJ(Mat, PetscInt, const IS[], const IS[], MatReuse, Mat *[]);
PETSC_INTERN PetscErrorCode MatCreateSubMatricesMPI_MPIAIJ(Mat, PetscInt, const IS[], const IS[], MatReuse, Mat *[]);
PETSC_INTERN PetscErrorCode MatCreateSubMatrix_MPIAIJ_All(Mat, MatCreateSubMatrixOption, MatReuse, Mat *[]);
PETSC_INTERN PetscErrorCode MatView_MPIAIJ(Mat, PetscViewer);

PETSC_INTERN PetscErrorCode MatCreateSubMatrix_MPIAIJ(Mat, IS, IS, MatReuse, Mat *);
PETSC_INTERN PetscErrorCode MatCreateSubMatrix_MPIAIJ_nonscalable(Mat, IS, IS, PetscInt, MatReuse, Mat *);
PETSC_INTERN PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowDist(Mat, IS, IS, IS, MatReuse, Mat *);
PETSC_INTERN PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowColDist(Mat, IS, IS, MatReuse, Mat *);
PETSC_INTERN PetscErrorCode MatGetMultiProcBlock_MPIAIJ(Mat, MPI_Comm, MatReuse, Mat *);

PETSC_INTERN PetscErrorCode MatLoad_MPIAIJ(Mat, PetscViewer);
PETSC_INTERN PetscErrorCode MatLoad_MPIAIJ_Binary(Mat, PetscViewer);
PETSC_INTERN PetscErrorCode MatCreateColmap_MPIAIJ_Private(Mat);

PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ(Mat);
PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJBACKEND(Mat);
PETSC_INTERN PetscErrorCode MatProductSymbolic_MPIAIJBACKEND(Mat);
PETSC_INTERN PetscErrorCode MatProductSymbolic_AB_MPIAIJ_MPIAIJ(Mat);

PETSC_INTERN PetscErrorCode MatProductSymbolic_PtAP_MPIAIJ_MPIAIJ(Mat);

PETSC_INTERN PetscErrorCode MatProductSymbolic_RARt_MPIAIJ_MPIAIJ(Mat);
PETSC_INTERN PetscErrorCode MatProductNumeric_RARt_MPIAIJ_MPIAIJ(Mat);

PETSC_INTERN PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat, Mat, PetscReal, Mat);
PETSC_INTERN PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(Mat, Mat, PetscReal, Mat);
PETSC_INTERN PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat, Mat, PetscReal, Mat);
PETSC_INTERN PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat, Mat, Mat);
PETSC_INTERN PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat, Mat, Mat);

PETSC_INTERN PetscErrorCode MatMatMatMultSymbolic_MPIAIJ_MPIAIJ_MPIAIJ(Mat, Mat, Mat, PetscReal, Mat);
PETSC_INTERN PetscErrorCode MatMatMatMultNumeric_MPIAIJ_MPIAIJ_MPIAIJ(Mat, Mat, Mat, Mat);

PETSC_INTERN PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat, Mat, PetscReal, Mat);
PETSC_INTERN PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat, Mat, Mat);

PETSC_INTERN PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(Mat, Mat, PetscReal, Mat);
PETSC_INTERN PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_allatonce(Mat, Mat, PetscReal, Mat);
PETSC_INTERN PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_allatonce_merged(Mat, Mat, PetscReal, Mat);
PETSC_INTERN PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable(Mat, Mat, Mat);
PETSC_INTERN PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_allatonce(Mat, Mat, Mat);
PETSC_INTERN PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_allatonce_merged(Mat, Mat, Mat);

#if defined(PETSC_HAVE_HYPRE)
PETSC_INTERN PetscErrorCode MatPtAPSymbolic_AIJ_AIJ_wHYPRE(Mat, Mat, PetscReal, Mat);
#endif
PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIDense(Mat, MatType, MatReuse, Mat *);
#if defined(PETSC_HAVE_SCALAPACK)
PETSC_INTERN PetscErrorCode MatConvert_AIJ_ScaLAPACK(Mat, MatType, MatReuse, Mat *);
#endif

PETSC_INTERN PetscErrorCode MatDestroy_MPIAIJ(Mat);
PETSC_INTERN PetscErrorCode MatDestroy_MPIAIJ_PtAP(void *);
PETSC_INTERN PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(void *);

PETSC_INTERN PetscErrorCode MatGetBrowsOfAoCols_MPIAIJ(Mat, Mat, MatReuse, PetscInt **, PetscInt **, MatScalar **, Mat *);
PETSC_INTERN PetscErrorCode MatSetValues_MPIAIJ(Mat, PetscInt, const PetscInt[], PetscInt, const PetscInt[], const PetscScalar[], InsertMode);
PETSC_INTERN PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat(Mat, const PetscInt[], const PetscInt[], const PetscScalar[]);
PETSC_INTERN PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(Mat, const PetscInt[], const PetscInt[]);
PETSC_INTERN PetscErrorCode MatSetOption_MPIAIJ(Mat, MatOption, PetscBool);
PETSC_INTERN PetscErrorCode MatSetMPIAIJWithSplitSeqAIJ(Mat, Mat, Mat, PetscInt *);

PETSC_INTERN PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat, Mat, PetscReal, Mat);
PETSC_INTERN PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(Mat, Mat, PetscReal, Mat);
PETSC_INTERN PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ(Mat, Mat, Mat);
PETSC_INTERN PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat, Mat, Mat);
PETSC_INTERN PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult(Mat, Mat, Mat);
PETSC_INTERN PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIDense(Mat, Mat, PetscReal, Mat);
PETSC_INTERN PetscErrorCode MatGetSeqNonzeroStructure_MPIAIJ(Mat, Mat *);

PETSC_INTERN PetscErrorCode MatSetFromOptions_MPIAIJ(Mat, PetscOptionItems);
PETSC_INTERN PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJ(Mat, PetscInt, const PetscInt[], PetscInt, const PetscInt[]);

PETSC_INTERN PetscErrorCode MatAXPYGetPreallocation_MPIX_private(PetscInt, const PetscInt *, const PetscInt *, const PetscInt *, const PetscInt *, const PetscInt *, const PetscInt *, PetscInt *);

extern PetscErrorCode MatGetDiagonalBlock_MPIAIJ(Mat, Mat *);
extern PetscErrorCode MatDiagonalScaleLocal_MPIAIJ(Mat, Vec);

PETSC_INTERN PetscErrorCode MatGetSeqMats_MPIAIJ(Mat, Mat *, Mat *);
PETSC_INTERN PetscErrorCode MatSetSeqMats_MPIAIJ(Mat, IS, IS, IS, MatStructure, Mat, Mat);

PETSC_INTERN PetscErrorCode MatSetPreallocationCOO_MPIAIJ(Mat, PetscCount, PetscInt[], PetscInt[]);

/* compute apa = A[i,:]*P = Ad[i,:]*P_loc + Ao[i,:]*P_oth using sparse axpy */
#define AProw_scalable(i, ad, ao, p_loc, p_oth, api, apj, apa) \
  do { \
    PetscInt     _anz, _pnz, _j, _k, *_ai, *_aj, _row, *_pi, *_pj, _nextp, *_apJ; \
    PetscScalar *_aa, _valtmp, *_pa; \
    _apJ = apj + api[i]; \
    /* diagonal portion of A */ \
    _ai  = ad->i; \
    _anz = _ai[i + 1] - _ai[i]; \
    _aj  = ad->j + _ai[i]; \
    _aa  = ad->a + _ai[i]; \
    for (_j = 0; _j < _anz; _j++) { \
      _row = _aj[_j]; \
      _pi  = p_loc->i; \
      _pnz = _pi[_row + 1] - _pi[_row]; \
      _pj  = p_loc->j + _pi[_row]; \
      _pa  = p_loc->a + _pi[_row]; \
      /* perform sparse axpy */ \
      _valtmp = _aa[_j]; \
      _nextp  = 0; \
      for (_k = 0; _nextp < _pnz; _k++) { \
        if (_apJ[_k] == _pj[_nextp]) { /* column of AP == column of P */ \
          apa[_k] += _valtmp * _pa[_nextp++]; \
        } \
      } \
      (void)PetscLogFlops(2.0 * _pnz); \
    } \
    /* off-diagonal portion of A */ \
    if (p_oth) { \
      _ai  = ao->i; \
      _anz = _ai[i + 1] - _ai[i]; \
      _aj  = ao->j + _ai[i]; \
      _aa  = ao->a + _ai[i]; \
      for (_j = 0; _j < _anz; _j++) { \
        _row = _aj[_j]; \
        _pi  = p_oth->i; \
        _pnz = _pi[_row + 1] - _pi[_row]; \
        _pj  = p_oth->j + _pi[_row]; \
        _pa  = p_oth->a + _pi[_row]; \
        /* perform sparse axpy */ \
        _valtmp = _aa[_j]; \
        _nextp  = 0; \
        for (_k = 0; _nextp < _pnz; _k++) { \
          if (_apJ[_k] == _pj[_nextp]) { /* column of AP == column of P */ \
            apa[_k] += _valtmp * _pa[_nextp++]; \
          } \
        } \
        (void)PetscLogFlops(2.0 * _pnz); \
      } \
    } \
  } while (0)
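
/* Hedged usage sketch, not PETSc code: one way the macro above can drive a numeric row
   loop for A*P, assuming api/apj come from a matching symbolic phase so that
   apj + api[i] lists the columns of row i of A*P, and assuming the values are stored in
   an array `apv` aligned with api/apj.  The function name, `m` (number of local rows),
   and the work array `apa` (length >= max row width) are hypothetical. */
static inline void APNumericRows_Sketch(Mat_SeqAIJ *ad, Mat_SeqAIJ *ao, Mat_SeqAIJ *p_loc, Mat_SeqAIJ *p_oth, PetscInt *api, PetscInt *apj, PetscScalar *apa, PetscScalar *apv, PetscInt m)
{
  for (PetscInt i = 0; i < m; i++) {
    const PetscInt apnz = api[i + 1] - api[i];
    for (PetscInt k = 0; k < apnz; k++) apa[k] = 0.0;           /* clear the compressed row accumulator */
    AProw_scalable(i, ad, ao, p_loc, p_oth, api, apj, apa);     /* apa[k] accumulates the value at column apj[api[i] + k] */
    for (PetscInt k = 0; k < apnz; k++) apv[api[i] + k] = apa[k]; /* store the row into the product's value array */
  }
}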

#define AProw_nonscalable(i, ad, ao, p_loc, p_oth, apa) \
  do { \
    PetscInt     _anz, _pnz, _j, _k, *_ai, *_aj, _row, *_pi, *_pj; \
    PetscScalar *_aa, _valtmp, *_pa; \
    /* diagonal portion of A */ \
    _ai  = ad->i; \
    _anz = _ai[i + 1] - _ai[i]; \
    _aj  = PetscSafePointerPlusOffset(ad->j, _ai[i]); \
    _aa  = PetscSafePointerPlusOffset(ad->a, _ai[i]); \
    for (_j = 0; _j < _anz; _j++) { \
      _row = _aj[_j]; \
      _pi  = p_loc->i; \
      _pnz = _pi[_row + 1] - _pi[_row]; \
      _pj  = p_loc->j + _pi[_row]; \
      _pa  = p_loc->a + _pi[_row]; \
      /* perform dense axpy */ \
      _valtmp = _aa[_j]; \
      for (_k = 0; _k < _pnz; _k++) apa[_pj[_k]] += _valtmp * _pa[_k]; \
      (void)PetscLogFlops(2.0 * _pnz); \
    } \
    /* off-diagonal portion of A */ \
    if (p_oth) { \
      _ai  = ao->i; \
      _anz = _ai[i + 1] - _ai[i]; \
      _aj  = PetscSafePointerPlusOffset(ao->j, _ai[i]); \
      _aa  = PetscSafePointerPlusOffset(ao->a, _ai[i]); \
      for (_j = 0; _j < _anz; _j++) { \
        _row = _aj[_j]; \
        _pi  = p_oth->i; \
        _pnz = _pi[_row + 1] - _pi[_row]; \
        _pj  = p_oth->j + _pi[_row]; \
        _pa  = p_oth->a + _pi[_row]; \
        /* perform dense axpy */ \
        _valtmp = _aa[_j]; \
        for (_k = 0; _k < _pnz; _k++) apa[_pj[_k]] += _valtmp * _pa[_k]; \
        (void)PetscLogFlops(2.0 * _pnz); \
      } \
    } \
  } while (0)
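
/* Hedged usage note, not PETSc code: unlike AProw_scalable, the macro above scatters
   into a dense work array indexed directly by the column numbers of P, so `apa` needs
   one slot per column of P and must be re-zeroed (only at the touched positions) after
   each row is harvested.  A minimal sketch under those assumptions; `apnz`/`apj`
   (nonzero count and columns of row i of A*P from a symbolic phase), `apv` (output
   values for that row), and the function name are hypothetical. */
static inline void APRowGatherDense_Sketch(Mat_SeqAIJ *ad, Mat_SeqAIJ *ao, Mat_SeqAIJ *p_loc, Mat_SeqAIJ *p_oth, PetscInt i, PetscInt apnz, const PetscInt *apj, PetscScalar *apa, PetscScalar *apv)
{
  AProw_nonscalable(i, ad, ao, p_loc, p_oth, apa); /* dense-accumulate row i of A*P into apa */
  for (PetscInt k = 0; k < apnz; k++) {
    apv[k]      = apa[apj[k]]; /* gather the row back into sparse form */
    apa[apj[k]] = 0.0;         /* reset only the slots that were written, ready for the next row */
  }
}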