Actual source code: mpiaijsell.c

#include <../src/mat/impls/aij/mpi/mpiaij.h>
/*@C
  MatCreateMPIAIJSELL - Creates a sparse parallel matrix whose local
  portions are stored as `MATSEQAIJSELL` matrices (a matrix class that inherits
  from SEQAIJ but performs some operations in SELL format).

  Collective

  Input Parameters:
+ comm  - MPI communicator
. m     - number of local rows (or `PETSC_DECIDE` to have it calculated if `M` is given)
           This value should be the same as the local size used in creating the
           y vector for the matrix-vector product y = Ax.
. n     - number of local columns (or `PETSC_DECIDE` to have it calculated if `N` is given)
           This value should be the same as the local size used in creating the
           x vector for the matrix-vector product y = Ax. For square matrices `n` is almost always `m`.
. M     - number of global rows (or `PETSC_DETERMINE` to have it calculated if `m` is given)
. N     - number of global columns (or `PETSC_DETERMINE` to have it calculated if `n` is given)
. d_nz  - number of nonzeros per row in DIAGONAL portion of local submatrix
           (same value is used for all local rows)
. d_nnz - array containing the number of nonzeros in the various rows of the
           DIAGONAL portion of the local submatrix (possibly different for each row)
           or `NULL`, if `d_nz` is used to specify the nonzero structure.
           The size of this array is equal to the number of local rows, i.e. `m`.
           For matrices you plan to factor you must leave room for the diagonal entry and
           put in the entry even if it is zero.
. o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of local
           submatrix (same value is used for all local rows).
- o_nnz - array containing the number of nonzeros in the various rows of the
           OFF-DIAGONAL portion of the local submatrix (possibly different for
           each row) or `NULL`, if `o_nz` is used to specify the nonzero
           structure. The size of this array is equal to the number
           of local rows, i.e. `m`.

  Output Parameter:
. A - the matrix

  Options Database Key:
. -mat_aijsell_eager_shadow - Construct shadow matrix upon matrix assembly; default is to take a "lazy" approach, performing this step the first
                               time the matrix is applied

  Level: intermediate

  Notes:
  If an *_nnz parameter is given, then the corresponding *_nz parameter is ignored.

  The `m`, `n`, `M`, `N` parameters specify the size of the matrix and its partitioning across
  processors, while the `d_nz`, `d_nnz`, `o_nz`, `o_nnz` parameters specify the approximate
  storage requirements for this matrix.

  If `PETSC_DECIDE` or `PETSC_DETERMINE` is used for a particular argument on one
  processor, then it must be used on all processors that share the object for
  that argument.

  The user MUST specify either the local or global matrix dimensions
  (possibly both).

  The parallel matrix is partitioned such that the first m0 rows belong to
  process 0, the next m1 rows belong to process 1, the next m2 rows belong
  to process 2, and so on, where m0, m1, m2, ... are the values of the input
  parameter `m` on each process.

  The DIAGONAL portion of the local submatrix of a processor can be defined
  as the submatrix obtained by extracting the part corresponding
  to rows r1-r2 and columns r1-r2 of the global matrix, where r1 is the
  first row that belongs to the processor and r2 is the last row belonging
  to this processor. This is a square m x m matrix. The remaining portion
  of the local submatrix, of size m x (N - m), constitutes the OFF-DIAGONAL portion.
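
  As a purely illustrative example (the sizes and nonzero pattern here are hypothetical), consider an
  8 x 8 matrix whose rows are split 3/3/2 over three processes:
.vb
   rows 0-2 -> process 0: DIAGONAL block = columns 0-2, OFF-DIAGONAL = columns 3-7
   rows 3-5 -> process 1: DIAGONAL block = columns 3-5, OFF-DIAGONAL = columns 0-2 and 6-7
   rows 6-7 -> process 2: DIAGONAL block = columns 6-7, OFF-DIAGONAL = columns 0-5
.ve
  A row owned by process 1 with nonzeros in columns 1, 4, 5, and 7 would therefore contribute
  d_nnz = 2 and o_nnz = 2 for that row.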

  If `o_nnz` and `d_nnz` are specified, then `o_nz` and `d_nz` are ignored.

  When calling this routine with a single process communicator, a matrix of
  type `MATSEQAIJSELL` is returned.  If a matrix of type `MATMPIAIJSELL` is desired
  for this type of communicator, use the construction mechanism
.vb
   MatCreate(...,&A);
   MatSetType(A,MATMPIAIJSELL);
   MatMPIAIJSetPreallocation(A,...);
.ve
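
  For direct creation with constant per-row preallocation, a minimal sketch (the communicator, sizes,
  and nonzero counts below are illustrative only) is
.vb
   Mat A;
   MatCreateMPIAIJSELL(PETSC_COMM_WORLD, PETSC_DECIDE, PETSC_DECIDE, M, N, 5, NULL, 2, NULL, &A);
   /* insert entries with MatSetValues(), then assemble */
   MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
   MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
.ve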

.seealso: [](ch_matrices), `Mat`, [Sparse Matrix Creation](sec_matsparse), `MATSEQAIJSELL`, `MATMPIAIJSELL`, `MATAIJSELL`, `MatCreate()`, `MatCreateSeqAIJSELL()`, `MatSetValues()`
@*/
PetscErrorCode MatCreateMPIAIJSELL(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt M, PetscInt N, PetscInt d_nz, const PetscInt d_nnz[], PetscInt o_nz, const PetscInt o_nnz[], Mat *A)
{
  PetscMPIInt size;

  PetscFunctionBegin;
  PetscCall(MatCreate(comm, A));
  PetscCall(MatSetSizes(*A, m, n, M, N));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  if (size > 1) {
    PetscCall(MatSetType(*A, MATMPIAIJSELL));
    PetscCall(MatMPIAIJSetPreallocation(*A, d_nz, d_nnz, o_nz, o_nnz));
  } else {
    PetscCall(MatSetType(*A, MATSEQAIJSELL));
    PetscCall(MatSeqAIJSetPreallocation(*A, d_nz, d_nnz));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJSELL(Mat, MatType, MatReuse, Mat *);

static PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJSELL(Mat B, PetscInt d_nz, const PetscInt d_nnz[], PetscInt o_nz, const PetscInt o_nnz[])
{
  Mat_MPIAIJ *b = (Mat_MPIAIJ *)B->data;

  PetscFunctionBegin;
  /* Preallocate as a standard MPIAIJ matrix, then convert the local diagonal and off-diagonal blocks to SEQAIJSELL in place */
  PetscCall(MatMPIAIJSetPreallocation_MPIAIJ(B, d_nz, d_nnz, o_nz, o_nnz));
  PetscCall(MatConvert_SeqAIJ_SeqAIJSELL(b->A, MATSEQAIJSELL, MAT_INPLACE_MATRIX, &b->A));
  PetscCall(MatConvert_SeqAIJ_SeqAIJSELL(b->B, MATSEQAIJSELL, MAT_INPLACE_MATRIX, &b->B));
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJSELL(Mat A, MatType type, MatReuse reuse, Mat *newmat)
{
  Mat B = *newmat;

  PetscFunctionBegin;
  if (reuse == MAT_INITIAL_MATRIX) PetscCall(MatDuplicate(A, MAT_COPY_VALUES, &B));

  PetscCall(PetscObjectChangeTypeName((PetscObject)B, MATMPIAIJSELL));
  PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMPIAIJSetPreallocation_C", MatMPIAIJSetPreallocation_MPIAIJSELL));
  *newmat = B;
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJSELL(Mat A)
{
  PetscFunctionBegin;
  /* Build the matrix as MPIAIJ, then convert it in place to MPIAIJSELL */
  PetscCall(MatSetType(A, MATMPIAIJ));
  PetscCall(MatConvert_MPIAIJ_MPIAIJSELL(A, MATMPIAIJSELL, MAT_INPLACE_MATRIX, &A));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*MC
   MATAIJSELL - "AIJSELL" - A matrix type to be used for sparse matrices.

   This matrix type is identical to `MATSEQAIJSELL` when constructed with a single process communicator,
   and `MATMPIAIJSELL` otherwise.  As a result, for single process communicators,
   `MatSeqAIJSetPreallocation()` is supported, and similarly `MatMPIAIJSetPreallocation()` is supported
   for communicators controlling multiple processes.  It is recommended that you call both of
   the above preallocation routines for simplicity.
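
   For example, a minimal sketch of this communicator-independent setup (the nonzero counts below are
   illustrative only) is
.vb
   MatCreate(comm, &A);
   MatSetSizes(A, m, n, M, N);
   MatSetType(A, MATAIJSELL);
   MatSeqAIJSetPreallocation(A, 5, NULL);
   MatMPIAIJSetPreallocation(A, 5, NULL, 2, NULL);
.ve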

   Options Database Key:
. -mat_type aijsell - sets the matrix type to `MATAIJSELL`

  Level: beginner

.seealso: [](ch_matrices), `Mat`, `MatCreateMPIAIJSELL()`, `MATSEQAIJSELL`, `MATMPIAIJSELL`, `MATSEQAIJ`, `MATMPIAIJ`, `MATSEQAIJPERM`, `MATMPIAIJPERM`, `MATSEQAIJMKL`, `MATMPIAIJMKL`
M*/