Actual source code: gcreate.c
1: #include <petsc/private/matimpl.h>
3: #include <../src/mat/impls/aij/seq/aij.h>
4: #include <../src/mat/impls/aij/mpi/mpiaij.h>
6: PetscErrorCode MatSetBlockSizes_Default(Mat mat, PetscInt rbs, PetscInt cbs)
7: {
8: PetscFunctionBegin;
9: if (!mat->preallocated) PetscFunctionReturn(PETSC_SUCCESS);
10: PetscCheck(mat->rmap->bs <= 0 || mat->rmap->bs == rbs, PetscObjectComm((PetscObject)mat), PETSC_ERR_SUP, "Cannot change row block size %" PetscInt_FMT " to %" PetscInt_FMT, mat->rmap->bs, rbs);
11: PetscCheck(mat->cmap->bs <= 0 || mat->cmap->bs == cbs, PetscObjectComm((PetscObject)mat), PETSC_ERR_SUP, "Cannot change column block size %" PetscInt_FMT " to %" PetscInt_FMT, mat->cmap->bs, cbs);
12: PetscFunctionReturn(PETSC_SUCCESS);
13: }
15: PetscErrorCode MatShift_Basic(Mat Y, PetscScalar a)
16: {
17: PetscInt i, start, end, oldValA = 0, oldValB = 0;
18: PetscScalar alpha = a;
19: PetscBool prevoption;
20: PetscBool isSeqAIJDerived, isMPIAIJDerived; // all classes sharing SEQAIJHEADER or MPIAIJHEADER
21: Mat A = NULL, B = NULL;
23: PetscFunctionBegin;
24: PetscCall(MatGetOption(Y, MAT_NO_OFF_PROC_ENTRIES, &prevoption));
25: PetscCall(MatSetOption(Y, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
26: PetscCall(PetscObjectBaseTypeCompareAny((PetscObject)Y, &isSeqAIJDerived, MATSEQAIJ, MATSEQBAIJ, MATSEQSBAIJ, ""));
27: PetscCall(PetscObjectBaseTypeCompareAny((PetscObject)Y, &isMPIAIJDerived, MATMPIAIJ, MATMPIBAIJ, MATMPISBAIJ, ""));
29: if (isSeqAIJDerived) A = Y;
30: else if (isMPIAIJDerived) {
31: Mat_MPIAIJ *mpiaij = (Mat_MPIAIJ *)Y->data;
32: A = mpiaij->A;
33: B = mpiaij->B;
34: }
36: if (A) {
37: oldValA = ((Mat_SeqAIJ *)A->data)->nonew;
38: ((Mat_SeqAIJ *)A->data)->nonew = 0; // so that new nonzero locations are allowed
39: }
40: if (B) {
41: oldValB = ((Mat_SeqAIJ *)B->data)->nonew;
42: ((Mat_SeqAIJ *)B->data)->nonew = 0;
43: }
45: PetscCall(MatGetOwnershipRange(Y, &start, &end));
46: for (i = start; i < end; i++) {
47: if (i < Y->cmap->N) PetscCall(MatSetValues(Y, 1, &i, 1, &i, &alpha, ADD_VALUES));
48: }
49: PetscCall(MatAssemblyBegin(Y, MAT_FINAL_ASSEMBLY));
50: PetscCall(MatAssemblyEnd(Y, MAT_FINAL_ASSEMBLY));
51: PetscCall(MatSetOption(Y, MAT_NO_OFF_PROC_ENTRIES, prevoption));
52: if (A) ((Mat_SeqAIJ *)A->data)->nonew = oldValA;
53: if (B) ((Mat_SeqAIJ *)B->data)->nonew = oldValB;
54: PetscFunctionReturn(PETSC_SUCCESS);
55: }
57: /*@
58: MatCreate - Creates a matrix where the type is determined
59: either from a call to `MatSetType()` or from the options database
60: with a call to `MatSetFromOptions()`.
62: Collective
64: Input Parameter:
65: . comm - MPI communicator
67: Output Parameter:
68: . A - the matrix
70: Options Database Keys:
71: + -mat_type seqaij - `MATSEQAIJ` type, uses `MatCreateSeqAIJ()`
72: . -mat_type mpiaij - `MATMPIAIJ` type, uses `MatCreateAIJ()`
73: . -mat_type seqdense - `MATSEQDENSE` type, uses `MatCreateSeqDense()`
74: . -mat_type mpidense - `MATMPIDENSE` type, uses `MatCreateDense()`
75: . -mat_type seqbaij - `MATSEQBAIJ` type, uses `MatCreateSeqBAIJ()`
76: - -mat_type mpibaij - `MATMPIBAIJ` type, uses `MatCreateBAIJ()`
78: See the manpages for particular formats (e.g., `MATSEQAIJ`)
79: for additional format-specific options.
81: Level: beginner
83: Notes:
84: The default matrix type is `MATAIJ`, using the routines `MatCreateSeqAIJ()` or
85: `MatCreateAIJ()` if you do not set a type in the options database. If you never call
86: `MatSetType()` or `MatSetFromOptions()` it will generate an error when you try to use the
87: matrix.
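
Example Usage\:
A minimal sketch of the usual creation sequence; the global size of 100 is illustrative, and the type is left to the options database.
.vb
Mat A;

MatCreate(PETSC_COMM_WORLD, &A);
MatSetSizes(A, PETSC_DECIDE, PETSC_DECIDE, 100, 100);
MatSetFromOptions(A); // or call MatSetType(A, MATAIJ) directly
MatSetUp(A);
// ... fill with MatSetValues(), then MatAssemblyBegin()/MatAssemblyEnd() ...
MatDestroy(&A);
.ve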
89: .seealso: [](ch_matrices), `Mat`, `MatCreateSeqAIJ()`, `MatCreateAIJ()`,
90: `MatCreateSeqDense()`, `MatCreateDense()`,
91: `MatCreateSeqBAIJ()`, `MatCreateBAIJ()`,
92: `MatCreateSeqSBAIJ()`, `MatCreateSBAIJ()`,
93: `MatConvert()`
94: @*/
95: PetscErrorCode MatCreate(MPI_Comm comm, Mat *A)
96: {
97: Mat B;
99: PetscFunctionBegin;
100: PetscAssertPointer(A, 2);
101: PetscCall(MatInitializePackage());
103: PetscCall(PetscHeaderCreate(B, MAT_CLASSID, "Mat", "Matrix", "Mat", comm, MatDestroy, MatView));
104: PetscCall(PetscLayoutCreate(comm, &B->rmap));
105: PetscCall(PetscLayoutCreate(comm, &B->cmap));
106: PetscCall(PetscStrallocpy(VECSTANDARD, &B->defaultvectype));
107: PetscCall(PetscStrallocpy(PETSCRANDER48, &B->defaultrandtype));
109: B->symmetric = PETSC_BOOL3_UNKNOWN;
110: B->hermitian = PETSC_BOOL3_UNKNOWN;
111: B->structurally_symmetric = PETSC_BOOL3_UNKNOWN;
112: B->spd = PETSC_BOOL3_UNKNOWN;
113: B->symmetry_eternal = PETSC_FALSE;
114: B->structural_symmetry_eternal = PETSC_FALSE;
116: B->congruentlayouts = PETSC_DECIDE;
117: B->preallocated = PETSC_FALSE;
118: #if defined(PETSC_HAVE_DEVICE)
119: B->boundtocpu = PETSC_TRUE;
120: #endif
121: *A = B;
122: PetscFunctionReturn(PETSC_SUCCESS);
123: }
125: /*@
126: MatCreateFromOptions - Creates a matrix whose type is set from the options database
128: Collective
130: Input Parameters:
131: + comm - MPI communicator
132: . prefix - [optional] prefix for the options database
133: . bs - the blocksize (commonly 1)
134: . m - the local number of rows (or `PETSC_DECIDE`)
135: . n - the local number of columns (or `PETSC_DECIDE` or `PETSC_DETERMINE`)
136: . M - the global number of rows (or `PETSC_DETERMINE`)
137: - N - the global number of columns (or `PETSC_DETERMINE`)
139: Output Parameter:
140: . A - the matrix
142: Options Database Key:
143: . -mat_type - see `MatType`, for example `aij`, `aijcusparse`, `baij`, `sbaij`, or `dense`; defaults to `aij`
145: Level: beginner
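
Example Usage\:
A minimal sketch; the options prefix "sys1_", the block size of 1, and the global size of 100 are illustrative.
.vb
Mat A;

MatCreateFromOptions(PETSC_COMM_WORLD, "sys1_", 1, PETSC_DECIDE, PETSC_DECIDE, 100, 100, &A);
// equivalent to MatCreate() + MatSetOptionsPrefix() + MatSetBlockSize() + MatSetSizes() + MatSetFromOptions()
.ve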
147: .seealso: [](ch_matrices), `Mat`, `MatCreateSeqAIJ()`, `MatCreateAIJ()`,
148: `MatCreateSeqDense()`, `MatCreateDense()`,
149: `MatCreateSeqBAIJ()`, `MatCreateBAIJ()`,
150: `MatCreateSeqSBAIJ()`, `MatCreateSBAIJ()`,
151: `MatConvert()`, `MatCreate()`
152: @*/
153: PetscErrorCode MatCreateFromOptions(MPI_Comm comm, const char *prefix, PetscInt bs, PetscInt m, PetscInt n, PetscInt M, PetscInt N, Mat *A)
154: {
155: PetscFunctionBegin;
156: PetscAssertPointer(A, 8);
157: PetscCall(MatCreate(comm, A));
158: if (prefix) PetscCall(MatSetOptionsPrefix(*A, prefix));
159: PetscCall(MatSetBlockSize(*A, bs));
160: PetscCall(MatSetSizes(*A, m, n, M, N));
161: PetscCall(MatSetFromOptions(*A));
162: PetscFunctionReturn(PETSC_SUCCESS);
163: }
165: /*@
166: MatSetErrorIfFailure - Causes `Mat` to generate an immediate error when a problem, for example a zero pivot, is detected.
168: Logically Collective
170: Input Parameters:
171: + mat - matrix obtained from `MatCreate()`
172: - flg - `PETSC_TRUE` indicates you want the error generated
174: Level: advanced
176: Note:
177: If this flag is not set then the matrix operation will note the error and continue. The error may cause a later `PC` or `KSP` error
178: or result in a `KSPConvergedReason` indicating the method did not converge.
180: .seealso: [](ch_matrices), `Mat`, `PCSetErrorIfFailure()`, `KSPConvergedReason`, `SNESConvergedReason`
181: @*/
182: PetscErrorCode MatSetErrorIfFailure(Mat mat, PetscBool flg)
183: {
184: PetscFunctionBegin;
187: mat->erroriffailure = flg;
188: PetscFunctionReturn(PETSC_SUCCESS);
189: }
191: /*@
192: MatSetSizes - Sets the local and global sizes, and checks to determine compatibility
194: Collective
196: Input Parameters:
197: + A - the matrix
198: . m - number of local rows (or `PETSC_DECIDE`)
199: . n - number of local columns (or `PETSC_DECIDE`)
200: . M - number of global rows (or `PETSC_DETERMINE`)
201: - N - number of global columns (or `PETSC_DETERMINE`)
203: Level: beginner
205: Notes:
206: `m` (`n`) and `M` (`N`) cannot both be `PETSC_DECIDE`.
207: If one processor calls this with `M` (`N`) of `PETSC_DECIDE` then all processors must; otherwise the program will hang.
209: If `PETSC_DECIDE` is not used for the arguments `m` and `n`, then the
210: user must ensure that they are chosen to be compatible with the
211: vectors. To do this, one first considers the matrix-vector product
212: `y = A x`. The `m` that is used in the above routine must match the
213: local size used in the vector creation routine `VecCreateMPI()` for `y`.
214: Likewise, the `n` used must match that used as the local size in
215: `VecCreateMPI()` for `x`.
217: If `m` and `n` are not `PETSC_DECIDE`, then the values determine the `PetscLayout` of the matrix and the ranges returned by
218: `MatGetOwnershipRange()`, `MatGetOwnershipRanges()`, `MatGetOwnershipRangeColumn()`, and `MatGetOwnershipRangesColumn()`.
220: You cannot change the sizes once they have been set.
222: The sizes must be set before `MatSetUp()` or MatXXXSetPreallocation() is called.
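
Example Usage\:
A sketch of choosing local sizes compatible with the vectors in `y = A x`; the local sizes `m` and `n` are illustrative.
.vb
Mat A;
Vec x, y;
PetscInt m = 5, n = 4; // local sizes on this process (illustrative)

MatCreate(PETSC_COMM_WORLD, &A);
MatSetSizes(A, m, n, PETSC_DETERMINE, PETSC_DETERMINE);
// the vectors used in y = A x must have matching local sizes
VecCreateMPI(PETSC_COMM_WORLD, n, PETSC_DETERMINE, &x);
VecCreateMPI(PETSC_COMM_WORLD, m, PETSC_DETERMINE, &y);
.ve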
224: .seealso: [](ch_matrices), `Mat`, `MatGetSize()`, `PetscSplitOwnership()`, `MatGetOwnershipRange()`, `MatGetOwnershipRanges()`,
225: `MatGetOwnershipRangeColumn()`, `MatGetOwnershipRangesColumn()`, `PetscLayout`, `VecSetSizes()`
226: @*/
227: PetscErrorCode MatSetSizes(Mat A, PetscInt m, PetscInt n, PetscInt M, PetscInt N)
228: {
229: PetscFunctionBegin;
233: PetscCheck(M <= 0 || m <= M, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Local row size %" PetscInt_FMT " cannot be larger than global row size %" PetscInt_FMT, m, M);
234: PetscCheck(N <= 0 || n <= N, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Local column size %" PetscInt_FMT " cannot be larger than global column size %" PetscInt_FMT, n, N);
235: PetscCheck((A->rmap->n < 0 || A->rmap->N < 0) || (A->rmap->n == m && (M <= 0 || A->rmap->N == M)), PETSC_COMM_SELF, PETSC_ERR_SUP, "Cannot change/reset row sizes to %" PetscInt_FMT " local %" PetscInt_FMT " global after previously setting them to %" PetscInt_FMT " local %" PetscInt_FMT " global", m, M,
236: A->rmap->n, A->rmap->N);
237: PetscCheck((A->cmap->n < 0 || A->cmap->N < 0) || (A->cmap->n == n && (N <= 0 || A->cmap->N == N)), PETSC_COMM_SELF, PETSC_ERR_SUP, "Cannot change/reset column sizes to %" PetscInt_FMT " local %" PetscInt_FMT " global after previously setting them to %" PetscInt_FMT " local %" PetscInt_FMT " global", n, N,
238: A->cmap->n, A->cmap->N);
239: A->rmap->n = m;
240: A->cmap->n = n;
241: A->rmap->N = M > -1 ? M : A->rmap->N;
242: A->cmap->N = N > -1 ? N : A->cmap->N;
243: PetscFunctionReturn(PETSC_SUCCESS);
244: }
246: /*@
247: MatSetFromOptions - Sets the type and other parameters of a matrix
248: from the options database.
250: Collective
252: Input Parameter:
253: . B - the matrix
255: Options Database Keys:
256: + -mat_type seqaij - `MATSEQAIJ` type, uses `MatCreateSeqAIJ()`
257: . -mat_type mpiaij - `MATMPIAIJ` type, uses `MatCreateAIJ()`
258: . -mat_type seqdense - `MATSEQDENSE` type, uses `MatCreateSeqDense()`
259: . -mat_type mpidense - `MATMPIDENSE` type, uses `MatCreateDense()`
260: . -mat_type seqbaij - `MATSEQBAIJ` type, uses `MatCreateSeqBAIJ()`
261: - -mat_type mpibaij - `MATMPIBAIJ` type, uses `MatCreateBAIJ()`
263: See the manpages for particular formats (e.g., `MATSEQAIJ`)
264: for additional format-specific options.
266: Level: beginner
268: Notes:
269: Generates a parallel MPI matrix if the communicator has more than one processor. The default
270: matrix type is `MATAIJ`, using the routines `MatCreateSeqAIJ()` and `MatCreateAIJ()` if you
271: do not select a type in the options database.
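
Example Usage\:
A sketch of selecting the type at run time; the global size of 100 is illustrative.
.vb
Mat A;

MatCreate(PETSC_COMM_WORLD, &A);
MatSetSizes(A, PETSC_DECIDE, PETSC_DECIDE, 100, 100);
MatSetFromOptions(A);
.ve
followed at run time by, for example, the command-line options `-mat_type baij -mat_block_size 3`, which are processed by this routine.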
273: .seealso: [](ch_matrices), `Mat`, `MatCreateSeqAIJ()`, `MatCreateAIJ()`,
274: `MatCreateSeqDense()`, `MatCreateDense()`,
275: `MatCreateSeqBAIJ()`, `MatCreateBAIJ()`,
276: `MatCreateSeqSBAIJ()`, `MatCreateSBAIJ()`,
277: `MatConvert()`
278: @*/
279: PetscErrorCode MatSetFromOptions(Mat B)
280: {
281: const char *deft = MATAIJ;
282: char type[256];
283: PetscBool flg, set;
284: PetscInt bind_below = 0;
286: PetscFunctionBegin;
289: PetscObjectOptionsBegin((PetscObject)B);
291: if (B->rmap->bs < 0) {
292: PetscInt newbs = -1;
293: PetscCall(PetscOptionsInt("-mat_block_size", "Set the blocksize used to store the matrix", "MatSetBlockSize", newbs, &newbs, &flg));
294: if (flg) {
295: PetscCall(PetscLayoutSetBlockSize(B->rmap, newbs));
296: PetscCall(PetscLayoutSetBlockSize(B->cmap, newbs));
297: }
298: }
300: PetscCall(PetscOptionsFList("-mat_type", "Matrix type", "MatSetType", MatList, deft, type, 256, &flg));
301: if (flg) {
302: PetscCall(MatSetType(B, type));
303: } else if (!((PetscObject)B)->type_name) {
304: PetscCall(MatSetType(B, deft));
305: }
307: PetscCall(PetscOptionsName("-mat_is_symmetric", "Checks if mat is symmetric on MatAssemblyEnd()", "MatIsSymmetric", &B->checksymmetryonassembly));
308: PetscCall(PetscOptionsReal("-mat_is_symmetric", "Checks if mat is symmetric on MatAssemblyEnd()", "MatIsSymmetric", B->checksymmetrytol, &B->checksymmetrytol, NULL));
309: PetscCall(PetscOptionsBool("-mat_null_space_test", "Checks if provided null space is correct in MatAssemblyEnd()", "MatSetNullSpaceTest", B->checknullspaceonassembly, &B->checknullspaceonassembly, NULL));
310: PetscCall(PetscOptionsBool("-mat_error_if_failure", "Generate an error if an error occurs when factoring the matrix", "MatSetErrorIfFailure", B->erroriffailure, &B->erroriffailure, NULL));
312: PetscTryTypeMethod(B, setfromoptions, PetscOptionsObject);
314: flg = PETSC_FALSE;
315: PetscCall(PetscOptionsBool("-mat_new_nonzero_location_err", "Generate an error if new nonzeros are created in the matrix structure (useful to test preallocation)", "MatSetOption", flg, &flg, &set));
316: if (set) PetscCall(MatSetOption(B, MAT_NEW_NONZERO_LOCATION_ERR, flg));
317: flg = PETSC_FALSE;
318: PetscCall(PetscOptionsBool("-mat_new_nonzero_allocation_err", "Generate an error if new nonzeros are allocated in the matrix structure (useful to test preallocation)", "MatSetOption", flg, &flg, &set));
319: if (set) PetscCall(MatSetOption(B, MAT_NEW_NONZERO_ALLOCATION_ERR, flg));
320: flg = PETSC_FALSE;
321: PetscCall(PetscOptionsBool("-mat_ignore_zero_entries", "For AIJ/IS matrices this will stop zero values from creating a zero location in the matrix", "MatSetOption", flg, &flg, &set));
322: if (set) PetscCall(MatSetOption(B, MAT_IGNORE_ZERO_ENTRIES, flg));
324: flg = PETSC_FALSE;
325: PetscCall(PetscOptionsBool("-mat_form_explicit_transpose", "Hint to form an explicit transpose for operations like MatMultTranspose", "MatSetOption", flg, &flg, &set));
326: if (set) PetscCall(MatSetOption(B, MAT_FORM_EXPLICIT_TRANSPOSE, flg));
328: /* Bind to CPU if below a user-specified size threshold.
329: * This perhaps belongs in the options for the GPU Mat types, but MatBindToCPU() does nothing when called on non-GPU types,
330: * and putting it here makes it more maintainable than duplicating this for all of them. */
331: PetscCall(PetscOptionsInt("-mat_bind_below", "Set the size threshold (in local rows) below which the Mat is bound to the CPU", "MatBindToCPU", bind_below, &bind_below, &flg));
332: if (flg && B->rmap->n < bind_below) PetscCall(MatBindToCPU(B, PETSC_TRUE));
334: /* process any options handlers added with PetscObjectAddOptionsHandler() */
335: PetscCall(PetscObjectProcessOptionsHandlers((PetscObject)B, PetscOptionsObject));
336: PetscOptionsEnd();
337: PetscFunctionReturn(PETSC_SUCCESS);
338: }
340: /*@
341: MatXAIJSetPreallocation - set preallocation for serial and parallel `MATAIJ`, `MATBAIJ`, and `MATSBAIJ` matrices and their unassembled versions.
343: Collective
345: Input Parameters:
346: + A - matrix being preallocated
347: . bs - block size
348: . dnnz - number of nonzero column blocks per block row of diagonal part of parallel matrix
349: . onnz - number of nonzero column blocks per block row of off-diagonal part of parallel matrix
350: . dnnzu - number of nonzero column blocks per block row of upper-triangular part of diagonal part of parallel matrix
351: - onnzu - number of nonzero column blocks per block row of upper-triangular part of off-diagonal part of parallel matrix
353: Level: beginner
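
Example Usage\:
A sketch for a `MATAIJ` matrix with block size 1; it assumes `A` already has its type and sizes set. The per-row counts of 5 (diagonal block) and 2 (off-diagonal block) are illustrative and would normally come from the mesh or stencil; the upper-triangular counts are only needed for `MATSBAIJ` and are passed as NULL here.
.vb
PetscInt m, i, *dnnz, *onnz;

MatGetLocalSize(A, &m, NULL);
PetscMalloc2(m, &dnnz, m, &onnz);
for (i = 0; i < m; i++) {
  dnnz[i] = 5;
  onnz[i] = 2;
}
MatXAIJSetPreallocation(A, 1, dnnz, onnz, NULL, NULL);
PetscFree2(dnnz, onnz);
.ve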
355: .seealso: [](ch_matrices), `Mat`, `MatSeqAIJSetPreallocation()`, `MatMPIAIJSetPreallocation()`, `MatSeqBAIJSetPreallocation()`, `MatMPIBAIJSetPreallocation()`,
356: `MatSeqSBAIJSetPreallocation()`, `MatMPISBAIJSetPreallocation()`,
357: `PetscSplitOwnership()`
358: @*/
359: PetscErrorCode MatXAIJSetPreallocation(Mat A, PetscInt bs, const PetscInt dnnz[], const PetscInt onnz[], const PetscInt dnnzu[], const PetscInt onnzu[])
360: {
361: PetscInt cbs;
362: PetscBool aij, is, hyp;
364: PetscFunctionBegin;
365: if (bs != PETSC_DECIDE) { /* don't mess with an already set block size */
366: PetscCall(MatSetBlockSize(A, bs));
367: }
368: PetscCall(PetscLayoutSetUp(A->rmap));
369: PetscCall(PetscLayoutSetUp(A->cmap));
370: PetscCall(MatGetBlockSizes(A, &bs, &cbs));
371: /* these routines assume bs == cbs; this should be checked somehow */
372: PetscCall(MatSeqBAIJSetPreallocation(A, bs, 0, dnnz));
373: PetscCall(MatMPIBAIJSetPreallocation(A, bs, 0, dnnz, 0, onnz));
374: PetscCall(MatSeqSBAIJSetPreallocation(A, bs, 0, dnnzu));
375: PetscCall(MatMPISBAIJSetPreallocation(A, bs, 0, dnnzu, 0, onnzu));
376: /*
377: In general, we have to do extra work to preallocate for scalar (AIJ) or unassembled (IS) matrices so we check whether it will do any
378: good before going on with it.
379: */
380: PetscCall(PetscObjectHasFunction((PetscObject)A, "MatMPIAIJSetPreallocation_C", &aij));
381: PetscCall(PetscObjectHasFunction((PetscObject)A, "MatISSetPreallocation_C", &is));
382: PetscCall(PetscObjectHasFunction((PetscObject)A, "MatHYPRESetPreallocation_C", &hyp));
383: if (!aij && !is && !hyp) PetscCall(PetscObjectHasFunction((PetscObject)A, "MatSeqAIJSetPreallocation_C", &aij));
384: if (aij || is || hyp) {
385: if (bs == cbs && bs == 1) {
386: PetscCall(MatSeqAIJSetPreallocation(A, 0, dnnz));
387: PetscCall(MatMPIAIJSetPreallocation(A, 0, dnnz, 0, onnz));
388: PetscCall(MatISSetPreallocation(A, 0, dnnz, 0, onnz));
389: #if defined(PETSC_HAVE_HYPRE)
390: PetscCall(MatHYPRESetPreallocation(A, 0, dnnz, 0, onnz));
391: #endif
392: } else { /* Convert block-row preallocation to scalar-row */
393: PetscInt i, m, *sdnnz, *sonnz;
394: PetscCall(MatGetLocalSize(A, &m, NULL));
395: PetscCall(PetscMalloc2((!!dnnz) * m, &sdnnz, (!!onnz) * m, &sonnz));
396: for (i = 0; i < m; i++) {
397: if (dnnz) sdnnz[i] = dnnz[i / bs] * cbs;
398: if (onnz) sonnz[i] = onnz[i / bs] * cbs;
399: }
400: PetscCall(MatSeqAIJSetPreallocation(A, 0, dnnz ? sdnnz : NULL));
401: PetscCall(MatMPIAIJSetPreallocation(A, 0, dnnz ? sdnnz : NULL, 0, onnz ? sonnz : NULL));
402: PetscCall(MatISSetPreallocation(A, 0, dnnz ? sdnnz : NULL, 0, onnz ? sonnz : NULL));
403: #if defined(PETSC_HAVE_HYPRE)
404: PetscCall(MatHYPRESetPreallocation(A, 0, dnnz ? sdnnz : NULL, 0, onnz ? sonnz : NULL));
405: #endif
406: PetscCall(PetscFree2(sdnnz, sonnz));
407: }
408: }
409: PetscFunctionReturn(PETSC_SUCCESS);
410: }
412: /*@C
413: MatHeaderMerge - Merges some information from the header of `C` to `A`; the `C` object is then destroyed
415: Collective, No Fortran Support
417: Input Parameters:
418: + A - a `Mat` being merged into
419: - C - the `Mat` providing the merge information
421: Level: developer
423: Notes:
424: `A` and `C` must be of the same type.
425: The object list and query function list in `A` are retained, as well as the object name and prefix.
426: The object state of `A` is increased by 1.
428: Developer Note:
429: This is somewhat different from `MatHeaderReplace()`; it would be nice to merge the code
431: .seealso: `Mat`, `MatHeaderReplace()`
432: @*/
433: PetscErrorCode MatHeaderMerge(Mat A, Mat *C)
434: {
435: PetscInt refct;
436: PetscOps Abops;
437: struct _MatOps Aops;
438: char *mtype, *mname, *mprefix;
439: Mat_Product *product;
440: Mat_Redundant *redundant;
441: PetscObjectState state;
442: PetscObjectList olist;
443: PetscFunctionList qlist;
445: PetscFunctionBegin;
448: if (A == *C) PetscFunctionReturn(PETSC_SUCCESS);
449: PetscCheckSameTypeAndComm(A, 1, *C, 2);
450: /* save the parts of A we need */
451: Abops = ((PetscObject)A)->bops[0];
452: Aops = A->ops[0];
453: refct = ((PetscObject)A)->refct;
454: mtype = ((PetscObject)A)->type_name;
455: mname = ((PetscObject)A)->name;
456: state = ((PetscObject)A)->state;
457: mprefix = ((PetscObject)A)->prefix;
458: product = A->product;
459: redundant = A->redundant;
460: qlist = ((PetscObject)A)->qlist;
461: olist = ((PetscObject)A)->olist;
463: /* zero these so the destroy below does not free them */
464: ((PetscObject)A)->type_name = NULL;
465: ((PetscObject)A)->name = NULL;
466: ((PetscObject)A)->qlist = NULL;
467: ((PetscObject)A)->olist = NULL;
469: /*
470: free all the interior data structures of A;
471: cannot use PetscUseTypeMethod(A, destroy) because the compiler
472: thinks it may print a NULL type_name and name
473: */
474: PetscTryTypeMethod(A, destroy);
476: PetscCall(PetscFree(A->defaultvectype));
477: PetscCall(PetscFree(A->defaultrandtype));
478: PetscCall(PetscLayoutDestroy(&A->rmap));
479: PetscCall(PetscLayoutDestroy(&A->cmap));
480: PetscCall(PetscComposedQuantitiesDestroy((PetscObject)A));
482: /* copy C over to A */
483: PetscCall(PetscFree(A->factorprefix));
484: PetscCall(PetscMemcpy(A, *C, sizeof(struct _p_Mat)));
486: /* return the parts of A we saved */
487: ((PetscObject)A)->bops[0] = Abops;
488: A->ops[0] = Aops;
489: ((PetscObject)A)->refct = refct;
490: ((PetscObject)A)->type_name = mtype;
491: ((PetscObject)A)->name = mname;
492: ((PetscObject)A)->prefix = mprefix;
493: ((PetscObject)A)->state = state + 1;
494: A->product = product;
495: A->redundant = redundant;
497: /* Append the saved lists */
498: PetscCall(PetscFunctionListDuplicate(qlist, &((PetscObject)A)->qlist));
499: PetscCall(PetscObjectListDuplicate(olist, &((PetscObject)A)->olist));
500: PetscCall(PetscFunctionListDestroy(&qlist));
501: PetscCall(PetscObjectListDestroy(&olist));
503: /* since these two are copied into A we do not want them destroyed in C */
504: ((PetscObject)*C)->qlist = NULL;
505: ((PetscObject)*C)->olist = NULL;
506: PetscCall(PetscHeaderDestroy(C));
507: PetscFunctionReturn(PETSC_SUCCESS);
508: }
510: /*@
511: MatHeaderReplace - Replaces the internal data of matrix `A` by the internal data of matrix `C` while deleting the outer wrapper of `C`
513: Input Parameters:
514: + A - a `Mat` whose internal data is to be replaced
515: - C - the `Mat` providing new internal data for `A`
517: Level: advanced
519: Example Usage\:
520: .vb
521: Mat C;
522: MatCreateSeqAIJWithArrays(..., &C);
523: MatHeaderReplace(A, &C);
524: // C has been destroyed and A contains the matrix entries of C
525: .ve
527: Note:
528: This can be used inside a function provided to `SNESSetJacobian()`, `TSSetRHSJacobian()`, or `TSSetIJacobian()` in cases where the user code computes an entirely new sparse matrix
529: (generally with a different nonzero pattern) for each Newton update. It is usually better to reuse the matrix structure of `A` instead of constructing an entirely new one.
531: Developer Note:
532: This is somewhat different from `MatHeaderMerge()`; it would be nice to merge the code
534: .seealso: `Mat`, `MatHeaderMerge()`
535: @*/
536: PetscErrorCode MatHeaderReplace(Mat A, Mat *C)
537: {
538: PetscInt refct;
539: PetscObjectState state;
540: struct _p_Mat buffer;
541: MatStencilInfo stencil;
543: PetscFunctionBegin;
546: if (A == *C) PetscFunctionReturn(PETSC_SUCCESS);
547: PetscCheckSameComm(A, 1, *C, 2);
548: PetscCheck(((PetscObject)*C)->refct == 1, PetscObjectComm((PetscObject)*C), PETSC_ERR_ARG_WRONGSTATE, "Object C has refct %" PetscInt_FMT " > 1, would leave hanging reference", ((PetscObject)*C)->refct);
550: /* swap C and A */
551: refct = ((PetscObject)A)->refct;
552: state = ((PetscObject)A)->state;
553: stencil = A->stencil;
554: PetscCall(PetscMemcpy(&buffer, A, sizeof(struct _p_Mat)));
555: PetscCall(PetscMemcpy(A, *C, sizeof(struct _p_Mat)));
556: PetscCall(PetscMemcpy(*C, &buffer, sizeof(struct _p_Mat)));
557: ((PetscObject)A)->refct = refct;
558: ((PetscObject)A)->state = state + 1;
559: A->stencil = stencil;
561: ((PetscObject)*C)->refct = 1;
562: PetscCall(MatDestroy(C));
563: PetscFunctionReturn(PETSC_SUCCESS);
564: }
566: /*@
567: MatBindToCPU - marks a matrix to temporarily stay on the CPU and perform computations on the CPU
569: Logically Collective
571: Input Parameters:
572: + A - the matrix
573: - flg - bind to the CPU if value of `PETSC_TRUE`
575: Level: intermediate
577: .seealso: [](ch_matrices), `Mat`, `MatBoundToCPU()`
578: @*/
579: PetscErrorCode MatBindToCPU(Mat A, PetscBool flg)
580: {
581: PetscFunctionBegin;
584: #if defined(PETSC_HAVE_DEVICE)
585: if (A->boundtocpu == flg) PetscFunctionReturn(PETSC_SUCCESS);
586: A->boundtocpu = flg;
587: PetscTryTypeMethod(A, bindtocpu, flg);
588: #endif
589: PetscFunctionReturn(PETSC_SUCCESS);
590: }
592: /*@
593: MatBoundToCPU - query if a matrix is bound to the CPU
595: Input Parameter:
596: . A - the matrix
598: Output Parameter:
599: . flg - the logical flag
601: Level: intermediate
603: .seealso: [](ch_matrices), `Mat`, `MatBindToCPU()`
604: @*/
605: PetscErrorCode MatBoundToCPU(Mat A, PetscBool *flg)
606: {
607: PetscFunctionBegin;
609: PetscAssertPointer(flg, 2);
610: #if defined(PETSC_HAVE_DEVICE)
611: *flg = A->boundtocpu;
612: #else
613: *flg = PETSC_TRUE;
614: #endif
615: PetscFunctionReturn(PETSC_SUCCESS);
616: }
618: PetscErrorCode MatSetValuesCOO_Basic(Mat A, const PetscScalar coo_v[], InsertMode imode)
619: {
620: IS is_coo_i, is_coo_j;
621: const PetscInt *coo_i, *coo_j;
622: PetscInt n, n_i, n_j;
623: PetscScalar zero = 0.;
625: PetscFunctionBegin;
626: PetscCall(PetscObjectQuery((PetscObject)A, "__PETSc_coo_i", (PetscObject *)&is_coo_i));
627: PetscCall(PetscObjectQuery((PetscObject)A, "__PETSc_coo_j", (PetscObject *)&is_coo_j));
628: PetscCheck(is_coo_i, PetscObjectComm((PetscObject)A), PETSC_ERR_COR, "Missing coo_i IS");
629: PetscCheck(is_coo_j, PetscObjectComm((PetscObject)A), PETSC_ERR_COR, "Missing coo_j IS");
630: PetscCall(ISGetLocalSize(is_coo_i, &n_i));
631: PetscCall(ISGetLocalSize(is_coo_j, &n_j));
632: PetscCheck(n_i == n_j, PETSC_COMM_SELF, PETSC_ERR_COR, "Wrong local size %" PetscInt_FMT " != %" PetscInt_FMT, n_i, n_j);
633: PetscCall(ISGetIndices(is_coo_i, &coo_i));
634: PetscCall(ISGetIndices(is_coo_j, &coo_j));
635: if (imode != ADD_VALUES) PetscCall(MatZeroEntries(A));
636: for (n = 0; n < n_i; n++) PetscCall(MatSetValue(A, coo_i[n], coo_j[n], coo_v ? coo_v[n] : zero, ADD_VALUES));
637: PetscCall(ISRestoreIndices(is_coo_i, &coo_i));
638: PetscCall(ISRestoreIndices(is_coo_j, &coo_j));
639: PetscFunctionReturn(PETSC_SUCCESS);
640: }
642: PetscErrorCode MatSetPreallocationCOO_Basic(Mat A, PetscCount ncoo, PetscInt coo_i[], PetscInt coo_j[])
643: {
644: Mat preallocator;
645: IS is_coo_i, is_coo_j;
646: PetscScalar zero = 0.0;
648: PetscFunctionBegin;
649: PetscCheck(ncoo <= PETSC_INT_MAX, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "ncoo %" PetscCount_FMT " overflowed PetscInt; configure --with-64-bit-indices or request support", ncoo);
650: PetscCall(PetscLayoutSetUp(A->rmap));
651: PetscCall(PetscLayoutSetUp(A->cmap));
652: PetscCall(MatCreate(PetscObjectComm((PetscObject)A), &preallocator));
653: PetscCall(MatSetType(preallocator, MATPREALLOCATOR));
654: PetscCall(MatSetSizes(preallocator, A->rmap->n, A->cmap->n, A->rmap->N, A->cmap->N));
655: PetscCall(MatSetLayouts(preallocator, A->rmap, A->cmap));
656: PetscCall(MatSetUp(preallocator));
657: for (PetscCount n = 0; n < ncoo; n++) PetscCall(MatSetValue(preallocator, coo_i[n], coo_j[n], zero, INSERT_VALUES));
658: PetscCall(MatAssemblyBegin(preallocator, MAT_FINAL_ASSEMBLY));
659: PetscCall(MatAssemblyEnd(preallocator, MAT_FINAL_ASSEMBLY));
660: PetscCall(MatPreallocatorPreallocate(preallocator, PETSC_TRUE, A));
661: PetscCall(MatDestroy(&preallocator));
662: PetscCall(ISCreateGeneral(PETSC_COMM_SELF, (PetscInt)ncoo, coo_i, PETSC_COPY_VALUES, &is_coo_i));
663: PetscCall(ISCreateGeneral(PETSC_COMM_SELF, (PetscInt)ncoo, coo_j, PETSC_COPY_VALUES, &is_coo_j));
664: PetscCall(PetscObjectCompose((PetscObject)A, "__PETSc_coo_i", (PetscObject)is_coo_i));
665: PetscCall(PetscObjectCompose((PetscObject)A, "__PETSc_coo_j", (PetscObject)is_coo_j));
666: PetscCall(ISDestroy(&is_coo_i));
667: PetscCall(ISDestroy(&is_coo_j));
668: PetscFunctionReturn(PETSC_SUCCESS);
669: }
671: /*@C
672: MatSetPreallocationCOO - set preallocation for matrices using a coordinate format of the entries with global indices
674: Collective
676: Input Parameters:
677: + A - matrix being preallocated
678: . ncoo - number of entries
679: . coo_i - row indices
680: - coo_j - column indices
682: Level: beginner
684: Notes:
685: The indices within `coo_i` and `coo_j` may be modified within this function. The caller should not rely on them
686: having any specific value after this function returns. The arrays can be freed or reused immediately
687: after this function returns.
689: Entries can be repeated, see `MatSetValuesCOO()`. Entries with negative row or column indices are allowed
690: but will be ignored. The corresponding entries in `MatSetValuesCOO()` will be ignored too. Remote entries
691: are allowed and will be properly added or inserted into the matrix, unless the matrix option `MAT_IGNORE_OFF_PROC_ENTRIES`
692: is set, in which case remote entries are ignored, or `MAT_NO_OFF_PROC_ENTRIES` is set, in which case an error will be generated.
694: If you just want to create a sequential AIJ matrix (`MATSEQAIJ`), and your matrix entries in COO format are unique, you can also use
695: `MatCreateSeqAIJFromTriple()`. But that is not recommended for iterative applications.
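
Example Usage\:
A sketch with three entries given in global numbering (shown for one process); the indices and values are illustrative, and `A` is assumed to already have its type and sizes set.
.vb
PetscInt    coo_i[] = {0, 0, 1};
PetscInt    coo_j[] = {0, 1, 1};
PetscScalar coo_v[] = {2.0, -1.0, 3.0};

MatSetPreallocationCOO(A, 3, coo_i, coo_j);
MatSetValuesCOO(A, coo_v, INSERT_VALUES);
.ve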
697: .seealso: [](ch_matrices), `Mat`, `MatSetValuesCOO()`, `MatSeqAIJSetPreallocation()`, `MatMPIAIJSetPreallocation()`, `MatSeqBAIJSetPreallocation()`,
698: `MatMPIBAIJSetPreallocation()`, `MatSeqSBAIJSetPreallocation()`, `MatMPISBAIJSetPreallocation()`, `MatSetPreallocationCOOLocal()`,
699: `DMSetMatrixPreallocateSkip()`, `MatCreateSeqAIJFromTriple()`
700: @*/
701: PetscErrorCode MatSetPreallocationCOO(Mat A, PetscCount ncoo, PetscInt coo_i[], PetscInt coo_j[])
702: {
703: PetscErrorCode (*f)(Mat, PetscCount, PetscInt[], PetscInt[]) = NULL;
705: PetscFunctionBegin;
708: if (ncoo) PetscAssertPointer(coo_i, 3);
709: if (ncoo) PetscAssertPointer(coo_j, 4);
710: PetscCall(PetscLayoutSetUp(A->rmap));
711: PetscCall(PetscLayoutSetUp(A->cmap));
712: PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatSetPreallocationCOO_C", &f));
714: PetscCall(PetscLogEventBegin(MAT_PreallCOO, A, 0, 0, 0));
715: if (f) {
716: PetscCall((*f)(A, ncoo, coo_i, coo_j));
717: } else { /* allow fallback, very slow */
718: PetscCall(MatSetPreallocationCOO_Basic(A, ncoo, coo_i, coo_j));
719: }
720: PetscCall(PetscLogEventEnd(MAT_PreallCOO, A, 0, 0, 0));
721: A->preallocated = PETSC_TRUE;
722: A->nonzerostate++;
723: PetscFunctionReturn(PETSC_SUCCESS);
724: }
726: /*@C
727: MatSetPreallocationCOOLocal - set preallocation for matrices using a coordinate format of the entries with local indices
729: Collective
731: Input Parameters:
732: + A - matrix being preallocated
733: . ncoo - number of entries
734: . coo_i - row indices (local numbering; may be modified)
735: - coo_j - column indices (local numbering; may be modified)
737: Level: beginner
739: Notes:
740: The local indices are translated using the local to global mapping, thus `MatSetLocalToGlobalMapping()` must have been
741: called prior to this function. For matrices created with `DMCreateMatrix()` the local to global mapping is often already provided.
743: The indices `coo_i` and `coo_j` may be modified within this function. They might be translated to corresponding global
744: indices, but the caller should not rely on them having any specific value after this function returns. The arrays
745: can be freed or reused immediately after this function returns.
747: Entries can be repeated, see `MatSetValuesCOO()`. Entries with negative row or column indices are allowed
748: but will be ignored. The corresponding entries in `MatSetValuesCOO()` will be ignored too. Remote entries
749: are allowed and will be properly added or inserted into the matrix.
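
Example Usage\:
A sketch using local indices; it assumes `MatSetLocalToGlobalMapping()` has been called (or the matrix came from `DMCreateMatrix()`), and the indices and values are illustrative.
.vb
PetscInt    coo_i[] = {0, 1, 1};
PetscInt    coo_j[] = {0, 0, 1};
PetscScalar coo_v[] = {4.0, -1.0, 2.0};

MatSetPreallocationCOOLocal(A, 3, coo_i, coo_j);
MatSetValuesCOO(A, coo_v, ADD_VALUES);
.ve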
751: .seealso: [](ch_matrices), `Mat`, `MatSetValuesCOO()`, `MatSeqAIJSetPreallocation()`, `MatMPIAIJSetPreallocation()`, `MatSeqBAIJSetPreallocation()`,
752: `MatMPIBAIJSetPreallocation()`, `MatSeqSBAIJSetPreallocation()`, `MatMPISBAIJSetPreallocation()`, `MatSetPreallocationCOO()`,
753: `DMSetMatrixPreallocateSkip()`
754: @*/
755: PetscErrorCode MatSetPreallocationCOOLocal(Mat A, PetscCount ncoo, PetscInt coo_i[], PetscInt coo_j[])
756: {
757: PetscErrorCode (*f)(Mat, PetscCount, PetscInt[], PetscInt[]) = NULL;
759: PetscFunctionBegin;
762: if (ncoo) PetscAssertPointer(coo_i, 3);
763: if (ncoo) PetscAssertPointer(coo_j, 4);
764: PetscCheck(ncoo <= PETSC_INT_MAX, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "ncoo %" PetscCount_FMT " overflowed PetscInt; configure --with-64-bit-indices or request support", ncoo);
765: PetscCall(PetscLayoutSetUp(A->rmap));
766: PetscCall(PetscLayoutSetUp(A->cmap));
768: PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatSetPreallocationCOOLocal_C", &f));
769: if (f) {
770: PetscCall((*f)(A, ncoo, coo_i, coo_j));
771: A->nonzerostate++;
772: } else {
773: ISLocalToGlobalMapping ltog_row, ltog_col;
775: PetscCall(MatGetLocalToGlobalMapping(A, &ltog_row, &ltog_col));
776: if (ltog_row) PetscCall(ISLocalToGlobalMappingApply(ltog_row, (PetscInt)ncoo, coo_i, coo_i));
777: if (ltog_col) PetscCall(ISLocalToGlobalMappingApply(ltog_col, (PetscInt)ncoo, coo_j, coo_j));
778: PetscCall(MatSetPreallocationCOO(A, ncoo, coo_i, coo_j));
779: }
780: A->preallocated = PETSC_TRUE;
781: PetscFunctionReturn(PETSC_SUCCESS);
782: }
784: /*@
785: MatSetValuesCOO - set values at once in a matrix preallocated using `MatSetPreallocationCOO()`
787: Collective
789: Input Parameters:
790: + A - matrix being preallocated
791: . coo_v - the matrix values (can be `NULL`)
792: - imode - the insert mode
794: Level: beginner
796: Notes:
797: The values must follow the order of the indices prescribed with `MatSetPreallocationCOO()` or `MatSetPreallocationCOOLocal()`.
799: When repeated entries are specified in the COO indices, the `coo_v` values are first summed, regardless of the value of `imode`.
800: The `imode` flag indicates whether the summed `coo_v` values are added to the current values of the matrix (`ADD_VALUES`) or overwrite them (`INSERT_VALUES`).
802: `MatAssemblyBegin()` and `MatAssemblyEnd()` do not need to be called after this routine. It automatically handles the assembly process.
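
Example Usage\:
A sketch of the repeated-entry semantics; it assumes the COO pattern {(0,0), (0,0)} was set with `MatSetPreallocationCOO()`, and the values are illustrative.
.vb
PetscScalar coo_v[] = {2.0, 3.0};

// the two values are first summed to 5.0; with INSERT_VALUES the entry (0,0)
// becomes 5.0, with ADD_VALUES 5.0 is added to its current value
MatSetValuesCOO(A, coo_v, INSERT_VALUES);
.ve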
804: .seealso: [](ch_matrices), `Mat`, `MatSetPreallocationCOO()`, `MatSetPreallocationCOOLocal()`, `InsertMode`, `INSERT_VALUES`, `ADD_VALUES`
805: @*/
806: PetscErrorCode MatSetValuesCOO(Mat A, const PetscScalar coo_v[], InsertMode imode)
807: {
808: PetscErrorCode (*f)(Mat, const PetscScalar[], InsertMode) = NULL;
809: PetscBool oldFlg;
811: PetscFunctionBegin;
814: MatCheckPreallocated(A, 1);
816: PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatSetValuesCOO_C", &f));
817: PetscCall(PetscLogEventBegin(MAT_SetVCOO, A, 0, 0, 0));
818: if (f) {
819: PetscCall((*f)(A, coo_v, imode)); // all known COO implementations do not use MatStash. They do their own off-proc communication
820: PetscCall(MatGetOption(A, MAT_NO_OFF_PROC_ENTRIES, &oldFlg));
821: PetscCall(MatSetOption(A, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE)); // set A->nooffprocentries to avoid costly MatStash scatter in MatAssembly
822: } else {
823: PetscCall(MatSetValuesCOO_Basic(A, coo_v, imode)); // fall back to MatSetValues, which might use MatStash
824: }
825: PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
826: PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));
827: if (f) PetscCall(MatSetOption(A, MAT_NO_OFF_PROC_ENTRIES, oldFlg));
828: PetscCall(PetscLogEventEnd(MAT_SetVCOO, A, 0, 0, 0));
829: PetscFunctionReturn(PETSC_SUCCESS);
830: }
832: /*@
833: MatSetBindingPropagates - Sets whether the state of being bound to the CPU for a GPU matrix type propagates to child and some other associated objects
835: Input Parameters:
836: + A - the matrix
837: - flg - flag indicating whether the boundtocpu flag should be propagated
839: Level: developer
841: Notes:
842: If the value of `flg` is set to `PETSC_TRUE`, the following will occur:
843: + `MatCreateSubMatrices()` and `MatCreateRedundantMatrix()` - bind created matrices to CPU if the input matrix is bound to the CPU.
844: - `MatCreateVecs()` - bind created vectors to CPU if the input matrix is bound to the CPU.
846: The bindingpropagates flag itself is also propagated by the above routines.
848: Developer Notes:
849: If the fine-scale `DMDA` has the `-dm_bind_below` option set to true, then `DMCreateInterpolationScale()` calls `MatSetBindingPropagates()`
850: on the restriction/interpolation operator to set the bindingpropagates flag to true.
852: .seealso: [](ch_matrices), `Mat`, `VecSetBindingPropagates()`, `MatGetBindingPropagates()`
853: @*/
854: PetscErrorCode MatSetBindingPropagates(Mat A, PetscBool flg)
855: {
856: PetscFunctionBegin;
858: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_HIP)
859: A->bindingpropagates = flg;
860: #endif
861: PetscFunctionReturn(PETSC_SUCCESS);
862: }
864: /*@
865: MatGetBindingPropagates - Gets whether the state of being bound to the CPU for a GPU matrix type propagates to child and some other associated objects
867: Input Parameter:
868: . A - the matrix
870: Output Parameter:
871: . flg - flag indicating whether the boundtocpu flag will be propagated
873: Level: developer
875: .seealso: [](ch_matrices), `Mat`, `MatSetBindingPropagates()`
876: @*/
877: PetscErrorCode MatGetBindingPropagates(Mat A, PetscBool *flg)
878: {
879: PetscFunctionBegin;
881: PetscAssertPointer(flg, 2);
882: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_HIP)
883: *flg = A->bindingpropagates;
884: #else
885: *flg = PETSC_FALSE;
886: #endif
887: PetscFunctionReturn(PETSC_SUCCESS);
888: }