Actual source code: pinit.c
1: #define PETSC_DESIRE_FEATURE_TEST_MACROS
2: /*
3: This file defines the initialization of PETSc, including PetscInitialize()
4: */
5: #include <petsc/private/petscimpl.h>
6: #include <petsc/private/logimpl.h>
7: #include <petscviewer.h>
8: #include <petsc/private/garbagecollector.h>
10: #if !defined(PETSC_HAVE_WINDOWS_COMPILERS)
11: #include <petsc/private/valgrind/valgrind.h>
12: #endif
14: #if defined(PETSC_USE_FORTRAN_BINDINGS)
15: #include <petsc/private/ftnimpl.h>
16: #endif
18: #if PetscDefined(USE_COVERAGE)
19: EXTERN_C_BEGIN
20: #if defined(PETSC_HAVE___GCOV_DUMP)
22: #endif
23: void __gcov_flush(void);
24: EXTERN_C_END
25: #endif
27: #if defined(PETSC_SERIALIZE_FUNCTIONS)
28: PETSC_INTERN PetscFPT PetscFPTData;
29: PetscFPT PetscFPTData = 0;
30: #endif
32: #if PetscDefined(HAVE_SAWS)
33: #include <petscviewersaws.h>
34: #endif
36: PETSC_INTERN FILE *petsc_history;
38: PETSC_INTERN PetscErrorCode PetscInitialize_DynamicLibraries(void);
39: PETSC_INTERN PetscErrorCode PetscFinalize_DynamicLibraries(void);
40: PETSC_INTERN PetscErrorCode PetscSequentialPhaseBegin_Private(MPI_Comm, int);
41: PETSC_INTERN PetscErrorCode PetscSequentialPhaseEnd_Private(MPI_Comm, int);
42: PETSC_INTERN PetscErrorCode PetscCloseHistoryFile(FILE **);
/* user may set these BEFORE calling PetscInitialize() */
MPI_Comm PETSC_COMM_WORLD = MPI_COMM_NULL;
#if PetscDefined(HAVE_MPI_INIT_THREAD)
/* PETSC_DECIDE lets PetscInitialize() choose the threading level passed to MPI_Init_thread() */
PetscMPIInt PETSC_MPI_THREAD_REQUIRED = PETSC_DECIDE;
#else
/* without MPI_Init_thread() support only single-threaded MPI can be requested */
PetscMPIInt PETSC_MPI_THREAD_REQUIRED = MPI_THREAD_SINGLE;
#endif

/* MPI attribute keyvals created during PetscInitialize(); MPI_KEYVAL_INVALID until then */
PetscMPIInt Petsc_Counter_keyval = MPI_KEYVAL_INVALID;
PetscMPIInt Petsc_InnerComm_keyval = MPI_KEYVAL_INVALID;
PetscMPIInt Petsc_OuterComm_keyval = MPI_KEYVAL_INVALID;
PetscMPIInt Petsc_ShmComm_keyval = MPI_KEYVAL_INVALID;
PetscMPIInt Petsc_CreationIdx_keyval = MPI_KEYVAL_INVALID;
PetscMPIInt Petsc_Garbage_HMap_keyval = MPI_KEYVAL_INVALID;

PetscMPIInt Petsc_SharedWD_keyval = MPI_KEYVAL_INVALID;
PetscMPIInt Petsc_SharedTmp_keyval = MPI_KEYVAL_INVALID;
62: /*
63: Declare and set all the string names of the PETSc enums
64: */
65: const char *const PetscBools[] = {"FALSE", "TRUE", "PetscBool", "PETSC_", NULL};
66: const char *const PetscBool3s[] = {"FALSE", "TRUE", "UNKNOWN", "PetscBool3", "PETSC_", NULL};
67: const char *const PetscCopyModes[] = {"COPY_VALUES", "OWN_POINTER", "USE_POINTER", "PetscCopyMode", "PETSC_", NULL};
69: PetscBool PetscPreLoadingUsed = PETSC_FALSE;
70: PetscBool PetscPreLoadingOn = PETSC_FALSE;
72: PetscInt PetscHotRegionDepth;
74: PetscBool PETSC_RUNNING_ON_VALGRIND = PETSC_FALSE;
76: #if defined(PETSC_HAVE_THREADSAFETY)
77: PetscSpinlock PetscViewerASCIISpinLockOpen;
78: PetscSpinlock PetscViewerASCIISpinLockStdout;
79: PetscSpinlock PetscViewerASCIISpinLockStderr;
80: PetscSpinlock PetscCommSpinLock;
81: #endif
83: extern PetscInt PetscNumBLASThreads;
85: /*@C
86: PetscInitializeNoPointers - Calls PetscInitialize() from C/C++ without the pointers to argc and args
88: Collective, No Fortran Support
90: Input Parameters:
91: + argc - number of args
92: . args - array of command line arguments
93: . filename - optional name of the program file, pass `NULL` to ignore
94: - help - optional help, pass `NULL` to ignore
96: Level: advanced
98: Notes:
99: this is called only by the PETSc Julia interface. Even though it might start MPI it sets the flag to
100: indicate that it did NOT start MPI so that the `PetscFinalize()` does not end MPI, thus allowing `PetscInitialize()` to
101: be called multiple times from Julia without the problem of trying to initialize MPI more than once.
103: Developer Notes:
104: Turns off PETSc signal handling to allow Julia to manage signals
106: .seealso: `PetscInitialize()`, `PetscInitializeFortran()`, `PetscInitializeNoArguments()`
107: */
108: PetscErrorCode PetscInitializeNoPointers(int argc, char **args, const char *filename, const char *help)
109: {
110: int myargc = argc;
111: char **myargs = args;
113: PetscFunctionBegin;
114: PetscCall(PetscInitialize(&myargc, &myargs, filename, help));
115: PetscCall(PetscPopSignalHandler());
116: PetscBeganMPI = PETSC_FALSE;
117: PetscFunctionReturn(PETSC_SUCCESS);
118: }
120: /*@C
121: PetscInitializeNoArguments - Calls `PetscInitialize()` from C/C++ without
122: the command line arguments.
124: Collective
126: Level: advanced
128: .seealso: `PetscInitialize()`, `PetscInitializeFortran()`
129: @*/
130: PetscErrorCode PetscInitializeNoArguments(void) PeNS
131: {
132: int argc = 0;
133: char **args = NULL;
135: PetscFunctionBegin;
136: PetscCall(PetscInitialize(&argc, &args, NULL, NULL));
137: PetscFunctionReturn(PETSC_SUCCESS);
138: }
/*@
  PetscInitialized - Determine whether PETSc is initialized.

  Output Parameter:
. isInitialized - `PETSC_TRUE` if PETSc is initialized, `PETSC_FALSE` otherwise

  Level: beginner

.seealso: `PetscInitialize()`, `PetscInitializeNoArguments()`, `PetscInitializeFortran()`
@*/
PetscErrorCode PetscInitialized(PetscBool *isInitialized)
{
  PetscFunctionBegin;
  /* the pointer check uses PETSc's error machinery, so it is only safe once PETSc is initialized */
  if (PetscInitializeCalled) PetscAssertPointer(isInitialized, 1);
  *isInitialized = PetscInitializeCalled;
  PetscFunctionReturn(PETSC_SUCCESS);
}
/*@
  PetscFinalized - Determine whether `PetscFinalize()` has been called yet

  Output Parameter:
. isFinalized - `PETSC_TRUE` if PETSc is finalized, `PETSC_FALSE` otherwise

  Level: developer

.seealso: `PetscInitialize()`, `PetscInitializeNoArguments()`, `PetscInitializeFortran()`
@*/
PetscErrorCode PetscFinalized(PetscBool *isFinalized)
{
  PetscFunctionBegin;
  /* once PetscFinalize() has run the error machinery is gone, so only assert beforehand */
  if (!PetscFinalizeCalled) PetscAssertPointer(isFinalized, 1);
  *isFinalized = PetscFinalizeCalled;
  PetscFunctionReturn(PETSC_SUCCESS);
}
176: PETSC_INTERN PetscErrorCode PetscOptionsCheckInitial_Private(const char[]);
178: /*
179: This function is the MPI reduction operation used to compute the sum of the
180: first half of the datatype and the max of the second half.
181: */
182: MPI_Op MPIU_MAXSUM_OP = 0;
183: MPI_Op Petsc_Garbage_SetIntersectOp = 0;
/*
  MPIU_MaxSum_Local - user-defined MPI reduction that, for each pair of entries,
  takes the max of the first and the sum of the second.

  Registered as the combination function for MPIU_MAXSUM_OP; called by MPI, not by users.
  Supports MPIU_2INT (pairs of PetscInt) and MPIU_INT_MPIINT (PetscInt followed by
  PetscMPIInt); any other datatype aborts.
*/
PETSC_INTERN void MPIAPI MPIU_MaxSum_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype)
{
  PetscFunctionBegin;
  if (*datatype == MPIU_INT_MPIINT && PetscDefined(USE_64BIT_INDICES)) {
#if defined(PETSC_USE_64BIT_INDICES)
    /* with 64-bit PetscInt the (PetscInt,PetscMPIInt) pair has mixed-width members and must be
       accessed through the matching struct layout */
    struct petsc_mpiu_int_mpiint *xin = (struct petsc_mpiu_int_mpiint *)in, *xout = (struct petsc_mpiu_int_mpiint *)out;
    PetscMPIInt count = *cnt;

    for (PetscMPIInt i = 0; i < count; i++) {
      xout[i].a = PetscMax(xout[i].a, xin[i].a);
      xout[i].b += xin[i].b;
    }
#endif
  } else if (*datatype == MPIU_2INT || *datatype == MPIU_INT_MPIINT) {
    /* with 32-bit PetscInt both datatypes reduce to two equal-width integers per element,
       so a flat PetscInt array view suffices */
    PetscInt *xin = (PetscInt *)in, *xout = (PetscInt *)out;
    PetscMPIInt count = *cnt;

    for (PetscMPIInt i = 0; i < count; i++) {
      xout[2 * i] = PetscMax(xout[2 * i], xin[2 * i]);
      xout[2 * i + 1] += xin[2 * i + 1];
    }
  } else {
    /* cannot PetscCall() here: MPI reduction callbacks return void, so print and abort */
    PetscErrorCode ierr = (*PetscErrorPrintf)("Can only handle MPIU_2INT and MPIU_INT_MPIINT data types");
    (void)ierr;
    PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
  }
  PetscFunctionReturnVoid();
}
/*@
  PetscMaxSum - Returns the max of the first entry over all MPI processes and the sum of the second entry.

  Collective

  Input Parameters:
+ comm - the communicator
- array - an array of length 2 times `size`, the number of MPI processes

  Output Parameters:
+ max - the maximum of `array[2*rank]` over all MPI processes
- sum - the sum of the `array[2*rank + 1]` over all MPI processes

  Level: developer

.seealso: `PetscInitialize()`
@*/
PetscErrorCode PetscMaxSum(MPI_Comm comm, const PetscInt array[], PetscInt *max, PetscInt *sum)
{
  PetscFunctionBegin;
#if defined(PETSC_HAVE_MPI_REDUCE_SCATTER_BLOCK)
  {
    struct {
      PetscInt max, sum;
    } work;

    /* reduce all pairs with MPIU_MAXSUM_OP and scatter exactly one (max,sum) result back per process */
    PetscCallMPI(MPI_Reduce_scatter_block((void *)array, &work, 1, MPIU_2INT, MPIU_MAXSUM_OP, comm));
    *max = work.max;
    *sum = work.sum;
  }
#else
  {
    /* fallback: allreduce the whole array of pairs, then each process extracts its own entry */
    PetscMPIInt size, rank;
    struct {
      PetscInt max, sum;
    } *work;

    PetscCallMPI(MPI_Comm_size(comm, &size));
    PetscCallMPI(MPI_Comm_rank(comm, &rank));
    PetscCall(PetscMalloc1(size, &work));
    PetscCallMPI(MPIU_Allreduce((void *)array, work, size, MPIU_2INT, MPIU_MAXSUM_OP, comm));
    *max = work[rank].max;
    *sum = work[rank].sum;
    PetscCall(PetscFree(work));
  }
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}
261: #if (defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)) || (defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16))
262: #if defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)
263: #include <quadmath.h>
264: #endif
265: MPI_Op MPIU_SUM___FP16___FLOAT128 = 0;
266: #if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16)
267: MPI_Op MPIU_SUM = 0;
268: #endif
/*
  PetscSum_Local - user-defined MPI reduction performing elementwise addition for
  datatypes the built-in MPI_SUM cannot handle (__float128, __complex128, __fp16),
  as well as MPIU_REAL/MPIU_COMPLEX.

  Registered for MPIU_SUM___FP16___FLOAT128 (and for MPIU_SUM when PetscReal is one
  of these extended types); called by MPI, not by users.
*/
PETSC_EXTERN void MPIAPI PetscSum_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype)
{
  PetscMPIInt i, count = *cnt;

  PetscFunctionBegin;
  if (*datatype == MPIU_REAL) {
    PetscReal *xin = (PetscReal *)in, *xout = (PetscReal *)out;
    for (i = 0; i < count; i++) xout[i] += xin[i];
  }
#if defined(PETSC_HAVE_COMPLEX)
  else if (*datatype == MPIU_COMPLEX) {
    PetscComplex *xin = (PetscComplex *)in, *xout = (PetscComplex *)out;
    for (i = 0; i < count; i++) xout[i] += xin[i];
  }
#endif
#if defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)
  else if (*datatype == MPIU___FLOAT128) {
    __float128 *xin = (__float128 *)in, *xout = (__float128 *)out;
    for (i = 0; i < count; i++) xout[i] += xin[i];
#if defined(PETSC_HAVE_COMPLEX)
  } else if (*datatype == MPIU___COMPLEX128) {
    __complex128 *xin = (__complex128 *)in, *xout = (__complex128 *)out;
    for (i = 0; i < count; i++) xout[i] += xin[i];
#endif
  }
#endif
#if defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16)
  else if (*datatype == MPIU___FP16) {
    /* explicit narrowing cast: the addition promotes to float, the result is stored as __fp16 */
    __fp16 *xin = (__fp16 *)in, *xout = (__fp16 *)out;
    for (i = 0; i < count; i++) xout[i] = (__fp16)(xin[i] + xout[i]);
  }
#endif
  else {
    /* the error text enumerates only the datatypes compiled in to this build */
#if (!defined(PETSC_HAVE_REAL___FLOAT128) || defined(PETSC_SKIP_REAL___FLOAT128)) && (!defined(PETSC_HAVE_REAL___FP16) || defined(PETSC_SKIP_REAL___FP16))
    PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL or MPIU_COMPLEX data types"));
#elif !defined(PETSC_HAVE_REAL___FP16) || defined(PETSC_SKIP_REAL___FP16)
    PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL, MPIU_COMPLEX, MPIU___FLOAT128, or MPIU___COMPLEX128 data types"));
#elif !defined(PETSC_HAVE_REAL___FLOAT128) || defined(PETSC_SKIP_REAL___FLOAT128)
    PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL, MPIU_COMPLEX, or MPIU___FP16 data types"));
#else
    PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL, MPIU_COMPLEX, MPIU___FLOAT128, MPIU___COMPLEX128, or MPIU___FP16 data types"));
#endif
    PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
  }
  PetscFunctionReturnVoid();
}
316: #endif
318: #if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16)
319: MPI_Op MPIU_MAX = 0;
320: MPI_Op MPIU_MIN = 0;
322: PETSC_EXTERN void MPIAPI PetscMax_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype)
323: {
324: PetscInt i, count = *cnt;
326: PetscFunctionBegin;
327: if (*datatype == MPIU_REAL) {
328: PetscReal *xin = (PetscReal *)in, *xout = (PetscReal *)out;
329: for (i = 0; i < count; i++) xout[i] = PetscMax(xout[i], xin[i]);
330: }
331: #if defined(PETSC_HAVE_COMPLEX)
332: else if (*datatype == MPIU_COMPLEX) {
333: PetscComplex *xin = (PetscComplex *)in, *xout = (PetscComplex *)out;
334: for (i = 0; i < count; i++) xout[i] = PetscRealPartComplex(xout[i]) < PetscRealPartComplex(xin[i]) ? xin[i] : xout[i];
335: }
336: #endif
337: else {
338: PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL or MPIU_COMPLEX data types"));
339: PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
340: }
341: PetscFunctionReturnVoid();
342: }
344: PETSC_EXTERN void MPIAPI PetscMin_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype)
345: {
346: PetscInt i, count = *cnt;
348: PetscFunctionBegin;
349: if (*datatype == MPIU_REAL) {
350: PetscReal *xin = (PetscReal *)in, *xout = (PetscReal *)out;
351: for (i = 0; i < count; i++) xout[i] = PetscMin(xout[i], xin[i]);
352: }
353: #if defined(PETSC_HAVE_COMPLEX)
354: else if (*datatype == MPIU_COMPLEX) {
355: PetscComplex *xin = (PetscComplex *)in, *xout = (PetscComplex *)out;
356: for (i = 0; i < count; i++) xout[i] = PetscRealPartComplex(xout[i]) > PetscRealPartComplex(xin[i]) ? xin[i] : xout[i];
357: }
358: #endif
359: else {
360: PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL or MPIU_SCALAR data (i.e. double or complex) types"));
361: PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
362: }
363: PetscFunctionReturnVoid();
364: }
365: #endif
/*
  Petsc_Counter_Attr_DeleteFn - Private routine to delete internal tag/name counter storage when a communicator is freed.

  This is called by MPI, not by users. This is called by MPI_Comm_free() when the communicator that has this data as an attribute is freed.

  Note: this is declared extern "C" because it is passed to MPI_Comm_create_keyval()
*/
PETSC_EXTERN PetscMPIInt MPIAPI Petsc_Counter_Attr_DeleteFn(MPI_Comm comm, PetscMPIInt keyval, void *count_val, void *extra_state)
{
  PetscCommCounter *counter = (PetscCommCounter *)count_val;
  struct PetscCommStash *comms = counter->comms, *pcomm;

  PetscFunctionBegin;
  PetscCallReturnMPI(PetscInfo(NULL, "Deleting counter data in an MPI_Comm %ld\n", (long)comm));
  PetscCallReturnMPI(PetscFree(counter->iflags));
  /* walk the linked list of stashed communicators, freeing each MPI_Comm and its list node */
  while (comms) {
    PetscCallMPIReturnMPI(MPI_Comm_free(&comms->comm));
    pcomm = comms;
    comms = comms->next;
    PetscCallReturnMPI(PetscFree(pcomm));
  }
  PetscCallReturnMPI(PetscFree(counter));
  PetscFunctionReturn(MPI_SUCCESS);
}
/*
  This is invoked on the outer comm as a result of either PetscCommDestroy() (via MPI_Comm_delete_attr) or when the user
  calls MPI_Comm_free().

  This is the only entry point for breaking the links between inner and outer comms.

  This is called by MPI, not by users. This is called when MPI_Comm_free() is called on the communicator.

  Note: this is declared extern "C" because it is passed to MPI_Comm_create_keyval()
*/
PETSC_EXTERN PetscMPIInt MPIAPI Petsc_InnerComm_Attr_DeleteFn(MPI_Comm comm, PetscMPIInt keyval, void *attr_val, void *extra_state)
{
  /* union lets the MPI_Comm handle travel through the void* attribute value without
     assuming MPI_Comm is pointer-sized */
  union
  {
    MPI_Comm comm;
    void *ptr;
  } icomm;

  PetscFunctionBegin;
  PetscCheckReturnMPI(keyval == Petsc_InnerComm_keyval, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Unexpected keyval");
  icomm.ptr = attr_val;
  if (PetscDefined(USE_DEBUG)) {
    /* Error out if the inner/outer comms are not correctly linked through their Outer/InnerComm attributes */
    PetscMPIInt flg;
    union
    {
      MPI_Comm comm;
      void *ptr;
    } ocomm;

    PetscCallMPIReturnMPI(MPI_Comm_get_attr(icomm.comm, Petsc_OuterComm_keyval, &ocomm, &flg));
    PetscCheckReturnMPI(flg, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Inner comm does not have OuterComm attribute");
    PetscCheckReturnMPI(ocomm.comm == comm, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Inner comm's OuterComm attribute does not point to outer PETSc comm");
  }
  /* deleting the attribute triggers Petsc_OuterComm_Attr_DeleteFn on the inner comm */
  PetscCallMPIReturnMPI(MPI_Comm_delete_attr(icomm.comm, Petsc_OuterComm_keyval));
  PetscCallReturnMPI(PetscInfo(NULL, "User MPI_Comm %ld is being unlinked from inner PETSc comm %ld\n", (long)comm, (long)icomm.comm));
  PetscFunctionReturn(MPI_SUCCESS);
}
/*
 * This is invoked on the inner comm when Petsc_InnerComm_Attr_DeleteFn calls MPI_Comm_delete_attr(). It should not be reached any other way.
 */
PETSC_EXTERN PetscMPIInt MPIAPI Petsc_OuterComm_Attr_DeleteFn(MPI_Comm comm, PetscMPIInt keyval, void *attr_val, void *extra_state)
{
  PetscFunctionBegin;
  /* nothing to free here: the attribute value is just a handle back to the outer comm */
  PetscCallReturnMPI(PetscInfo(NULL, "Removing reference to PETSc communicator embedded in a user MPI_Comm %ld\n", (long)comm));
  PetscFunctionReturn(MPI_SUCCESS);
}
442: PETSC_EXTERN PetscMPIInt MPIAPI Petsc_ShmComm_Attr_DeleteFn(MPI_Comm, PetscMPIInt, void *, void *);
444: #if defined(PETSC_USE_PETSC_MPI_EXTERNAL32)
445: PETSC_EXTERN PetscMPIInt PetscDataRep_extent_fn(MPI_Datatype, MPI_Aint *, void *);
446: PETSC_EXTERN PetscMPIInt PetscDataRep_read_conv_fn(void *, MPI_Datatype, PetscMPIInt, void *, MPI_Offset, void *);
447: PETSC_EXTERN PetscMPIInt PetscDataRep_write_conv_fn(void *, MPI_Datatype, PetscMPIInt, void *, MPI_Offset, void *);
448: #endif
450: PetscMPIInt PETSC_MPI_ERROR_CLASS = MPI_ERR_LASTCODE, PETSC_MPI_ERROR_CODE;
452: PETSC_INTERN int PetscGlobalArgc;
453: PETSC_INTERN char **PetscGlobalArgs, **PetscGlobalArgsFortran;
454: int PetscGlobalArgc = 0;
455: char **PetscGlobalArgs = NULL;
456: char **PetscGlobalArgsFortran = NULL;
457: PetscSegBuffer PetscCitationsList;
/*
  PetscCitationsInitialize - Creates the citation registry and seeds it with the two
  citations every PETSc run should report (the PETSc/TAO users manual and the
  object-oriented design paper). Called during PetscInitialize().
*/
PetscErrorCode PetscCitationsInitialize(void)
{
  PetscFunctionBegin;
  PetscCall(PetscSegBufferCreate(1, 10000, &PetscCitationsList));

  PetscCall(PetscCitationsRegister("@TechReport{petsc-user-ref,\n\
Author = {Satish Balay and Shrirang Abhyankar and Mark~F. Adams and Steven Benson and Jed Brown\n\
and Peter Brune and Kris Buschelman and Emil Constantinescu and Lisandro Dalcin and Alp Dener\n\
and Victor Eijkhout and Jacob Faibussowitsch and William~D. Gropp and V\'{a}clav Hapla and Tobin Isaac and Pierre Jolivet\n\
and Dmitry Karpeev and Dinesh Kaushik and Matthew~G. Knepley and Fande Kong and Scott Kruger\n\
and Dave~A. May and Lois Curfman McInnes and Richard Tran Mills and Lawrence Mitchell and Todd Munson\n\
and Jose~E. Roman and Karl Rupp and Patrick Sanan and Jason Sarich and Barry~F. Smith and Hansol Suh\n\
and Stefano Zampini and Hong Zhang and Hong Zhang and Junchao Zhang},\n\
Title = {{PETSc/TAO} Users Manual},\n\
Number = {ANL-21/39 - Revision 3.24},\n\
Doi = {10.2172/2998643},\n\
Institution = {Argonne National Laboratory},\n\
Year = {2025}\n}\n",
                                   NULL));

  PetscCall(PetscCitationsRegister("@InProceedings{petsc-efficient,\n\
Author = {Satish Balay and William D. Gropp and Lois Curfman McInnes and Barry F. Smith},\n\
Title = {Efficient Management of Parallelism in Object Oriented Numerical Software Libraries},\n\
Booktitle = {Modern Software Tools in Scientific Computing},\n\
Editor = {E. Arge and A. M. Bruaset and H. P. Langtangen},\n\
Pages = {163--202},\n\
Publisher = {Birkh{\\\"{a}}user Press},\n\
Year = {1997}\n}\n",
                                   NULL));
  PetscFunctionReturn(PETSC_SUCCESS);
}
491: static char programname[PETSC_MAX_PATH_LEN] = ""; /* HP includes entire path in name */
/*
  PetscSetProgramName - Records the name of the running program (normally argv[0])
  for later retrieval with PetscGetProgramName().
*/
PetscErrorCode PetscSetProgramName(const char name[])
{
  PetscFunctionBegin;
  /* PetscStrncpy() NUL-terminates; names longer than PETSC_MAX_PATH_LEN are truncated */
  PetscCall(PetscStrncpy(programname, name, sizeof(programname)));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
  PetscGetProgramName - Gets the name of the running program.

  Not Collective

  Input Parameter:
. len - length of the string name

  Output Parameter:
. name - the name of the running program, provide a string of length `PETSC_MAX_PATH_LEN`

  Level: advanced

.seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArguments()`, `PetscInitialize()`
@*/
PetscErrorCode PetscGetProgramName(char name[], size_t len)
{
  PetscFunctionBegin;
  /* copies the name stashed by PetscSetProgramName(); truncated if len is too small */
  PetscCall(PetscStrncpy(name, programname, len));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
  PetscGetArgs - Allows you to access the raw command line arguments anywhere
  after `PetscInitialize()` is called but before `PetscFinalize()`.

  Not Collective, No Fortran Support

  Output Parameters:
+ argc - count of the number of command line arguments
- args - the command line arguments

  Level: intermediate

  Notes:
  This is usually used to pass the command line arguments into other libraries
  that are called internally deep in PETSc or the application.

  The first argument contains the program name as is normal for C programs.

  See `PetscGetArguments()` for a variant of this routine.

.seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArguments()`, `PetscInitialize()`
@*/
PetscErrorCode PetscGetArgs(int *argc, char ***args)
{
  PetscFunctionBegin;
  /* only rejects the post-finalize case; before initialization the globals are still 0/NULL */
  PetscCheck(PetscInitializeCalled || !PetscFinalizeCalled, PETSC_COMM_SELF, PETSC_ERR_ORDER, "You must call after PetscInitialize() but before PetscFinalize()");
  /* returns the globals captured by PetscInitialize(); no copy is made, callers must not free */
  *argc = PetscGlobalArgc;
  *args = PetscGlobalArgs;
  PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
  PetscGetArguments - Allows you to access the command line arguments anywhere
  after `PetscInitialize()` is called but before `PetscFinalize()`.

  Not Collective, No Fortran Support

  Output Parameter:
. args - the command line arguments

  Level: intermediate

  Note:
  This does NOT start with the program name and IS `NULL` terminated (the final argument is void)

  Use `PetscFreeArguments()` to return the memory used by the arguments.

  This makes a copy of the arguments and the array of arguments, while `PetscGetArgs()` does not make a copy,
  it returns the array of arguments that was passed into the main program.

.seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArgs()`, `PetscFreeArguments()`, `PetscInitialize()`
@*/
PetscErrorCode PetscGetArguments(char ***args)
{
  PetscInt i, argc = PetscGlobalArgc;

  PetscFunctionBegin;
  PetscCheck(PetscInitializeCalled || !PetscFinalizeCalled, PETSC_COMM_SELF, PETSC_ERR_ORDER, "You must call after PetscInitialize() but before PetscFinalize()");
  if (!argc) {
    *args = NULL;
    PetscFunctionReturn(PETSC_SUCCESS);
  }
  /* allocate argc slots: argc-1 copied strings (argv[0], the program name, is skipped)
     plus a trailing NULL terminator */
  PetscCall(PetscMalloc1(argc, args));
  for (i = 0; i < argc - 1; i++) PetscCall(PetscStrallocpy(PetscGlobalArgs[i + 1], &(*args)[i]));
  (*args)[argc - 1] = NULL;
  PetscFunctionReturn(PETSC_SUCCESS);
}
590: /*@C
591: PetscFreeArguments - Frees the memory obtained with `PetscGetArguments()`
593: Not Collective, No Fortran Support
595: Output Parameter:
596: . args - the command line arguments
598: Level: intermediate
600: Developer Note:
601: This should be PetscRestoreArguments()
603: .seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArgs()`, `PetscGetArguments()`
604: @*/
605: PetscErrorCode PetscFreeArguments(char **args)
606: {
607: PetscFunctionBegin;
608: if (args) {
609: PetscInt i = 0;
611: while (args[i]) PetscCall(PetscFree(args[i++]));
612: PetscCall(PetscFree(args));
613: }
614: PetscFunctionReturn(PETSC_SUCCESS);
615: }
617: #if PetscDefined(HAVE_SAWS)
618: #include <petscconfiginfo.h>
/*
  PetscInitializeSAWs - Starts the SAWs (Scientific Application Web server) publishing
  service, processing the -saws_* command line options. Only MPI rank 0 runs the server.

  help - the application help string, published on the server's front page
*/
PETSC_INTERN PetscErrorCode PetscInitializeSAWs(const char help[])
{
  PetscFunctionBegin;
  if (!PetscGlobalRank) {
    char cert[PETSC_MAX_PATH_LEN], root[PETSC_MAX_PATH_LEN], *intro, programname[64], *appline, *options, version[64];
    int port;
    PetscBool flg, rootlocal = PETSC_FALSE, flg2, selectport = PETSC_FALSE;
    size_t applinelen, introlen;
    char sawsurl[256];

    /* -saws_log [file]: route SAWs' own logging to a file (or its default when none given) */
    PetscCall(PetscOptionsHasName(NULL, NULL, "-saws_log", &flg));
    if (flg) {
      char sawslog[PETSC_MAX_PATH_LEN];

      PetscCall(PetscOptionsGetString(NULL, NULL, "-saws_log", sawslog, sizeof(sawslog), NULL));
      if (sawslog[0]) PetscCallSAWs(SAWs_Set_Use_Logfile, (sawslog));
      else PetscCallSAWs(SAWs_Set_Use_Logfile, (NULL));
    }
    /* -saws_https <cert>: serve over HTTPS with the given certificate */
    PetscCall(PetscOptionsGetString(NULL, NULL, "-saws_https", cert, sizeof(cert), &flg));
    if (flg) PetscCallSAWs(SAWs_Set_Use_HTTPS, (cert));
    /* choose the port: auto-select a free one, or honor an explicit -saws_port */
    PetscCall(PetscOptionsGetBool(NULL, NULL, "-saws_port_auto_select", &selectport, NULL));
    if (selectport) {
      PetscCallSAWs(SAWs_Get_Available_Port, (&port));
      PetscCallSAWs(SAWs_Set_Port, (port));
    } else {
      PetscCall(PetscOptionsGetInt(NULL, NULL, "-saws_port", &port, &flg));
      if (flg) PetscCallSAWs(SAWs_Set_Port, (port));
    }
    /* document root: explicit -saws_root, or the PETSc-shipped pages when -saws_options is given */
    PetscCall(PetscOptionsGetString(NULL, NULL, "-saws_root", root, sizeof(root), &flg));
    if (flg) {
      PetscCallSAWs(SAWs_Set_Document_Root, (root));
      /* root of "." means source files may be served from the working directory */
      PetscCall(PetscStrcmp(root, ".", &rootlocal));
    } else {
      PetscCall(PetscOptionsHasName(NULL, NULL, "-saws_options", &flg));
      if (flg) {
        PetscCall(PetscStrreplace(PETSC_COMM_WORLD, "${PETSC_DIR}/share/petsc/saws", root, sizeof(root)));
        PetscCallSAWs(SAWs_Set_Document_Root, (root));
      }
    }
    /* -saws_local: serve javascript assets from <root>/js instead of the network */
    PetscCall(PetscOptionsHasName(NULL, NULL, "-saws_local", &flg2));
    if (flg2) {
      char jsdir[PETSC_MAX_PATH_LEN];
      PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_SUP, "-saws_local option requires -saws_root option");
      PetscCall(PetscSNPrintf(jsdir, sizeof(jsdir), "%s/js", root));
      PetscCall(PetscTestDirectory(jsdir, 'r', &flg));
      PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_FILE_READ, "-saws_local option requires js directory in root directory");
      PetscCallSAWs(SAWs_Push_Local_Header, ());
    }
    /* build the front page: program name, options used, and the application help text */
    PetscCall(PetscGetProgramName(programname, sizeof(programname)));
    PetscCall(PetscStrlen(help, &applinelen));
    introlen = 4096 + applinelen;
    applinelen += 1024;
    PetscCall(PetscMalloc(applinelen, &appline));
    PetscCall(PetscMalloc(introlen, &intro));

    if (rootlocal) {
      /* only link to the source listing if <program>.c.html actually exists locally */
      PetscCall(PetscSNPrintf(appline, applinelen, "%s.c.html", programname));
      PetscCall(PetscTestFile(appline, 'r', &rootlocal));
    }
    PetscCall(PetscOptionsGetAll(NULL, &options));
    if (rootlocal && help) {
      PetscCall(PetscSNPrintf(appline, applinelen, "<center> Running <a href=\"%s.c.html\">%s</a> %s</center><br><center><pre>%s</pre></center><br>\n", programname, programname, options, help));
    } else if (help) {
      PetscCall(PetscSNPrintf(appline, applinelen, "<center>Running %s %s</center><br><center><pre>%s</pre></center><br>", programname, options, help));
    } else {
      PetscCall(PetscSNPrintf(appline, applinelen, "<center> Running %s %s</center><br>\n", programname, options));
    }
    PetscCall(PetscFree(options));
    PetscCall(PetscGetVersion(version, sizeof(version)));
    PetscCall(PetscSNPrintf(intro, introlen,
                            "<body>\n"
                            "<center><h2> <a href=\"https://petsc.org/\">PETSc</a> Application Web server powered by <a href=\"https://bitbucket.org/saws/saws\">SAWs</a> </h2></center>\n"
                            "<center>This is the default PETSc application dashboard, from it you can access any published PETSc objects or logging data</center><br><center>%s configured with %s</center><br>\n"
                            "%s",
                            version, petscconfigureoptions, appline));
    PetscCallSAWs(SAWs_Push_Body, ("index.html", 0, intro));
    PetscCall(PetscFree(intro));
    PetscCall(PetscFree(appline));
    if (selectport) {
      PetscBool silent;

      /* another process may have grabbed the port so keep trying */
      while (SAWs_Initialize()) {
        PetscCallSAWs(SAWs_Get_Available_Port, (&port));
        PetscCallSAWs(SAWs_Set_Port, (port));
      }

      PetscCall(PetscOptionsGetBool(NULL, NULL, "-saws_port_auto_select_silent", &silent, NULL));
      if (!silent) {
        PetscCallSAWs(SAWs_Get_FullURL, (sizeof(sawsurl), sawsurl));
        PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Point your browser to %s for SAWs\n", sawsurl));
      }
    } else {
      PetscCallSAWs(SAWs_Initialize, ());
    }
    PetscCall(PetscCitationsRegister("@TechReport{ saws,\n"
                                     " Author = {Matt Otten and Jed Brown and Barry Smith},\n"
                                     " Title = {Scientific Application Web Server (SAWs) Users Manual},\n"
                                     " Institution = {Argonne National Laboratory},\n"
                                     " Year = 2013\n}\n",
                                     NULL));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
724: #endif
/* Things must be done before MPI_Init() when MPI is not yet initialized, and can be shared between C init and Fortran init */
PETSC_INTERN PetscErrorCode PetscPreMPIInit_Private(void)
{
  PetscFunctionBegin;
#if defined(PETSC_HAVE_HWLOC_SOLARIS_BUG)
  /* see MPI.py for details on this bug; the environment variable must be set before MPI (and hence hwloc) initializes */
  (void)setenv("HWLOC_COMPONENTS", "-x86", 1);
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}
737: #if PetscDefined(HAVE_ADIOS)
738: #include <adios.h>
739: #include <adios_read.h>
740: int64_t Petsc_adios_group;
741: #endif
742: #if PetscDefined(HAVE_OPENMP)
743: #include <omp.h>
744: PetscInt PetscNumOMPThreads;
745: #endif
747: #include <petsc/private/deviceimpl.h>
748: #if PetscDefined(HAVE_CUDA)
749: #include <petscdevice_cuda.h>
750: // REMOVE ME
751: cudaStream_t PetscDefaultCudaStream = NULL;
752: #endif
753: #if PetscDefined(HAVE_HIP)
754: #include <petscdevice_hip.h>
755: // REMOVE ME
756: hipStream_t PetscDefaultHipStream = NULL;
757: #endif
759: #if PetscDefined(HAVE_DLFCN_H)
760: #include <dlfcn.h>
761: #endif
762: PETSC_INTERN PetscErrorCode PetscLogInitialize(void);
763: #if PetscDefined(HAVE_VIENNACL)
764: PETSC_EXTERN PetscErrorCode PetscViennaCLInit(void);
765: PetscBool PetscViennaCLSynchronize = PETSC_FALSE;
766: #endif
768: PetscBool PetscCIEnabled = PETSC_FALSE, PetscCIEnabledPortableErrorOutput = PETSC_FALSE;
770: /*
771: PetscInitialize_Common - shared code between C and Fortran initialization
773: prog: program name
774: file: optional PETSc database file name. Might be in Fortran string format when 'ftn' is true
775: help: program help message
776: ftn: is it called from Fortran initialization (petscinitializef_)?
777: len: length of file string, used when Fortran is true
778: */
PETSC_INTERN PetscErrorCode PetscInitialize_Common(const char *prog, const char *file, const char *help, PetscBool ftn, PetscInt len)
{
  PetscMPIInt size;
  PetscBool   flg = PETSC_TRUE; /* scratch flag reused by several option queries below */
  char        hostname[256];
  PetscBool   blas_view_flag = PETSC_FALSE;

  PetscFunctionBegin;
  if (PetscInitializeCalled) PetscFunctionReturn(PETSC_SUCCESS);
  /* these must be initialized in a routine, not as a constant declaration */
  PETSC_STDOUT = stdout;
  PETSC_STDERR = stderr;

  /* PetscCall can be used from now */
  PetscErrorHandlingInitialized = PETSC_TRUE;

  /*
     The checking over compatible runtime libraries is complicated by the MPI ABI initiative
     https://wiki.mpich.org/mpich/index.php/ABI_Compatibility_Initiative which started with
       MPICH v3.1 (Released February 2014)
       IBM MPI v2.1 (December 2014)
       Intel MPI Library v5.0 (2014)
       Cray MPT v7.0.0 (June 2014)
     As of July 31, 2017 the ABI number still appears to be 12, that is all of the versions
     listed above and since that time are compatible.

     Unfortunately the MPI ABI initiative has not defined a way to determine the ABI number
     at compile time or runtime. Thus we will need to systematically track the allowed versions
     and how they are represented in the mpi.h and MPI_Get_library_version() output in order
     to perform the checking.

     Currently we only check for pre MPI ABI versions (and packages that do not follow the MPI ABI).

     Questions:

       Should the checks for ABI incompatibility be only on the major version number below?
       Presumably the output to stderr will be removed before a release.
  */
#if defined(PETSC_HAVE_MPI_GET_LIBRARY_VERSION)
  {
    char        mpilibraryversion[MPI_MAX_LIBRARY_VERSION_STRING];
    PetscMPIInt mpilibraryversionlength;

    PetscCallMPI(MPI_Get_library_version(mpilibraryversion, &mpilibraryversionlength));
    /* check for MPICH versions before MPI ABI initiative */
#if defined(MPICH_VERSION)
#if MPICH_NUMVERSION < 30100000
    {
      char     *ver, *lf;
      PetscBool flg = PETSC_FALSE; /* NOTE(review): intentionally shadows the outer flg for the scope of this check */

      PetscCall(PetscStrstr(mpilibraryversion, "MPICH Version:", &ver));
      if (ver) {
        PetscCall(PetscStrchr(ver, '\n', &lf));
        if (lf) {
          /* terminate the version line so the compile-time MPICH_VERSION can be compared as a suffix */
          *lf = 0;
          PetscCall(PetscStrendswith(ver, MPICH_VERSION, &flg));
        }
      }
      if (!flg) {
        PetscCall(PetscInfo(NULL, "PETSc warning --- MPICH library version \n%s does not match what PETSc was compiled with %s.\n", mpilibraryversion, MPICH_VERSION));
        flg = PETSC_TRUE; /* NOTE(review): dead store -- this inner flg goes out of scope immediately */
      }
    }
#endif
    /* check for Open MPI version, it is not part of the MPI ABI initiative (is it part of another initiative that needs to be handled?) */
#elif defined(PETSC_HAVE_OPENMPI)
    {
      char     *ver, bs[MPI_MAX_LIBRARY_VERSION_STRING], *bsf;
      PetscBool flg = PETSC_FALSE; /* NOTE(review): intentionally shadows the outer flg for the scope of this check */
#define PSTRSZ 2
      char ompistr1[PSTRSZ][MPI_MAX_LIBRARY_VERSION_STRING] = {"Open MPI", "FUJITSU MPI"};
      char ompistr2[PSTRSZ][MPI_MAX_LIBRARY_VERSION_STRING] = {"v", "Library "};
      int  i;
      for (i = 0; i < PSTRSZ; i++) {
        PetscCall(PetscStrstr(mpilibraryversion, ompistr1[i], &ver));
        if (ver) {
          PetscCall(PetscSNPrintf(bs, MPI_MAX_LIBRARY_VERSION_STRING, "%s%d.%d", ompistr2[i], PETSC_PKG_OPENMPI_VERSION_MAJOR, PETSC_PKG_OPENMPI_VERSION_MINOR));
          PetscCall(PetscStrstr(ver, bs, &bsf));
          if (bsf) flg = PETSC_TRUE;
          break;
        }
      }
      if (!flg) {
        PetscCall(PetscInfo(NULL, "PETSc warning --- Open MPI library version \n%s does not match what PETSc was compiled with %d.%d.\n", mpilibraryversion, PETSC_PKG_OPENMPI_VERSION_MAJOR, PETSC_PKG_OPENMPI_VERSION_MINOR));
        flg = PETSC_TRUE; /* NOTE(review): dead store -- this inner flg goes out of scope immediately */
      }
    }
#endif
  }
#endif

#if defined(PETSC_HAVE_DLADDR) && !(defined(__cray__) && defined(__clang__))
  /* These symbols are currently in the Open MPI and MPICH libraries; they may not always be, in that case the test will simply not detect the problem */
  PetscCheck(!dlsym(RTLD_DEFAULT, "ompi_mpi_init") || !dlsym(RTLD_DEFAULT, "MPID_Abort"), PETSC_COMM_SELF, PETSC_ERR_MPI_LIB_INCOMP, "Application was linked against both Open MPI and MPICH based MPI libraries and will not run correctly");
#endif

  PetscCall(PetscOptionsCreateDefault());

  PetscFinalizeCalled = PETSC_FALSE;

  PetscCall(PetscSetProgramName(prog));
  PetscCall(PetscSpinlockCreate(&PetscViewerASCIISpinLockOpen));
  PetscCall(PetscSpinlockCreate(&PetscViewerASCIISpinLockStdout));
  PetscCall(PetscSpinlockCreate(&PetscViewerASCIISpinLockStderr));
  PetscCall(PetscSpinlockCreate(&PetscCommSpinLock));

  /* the user may have assigned a subcommunicator to PETSC_COMM_WORLD before PetscInitialize() */
  if (PETSC_COMM_WORLD == MPI_COMM_NULL) PETSC_COMM_WORLD = MPI_COMM_WORLD;
  PetscCallMPI(MPI_Comm_set_errhandler(PETSC_COMM_WORLD, MPI_ERRORS_RETURN));

  if (PETSC_MPI_ERROR_CLASS == MPI_ERR_LASTCODE) {
    PetscCallMPI(MPI_Add_error_class(&PETSC_MPI_ERROR_CLASS));
    PetscCallMPI(MPI_Add_error_code(PETSC_MPI_ERROR_CLASS, &PETSC_MPI_ERROR_CODE));
  }

  /* Done after init due to a bug in MPICH-GM? */
  PetscCall(PetscErrorPrintfInitialize());

  PetscCallMPI(MPI_Comm_rank(MPI_COMM_WORLD, &PetscGlobalRank));
  PetscCallMPI(MPI_Comm_size(MPI_COMM_WORLD, &PetscGlobalSize));

  MPIU_ENUM        = MPI_INT;
  MPIU_FORTRANADDR = (sizeof(void *) == sizeof(int)) ? MPI_INT : MPIU_INT64;
  /* pick the MPI datatype whose size matches size_t on this platform */
  if (sizeof(size_t) == sizeof(unsigned)) MPIU_SIZE_T = MPI_UNSIGNED;
  else if (sizeof(size_t) == sizeof(unsigned long)) MPIU_SIZE_T = MPI_UNSIGNED_LONG;
#if defined(PETSC_SIZEOF_LONG_LONG)
  else if (sizeof(size_t) == sizeof(unsigned long long)) MPIU_SIZE_T = MPI_UNSIGNED_LONG_LONG;
#endif
  else SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_SUP_SYS, "Could not find MPI type for size_t");

  /*
     Initialized the global complex variable; this is because with
     shared libraries the constructors for global variables
     are not called; at least on IRIX.
  */
#if defined(PETSC_HAVE_COMPLEX)
  {
#if defined(PETSC_CLANGUAGE_CXX) && !defined(PETSC_USE_REAL___FLOAT128)
    PetscComplex ic(0.0, 1.0);
    PETSC_i = ic;
#else
    PETSC_i = _Complex_I;
#endif
  }
#endif /* PETSC_HAVE_COMPLEX */

  /*
     Create the PETSc MPI reduction operator that sums of the first
     half of the entries and maxes the second half.
  */
  PetscCallMPI(MPI_Op_create(MPIU_MaxSum_Local, 1, &MPIU_MAXSUM_OP));

#if defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)
  /* __float128 (and its complex) are shipped as raw bytes the size of 2 (4) doubles */
  PetscCallMPI(MPI_Type_contiguous(2, MPI_DOUBLE, &MPIU___FLOAT128));
  PetscCallMPI(MPI_Type_commit(&MPIU___FLOAT128));
#if defined(PETSC_HAVE_COMPLEX)
  PetscCallMPI(MPI_Type_contiguous(4, MPI_DOUBLE, &MPIU___COMPLEX128));
  PetscCallMPI(MPI_Type_commit(&MPIU___COMPLEX128));
#endif
#endif
#if defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16)
  PetscCallMPI(MPI_Type_contiguous(2, MPI_CHAR, &MPIU___FP16));
  PetscCallMPI(MPI_Type_commit(&MPIU___FP16));
#endif

#if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16)
  /* custom reduction ops are needed because MPI has no built-in arithmetic for these types */
  PetscCallMPI(MPI_Op_create(PetscSum_Local, 1, &MPIU_SUM));
  PetscCallMPI(MPI_Op_create(PetscMax_Local, 1, &MPIU_MAX));
  PetscCallMPI(MPI_Op_create(PetscMin_Local, 1, &MPIU_MIN));
#elif (defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)) || (defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16))
  PetscCallMPI(MPI_Op_create(PetscSum_Local, 1, &MPIU_SUM___FP16___FLOAT128));
#endif

  PetscCallMPI(MPI_Type_contiguous(2, MPIU_SCALAR, &MPIU_2SCALAR));
  PetscCallMPI(MPI_Op_create(PetscGarbageKeySortedIntersect, 1, &Petsc_Garbage_SetIntersectOp));
  PetscCallMPI(MPI_Type_commit(&MPIU_2SCALAR));

  /* create datatypes used by MPIU_MAXLOC, MPIU_MINLOC and PetscSplitReduction_Op */
#if !defined(PETSC_HAVE_MPIUNI)
  {
    PetscMPIInt  blockSizes[2]   = {1, 1};
    MPI_Aint     blockOffsets[2] = {offsetof(struct petsc_mpiu_real_int, v), offsetof(struct petsc_mpiu_real_int, i)};
    MPI_Datatype blockTypes[2]   = {MPIU_REAL, MPIU_INT}, tmpStruct;

    PetscCallMPI(MPI_Type_create_struct(2, blockSizes, blockOffsets, blockTypes, &tmpStruct));
    /* resize so the datatype extent matches the C struct, then the temporary can be freed */
    PetscCallMPI(MPI_Type_create_resized(tmpStruct, 0, sizeof(struct petsc_mpiu_real_int), &MPIU_REAL_INT));
    PetscCallMPI(MPI_Type_free(&tmpStruct));
    PetscCallMPI(MPI_Type_commit(&MPIU_REAL_INT));
  }
  {
    PetscMPIInt  blockSizes[2]   = {1, 1};
    MPI_Aint     blockOffsets[2] = {offsetof(struct petsc_mpiu_scalar_int, v), offsetof(struct petsc_mpiu_scalar_int, i)};
    MPI_Datatype blockTypes[2]   = {MPIU_SCALAR, MPIU_INT}, tmpStruct;

    PetscCallMPI(MPI_Type_create_struct(2, blockSizes, blockOffsets, blockTypes, &tmpStruct));
    PetscCallMPI(MPI_Type_create_resized(tmpStruct, 0, sizeof(struct petsc_mpiu_scalar_int), &MPIU_SCALAR_INT));
    PetscCallMPI(MPI_Type_free(&tmpStruct));
    PetscCallMPI(MPI_Type_commit(&MPIU_SCALAR_INT));
  }
#endif

#if defined(PETSC_USE_64BIT_INDICES)
  PetscCallMPI(MPI_Type_contiguous(2, MPIU_INT, &MPIU_2INT));
  PetscCallMPI(MPI_Type_commit(&MPIU_2INT));

#if !defined(PETSC_HAVE_MPIUNI)
  {
    int          blockSizes[]   = {1, 1};
    MPI_Aint     blockOffsets[] = {offsetof(struct petsc_mpiu_int_mpiint, a), offsetof(struct petsc_mpiu_int_mpiint, b)};
    MPI_Datatype blockTypes[]   = {MPIU_INT, MPI_INT}, tmpStruct;

    PetscCallMPI(MPI_Type_create_struct(2, blockSizes, blockOffsets, blockTypes, &tmpStruct));
    PetscCallMPI(MPI_Type_create_resized(tmpStruct, 0, sizeof(struct petsc_mpiu_int_mpiint), &MPIU_INT_MPIINT));
    PetscCallMPI(MPI_Type_free(&tmpStruct));
    PetscCallMPI(MPI_Type_commit(&MPIU_INT_MPIINT));
  }
#endif
#endif
  PetscCallMPI(MPI_Type_contiguous(4, MPI_INT, &MPI_4INT));
  PetscCallMPI(MPI_Type_commit(&MPI_4INT));
  PetscCallMPI(MPI_Type_contiguous(4, MPIU_INT, &MPIU_4INT));
  PetscCallMPI(MPI_Type_commit(&MPIU_4INT));

  /*
     Attributes to be set on PETSc communicators
  */
  PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_Counter_Attr_DeleteFn, &Petsc_Counter_keyval, NULL));
  PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_InnerComm_Attr_DeleteFn, &Petsc_InnerComm_keyval, NULL));
  PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_OuterComm_Attr_DeleteFn, &Petsc_OuterComm_keyval, NULL));
  PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_ShmComm_Attr_DeleteFn, &Petsc_ShmComm_keyval, NULL));
  PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, MPI_COMM_NULL_DELETE_FN, &Petsc_CreationIdx_keyval, NULL));
  PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, MPI_COMM_NULL_DELETE_FN, &Petsc_Garbage_HMap_keyval, NULL));

#if defined(PETSC_USE_FORTRAN_BINDINGS)
  /* len (the Fortran length of file) is consumed only on this branch */
  if (ftn) PetscCall(PetscInitFortran_Private(file, len));
  else
#endif
    PetscCall(PetscOptionsInsert(NULL, &PetscGlobalArgc, &PetscGlobalArgs, file));

  if (PetscDefined(HAVE_MPIUNI)) {
    /* detect being launched by mpiexec with more than one rank, which an MPI-Uni build cannot support */
    const char *mpienv = getenv("PMI_SIZE");
    if (!mpienv) mpienv = getenv("OMPI_COMM_WORLD_SIZE");
    if (mpienv) {
      PetscInt  isize;
      PetscBool mflag = PETSC_FALSE;

      PetscCall(PetscOptionsStringToInt(mpienv, &isize));
      PetscCall(PetscOptionsGetBool(NULL, NULL, "-mpiuni-allow-multiprocess-launch", &mflag, NULL));
      PetscCheck(isize == 1 || mflag, MPI_COMM_SELF, PETSC_ERR_MPI, "You are using an MPI-uni (sequential) install of PETSc but trying to launch parallel jobs; you need full MPI version of PETSc. Or run with -mpiuni-allow-multiprocess-launch to allow multiple independent MPI-uni jobs.");
    }
  }

  /* call a second time so it can look in the options database */
  PetscCall(PetscErrorPrintfInitialize());

  /*
     Check system options and print help
  */
  PetscCall(PetscOptionsCheckInitial_Private(help));

  /*
     Creates the logging data structures; this is enabled even if logging is not turned on
     This is the last thing we do before returning to the user code to prevent having the
     logging numbers contaminated by any startup time associated with MPI
  */
  PetscCall(PetscLogInitialize());

  /*
     Initialize PetscDevice and PetscDeviceContext

     Note to any future devs thinking of moving this, proper initialization requires:
     1. MPI initialized
     2. Options DB initialized
     3. PETSc error handling initialized, specifically signal handlers. This expects to set up
        its own SIGSEV handler via the push/pop interface.
     4. Logging initialized
  */
  PetscCall(PetscDeviceInitializeFromOptions_Internal(PETSC_COMM_WORLD));

#if PetscDefined(HAVE_VIENNACL)
  /* -log_view implies synchronization so GPU timings are attributed to the right events */
  flg = PETSC_FALSE;
  PetscCall(PetscOptionsHasName(NULL, NULL, "-log_view", &flg));
  if (!flg) PetscCall(PetscOptionsGetBool(NULL, NULL, "-viennacl_synchronize", &flg, NULL));
  PetscViennaCLSynchronize = flg;
  PetscCall(PetscViennaCLInit());
#endif

  PetscCall(PetscCitationsInitialize());

#if defined(PETSC_HAVE_SAWS)
  PetscCall(PetscInitializeSAWs(ftn ? NULL : help));
  flg = PETSC_FALSE;
  PetscCall(PetscOptionsHasName(NULL, NULL, "-stack_view", &flg));
  if (flg) PetscCall(PetscStackViewSAWs());
#endif

  /*
     Load the dynamic libraries (on machines that support them), this registers all
     the solvers etc. (On non-dynamic machines this initializes the PetscDraw and PetscViewer classes)
  */
  PetscCall(PetscInitialize_DynamicLibraries());

  PetscCallMPI(MPI_Comm_size(PETSC_COMM_WORLD, &size));
  PetscCall(PetscInfo(NULL, "PETSc successfully started: number of processors = %d\n", size));
  PetscCall(PetscGetHostName(hostname, sizeof(hostname)));
  PetscCall(PetscInfo(NULL, "Running on machine: %s\n", hostname));
#if defined(PETSC_HAVE_OPENMP)
  {
    PetscBool omp_view_flag;
    char     *threads = getenv("OMP_NUM_THREADS");

    /* environment provides the default thread count; -omp_num_threads overrides it below */
    if (threads) {
      PetscCall(PetscInfo(NULL, "Number of OpenMP threads %s (as given by OMP_NUM_THREADS)\n", threads));
      (void)sscanf(threads, "%" PetscInt_FMT, &PetscNumOMPThreads);
    } else {
      PetscNumOMPThreads = (PetscInt)omp_get_max_threads();
      PetscCall(PetscInfo(NULL, "Number of OpenMP threads %" PetscInt_FMT " (as given by omp_get_max_threads())\n", PetscNumOMPThreads));
    }
    PetscOptionsBegin(PETSC_COMM_WORLD, NULL, "OpenMP options", "Sys");
    PetscCall(PetscOptionsInt("-omp_num_threads", "Number of OpenMP threads to use (can also use environmental variable OMP_NUM_THREADS", "None", PetscNumOMPThreads, &PetscNumOMPThreads, &flg));
    PetscCall(PetscOptionsName("-omp_view", "Display OpenMP number of threads", NULL, &omp_view_flag));
    PetscOptionsEnd();
    if (flg) {
      PetscCall(PetscInfo(NULL, "Number of OpenMP threads %" PetscInt_FMT " (given by -omp_num_threads)\n", PetscNumOMPThreads));
      omp_set_num_threads((int)PetscNumOMPThreads);
    }
    if (omp_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "OpenMP: number of threads %" PetscInt_FMT "\n", PetscNumOMPThreads));
  }
#endif

  PetscOptionsBegin(PETSC_COMM_WORLD, NULL, "BLAS options", "Sys");
  PetscCall(PetscOptionsName("-blas_view", "Display number of threads to use for BLAS operations", NULL, &blas_view_flag));
#if defined(PETSC_HAVE_BLI_THREAD_SET_NUM_THREADS) || defined(PETSC_HAVE_MKL_SET_NUM_THREADS) || defined(PETSC_HAVE_OPENBLAS_SET_NUM_THREADS)
  {
    char *threads = NULL;

    /* determine any default number of threads requested in the environment; TODO: Apple libraries? */
#if defined(PETSC_HAVE_BLI_THREAD_SET_NUM_THREADS)
    threads = getenv("BLIS_NUM_THREADS");
    if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of BLIS threads %s given by BLIS_NUM_THREADS\n", threads));
    if (!threads) {
      threads = getenv("OMP_NUM_THREADS");
      if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of BLIS threads %s given by OMP_NUM_THREADS\n", threads));
    }
#elif defined(PETSC_HAVE_MKL_SET_NUM_THREADS)
    threads = getenv("MKL_NUM_THREADS");
    if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of MKL threads %s given by MKL_NUM_THREADS\n", threads));
    if (!threads) {
      threads = getenv("OMP_NUM_THREADS");
      if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of MKL threads %s given by OMP_NUM_THREADS\n", threads));
    }
#elif defined(PETSC_HAVE_OPENBLAS_SET_NUM_THREADS)
    threads = getenv("OPENBLAS_NUM_THREADS");
    if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of OpenBLAS threads %s given by OPENBLAS_NUM_THREADS\n", threads));
    if (!threads) {
      threads = getenv("OMP_NUM_THREADS");
      if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of OpenBLAS threads %s given by OMP_NUM_THREADS\n", threads));
    }
#endif
    if (threads) (void)sscanf(threads, "%" PetscInt_FMT, &PetscNumBLASThreads);
    PetscCall(PetscOptionsInt("-blas_num_threads", "Number of threads to use for BLAS operations", "None", PetscNumBLASThreads, &PetscNumBLASThreads, &flg));
    if (flg) PetscCall(PetscInfo(NULL, "BLAS: Command line number of BLAS thread %" PetscInt_FMT "given by -blas_num_threads\n", PetscNumBLASThreads));
    if (flg || threads) {
      PetscCall(PetscBLASSetNumThreads(PetscNumBLASThreads));
      if (blas_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "BLAS: number of threads %" PetscInt_FMT "\n", PetscNumBLASThreads));
    }
  }
#elif defined(PETSC_HAVE_APPLE_ACCELERATE)
  PetscCall(PetscInfo(NULL, "BLAS: Apple Accelerate library, thread support with no user control\n"));
  if (blas_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "BLAS: Apple Accelerate library, thread support with no user control\n"));
#else
  if (blas_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "BLAS: no thread support\n"));
#endif
  PetscOptionsEnd();

#if defined(PETSC_USE_PETSC_MPI_EXTERNAL32)
  /*
     Tell MPI about our own data representation converter, this would/should be used if extern32 is not supported by the MPI

     Currently not used because it is not supported by MPICH.
  */
  if (!PetscBinaryBigEndian()) PetscCallMPI(MPI_Register_datarep((char *)"petsc", PetscDataRep_read_conv_fn, PetscDataRep_write_conv_fn, PetscDataRep_extent_fn, NULL));
#endif

#if defined(PETSC_SERIALIZE_FUNCTIONS)
  PetscCall(PetscFPTCreate(10000));
#endif

#if defined(PETSC_HAVE_HWLOC)
  {
    PetscViewer viewer;
    PetscCall(PetscOptionsCreateViewer(PETSC_COMM_WORLD, NULL, NULL, "-process_view", &viewer, NULL, &flg));
    if (flg) {
      PetscCall(PetscProcessPlacementView(viewer));
      PetscCall(PetscViewerDestroy(&viewer));
    }
  }
#endif

  flg = PETSC_TRUE;
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-viewfromoptions", &flg, NULL));
  if (!flg) PetscCall(PetscOptionsPushCreateViewerOff(PETSC_TRUE));

#if defined(PETSC_HAVE_ADIOS)
  PetscCallExternal(adios_init_noxml, PETSC_COMM_WORLD);
  PetscCallExternal(adios_declare_group, &Petsc_adios_group, "PETSc", "", adios_stat_default);
  PetscCallExternal(adios_select_method, Petsc_adios_group, "MPI", "", "");
  PetscCallExternal(adios_read_init_method, ADIOS_READ_METHOD_BP, PETSC_COMM_WORLD, "");
#endif

#if defined(__VALGRIND_H)
  PETSC_RUNNING_ON_VALGRIND = RUNNING_ON_VALGRIND ? PETSC_TRUE : PETSC_FALSE;
#if defined(PETSC_USING_DARWIN) && defined(PETSC_BLASLAPACK_SDOT_RETURNS_DOUBLE)
  if (PETSC_RUNNING_ON_VALGRIND) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "WARNING: Running valgrind with the macOS native BLAS and LAPACK can fail. If it fails, try configuring with --download-fblaslapack or --download-f2cblaslapack"));
#endif
#endif
  /*
     Set flag that we are completely initialized
  */
  PetscInitializeCalled = PETSC_TRUE;

  PetscCall(PetscOptionsHasName(NULL, NULL, "-python", &flg));
  if (flg) PetscCall(PetscPythonInitialize(NULL, NULL));

  PetscCall(PetscOptionsHasName(NULL, NULL, "-mpi_linear_solver_server", &flg));
  if (flg) PetscCall(PetscInfo(NULL, "Running MPI Linear Solver Server\n"));
  if (PetscDefined(USE_SINGLE_LIBRARY) && flg) PetscCall(PCMPIServerBegin());
  else PetscCheck(!flg, PETSC_COMM_WORLD, PETSC_ERR_SUP, "PETSc configured using -with-single-library=0; -mpi_linear_solver_server not supported in that case");
  PetscFunctionReturn(PETSC_SUCCESS);
}
1211: // "Unknown section 'Environmental Variables'"
1212: // PetscClangLinter pragma disable: -fdoc-section-header-unknown
1213: /*@C
1214: PetscInitialize - Initializes the PETSc database and MPI.
1215: `PetscInitialize()` calls MPI_Init() if that has yet to be called,
1216: so this routine should always be called near the beginning of
1217: your program -- usually the very first line!
1219: Collective on `MPI_COMM_WORLD` or `PETSC_COMM_WORLD` if it has been set
1221: Input Parameters:
1222: + argc - count of number of command line arguments
1223: . args - the command line arguments
1224: . file - [optional] PETSc database file, append ":yaml" to filename to specify YAML options format.
1225: Use `NULL` or empty string to not check for code specific file.
1226: Also checks `~/.petscrc`, `.petscrc` and `petscrc`.
1227: Use `-skip_petscrc` in the code specific file (or command line) to skip `~/.petscrc`, `.petscrc` and `petscrc` files.
1228: - help - [optional] Help message to print, use `NULL` for no message
1230: If you wish PETSc code to run ONLY on a subcommunicator of `MPI_COMM_WORLD`, create that
1231: communicator first and assign it to `PETSC_COMM_WORLD` BEFORE calling `PetscInitialize()`.
   If ALL processes in the job are using `PetscInitialize()` and `PetscFinalize()` then you don't need to do this, even
1233: if different subcommunicators of the job are doing different things with PETSc.
1235: Options Database Keys:
1236: + -help [intro] - prints help method for each option; if `intro` is given the program stops after printing the introductory help message
1237: . -start_in_debugger [(noxterm)],[(gdb|lldb|...)] - Starts program in debugger
1238: . -on_error_attach_debugger [(noxterm)],[(gdb|lldb|...)] - Starts debugger when error detected
1239: . -on_error_emacs machinename - causes `emacsclient` to jump to error file if an error is detected
1240: . -on_error_abort - calls `abort()` when error detected (no traceback)
1241: . -on_error_mpiabort - calls `MPI_abort()` when error detected
1242: . -error_output_stdout - prints PETSc error messages to `stdout` instead of the default `stderr`
1243: . -error_output_none - does not print the error messages (but handles errors in the same way as if this was not called)
1244: . -debugger_ranks rank1,rank2,... - Indicates MPI ranks to start in debugger
1245: . -debugger_pause secs - Pauses debugger, use if it takes a long time for the debugger to start up on your system, `sleeptime` is number of seconds to sleep
1246: . -stop_for_debugger - Print message on how to attach debugger manually to
1247: process and wait (`-debugger_pause`) seconds for attachment
1248: . -malloc_dump - prints a list of all unfreed memory at the end of the run
1249: . -malloc_test - like `-malloc_dump` `-malloc_debug`, only active for debugging build, ignored in optimized build. Often set in `PETSC_OPTIONS` environmental variable
1250: . -malloc_view [filename] - show a list of all allocated memory during `PetscFinalize()`
1251: . -malloc_view_threshold t - only list memory allocations of size greater than t with `-malloc_view`
1252: . -malloc_requested_size - malloc logging will record the requested size rather than (possibly large) size after alignment
1253: . -fp_trap - Stops on floating point exceptions
1254: . -no_signal_handler - Indicates not to trap error signals
1255: . -python exe - Initializes Python, and optionally takes a Python executable name
1256: - -mpiuni-allow-multiprocess-launch - allow `mpiexec` to launch multiple independent MPI-Uni jobs, otherwise a sanity check error is invoked to prevent misuse of MPI-Uni
1258: Options Database Keys for Option Database:
1259: + -skip_petscrc - skip the default option files `~/.petscrc`, `.petscrc`, `petscrc`
1260: . -options_monitor - monitor all set options to standard output for the whole program run
1261: - -options_monitor_cancel - cancel options monitoring hard-wired using `PetscOptionsMonitorSet()`
1263: Options -options_monitor_{all,cancel} are
1264: position-independent and apply to all options set since the PETSc start.
1265: They can be used also in option files.
1267: See `PetscOptionsMonitorSet()` to do monitoring programmatically.
1269: Options Database Keys for Profiling:
1270: See Users-Manual: ch_profiling for details.
1271: + -info [filename][:[~]c1,c2,...[:[~]self]] - Prints verbose information for classes c1, c2, etc. See `PetscInfo()`.
1272: . -log_sync - Enable barrier synchronization for all events. This option is useful to debug imbalance within each event,
1273: however it slows things down and gives a distorted view of the overall runtime.
1274: . -log_trace [filename] - Print traces of all PETSc calls to the screen (useful to determine where a program
1275: hangs without running in the debugger). See `PetscLogTraceBegin()`.
1276: . -log_view [:filename:format][,[:filename:format]...] - Prints summary of flop and timing information to screen or file, see `PetscLogView()` (up to 4 viewers)
1277: . -log_view_memory - Includes in the summary from -log_view the memory used in each event, see `PetscLogView()`.
. -log_view_gpu_time - Includes in the summary from -log_view the time used in each GPU kernel, see `PetscLogView()`.
1279: . -log_view_gpu_energy - Includes in the summary from -log_view the energy (estimated with power*gtime) consumed in each GPU kernel, see `PetscLogView()`.
1280: . -log_view_gpu_energy_meter - Includes in the summary from -log_view the energy (readings from meters) consumed in each GPU kernel, see `PetscLogView()`.
1281: . -log_exclude: c1,c2,... - excludes subset of object classes from logging, for example vec,ksp would exclude the `Vec` and `KSP` classes
1282: . -log [filename] - Logs profiling information in a dump file, see `PetscLogDump()`.
1283: . -log_all [filename] - Same as `-log`.
1284: . -log_mpe [filename] - Creates a logfile viewable by the utility Jumpshot (in MPICH distribution)
1285: . -log_perfstubs - Starts a log handler with the perfstubs interface (which is used by TAU)
1286: . -log_nvtx - Starts an nvtx log handler for use with Nsight
1287: . -log_roctx - Starts an roctx log handler for use with rocprof on AMD GPUs
1288: . -viewfromoptions on,off - Enable or disable `XXXSetFromOptions()` calls, for applications with many small solves turn this off
1289: . -get_total_flops - Returns total flops done by all processors
1290: . -memory_view - Print memory usage at end of run
1291: - -check_pointer_intensity 0,1,2 - if pointers are checked for validity (debug version only), using 0 will result in faster code
1293: Options Database Keys for SAWs:
1294: + -saws_port portnumber - port number to publish SAWs data, default is 8080
1295: . -saws_port_auto_select - have SAWs select a new unique port number where it publishes the data, the URL is printed to the screen
1296: this is useful when you are running many jobs that utilize SAWs at the same time
1297: . -saws_log filename - save a log of all SAWs communication
1298: . -saws_https certificate_file - have SAWs use HTTPS instead of HTTP
1299: - -saws_root directory - allow SAWs to have access to the given directory to search for requested resources and files
1301: Environmental Variables:
1302: + `PETSC_TMP` - alternative directory to use instead of `/tmp`
1303: . `PETSC_SHARED_TMP` - `/tmp` is shared by all processes
1304: . `PETSC_NOT_SHARED_TMP` - each process has its own private `/tmp`
1305: . `PETSC_OPTIONS` - a string containing additional options for PETSc in the form of command line "-key value" pairs
1306: . `PETSC_OPTIONS_YAML` - (requires configuring PETSc to use libyaml with `--download-yaml`) a string containing additional options for PETSc in the form of a YAML document
1307: . `PETSC_VIEWER_SOCKET_PORT` - socket number to use for socket viewer
1308: - `PETSC_VIEWER_SOCKET_MACHINE` - machine to use for socket viewer to connect to
1310: Level: beginner
1312: Note:
1313: If for some reason you must call `MPI_Init()` separately from `PetscInitialize()`, call
1314: it before `PetscInitialize()`.
1316: Fortran Notes:
1317: In Fortran this routine can be called with
1318: .vb
1319: call PetscInitialize(ierr)
1320: call PetscInitialize(file,ierr) or
1321: call PetscInitialize(file,help,ierr)
1322: .ve
1324: If your main program is C but you call Fortran code that also uses PETSc you need to call `PetscInitializeFortran()` soon after
1325: calling `PetscInitialize()`.
1327: Options Database Key for Developers:
1328: . -checkfunctionlist - automatically checks that function lists associated with objects are correctly cleaned up. Produces messages of the form:
1329: "function name: MatInodeGetInodeSizes_C" if they are not cleaned up. This flag is always set for the test harness (in framework.py)
1331: .seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArgs()`, `PetscInitializeNoArguments()`, `PetscLogGpuTime()`
1332: @*/
1333: PetscErrorCode PetscInitialize(int *argc, char ***args, const char file[], const char help[])
1334: {
1335: PetscMPIInt flag;
1336: const char *prog = "Unknown Name";
1338: PetscFunctionBegin;
1339: if (PetscInitializeCalled) PetscFunctionReturn(PETSC_SUCCESS);
1340: PetscCallMPI(MPI_Initialized(&flag));
1341: if (!flag) {
1342: PetscCheck(PETSC_COMM_WORLD == MPI_COMM_NULL, PETSC_COMM_SELF, PETSC_ERR_SUP, "You cannot set PETSC_COMM_WORLD if you have not initialized MPI first");
1343: PetscCall(PetscPreMPIInit_Private());
1344: #if defined(PETSC_HAVE_MPI_INIT_THREAD)
1345: {
1346: PetscMPIInt provided;
1347: PetscCallMPI(MPI_Init_thread(argc, args, PETSC_MPI_THREAD_REQUIRED == PETSC_DECIDE ? MPI_THREAD_FUNNELED : PETSC_MPI_THREAD_REQUIRED, &provided));
1348: PetscCheck(PETSC_MPI_THREAD_REQUIRED == PETSC_DECIDE || provided >= PETSC_MPI_THREAD_REQUIRED, PETSC_COMM_SELF, PETSC_ERR_MPI, "The MPI implementation's provided thread level is less than what you required");
1349: if (PETSC_MPI_THREAD_REQUIRED == PETSC_DECIDE) PETSC_MPI_THREAD_REQUIRED = MPI_THREAD_FUNNELED; // assign it a valid value after check-up
1350: }
1351: #else
1352: PetscCallMPI(MPI_Init(argc, args));
1353: #endif
1354: PetscBeganMPI = PETSC_TRUE;
1355: }
1357: if (argc && *argc) prog = **args;
1358: if (argc && args) {
1359: PetscGlobalArgc = *argc;
1360: PetscGlobalArgs = *args;
1361: }
1362: PetscCall(PetscInitialize_Common(prog, file, help, PETSC_FALSE, 0));
1363: PetscFunctionReturn(PETSC_SUCCESS);
1364: }
1366: PETSC_INTERN PetscObject *PetscObjects;
1367: PETSC_INTERN PetscInt PetscObjectsCounts;
1368: PETSC_INTERN PetscInt PetscObjectsMaxCounts;
1369: PETSC_INTERN PetscBool PetscObjectsLog;
1371: /*
1372: Frees all the MPI types and operations that PETSc may have created
1373: */
PetscErrorCode PetscFreeMPIResources(void)
{
  PetscFunctionBegin;
  /* Each MPI_Type_free()/MPI_Op_free() below mirrors a matching create/commit in
     PetscInitialize_Common(); the preprocessor guards must stay identical to the
     ones used at creation time or a type is leaked (or freed without being built). */
#if defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)
  PetscCallMPI(MPI_Type_free(&MPIU___FLOAT128));
#if defined(PETSC_HAVE_COMPLEX)
  PetscCallMPI(MPI_Type_free(&MPIU___COMPLEX128));
#endif
#endif
#if defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16)
  PetscCallMPI(MPI_Type_free(&MPIU___FP16));
#endif

#if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16)
  PetscCallMPI(MPI_Op_free(&MPIU_SUM));
  PetscCallMPI(MPI_Op_free(&MPIU_MAX));
  PetscCallMPI(MPI_Op_free(&MPIU_MIN));
#elif (defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)) || (defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16))
  PetscCallMPI(MPI_Op_free(&MPIU_SUM___FP16___FLOAT128));
#endif

  PetscCallMPI(MPI_Type_free(&MPIU_2SCALAR));
  /* NOTE(review): MPIU_REAL_INT/MPIU_SCALAR_INT are created only when !PETSC_HAVE_MPIUNI;
     presumably MPI-Uni's MPI_Type_free is a harmless stub -- confirm */
  PetscCallMPI(MPI_Type_free(&MPIU_REAL_INT));
  PetscCallMPI(MPI_Type_free(&MPIU_SCALAR_INT));
#if defined(PETSC_USE_64BIT_INDICES)
  PetscCallMPI(MPI_Type_free(&MPIU_2INT));
  PetscCallMPI(MPI_Type_free(&MPIU_INT_MPIINT));
#endif
  PetscCallMPI(MPI_Type_free(&MPI_4INT));
  PetscCallMPI(MPI_Type_free(&MPIU_4INT));
  PetscCallMPI(MPI_Op_free(&MPIU_MAXSUM_OP));
  PetscCallMPI(MPI_Op_free(&Petsc_Garbage_SetIntersectOp));
  PetscFunctionReturn(PETSC_SUCCESS);
}
1409: PETSC_INTERN PetscErrorCode PetscLogFinalize(void);
1410: PETSC_EXTERN PetscErrorCode PetscFreeAlign(void *, int, const char[], const char[]);
1412: /*@
  PetscFinalize - Checks for options to be called at the conclusion of the program, frees any remaining PETSc objects and data structures,
  and automatically calls `MPI_Finalize()` if the user had not called `MPI_Init()` before calling `PetscInitialize()`.
1416: Collective on `PETSC_COMM_WORLD`
1418: Options Database Keys:
1419: + -options_view - Calls `PetscOptionsView()` to display all options in the database
1420: . -options_left - Prints unused options that remain in the database (default value is `true`)
1421: . -objects_dump [all] - Prints list of objects allocated by the user that have not been freed, the option all cause all outstanding objects to be listed
1422: . -mpidump - Calls PetscMPIDump()
1423: . -malloc_dump [filename] - Calls `PetscMallocDump()`, displays all memory allocated that has not been freed
1424: . -memory_view - Prints total memory usage
1425: - -malloc_view [filename] - Prints list of all memory allocated and in what functions
1427: Level: beginner
1429: Note:
1430: See `PetscInitialize()` for other runtime options.
1432: You can call `PetscInitialize()` after `PetscFinalize()` but only with MPI-Uni or if you called `MPI_Init()` before ever calling `PetscInitialize()`.
1434: .seealso: `PetscInitialize()`, `PetscOptionsView()`, `PetscMallocDump()`, `PetscMPIDump()`, `PetscEnd()`
1435: @*/
/*
   Tear-down is strictly ordered: option-driven diagnostics run first (while the
   options database still exists), then objects and registered finalizers are
   destroyed, then PETSc's inner MPI communicators/keyvals are freed, and
   MPI_Finalize() is called last -- only if PetscInitialize() called MPI_Init().
*/
PetscErrorCode PetscFinalize(void)
{
  PetscMPIInt rank;
  PetscInt    nopt;
  PetscBool   flg1 = PETSC_FALSE, flg2 = PETSC_FALSE, flg3 = PETSC_FALSE;
  PetscBool   flg;
  char        mname[PETSC_MAX_PATH_LEN];

  PetscFunctionBegin;
  PetscCheck(PetscInitializeCalled, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "PetscInitialize() must be called before PetscFinalize()");
  PetscCall(PetscInfo(NULL, "PetscFinalize() called\n"));

  /* Shut down the MPI linear solver server if it was started via -mpi_linear_solver_server */
  PetscCall(PetscOptionsHasName(NULL, NULL, "-mpi_linear_solver_server", &flg));
  if (PetscDefined(USE_SINGLE_LIBRARY) && flg) PetscCall(PCMPIServerEnd());

  /* Release the saved copy of the command-line arguments (kept for Fortran access) */
  PetscCall(PetscFreeAlign(PetscGlobalArgsFortran, 0, NULL, NULL));
  PetscGlobalArgc = 0;
  PetscGlobalArgs = NULL;

  /* Clean up Garbage automatically on COMM_SELF and COMM_WORLD at finalize */
  {
    union
    {
      MPI_Comm comm;
      void    *ptr;
    } ucomm;
    PetscMPIInt flg; /* intentionally shadows the outer flg; MPI attribute-present flag */
    void       *tmp;

    /* only run cleanup if the inner comm exists AND it carries a garbage hash map */
    PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_SELF, Petsc_InnerComm_keyval, &ucomm, &flg));
    if (flg) PetscCallMPI(MPI_Comm_get_attr(ucomm.comm, Petsc_Garbage_HMap_keyval, &tmp, &flg));
    if (flg) PetscCall(PetscGarbageCleanup(PETSC_COMM_SELF));
    PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_WORLD, Petsc_InnerComm_keyval, &ucomm, &flg));
    if (flg) PetscCallMPI(MPI_Comm_get_attr(ucomm.comm, Petsc_Garbage_HMap_keyval, &tmp, &flg));
    if (flg) PetscCall(PetscGarbageCleanup(PETSC_COMM_WORLD));
  }

  PetscCallMPI(MPI_Comm_rank(PETSC_COMM_WORLD, &rank));
#if defined(PETSC_HAVE_ADIOS)
  PetscCallExternal(adios_read_finalize_method, ADIOS_READ_METHOD_BP_AGGREGATE);
  PetscCallExternal(adios_finalize, rank);
#endif
  /* -citations [filename]: print the accumulated citation list to stdout or to a file */
  PetscCall(PetscOptionsHasName(NULL, NULL, "-citations", &flg));
  if (flg) {
    char *cits, filename[PETSC_MAX_PATH_LEN];
    FILE *fd = PETSC_STDOUT;

    PetscCall(PetscOptionsGetString(NULL, NULL, "-citations", filename, sizeof(filename), NULL));
    if (filename[0]) PetscCall(PetscFOpen(PETSC_COMM_WORLD, filename, "w", &fd));
    /* append a NULL terminator to the segmented buffer before extracting it as one string */
    PetscCall(PetscSegBufferGet(PetscCitationsList, 1, &cits));
    cits[0] = 0;
    PetscCall(PetscSegBufferExtractAlloc(PetscCitationsList, &cits));
    PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "If you publish results based on this computation please cite the following:\n"));
    PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "===========================================================================\n"));
    PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "%s", cits));
    PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "===========================================================================\n"));
    PetscCall(PetscFClose(PETSC_COMM_WORLD, fd));
    PetscCall(PetscFree(cits));
  }
  PetscCall(PetscSegBufferDestroy(&PetscCitationsList));

#if defined(PETSC_SERIALIZE_FUNCTIONS)
  PetscCall(PetscFPTDestroy());
#endif

#if defined(PETSC_HAVE_X)
  flg1 = PETSC_FALSE;
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-x_virtual", &flg1, NULL));
  if (flg1) {
    /* this is a crude hack, but better than nothing */
    PetscCall(PetscPOpen(PETSC_COMM_WORLD, NULL, "pkill -15 Xvfb", "r", NULL));
  }
#endif

#if !defined(PETSC_HAVE_THREADSAFETY)
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-memory_view", &flg2, NULL));
  if (flg2) PetscCall(PetscMemoryView(PETSC_VIEWER_STDOUT_WORLD, "Summary of Memory Usage in PETSc\n"));
#endif

  if (PetscDefined(USE_LOG)) {
    flg1 = PETSC_FALSE;
    PetscCall(PetscOptionsGetBool(NULL, NULL, "-get_total_flops", &flg1, NULL));
    if (flg1) {
      PetscLogDouble flops = 0;
      PetscCallMPI(MPI_Reduce(&petsc_TotalFlops, &flops, 1, MPI_DOUBLE, MPI_SUM, 0, PETSC_COMM_WORLD));
      PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Total flops over all processors %g\n", flops));
    }
  }

  if (PetscDefined(USE_LOG) && PetscDefined(HAVE_MPE)) {
    mname[0] = 0;
    PetscCall(PetscOptionsGetString(NULL, NULL, "-log_mpe", mname, sizeof(mname), &flg1));
    if (flg1) PetscCall(PetscLogMPEDump(mname[0] ? mname : NULL));
  }

#if defined(PETSC_HAVE_KOKKOS)
  // Free PETSc/kokkos stuff before the potentially non-null PETSc default gpu stream is destroyed by PetscObjectRegisterDestroyAll
  if (PetscKokkosInitialized) {
    PetscCall(PetscKokkosFinalize_Private());
    PetscKokkosInitialized = PETSC_FALSE;
  }
#endif

  // Free all objects registered with PetscObjectRegisterDestroy() such as PETSC_VIEWER_XXX_().
  PetscCall(PetscObjectRegisterDestroyAll());

  if (PetscDefined(USE_LOG)) {
    PetscCall(PetscOptionsPushCreateViewerOff(PETSC_FALSE));
    PetscCall(PetscLogViewFromOptions());
    PetscCall(PetscOptionsPopCreateViewerOff());
    // It should be turned on with PetscLogGpuTime() and never turned off except in this place
    PetscLogGpuTimeFlag = PETSC_FALSE;

    // Free any objects created by the last block of code.
    PetscCall(PetscObjectRegisterDestroyAll());

    mname[0] = 0;
    PetscCall(PetscOptionsGetString(NULL, NULL, "-log_all", mname, sizeof(mname), &flg1));
    PetscCall(PetscOptionsGetString(NULL, NULL, "-log", mname, sizeof(mname), &flg2));
    if (flg1 || flg2) PetscCall(PetscLogDump(mname));
  }

  flg1 = PETSC_FALSE;
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-no_signal_handler", &flg1, NULL));
  if (!flg1) PetscCall(PetscPopSignalHandler());
  flg1 = PETSC_FALSE;
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-mpidump", &flg1, NULL));
  if (flg1) PetscCall(PetscMPIDump(stdout));
  flg1 = PETSC_FALSE;
  flg2 = PETSC_FALSE;
  /* preemptive call to avoid listing this option in options table as unused */
  PetscCall(PetscOptionsHasName(NULL, NULL, "-malloc_dump", &flg1));
  PetscCall(PetscOptionsHasName(NULL, NULL, "-objects_dump", &flg1));
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-options_view", &flg2, NULL));

  if (flg2) PetscCall(PetscOptionsView(NULL, PETSC_VIEWER_STDOUT_WORLD));

  /* to prevent PETSc -options_left from warning */
  PetscCall(PetscOptionsHasName(NULL, NULL, "-nox", &flg1));
  PetscCall(PetscOptionsHasName(NULL, NULL, "-nox_warning", &flg1));

  flg3 = PETSC_FALSE; /* default value is required */
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-options_left", &flg3, &flg1));
  /* -options_left defaults to true when the option was not given (flg1 reports "was it set") */
  if (!flg1) flg3 = PETSC_TRUE;
  if (flg3) {
    PetscCall(PetscOptionsAllUsed(NULL, &nopt));
    if (nopt) {
      PetscCall(PetscPrintf(PETSC_COMM_WORLD, "WARNING! There are options you set that were not used!\n"));
      PetscCall(PetscPrintf(PETSC_COMM_WORLD, "WARNING! could be spelling mistake, etc!\n"));
      if (nopt == 1) {
        PetscCall(PetscPrintf(PETSC_COMM_WORLD, "There is one unused database option. It is:\n"));
      } else {
        PetscCall(PetscPrintf(PETSC_COMM_WORLD, "There are %" PetscInt_FMT " unused database options. They are:\n", nopt));
      }
    } else if (flg3 && flg1) {
      /* only print "no unused options" when the user explicitly asked for -options_left */
      PetscCall(PetscPrintf(PETSC_COMM_WORLD, "There are no unused options.\n"));
    }
    PetscCall(PetscOptionsLeft(NULL));
  }

#if defined(PETSC_HAVE_SAWS)
  if (!PetscGlobalRank) {
    PetscCall(PetscStackSAWsViewOff());
    PetscCallSAWs(SAWs_Finalize, ());
  }
#endif

  /*
       List all objects the user may have forgot to free
  */
  if (PetscDefined(USE_LOG) && PetscObjectsLog) {
    PetscCall(PetscOptionsHasName(NULL, NULL, "-objects_dump", &flg1));
    if (flg1) {
      MPI_Comm local_comm;
      char     string[64];

      PetscCall(PetscOptionsGetString(NULL, NULL, "-objects_dump", string, sizeof(string), NULL));
      PetscCallMPI(MPI_Comm_dup(PETSC_COMM_WORLD, &local_comm));
      /* rank-by-rank output so the per-process listings do not interleave */
      PetscCall(PetscSequentialPhaseBegin_Private(local_comm, 1));
      PetscCall(PetscObjectsDump(stdout, (string[0] == 'a') ? PETSC_TRUE : PETSC_FALSE));
      PetscCall(PetscSequentialPhaseEnd_Private(local_comm, 1));
      PetscCallMPI(MPI_Comm_free(&local_comm));
    }
  }

  PetscObjectsCounts    = 0;
  PetscObjectsMaxCounts = 0;
  PetscCall(PetscFree(PetscObjects));

  /*
     Destroy any packages that registered a finalize
  */
  PetscCall(PetscRegisterFinalizeAll());

  PetscCall(PetscLogFinalize());

  /*
     Print PetscFunctionLists that have not been properly freed
  */
  if (PetscPrintFunctionList) PetscCall(PetscFunctionListPrintAll());

  if (petsc_history) {
    PetscCall(PetscCloseHistoryFile(&petsc_history));
    petsc_history = NULL;
  }
  PetscCall(PetscOptionsHelpPrintedDestroy(&PetscOptionsHelpPrintedSingleton));
  PetscCall(PetscInfoDestroy());

#if !defined(PETSC_HAVE_THREADSAFETY)
  /* malloc diagnostics are skipped under valgrind, which tracks memory itself */
  if (!(PETSC_RUNNING_ON_VALGRIND)) {
    char  fname[PETSC_MAX_PATH_LEN];
    char  sname[PETSC_MAX_PATH_LEN];
    FILE *fd;
    int   err;

    flg2 = PETSC_FALSE;
    flg3 = PETSC_FALSE;
    if (PetscDefined(USE_DEBUG)) PetscCall(PetscOptionsGetBool(NULL, NULL, "-malloc_test", &flg2, NULL));
    PetscCall(PetscOptionsGetBool(NULL, NULL, "-malloc_debug", &flg3, NULL));
    fname[0] = 0;
    PetscCall(PetscOptionsGetString(NULL, NULL, "-malloc_dump", fname, sizeof(fname), &flg1));
    if (flg1 && fname[0]) {
      /* one dump file per rank: <fname>_<rank> */
      PetscCall(PetscSNPrintf(sname, sizeof(sname), "%s_%d", fname, rank));
      fd = fopen(sname, "w");
      PetscCheck(fd, PETSC_COMM_SELF, PETSC_ERR_FILE_OPEN, "Cannot open log file: %s", sname);
      PetscCall(PetscMallocDump(fd));
      err = fclose(fd);
      PetscCheck(!err, PETSC_COMM_SELF, PETSC_ERR_SYS, "fclose() failed on file");
    } else if (flg1 || flg2 || flg3) {
      MPI_Comm local_comm;

      PetscCallMPI(MPI_Comm_dup(PETSC_COMM_WORLD, &local_comm));
      PetscCall(PetscSequentialPhaseBegin_Private(local_comm, 1));
      PetscCall(PetscMallocDump(stdout));
      PetscCall(PetscSequentialPhaseEnd_Private(local_comm, 1));
      PetscCallMPI(MPI_Comm_free(&local_comm));
    }
    fname[0] = 0;
    PetscCall(PetscOptionsGetString(NULL, NULL, "-malloc_view", fname, sizeof(fname), &flg1));
    if (flg1 && fname[0]) {
      PetscCall(PetscSNPrintf(sname, sizeof(sname), "%s_%d", fname, rank));
      fd = fopen(sname, "w");
      PetscCheck(fd, PETSC_COMM_SELF, PETSC_ERR_FILE_OPEN, "Cannot open log file: %s", sname);
      PetscCall(PetscMallocView(fd));
      err = fclose(fd);
      PetscCheck(!err, PETSC_COMM_SELF, PETSC_ERR_SYS, "fclose() failed on file");
    } else if (flg1) {
      MPI_Comm local_comm;

      PetscCallMPI(MPI_Comm_dup(PETSC_COMM_WORLD, &local_comm));
      PetscCall(PetscSequentialPhaseBegin_Private(local_comm, 1));
      PetscCall(PetscMallocView(stdout));
      PetscCall(PetscSequentialPhaseEnd_Private(local_comm, 1));
      PetscCallMPI(MPI_Comm_free(&local_comm));
    }
  }
#endif

  /*
     Close any open dynamic libraries
  */
  PetscCall(PetscFinalize_DynamicLibraries());

  /* Can be destroyed only after all the options are used */
  PetscCall(PetscOptionsDestroyDefault());

#if defined(PETSC_HAVE_NVSHMEM)
  if (PetscBeganNvshmem) {
    PetscCall(PetscNvshmemFinalize());
    PetscBeganNvshmem = PETSC_FALSE;
  }
#endif

  PetscCall(PetscFreeMPIResources());

  /*
     Destroy any known inner MPI_Comm's and attributes pointing to them
     Note this will not destroy any new communicators the user has created.

     If all PETSc objects were not destroyed those left over objects will have hanging references to
     the MPI_Comms that were freed; but that is ok because those PETSc objects will never be used again
  */
  {
    PetscCommCounter *counter;
    PetscMPIInt       flg;
    MPI_Comm          icomm;
    union
    {
      MPI_Comm comm;
      void    *ptr;
    } ucomm;
    PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_SELF, Petsc_InnerComm_keyval, &ucomm, &flg));
    if (flg) {
      icomm = ucomm.comm;
      /* the counter attribute must be present on a PETSc inner comm; its absence indicates corruption */
      PetscCallMPI(MPI_Comm_get_attr(icomm, Petsc_Counter_keyval, &counter, &flg));
      PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");

      PetscCallMPI(MPI_Comm_delete_attr(PETSC_COMM_SELF, Petsc_InnerComm_keyval));
      PetscCallMPI(MPI_Comm_delete_attr(icomm, Petsc_Counter_keyval));
      PetscCallMPI(MPI_Comm_free(&icomm));
    }
    PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_WORLD, Petsc_InnerComm_keyval, &ucomm, &flg));
    if (flg) {
      icomm = ucomm.comm;
      PetscCallMPI(MPI_Comm_get_attr(icomm, Petsc_Counter_keyval, &counter, &flg));
      PetscCheck(flg, PETSC_COMM_WORLD, PETSC_ERR_ARG_CORRUPT, "Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");

      PetscCallMPI(MPI_Comm_delete_attr(PETSC_COMM_WORLD, Petsc_InnerComm_keyval));
      PetscCallMPI(MPI_Comm_delete_attr(icomm, Petsc_Counter_keyval));
      PetscCallMPI(MPI_Comm_free(&icomm));
    }
  }

  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_Counter_keyval));
  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_InnerComm_keyval));
  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_OuterComm_keyval));
  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_ShmComm_keyval));
  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_CreationIdx_keyval));
  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_Garbage_HMap_keyval));

  // Free keyvals which may be silently created by some routines
  if (Petsc_SharedWD_keyval != MPI_KEYVAL_INVALID) PetscCallMPI(MPI_Comm_free_keyval(&Petsc_SharedWD_keyval));
  if (Petsc_SharedTmp_keyval != MPI_KEYVAL_INVALID) PetscCallMPI(MPI_Comm_free_keyval(&Petsc_SharedTmp_keyval));

  PetscCall(PetscSpinlockDestroy(&PetscViewerASCIISpinLockOpen));
  PetscCall(PetscSpinlockDestroy(&PetscViewerASCIISpinLockStdout));
  PetscCall(PetscSpinlockDestroy(&PetscViewerASCIISpinLockStderr));
  PetscCall(PetscSpinlockDestroy(&PetscCommSpinLock));

  if (PetscBeganMPI) {
    PetscMPIInt flag;
    PetscCallMPI(MPI_Finalized(&flag));
    PetscCheck(!flag, PETSC_COMM_SELF, PETSC_ERR_LIB, "MPI_Finalize() has already been called, even though MPI_Init() was called by PetscInitialize()");
    /* wait until the very last moment to disable error handling */
    PetscErrorHandlingInitialized = PETSC_FALSE;
    PetscCallMPI(MPI_Finalize());
  } else PetscErrorHandlingInitialized = PETSC_FALSE;

  /*
     Note: In certain cases PETSC_COMM_WORLD is never MPI_Comm_free()ed because
     the communicator has some outstanding requests on it. Specifically if the
     flag PETSC_HAVE_BROKEN_REQUEST_FREE is set (for IBM MPI implementation). See
     src/vec/utils/vpscat.c. Due to this the memory allocated in PetscCommDuplicate()
     is never freed as it should be. Thus one may obtain messages of the form
     [ 1] 8 bytes PetscCommDuplicate() line 645 in src/sys/mpiu.c indicating the
     memory was not freed.
  */
  PetscCall(PetscMallocClear());
  PetscCall(PetscStackReset());

  PetscInitializeCalled = PETSC_FALSE;
  PetscFinalizeCalled   = PETSC_TRUE;
#if defined(PETSC_USE_COVERAGE)
  /*
     flush gcov, otherwise during CI the flushing continues into the next pipeline resulting in git not being able to delete directories since the
     gcov files are still being added to the directories as git tries to remove the directories.
  */
  __gcov_flush();
#endif
  /* To match PetscFunctionBegin() at the beginning of this function */
  PetscStackClearTop;
  return PETSC_SUCCESS;
}
1801: #if defined(PETSC_MISSING_LAPACK_lsame_)
1802: PETSC_EXTERN int lsame_(char *a, char *b)
1803: {
1804: if (*a == *b) return 1;
1805: if (*a + 32 == *b) return 1;
1806: if (*a - 32 == *b) return 1;
1807: return 0;
1808: }
1809: #endif
1811: #if defined(PETSC_MISSING_LAPACK_lsame)
1812: PETSC_EXTERN int lsame(char *a, char *b)
1813: {
1814: if (*a == *b) return 1;
1815: if (*a + 32 == *b) return 1;
1816: if (*a - 32 == *b) return 1;
1817: return 0;
1818: }
1819: #endif
1821: static inline PetscMPIInt MPIU_Allreduce_Count(const void *inbuf, void *outbuf, MPIU_Count count, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)
1822: {
1823: PetscMPIInt err;
1824: #if !defined(PETSC_HAVE_MPI_LARGE_COUNT)
1825: PetscMPIInt count2;
1827: PetscMPIIntCast_Internal(count, &count2);
1828: err = MPI_Allreduce((void *)inbuf, outbuf, count2, dtype, op, comm);
1829: #else
1830: err = MPI_Allreduce_c((void *)inbuf, outbuf, count, dtype, op, comm);
1831: #endif
1832: return err;
1833: }
1835: /*
1836: When count is 1 and dtype == MPIU_INT performs the reduction in PetscInt64 to check for integer overflow
1837: */
1838: PetscMPIInt MPIU_Allreduce_Private(const void *inbuf, void *outbuf, MPIU_Count count, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)
1839: {
1840: PetscMPIInt err;
1841: if (!PetscDefined(USE_64BIT_INDICES) && count == 1 && dtype == MPIU_INT && (op == MPI_SUM || op == MPI_PROD)) {
1842: PetscInt64 incnt, outcnt;
1843: void *inbufd, *outbufd;
1845: if (inbuf != MPI_IN_PLACE) {
1846: incnt = *(PetscInt32 *)inbuf;
1847: inbufd = &incnt;
1848: outbufd = &outcnt;
1849: err = MPIU_Allreduce_Count(inbufd, outbufd, count, MPIU_INT64, op, comm);
1850: } else {
1851: outcnt = *(PetscInt32 *)outbuf;
1852: outbufd = &outcnt;
1853: err = MPIU_Allreduce_Count(MPI_IN_PLACE, outbufd, count, MPIU_INT64, op, comm);
1854: }
1855: if (!err && outcnt > PETSC_INT_MAX) err = MPI_ERR_OTHER;
1856: *(PetscInt32 *)outbuf = (PetscInt32)outcnt;
1857: } else {
1858: err = MPIU_Allreduce_Count(inbuf, outbuf, count, dtype, op, comm);
1859: }
1860: return err;
1861: }
// Check if MPIU_Allreduce() is called on the same filename:lineno and with the same data count across all processes. Error out if otherwise.
// Reduction trick: MPI_MAX over the pair (v, -v) yields (max v, -min v); the two agree in magnitude iff every process contributed the same v.
PetscErrorCode PetscCheckAllreduceSameLineAndCount_Private(MPI_Comm comm, const char *filename, PetscMPIInt lineno, PetscMPIInt count)
{
  PetscMPIInt rbuf[4];

  PetscFunctionBegin;
  rbuf[0] = lineno;
  rbuf[1] = -rbuf[0];
  rbuf[2] = count;
  rbuf[3] = -rbuf[2];
  PetscCallMPI(MPI_Allreduce(MPI_IN_PLACE, rbuf, 4, MPI_INT, MPI_MAX, comm));

  if (rbuf[0] != -rbuf[1]) { /* line numbers differ across ranks: gather each rank's location onto rank 0 and error out */
    size_t       len;
    PetscMPIInt  size, rank, ilen, *recvcounts = NULL, *displs = NULL;
    char        *str = NULL, *str0 = NULL;

    PetscCallMPI(MPI_Comm_size(comm, &size));
    PetscCallMPI(MPI_Comm_rank(comm, &rank));
    PetscCall(PetscStrlen(filename, &len));
    len += 128; /* add enough space for the leading and trailing chars in PetscSNPrintf around __FILE__ */
    PetscCall(PetscMalloc1(len, &str));
    PetscCall(PetscSNPrintf(str, len, " On process %d, %s:%d\n", rank, filename, lineno));
    PetscCall(PetscStrlen(str, &len)); /* string length exclusive of the NULL terminator */
    ilen = (PetscMPIInt)len;
    /* recvcounts/displs are only meaningful (and only allocated) on the gather root, rank 0 */
    if (rank == 0) PetscCall(PetscMalloc2(size, &recvcounts, size + 1, &displs));
    PetscCallMPI(MPI_Gather(&ilen, 1, MPI_INT, recvcounts, 1, MPI_INT, 0, comm));
    if (rank == 0) {
      displs[0] = 0;
      for (PetscMPIInt i = 0; i < size; i++) displs[i + 1] = displs[i] + recvcounts[i];
      PetscCall(PetscMalloc1(displs[size], &str0)); /* displs[size] == total gathered length */
    }
    PetscCallMPI(MPI_Gatherv(str, ilen, MPI_CHAR, str0, recvcounts, displs, MPI_CHAR, 0, comm));
    if (rank == 0) str0[displs[size] - 1] = 0; /* replace the ending \n with NULL */
    PetscCall(PetscFree(str));
    if (rank == 0) PetscCall(PetscFree2(recvcounts, displs));
    SETERRQ(comm, PETSC_ERR_PLIB, "MPIU_Allreduce() called in different locations on different processes:\n%s", str0);
  }
  PetscCheck(rbuf[2] == -rbuf[3], comm, PETSC_ERR_PLIB, "MPIU_Allreduce() called with different counts %d on different processes", count);
  PetscFunctionReturn(PETSC_SUCCESS);
}
1905: /*@C
1906: PetscCtxDestroyDefault - An implementation of a `PetscCtxDestroyFn` that uses `PetscFree()` to free the context
1908: Input Parameter:
1909: . ctx - the context to be destroyed
1911: Level: intermediate
1913: Note:
1914: This is not called directly, rather it is passed to `DMSetApplicationContextDestroy()`, `PetscContainerSetDestroy()`,
1915: `PetscObjectContainterCreate()` and similar routines and then called by the destructor of the associated object.
1917: .seealso: `PetscObject`, `PetscCtxDestroyFn`, `PetscObjectDestroy()`, `DMSetApplicationContextDestroy()`, `PetscContainerSetDestroy()`,
1918: `PetscObjectContainterCreate()`
1919: @*/
1920: PETSC_EXTERN PetscErrorCode PetscCtxDestroyDefault(PetscCtxRt ctx)
1921: {
1922: PetscFunctionBegin;
1923: PetscCall(PetscFree(*(void **)ctx));
1924: PetscFunctionReturn(PETSC_SUCCESS);
1925: }