Actual source code: plexdistribute.c
1: #include <petsc/private/dmpleximpl.h>
2: #include <petsc/private/dmlabelimpl.h>
4: /*@C
5: DMPlexSetAdjacencyUser - Define adjacency in the mesh using a user-provided callback
7: Input Parameters:
8: + dm - The DM object
9: . user - The user callback, may be `NULL` (to clear the callback)
10: - ctx - context for callback evaluation, may be `NULL`
12: Level: advanced
14: Notes:
15: The caller of `DMPlexGetAdjacency()` may need to arrange that a large enough array is available for the adjacency.
17: Any setting here overrides other configuration of `DMPLEX` adjacency determination.
19: .seealso: `DMPLEX`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexGetAdjacency()`, `DMPlexGetAdjacencyUser()`
20: @*/
21: PetscErrorCode DMPlexSetAdjacencyUser(DM dm, PetscErrorCode (*user)(DM, PetscInt, PetscInt *, PetscInt[], void *), void *ctx)
22: {
23: DM_Plex *mesh = (DM_Plex *)dm->data;
25: PetscFunctionBegin;
27: mesh->useradjacency = user;
28: mesh->useradjacencyctx = ctx;
29: PetscFunctionReturn(PETSC_SUCCESS);
30: }
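/*
  Example usage, a minimal sketch: the callback name MyAdjacency and its trivial body are
  illustrative assumptions, not part of PETSc. On entry *adjSize holds the available size of
  adj[]; the callback must overwrite it with the number of entries it actually filled in.

    static PetscErrorCode MyAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[], void *ctx)
    {
      PetscFunctionBegin;
      adj[0]   = p; // every point is adjacent only to itself
      *adjSize = 1; // report how many entries were written
      PetscFunctionReturn(PETSC_SUCCESS);
    }

    // after the DM has been created and given type DMPLEX:
    PetscCall(DMPlexSetAdjacencyUser(dm, MyAdjacency, NULL));
*/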
32: /*@C
33: DMPlexGetAdjacencyUser - get the user-defined adjacency callback
35: Input Parameter:
36: . dm - The `DM` object
38: Output Parameters:
39: + user - The callback
40: - ctx - context for callback evaluation
42: Level: advanced
44: .seealso: `DMPLEX`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexGetAdjacency()`, `DMPlexSetAdjacencyUser()`
45: @*/
46: PetscErrorCode DMPlexGetAdjacencyUser(DM dm, PetscErrorCode (**user)(DM, PetscInt, PetscInt *, PetscInt[], void *), void **ctx)
47: {
48: DM_Plex *mesh = (DM_Plex *)dm->data;
50: PetscFunctionBegin;
52: if (user) *user = mesh->useradjacency;
53: if (ctx) *ctx = mesh->useradjacencyctx;
54: PetscFunctionReturn(PETSC_SUCCESS);
55: }
57: /*@
58: DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.
60: Input Parameters:
61: + dm - The `DM` object
62: - useAnchors - Flag to use the constraints. If `PETSC_TRUE`, then constrained points are omitted from `DMPlexGetAdjacency()`, and their anchor points appear in their place.
64: Level: intermediate
66: .seealso: `DMPLEX`, `DMGetAdjacency()`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexSetAnchors()`
67: @*/
68: PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors)
69: {
70: DM_Plex *mesh = (DM_Plex *)dm->data;
72: PetscFunctionBegin;
74: mesh->useAnchors = useAnchors;
75: PetscFunctionReturn(PETSC_SUCCESS);
76: }
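/*
  Example usage, a minimal sketch assuming a constraint (anchor) relation has already been
  built in anchorSection/anchorIS (illustrative names, not defined here):

    PetscCall(DMPlexSetAnchors(dm, anchorSection, anchorIS));
    PetscCall(DMPlexSetAdjacencyUseAnchors(dm, PETSC_TRUE));
    // DMPlexGetAdjacency() now replaces constrained points by their anchor points
*/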
78: /*@
79: DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.
81: Input Parameter:
82: . dm - The `DM` object
84: Output Parameter:
85: . useAnchors - Flag to use the constraints. If `PETSC_TRUE`, then constrained points are omitted from `DMPlexGetAdjacency()`, and their anchor points appear in their place.

87: Level: intermediate
89: .seealso: `DMPLEX`, `DMPlexSetAdjacencyUseAnchors()`, `DMSetAdjacency()`, `DMGetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexSetAnchors()`
90: @*/
91: PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors)
92: {
93: DM_Plex *mesh = (DM_Plex *)dm->data;
95: PetscFunctionBegin;
97: PetscAssertPointer(useAnchors, 2);
98: *useAnchors = mesh->useAnchors;
99: PetscFunctionReturn(PETSC_SUCCESS);
100: }
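/*
  The three helpers below compute the basic adjacency variants selected with
  DMSetBasicAdjacency(dm, useCone, useClosure); DMPlexGetAdjacency_Internal() chooses among them:
    - useCone = PETSC_TRUE,  useClosure = PETSC_FALSE: support of the cone (Cone_Internal)
    - useCone = PETSC_FALSE, useClosure = PETSC_FALSE: cone of the support (Support_Internal)
    - useClosure = PETSC_TRUE: closure of the star when useCone is PETSC_FALSE, or star of the
      closure when useCone is PETSC_TRUE (Transitive_Internal)
*/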
102: static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
103: {
104: const PetscInt *cone = NULL;
105: PetscInt numAdj = 0, maxAdjSize = *adjSize, coneSize, c;
107: PetscFunctionBeginHot;
108: PetscCall(DMPlexGetConeSize(dm, p, &coneSize));
109: PetscCall(DMPlexGetCone(dm, p, &cone));
110: for (c = 0; c <= coneSize; ++c) {
111: const PetscInt point = !c ? p : cone[c - 1];
112: const PetscInt *support = NULL;
113: PetscInt supportSize, s, q;
115: PetscCall(DMPlexGetSupportSize(dm, point, &supportSize));
116: PetscCall(DMPlexGetSupport(dm, point, &support));
117: for (s = 0; s < supportSize; ++s) {
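      /* Deduplicating insert: scan adj[0..numAdj-1] for support[s]; if the scan finishes without
         the break below firing, the comma expression in the loop condition appends support[s],
         increments numAdj, and evaluates to 0, ending the loop. The same idiom recurs in the
         helpers below. */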
118: for (q = 0; q < numAdj || ((void)(adj[numAdj++] = support[s]), 0); ++q) {
119: if (support[s] == adj[q]) break;
120: }
121: PetscCheck(numAdj <= maxAdjSize, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
122: }
123: }
124: *adjSize = numAdj;
125: PetscFunctionReturn(PETSC_SUCCESS);
126: }
128: static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
129: {
130: const PetscInt *support = NULL;
131: PetscInt numAdj = 0, maxAdjSize = *adjSize, supportSize, s;
133: PetscFunctionBeginHot;
134: PetscCall(DMPlexGetSupportSize(dm, p, &supportSize));
135: PetscCall(DMPlexGetSupport(dm, p, &support));
136: for (s = 0; s <= supportSize; ++s) {
137: const PetscInt point = !s ? p : support[s - 1];
138: const PetscInt *cone = NULL;
139: PetscInt coneSize, c, q;
141: PetscCall(DMPlexGetConeSize(dm, point, &coneSize));
142: PetscCall(DMPlexGetCone(dm, point, &cone));
143: for (c = 0; c < coneSize; ++c) {
144: for (q = 0; q < numAdj || ((void)(adj[numAdj++] = cone[c]), 0); ++q) {
145: if (cone[c] == adj[q]) break;
146: }
147: PetscCheck(numAdj <= maxAdjSize, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
148: }
149: }
150: *adjSize = numAdj;
151: PetscFunctionReturn(PETSC_SUCCESS);
152: }
154: static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[])
155: {
156: PetscInt *star = NULL;
157: PetscInt numAdj = 0, maxAdjSize = *adjSize, starSize, s;
159: PetscFunctionBeginHot;
160: PetscCall(DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star));
161: for (s = 0; s < starSize * 2; s += 2) {
162: const PetscInt *closure = NULL;
163: PetscInt closureSize, c, q;
165: PetscCall(DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt **)&closure));
166: for (c = 0; c < closureSize * 2; c += 2) {
167: for (q = 0; q < numAdj || ((void)(adj[numAdj++] = closure[c]), 0); ++q) {
168: if (closure[c] == adj[q]) break;
169: }
170: PetscCheck(numAdj <= maxAdjSize, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
171: }
172: PetscCall(DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt **)&closure));
173: }
174: PetscCall(DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star));
175: *adjSize = numAdj;
176: PetscFunctionReturn(PETSC_SUCCESS);
177: }
179: PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[])
180: {
181: static PetscInt asiz = 0;
182: PetscInt maxAnchors = 1;
183: PetscInt aStart = -1, aEnd = -1;
184: PetscInt maxAdjSize;
185: PetscSection aSec = NULL;
186: IS aIS = NULL;
187: const PetscInt *anchors;
188: DM_Plex *mesh = (DM_Plex *)dm->data;
190: PetscFunctionBeginHot;
191: if (useAnchors) {
192: PetscCall(DMPlexGetAnchors(dm, &aSec, &aIS));
193: if (aSec) {
194: PetscCall(PetscSectionGetMaxDof(aSec, &maxAnchors));
195: maxAnchors = PetscMax(1, maxAnchors);
196: PetscCall(PetscSectionGetChart(aSec, &aStart, &aEnd));
197: PetscCall(ISGetIndices(aIS, &anchors));
198: }
199: }
200: if (!*adj) {
201: PetscInt depth, maxC, maxS, maxP, pStart, pEnd;
203: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
204: PetscCall(DMPlexGetDepth(dm, &depth));
205: depth = PetscMax(depth, -depth);
206: PetscCall(DMPlexGetMaxSizes(dm, &maxC, &maxS));
207: maxP = maxS * maxC;
208: /* Adjacency can be as large as supp(cl(cell)) or cl(supp(vertex)),
209: supp(cell) + supp(maxC faces) + supp(maxC^2 edges) + supp(maxC^3 vertices)
210: = 0 + maxS*maxC + maxS^2*maxC^2 + maxS^3*maxC^3
211: = \sum^d_{i=0} (maxS*maxC)^i - 1
212: = ((maxS*maxC)^{d+1} - 1) / (maxS*maxC - 1) - 1
213: We could improve this by getting the max by strata:
214: supp[d](cell) + supp[d-1](maxC[d] faces) + supp[1](maxC[d]*maxC[d-1] edges) + supp[0](maxC[d]*maxC[d-1]*maxC[d-2] vertices)
215: = 0 + maxS[d-1]*maxC[d] + maxS[1]*maxC[d]*maxC[d-1] + maxS[0]*maxC[d]*maxC[d-1]*maxC[d-2]
216: and the same with S and C reversed
217: */
218: if ((depth == 3 && maxP > 200) || (depth == 2 && maxP > 580)) asiz = pEnd - pStart;
219: else asiz = (maxP > 1) ? ((PetscPowInt(maxP, depth + 1) - 1) / (maxP - 1)) : depth + 1;
220: asiz *= maxAnchors;
221: asiz = PetscMin(asiz, pEnd - pStart);
222: PetscCall(PetscMalloc1(asiz, adj));
223: }
224: if (*adjSize < 0) *adjSize = asiz;
225: maxAdjSize = *adjSize;
226: if (mesh->useradjacency) {
227: PetscCall((*mesh->useradjacency)(dm, p, adjSize, *adj, mesh->useradjacencyctx));
228: } else if (useTransitiveClosure) {
229: PetscCall(DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj));
230: } else if (useCone) {
231: PetscCall(DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj));
232: } else {
233: PetscCall(DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj));
234: }
235: if (useAnchors && aSec) {
236: PetscInt origSize = *adjSize;
237: PetscInt numAdj = origSize;
238: PetscInt i = 0, j;
239: PetscInt *orig = *adj;
241: while (i < origSize) {
242: PetscInt p = orig[i];
243: PetscInt aDof = 0;
245: if (p >= aStart && p < aEnd) PetscCall(PetscSectionGetDof(aSec, p, &aDof));
246: if (aDof) {
247: PetscInt aOff;
248: PetscInt s, q;
250: for (j = i + 1; j < numAdj; j++) orig[j - 1] = orig[j];
251: origSize--;
252: numAdj--;
253: PetscCall(PetscSectionGetOffset(aSec, p, &aOff));
254: for (s = 0; s < aDof; ++s) {
255: for (q = 0; q < numAdj || ((void)(orig[numAdj++] = anchors[aOff + s]), 0); ++q) {
256: if (anchors[aOff + s] == orig[q]) break;
257: }
258: PetscCheck(numAdj <= maxAdjSize, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
259: }
260: } else {
261: i++;
262: }
263: }
264: *adjSize = numAdj;
265: PetscCall(ISRestoreIndices(aIS, &anchors));
266: }
267: PetscFunctionReturn(PETSC_SUCCESS);
268: }
270: /*@
271: DMPlexGetAdjacency - Return all points adjacent to the given point
273: Input Parameters:
274: + dm - The `DM` object
275: - p - The point
277: Input/Output Parameters:
278: + adjSize - The maximum size of `adj` if it is non-`NULL`, or `PETSC_DETERMINE`;
279: on output the number of adjacent points
280: - adj - Either `NULL` so that the array is allocated, or an existing array with size `adjSize`;
281: on output contains the adjacent points
283: Level: advanced
285: Notes:
286: The user must `PetscFree()` the `adj` array if it was not passed in.
288: .seealso: `DMPLEX`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMCreateMatrix()`, `DMPlexPreallocateOperator()`
289: @*/
290: PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[])
291: {
292: PetscBool useCone, useClosure, useAnchors;
294: PetscFunctionBeginHot;
296: PetscAssertPointer(adjSize, 3);
297: PetscAssertPointer(adj, 4);
298: PetscCall(DMGetBasicAdjacency(dm, &useCone, &useClosure));
299: PetscCall(DMPlexGetAdjacencyUseAnchors(dm, &useAnchors));
300: PetscCall(DMPlexGetAdjacency_Internal(dm, p, useCone, useClosure, useAnchors, adjSize, adj));
301: PetscFunctionReturn(PETSC_SUCCESS);
302: }
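/*
  Example usage, a minimal sketch: let DMPlexGetAdjacency() size and allocate the array, then
  free it afterwards as required by the Notes above.

    PetscInt  adjSize = PETSC_DETERMINE;
    PetscInt *adj     = NULL;

    PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
    for (PetscInt a = 0; a < adjSize; ++a) PetscCall(PetscPrintf(PETSC_COMM_SELF, "  adjacent point %" PetscInt_FMT "\n", adj[a]));
    PetscCall(PetscFree(adj));
*/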
304: /*@
305: DMPlexCreateTwoSidedProcessSF - Create a `PetscSF` which just has process connectivity
307: Collective
309: Input Parameters:
310: + dm - The `DM`
311: . sfPoint - The `PetscSF` which encodes point connectivity
312: . rootRankSection - The number of leaves for a given root point, as produced by `DMPlexDistributeOwnership()`
313: . rootRanks - The rank of each leaf for a given root point
314: . leafRankSection - The number of processes sharing a given leaf point
315: - leafRanks - The rank of each process sharing a leaf point
317: Output Parameters:
318: + processRanks - A list of process neighbors, or `NULL`
319: - sfProcess - A `PetscSF` encoding the two-sided process connectivity, or `NULL`
321: Level: developer
323: .seealso: `DMPLEX`, `PetscSFCreate()`, `DMPlexCreateProcessSF()`
324: @*/
325: PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess)
326: {
327: const PetscSFNode *remotePoints;
328: PetscInt *localPointsNew;
329: PetscSFNode *remotePointsNew;
330: const PetscInt *nranks;
331: PetscInt *ranksNew;
332: PetscBT neighbors;
333: PetscInt pStart, pEnd, p, numLeaves, l, numNeighbors, n;
334: PetscMPIInt size, proc, rank;
336: PetscFunctionBegin;
339: if (processRanks) PetscAssertPointer(processRanks, 7);
340: if (sfProcess) PetscAssertPointer(sfProcess, 8);
341: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)dm), &size));
342: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)dm), &rank));
343: PetscCall(PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints));
344: PetscCall(PetscBTCreate(size, &neighbors));
345: PetscCall(PetscBTMemzero(size, neighbors));
346: /* Compute root-to-leaf process connectivity */
347: PetscCall(PetscSectionGetChart(rootRankSection, &pStart, &pEnd));
348: PetscCall(ISGetIndices(rootRanks, &nranks));
349: for (p = pStart; p < pEnd; ++p) {
350: PetscInt ndof, noff, n;
352: PetscCall(PetscSectionGetDof(rootRankSection, p, &ndof));
353: PetscCall(PetscSectionGetOffset(rootRankSection, p, &noff));
354: for (n = 0; n < ndof; ++n) PetscCall(PetscBTSet(neighbors, nranks[noff + n]));
355: }
356: PetscCall(ISRestoreIndices(rootRanks, &nranks));
357: /* Compute leaf-to-neighbor process connectivity */
358: PetscCall(PetscSectionGetChart(leafRankSection, &pStart, &pEnd));
359: PetscCall(ISGetIndices(leafRanks, &nranks));
360: for (p = pStart; p < pEnd; ++p) {
361: PetscInt ndof, noff, n;
363: PetscCall(PetscSectionGetDof(leafRankSection, p, &ndof));
364: PetscCall(PetscSectionGetOffset(leafRankSection, p, &noff));
365: for (n = 0; n < ndof; ++n) PetscCall(PetscBTSet(neighbors, nranks[noff + n]));
366: }
367: PetscCall(ISRestoreIndices(leafRanks, &nranks));
368: /* Compute leaf-to-root process connectivity */
369: for (l = 0; l < numLeaves; ++l) PetscCall(PetscBTSet(neighbors, remotePoints[l].rank));
370: /* Calculate edges */
371: PetscCall(PetscBTClear(neighbors, rank));
372: for (proc = 0, numNeighbors = 0; proc < size; ++proc) {
373: if (PetscBTLookup(neighbors, proc)) ++numNeighbors;
374: }
375: PetscCall(PetscMalloc1(numNeighbors, &ranksNew));
376: PetscCall(PetscMalloc1(numNeighbors, &localPointsNew));
377: PetscCall(PetscMalloc1(numNeighbors, &remotePointsNew));
378: for (proc = 0, n = 0; proc < size; ++proc) {
379: if (PetscBTLookup(neighbors, proc)) {
380: ranksNew[n] = proc;
381: localPointsNew[n] = proc;
382: remotePointsNew[n].index = rank;
383: remotePointsNew[n].rank = proc;
384: ++n;
385: }
386: }
387: PetscCall(PetscBTDestroy(&neighbors));
388: if (processRanks) PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks));
389: else PetscCall(PetscFree(ranksNew));
390: if (sfProcess) {
391: PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess));
392: PetscCall(PetscObjectSetName((PetscObject)*sfProcess, "Two-Sided Process SF"));
393: PetscCall(PetscSFSetFromOptions(*sfProcess));
394: PetscCall(PetscSFSetGraph(*sfProcess, size, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER));
395: }
396: PetscFunctionReturn(PETSC_SUCCESS);
397: }
399: /*@
400: DMPlexDistributeOwnership - Compute owner information for shared points. This provides the two-sided (root and leaf) view of the one-sided point `PetscSF`.
402: Collective
404: Input Parameter:
405: . dm - The `DM`
407: Output Parameters:
408: + rootSection - The number of leaves for a given root point
409: . rootrank - The rank of each edge into the root point
410: . leafSection - The number of processes sharing a given leaf point
411: - leafrank - The rank of each process sharing a leaf point
413: Level: developer
415: .seealso: `DMPLEX`, `DMPlexCreateOverlapLabel()`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
416: @*/
417: PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank)
418: {
419: MPI_Comm comm;
420: PetscSF sfPoint;
421: const PetscInt *rootdegree;
422: PetscInt *myrank, *remoterank;
423: PetscInt pStart, pEnd, p, nedges;
424: PetscMPIInt rank;
426: PetscFunctionBegin;
427: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
428: PetscCallMPI(MPI_Comm_rank(comm, &rank));
429: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
430: PetscCall(DMGetPointSF(dm, &sfPoint));
431: /* Compute number of leaves for each root */
432: PetscCall(PetscObjectSetName((PetscObject)rootSection, "Root Section"));
433: PetscCall(PetscSectionSetChart(rootSection, pStart, pEnd));
434: PetscCall(PetscSFComputeDegreeBegin(sfPoint, &rootdegree));
435: PetscCall(PetscSFComputeDegreeEnd(sfPoint, &rootdegree));
436: for (p = pStart; p < pEnd; ++p) PetscCall(PetscSectionSetDof(rootSection, p, rootdegree[p - pStart]));
437: PetscCall(PetscSectionSetUp(rootSection));
438: /* Gather rank of each leaf to root */
439: PetscCall(PetscSectionGetStorageSize(rootSection, &nedges));
440: PetscCall(PetscMalloc1(pEnd - pStart, &myrank));
441: PetscCall(PetscMalloc1(nedges, &remoterank));
442: for (p = 0; p < pEnd - pStart; ++p) myrank[p] = rank;
443: PetscCall(PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank));
444: PetscCall(PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank));
445: PetscCall(PetscFree(myrank));
446: PetscCall(ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank));
447: /* Distribute remote ranks to leaves */
448: PetscCall(PetscObjectSetName((PetscObject)leafSection, "Leaf Section"));
449: PetscCall(DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank));
450: PetscFunctionReturn(PETSC_SUCCESS);
451: }
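/*
  Example usage, a minimal sketch of the pairing with DMPlexCreateOverlapLabel() below
  (dm is an assumed distributed DMPLEX):

    PetscSection rootSection, leafSection;
    IS           rootrank, leafrank;
    DMLabel      ovLabel;

    PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)dm), &rootSection));
    PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)dm), &leafSection));
    PetscCall(DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank));
    PetscCall(DMPlexCreateOverlapLabel(dm, 1, rootSection, rootrank, leafSection, leafrank, &ovLabel));
    // ovLabel can now be turned into a migration SF, e.g. with DMPlexPartitionLabelCreateSF()
    PetscCall(DMLabelDestroy(&ovLabel));
    PetscCall(ISDestroy(&rootrank));
    PetscCall(ISDestroy(&leafrank));
    PetscCall(PetscSectionDestroy(&rootSection));
    PetscCall(PetscSectionDestroy(&leafSection));
*/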
453: /*@C
454: DMPlexCreateOverlapLabel - Compute a label indicating what overlap points should be sent to new processes
456: Collective
458: Input Parameters:
459: + dm - The `DM`
460: . levels - Number of overlap levels
461: . rootSection - The number of leaves for a given root point
462: . rootrank - The rank of each edge into the root point
463: . leafSection - The number of processes sharing a given leaf point
464: - leafrank - The rank of each process sharing a leaf point
466: Output Parameter:
467: . ovLabel - `DMLabel` containing remote overlap contributions as point/rank pairings
469: Level: developer
471: .seealso: `DMPLEX`, `DMPlexCreateOverlapLabelFromLabels()`, `DMPlexGetAdjacency()`, `DMPlexDistributeOwnership()`, `DMPlexDistribute()`
472: @*/
473: PetscErrorCode DMPlexCreateOverlapLabel(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
474: {
475: MPI_Comm comm;
476: DMLabel ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
477: PetscSF sfPoint;
478: const PetscSFNode *remote;
479: const PetscInt *local;
480: const PetscInt *nrank, *rrank;
481: PetscInt *adj = NULL;
482: PetscInt pStart, pEnd, p, sStart, sEnd, nleaves, l;
483: PetscMPIInt rank, size;
484: PetscBool flg;
486: PetscFunctionBegin;
487: *ovLabel = NULL;
488: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
489: PetscCallMPI(MPI_Comm_size(comm, &size));
490: PetscCallMPI(MPI_Comm_rank(comm, &rank));
491: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
492: PetscCall(DMGetPointSF(dm, &sfPoint));
493: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
494: if (!levels) {
495: /* Add owned points */
496: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel));
497: for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank));
498: PetscFunctionReturn(PETSC_SUCCESS);
499: }
500: PetscCall(PetscSectionGetChart(leafSection, &sStart, &sEnd));
501: PetscCall(PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote));
502: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank));
503: /* Handle leaves: shared with the root point */
504: for (l = 0; l < nleaves; ++l) {
505: PetscInt adjSize = PETSC_DETERMINE, a;
507: PetscCall(DMPlexGetAdjacency(dm, local ? local[l] : l, &adjSize, &adj));
508: for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank));
509: }
510: PetscCall(ISGetIndices(rootrank, &rrank));
511: PetscCall(ISGetIndices(leafrank, &nrank));
512: /* Handle roots */
513: for (p = pStart; p < pEnd; ++p) {
514: PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a;
516: if ((p >= sStart) && (p < sEnd)) {
517: /* Some leaves share a root with other leaves on different processes */
518: PetscCall(PetscSectionGetDof(leafSection, p, &neighbors));
519: if (neighbors) {
520: PetscCall(PetscSectionGetOffset(leafSection, p, &noff));
521: PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
522: for (n = 0; n < neighbors; ++n) {
523: const PetscInt remoteRank = nrank[noff + n];
525: if (remoteRank == rank) continue;
526: for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
527: }
528: }
529: }
530: /* Roots are shared with leaves */
531: PetscCall(PetscSectionGetDof(rootSection, p, &neighbors));
532: if (!neighbors) continue;
533: PetscCall(PetscSectionGetOffset(rootSection, p, &noff));
534: PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
535: for (n = 0; n < neighbors; ++n) {
536: const PetscInt remoteRank = rrank[noff + n];
538: if (remoteRank == rank) continue;
539: for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
540: }
541: }
542: PetscCall(PetscFree(adj));
543: PetscCall(ISRestoreIndices(rootrank, &rrank));
544: PetscCall(ISRestoreIndices(leafrank, &nrank));
545: /* Add additional overlap levels */
546: for (l = 1; l < levels; l++) {
547: /* Propagate point donations over SF to capture remote connections */
548: PetscCall(DMPlexPartitionLabelPropagate(dm, ovAdjByRank));
549: /* Add next level of point donations to the label */
550: PetscCall(DMPlexPartitionLabelAdjacency(dm, ovAdjByRank));
551: }
552: /* We require the closure in the overlap */
553: PetscCall(DMPlexPartitionLabelClosure(dm, ovAdjByRank));
554: PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-overlap_view", &flg));
555: if (flg) {
556: PetscViewer viewer;
557: PetscCall(PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer));
558: PetscCall(DMLabelView(ovAdjByRank, viewer));
559: }
560: /* Invert sender to receiver label */
561: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel));
562: PetscCall(DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel));
563: /* Add owned points, except for shared local points */
564: for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank));
565: for (l = 0; l < nleaves; ++l) {
566: PetscCall(DMLabelClearValue(*ovLabel, local[l], rank));
567: PetscCall(DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank));
568: }
569: /* Clean up */
570: PetscCall(DMLabelDestroy(&ovAdjByRank));
571: PetscFunctionReturn(PETSC_SUCCESS);
572: }
574: static PetscErrorCode HandlePoint_Private(DM dm, PetscInt p, PetscSection section, const PetscInt ranks[], PetscInt numExLabels, const DMLabel exLabel[], const PetscInt exValue[], DMLabel ovAdjByRank)
575: {
576: PetscInt neighbors, el;
578: PetscFunctionBegin;
579: PetscCall(PetscSectionGetDof(section, p, &neighbors));
580: if (neighbors) {
581: PetscInt *adj = NULL;
582: PetscInt adjSize = PETSC_DETERMINE, noff, n, a;
583: PetscMPIInt rank;
585: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)dm), &rank));
586: PetscCall(PetscSectionGetOffset(section, p, &noff));
587: PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
588: for (n = 0; n < neighbors; ++n) {
589: const PetscInt remoteRank = ranks[noff + n];
591: if (remoteRank == rank) continue;
592: for (a = 0; a < adjSize; ++a) {
593: PetscBool insert = PETSC_TRUE;
595: for (el = 0; el < numExLabels; ++el) {
596: PetscInt exVal;
597: PetscCall(DMLabelGetValue(exLabel[el], adj[a], &exVal));
598: if (exVal == exValue[el]) {
599: insert = PETSC_FALSE;
600: break;
601: }
602: }
603: if (insert) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
604: }
605: }
606: PetscCall(PetscFree(adj));
607: }
608: PetscFunctionReturn(PETSC_SUCCESS);
609: }
611: /*@C
612: DMPlexCreateOverlapLabelFromLabels - Compute a label indicating what overlap points should be sent to new processes
614: Collective
616: Input Parameters:
617: + dm - The `DM`
618: . numLabels - The number of labels to draw candidate points from
619: . label - An array of labels containing candidate points
620: . value - An array of label values marking the candidate points
621: . numExLabels - The number of labels to use for exclusion
622: . exLabel - An array of labels indicating points to be excluded, or `NULL`
623: . exValue - An array of label values to be excluded, or `NULL`
624: . rootSection - The number of leaves for a given root point
625: . rootrank - The rank of each edge into the root point
626: . leafSection - The number of processes sharing a given leaf point
627: - leafrank - The rank of each process sharing a leaf point
629: Output Parameter:
630: . ovLabel - `DMLabel` containing remote overlap contributions as point/rank pairings
632: Level: developer
634: Note:
635: The candidate points are only added to the overlap if they are adjacent to a shared point
637: .seealso: `DMPLEX`, `DMPlexCreateOverlapLabel()`, `DMPlexGetAdjacency()`, `DMPlexDistributeOwnership()`, `DMPlexDistribute()`
638: @*/
639: PetscErrorCode DMPlexCreateOverlapLabelFromLabels(DM dm, PetscInt numLabels, const DMLabel label[], const PetscInt value[], PetscInt numExLabels, const DMLabel exLabel[], const PetscInt exValue[], PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
640: {
641: MPI_Comm comm;
642: DMLabel ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
643: PetscSF sfPoint;
644: const PetscSFNode *remote;
645: const PetscInt *local;
646: const PetscInt *nrank, *rrank;
647: PetscInt *adj = NULL;
648: PetscInt pStart, pEnd, p, sStart, sEnd, nleaves, l, el;
649: PetscMPIInt rank, size;
650: PetscBool flg;
652: PetscFunctionBegin;
653: *ovLabel = NULL;
654: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
655: PetscCallMPI(MPI_Comm_size(comm, &size));
656: PetscCallMPI(MPI_Comm_rank(comm, &rank));
657: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
658: PetscCall(DMGetPointSF(dm, &sfPoint));
659: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
660: PetscCall(PetscSectionGetChart(leafSection, &sStart, &sEnd));
661: PetscCall(PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote));
662: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank));
663: PetscCall(ISGetIndices(rootrank, &rrank));
664: PetscCall(ISGetIndices(leafrank, &nrank));
665: for (l = 0; l < numLabels; ++l) {
666: IS valIS;
667: const PetscInt *points;
668: PetscInt n;
670: PetscCall(DMLabelGetStratumIS(label[l], value[l], &valIS));
671: if (!valIS) continue;
672: PetscCall(ISGetIndices(valIS, &points));
673: PetscCall(ISGetLocalSize(valIS, &n));
674: for (PetscInt i = 0; i < n; ++i) {
675: const PetscInt p = points[i];
677: if ((p >= sStart) && (p < sEnd)) {
678: PetscInt loc, adjSize = PETSC_DETERMINE;
680: /* Handle leaves: shared with the root point */
681: if (local) PetscCall(PetscFindInt(p, nleaves, local, &loc));
682: else loc = (p >= 0 && p < nleaves) ? p : -1;
683: if (loc >= 0) {
684: const PetscInt remoteRank = remote[loc].rank;
686: PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
687: for (PetscInt a = 0; a < adjSize; ++a) {
688: PetscBool insert = PETSC_TRUE;
690: for (el = 0; el < numExLabels; ++el) {
691: PetscInt exVal;
692: PetscCall(DMLabelGetValue(exLabel[el], adj[a], &exVal));
693: if (exVal == exValue[el]) {
694: insert = PETSC_FALSE;
695: break;
696: }
697: }
698: if (insert) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
699: }
700: }
701: /* Some leaves share a root with other leaves on different processes */
702: PetscCall(HandlePoint_Private(dm, p, leafSection, nrank, numExLabels, exLabel, exValue, ovAdjByRank));
703: }
704: /* Roots are shared with leaves */
705: PetscCall(HandlePoint_Private(dm, p, rootSection, rrank, numExLabels, exLabel, exValue, ovAdjByRank));
706: }
707: PetscCall(ISRestoreIndices(valIS, &points));
708: PetscCall(ISDestroy(&valIS));
709: }
710: PetscCall(PetscFree(adj));
711: PetscCall(ISRestoreIndices(rootrank, &rrank));
712: PetscCall(ISRestoreIndices(leafrank, &nrank));
713: /* We require the closure in the overlap */
714: PetscCall(DMPlexPartitionLabelClosure(dm, ovAdjByRank));
715: PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-overlap_view", &flg));
716: if (flg) {
717: PetscViewer viewer;
718: PetscCall(PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer));
719: PetscCall(DMLabelView(ovAdjByRank, viewer));
720: }
721: /* Invert sender to receiver label */
722: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel));
723: PetscCall(DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel));
724: /* Add owned points, except for shared local points */
725: for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank));
726: for (l = 0; l < nleaves; ++l) {
727: PetscCall(DMLabelClearValue(*ovLabel, local[l], rank));
728: PetscCall(DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank));
729: }
730: /* Clean up */
731: PetscCall(DMLabelDestroy(&ovAdjByRank));
732: PetscFunctionReturn(PETSC_SUCCESS);
733: }
735: /*@
736: DMPlexCreateOverlapMigrationSF - Create a `PetscSF` describing the new mesh distribution that realizes the overlap described by the input `PetscSF`
738: Collective
740: Input Parameters:
741: + dm - The `DM`
742: - overlapSF - The `PetscSF` mapping ghost points in overlap to owner points on other processes
744: Output Parameter:
745: . migrationSF - A `PetscSF` that maps original points in old locations to points in new locations
747: Level: developer
749: .seealso: `DMPLEX`, `DMPlexCreateOverlapLabel()`, `DMPlexDistributeOverlap()`, `DMPlexDistribute()`
750: @*/
751: PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF)
752: {
753: MPI_Comm comm;
754: PetscMPIInt rank, size;
755: PetscInt d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
756: PetscInt *pointDepths, *remoteDepths, *ilocal;
757: PetscInt *depthRecv, *depthShift, *depthIdx;
758: PetscSFNode *iremote;
759: PetscSF pointSF;
760: const PetscInt *sharedLocal;
761: const PetscSFNode *overlapRemote, *sharedRemote;
763: PetscFunctionBegin;
765: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
766: PetscCallMPI(MPI_Comm_rank(comm, &rank));
767: PetscCallMPI(MPI_Comm_size(comm, &size));
768: PetscCall(DMGetDimension(dm, &dim));
770: /* Before building the migration SF we need to know the new stratum offsets */
771: PetscCall(PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote));
772: PetscCall(PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths));
773: for (d = 0; d < dim + 1; d++) {
774: PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
775: for (p = pStart; p < pEnd; p++) pointDepths[p] = d;
776: }
777: for (p = 0; p < nleaves; p++) remoteDepths[p] = -1;
778: PetscCall(PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths, MPI_REPLACE));
779: PetscCall(PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths, MPI_REPLACE));
781: /* Count received points in each stratum and compute the internal strata shift */
782: PetscCall(PetscMalloc3(dim + 1, &depthRecv, dim + 1, &depthShift, dim + 1, &depthIdx));
783: for (d = 0; d < dim + 1; d++) depthRecv[d] = 0;
784: for (p = 0; p < nleaves; p++) depthRecv[remoteDepths[p]]++;
785: depthShift[dim] = 0;
786: for (d = 0; d < dim; d++) depthShift[d] = depthRecv[dim];
787: for (d = 1; d < dim; d++) depthShift[d] += depthRecv[0];
788: for (d = dim - 2; d > 0; d--) depthShift[d] += depthRecv[d + 1];
789: for (d = 0; d < dim + 1; d++) {
790: PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
791: depthIdx[d] = pStart + depthShift[d];
792: }
794: /* From the overlap SF, build an SF that describes the full overlap migration SF */
795: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
796: newLeaves = pEnd - pStart + nleaves;
797: PetscCall(PetscMalloc1(newLeaves, &ilocal));
798: PetscCall(PetscMalloc1(newLeaves, &iremote));
799: /* First map local points to themselves */
800: for (d = 0; d < dim + 1; d++) {
801: PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
802: for (p = pStart; p < pEnd; p++) {
803: point = p + depthShift[d];
804: ilocal[point] = point;
805: iremote[point].index = p;
806: iremote[point].rank = rank;
807: depthIdx[d]++;
808: }
809: }
811: /* Add in the remote roots for currently shared points */
812: PetscCall(DMGetPointSF(dm, &pointSF));
813: PetscCall(PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote));
814: for (d = 0; d < dim + 1; d++) {
815: PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
816: for (p = 0; p < numSharedPoints; p++) {
817: if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
818: point = sharedLocal[p] + depthShift[d];
819: iremote[point].index = sharedRemote[p].index;
820: iremote[point].rank = sharedRemote[p].rank;
821: }
822: }
823: }
825: /* Now add the incoming overlap points */
826: for (p = 0; p < nleaves; p++) {
827: point = depthIdx[remoteDepths[p]];
828: ilocal[point] = point;
829: iremote[point].index = overlapRemote[p].index;
830: iremote[point].rank = overlapRemote[p].rank;
831: depthIdx[remoteDepths[p]]++;
832: }
833: PetscCall(PetscFree2(pointDepths, remoteDepths));
835: PetscCall(PetscSFCreate(comm, migrationSF));
836: PetscCall(PetscObjectSetName((PetscObject)*migrationSF, "Overlap Migration SF"));
837: PetscCall(PetscSFSetFromOptions(*migrationSF));
838: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
839: PetscCall(PetscSFSetGraph(*migrationSF, pEnd - pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER));
841: PetscCall(PetscFree3(depthRecv, depthShift, depthIdx));
842: PetscFunctionReturn(PETSC_SUCCESS);
843: }
845: /*@
846: DMPlexStratifyMigrationSF - Rearrange the leaves of a migration `PetscSF` for stratification.
848: Input Parameters:
849: + dm - The `DM`
850: - sf - A star forest with non-ordered leaves, usually defining a `DM` point migration
852: Output Parameter:
853: . migrationSF - A star forest with added leaf indirection that ensures the resulting `DM` is stratified
855: Level: developer
857: Note:
858: This lexicographically sorts by (depth, cellType)
860: .seealso: `DMPLEX`, `DMPlexPartitionLabelCreateSF()`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
861: @*/
862: PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF)
863: {
864: MPI_Comm comm;
865: PetscMPIInt rank, size;
866: PetscInt d, ldepth, depth, dim, p, pStart, pEnd, nroots, nleaves;
867: PetscSFNode *pointDepths, *remoteDepths;
868: PetscInt *ilocal;
869: PetscInt *depthRecv, *depthShift, *depthIdx;
870: PetscInt *ctRecv, *ctShift, *ctIdx;
871: const PetscSFNode *iremote;
873: PetscFunctionBegin;
875: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
876: PetscCallMPI(MPI_Comm_rank(comm, &rank));
877: PetscCallMPI(MPI_Comm_size(comm, &size));
878: PetscCall(DMPlexGetDepth(dm, &ldepth));
879: PetscCall(DMGetDimension(dm, &dim));
880: PetscCallMPI(MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm));
881: PetscCheck(!(ldepth >= 0) || !(depth != ldepth), PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %" PetscInt_FMT " != %" PetscInt_FMT, ldepth, depth);
882: PetscCall(PetscLogEventBegin(DMPLEX_PartStratSF, dm, 0, 0, 0));
884: /* Before building the migration SF we need to know the new stratum offsets */
885: PetscCall(PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote));
886: PetscCall(PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths));
887: for (d = 0; d < depth + 1; ++d) {
888: PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
889: for (p = pStart; p < pEnd; ++p) {
890: DMPolytopeType ct;
892: PetscCall(DMPlexGetCellType(dm, p, &ct));
893: pointDepths[p].index = d;
894: pointDepths[p].rank = ct;
895: }
896: }
897: for (p = 0; p < nleaves; ++p) {
898: remoteDepths[p].index = -1;
899: remoteDepths[p].rank = -1;
900: }
901: PetscCall(PetscSFBcastBegin(sf, MPIU_SF_NODE, pointDepths, remoteDepths, MPI_REPLACE));
902: PetscCall(PetscSFBcastEnd(sf, MPIU_SF_NODE, pointDepths, remoteDepths, MPI_REPLACE));
903: /* Count received points in each stratum and compute the internal strata shift */
904: PetscCall(PetscCalloc6(depth + 1, &depthRecv, depth + 1, &depthShift, depth + 1, &depthIdx, DM_NUM_POLYTOPES, &ctRecv, DM_NUM_POLYTOPES, &ctShift, DM_NUM_POLYTOPES, &ctIdx));
905: for (p = 0; p < nleaves; ++p) {
906: if (remoteDepths[p].rank < 0) {
907: ++depthRecv[remoteDepths[p].index];
908: } else {
909: ++ctRecv[remoteDepths[p].rank];
910: }
911: }
912: {
913: PetscInt depths[4], dims[4], shift = 0, i, c;
915: /* Cells (depth), Vertices (0), Faces (depth-1), Edges (1)
916: Consider DM_POLYTOPE_FV_GHOST, DM_POLYTOPE_INTERIOR_GHOST and DM_POLYTOPE_UNKNOWN_CELL as cells
917: */
918: depths[0] = depth;
919: depths[1] = 0;
920: depths[2] = depth - 1;
921: depths[3] = 1;
922: dims[0] = dim;
923: dims[1] = 0;
924: dims[2] = dim - 1;
925: dims[3] = 1;
926: for (i = 0; i <= depth; ++i) {
927: const PetscInt dep = depths[i];
928: const PetscInt dim = dims[i];
930: for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
931: if (DMPolytopeTypeGetDim((DMPolytopeType)c) != dim && !(i == 0 && (c == DM_POLYTOPE_FV_GHOST || c == DM_POLYTOPE_INTERIOR_GHOST || c == DM_POLYTOPE_UNKNOWN_CELL))) continue;
932: ctShift[c] = shift;
933: shift += ctRecv[c];
934: }
935: depthShift[dep] = shift;
936: shift += depthRecv[dep];
937: }
938: for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
939: const PetscInt ctDim = DMPolytopeTypeGetDim((DMPolytopeType)c);
941: if ((ctDim < 0 || ctDim > dim) && (c != DM_POLYTOPE_FV_GHOST && c != DM_POLYTOPE_INTERIOR_GHOST && c != DM_POLYTOPE_UNKNOWN_CELL)) {
942: ctShift[c] = shift;
943: shift += ctRecv[c];
944: }
945: }
946: }
947: /* Derive a new local permutation based on stratified indices */
948: PetscCall(PetscMalloc1(nleaves, &ilocal));
949: for (p = 0; p < nleaves; ++p) {
950: const DMPolytopeType ct = (DMPolytopeType)remoteDepths[p].rank;
952: ilocal[p] = ctShift[ct] + ctIdx[ct];
953: ++ctIdx[ct];
954: }
955: PetscCall(PetscSFCreate(comm, migrationSF));
956: PetscCall(PetscObjectSetName((PetscObject)*migrationSF, "Migration SF"));
957: PetscCall(PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, (PetscSFNode *)iremote, PETSC_COPY_VALUES));
958: PetscCall(PetscFree2(pointDepths, remoteDepths));
959: PetscCall(PetscFree6(depthRecv, depthShift, depthIdx, ctRecv, ctShift, ctIdx));
960: PetscCall(PetscLogEventEnd(DMPLEX_PartStratSF, dm, 0, 0, 0));
961: PetscFunctionReturn(PETSC_SUCCESS);
962: }
964: /*@
965: DMPlexDistributeField - Distribute field data to match a given `PetscSF`, usually the `PetscSF` from mesh distribution
967: Collective
969: Input Parameters:
970: + dm - The `DMPLEX` object
971: . pointSF - The `PetscSF` describing the communication pattern
972: . originalSection - The `PetscSection` for existing data layout
973: - originalVec - The existing data in a local vector
975: Output Parameters:
976: + newSection - The `PetscSection` describing the new data layout
977: - newVec - The new data in a local vector
979: Level: developer
981: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexDistributeFieldIS()`, `DMPlexDistributeData()`
982: @*/
983: PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec)
984: {
985: PetscSF fieldSF;
986: PetscInt *remoteOffsets, fieldSize;
987: PetscScalar *originalValues, *newValues;
989: PetscFunctionBegin;
990: PetscCall(PetscLogEventBegin(DMPLEX_DistributeField, dm, 0, 0, 0));
991: PetscCall(PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection));
993: PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize));
994: PetscCall(VecSetSizes(newVec, fieldSize, PETSC_DETERMINE));
995: PetscCall(VecSetType(newVec, dm->vectype));
997: PetscCall(VecGetArray(originalVec, &originalValues));
998: PetscCall(VecGetArray(newVec, &newValues));
999: PetscCall(PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF));
1000: PetscCall(PetscFree(remoteOffsets));
1001: PetscCall(PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues, MPI_REPLACE));
1002: PetscCall(PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues, MPI_REPLACE));
1003: PetscCall(PetscSFDestroy(&fieldSF));
1004: PetscCall(VecRestoreArray(newVec, &newValues));
1005: PetscCall(VecRestoreArray(originalVec, &originalValues));
1006: PetscCall(PetscLogEventEnd(DMPLEX_DistributeField, dm, 0, 0, 0));
1007: PetscFunctionReturn(PETSC_SUCCESS);
1008: }
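/*
  Example usage, a minimal sketch (migrationSF, origSection and origVec are assumed to be an
  existing point migration SF and the matching original layout/data, as in
  DMPlexDistributeCoordinates() further below):

    PetscSection newSection;
    Vec          newVec;

    PetscCall(PetscSectionCreate(PETSC_COMM_SELF, &newSection));
    PetscCall(VecCreate(PETSC_COMM_SELF, &newVec));
    PetscCall(DMPlexDistributeField(dm, migrationSF, origSection, origVec, newSection, newVec));
    // newSection and newVec now hold the layout and values on the redistributed points
*/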
1010: /*@
1011: DMPlexDistributeFieldIS - Distribute field data to match a given `PetscSF`, usually the `PetscSF` from mesh distribution
1013: Collective
1015: Input Parameters:
1016: + dm - The `DMPLEX` object
1017: . pointSF - The `PetscSF` describing the communication pattern
1018: . originalSection - The `PetscSection` for existing data layout
1019: - originalIS - The existing data
1021: Output Parameters:
1022: + newSection - The `PetscSection` describing the new data layout
1023: - newIS - The new data
1025: Level: developer
1027: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexDistributeField()`, `DMPlexDistributeData()`
1028: @*/
1029: PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS)
1030: {
1031: PetscSF fieldSF;
1032: PetscInt *newValues, *remoteOffsets, fieldSize;
1033: const PetscInt *originalValues;
1035: PetscFunctionBegin;
1036: PetscCall(PetscLogEventBegin(DMPLEX_DistributeField, dm, 0, 0, 0));
1037: PetscCall(PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection));
1039: PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize));
1040: PetscCall(PetscMalloc1(fieldSize, &newValues));
1042: PetscCall(ISGetIndices(originalIS, &originalValues));
1043: PetscCall(PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF));
1044: PetscCall(PetscFree(remoteOffsets));
1045: PetscCall(PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *)originalValues, newValues, MPI_REPLACE));
1046: PetscCall(PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *)originalValues, newValues, MPI_REPLACE));
1047: PetscCall(PetscSFDestroy(&fieldSF));
1048: PetscCall(ISRestoreIndices(originalIS, &originalValues));
1049: PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS));
1050: PetscCall(PetscLogEventEnd(DMPLEX_DistributeField, dm, 0, 0, 0));
1051: PetscFunctionReturn(PETSC_SUCCESS);
1052: }
1054: /*@
1055: DMPlexDistributeData - Distribute field data to match a given `PetscSF`, usually the `PetscSF` from mesh distribution
1057: Collective
1059: Input Parameters:
1060: + dm - The `DMPLEX` object
1061: . pointSF - The `PetscSF` describing the communication pattern
1062: . originalSection - The `PetscSection` for existing data layout
1063: . datatype - The type of data
1064: - originalData - The existing data
1066: Output Parameters:
1067: + newSection - The `PetscSection` describing the new data layout
1068: - newData - The new data
1070: Level: developer
1072: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexDistributeField()`
1073: @*/
1074: PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData)
1075: {
1076: PetscSF fieldSF;
1077: PetscInt *remoteOffsets, fieldSize;
1078: PetscMPIInt dataSize;
1080: PetscFunctionBegin;
1081: PetscCall(PetscLogEventBegin(DMPLEX_DistributeData, dm, 0, 0, 0));
1082: PetscCall(PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection));
1084: PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize));
1085: PetscCallMPI(MPI_Type_size(datatype, &dataSize));
1086: PetscCall(PetscMalloc(fieldSize * dataSize, newData));
1088: PetscCall(PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF));
1089: PetscCall(PetscFree(remoteOffsets));
1090: PetscCall(PetscSFBcastBegin(fieldSF, datatype, originalData, *newData, MPI_REPLACE));
1091: PetscCall(PetscSFBcastEnd(fieldSF, datatype, originalData, *newData, MPI_REPLACE));
1092: PetscCall(PetscSFDestroy(&fieldSF));
1093: PetscCall(PetscLogEventEnd(DMPLEX_DistributeData, dm, 0, 0, 0));
1094: PetscFunctionReturn(PETSC_SUCCESS);
1095: }
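/*
  Example usage, a minimal sketch distributing one PetscInt of data per point (migrationSF,
  origSection and origData are assumed inputs matching the old layout):

    PetscSection newSection;
    PetscInt    *newData;

    PetscCall(PetscSectionCreate(PETSC_COMM_SELF, &newSection));
    PetscCall(DMPlexDistributeData(dm, migrationSF, origSection, MPIU_INT, origData, newSection, (void **)&newData));
    // use newData, then release it
    PetscCall(PetscFree(newData));
    PetscCall(PetscSectionDestroy(&newSection));
*/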
1097: static PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
1098: {
1099: DM_Plex *pmesh = (DM_Plex *)dmParallel->data;
1100: MPI_Comm comm;
1101: PetscSF coneSF;
1102: PetscSection originalConeSection, newConeSection;
1103: PetscInt *remoteOffsets, *cones, *globCones, *newCones, newConesSize;
1104: PetscBool flg;
1106: PetscFunctionBegin;
1109: PetscCall(PetscLogEventBegin(DMPLEX_DistributeCones, dm, 0, 0, 0));
1110: /* Distribute cone section */
1111: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1112: PetscCall(DMPlexGetConeSection(dm, &originalConeSection));
1113: PetscCall(DMPlexGetConeSection(dmParallel, &newConeSection));
1114: PetscCall(PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection));
1115: PetscCall(DMSetUp(dmParallel));
1116: /* Communicate and renumber cones */
1117: PetscCall(PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF));
1118: PetscCall(PetscFree(remoteOffsets));
1119: PetscCall(DMPlexGetCones(dm, &cones));
1120: if (original) {
1121: PetscInt numCones;
1123: PetscCall(PetscSectionGetStorageSize(originalConeSection, &numCones));
1124: PetscCall(PetscMalloc1(numCones, &globCones));
1125: PetscCall(ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones));
1126: } else {
1127: globCones = cones;
1128: }
1129: PetscCall(DMPlexGetCones(dmParallel, &newCones));
1130: PetscCall(PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones, MPI_REPLACE));
1131: PetscCall(PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones, MPI_REPLACE));
1132: if (original) PetscCall(PetscFree(globCones));
1133: PetscCall(PetscSectionGetStorageSize(newConeSection, &newConesSize));
1134: PetscCall(ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones));
1135: if (PetscDefined(USE_DEBUG)) {
1136: PetscInt p;
1137: PetscBool valid = PETSC_TRUE;
1138: for (p = 0; p < newConesSize; ++p) {
1139: if (newCones[p] < 0) {
1140: valid = PETSC_FALSE;
1141: PetscCall(PetscPrintf(PETSC_COMM_SELF, "[%d] Point %" PetscInt_FMT " not in overlap SF\n", PetscGlobalRank, p));
1142: }
1143: }
1144: PetscCheck(valid, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
1145: }
1146: PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-cones_view", &flg));
1147: if (flg) {
1148: PetscCall(PetscPrintf(comm, "Serial Cone Section:\n"));
1149: PetscCall(PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_(comm)));
1150: PetscCall(PetscPrintf(comm, "Parallel Cone Section:\n"));
1151: PetscCall(PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_(comm)));
1152: PetscCall(PetscSFView(coneSF, NULL));
1153: }
1154: PetscCall(DMPlexGetConeOrientations(dm, &cones));
1155: PetscCall(DMPlexGetConeOrientations(dmParallel, &newCones));
1156: PetscCall(PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones, MPI_REPLACE));
1157: PetscCall(PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones, MPI_REPLACE));
1158: PetscCall(PetscSFDestroy(&coneSF));
1159: PetscCall(PetscLogEventEnd(DMPLEX_DistributeCones, dm, 0, 0, 0));
1160: /* Create supports and stratify DMPlex */
1161: {
1162: PetscInt pStart, pEnd;
1164: PetscCall(PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd));
1165: PetscCall(PetscSectionSetChart(pmesh->supportSection, pStart, pEnd));
1166: }
1167: PetscCall(DMPlexSymmetrize(dmParallel));
1168: PetscCall(DMPlexStratify(dmParallel));
1169: {
1170: PetscBool useCone, useClosure, useAnchors;
1172: PetscCall(DMGetBasicAdjacency(dm, &useCone, &useClosure));
1173: PetscCall(DMSetBasicAdjacency(dmParallel, useCone, useClosure));
1174: PetscCall(DMPlexGetAdjacencyUseAnchors(dm, &useAnchors));
1175: PetscCall(DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors));
1176: }
1177: PetscFunctionReturn(PETSC_SUCCESS);
1178: }
1180: static PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel)
1181: {
1182: MPI_Comm comm;
1183: DM cdm, cdmParallel;
1184: PetscSection originalCoordSection, newCoordSection;
1185: Vec originalCoordinates, newCoordinates;
1186: PetscInt bs;
1187: const char *name;
1188: const PetscReal *maxCell, *Lstart, *L;
1190: PetscFunctionBegin;
1194: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1195: PetscCall(DMGetCoordinateDM(dm, &cdm));
1196: PetscCall(DMGetCoordinateDM(dmParallel, &cdmParallel));
1197: PetscCall(DMCopyDisc(cdm, cdmParallel));
1198: PetscCall(DMGetCoordinateSection(dm, &originalCoordSection));
1199: PetscCall(DMGetCoordinateSection(dmParallel, &newCoordSection));
1200: PetscCall(DMGetCoordinatesLocal(dm, &originalCoordinates));
1201: if (originalCoordinates) {
1202: PetscCall(VecCreate(PETSC_COMM_SELF, &newCoordinates));
1203: PetscCall(PetscObjectGetName((PetscObject)originalCoordinates, &name));
1204: PetscCall(PetscObjectSetName((PetscObject)newCoordinates, name));
1206: PetscCall(DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates));
1207: PetscCall(DMSetCoordinatesLocal(dmParallel, newCoordinates));
1208: PetscCall(VecGetBlockSize(originalCoordinates, &bs));
1209: PetscCall(VecSetBlockSize(newCoordinates, bs));
1210: PetscCall(VecDestroy(&newCoordinates));
1211: }
1213: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1214: PetscCall(DMGetPeriodicity(dm, &maxCell, &Lstart, &L));
1215: PetscCall(DMSetPeriodicity(dmParallel, maxCell, Lstart, L));
1216: PetscCall(DMGetCellCoordinateDM(dm, &cdm));
1217: if (cdm) {
1218: PetscCall(DMClone(dmParallel, &cdmParallel));
1219: PetscCall(DMSetCellCoordinateDM(dmParallel, cdmParallel));
1220: PetscCall(DMCopyDisc(cdm, cdmParallel));
1221: PetscCall(DMDestroy(&cdmParallel));
1222: PetscCall(DMGetCellCoordinateSection(dm, &originalCoordSection));
1223: PetscCall(DMGetCellCoordinatesLocal(dm, &originalCoordinates));
1224: PetscCall(PetscSectionCreate(comm, &newCoordSection));
1225: if (originalCoordinates) {
1226: PetscCall(VecCreate(PETSC_COMM_SELF, &newCoordinates));
1227: PetscCall(PetscObjectGetName((PetscObject)originalCoordinates, &name));
1228: PetscCall(PetscObjectSetName((PetscObject)newCoordinates, name));
1230: PetscCall(DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates));
1231: PetscCall(VecGetBlockSize(originalCoordinates, &bs));
1232: PetscCall(VecSetBlockSize(newCoordinates, bs));
1233: PetscCall(DMSetCellCoordinateSection(dmParallel, bs, newCoordSection));
1234: PetscCall(DMSetCellCoordinatesLocal(dmParallel, newCoordinates));
1235: PetscCall(VecDestroy(&newCoordinates));
1236: }
1237: PetscCall(PetscSectionDestroy(&newCoordSection));
1238: }
1239: PetscFunctionReturn(PETSC_SUCCESS);
1240: }
1242: static PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel)
1243: {
1244: DM_Plex *mesh = (DM_Plex *)dm->data;
1245: MPI_Comm comm;
1246: DMLabel depthLabel;
1247: PetscMPIInt rank;
1248: PetscInt depth, d, numLabels, numLocalLabels, l;
1249: PetscBool hasLabels = PETSC_FALSE, lsendDepth, sendDepth;
1250: PetscObjectState depthState = -1;
1252: PetscFunctionBegin;
1256: PetscCall(PetscLogEventBegin(DMPLEX_DistributeLabels, dm, 0, 0, 0));
1257: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1258: PetscCallMPI(MPI_Comm_rank(comm, &rank));
1260: /* If the user has changed the depth label, communicate it instead */
1261: PetscCall(DMPlexGetDepth(dm, &depth));
1262: PetscCall(DMPlexGetDepthLabel(dm, &depthLabel));
1263: if (depthLabel) PetscCall(PetscObjectStateGet((PetscObject)depthLabel, &depthState));
1264: lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE;
1265: PetscCallMPI(MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPIU_BOOL, MPI_LOR, comm));
1266: if (sendDepth) {
1267: PetscCall(DMPlexGetDepthLabel(dmParallel, &dmParallel->depthLabel));
1268: PetscCall(DMRemoveLabelBySelf(dmParallel, &dmParallel->depthLabel, PETSC_FALSE));
1269: }
1270: /* Everyone must have either the same number of labels, or none */
1271: PetscCall(DMGetNumLabels(dm, &numLocalLabels));
1272: numLabels = numLocalLabels;
1273: PetscCallMPI(MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm));
1274: if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
1275: for (l = 0; l < numLabels; ++l) {
1276: DMLabel label = NULL, labelNew = NULL;
1277: PetscBool isDepth, lisOutput = PETSC_TRUE, isOutput;
1278: const char *name = NULL;
1280: if (hasLabels) {
1281: PetscCall(DMGetLabelByNum(dm, l, &label));
1282: /* Skip "depth" because it is recreated */
1283: PetscCall(PetscObjectGetName((PetscObject)label, &name));
1284: PetscCall(PetscStrcmp(name, "depth", &isDepth));
1285: } else {
1286: isDepth = PETSC_FALSE;
1287: }
1288: PetscCallMPI(MPI_Bcast(&isDepth, 1, MPIU_BOOL, 0, comm));
1289: if (isDepth && !sendDepth) continue;
1290: PetscCall(DMLabelDistribute(label, migrationSF, &labelNew));
1291: if (isDepth) {
1292: /* Put in any missing strata which can occur if users are managing the depth label themselves */
1293: PetscInt gdepth;
1295: PetscCallMPI(MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm));
1296: PetscCheck(!(depth >= 0) || !(gdepth != depth), PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %" PetscInt_FMT " != %" PetscInt_FMT, depth, gdepth);
1297: for (d = 0; d <= gdepth; ++d) {
1298: PetscBool has;
1300: PetscCall(DMLabelHasStratum(labelNew, d, &has));
1301: if (!has) PetscCall(DMLabelAddStratum(labelNew, d));
1302: }
1303: }
1304: PetscCall(DMAddLabel(dmParallel, labelNew));
1305: /* Put the output flag in the new label */
1306: if (hasLabels) PetscCall(DMGetLabelOutput(dm, name, &lisOutput));
1307: PetscCallMPI(MPIU_Allreduce(&lisOutput, &isOutput, 1, MPIU_BOOL, MPI_LAND, comm));
1308: PetscCall(PetscObjectGetName((PetscObject)labelNew, &name));
1309: PetscCall(DMSetLabelOutput(dmParallel, name, isOutput));
1310: PetscCall(DMLabelDestroy(&labelNew));
1311: }
1312: {
1313: DMLabel ctLabel;
1315: // Reset label for fast lookup
1316: PetscCall(DMPlexGetCellTypeLabel(dmParallel, &ctLabel));
1317: PetscCall(DMLabelMakeAllInvalid_Internal(ctLabel));
1318: }
1319: PetscCall(PetscLogEventEnd(DMPLEX_DistributeLabels, dm, 0, 0, 0));
1320: PetscFunctionReturn(PETSC_SUCCESS);
1321: }
1323: static PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
1324: {
1325: DM_Plex *mesh = (DM_Plex *)dm->data;
1326: DM_Plex *pmesh = (DM_Plex *)dmParallel->data;
1327: MPI_Comm comm;
1328: DM refTree;
1329: PetscSection origParentSection, newParentSection;
1330: PetscInt *origParents, *origChildIDs;
1331: PetscBool flg;
1333: PetscFunctionBegin;
1336: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1338: /* Set up tree */
1339: PetscCall(DMPlexGetReferenceTree(dm, &refTree));
1340: PetscCall(DMPlexSetReferenceTree(dmParallel, refTree));
1341: PetscCall(DMPlexGetTree(dm, &origParentSection, &origParents, &origChildIDs, NULL, NULL));
1342: if (origParentSection) {
1343: PetscInt pStart, pEnd;
1344: PetscInt *newParents, *newChildIDs, *globParents;
1345: PetscInt *remoteOffsetsParents, newParentSize;
1346: PetscSF parentSF;
1348: PetscCall(DMPlexGetChart(dmParallel, &pStart, &pEnd));
1349: PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel), &newParentSection));
1350: PetscCall(PetscSectionSetChart(newParentSection, pStart, pEnd));
1351: PetscCall(PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection));
1352: PetscCall(PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF));
1353: PetscCall(PetscFree(remoteOffsetsParents));
1354: PetscCall(PetscSectionGetStorageSize(newParentSection, &newParentSize));
1355: PetscCall(PetscMalloc2(newParentSize, &newParents, newParentSize, &newChildIDs));
1356: if (original) {
1357: PetscInt numParents;
1359: PetscCall(PetscSectionGetStorageSize(origParentSection, &numParents));
1360: PetscCall(PetscMalloc1(numParents, &globParents));
1361: PetscCall(ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents));
1362: } else {
1363: globParents = origParents;
1364: }
1365: PetscCall(PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents, MPI_REPLACE));
1366: PetscCall(PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents, MPI_REPLACE));
1367: if (original) PetscCall(PetscFree(globParents));
1368: PetscCall(PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs, MPI_REPLACE));
1369: PetscCall(PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs, MPI_REPLACE));
1370: PetscCall(ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents));
1371: if (PetscDefined(USE_DEBUG)) {
1372: PetscInt p;
1373: PetscBool valid = PETSC_TRUE;
1374: for (p = 0; p < newParentSize; ++p) {
1375: if (newParents[p] < 0) {
1376: valid = PETSC_FALSE;
1377: PetscCall(PetscPrintf(PETSC_COMM_SELF, "Point %" PetscInt_FMT " not in overlap SF\n", p));
1378: }
1379: }
1380: PetscCheck(valid, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
1381: }
1382: PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-parents_view", &flg));
1383: if (flg) {
1384: PetscCall(PetscPrintf(comm, "Serial Parent Section: \n"));
1385: PetscCall(PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_(comm)));
1386: PetscCall(PetscPrintf(comm, "Parallel Parent Section: \n"));
1387: PetscCall(PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_(comm)));
1388: PetscCall(PetscSFView(parentSF, NULL));
1389: }
1390: PetscCall(DMPlexSetTree(dmParallel, newParentSection, newParents, newChildIDs));
1391: PetscCall(PetscSectionDestroy(&newParentSection));
1392: PetscCall(PetscFree2(newParents, newChildIDs));
1393: PetscCall(PetscSFDestroy(&parentSF));
1394: }
1395: pmesh->useAnchors = mesh->useAnchors;
1396: PetscFunctionReturn(PETSC_SUCCESS);
1397: }
1399: /*@
1400: DMPlexSetPartitionBalance - Should distribution of the `DM` attempt to balance the shared point partition?
1402: Input Parameters:
1403: + dm - The `DMPLEX` object
1404: - flg - Balance the partition?
1406: Level: intermediate
1408: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexGetPartitionBalance()`
1409: @*/
1410: PetscErrorCode DMPlexSetPartitionBalance(DM dm, PetscBool flg)
1411: {
1412: DM_Plex *mesh = (DM_Plex *)dm->data;
1414: PetscFunctionBegin;
1415: mesh->partitionBalance = flg;
1416: PetscFunctionReturn(PETSC_SUCCESS);
1417: }
1419: /*@
1420: DMPlexGetPartitionBalance - Does distribution of the `DM` attempt to balance the shared point partition?
1422: Input Parameter:
1423: . dm - The `DMPLEX` object
1425: Output Parameter:
1426: . flg - Balance the partition?
1428: Level: intermediate
1430: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexSetPartitionBalance()`
1431: @*/
1432: PetscErrorCode DMPlexGetPartitionBalance(DM dm, PetscBool *flg)
1433: {
1434: DM_Plex *mesh = (DM_Plex *)dm->data;
1436: PetscFunctionBegin;
1438: PetscAssertPointer(flg, 2);
1439: *flg = mesh->partitionBalance;
1440: PetscFunctionReturn(PETSC_SUCCESS);
1441: }
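/* Editor's illustrative sketch (not part of the PETSc sources, excluded from compilation):
   enable balanced shared-point ownership before distributing a mesh. The helper name
   ExampleDistributeBalanced is hypothetical; it assumes `dm` is an already set-up DMPLEX. */
#if 0
static PetscErrorCode ExampleDistributeBalanced(DM dm, DM *dmDist)
{
  PetscBool balance;

  PetscFunctionBegin;
  PetscCall(DMPlexSetPartitionBalance(dm, PETSC_TRUE)); /* ownership votes use shifted ranks, spreading shared points over ranks */
  PetscCall(DMPlexGetPartitionBalance(dm, &balance));   /* balance is now PETSC_TRUE */
  PetscCall(DMPlexDistribute(dm, 0, NULL, dmDist));     /* *dmDist is NULL when the mesh is not distributed */
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif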
1443: typedef struct {
1444: PetscInt vote, rank, index;
1445: } Petsc3Int;
1447: /* MaxLoc, but carry a third piece of information around */
1448: static void MPIAPI MaxLocCarry(void *in_, void *inout_, PetscMPIInt *len_, MPI_Datatype *dtype)
1449: {
1450: Petsc3Int *a = (Petsc3Int *)inout_;
1451: Petsc3Int *b = (Petsc3Int *)in_;
1452: PetscInt i, len = *len_;
1453: for (i = 0; i < len; i++) {
1454: if (a[i].vote < b[i].vote) {
1455: a[i].vote = b[i].vote;
1456: a[i].rank = b[i].rank;
1457: a[i].index = b[i].index;
1458: } else if (a[i].vote <= b[i].vote) {
1459: if (a[i].rank >= b[i].rank) {
1460: a[i].rank = b[i].rank;
1461: a[i].index = b[i].index;
1462: }
1463: }
1464: }
1465: }
1467: /*@
1468: DMPlexCreatePointSF - Build a point `PetscSF` from a `PetscSF` describing a point migration
1470: Input Parameters:
1471: + dm - The source `DMPLEX` object
1472: . migrationSF - The star forest that describes the parallel point remapping
1473: - ownership - Flag causing a vote to determine point ownership
1475: Output Parameter:
1476: . pointSF - The star forest describing the point overlap in the remapped `DM`
1478: Level: developer
1480: Note:
1481: The output `pointSF` is guaranteed to have its array of local indices (leaves) sorted.
1483: .seealso: `DMPLEX`, `PetscSF`, `DM`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
1484: @*/
1485: PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF)
1486: {
1487: PetscMPIInt rank, size;
1488: PetscInt p, nroots, nleaves, idx, npointLeaves;
1489: PetscInt *pointLocal;
1490: const PetscInt *leaves;
1491: const PetscSFNode *roots;
1492: PetscSFNode *rootNodes, *leafNodes, *pointRemote;
1493: Vec shifts;
1494: const PetscInt numShifts = 13759;
1495: const PetscScalar *shift = NULL;
1496: const PetscBool shiftDebug = PETSC_FALSE;
1497: PetscBool balance;
1499: PetscFunctionBegin;
1501: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)dm), &rank));
1502: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)dm), &size));
1503: PetscCall(PetscLogEventBegin(DMPLEX_CreatePointSF, dm, 0, 0, 0));
1504: PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)dm), pointSF));
1505: PetscCall(PetscSFSetFromOptions(*pointSF));
1506: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
1508: PetscCall(DMPlexGetPartitionBalance(dm, &balance));
1509: PetscCall(PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots));
1510: PetscCall(PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes));
1511: if (ownership) {
1512: MPI_Op op;
1513: MPI_Datatype datatype;
1514: Petsc3Int *rootVote = NULL, *leafVote = NULL;
1516: /* If balancing, we compute a random cyclic shift of the rank for each remote point. That way, the max will evenly distribute among ranks. */
1517: if (balance) {
1518: PetscRandom r;
1520: PetscCall(PetscRandomCreate(PETSC_COMM_SELF, &r));
1521: PetscCall(PetscRandomSetInterval(r, 0, 2467 * size));
1522: PetscCall(VecCreate(PETSC_COMM_SELF, &shifts));
1523: PetscCall(VecSetSizes(shifts, numShifts, numShifts));
1524: PetscCall(VecSetType(shifts, VECSTANDARD));
1525: PetscCall(VecSetRandom(shifts, r));
1526: PetscCall(PetscRandomDestroy(&r));
1527: PetscCall(VecGetArrayRead(shifts, &shift));
1528: }
1530: PetscCall(PetscMalloc1(nroots, &rootVote));
1531: PetscCall(PetscMalloc1(nleaves, &leafVote));
1532: /* Point ownership vote: the process with the highest (possibly shifted) rank owns shared points */
1533: for (p = 0; p < nleaves; ++p) {
1534: if (shiftDebug) {
1535: PetscCall(PetscSynchronizedPrintf(PetscObjectComm((PetscObject)dm), "[%d] Point %" PetscInt_FMT " RemotePoint %" PetscInt_FMT " Shift %" PetscInt_FMT " MyRank %" PetscInt_FMT "\n", rank, leaves ? leaves[p] : p, roots[p].index,
1536: (PetscInt)PetscRealPart(shift[roots[p].index % numShifts]), (rank + (shift ? (PetscInt)PetscRealPart(shift[roots[p].index % numShifts]) : 0)) % size));
1537: }
1538: /* Either put in a bid or we know we own it */
1539: leafVote[p].vote = (rank + (shift ? (PetscInt)PetscRealPart(shift[roots[p].index % numShifts]) : 0)) % size;
1540: leafVote[p].rank = rank;
1541: leafVote[p].index = p;
1542: }
1543: for (p = 0; p < nroots; p++) {
1544: /* Roots must not participate in the reduction; flag them so that MAXLOC does not use them */
1545: rootVote[p].vote = -3;
1546: rootVote[p].rank = -3;
1547: rootVote[p].index = -3;
1548: }
1549: PetscCallMPI(MPI_Type_contiguous(3, MPIU_INT, &datatype));
1550: PetscCallMPI(MPI_Type_commit(&datatype));
1551: PetscCallMPI(MPI_Op_create(&MaxLocCarry, 1, &op));
1552: PetscCall(PetscSFReduceBegin(migrationSF, datatype, leafVote, rootVote, op));
1553: PetscCall(PetscSFReduceEnd(migrationSF, datatype, leafVote, rootVote, op));
1554: PetscCallMPI(MPI_Op_free(&op));
1555: PetscCallMPI(MPI_Type_free(&datatype));
1556: for (p = 0; p < nroots; p++) {
1557: rootNodes[p].rank = rootVote[p].rank;
1558: rootNodes[p].index = rootVote[p].index;
1559: }
1560: PetscCall(PetscFree(leafVote));
1561: PetscCall(PetscFree(rootVote));
1562: } else {
1563: for (p = 0; p < nroots; p++) {
1564: rootNodes[p].index = -1;
1565: rootNodes[p].rank = rank;
1566: }
1567: for (p = 0; p < nleaves; p++) {
1568: /* Write new local id into old location */
1569: if (roots[p].rank == rank) rootNodes[roots[p].index].index = leaves ? leaves[p] : p;
1570: }
1571: }
1572: PetscCall(PetscSFBcastBegin(migrationSF, MPIU_SF_NODE, rootNodes, leafNodes, MPI_REPLACE));
1573: PetscCall(PetscSFBcastEnd(migrationSF, MPIU_SF_NODE, rootNodes, leafNodes, MPI_REPLACE));
1575: for (npointLeaves = 0, p = 0; p < nleaves; p++) {
1576: if (leafNodes[p].rank != rank) npointLeaves++;
1577: }
1578: PetscCall(PetscMalloc1(npointLeaves, &pointLocal));
1579: PetscCall(PetscMalloc1(npointLeaves, &pointRemote));
1580: for (idx = 0, p = 0; p < nleaves; p++) {
1581: if (leafNodes[p].rank != rank) {
1582: /* Note that pointLocal is automatically sorted as it is a sublist of 0, ..., nleaves-1 */
1583: pointLocal[idx] = p;
1584: pointRemote[idx] = leafNodes[p];
1585: idx++;
1586: }
1587: }
1588: if (shift) {
1589: PetscCall(VecRestoreArrayRead(shifts, &shift));
1590: PetscCall(VecDestroy(&shifts));
1591: }
1592: if (shiftDebug) PetscCall(PetscSynchronizedFlush(PetscObjectComm((PetscObject)dm), PETSC_STDOUT));
1593: PetscCall(PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER));
1594: PetscCall(PetscFree2(rootNodes, leafNodes));
1595: PetscCall(PetscLogEventEnd(DMPLEX_CreatePointSF, dm, 0, 0, 0));
1596: if (PetscDefined(USE_DEBUG)) PetscCall(DMPlexCheckPointSF(dm, *pointSF, PETSC_FALSE));
1597: PetscFunctionReturn(PETSC_SUCCESS);
1598: }
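/* Editor's illustrative sketch (not part of the PETSc sources, excluded from compilation):
   given a migration SF produced by a partitioning step, build the point SF of the migrated
   mesh and install it. The helper name is hypothetical; inside PETSc this is done by
   DMPlexDistribute() and DMPlexDistributeOverlap(). */
#if 0
static PetscErrorCode ExampleInstallPointSF(DM dmParallel, PetscSF sfMigration)
{
  PetscSF sfPoint;

  PetscFunctionBegin;
  PetscCall(DMPlexCreatePointSF(dmParallel, sfMigration, PETSC_TRUE, &sfPoint)); /* PETSC_TRUE: run the ownership vote */
  PetscCall(DMSetPointSF(dmParallel, sfPoint));
  PetscCall(PetscSFDestroy(&sfPoint));
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif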
1600: /*@
1601: DMPlexMigrate - Migrates internal `DM` data over the supplied star forest
1603: Collective
1605: Input Parameters:
1606: + dm - The source `DMPLEX` object
1607: - sf - The star forest communication context describing the migration pattern
1609: Output Parameter:
1610: . targetDM - The target `DMPLEX` object
1612: Level: intermediate
1614: .seealso: `DMPLEX`, `PetscSF`, `DM`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
1615: @*/
1616: PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM)
1617: {
1618: MPI_Comm comm;
1619: PetscInt dim, cdim, nroots;
1620: PetscSF sfPoint;
1621: ISLocalToGlobalMapping ltogMigration;
1622: ISLocalToGlobalMapping ltogOriginal = NULL;
1624: PetscFunctionBegin;
1626: PetscCall(PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0));
1627: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1628: PetscCall(DMGetDimension(dm, &dim));
1629: PetscCall(DMSetDimension(targetDM, dim));
1630: PetscCall(DMGetCoordinateDim(dm, &cdim));
1631: PetscCall(DMSetCoordinateDim(targetDM, cdim));
1633: /* Check for a one-to-all distribution pattern */
1634: PetscCall(DMGetPointSF(dm, &sfPoint));
1635: PetscCall(PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL));
1636: if (nroots >= 0) {
1637: IS isOriginal;
1638: PetscInt n, size, nleaves;
1639: PetscInt *numbering_orig, *numbering_new;
1641: /* Get the original point numbering */
1642: PetscCall(DMPlexCreatePointNumbering(dm, &isOriginal));
1643: PetscCall(ISLocalToGlobalMappingCreateIS(isOriginal, &ltogOriginal));
1644: PetscCall(ISLocalToGlobalMappingGetSize(ltogOriginal, &size));
1645: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt **)&numbering_orig));
1646: /* Convert to positive global numbers */
1647: for (n = 0; n < size; n++) {
1648: if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n] + 1);
1649: }
1650: /* Derive the new local-to-global mapping from the old one */
1651: PetscCall(PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL));
1652: PetscCall(PetscMalloc1(nleaves, &numbering_new));
1653: PetscCall(PetscSFBcastBegin(sf, MPIU_INT, numbering_orig, numbering_new, MPI_REPLACE));
1654: PetscCall(PetscSFBcastEnd(sf, MPIU_INT, numbering_orig, numbering_new, MPI_REPLACE));
1655: PetscCall(ISLocalToGlobalMappingCreate(comm, 1, nleaves, numbering_new, PETSC_OWN_POINTER, &ltogMigration));
1656: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltogOriginal, (const PetscInt **)&numbering_orig));
1657: PetscCall(ISDestroy(&isOriginal));
1658: } else {
1659: /* One-to-all distribution pattern: We can derive LToG from SF */
1660: PetscCall(ISLocalToGlobalMappingCreateSF(sf, 0, &ltogMigration));
1661: }
1662: PetscCall(PetscObjectSetName((PetscObject)ltogMigration, "Point renumbering for DM migration"));
1663: PetscCall(ISLocalToGlobalMappingViewFromOptions(ltogMigration, (PetscObject)dm, "-partition_view"));
1664: /* Migrate DM data to target DM */
1665: PetscCall(DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM));
1666: PetscCall(DMPlexDistributeLabels(dm, sf, targetDM));
1667: PetscCall(DMPlexDistributeCoordinates(dm, sf, targetDM));
1668: PetscCall(DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM));
1669: PetscCall(ISLocalToGlobalMappingDestroy(&ltogOriginal));
1670: PetscCall(ISLocalToGlobalMappingDestroy(&ltogMigration));
1671: PetscCall(PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0));
1672: PetscFunctionReturn(PETSC_SUCCESS);
1673: }
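/* Editor's illustrative sketch (not part of the PETSc sources, excluded from compilation):
   migrate a mesh over a given point SF into a freshly created target plex, mirroring what
   DMPlexDistribute() does internally. The helper name is hypothetical; `sfMigration` is
   assumed to be a valid migration SF (e.g. from DMPlexPartitionLabelCreateSF()). */
#if 0
static PetscErrorCode ExampleMigrate(DM dm, PetscSF sfMigration, DM *dmTarget)
{
  PetscFunctionBegin;
  PetscCall(DMPlexCreate(PetscObjectComm((PetscObject)dm), dmTarget));
  PetscCall(DMPlexMigrate(dm, sfMigration, *dmTarget)); /* moves cones, labels, coordinates, and the tree */
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif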
1675: /*@
1676: DMPlexRemapMigrationSF - Rewrite the distribution SF to account for overlap
1678: Collective
1680: Input Parameters:
1681: + sfOverlap - The `PetscSF` object just for the overlap
1682: - sfMigration - The original distribution `PetscSF` object
1684: Output Parameter:
1685: . sfMigrationNew - A rewritten `PetscSF` object that incorporates the overlap
1687: Level: developer
1689: .seealso: `DMPLEX`, `DM`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`, `DMPlexGetOverlap()`
1690: @*/
1691: PetscErrorCode DMPlexRemapMigrationSF(PetscSF sfOverlap, PetscSF sfMigration, PetscSF *sfMigrationNew)
1692: {
1693: PetscSFNode *newRemote, *permRemote = NULL;
1694: const PetscInt *oldLeaves;
1695: const PetscSFNode *oldRemote;
1696: PetscInt nroots, nleaves, noldleaves;
1698: PetscFunctionBegin;
1699: PetscCall(PetscSFGetGraph(sfMigration, &nroots, &noldleaves, &oldLeaves, &oldRemote));
1700: PetscCall(PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL));
1701: PetscCall(PetscMalloc1(nleaves, &newRemote));
1702: /* oldRemote: original root point mapping to original leaf point
1703: newRemote: original leaf point mapping to overlapped leaf point */
1704: if (oldLeaves) {
1705: /* After stratification, the migration remotes may not be in root (canonical) order, so we reorder using the leaf numbering */
1706: PetscCall(PetscMalloc1(noldleaves, &permRemote));
1707: for (PetscInt l = 0; l < noldleaves; ++l) permRemote[oldLeaves[l]] = oldRemote[l];
1708: oldRemote = permRemote;
1709: }
1710: PetscCall(PetscSFBcastBegin(sfOverlap, MPIU_SF_NODE, oldRemote, newRemote, MPI_REPLACE));
1711: PetscCall(PetscSFBcastEnd(sfOverlap, MPIU_SF_NODE, oldRemote, newRemote, MPI_REPLACE));
1712: PetscCall(PetscFree(permRemote));
1713: PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)sfOverlap), sfMigrationNew));
1714: PetscCall(PetscSFSetGraph(*sfMigrationNew, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER));
1715: PetscFunctionReturn(PETSC_SUCCESS);
1716: }
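/* Editor's illustrative sketch (not part of the PETSc sources, excluded from compilation):
   fold an overlap SF into an existing migration SF so that the combined SF maps original
   points to overlapped leaf points, as done inside DMPlexDistribute(). The helper name is
   hypothetical. */
#if 0
static PetscErrorCode ExampleRemapMigration(PetscSF sfOverlap, PetscSF *sfMigration)
{
  PetscSF sfMigrationNew;

  PetscFunctionBegin;
  PetscCall(DMPlexRemapMigrationSF(sfOverlap, *sfMigration, &sfMigrationNew));
  PetscCall(PetscSFDestroy(sfMigration));
  *sfMigration = sfMigrationNew; /* now describes the full (overlapped) migration pattern */
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif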
1718: /*@
1719: DMPlexDistribute - Distributes the mesh and any associated sections.
1721: Collective
1723: Input Parameters:
1724: + dm - The original `DMPLEX` object
1725: - overlap - The overlap of partitions, 0 is the default
1727: Output Parameters:
1728: + sf - The `PetscSF` used for point distribution, or `NULL` if not needed
1729: - dmParallel - The distributed `DMPLEX` object
1731: Level: intermediate
1733: Notes:
1734: If the mesh was not distributed, the output `dmParallel` will be `NULL`.
1736: The user can control the definition of adjacency for the mesh using `DMSetAdjacency()`. They should choose the combination appropriate for the function
1737: representation on the mesh.
1739: .seealso: `DMPLEX`, `DM`, `DMPlexCreate()`, `DMSetAdjacency()`, `DMPlexGetOverlap()`
1740: @*/
1741: PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel)
1742: {
1743: MPI_Comm comm;
1744: PetscPartitioner partitioner;
1745: IS cellPart;
1746: PetscSection cellPartSection;
1747: DM dmCoord;
1748: DMLabel lblPartition, lblMigration;
1749: PetscSF sfMigration, sfStratified, sfPoint;
1750: PetscBool flg, balance;
1751: PetscMPIInt rank, size;
1753: PetscFunctionBegin;
1756: if (sf) PetscAssertPointer(sf, 3);
1757: PetscAssertPointer(dmParallel, 4);
1759: if (sf) *sf = NULL;
1760: *dmParallel = NULL;
1761: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1762: PetscCallMPI(MPI_Comm_rank(comm, &rank));
1763: PetscCallMPI(MPI_Comm_size(comm, &size));
1764: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
1766: PetscCall(PetscLogEventBegin(DMPLEX_Distribute, dm, 0, 0, 0));
1767: /* Create cell partition */
1768: PetscCall(PetscLogEventBegin(DMPLEX_Partition, dm, 0, 0, 0));
1769: PetscCall(PetscSectionCreate(comm, &cellPartSection));
1770: PetscCall(DMPlexGetPartitioner(dm, &partitioner));
1771: PetscCall(PetscPartitionerDMPlexPartition(partitioner, dm, NULL, cellPartSection, &cellPart));
1772: PetscCall(PetscLogEventBegin(DMPLEX_PartSelf, dm, 0, 0, 0));
1773: {
1774: /* Convert partition to DMLabel */
1775: IS is;
1776: PetscHSetI ht;
1777: const PetscInt *points;
1778: PetscInt *iranks;
1779: PetscInt pStart, pEnd, proc, npoints, poff = 0, nranks;
1781: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Point Partition", &lblPartition));
1782: /* Preallocate strata */
1783: PetscCall(PetscHSetICreate(&ht));
1784: PetscCall(PetscSectionGetChart(cellPartSection, &pStart, &pEnd));
1785: for (proc = pStart; proc < pEnd; proc++) {
1786: PetscCall(PetscSectionGetDof(cellPartSection, proc, &npoints));
1787: if (npoints) PetscCall(PetscHSetIAdd(ht, proc));
1788: }
1789: PetscCall(PetscHSetIGetSize(ht, &nranks));
1790: PetscCall(PetscMalloc1(nranks, &iranks));
1791: PetscCall(PetscHSetIGetElems(ht, &poff, iranks));
1792: PetscCall(PetscHSetIDestroy(&ht));
1793: PetscCall(DMLabelAddStrata(lblPartition, nranks, iranks));
1794: PetscCall(PetscFree(iranks));
1795: /* Inline DMPlexPartitionLabelClosure() */
1796: PetscCall(ISGetIndices(cellPart, &points));
1797: PetscCall(PetscSectionGetChart(cellPartSection, &pStart, &pEnd));
1798: for (proc = pStart; proc < pEnd; proc++) {
1799: PetscCall(PetscSectionGetDof(cellPartSection, proc, &npoints));
1800: if (!npoints) continue;
1801: PetscCall(PetscSectionGetOffset(cellPartSection, proc, &poff));
1802: PetscCall(DMPlexClosurePoints_Private(dm, npoints, points + poff, &is));
1803: PetscCall(DMLabelSetStratumIS(lblPartition, proc, is));
1804: PetscCall(ISDestroy(&is));
1805: }
1806: PetscCall(ISRestoreIndices(cellPart, &points));
1807: }
1808: PetscCall(PetscLogEventEnd(DMPLEX_PartSelf, dm, 0, 0, 0));
1810: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Point migration", &lblMigration));
1811: PetscCall(DMPlexPartitionLabelInvert(dm, lblPartition, NULL, lblMigration));
1812: PetscCall(DMPlexPartitionLabelCreateSF(dm, lblMigration, PETSC_TRUE, &sfMigration));
1813: PetscCall(DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified));
1814: PetscCall(PetscSFDestroy(&sfMigration));
1815: sfMigration = sfStratified;
1816: PetscCall(PetscSFSetUp(sfMigration));
1817: PetscCall(PetscLogEventEnd(DMPLEX_Partition, dm, 0, 0, 0));
1818: PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-partition_view", &flg));
1819: if (flg) {
1820: PetscCall(DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_(comm)));
1821: PetscCall(PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_(comm)));
1822: }
1824: /* Create non-overlapping parallel DM and migrate internal data */
1825: PetscCall(DMPlexCreate(comm, dmParallel));
1826: PetscCall(PetscObjectSetName((PetscObject)*dmParallel, "Parallel Mesh"));
1827: PetscCall(DMPlexMigrate(dm, sfMigration, *dmParallel));
1829: /* Build the point SF without overlap */
1830: PetscCall(DMPlexGetPartitionBalance(dm, &balance));
1831: PetscCall(DMPlexSetPartitionBalance(*dmParallel, balance));
1832: PetscCall(DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint));
1833: PetscCall(DMSetPointSF(*dmParallel, sfPoint));
1834: PetscCall(DMPlexMigrateIsoperiodicFaceSF_Internal(dm, *dmParallel, sfMigration));
1835: PetscCall(DMGetCoordinateDM(*dmParallel, &dmCoord));
1836: if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
1837: if (flg) PetscCall(PetscSFView(sfPoint, NULL));
1839: if (overlap > 0) {
1840: DM dmOverlap;
1841: PetscSF sfOverlap, sfOverlapPoint;
1843: /* Add the partition overlap to the distributed DM */
1844: PetscCall(DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap));
1845: PetscCall(DMDestroy(dmParallel));
1846: *dmParallel = dmOverlap;
1847: if (flg) {
1848: PetscCall(PetscPrintf(comm, "Overlap Migration SF:\n"));
1849: PetscCall(PetscSFView(sfOverlap, NULL));
1850: }
1852: /* Re-map the migration SF to establish the full migration pattern */
1853: PetscCall(DMPlexRemapMigrationSF(sfOverlap, sfMigration, &sfOverlapPoint));
1854: PetscCall(PetscSFDestroy(&sfOverlap));
1855: PetscCall(PetscSFDestroy(&sfMigration));
1856: sfMigration = sfOverlapPoint;
1857: }
1858: /* Cleanup Partition */
1859: PetscCall(DMLabelDestroy(&lblPartition));
1860: PetscCall(DMLabelDestroy(&lblMigration));
1861: PetscCall(PetscSectionDestroy(&cellPartSection));
1862: PetscCall(ISDestroy(&cellPart));
1863: PetscCall(DMPlexCopy_Internal(dm, PETSC_TRUE, PETSC_FALSE, *dmParallel));
1864: // Create sfNatural; this requires the discretization information
1865: PetscCall(DMCopyDisc(dm, *dmParallel));
1866: if (dm->useNatural) {
1867: PetscSection section;
1869: PetscCall(DMSetUseNatural(*dmParallel, PETSC_TRUE));
1870: PetscCall(DMGetLocalSection(dm, &section));
1872: /* If DM has useNatural = PETSC_TRUE, but no sfNatural is set, the DM's Section is considered natural */
1873: /* sfMigration and sfNatural are respectively the point and dofs SFs mapping to this natural DM */
1874: /* Compose with a previous sfNatural if present */
1875: if (dm->sfNatural) {
1876: PetscCall(DMPlexMigrateGlobalToNaturalSF(dm, *dmParallel, dm->sfNatural, sfMigration, &(*dmParallel)->sfNatural));
1877: } else {
1878: PetscCall(DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural));
1879: }
1880: /* Compose with a previous sfMigration if present */
1881: if (dm->sfMigration) {
1882: PetscSF naturalPointSF;
1884: PetscCall(PetscSFCompose(dm->sfMigration, sfMigration, &naturalPointSF));
1885: PetscCall(PetscSFDestroy(&sfMigration));
1886: sfMigration = naturalPointSF;
1887: }
1888: }
1889: /* Cleanup */
1890: if (sf) {
1891: *sf = sfMigration;
1892: } else PetscCall(PetscSFDestroy(&sfMigration));
1893: PetscCall(PetscSFDestroy(&sfPoint));
1894: PetscCall(PetscLogEventEnd(DMPLEX_Distribute, dm, 0, 0, 0));
1895: PetscFunctionReturn(PETSC_SUCCESS);
1896: }
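/* Editor's illustrative sketch (not part of the PETSc sources, excluded from compilation):
   the common replace-in-place pattern for DMPlexDistribute(). The helper name is
   hypothetical; `*dm` is assumed to be a serial, fully set-up DMPLEX. */
#if 0
static PetscErrorCode ExampleDistribute(DM *dm)
{
  DM      dmDist = NULL;
  PetscSF sfDist = NULL;

  PetscFunctionBegin;
  PetscCall(DMPlexDistribute(*dm, 0, &sfDist, &dmDist)); /* overlap of 0 cells */
  if (dmDist) { /* NULL when the mesh was not distributed, e.g. on a single rank */
    PetscCall(DMDestroy(dm));
    *dm = dmDist;
  }
  PetscCall(PetscSFDestroy(&sfDist)); /* caller owns the distribution SF */
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif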
1898: PetscErrorCode DMPlexDistributeOverlap_Internal(DM dm, PetscInt overlap, MPI_Comm newcomm, const char *ovlboundary, PetscSF *sf, DM *dmOverlap)
1899: {
1900: DM_Plex *mesh = (DM_Plex *)dm->data;
1901: MPI_Comm comm;
1902: PetscMPIInt size, rank;
1903: PetscSection rootSection, leafSection;
1904: IS rootrank, leafrank;
1905: DM dmCoord;
1906: DMLabel lblOverlap;
1907: PetscSF sfOverlap, sfStratified, sfPoint;
1908: PetscBool clear_ovlboundary;
1910: PetscFunctionBegin;
1911: if (sf) *sf = NULL;
1912: *dmOverlap = NULL;
1913: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1914: PetscCallMPI(MPI_Comm_size(comm, &size));
1915: PetscCallMPI(MPI_Comm_rank(comm, &rank));
1916: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
1917: {
1918: // We need to get options for the _already_ distributed mesh, so it must be done here
1919: PetscInt overlap;
1920: const char *prefix;
1921: char oldPrefix[PETSC_MAX_PATH_LEN];
1923: oldPrefix[0] = '\0';
1924: PetscCall(PetscObjectGetOptionsPrefix((PetscObject)dm, &prefix));
1925: PetscCall(PetscStrncpy(oldPrefix, prefix, sizeof(oldPrefix)));
1926: PetscCall(PetscObjectAppendOptionsPrefix((PetscObject)dm, "dist_"));
1927: PetscCall(DMPlexGetOverlap(dm, &overlap));
1928: PetscObjectOptionsBegin((PetscObject)dm);
1929: PetscCall(DMSetFromOptions_Overlap_Plex(dm, PetscOptionsObject, &overlap));
1930: PetscOptionsEnd();
1931: PetscCall(PetscObjectSetOptionsPrefix((PetscObject)dm, oldPrefix[0] == '\0' ? NULL : oldPrefix));
1932: }
1933: if (ovlboundary) {
1934: PetscBool flg;
1935: PetscCall(DMHasLabel(dm, ovlboundary, &flg));
1936: if (!flg) {
1937: DMLabel label;
1939: PetscCall(DMCreateLabel(dm, ovlboundary));
1940: PetscCall(DMGetLabel(dm, ovlboundary, &label));
1941: PetscCall(DMPlexMarkBoundaryFaces(dm, 1, label));
1942: clear_ovlboundary = PETSC_TRUE;
1943: }
1944: }
1945: PetscCall(PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0));
1946: /* Compute point overlap with neighbouring processes on the distributed DM */
1947: PetscCall(PetscLogEventBegin(DMPLEX_Partition, dm, 0, 0, 0));
1948: PetscCall(PetscSectionCreate(newcomm, &rootSection));
1949: PetscCall(PetscSectionCreate(newcomm, &leafSection));
1950: PetscCall(DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank));
1951: if (mesh->numOvLabels) PetscCall(DMPlexCreateOverlapLabelFromLabels(dm, mesh->numOvLabels, mesh->ovLabels, mesh->ovValues, mesh->numOvExLabels, mesh->ovExLabels, mesh->ovExValues, rootSection, rootrank, leafSection, leafrank, &lblOverlap));
1952: else PetscCall(DMPlexCreateOverlapLabel(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap));
1953: /* Convert overlap label to stratified migration SF */
1954: PetscCall(DMPlexPartitionLabelCreateSF(dm, lblOverlap, PETSC_FALSE, &sfOverlap));
1955: PetscCall(DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified));
1956: PetscCall(PetscSFDestroy(&sfOverlap));
1957: sfOverlap = sfStratified;
1958: PetscCall(PetscObjectSetName((PetscObject)sfOverlap, "Overlap SF"));
1959: PetscCall(PetscSFSetFromOptions(sfOverlap));
1961: PetscCall(PetscSectionDestroy(&rootSection));
1962: PetscCall(PetscSectionDestroy(&leafSection));
1963: PetscCall(ISDestroy(&rootrank));
1964: PetscCall(ISDestroy(&leafrank));
1965: PetscCall(PetscLogEventEnd(DMPLEX_Partition, dm, 0, 0, 0));
1967: /* Build the overlapping DM */
1968: PetscCall(DMPlexCreate(newcomm, dmOverlap));
1969: PetscCall(PetscObjectSetName((PetscObject)*dmOverlap, "Parallel Mesh"));
1970: PetscCall(DMPlexMigrate(dm, sfOverlap, *dmOverlap));
1971: /* Store the overlap in the new DM */
1972: PetscCall(DMPlexSetOverlap_Plex(*dmOverlap, dm, overlap));
1973: /* Build the new point SF */
1974: PetscCall(DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint));
1975: PetscCall(DMSetPointSF(*dmOverlap, sfPoint));
1976: PetscCall(DMGetCoordinateDM(*dmOverlap, &dmCoord));
1977: if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
1978: PetscCall(DMGetCellCoordinateDM(*dmOverlap, &dmCoord));
1979: if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
1980: PetscCall(PetscSFDestroy(&sfPoint));
1981: PetscCall(DMPlexCopy_Internal(dm, PETSC_TRUE, PETSC_FALSE, *dmOverlap));
1982: /* TODO: labels stored inside the DS for regions should be handled here */
1983: PetscCall(DMCopyDisc(dm, *dmOverlap));
1984: /* Cleanup overlap partition */
1985: PetscCall(DMLabelDestroy(&lblOverlap));
1986: if (sf) *sf = sfOverlap;
1987: else PetscCall(PetscSFDestroy(&sfOverlap));
1988: if (ovlboundary) {
1989: DMLabel label;
1990: PetscBool flg;
1992: if (clear_ovlboundary) PetscCall(DMRemoveLabel(dm, ovlboundary, NULL));
1993: PetscCall(DMHasLabel(*dmOverlap, ovlboundary, &flg));
1994: PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Missing %s label", ovlboundary);
1995: PetscCall(DMGetLabel(*dmOverlap, ovlboundary, &label));
1996: PetscCall(DMPlexMarkBoundaryFaces_Internal(*dmOverlap, 1, 0, label, PETSC_TRUE));
1997: }
1998: PetscCall(PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0));
1999: PetscFunctionReturn(PETSC_SUCCESS);
2000: }
2002: /*@
2003: DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping `DM`.
2005: Collective
2007: Input Parameters:
2008: + dm - The non-overlapping distributed `DMPLEX` object
2009: - overlap - The overlap of partitions (the same on all ranks)
2011: Output Parameters:
2012: + sf - The `PetscSF` used for point distribution, or `NULL` if not needed
2013: - dmOverlap - The overlapping distributed `DMPLEX` object
2015: Options Database Keys:
2016: + -dm_plex_overlap_labels <name1,name2,...> - List of overlap label names
2017: . -dm_plex_overlap_values <int1,int2,...> - List of overlap label values
2018: . -dm_plex_overlap_exclude_label <name> - Label used to exclude points from overlap
2019: - -dm_plex_overlap_exclude_value <int> - Label value used to exclude points from overlap
2021: Level: advanced
2023: Notes:
2024: If the mesh was not distributed, the output `dmOverlap` will be `NULL`.
2026: The user can control the definition of adjacency for the mesh using `DMSetAdjacency()`. They should choose the combination appropriate for the function
2027: representation on the mesh.
2029: .seealso: `DMPLEX`, `PetscSF`, `DM`, `DMPlexCreate()`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexCreateOverlapLabel()`, `DMPlexGetOverlap()`
2030: @*/
2031: PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap)
2032: {
2033: PetscFunctionBegin;
2036: if (sf) PetscAssertPointer(sf, 3);
2037: PetscAssertPointer(dmOverlap, 4);
2038: PetscCall(DMPlexDistributeOverlap_Internal(dm, overlap, PetscObjectComm((PetscObject)dm), NULL, sf, dmOverlap));
2039: PetscFunctionReturn(PETSC_SUCCESS);
2040: }
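/* Editor's illustrative sketch (not part of the PETSc sources, excluded from compilation):
   add one layer of overlap cells to an already distributed mesh, replacing it in place.
   The helper name is hypothetical; `*dm` is assumed to be a non-overlapping distributed DMPLEX. */
#if 0
static PetscErrorCode ExampleAddOverlap(DM *dm)
{
  DM dmOverlap = NULL;

  PetscFunctionBegin;
  PetscCall(DMPlexDistributeOverlap(*dm, 1, NULL, &dmOverlap)); /* one layer of overlap cells */
  if (dmOverlap) { /* NULL when the mesh was not distributed */
    PetscCall(DMDestroy(dm));
    *dm = dmOverlap;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif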
2042: PetscErrorCode DMPlexGetOverlap_Plex(DM dm, PetscInt *overlap)
2043: {
2044: DM_Plex *mesh = (DM_Plex *)dm->data;
2046: PetscFunctionBegin;
2047: *overlap = mesh->overlap;
2048: PetscFunctionReturn(PETSC_SUCCESS);
2049: }
2051: PetscErrorCode DMPlexSetOverlap_Plex(DM dm, DM dmSrc, PetscInt overlap)
2052: {
2053: DM_Plex *mesh = NULL;
2054: DM_Plex *meshSrc = NULL;
2056: PetscFunctionBegin;
2058: mesh = (DM_Plex *)dm->data;
2059: if (dmSrc) {
2061: meshSrc = (DM_Plex *)dmSrc->data;
2062: mesh->overlap = meshSrc->overlap;
2063: } else {
2064: mesh->overlap = 0;
2065: }
2066: mesh->overlap += overlap;
2067: PetscFunctionReturn(PETSC_SUCCESS);
2068: }
2070: /*@
2071: DMPlexGetOverlap - Get the width of the cell overlap
2073: Not Collective
2075: Input Parameter:
2076: . dm - The `DM`
2078: Output Parameter:
2079: . overlap - the width of the cell overlap
2081: Level: intermediate
2083: .seealso: `DMPLEX`, `DMPlexSetOverlap()`, `DMPlexDistribute()`
2084: @*/
2085: PetscErrorCode DMPlexGetOverlap(DM dm, PetscInt *overlap)
2086: {
2087: PetscFunctionBegin;
2089: PetscAssertPointer(overlap, 2);
2090: PetscUseMethod(dm, "DMPlexGetOverlap_C", (DM, PetscInt *), (dm, overlap));
2091: PetscFunctionReturn(PETSC_SUCCESS);
2092: }
2094: /*@
2095: DMPlexSetOverlap - Set the width of the cell overlap
2097: Logically Collective
2099: Input Parameters:
2100: + dm - The `DM`
2101: . dmSrc - The `DM` that produced this one, or `NULL`
2102: - overlap - the width of the cell overlap
2104: Level: intermediate
2106: Note:
2107: The overlap from `dmSrc` is added to `dm`
2109: .seealso: `DMPLEX`, `DMPlexGetOverlap()`, `DMPlexDistribute()`
2110: @*/
2111: PetscErrorCode DMPlexSetOverlap(DM dm, DM dmSrc, PetscInt overlap)
2112: {
2113: PetscFunctionBegin;
2116: PetscTryMethod(dm, "DMPlexSetOverlap_C", (DM, DM, PetscInt), (dm, dmSrc, overlap));
2117: PetscFunctionReturn(PETSC_SUCCESS);
2118: }
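/* Editor's illustrative sketch (not part of the PETSc sources, excluded from compilation):
   query the accumulated cell overlap width of a DM. The helper name is hypothetical. */
#if 0
static PetscErrorCode ExampleQueryOverlap(DM dm)
{
  PetscInt overlap;

  PetscFunctionBegin;
  PetscCall(DMPlexGetOverlap(dm, &overlap)); /* 0 for a mesh with no overlap */
  PetscCall(PetscPrintf(PetscObjectComm((PetscObject)dm), "Cell overlap width: %" PetscInt_FMT "\n", overlap));
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif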
2120: PetscErrorCode DMPlexDistributeSetDefault_Plex(DM dm, PetscBool dist)
2121: {
2122: DM_Plex *mesh = (DM_Plex *)dm->data;
2124: PetscFunctionBegin;
2125: mesh->distDefault = dist;
2126: PetscFunctionReturn(PETSC_SUCCESS);
2127: }
2129: /*@
2130: DMPlexDistributeSetDefault - Set flag indicating whether the `DM` should be distributed by default
2132: Logically Collective
2134: Input Parameters:
2135: + dm - The `DM`
2136: - dist - Flag for distribution
2138: Level: intermediate
2140: .seealso: `DMPLEX`, `DMPlexDistributeGetDefault()`, `DMPlexDistribute()`
2141: @*/
2142: PetscErrorCode DMPlexDistributeSetDefault(DM dm, PetscBool dist)
2143: {
2144: PetscFunctionBegin;
2147: PetscTryMethod(dm, "DMPlexDistributeSetDefault_C", (DM, PetscBool), (dm, dist));
2148: PetscFunctionReturn(PETSC_SUCCESS);
2149: }
2151: PetscErrorCode DMPlexDistributeGetDefault_Plex(DM dm, PetscBool *dist)
2152: {
2153: DM_Plex *mesh = (DM_Plex *)dm->data;
2155: PetscFunctionBegin;
2156: *dist = mesh->distDefault;
2157: PetscFunctionReturn(PETSC_SUCCESS);
2158: }
2160: /*@
2161: DMPlexDistributeGetDefault - Get flag indicating whether the `DM` should be distributed by default
2163: Not Collective
2165: Input Parameter:
2166: . dm - The `DM`
2168: Output Parameter:
2169: . dist - Flag for distribution
2171: Level: intermediate
2173: .seealso: `DMPLEX`, `DM`, `DMPlexDistributeSetDefault()`, `DMPlexDistribute()`
2174: @*/
2175: PetscErrorCode DMPlexDistributeGetDefault(DM dm, PetscBool *dist)
2176: {
2177: PetscFunctionBegin;
2179: PetscAssertPointer(dist, 2);
2180: PetscUseMethod(dm, "DMPlexDistributeGetDefault_C", (DM, PetscBool *), (dm, dist));
2181: PetscFunctionReturn(PETSC_SUCCESS);
2182: }
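/* Editor's illustrative sketch (not part of the PETSc sources, excluded from compilation):
   turn off default distribution so the mesh stays serial unless distributed explicitly.
   The helper name is hypothetical; the call is assumed to be made before the DM is set up
   from options. */
#if 0
static PetscErrorCode ExampleDisableAutoDistribution(DM dm)
{
  PetscBool dist;

  PetscFunctionBegin;
  PetscCall(DMPlexDistributeSetDefault(dm, PETSC_FALSE));
  PetscCall(DMPlexDistributeGetDefault(dm, &dist)); /* dist is now PETSC_FALSE */
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif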
2184: /*@C
2185: DMPlexGetGatherDM - Get a copy of the `DMPLEX` that gathers all points on the
2186: root process of the original's communicator.
2188: Collective
2190: Input Parameter:
2191: . dm - the original `DMPLEX` object
2193: Output Parameters:
2194: + sf - the `PetscSF` used for point distribution (optional)
2195: - gatherMesh - the gathered `DM` object, or `NULL`
2197: Level: intermediate
2199: .seealso: `DMPLEX`, `DM`, `PetscSF`, `DMPlexDistribute()`, `DMPlexGetRedundantDM()`
2200: @*/
2201: PetscErrorCode DMPlexGetGatherDM(DM dm, PetscSF *sf, DM *gatherMesh)
2202: {
2203: MPI_Comm comm;
2204: PetscMPIInt size;
2205: PetscPartitioner oldPart, gatherPart;
2207: PetscFunctionBegin;
2209: PetscAssertPointer(gatherMesh, 3);
2210: *gatherMesh = NULL;
2211: if (sf) *sf = NULL;
2212: comm = PetscObjectComm((PetscObject)dm);
2213: PetscCallMPI(MPI_Comm_size(comm, &size));
2214: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
2215: PetscCall(DMPlexGetPartitioner(dm, &oldPart));
2216: PetscCall(PetscObjectReference((PetscObject)oldPart));
2217: PetscCall(PetscPartitionerCreate(comm, &gatherPart));
2218: PetscCall(PetscPartitionerSetType(gatherPart, PETSCPARTITIONERGATHER));
2219: PetscCall(DMPlexSetPartitioner(dm, gatherPart));
2220: PetscCall(DMPlexDistribute(dm, 0, sf, gatherMesh));
2222: PetscCall(DMPlexSetPartitioner(dm, oldPart));
2223: PetscCall(PetscPartitionerDestroy(&gatherPart));
2224: PetscCall(PetscPartitionerDestroy(&oldPart));
2225: PetscFunctionReturn(PETSC_SUCCESS);
2226: }
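/* Editor's illustrative sketch (not part of the PETSc sources, excluded from compilation):
   gather a distributed mesh onto rank 0, e.g. for serial post-processing. The helper name
   and the "-gather_dm_view" option name are hypothetical. */
#if 0
static PetscErrorCode ExampleGather(DM dm)
{
  DM      gatherMesh = NULL;
  PetscSF gatherSF   = NULL;

  PetscFunctionBegin;
  PetscCall(DMPlexGetGatherDM(dm, &gatherSF, &gatherMesh)); /* all points end up on rank 0 */
  if (gatherMesh) PetscCall(DMViewFromOptions(gatherMesh, NULL, "-gather_dm_view"));
  PetscCall(DMDestroy(&gatherMesh));   /* no-ops when the mesh was already serial */
  PetscCall(PetscSFDestroy(&gatherSF));
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif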
2228: /*@C
2229: DMPlexGetRedundantDM - Get a copy of the `DMPLEX` that is completely copied on each process.
2231: Collective
2233: Input Parameter:
2234: . dm - the original `DMPLEX` object
2236: Output Parameters:
2237: + sf - the `PetscSF` used for point distribution (optional)
2238: - redundantMesh - the redundant `DM` object, or `NULL`
2240: Level: intermediate
2242: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexGetGatherDM()`
2243: @*/
2244: PetscErrorCode DMPlexGetRedundantDM(DM dm, PetscSF *sf, DM *redundantMesh)
2245: {
2246: MPI_Comm comm;
2247: PetscMPIInt size, rank;
2248: PetscInt pStart, pEnd, p;
2249: PetscInt numPoints = -1;
2250: PetscSF migrationSF, sfPoint, gatherSF;
2251: DM gatherDM, dmCoord;
2252: PetscSFNode *points;
2254: PetscFunctionBegin;
2256: PetscAssertPointer(redundantMesh, 3);
2257: *redundantMesh = NULL;
2258: comm = PetscObjectComm((PetscObject)dm);
2259: PetscCallMPI(MPI_Comm_size(comm, &size));
2260: if (size == 1) {
2261: PetscCall(PetscObjectReference((PetscObject)dm));
2262: *redundantMesh = dm;
2263: if (sf) *sf = NULL;
2264: PetscFunctionReturn(PETSC_SUCCESS);
2265: }
2266: PetscCall(DMPlexGetGatherDM(dm, &gatherSF, &gatherDM));
2267: if (!gatherDM) PetscFunctionReturn(PETSC_SUCCESS);
2268: PetscCallMPI(MPI_Comm_rank(comm, &rank));
2269: PetscCall(DMPlexGetChart(gatherDM, &pStart, &pEnd));
2270: numPoints = pEnd - pStart;
2271: PetscCallMPI(MPI_Bcast(&numPoints, 1, MPIU_INT, 0, comm));
2272: PetscCall(PetscMalloc1(numPoints, &points));
2273: PetscCall(PetscSFCreate(comm, &migrationSF));
2274: for (p = 0; p < numPoints; p++) {
2275: points[p].index = p;
2276: points[p].rank = 0;
2277: }
2278: PetscCall(PetscSFSetGraph(migrationSF, pEnd - pStart, numPoints, NULL, PETSC_OWN_POINTER, points, PETSC_OWN_POINTER));
2279: PetscCall(DMPlexCreate(comm, redundantMesh));
2280: PetscCall(PetscObjectSetName((PetscObject)*redundantMesh, "Redundant Mesh"));
2281: PetscCall(DMPlexMigrate(gatherDM, migrationSF, *redundantMesh));
2282: /* This is to express that all points are in the overlap */
2283: PetscCall(DMPlexSetOverlap_Plex(*redundantMesh, NULL, PETSC_INT_MAX));
2284: PetscCall(DMPlexCreatePointSF(*redundantMesh, migrationSF, PETSC_FALSE, &sfPoint));
2285: PetscCall(DMSetPointSF(*redundantMesh, sfPoint));
2286: PetscCall(DMGetCoordinateDM(*redundantMesh, &dmCoord));
2287: if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
2288: PetscCall(PetscSFDestroy(&sfPoint));
2289: if (sf) {
2290: PetscSF tsf;
2292: PetscCall(PetscSFCompose(gatherSF, migrationSF, &tsf));
2293: PetscCall(DMPlexStratifyMigrationSF(dm, tsf, sf));
2294: PetscCall(PetscSFDestroy(&tsf));
2295: }
2296: PetscCall(PetscSFDestroy(&migrationSF));
2297: PetscCall(PetscSFDestroy(&gatherSF));
2298: PetscCall(DMDestroy(&gatherDM));
2299: PetscCall(DMCopyDisc(dm, *redundantMesh));
2300: PetscCall(DMPlexCopy_Internal(dm, PETSC_TRUE, PETSC_FALSE, *redundantMesh));
2301: PetscFunctionReturn(PETSC_SUCCESS);
2302: }
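/* Editor's illustrative sketch (not part of the PETSc sources, excluded from compilation):
   obtain a copy of the mesh that is fully replicated on every rank. The helper name is
   hypothetical; on a single rank the routine returns a reference to `dm` itself. */
#if 0
static PetscErrorCode ExampleRedundant(DM dm, DM *dmRedundant)
{
  PetscFunctionBegin;
  PetscCall(DMPlexGetRedundantDM(dm, NULL, dmRedundant)); /* every rank receives the full mesh */
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif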
2304: /*@
2305: DMPlexIsDistributed - Find out whether this `DM` is distributed, i.e., more than one rank owns some points.
2307: Collective
2309: Input Parameter:
2310: . dm - The `DM` object
2312: Output Parameter:
2313: . distributed - Flag whether the `DM` is distributed
2315: Level: intermediate
2317: Notes:
2318: This currently finds out whether at least two ranks have any DAG points.
2319: This involves `MPI_Allreduce()` with one integer.
2320: The result is currently not stashed so every call to this routine involves this global communication.
2322: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexGetOverlap()`, `DMPlexIsInterpolated()`
2323: @*/
2324: PetscErrorCode DMPlexIsDistributed(DM dm, PetscBool *distributed)
2325: {
2326: PetscInt pStart, pEnd, count;
2327: MPI_Comm comm;
2328: PetscMPIInt size;
2330: PetscFunctionBegin;
2332: PetscAssertPointer(distributed, 2);
2333: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
2334: PetscCallMPI(MPI_Comm_size(comm, &size));
2335: if (size == 1) {
2336: *distributed = PETSC_FALSE;
2337: PetscFunctionReturn(PETSC_SUCCESS);
2338: }
2339: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
2340: count = (pEnd - pStart) > 0 ? 1 : 0;
2341: PetscCallMPI(MPIU_Allreduce(MPI_IN_PLACE, &count, 1, MPIU_INT, MPI_SUM, comm));
2342: *distributed = count > 1 ? PETSC_TRUE : PETSC_FALSE;
2343: PetscFunctionReturn(PETSC_SUCCESS);
2344: }
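/* Editor's illustrative sketch (not part of the PETSc sources, excluded from compilation):
   check whether a mesh is actually spread over more than one rank. The helper name is
   hypothetical; note the call is collective because of the internal MPI_Allreduce(). */
#if 0
static PetscErrorCode ExampleCheckDistributed(DM dm)
{
  PetscBool distributed;

  PetscFunctionBegin;
  PetscCall(DMPlexIsDistributed(dm, &distributed));
  if (!distributed) PetscCall(PetscPrintf(PetscObjectComm((PetscObject)dm), "Mesh resides on a single rank\n"));
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif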
2346: /*@
2347: DMPlexDistributionSetName - Set the name of the specific parallel distribution
2349: Input Parameters:
2350: + dm - The `DM`
2351: - name - The name of the specific parallel distribution
2353: Level: developer
2355: Note:
2356: If distribution name is set when saving, `DMPlexTopologyView()` saves the plex's
2357: parallel distribution (i.e., partition, ownership, and local ordering of points) under
2358: this name. Conversely, if distribution name is set when loading, `DMPlexTopologyLoad()`
2359: loads the parallel distribution stored in file under this name.
2361: .seealso: `DMPLEX`, `DMPlexDistributionGetName()`, `DMPlexTopologyView()`, `DMPlexTopologyLoad()`
2362: @*/
2363: PetscErrorCode DMPlexDistributionSetName(DM dm, const char name[])
2364: {
2365: DM_Plex *mesh = (DM_Plex *)dm->data;
2367: PetscFunctionBegin;
2369: if (name) PetscAssertPointer(name, 2);
2370: PetscCall(PetscFree(mesh->distributionName));
2371: PetscCall(PetscStrallocpy(name, &mesh->distributionName));
2372: PetscFunctionReturn(PETSC_SUCCESS);
2373: }
2375: /*@
2376: DMPlexDistributionGetName - Retrieve the name of the specific parallel distribution
2378: Input Parameter:
2379: . dm - The `DM`
2381: Output Parameter:
2382: . name - The name of the specific parallel distribution
2384: Level: developer
2386: Note:
2387: If distribution name is set when saving, `DMPlexTopologyView()` saves the plex's
2388: parallel distribution (i.e., partition, ownership, and local ordering of points) under
2389: this name. Conversely, if distribution name is set when loading, `DMPlexTopologyLoad()`
2390: loads the parallel distribution stored in file under this name.
2392: .seealso: `DMPLEX`, `DMPlexDistributionSetName()`, `DMPlexTopologyView()`, `DMPlexTopologyLoad()`
2393: @*/
2394: PetscErrorCode DMPlexDistributionGetName(DM dm, const char *name[])
2395: {
2396: DM_Plex *mesh = (DM_Plex *)dm->data;
2398: PetscFunctionBegin;
2400: PetscAssertPointer(name, 2);
2401: *name = mesh->distributionName;
2402: PetscFunctionReturn(PETSC_SUCCESS);
2403: }
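/* Editor's illustrative sketch (not part of the PETSc sources, excluded from compilation):
   name the parallel distribution before viewing or loading topology, then read it back.
   The helper name and the distribution name "my_distribution" are hypothetical. */
#if 0
static PetscErrorCode ExampleNameDistribution(DM dm)
{
  const char *name;

  PetscFunctionBegin;
  PetscCall(DMPlexDistributionSetName(dm, "my_distribution")); /* used by DMPlexTopologyView()/DMPlexTopologyLoad() */
  PetscCall(DMPlexDistributionGetName(dm, &name));
  PetscCall(PetscPrintf(PetscObjectComm((PetscObject)dm), "Distribution name: %s\n", name));
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif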