/* Source file: plexdistribute.c */
1: #include <petsc/private/dmpleximpl.h>
2: #include <petsc/private/dmlabelimpl.h>
3: #include <petsc/private/partitionerimpl.h>
5: /*@C
6: DMPlexSetAdjacencyUser - Define adjacency in the mesh using a user-provided callback
8: Input Parameters:
9: + dm - The DM object
10: . user - The user callback, may be `NULL` (to clear the callback)
11: - ctx - context for callback evaluation, may be `NULL`
13: Level: advanced
15: Notes:
16: The caller of `DMPlexGetAdjacency()` may need to arrange that a large enough array is available for the adjacency.
18: Any setting here overrides other configuration of `DMPLEX` adjacency determination.
20: .seealso: `DMPLEX`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexGetAdjacency()`, `DMPlexGetAdjacencyUser()`
21: @*/
22: PetscErrorCode DMPlexSetAdjacencyUser(DM dm, PetscErrorCode (*user)(DM, PetscInt, PetscInt *, PetscInt[], void *), PetscCtx ctx)
23: {
24: DM_Plex *mesh = (DM_Plex *)dm->data;
26: PetscFunctionBegin;
28: mesh->useradjacency = user;
29: mesh->useradjacencyctx = ctx;
30: PetscFunctionReturn(PETSC_SUCCESS);
31: }
33: /*@C
34: DMPlexGetAdjacencyUser - get the user-defined adjacency callback
36: Input Parameter:
37: . dm - The `DM` object
39: Output Parameters:
40: + user - The callback
41: - ctx - context for callback evaluation
43: Level: advanced
45: .seealso: `DMPLEX`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexGetAdjacency()`, `DMPlexSetAdjacencyUser()`
46: @*/
47: PetscErrorCode DMPlexGetAdjacencyUser(DM dm, PetscErrorCode (**user)(DM, PetscInt, PetscInt *, PetscInt[], void *), void **ctx)
48: {
49: DM_Plex *mesh = (DM_Plex *)dm->data;
51: PetscFunctionBegin;
53: if (user) *user = mesh->useradjacency;
54: if (ctx) *ctx = mesh->useradjacencyctx;
55: PetscFunctionReturn(PETSC_SUCCESS);
56: }
58: /*@
59: DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.
61: Input Parameters:
62: + dm - The `DM` object
63: - useAnchors - Flag to use the constraints. If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.
65: Level: intermediate
67: .seealso: `DMPLEX`, `DMGetAdjacency()`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexSetAnchors()`
68: @*/
69: PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors)
70: {
71: DM_Plex *mesh = (DM_Plex *)dm->data;
73: PetscFunctionBegin;
75: mesh->useAnchors = useAnchors;
76: PetscFunctionReturn(PETSC_SUCCESS);
77: }
79: /*@
80: DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.
82: Input Parameter:
83: . dm - The `DM` object
85: Output Parameter:
86: . useAnchors - Flag to use the closure. If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.
88: Level: intermediate
90: .seealso: `DMPLEX`, `DMPlexSetAdjacencyUseAnchors()`, `DMSetAdjacency()`, `DMGetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexSetAnchors()`
91: @*/
92: PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors)
93: {
94: DM_Plex *mesh = (DM_Plex *)dm->data;
96: PetscFunctionBegin;
98: PetscAssertPointer(useAnchors, 2);
99: *useAnchors = mesh->useAnchors;
100: PetscFunctionReturn(PETSC_SUCCESS);
101: }
/* Cone-based adjacency: a point q is adjacent to p if q lies in the support of
   p itself or of any point in cone(p).  On input *adjSize is the capacity of
   adj[]; on output it is the number of (unique) adjacent points found. */
static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
{
  const PetscInt *cone = NULL;
  PetscInt        numAdj = 0, maxAdjSize = *adjSize, coneSize, c;

  PetscFunctionBeginHot;
  PetscCall(DMPlexGetConeSize(dm, p, &coneSize));
  PetscCall(DMPlexGetCone(dm, p, &cone));
  /* c == 0 handles p itself; c > 0 walks cone(p) */
  for (c = 0; c <= coneSize; ++c) {
    const PetscInt  point   = !c ? p : cone[c - 1];
    const PetscInt *support = NULL;
    PetscInt        supportSize, s, q;

    PetscCall(DMPlexGetSupportSize(dm, point, &supportSize));
    PetscCall(DMPlexGetSupport(dm, point, &support));
    for (s = 0; s < supportSize; ++s) {
      /* Linear duplicate scan: the comma expression in the loop condition
         appends support[s] to adj[] exactly when q reaches numAdj, i.e. when
         the point was not already present (the appended entry then terminates
         the loop because it matches itself) */
      for (q = 0; q < numAdj || ((void)(adj[numAdj++] = support[s]), 0); ++q) {
        if (support[s] == adj[q]) break;
      }
      PetscCheck(numAdj <= maxAdjSize, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
    }
  }
  *adjSize = numAdj;
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Support-based adjacency: a point q is adjacent to p if q lies in the cone of
   p itself or of any point in supp(p).  On input *adjSize is the capacity of
   adj[]; on output it is the number of (unique) adjacent points found. */
static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
{
  const PetscInt *support = NULL;
  PetscInt        numAdj = 0, maxAdjSize = *adjSize, supportSize, s;

  PetscFunctionBeginHot;
  PetscCall(DMPlexGetSupportSize(dm, p, &supportSize));
  PetscCall(DMPlexGetSupport(dm, p, &support));
  /* s == 0 handles p itself; s > 0 walks supp(p) */
  for (s = 0; s <= supportSize; ++s) {
    const PetscInt  point = !s ? p : support[s - 1];
    const PetscInt *cone  = NULL;
    PetscInt        coneSize, c, q;

    PetscCall(DMPlexGetConeSize(dm, point, &coneSize));
    PetscCall(DMPlexGetCone(dm, point, &cone));
    for (c = 0; c < coneSize; ++c) {
      /* Duplicate scan: the comma expression appends cone[c] only when the
         scan runs off the end of adj[], i.e. the point is not yet present */
      for (q = 0; q < numAdj || ((void)(adj[numAdj++] = cone[c]), 0); ++q) {
        if (cone[c] == adj[q]) break;
      }
      PetscCheck(numAdj <= maxAdjSize, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
    }
  }
  *adjSize = numAdj;
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Transitive-closure adjacency: adj(p) = cl(st(p)) when useClosure is
   PETSC_TRUE (FEM-style), st(cl(p)) otherwise.  The outer traversal direction
   is useClosure; the inner traversal uses the opposite direction.  On input
   *adjSize is the capacity of adj[]; on output the number of unique points. */
static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[])
{
  PetscInt *star = NULL;
  PetscInt  numAdj = 0, maxAdjSize = *adjSize, starSize, s;

  PetscFunctionBeginHot;
  PetscCall(DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star));
  /* Closure arrays interleave (point, orientation) pairs; stride 2 visits points only */
  for (s = 0; s < starSize * 2; s += 2) {
    const PetscInt *closure = NULL;
    PetscInt        closureSize, c, q;

    /* Traverse in the opposite direction from the outer loop */
    PetscCall(DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt **)&closure));
    for (c = 0; c < closureSize * 2; c += 2) {
      /* Duplicate scan: the comma expression appends closure[c] iff it is not
         already in adj[] */
      for (q = 0; q < numAdj || ((void)(adj[numAdj++] = closure[c]), 0); ++q) {
        if (closure[c] == adj[q]) break;
      }
      PetscCheck(numAdj <= maxAdjSize, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
    }
    PetscCall(DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt **)&closure));
  }
  PetscCall(DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star));
  *adjSize = numAdj;
  PetscFunctionReturn(PETSC_SUCCESS);
}
// Returns the maximum number of adjacent points in the DMPlex
// (an upper bound used to size adjacency work arrays; never more than the chart size)
PetscErrorCode DMPlexGetMaxAdjacencySize_Internal(DM dm, PetscBool useAnchors, PetscInt *max_adjacency_size)
{
  PetscInt depth, maxC, maxS, maxP, pStart, pEnd, asiz, maxAnchors = 1;

  PetscFunctionBeginUser;
  if (useAnchors) {
    PetscSection aSec = NULL;
    IS           aIS  = NULL;
    PetscInt     aStart, aEnd;
    PetscCall(DMPlexGetAnchors(dm, &aSec, &aIS));
    if (aSec) {
      /* Each constrained point may be replaced by up to maxAnchors anchor points */
      PetscCall(PetscSectionGetMaxDof(aSec, &maxAnchors));
      maxAnchors = PetscMax(1, maxAnchors);
      /* NOTE(review): aStart/aEnd are fetched here but never used below — confirm intent */
      PetscCall(PetscSectionGetChart(aSec, &aStart, &aEnd));
    }
  }

  PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
  PetscCall(DMPlexGetDepth(dm, &depth));
  depth = PetscMax(depth, -depth); /* use |depth|, guarding against a negative depth value */
  PetscCall(DMPlexGetMaxSizes(dm, &maxC, &maxS));
  maxP = maxS * maxC;
  /* Adjacency can be as large as supp(cl(cell)) or cl(supp(vertex)),
     supp(cell) + supp(maxC faces) + supp(maxC^2 edges) + supp(maxC^3 vertices)
   = 0 + maxS*maxC + maxS^2*maxC^2 + maxS^3*maxC^3
   = \sum^d_{i=0} (maxS*maxC)^i - 1
   = (maxS*maxC)^{d+1} - 1 / (maxS*maxC - 1) - 1
   We could improve this by getting the max by strata:
     supp[d](cell) + supp[d-1](maxC[d] faces) + supp[1](maxC[d]*maxC[d-1] edges) + supp[0](maxC[d]*maxC[d-1]*maxC[d-2] vertices)
   = 0 + maxS[d-1]*maxC[d] + maxS[1]*maxC[d]*maxC[d-1] + maxS[0]*maxC[d]*maxC[d-1]*maxC[d-2]
   and the same with S and C reversed
  */
  /* For large maxP the geometric-series bound exceeds the chart size anyway, so
     use the chart size directly (thresholds appear empirical — TODO confirm) */
  if ((depth == 3 && maxP > 200) || (depth == 2 && maxP > 580)) asiz = pEnd - pStart;
  else asiz = (maxP > 1) ? ((PetscPowInt(maxP, depth + 1) - 1) / (maxP - 1)) : depth + 1;
  asiz *= maxAnchors;
  *max_adjacency_size = PetscMin(asiz, pEnd - pStart); /* cap at the number of points */
  PetscFunctionReturn(PETSC_SUCCESS);
}
// Returns Adjacent mesh points to the selected point given specific criteria
//
// + adjSize - Number of adjacent points
// - adj - Array of the adjacent points
//
// On input, *adj == NULL means "allocate for me" (the caller must PetscFree() it),
// and *adjSize < 0 means "use the computed maximum capacity".
PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[])
{
  /* NOTE(review): static cache of the last computed capacity; a caller that
     passes a preallocated adj with *adjSize < 0 gets a capacity computed by an
     earlier call (possibly for a different DM), and the cache is not
     thread-safe — confirm this is intended */
  static PetscInt asiz = 0;
  PetscInt        aStart = -1, aEnd = -1;
  PetscInt        maxAdjSize;
  PetscSection    aSec = NULL;
  IS              aIS = NULL;
  const PetscInt *anchors;
  DM_Plex        *mesh = (DM_Plex *)dm->data;

  PetscFunctionBeginHot;
  if (useAnchors) {
    PetscCall(DMPlexGetAnchors(dm, &aSec, &aIS));
    if (aSec) {
      PetscCall(PetscSectionGetChart(aSec, &aStart, &aEnd));
      PetscCall(ISGetIndices(aIS, &anchors));
    }
  }
  if (!*adj) {
    /* Allocate for the worst-case adjacency size */
    PetscCall(DMPlexGetMaxAdjacencySize_Internal(dm, useAnchors, &asiz));
    PetscCall(PetscMalloc1(asiz, adj));
  }
  if (*adjSize < 0) *adjSize = asiz;
  maxAdjSize = *adjSize;
  /* Dispatch: a user callback overrides everything, then transitive closure,
     then the cone- or support-based definitions */
  if (mesh->useradjacency) {
    PetscCall((*mesh->useradjacency)(dm, p, adjSize, *adj, mesh->useradjacencyctx));
  } else if (useTransitiveClosure) {
    PetscCall(DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj));
  } else if (useCone) {
    PetscCall(DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj));
  } else {
    PetscCall(DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj));
  }
  if (useAnchors && aSec) {
    /* Replace each constrained (anchored) point in adj[] by its anchor points,
       keeping the list duplicate-free */
    PetscInt  origSize = *adjSize;
    PetscInt  numAdj   = origSize;
    PetscInt  i        = 0, j;
    PetscInt *orig     = *adj;

    while (i < origSize) {
      PetscInt p = orig[i]; /* NOTE(review): this local p shadows the function argument p */
      PetscInt aDof = 0;

      if (p >= aStart && p < aEnd) PetscCall(PetscSectionGetDof(aSec, p, &aDof));
      if (aDof) {
        PetscInt aOff;
        PetscInt s, q;

        /* Remove the constrained point by shifting the tail left; i is NOT
           advanced so the shifted-in entry is examined next */
        for (j = i + 1; j < numAdj; j++) orig[j - 1] = orig[j];
        origSize--;
        numAdj--;
        PetscCall(PetscSectionGetOffset(aSec, p, &aOff));
        /* Append each anchor point unless already present (comma-expression idiom,
           as in the *_Internal adjacency helpers above) */
        for (s = 0; s < aDof; ++s) {
          for (q = 0; q < numAdj || ((void)(orig[numAdj++] = anchors[aOff + s]), 0); ++q) {
            if (anchors[aOff + s] == orig[q]) break;
          }
          PetscCheck(numAdj <= maxAdjSize, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
        }
      } else {
        i++;
      }
    }
    *adjSize = numAdj;
    PetscCall(ISRestoreIndices(aIS, &anchors));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
292: /*@
293: DMPlexGetAdjacency - Return all points adjacent to the given point
295: Input Parameters:
296: + dm - The `DM` object
297: - p - The point
299: Input/Output Parameters:
300: + adjSize - The maximum size of `adj` if it is non-`NULL`, or `PETSC_DETERMINE`;
301: on output the number of adjacent points
302: - adj - Either `NULL` so that the array is allocated, or an existing array with size `adjSize`;
303: on output contains the adjacent points
305: Level: advanced
307: Notes:
308: The user must `PetscFree()` the `adj` array if it was not passed in.
310: .seealso: `DMPLEX`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMCreateMatrix()`, `DMPlexPreallocateOperator()`
311: @*/
312: PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[])
313: {
314: PetscBool useCone, useClosure, useAnchors;
316: PetscFunctionBeginHot;
318: PetscAssertPointer(adjSize, 3);
319: PetscAssertPointer(adj, 4);
320: PetscCall(DMGetBasicAdjacency(dm, &useCone, &useClosure));
321: PetscCall(DMPlexGetAdjacencyUseAnchors(dm, &useAnchors));
322: PetscCall(DMPlexGetAdjacency_Internal(dm, p, useCone, useClosure, useAnchors, adjSize, adj));
323: PetscFunctionReturn(PETSC_SUCCESS);
324: }
326: /*@
327: DMPlexCreateTwoSidedProcessSF - Create an `PetscSF` which just has process connectivity
329: Collective
331: Input Parameters:
332: + dm - The `DM`
333: . sfPoint - The `PetscSF` which encodes point connectivity
334: . rootRankSection - to be documented
335: . rootRanks - to be documented
336: . leafRankSection - to be documented
337: - leafRanks - to be documented
339: Output Parameters:
340: + processRanks - A list of process neighbors, or `NULL`
341: - sfProcess - An `PetscSF` encoding the two-sided process connectivity, or `NULL`
343: Level: developer
345: .seealso: `DMPLEX`, `PetscSFCreate()`, `DMPlexCreateProcessSF()`
346: @*/
347: PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, PeOp IS *processRanks, PeOp PetscSF *sfProcess)
348: {
349: const PetscSFNode *remotePoints;
350: PetscInt *localPointsNew;
351: PetscSFNode *remotePointsNew;
352: const PetscInt *nranks;
353: PetscInt *ranksNew;
354: PetscBT neighbors;
355: PetscInt pStart, pEnd, p, numLeaves, l, numNeighbors, n;
356: PetscMPIInt size, proc, rank;
358: PetscFunctionBegin;
361: if (processRanks) PetscAssertPointer(processRanks, 7);
362: if (sfProcess) PetscAssertPointer(sfProcess, 8);
363: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)dm), &size));
364: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)dm), &rank));
365: PetscCall(PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints));
366: PetscCall(PetscBTCreate(size, &neighbors));
367: PetscCall(PetscBTMemzero(size, neighbors));
368: /* Compute root-to-leaf process connectivity */
369: PetscCall(PetscSectionGetChart(rootRankSection, &pStart, &pEnd));
370: PetscCall(ISGetIndices(rootRanks, &nranks));
371: for (p = pStart; p < pEnd; ++p) {
372: PetscInt ndof, noff, n;
374: PetscCall(PetscSectionGetDof(rootRankSection, p, &ndof));
375: PetscCall(PetscSectionGetOffset(rootRankSection, p, &noff));
376: for (n = 0; n < ndof; ++n) PetscCall(PetscBTSet(neighbors, nranks[noff + n]));
377: }
378: PetscCall(ISRestoreIndices(rootRanks, &nranks));
379: /* Compute leaf-to-neighbor process connectivity */
380: PetscCall(PetscSectionGetChart(leafRankSection, &pStart, &pEnd));
381: PetscCall(ISGetIndices(leafRanks, &nranks));
382: for (p = pStart; p < pEnd; ++p) {
383: PetscInt ndof, noff, n;
385: PetscCall(PetscSectionGetDof(leafRankSection, p, &ndof));
386: PetscCall(PetscSectionGetOffset(leafRankSection, p, &noff));
387: for (n = 0; n < ndof; ++n) PetscCall(PetscBTSet(neighbors, nranks[noff + n]));
388: }
389: PetscCall(ISRestoreIndices(leafRanks, &nranks));
390: /* Compute leaf-to-root process connectivity */
391: for (l = 0; l < numLeaves; ++l) PetscCall(PetscBTSet(neighbors, remotePoints[l].rank));
392: /* Calculate edges */
393: PetscCall(PetscBTClear(neighbors, rank));
394: for (proc = 0, numNeighbors = 0; proc < size; ++proc) {
395: if (PetscBTLookup(neighbors, proc)) ++numNeighbors;
396: }
397: PetscCall(PetscMalloc1(numNeighbors, &ranksNew));
398: PetscCall(PetscMalloc1(numNeighbors, &localPointsNew));
399: PetscCall(PetscMalloc1(numNeighbors, &remotePointsNew));
400: for (proc = 0, n = 0; proc < size; ++proc) {
401: if (PetscBTLookup(neighbors, proc)) {
402: ranksNew[n] = proc;
403: localPointsNew[n] = proc;
404: remotePointsNew[n].index = rank;
405: remotePointsNew[n].rank = proc;
406: ++n;
407: }
408: }
409: PetscCall(PetscBTDestroy(&neighbors));
410: if (processRanks) PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks));
411: else PetscCall(PetscFree(ranksNew));
412: if (sfProcess) {
413: PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess));
414: PetscCall(PetscObjectSetName((PetscObject)*sfProcess, "Two-Sided Process SF"));
415: PetscCall(PetscSFSetFromOptions(*sfProcess));
416: PetscCall(PetscSFSetGraph(*sfProcess, size, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER));
417: }
418: PetscFunctionReturn(PETSC_SUCCESS);
419: }
421: /*@
422: DMPlexDistributeOwnership - Compute owner information for shared points. This basically gets two-sided for an SF.
424: Collective
426: Input Parameter:
427: . dm - The `DM`
429: Output Parameters:
430: + rootSection - The number of leaves for a given root point
431: . rootrank - The rank of each edge into the root point
432: . leafSection - The number of processes sharing a given leaf point
433: - leafrank - The rank of each process sharing a leaf point
435: Level: developer
437: .seealso: `DMPLEX`, `DMPlexCreateOverlapLabel()`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
438: @*/
PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank)
{
  MPI_Comm        comm;
  PetscSF         sfPoint;
  const PetscInt *rootdegree;
  PetscInt       *myrank, *remoterank;
  PetscInt        pStart, pEnd, p, nedges;
  PetscMPIInt     rank;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
  PetscCall(DMGetPointSF(dm, &sfPoint));
  /* Compute number of leaves for each root */
  PetscCall(PetscObjectSetName((PetscObject)rootSection, "Root Section"));
  PetscCall(PetscSectionSetChart(rootSection, pStart, pEnd));
  /* rootdegree[p - pStart] = number of remote leaves attached to local root p */
  PetscCall(PetscSFComputeDegreeBegin(sfPoint, &rootdegree));
  PetscCall(PetscSFComputeDegreeEnd(sfPoint, &rootdegree));
  for (p = pStart; p < pEnd; ++p) PetscCall(PetscSectionSetDof(rootSection, p, rootdegree[p - pStart]));
  PetscCall(PetscSectionSetUp(rootSection));
  /* Gather rank of each leaf to root */
  PetscCall(PetscSectionGetStorageSize(rootSection, &nedges));
  PetscCall(PetscMalloc1(pEnd - pStart, &myrank));
  PetscCall(PetscMalloc1(nedges, &remoterank));
  /* Every local point advertises this process's rank ... */
  for (p = 0; p < pEnd - pStart; ++p) myrank[p] = rank;
  /* ... which is gathered at the owning root, one entry per sharing process */
  PetscCall(PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank));
  PetscCall(PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank));
  PetscCall(PetscFree(myrank));
  /* rootrank takes ownership of remoterank */
  PetscCall(ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank));
  /* Distribute remote ranks to leaves */
  PetscCall(PetscObjectSetName((PetscObject)leafSection, "Leaf Section"));
  PetscCall(DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank));
  PetscFunctionReturn(PETSC_SUCCESS);
}
475: /*@
476: DMPlexCreateOverlapLabel - Compute a label indicating what overlap points should be sent to new processes
478: Collective
480: Input Parameters:
481: + dm - The `DM`
482: . levels - Number of overlap levels
483: . rootSection - The number of leaves for a given root point
484: . rootrank - The rank of each edge into the root point
485: . leafSection - The number of processes sharing a given leaf point
486: - leafrank - The rank of each process sharing a leaf point
488: Output Parameter:
489: . ovLabel - `DMLabel` containing remote overlap contributions as point/rank pairings
491: Level: developer
493: .seealso: `DMPLEX`, `DMPlexCreateOverlapLabelFromLabels()`, `DMPlexGetAdjacency()`, `DMPlexDistributeOwnership()`, `DMPlexDistribute()`
494: @*/
PetscErrorCode DMPlexCreateOverlapLabel(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
{
  MPI_Comm           comm;
  DMLabel            ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
  PetscSF            sfPoint;
  const PetscSFNode *remote;
  const PetscInt    *local;
  const PetscInt    *nrank, *rrank;
  PetscInt          *adj = NULL;
  PetscInt           pStart, pEnd, p, sStart, sEnd, nleaves, l;
  PetscMPIInt        rank, size;
  PetscBool          flg;

  PetscFunctionBegin;
  *ovLabel = NULL;
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  /* Serial runs have no overlap to compute; *ovLabel stays NULL */
  if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
  PetscCall(DMGetPointSF(dm, &sfPoint));
  PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
  if (!levels) {
    /* Add owned points */
    PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel));
    for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank));
    PetscFunctionReturn(PETSC_SUCCESS);
  }
  PetscCall(PetscSectionGetChart(leafSection, &sStart, &sEnd));
  PetscCall(PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote));
  PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank));
  /* Handle leaves: shared with the root point */
  for (l = 0; l < nleaves; ++l) {
    PetscInt adjSize = PETSC_DETERMINE, a;

    /* local == NULL means the leaves are the contiguous range [0, nleaves) */
    PetscCall(DMPlexGetAdjacency(dm, local ? local[l] : l, &adjSize, &adj));
    for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank));
  }
  PetscCall(ISGetIndices(rootrank, &rrank));
  PetscCall(ISGetIndices(leafrank, &nrank));
  /* Handle roots */
  for (p = pStart; p < pEnd; ++p) {
    PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a;

    if ((p >= sStart) && (p < sEnd)) {
      /* Some leaves share a root with other leaves on different processes */
      PetscCall(PetscSectionGetDof(leafSection, p, &neighbors));
      if (neighbors) {
        PetscCall(PetscSectionGetOffset(leafSection, p, &noff));
        PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
        for (n = 0; n < neighbors; ++n) {
          const PetscInt remoteRank = nrank[noff + n];

          if (remoteRank == rank) continue; /* do not donate points to ourselves */
          for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
        }
      }
    }
    /* Roots are shared with leaves */
    PetscCall(PetscSectionGetDof(rootSection, p, &neighbors));
    if (!neighbors) continue;
    PetscCall(PetscSectionGetOffset(rootSection, p, &noff));
    PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
    for (n = 0; n < neighbors; ++n) {
      const PetscInt remoteRank = rrank[noff + n];

      if (remoteRank == rank) continue;
      for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
    }
  }
  /* adj was allocated by the first DMPlexGetAdjacency() call and reused throughout */
  PetscCall(PetscFree(adj));
  PetscCall(ISRestoreIndices(rootrank, &rrank));
  PetscCall(ISRestoreIndices(leafrank, &nrank));
  /* Add additional overlap levels */
  for (l = 1; l < levels; l++) {
    /* Propagate point donations over SF to capture remote connections */
    PetscCall(DMPlexPartitionLabelPropagate(dm, ovAdjByRank));
    /* Add next level of point donations to the label */
    PetscCall(DMPlexPartitionLabelAdjacency(dm, ovAdjByRank));
  }
  /* We require the closure in the overlap */
  PetscCall(DMPlexPartitionLabelClosure(dm, ovAdjByRank));
  PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-overlap_view", &flg));
  if (flg) {
    PetscViewer viewer;
    PetscCall(PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer));
    PetscCall(DMLabelView(ovAdjByRank, viewer));
  }
  /* Invert sender to receiver label */
  PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel));
  PetscCall(DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel));
  /* Add owned points, except for shared local points */
  for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank));
  for (l = 0; l < nleaves; ++l) {
    /* NOTE(review): local[l] is dereferenced unconditionally here, while the
       leaf loop above guards with `local ? local[l] : l` — confirm sfPoint
       always provides explicit leaf indices on this path */
    PetscCall(DMLabelClearValue(*ovLabel, local[l], rank));
    PetscCall(DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank));
  }
  /* Clean up */
  PetscCall(DMLabelDestroy(&ovAdjByRank));
  PetscFunctionReturn(PETSC_SUCCESS);
}
596: static PetscErrorCode HandlePoint_Private(DM dm, PetscInt p, PetscSection section, const PetscInt ranks[], PetscInt numExLabels, const DMLabel exLabel[], const PetscInt exValue[], DMLabel ovAdjByRank)
597: {
598: PetscInt neighbors, el;
600: PetscFunctionBegin;
601: PetscCall(PetscSectionGetDof(section, p, &neighbors));
602: if (neighbors) {
603: PetscInt *adj = NULL;
604: PetscInt adjSize = PETSC_DETERMINE, noff, n, a;
605: PetscMPIInt rank;
607: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)dm), &rank));
608: PetscCall(PetscSectionGetOffset(section, p, &noff));
609: PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
610: for (n = 0; n < neighbors; ++n) {
611: const PetscInt remoteRank = ranks[noff + n];
613: if (remoteRank == rank) continue;
614: for (a = 0; a < adjSize; ++a) {
615: PetscBool insert = PETSC_TRUE;
617: for (el = 0; el < numExLabels; ++el) {
618: PetscInt exVal;
619: PetscCall(DMLabelGetValue(exLabel[el], adj[a], &exVal));
620: if (exVal == exValue[el]) {
621: insert = PETSC_FALSE;
622: break;
623: }
624: }
625: if (insert) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
626: }
627: }
628: PetscCall(PetscFree(adj));
629: }
630: PetscFunctionReturn(PETSC_SUCCESS);
631: }
633: /*@C
634: DMPlexCreateOverlapLabelFromLabels - Compute a label indicating what overlap points should be sent to new processes
636: Collective
638: Input Parameters:
639: + dm - The `DM`
640: . numLabels - The number of labels to draw candidate points from
641: . label - An array of labels containing candidate points
642: . value - An array of label values marking the candidate points
643: . numExLabels - The number of labels to use for exclusion
644: . exLabel - An array of labels indicating points to be excluded, or `NULL`
645: . exValue - An array of label values to be excluded, or `NULL`
646: . rootSection - The number of leaves for a given root point
647: . rootrank - The rank of each edge into the root point
648: . leafSection - The number of processes sharing a given leaf point
649: - leafrank - The rank of each process sharing a leaf point
651: Output Parameter:
652: . ovLabel - `DMLabel` containing remote overlap contributions as point/rank pairings
654: Level: developer
656: Note:
657: The candidate points are only added to the overlap if they are adjacent to a shared point
659: .seealso: `DMPLEX`, `DMPlexCreateOverlapLabel()`, `DMPlexGetAdjacency()`, `DMPlexDistributeOwnership()`, `DMPlexDistribute()`
660: @*/
PetscErrorCode DMPlexCreateOverlapLabelFromLabels(DM dm, PetscInt numLabels, const DMLabel label[], const PetscInt value[], PetscInt numExLabels, const DMLabel exLabel[], const PetscInt exValue[], PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
{
  MPI_Comm           comm;
  DMLabel            ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
  PetscSF            sfPoint;
  const PetscSFNode *remote;
  const PetscInt    *local;
  const PetscInt    *nrank, *rrank;
  PetscInt          *adj = NULL;
  PetscInt           pStart, pEnd, p, sStart, sEnd, nleaves, l, el;
  PetscMPIInt        rank, size;
  PetscBool          flg;

  PetscFunctionBegin;
  *ovLabel = NULL;
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  /* Serial runs have no overlap; *ovLabel stays NULL */
  if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
  PetscCall(DMGetPointSF(dm, &sfPoint));
  PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
  PetscCall(PetscSectionGetChart(leafSection, &sStart, &sEnd));
  PetscCall(PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote));
  PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank));
  PetscCall(ISGetIndices(rootrank, &rrank));
  PetscCall(ISGetIndices(leafrank, &nrank));
  /* Candidate points are drawn only from the given label strata */
  for (l = 0; l < numLabels; ++l) {
    IS              valIS;
    const PetscInt *points;
    PetscInt        n;

    PetscCall(DMLabelGetStratumIS(label[l], value[l], &valIS));
    if (!valIS) continue; /* stratum absent on this process */
    PetscCall(ISGetIndices(valIS, &points));
    PetscCall(ISGetLocalSize(valIS, &n));
    for (PetscInt i = 0; i < n; ++i) {
      const PetscInt p = points[i];

      if ((p >= sStart) && (p < sEnd)) {
        PetscInt loc, adjSize = PETSC_DETERMINE;

        /* Handle leaves: shared with the root point */
        /* NOTE(review): PetscFindInt() is a binary search, so this relies on
           local[] being sorted — confirm for this point SF */
        if (local) PetscCall(PetscFindInt(p, nleaves, local, &loc));
        else loc = (p >= 0 && p < nleaves) ? p : -1; /* NULL local means leaves are [0, nleaves) */
        if (loc >= 0) {
          const PetscInt remoteRank = remote[loc].rank;

          PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
          for (PetscInt a = 0; a < adjSize; ++a) {
            PetscBool insert = PETSC_TRUE;

            /* Skip points marked by any exclusion label/value pair */
            for (el = 0; el < numExLabels; ++el) {
              PetscInt exVal;
              PetscCall(DMLabelGetValue(exLabel[el], adj[a], &exVal));
              if (exVal == exValue[el]) {
                insert = PETSC_FALSE;
                break;
              }
            }
            if (insert) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
          }
        }
        /* Some leaves share a root with other leaves on different processes */
        PetscCall(HandlePoint_Private(dm, p, leafSection, nrank, numExLabels, exLabel, exValue, ovAdjByRank));
      }
      /* Roots are shared with leaves */
      PetscCall(HandlePoint_Private(dm, p, rootSection, rrank, numExLabels, exLabel, exValue, ovAdjByRank));
    }
    PetscCall(ISRestoreIndices(valIS, &points));
    PetscCall(ISDestroy(&valIS));
  }
  /* adj was allocated by the first DMPlexGetAdjacency() call and reused */
  PetscCall(PetscFree(adj));
  PetscCall(ISRestoreIndices(rootrank, &rrank));
  PetscCall(ISRestoreIndices(leafrank, &nrank));
  /* We require the closure in the overlap */
  PetscCall(DMPlexPartitionLabelClosure(dm, ovAdjByRank));
  PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-overlap_view", &flg));
  if (flg) {
    PetscViewer viewer;
    PetscCall(PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer));
    PetscCall(DMLabelView(ovAdjByRank, viewer));
  }
  /* Invert sender to receiver label */
  PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel));
  PetscCall(DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel));
  /* Add owned points, except for shared local points */
  for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank));
  for (l = 0; l < nleaves; ++l) {
    /* NOTE(review): local[l] is dereferenced unconditionally here although the
       code above allows local == NULL — confirm this path never sees a
       contiguous leaf range */
    PetscCall(DMLabelClearValue(*ovLabel, local[l], rank));
    PetscCall(DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank));
  }
  /* Clean up */
  PetscCall(DMLabelDestroy(&ovAdjByRank));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/*@
  DMPlexCreateOverlapMigrationSF - Create a `PetscSF` describing the new mesh distribution to make the overlap described by the input `PetscSF`

  Collective

  Input Parameters:
+ dm        - The `DM`
- overlapSF - The `PetscSF` mapping ghost points in overlap to owner points on other processes

  Output Parameter:
. migrationSF - A `PetscSF` that maps original points in old locations to points in new locations

  Level: developer

.seealso: `DMPLEX`, `DMPlexCreateOverlapLabel()`, `DMPlexDistributeOverlap()`, `DMPlexDistribute()`
@*/
PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF)
{
  MPI_Comm           comm;
  PetscMPIInt        rank, size;
  PetscInt           d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
  PetscInt          *pointDepths, *remoteDepths, *ilocal;
  PetscInt          *depthRecv, *depthShift, *depthIdx;
  PetscSFNode       *iremote;
  PetscSF            pointSF;
  const PetscInt    *sharedLocal;
  const PetscSFNode *overlapRemote, *sharedRemote;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCall(DMGetDimension(dm, &dim));

  /* Before building the migration SF we need to know the new stratum offsets */
  /* In overlapSF, roots are all current local points and leaves are the incoming ghost points */
  PetscCall(PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote));
  PetscCall(PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths));
  /* Record the depth stratum of every local point */
  for (d = 0; d < dim + 1; d++) {
    PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
    for (p = pStart; p < pEnd; p++) pointDepths[p] = d;
  }
  for (p = 0; p < nleaves; p++) remoteDepths[p] = -1; /* sentinel, overwritten by the broadcast below */
  /* Each rank learns the depth of the ghost points it is about to receive */
  PetscCall(PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths, MPI_REPLACE));
  PetscCall(PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths, MPI_REPLACE));

  /* Count received points in each stratum and compute the internal strata shift */
  PetscCall(PetscMalloc3(dim + 1, &depthRecv, dim + 1, &depthShift, dim + 1, &depthIdx));
  for (d = 0; d < dim + 1; d++) depthRecv[d] = 0;
  for (p = 0; p < nleaves; p++) depthRecv[remoteDepths[p]]++;
  /* depthShift[d] accumulates the number of received points in the strata that precede
     stratum d, with strata taken in the order cells (depth dim), vertices (0), faces
     (dim-1), edges (1).
     NOTE(review): this hand-unrolled accumulation assumes the standard interpolated
     stratum ordering used by DMPlexStratify() -- confirm if the layout changes. */
  depthShift[dim] = 0;
  for (d = 0; d < dim; d++) depthShift[d] = depthRecv[dim];
  for (d = 1; d < dim; d++) depthShift[d] += depthRecv[0];
  for (d = dim - 2; d > 0; d--) depthShift[d] += depthRecv[d + 1];
  for (d = 0; d < dim + 1; d++) {
    PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
    depthIdx[d] = pStart + depthShift[d]; /* next free new-numbering slot for stratum d */
  }

  /* Form the overlap SF build an SF that describes the full overlap migration SF */
  PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
  newLeaves = pEnd - pStart + nleaves; /* all old local points plus the incoming ghosts */
  PetscCall(PetscMalloc1(newLeaves, &ilocal));
  PetscCall(PetscMalloc1(newLeaves, &iremote));
  /* First map local points to themselves */
  for (d = 0; d < dim + 1; d++) {
    PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
    for (p = pStart; p < pEnd; p++) {
      point                = p + depthShift[d]; /* old point shifted past received points of earlier strata */
      ilocal[point]        = point;
      iremote[point].index = p;
      iremote[point].rank  = rank;
      depthIdx[d]++;
    }
  }

  /* Add in the remote roots for currently shared points */
  PetscCall(DMGetPointSF(dm, &pointSF));
  PetscCall(PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote));
  for (d = 0; d < dim + 1; d++) {
    PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
    for (p = 0; p < numSharedPoints; p++) {
      if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
        /* A shared point keeps its remote owner as root rather than pointing to itself */
        point                = sharedLocal[p] + depthShift[d];
        iremote[point].index = sharedRemote[p].index;
        iremote[point].rank  = sharedRemote[p].rank;
      }
    }
  }

  /* Now add the incoming overlap points */
  for (p = 0; p < nleaves; p++) {
    /* Ghost points are appended at the next free slot of their stratum */
    point                = depthIdx[remoteDepths[p]];
    ilocal[point]        = point;
    iremote[point].index = overlapRemote[p].index;
    iremote[point].rank  = overlapRemote[p].rank;
    depthIdx[remoteDepths[p]]++;
  }
  PetscCall(PetscFree2(pointDepths, remoteDepths));

  PetscCall(PetscSFCreate(comm, migrationSF));
  PetscCall(PetscObjectSetName((PetscObject)*migrationSF, "Overlap Migration SF"));
  PetscCall(PetscSFSetFromOptions(*migrationSF));
  PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
  /* The SF takes ownership of both ilocal and iremote (PETSC_OWN_POINTER) */
  PetscCall(PetscSFSetGraph(*migrationSF, pEnd - pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER));

  PetscCall(PetscFree3(depthRecv, depthShift, depthIdx));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/*@
  DMPlexStratifyMigrationSF - Rearrange the leaves of a migration sf for stratification.

  Collective

  Input Parameters:
+ dm - The DM
- sf - A star forest with non-ordered leaves, usually defining a DM point migration

  Output Parameter:
. migrationSF - A star forest with added leaf indirection that ensures the resulting DM is stratified

  Level: developer

  Note:
  This lexicographically sorts by (depth, cellType)

.seealso: `DMPLEX`, `DMPlexPartitionLabelCreateSF()`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
@*/
PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF)
{
  MPI_Comm           comm;
  PetscMPIInt        rank, size;
  PetscInt           d, ldepth, depth, dim, p, pStart, pEnd, nroots, nleaves;
  PetscSFNode       *pointDepths, *remoteDepths;
  PetscInt          *ilocal;
  PetscInt          *depthRecv, *depthShift, *depthIdx;
  PetscInt          *ctRecv, *ctShift, *ctIdx;
  const PetscSFNode *iremote;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCall(DMPlexGetDepth(dm, &ldepth));
  PetscCall(DMGetDimension(dm, &dim));
  PetscCallMPI(MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm));
  /* A rank that has points (ldepth >= 0) must agree with the global maximum depth */
  PetscCheck(!(ldepth >= 0) || !(depth != ldepth), PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %" PetscInt_FMT " != %" PetscInt_FMT, ldepth, depth);
  PetscCall(PetscLogEventBegin(DMPLEX_PartStratSF, dm, 0, 0, 0));

  /* Before building the migration SF we need to know the new stratum offsets */
  /* Pack (depth, cellType) for every local point into a PetscSFNode: index = depth, rank = cell type */
  PetscCall(PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote));
  PetscCall(PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths));
  for (d = 0; d < depth + 1; ++d) {
    PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
    for (p = pStart; p < pEnd; ++p) {
      DMPolytopeType ct;

      PetscCall(DMPlexGetCellType(dm, p, &ct));
      pointDepths[p].index = d;
      pointDepths[p].rank  = ct;
    }
  }
  for (p = 0; p < nleaves; ++p) {
    /* Sentinels, overwritten by the broadcast below */
    remoteDepths[p].index = -1;
    remoteDepths[p].rank  = -1;
  }
  /* Each rank learns (depth, cellType) of every point it will receive */
  PetscCall(PetscSFBcastBegin(sf, MPIU_SF_NODE, pointDepths, remoteDepths, MPI_REPLACE));
  PetscCall(PetscSFBcastEnd(sf, MPIU_SF_NODE, pointDepths, remoteDepths, MPI_REPLACE));
  /* Count received points in each stratum and compute the internal strata shift */
  PetscCall(PetscCalloc6(depth + 1, &depthRecv, depth + 1, &depthShift, depth + 1, &depthIdx, DM_NUM_POLYTOPES, &ctRecv, DM_NUM_POLYTOPES, &ctShift, DM_NUM_POLYTOPES, &ctIdx));
  for (p = 0; p < nleaves; ++p) {
    if (remoteDepths[p].rank < 0) {
      /* Unknown cell type: bucket by depth alone */
      ++depthRecv[remoteDepths[p].index];
    } else {
      ++ctRecv[remoteDepths[p].rank];
    }
  }
  {
    PetscInt depths[4], dims[4], shift = 0, i, c;

    /* Cells (depth), Vertices (0), Faces (depth-1), Edges (1)
       Consider DM_POLYTOPE_FV_GHOST, DM_POLYTOPE_INTERIOR_GHOST and DM_POLYTOPE_UNKNOWN_CELL as cells
    */
    /* The 4-entry tables cover depth <= 3, i.e. up to 3D interpolated meshes */
    depths[0] = depth;
    depths[1] = 0;
    depths[2] = depth - 1;
    depths[3] = 1;
    dims[0]   = dim;
    dims[1]   = 0;
    dims[2]   = dim - 1;
    dims[3]   = 1;
    for (i = 0; i <= depth; ++i) {
      const PetscInt dep = depths[i];
      const PetscInt dim = dims[i];

      for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
        /* Within each stratum, typed points come first grouped by cell type;
           ghost/unknown cell types are grouped with the cells (i == 0) */
        if (DMPolytopeTypeGetDim((DMPolytopeType)c) != dim && !(i == 0 && (c == DM_POLYTOPE_FV_GHOST || c == DM_POLYTOPE_INTERIOR_GHOST || c == DM_POLYTOPE_UNKNOWN_CELL))) continue;
        ctShift[c] = shift;
        shift += ctRecv[c];
      }
      depthShift[dep] = shift; /* untyped points of this stratum land after the typed ones */
      shift += depthRecv[dep];
    }
    /* Leftover polytope types whose dimension lies outside [0, dim] go at the very end */
    for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
      const PetscInt ctDim = DMPolytopeTypeGetDim((DMPolytopeType)c);

      if ((ctDim < 0 || ctDim > dim) && c != DM_POLYTOPE_FV_GHOST && c != DM_POLYTOPE_INTERIOR_GHOST && c != DM_POLYTOPE_UNKNOWN_CELL) {
        ctShift[c] = shift;
        shift += ctRecv[c];
      }
    }
  }
  /* Derive a new local permutation based on stratified indices */
  PetscCall(PetscMalloc1(nleaves, &ilocal));
  for (p = 0; p < nleaves; ++p) {
    const DMPolytopeType ct = (DMPolytopeType)remoteDepths[p].rank;

    /* NOTE(review): this indexing assumes every leaf carries a valid cell type
       (remoteDepths[p].rank >= 0); a leaf counted only by depth above would index
       ctShift[-1] here -- confirm that migrated points always have their type set */
    ilocal[p] = ctShift[ct] + ctIdx[ct];
    ++ctIdx[ct];
  }
  PetscCall(PetscSFCreate(comm, migrationSF));
  PetscCall(PetscObjectSetName((PetscObject)*migrationSF, "Migration SF"));
  /* ilocal ownership moves to the SF; iremote is copied from the input graph */
  PetscCall(PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, (PetscSFNode *)iremote, PETSC_COPY_VALUES));
  PetscCall(PetscFree2(pointDepths, remoteDepths));
  PetscCall(PetscFree6(depthRecv, depthShift, depthIdx, ctRecv, ctShift, ctIdx));
  PetscCall(PetscLogEventEnd(DMPLEX_PartStratSF, dm, 0, 0, 0));
  PetscFunctionReturn(PETSC_SUCCESS);
}
986: /*@
987: DMPlexDistributeField - Distribute field data to match a given `PetscSF`, usually the `PetscSF` from mesh distribution
989: Collective
991: Input Parameters:
992: + dm - The `DMPLEX` object
993: . pointSF - The `PetscSF` describing the communication pattern
994: . originalSection - The `PetscSection` for existing data layout
995: - originalVec - The existing data in a local vector
997: Output Parameters:
998: + newSection - The `PetscSF` describing the new data layout
999: - newVec - The new data in a local vector
1001: Level: developer
1003: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexDistributeFieldIS()`, `DMPlexDistributeData()`, `PetscSectionMigrateData()`
1004: @*/
1005: PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec)
1006: {
1007: PetscSF fieldSF;
1008: PetscInt *remoteOffsets, fieldSize;
1009: PetscScalar *originalValues, *newValues;
1011: PetscFunctionBegin;
1012: PetscCall(PetscLogEventBegin(DMPLEX_DistributeField, dm, 0, 0, 0));
1013: PetscCall(PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection));
1015: PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize));
1016: PetscCall(VecSetSizes(newVec, fieldSize, PETSC_DETERMINE));
1017: PetscCall(VecSetType(newVec, dm->vectype));
1019: PetscCall(VecGetArray(originalVec, &originalValues));
1020: PetscCall(VecGetArray(newVec, &newValues));
1021: PetscCall(PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF));
1022: PetscCall(PetscFree(remoteOffsets));
1023: PetscCall(PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues, MPI_REPLACE));
1024: PetscCall(PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues, MPI_REPLACE));
1025: PetscCall(PetscSFDestroy(&fieldSF));
1026: PetscCall(VecRestoreArray(newVec, &newValues));
1027: PetscCall(VecRestoreArray(originalVec, &originalValues));
1028: PetscCall(PetscLogEventEnd(DMPLEX_DistributeField, dm, 0, 0, 0));
1029: PetscFunctionReturn(PETSC_SUCCESS);
1030: }
1032: /*@
1033: DMPlexDistributeFieldIS - Distribute field data to match a given `PetscSF`, usually the `PetscSF` from mesh distribution
1035: Collective
1037: Input Parameters:
1038: + dm - The `DMPLEX` object
1039: . pointSF - The `PetscSF` describing the communication pattern
1040: . originalSection - The `PetscSection` for existing data layout
1041: - originalIS - The existing data
1043: Output Parameters:
1044: + newSection - The `PetscSF` describing the new data layout
1045: - newIS - The new data
1047: Level: developer
1049: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexDistributeField()`, `DMPlexDistributeData()`, `PetscSectionMigrateData()`
1050: @*/
1051: PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS)
1052: {
1053: PetscInt *newValues, fieldSize;
1054: const PetscInt *originalValues;
1056: PetscFunctionBegin;
1057: PetscCall(PetscLogEventBegin(DMPLEX_DistributeField, dm, 0, 0, 0));
1058: PetscCall(ISGetIndices(originalIS, &originalValues));
1059: PetscCall(PetscSectionMigrateData(pointSF, MPIU_INT, originalSection, originalValues, newSection, (void **)&newValues, NULL));
1060: PetscCall(ISRestoreIndices(originalIS, &originalValues));
1062: PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize));
1063: PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS));
1064: PetscCall(PetscLogEventEnd(DMPLEX_DistributeField, dm, 0, 0, 0));
1065: PetscFunctionReturn(PETSC_SUCCESS);
1066: }
/*@
  DMPlexDistributeData - Distribute field data to match a given `PetscSF`, usually the `PetscSF` from mesh distribution

  Collective

  Input Parameters:
+ dm              - The `DMPLEX` object
. pointSF         - The `PetscSF` describing the communication pattern
. originalSection - The `PetscSection` for existing data layout
. datatype        - The type of data
- originalData    - The existing data

  Output Parameters:
+ newSection - The `PetscSection` describing the new data layout
- newData    - The new data

  Level: developer

  Note:
  This is simply a wrapper around `PetscSectionMigrateData()`, but includes DM-specific logging.

.seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexDistributeField()`, `PetscSectionMigrateData()`
@*/
PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData)
{
  PetscFunctionBegin;
  /* Thin wrapper: log under the DMPLEX_DistributeData event and delegate the actual migration */
  PetscCall(PetscLogEventBegin(DMPLEX_DistributeData, dm, 0, 0, 0));
  PetscCall(PetscSectionMigrateData(pointSF, datatype, originalSection, originalData, newSection, newData, NULL));
  PetscCall(PetscLogEventEnd(DMPLEX_DistributeData, dm, 0, 0, 0));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Distribute the cone section and cone/orientation data of dm onto dmParallel
   following migrationSF, then symmetrize and stratify the parallel mesh.
   `original` (optional) maps old local points to global points before communication;
   `renumbering` maps the received global points back to the new local numbering. */
static PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
{
  DM_Plex     *pmesh = (DM_Plex *)dmParallel->data;
  MPI_Comm     comm;
  PetscSF      coneSF;
  PetscSection originalConeSection, newConeSection;
  PetscInt    *remoteOffsets, *cones, *globCones, *newCones, newConesSize;
  PetscBool    flg;

  PetscFunctionBegin;
  PetscCall(PetscLogEventBegin(DMPLEX_DistributeCones, dm, 0, 0, 0));
  /* Distribute cone section */
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCall(DMPlexGetConeSection(dm, &originalConeSection));
  PetscCall(DMPlexGetConeSection(dmParallel, &newConeSection));
  PetscCall(PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection));
  PetscCall(DMSetUp(dmParallel)); /* allocates cone storage sized by the new cone section */
  /* Communicate and renumber cones */
  PetscCall(PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF));
  PetscCall(PetscFree(remoteOffsets));
  PetscCall(DMPlexGetCones(dm, &cones));
  if (original) {
    /* Translate cone points to the global numbering before communication */
    PetscInt numCones;

    PetscCall(PetscSectionGetStorageSize(originalConeSection, &numCones));
    PetscCall(PetscMalloc1(numCones, &globCones));
    PetscCall(ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones));
  } else {
    globCones = cones; /* cones are already in the global numbering */
  }
  PetscCall(DMPlexGetCones(dmParallel, &newCones));
  PetscCall(PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones, MPI_REPLACE));
  PetscCall(PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones, MPI_REPLACE));
  if (original) PetscCall(PetscFree(globCones));
  PetscCall(PetscSectionGetStorageSize(newConeSection, &newConesSize));
  /* Map received global cone points to the new local numbering; unmapped points become negative (IS_GTOLM_MASK) */
  PetscCall(ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones));
  if (PetscDefined(USE_DEBUG)) {
    /* Debug builds verify that every cone point was covered by the renumbering */
    PetscInt  p;
    PetscBool valid = PETSC_TRUE;
    for (p = 0; p < newConesSize; ++p) {
      if (newCones[p] < 0) {
        valid = PETSC_FALSE;
        PetscCall(PetscPrintf(PETSC_COMM_SELF, "[%d] Point %" PetscInt_FMT " not in overlap SF\n", PetscGlobalRank, p));
      }
    }
    PetscCheck(valid, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
  }
  PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-cones_view", &flg));
  if (flg) {
    PetscCall(PetscPrintf(comm, "Serial Cone Section:\n"));
    PetscCall(PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_(comm)));
    PetscCall(PetscPrintf(comm, "Parallel Cone Section:\n"));
    PetscCall(PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_(comm)));
    PetscCall(PetscSFView(coneSF, NULL));
  }
  /* Orientations need no renumbering; reuse the same dof-level SF */
  PetscCall(DMPlexGetConeOrientations(dm, &cones));
  PetscCall(DMPlexGetConeOrientations(dmParallel, &newCones));
  PetscCall(PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones, MPI_REPLACE));
  PetscCall(PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones, MPI_REPLACE));
  PetscCall(PetscSFDestroy(&coneSF));
  PetscCall(PetscLogEventEnd(DMPLEX_DistributeCones, dm, 0, 0, 0));
  /* Create supports and stratify DMPlex */
  {
    PetscInt pStart, pEnd;

    /* The support section covers the same chart as the cone section */
    PetscCall(PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd));
    PetscCall(PetscSectionSetChart(pmesh->supportSection, pStart, pEnd));
  }
  PetscCall(DMPlexSymmetrize(dmParallel));
  PetscCall(DMPlexStratify(dmParallel));
  /* Carry over the adjacency configuration to the parallel DM */
  {
    PetscBool useCone, useClosure, useAnchors;

    PetscCall(DMGetBasicAdjacency(dm, &useCone, &useClosure));
    PetscCall(DMSetBasicAdjacency(dmParallel, useCone, useClosure));
    PetscCall(DMPlexGetAdjacencyUseAnchors(dm, &useAnchors));
    PetscCall(DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Distribute vertex coordinates (and, if present, localized cell coordinates) from dm
   onto dmParallel following migrationSF, carrying over periodicity settings. */
static PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel)
{
  MPI_Comm         comm;
  DM               cdm, cdmParallel;
  PetscSection     originalCoordSection, newCoordSection;
  Vec              originalCoordinates, newCoordinates;
  PetscInt         bs;
  PetscBool        sparse;
  const char      *name;
  const PetscReal *maxCell, *Lstart, *L;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  /* Copy the coordinate discretization onto the parallel coordinate DM */
  PetscCall(DMGetCoordinateDM(dm, &cdm));
  PetscCall(DMGetCoordinateDM(dmParallel, &cdmParallel));
  PetscCall(DMCopyDisc(cdm, cdmParallel));
  PetscCall(DMGetCoordinateSection(dm, &originalCoordSection));
  PetscCall(DMGetCoordinateSection(dmParallel, &newCoordSection));
  PetscCall(DMGetCoordinatesLocal(dm, &originalCoordinates));
  if (originalCoordinates) {
    /* Local coordinate vectors live on PETSC_COMM_SELF */
    PetscCall(VecCreate(PETSC_COMM_SELF, &newCoordinates));
    PetscCall(PetscObjectGetName((PetscObject)originalCoordinates, &name));
    PetscCall(PetscObjectSetName((PetscObject)newCoordinates, name));

    PetscCall(DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates));
    PetscCall(DMSetCoordinatesLocal(dmParallel, newCoordinates));
    PetscCall(VecGetBlockSize(originalCoordinates, &bs));
    PetscCall(VecSetBlockSize(newCoordinates, bs));
    PetscCall(VecDestroy(&newCoordinates)); /* dmParallel holds its own reference */
  }

  /* Carry over periodicity and localization settings */
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCall(DMGetPeriodicity(dm, &maxCell, &Lstart, &L));
  PetscCall(DMSetPeriodicity(dmParallel, maxCell, Lstart, L));
  PetscCall(DMGetSparseLocalize(dm, &sparse));
  PetscCall(DMSetSparseLocalize(dmParallel, sparse));
  /* If dm carries cell coordinates, distribute those as well */
  PetscCall(DMGetCellCoordinateDM(dm, &cdm));
  if (cdm) {
    PetscCall(DMClone(dmParallel, &cdmParallel));
    PetscCall(DMSetCellCoordinateDM(dmParallel, cdmParallel));
    PetscCall(DMCopyDisc(cdm, cdmParallel));
    PetscCall(DMDestroy(&cdmParallel)); /* dmParallel now holds the reference */
    PetscCall(DMGetCellCoordinateSection(dm, &originalCoordSection));
    PetscCall(DMGetCellCoordinatesLocal(dm, &originalCoordinates));
    PetscCall(PetscSectionCreate(comm, &newCoordSection));
    if (originalCoordinates) {
      PetscCall(VecCreate(PETSC_COMM_SELF, &newCoordinates));
      PetscCall(PetscObjectGetName((PetscObject)originalCoordinates, &name));
      PetscCall(PetscObjectSetName((PetscObject)newCoordinates, name));

      PetscCall(DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates));
      PetscCall(VecGetBlockSize(originalCoordinates, &bs));
      PetscCall(VecSetBlockSize(newCoordinates, bs));
      PetscCall(DMSetCellCoordinateSection(dmParallel, bs, newCoordSection));
      PetscCall(DMSetCellCoordinatesLocal(dmParallel, newCoordinates));
      PetscCall(VecDestroy(&newCoordinates));
    }
    PetscCall(PetscSectionDestroy(&newCoordSection));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Distribute all labels of dm onto dmParallel following migrationSF.
   The "depth" label is normally recreated by stratification and skipped, unless the
   user modified it, in which case it is communicated like any other label. */
static PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel)
{
  DM_Plex         *mesh = (DM_Plex *)dm->data;
  MPI_Comm         comm;
  DMLabel          depthLabel;
  PetscMPIInt      rank;
  PetscInt         depth, d, numLabels, numLocalLabels, l;
  PetscBool        hasLabels = PETSC_FALSE, lsendDepth, sendDepth;
  PetscObjectState depthState = -1;

  PetscFunctionBegin;
  PetscCall(PetscLogEventBegin(DMPLEX_DistributeLabels, dm, 0, 0, 0));
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));

  /* If the user has changed the depth label, communicate it instead */
  PetscCall(DMPlexGetDepth(dm, &depth));
  PetscCall(DMPlexGetDepthLabel(dm, &depthLabel));
  if (depthLabel) PetscCall(PetscObjectStateGet((PetscObject)depthLabel, &depthState));
  /* The stored state snapshot differs from the current state iff the label was modified */
  lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE;
  PetscCallMPI(MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPI_C_BOOL, MPI_LOR, comm));
  if (sendDepth) {
    /* Remove the automatically recreated depth label so the distributed one replaces it */
    PetscCall(DMPlexGetDepthLabel(dmParallel, &dmParallel->depthLabel));
    PetscCall(DMRemoveLabelBySelf(dmParallel, &dmParallel->depthLabel, PETSC_FALSE));
  }
  /* Everyone must have either the same number of labels, or none */
  PetscCall(DMGetNumLabels(dm, &numLocalLabels));
  numLabels = numLocalLabels;
  PetscCallMPI(MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm));
  if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
  for (l = 0; l < numLabels; ++l) {
    DMLabel     label = NULL, labelNew = NULL;
    PetscBool   isDepth, lisOutput = PETSC_TRUE, isOutput;
    const char *name = NULL;

    if (hasLabels) {
      PetscCall(DMGetLabelByNum(dm, l, &label));
      /* Skip "depth" because it is recreated */
      PetscCall(PetscObjectGetName((PetscObject)label, &name));
      PetscCall(PetscStrcmp(name, "depth", &isDepth));
    } else {
      isDepth = PETSC_FALSE;
    }
    /* Ranks without labels follow rank 0's decision */
    PetscCallMPI(MPI_Bcast(&isDepth, 1, MPI_C_BOOL, 0, comm));
    if (isDepth && !sendDepth) continue;
    PetscCall(DMLabelDistribute(label, migrationSF, &labelNew));
    if (isDepth) {
      /* Put in any missing strata which can occur if users are managing the depth label themselves */
      PetscInt gdepth;

      PetscCallMPI(MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm));
      PetscCheck(!(depth >= 0) || !(gdepth != depth), PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %" PetscInt_FMT " != %" PetscInt_FMT, depth, gdepth);
      for (d = 0; d <= gdepth; ++d) {
        PetscBool has;

        PetscCall(DMLabelHasStratum(labelNew, d, &has));
        if (!has) PetscCall(DMLabelAddStratum(labelNew, d));
      }
    }
    PetscCall(DMAddLabel(dmParallel, labelNew));
    /* Put the output flag in the new label */
    if (hasLabels) PetscCall(DMGetLabelOutput(dm, name, &lisOutput));
    PetscCallMPI(MPIU_Allreduce(&lisOutput, &isOutput, 1, MPI_C_BOOL, MPI_LAND, comm));
    PetscCall(PetscObjectGetName((PetscObject)labelNew, &name));
    PetscCall(DMSetLabelOutput(dmParallel, name, isOutput));
    PetscCall(DMLabelDestroy(&labelNew));
  }
  {
    DMLabel ctLabel;

    // Reset label for fast lookup
    PetscCall(DMPlexGetCellTypeLabel(dmParallel, &ctLabel));
    PetscCall(DMLabelMakeAllInvalid_Internal(ctLabel));
  }
  PetscCall(PetscLogEventEnd(DMPLEX_DistributeLabels, dm, 0, 0, 0));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Distribute the reference tree and parent/childID data of dm onto dmParallel,
   following the same globalize/broadcast/localize pattern used for cones:
   `original` (optional) maps parents to the global numbering before communication,
   `renumbering` maps them back to the new local numbering. */
static PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
{
  DM_Plex     *mesh  = (DM_Plex *)dm->data;
  DM_Plex     *pmesh = (DM_Plex *)dmParallel->data;
  MPI_Comm     comm;
  DM           refTree;
  PetscSection origParentSection, newParentSection;
  PetscInt    *origParents, *origChildIDs;
  PetscBool    flg;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));

  /* Set up tree */
  PetscCall(DMPlexGetReferenceTree(dm, &refTree));
  PetscCall(DMPlexSetReferenceTree(dmParallel, refTree));
  PetscCall(DMPlexGetTree(dm, &origParentSection, &origParents, &origChildIDs, NULL, NULL));
  if (origParentSection) {
    PetscInt  pStart, pEnd;
    PetscInt *newParents, *newChildIDs, *globParents;
    PetscInt *remoteOffsetsParents, newParentSize;
    PetscSF   parentSF;

    /* Distribute the parent section over the new chart, then build a dof-level SF for the data */
    PetscCall(DMPlexGetChart(dmParallel, &pStart, &pEnd));
    PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel), &newParentSection));
    PetscCall(PetscSectionSetChart(newParentSection, pStart, pEnd));
    PetscCall(PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection));
    PetscCall(PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF));
    PetscCall(PetscFree(remoteOffsetsParents));
    PetscCall(PetscSectionGetStorageSize(newParentSection, &newParentSize));
    PetscCall(PetscMalloc2(newParentSize, &newParents, newParentSize, &newChildIDs));
    if (original) {
      /* Translate parent points to the global numbering before communication */
      PetscInt numParents;

      PetscCall(PetscSectionGetStorageSize(origParentSection, &numParents));
      PetscCall(PetscMalloc1(numParents, &globParents));
      PetscCall(ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents));
    } else {
      globParents = origParents; /* parents are already in the global numbering */
    }
    PetscCall(PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents, MPI_REPLACE));
    PetscCall(PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents, MPI_REPLACE));
    if (original) PetscCall(PetscFree(globParents));
    /* Child IDs are rank-independent values and need no renumbering */
    PetscCall(PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs, MPI_REPLACE));
    PetscCall(PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs, MPI_REPLACE));
    /* Map received global parents to the new local numbering; unmapped points become negative */
    PetscCall(ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents));
    if (PetscDefined(USE_DEBUG)) {
      /* Debug builds verify every parent was covered by the renumbering */
      PetscInt  p;
      PetscBool valid = PETSC_TRUE;
      for (p = 0; p < newParentSize; ++p) {
        if (newParents[p] < 0) {
          valid = PETSC_FALSE;
          PetscCall(PetscPrintf(PETSC_COMM_SELF, "Point %" PetscInt_FMT " not in overlap SF\n", p));
        }
      }
      PetscCheck(valid, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
    }
    PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-parents_view", &flg));
    if (flg) {
      PetscCall(PetscPrintf(comm, "Serial Parent Section: \n"));
      PetscCall(PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_(comm)));
      PetscCall(PetscPrintf(comm, "Parallel Parent Section: \n"));
      PetscCall(PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_(comm)));
      PetscCall(PetscSFView(parentSF, NULL));
    }
    PetscCall(DMPlexSetTree(dmParallel, newParentSection, newParents, newChildIDs));
    PetscCall(PetscSectionDestroy(&newParentSection));
    PetscCall(PetscFree2(newParents, newChildIDs));
    PetscCall(PetscSFDestroy(&parentSF));
  }
  pmesh->useAnchors = mesh->useAnchors;
  PetscFunctionReturn(PETSC_SUCCESS);
}
1405: /*@
1406: DMPlexSetPartitionBalance - Should distribution of the `DM` attempt to balance the shared point partition?
1408: Input Parameters:
1409: + dm - The `DMPLEX` object
1410: - flg - Balance the partition?
1412: Level: intermediate
1414: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexGetPartitionBalance()`
1415: @*/
1416: PetscErrorCode DMPlexSetPartitionBalance(DM dm, PetscBool flg)
1417: {
1418: DM_Plex *mesh = (DM_Plex *)dm->data;
1420: PetscFunctionBegin;
1421: mesh->partitionBalance = flg;
1422: PetscFunctionReturn(PETSC_SUCCESS);
1423: }
1425: /*@
1426: DMPlexGetPartitionBalance - Does distribution of the `DM` attempt to balance the shared point partition?
1428: Input Parameter:
1429: . dm - The `DMPLEX` object
1431: Output Parameter:
1432: . flg - Balance the partition?
1434: Level: intermediate
1436: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexSetPartitionBalance()`
1437: @*/
1438: PetscErrorCode DMPlexGetPartitionBalance(DM dm, PetscBool *flg)
1439: {
1440: DM_Plex *mesh = (DM_Plex *)dm->data;
1442: PetscFunctionBegin;
1444: PetscAssertPointer(flg, 2);
1445: *flg = mesh->partitionBalance;
1446: PetscFunctionReturn(PETSC_SUCCESS);
1447: }
1449: typedef struct {
1450: PetscInt vote, rank, index;
1451: } Petsc3Int;
1453: /* MaxLoc, but carry a third piece of information around */
1454: static void MPIAPI MaxLocCarry(void *in_, void *inout_, PetscMPIInt *len_, MPI_Datatype *dtype)
1455: {
1456: Petsc3Int *a = (Petsc3Int *)inout_;
1457: Petsc3Int *b = (Petsc3Int *)in_;
1458: PetscInt i, len = *len_;
1459: for (i = 0; i < len; i++) {
1460: if (a[i].vote < b[i].vote) {
1461: a[i].vote = b[i].vote;
1462: a[i].rank = b[i].rank;
1463: a[i].index = b[i].index;
1464: } else if (a[i].vote <= b[i].vote) {
1465: if (a[i].rank >= b[i].rank) {
1466: a[i].rank = b[i].rank;
1467: a[i].index = b[i].index;
1468: }
1469: }
1470: }
1471: }
1473: /*@
1474: DMPlexCreatePointSF - Build a point `PetscSF` from an `PetscSF` describing a point migration
1476: Input Parameters:
1477: + dm - The source `DMPLEX` object
1478: . migrationSF - The star forest that describes the parallel point remapping
1479: - ownership - Flag causing a vote to determine point ownership
1481: Output Parameter:
1482: . pointSF - The star forest describing the point overlap in the remapped `DM`
1484: Level: developer
1486: Note:
1487: Output `pointSF` is guaranteed to have the array of local indices (leaves) sorted.
1489: .seealso: `DMPLEX`, `PetscSF`, `DM`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
1490: @*/
1491: PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF)
1492: {
1493: PetscMPIInt rank, size;
1494: PetscInt p, nroots, nleaves, idx, npointLeaves;
1495: PetscInt *pointLocal;
1496: const PetscInt *leaves;
1497: const PetscSFNode *roots;
1498: PetscSFNode *rootNodes, *leafNodes, *pointRemote;
1499: Vec shifts;
1500: const PetscInt numShifts = 13759;
1501: const PetscScalar *shift = NULL;
1502: const PetscBool shiftDebug = PETSC_FALSE;
1503: PetscBool balance;
1505: PetscFunctionBegin;
1507: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)dm), &rank));
1508: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)dm), &size));
1509: PetscCall(PetscLogEventBegin(DMPLEX_CreatePointSF, dm, 0, 0, 0));
1510: PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)dm), pointSF));
1511: PetscCall(PetscSFSetFromOptions(*pointSF));
1512: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
1514: PetscCall(DMPlexGetPartitionBalance(dm, &balance));
1515: PetscCall(PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots));
1516: PetscCall(PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes));
1517: if (ownership) {
1518: MPI_Op op;
1519: MPI_Datatype datatype;
1520: Petsc3Int *rootVote = NULL, *leafVote = NULL;
1522: /* If balancing, we compute a random cyclic shift of the rank for each remote point. That way, the max will evenly distribute among ranks. */
1523: if (balance) {
1524: PetscRandom r;
1526: PetscCall(PetscRandomCreate(PETSC_COMM_SELF, &r));
1527: PetscCall(PetscRandomSetInterval(r, 0, 2467 * size));
1528: PetscCall(VecCreate(PETSC_COMM_SELF, &shifts));
1529: PetscCall(VecSetSizes(shifts, numShifts, numShifts));
1530: PetscCall(VecSetType(shifts, VECSTANDARD));
1531: PetscCall(VecSetRandom(shifts, r));
1532: PetscCall(PetscRandomDestroy(&r));
1533: PetscCall(VecGetArrayRead(shifts, &shift));
1534: }
1536: PetscCall(PetscMalloc1(nroots, &rootVote));
1537: PetscCall(PetscMalloc1(nleaves, &leafVote));
1538: /* Point ownership vote: Process with highest rank owns shared points */
1539: for (p = 0; p < nleaves; ++p) {
1540: if (shiftDebug) {
1541: PetscCall(PetscSynchronizedPrintf(PetscObjectComm((PetscObject)dm), "[%d] Point %" PetscInt_FMT " RemotePoint %" PetscInt_FMT " Shift %" PetscInt_FMT " MyRank %" PetscInt_FMT "\n", rank, leaves ? leaves[p] : p, roots[p].index,
1542: (PetscInt)PetscRealPart(shift[roots[p].index % numShifts]), (rank + (shift ? (PetscInt)PetscRealPart(shift[roots[p].index % numShifts]) : 0)) % size));
1543: }
1544: /* Either put in a bid or we know we own it */
1545: leafVote[p].vote = (rank + (shift ? (PetscInt)PetscRealPart(shift[roots[p].index % numShifts]) : 0)) % size;
1546: leafVote[p].rank = rank;
1547: leafVote[p].index = p;
1548: }
1549: for (p = 0; p < nroots; p++) {
1550: /* Root must not participate in the reduction, flag so that MAXLOC does not use */
1551: rootVote[p].vote = -3;
1552: rootVote[p].rank = -3;
1553: rootVote[p].index = -3;
1554: }
1555: PetscCallMPI(MPI_Type_contiguous(3, MPIU_INT, &datatype));
1556: PetscCallMPI(MPI_Type_commit(&datatype));
1557: PetscCallMPI(MPI_Op_create(&MaxLocCarry, 1, &op));
1558: PetscCall(PetscSFReduceBegin(migrationSF, datatype, leafVote, rootVote, op));
1559: PetscCall(PetscSFReduceEnd(migrationSF, datatype, leafVote, rootVote, op));
1560: PetscCallMPI(MPI_Op_free(&op));
1561: PetscCallMPI(MPI_Type_free(&datatype));
1562: for (p = 0; p < nroots; p++) {
1563: rootNodes[p].rank = rootVote[p].rank;
1564: rootNodes[p].index = rootVote[p].index;
1565: }
1566: PetscCall(PetscFree(leafVote));
1567: PetscCall(PetscFree(rootVote));
1568: } else {
1569: for (p = 0; p < nroots; p++) {
1570: rootNodes[p].index = -1;
1571: rootNodes[p].rank = rank;
1572: }
1573: for (p = 0; p < nleaves; p++) {
1574: /* Write new local id into old location */
1575: if (roots[p].rank == rank) rootNodes[roots[p].index].index = leaves ? leaves[p] : p;
1576: }
1577: }
1578: PetscCall(PetscSFBcastBegin(migrationSF, MPIU_SF_NODE, rootNodes, leafNodes, MPI_REPLACE));
1579: PetscCall(PetscSFBcastEnd(migrationSF, MPIU_SF_NODE, rootNodes, leafNodes, MPI_REPLACE));
1581: for (npointLeaves = 0, p = 0; p < nleaves; p++) {
1582: if (leafNodes[p].rank != rank) npointLeaves++;
1583: }
1584: PetscCall(PetscMalloc1(npointLeaves, &pointLocal));
1585: PetscCall(PetscMalloc1(npointLeaves, &pointRemote));
1586: for (idx = 0, p = 0; p < nleaves; p++) {
1587: if (leafNodes[p].rank != rank) {
1588: /* Note that pointLocal is automatically sorted as it is sublist of 0, ..., nleaves-1 */
1589: pointLocal[idx] = p;
1590: pointRemote[idx] = leafNodes[p];
1591: idx++;
1592: }
1593: }
1594: if (shift) {
1595: PetscCall(VecRestoreArrayRead(shifts, &shift));
1596: PetscCall(VecDestroy(&shifts));
1597: }
1598: if (shiftDebug) PetscCall(PetscSynchronizedFlush(PetscObjectComm((PetscObject)dm), PETSC_STDOUT));
1599: PetscCall(PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER));
1600: PetscCall(PetscFree2(rootNodes, leafNodes));
1601: PetscCall(PetscLogEventEnd(DMPLEX_CreatePointSF, dm, 0, 0, 0));
1602: if (PetscDefined(USE_DEBUG)) PetscCall(DMPlexCheckPointSF(dm, *pointSF, PETSC_FALSE));
1603: PetscFunctionReturn(PETSC_SUCCESS);
1604: }
1606: /*@
1607: DMPlexMigrate - Migrates internal `DM` data over the supplied star forest
1609: Collective
1611: Input Parameters:
1612: + dm - The source `DMPLEX` object
1613: - sf - The star forest communication context describing the migration pattern
1615: Output Parameter:
1616: . targetDM - The target `DMPLEX` object
1618: Level: intermediate
1620: .seealso: `DMPLEX`, `PetscSF`, `DM`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
1621: @*/
1622: PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM)
1623: {
1624: MPI_Comm comm;
1625: PetscInt dim, cdim, nroots;
1626: PetscSF sfPoint;
1627: ISLocalToGlobalMapping ltogMigration;
1628: ISLocalToGlobalMapping ltogOriginal = NULL;
1630: PetscFunctionBegin;
1632: PetscCall(PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0));
1633: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1634: PetscCall(DMGetDimension(dm, &dim));
1635: PetscCall(DMSetDimension(targetDM, dim));
1636: PetscCall(DMGetCoordinateDim(dm, &cdim));
1637: PetscCall(DMSetCoordinateDim(targetDM, cdim));
1639: /* Check for a one-to-all distribution pattern */
1640: PetscCall(DMGetPointSF(dm, &sfPoint));
1641: PetscCall(PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL));
1642: if (nroots >= 0) {
1643: IS isOriginal;
1644: PetscInt n, size, nleaves;
1645: PetscInt *numbering_orig, *numbering_new;
1647: /* Get the original point numbering */
1648: PetscCall(DMPlexCreatePointNumbering(dm, &isOriginal));
1649: PetscCall(ISLocalToGlobalMappingCreateIS(isOriginal, <ogOriginal));
1650: PetscCall(ISLocalToGlobalMappingGetSize(ltogOriginal, &size));
1651: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt **)&numbering_orig));
1652: /* Convert to positive global numbers */
1653: for (n = 0; n < size; n++) {
1654: if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n] + 1);
1655: }
1656: /* Derive the new local-to-global mapping from the old one */
1657: PetscCall(PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL));
1658: PetscCall(PetscMalloc1(nleaves, &numbering_new));
1659: PetscCall(PetscSFBcastBegin(sf, MPIU_INT, numbering_orig, numbering_new, MPI_REPLACE));
1660: PetscCall(PetscSFBcastEnd(sf, MPIU_INT, numbering_orig, numbering_new, MPI_REPLACE));
1661: PetscCall(ISLocalToGlobalMappingCreate(comm, 1, nleaves, numbering_new, PETSC_OWN_POINTER, <ogMigration));
1662: PetscCall(ISLocalToGlobalMappingRestoreIndices(ltogOriginal, (const PetscInt **)&numbering_orig));
1663: PetscCall(ISDestroy(&isOriginal));
1664: } else {
1665: /* One-to-all distribution pattern: We can derive LToG from SF */
1666: PetscCall(ISLocalToGlobalMappingCreateSF(sf, 0, <ogMigration));
1667: }
1668: PetscCall(PetscObjectSetName((PetscObject)ltogMigration, "Point renumbering for DM migration"));
1669: PetscCall(ISLocalToGlobalMappingViewFromOptions(ltogMigration, (PetscObject)dm, "-partition_view"));
1670: /* Migrate DM data to target DM */
1671: PetscCall(DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM));
1672: PetscCall(DMPlexDistributeLabels(dm, sf, targetDM));
1673: PetscCall(DMPlexDistributeCoordinates(dm, sf, targetDM));
1674: PetscCall(DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM));
1675: PetscCall(ISLocalToGlobalMappingDestroy(<ogOriginal));
1676: PetscCall(ISLocalToGlobalMappingDestroy(<ogMigration));
1677: PetscCall(PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0));
1678: PetscFunctionReturn(PETSC_SUCCESS);
1679: }
1681: /*@
1682: DMPlexRemapMigrationSF - Rewrite the distribution SF to account for overlap
1684: Collective
1686: Input Parameters:
1687: + sfOverlap - The `PetscSF` object just for the overlap
1688: - sfMigration - The original distribution `PetscSF` object
1690: Output Parameters:
1691: . sfMigrationNew - A rewritten `PetscSF` object that incorporates the overlap
1693: Level: developer
1695: .seealso: `DMPLEX`, `DM`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`, `DMPlexGetOverlap()`
1696: @*/
1697: PetscErrorCode DMPlexRemapMigrationSF(PetscSF sfOverlap, PetscSF sfMigration, PetscSF *sfMigrationNew)
1698: {
1699: PetscSFNode *newRemote, *permRemote = NULL;
1700: const PetscInt *oldLeaves;
1701: const PetscSFNode *oldRemote;
1702: PetscInt nroots, nleaves, noldleaves;
1704: PetscFunctionBegin;
1705: PetscCall(PetscSFGetGraph(sfMigration, &nroots, &noldleaves, &oldLeaves, &oldRemote));
1706: PetscCall(PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL));
1707: PetscCall(PetscMalloc1(nleaves, &newRemote));
1708: /* oldRemote: original root point mapping to original leaf point
1709: newRemote: original leaf point mapping to overlapped leaf point */
1710: if (oldLeaves) {
1711: /* After stratification, the migration remotes may not be in root (canonical) order, so we reorder using the leaf numbering */
1712: PetscCall(PetscMalloc1(noldleaves, &permRemote));
1713: for (PetscInt l = 0; l < noldleaves; ++l) permRemote[oldLeaves[l]] = oldRemote[l];
1714: oldRemote = permRemote;
1715: }
1716: PetscCall(PetscSFBcastBegin(sfOverlap, MPIU_SF_NODE, oldRemote, newRemote, MPI_REPLACE));
1717: PetscCall(PetscSFBcastEnd(sfOverlap, MPIU_SF_NODE, oldRemote, newRemote, MPI_REPLACE));
1718: PetscCall(PetscFree(permRemote));
1719: PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)sfOverlap), sfMigrationNew));
1720: PetscCall(PetscSFSetGraph(*sfMigrationNew, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER));
1721: PetscFunctionReturn(PETSC_SUCCESS);
1722: }
1724: /* For DG-like methods, the code below is equivalent (but faster) than calling
1725: DMPlexCreateClosureIndex(dm,section) */
1726: static PetscErrorCode DMPlexCreateClosureIndex_CELL(DM dm, PetscSection section)
1727: {
1728: PetscSection clSection;
1729: IS clPoints;
1730: PetscInt pStart, pEnd, point;
1731: PetscInt *closure, pos = 0;
1733: PetscFunctionBegin;
1734: if (!section) PetscCall(DMGetLocalSection(dm, §ion));
1735: PetscCall(DMPlexGetHeightStratum(dm, 0, &pStart, &pEnd));
1736: PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)dm), &clSection));
1737: PetscCall(PetscSectionSetChart(clSection, pStart, pEnd));
1738: PetscCall(PetscMalloc1((2 * (pEnd - pStart)), &closure));
1739: for (point = pStart; point < pEnd; point++) {
1740: PetscCall(PetscSectionSetDof(clSection, point, 2));
1741: closure[pos++] = point; /* point */
1742: closure[pos++] = 0; /* orientation */
1743: }
1744: PetscCall(PetscSectionSetUp(clSection));
1745: PetscCall(ISCreateGeneral(PETSC_COMM_SELF, 2 * (pEnd - pStart), closure, PETSC_OWN_POINTER, &clPoints));
1746: PetscCall(PetscSectionSetClosureIndex(section, (PetscObject)dm, clSection, clPoints));
1747: PetscCall(PetscSectionDestroy(&clSection));
1748: PetscCall(ISDestroy(&clPoints));
1749: PetscFunctionReturn(PETSC_SUCCESS);
1750: }
/* Distribute a mesh in several stages with a multistage partitioner: each stage
   repartitions the DM produced by the previous stage, and the per-stage migration
   SFs are composed so the returned SF maps the user's original DM to the final
   distributed DM. Only the final stage applies the requested overlap. */
static PetscErrorCode DMPlexDistribute_Multistage(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel)
{
  MPI_Comm         comm = PetscObjectComm((PetscObject)dm);
  PetscPartitioner part;
  PetscBool        balance, printHeader;
  PetscInt         nl = 0;

  PetscFunctionBegin;
  if (sf) *sf = NULL;
  *dmParallel = NULL;
  PetscCall(DMPlexGetPartitioner(dm, &part));
  /* Save the header flag: it is cleared after the first stage so the banner prints once */
  printHeader = part->printHeader;
  PetscCall(DMPlexGetPartitionBalance(dm, &balance));
  PetscCall(PetscPartitionerSetUp(part));
  PetscCall(PetscLogEventBegin(DMPLEX_DistributeMultistage, dm, 0, 0, 0));
  PetscCall(PetscPartitionerMultistageGetStages_Multistage(part, &nl, NULL));
  for (PetscInt l = 0; l < nl; l++) {
    /* Overlap is deferred to the last stage; intermediate stages distribute without it */
    PetscInt ovl = (l < nl - 1) ? 0 : overlap;
    PetscSF  sfDist;
    DM       dmDist;

    PetscCall(DMPlexSetPartitionBalance(dm, balance));
    PetscCall(DMViewFromOptions(dm, (PetscObject)part, "-petscpartitioner_multistage_dm_view"));
    PetscCall(PetscPartitionerMultistageSetStage_Multistage(part, l, (PetscObject)dm));
    PetscCall(DMPlexSetPartitioner(dm, part));
    PetscCall(DMPlexDistribute(dm, ovl, &sfDist, &dmDist));
    PetscCheck(dmDist, comm, PETSC_ERR_PLIB, "No distributed DM generated (stage %" PetscInt_FMT ")", l);
    PetscCheck(sfDist, comm, PETSC_ERR_PLIB, "No SF generated (stage %" PetscInt_FMT ")", l);
    part->printHeader = PETSC_FALSE;

    /* Propagate cell weights to the next level (if any, and if not the final dm) */
    if (part->usevwgt && dm->localSection && l != nl - 1) {
      PetscSection oldSection, newSection;

      PetscCall(DMGetLocalSection(dm, &oldSection));
      PetscCall(DMGetLocalSection(dmDist, &newSection));
      PetscCall(PetscSFDistributeSection(sfDist, oldSection, NULL, newSection));
      PetscCall(DMPlexCreateClosureIndex_CELL(dmDist, newSection));
    }
    if (!sf) PetscCall(PetscSFDestroy(&sfDist));
    /* DMs from earlier iterations are intermediates owned here; the caller's dm (l == 0) is kept */
    if (l > 0) PetscCall(DMDestroy(&dm));

    /* Compose the accumulated migration SF with this stage's SF */
    if (sf && *sf) {
      PetscSF sfA = *sf, sfB = sfDist;

      PetscCall(PetscSFCompose(sfA, sfB, &sfDist));
      PetscCall(PetscSFDestroy(&sfA));
      PetscCall(PetscSFDestroy(&sfB));
    }
    if (sf) *sf = sfDist;
    /* The stage's output becomes the input of the next stage */
    dm = *dmParallel = dmDist;
  }
  PetscCall(PetscPartitionerMultistageSetStage_Multistage(part, 0, NULL)); /* reset */
  PetscCall(PetscLogEventEnd(DMPLEX_DistributeMultistage, dm, 0, 0, 0));
  part->printHeader = printHeader;
  PetscFunctionReturn(PETSC_SUCCESS);
}
1811: /*@
1812: DMPlexDistribute - Distributes the mesh and any associated sections.
1814: Collective
1816: Input Parameters:
1817: + dm - The original `DMPLEX` object
1818: - overlap - The overlap of partitions, 0 is the default
1820: Output Parameters:
1821: + sf - The `PetscSF` used for point distribution, or `NULL` if not needed
1822: - dmParallel - The distributed `DMPLEX` object
1824: Level: intermediate
1826: Note:
1827: If the mesh was not distributed, the output `dmParallel` will be `NULL`.
1829: The user can control the definition of adjacency for the mesh using `DMSetAdjacency()`. They should choose the combination appropriate for the function
1830: representation on the mesh.
1832: .seealso: `DMPLEX`, `DM`, `DMPlexCreate()`, `DMSetAdjacency()`, `DMPlexGetOverlap()`
1833: @*/
1834: PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PeOp PetscSF *sf, DM *dmParallel)
1835: {
1836: MPI_Comm comm;
1837: PetscPartitioner partitioner;
1838: IS cellPart;
1839: PetscSection cellPartSection;
1840: DM dmCoord;
1841: DMLabel lblPartition, lblMigration;
1842: PetscSF sfMigration, sfStratified, sfPoint;
1843: PetscBool flg, balance, isms;
1844: PetscMPIInt rank, size;
1846: PetscFunctionBegin;
1849: if (sf) PetscAssertPointer(sf, 3);
1850: PetscAssertPointer(dmParallel, 4);
1852: if (sf) *sf = NULL;
1853: *dmParallel = NULL;
1854: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1855: PetscCallMPI(MPI_Comm_rank(comm, &rank));
1856: PetscCallMPI(MPI_Comm_size(comm, &size));
1857: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
1859: /* Handle multistage partitioner */
1860: PetscCall(DMPlexGetPartitioner(dm, &partitioner));
1861: PetscCall(PetscObjectTypeCompare((PetscObject)partitioner, PETSCPARTITIONERMULTISTAGE, &isms));
1862: if (isms) {
1863: PetscObject stagedm;
1865: PetscCall(PetscPartitionerMultistageGetStage_Multistage(partitioner, NULL, &stagedm));
1866: if (!stagedm) { /* No stage dm present, start the multistage algorithm */
1867: PetscCall(DMPlexDistribute_Multistage(dm, overlap, sf, dmParallel));
1868: PetscFunctionReturn(PETSC_SUCCESS);
1869: }
1870: }
1871: PetscCall(PetscLogEventBegin(DMPLEX_Distribute, dm, 0, 0, 0));
1872: /* Create cell partition */
1873: PetscCall(PetscLogEventBegin(DMPLEX_Partition, dm, 0, 0, 0));
1874: PetscCall(PetscSectionCreate(comm, &cellPartSection));
1875: PetscCall(PetscPartitionerDMPlexPartition(partitioner, dm, NULL, cellPartSection, &cellPart));
1876: PetscCall(PetscLogEventBegin(DMPLEX_PartSelf, dm, 0, 0, 0));
1877: {
1878: /* Convert partition to DMLabel */
1879: IS is;
1880: PetscHSetI ht;
1881: const PetscInt *points;
1882: PetscInt *iranks;
1883: PetscInt pStart, pEnd, proc, npoints, poff = 0, nranks;
1885: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Point Partition", &lblPartition));
1886: /* Preallocate strata */
1887: PetscCall(PetscHSetICreate(&ht));
1888: PetscCall(PetscSectionGetChart(cellPartSection, &pStart, &pEnd));
1889: for (proc = pStart; proc < pEnd; proc++) {
1890: PetscCall(PetscSectionGetDof(cellPartSection, proc, &npoints));
1891: if (npoints) PetscCall(PetscHSetIAdd(ht, proc));
1892: }
1893: PetscCall(PetscHSetIGetSize(ht, &nranks));
1894: PetscCall(PetscMalloc1(nranks, &iranks));
1895: PetscCall(PetscHSetIGetElems(ht, &poff, iranks));
1896: PetscCall(PetscHSetIDestroy(&ht));
1897: PetscCall(DMLabelAddStrata(lblPartition, nranks, iranks));
1898: PetscCall(PetscFree(iranks));
1899: /* Inline DMPlexPartitionLabelClosure() */
1900: PetscCall(ISGetIndices(cellPart, &points));
1901: PetscCall(PetscSectionGetChart(cellPartSection, &pStart, &pEnd));
1902: for (proc = pStart; proc < pEnd; proc++) {
1903: PetscCall(PetscSectionGetDof(cellPartSection, proc, &npoints));
1904: if (!npoints) continue;
1905: PetscCall(PetscSectionGetOffset(cellPartSection, proc, &poff));
1906: PetscCall(DMPlexClosurePoints_Private(dm, npoints, points + poff, &is));
1907: PetscCall(DMLabelSetStratumIS(lblPartition, proc, is));
1908: PetscCall(ISDestroy(&is));
1909: }
1910: PetscCall(ISRestoreIndices(cellPart, &points));
1911: }
1912: PetscCall(PetscLogEventEnd(DMPLEX_PartSelf, dm, 0, 0, 0));
1914: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Point migration", &lblMigration));
1915: PetscCall(DMPlexPartitionLabelInvert(dm, lblPartition, NULL, lblMigration));
1916: PetscCall(DMPlexPartitionLabelCreateSF(dm, lblMigration, PETSC_TRUE, &sfMigration));
1917: PetscCall(DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified));
1918: PetscCall(PetscSFDestroy(&sfMigration));
1919: sfMigration = sfStratified;
1920: PetscCall(PetscSFSetUp(sfMigration));
1921: PetscCall(PetscLogEventEnd(DMPLEX_Partition, dm, 0, 0, 0));
1922: PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-partition_view", &flg));
1923: if (flg) {
1924: PetscCall(DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_(comm)));
1925: PetscCall(PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_(comm)));
1926: }
1928: /* Create non-overlapping parallel DM and migrate internal data */
1929: PetscCall(DMPlexCreate(comm, dmParallel));
1930: PetscCall(PetscObjectSetName((PetscObject)*dmParallel, "Parallel Mesh"));
1931: PetscCall(DMPlexMigrate(dm, sfMigration, *dmParallel));
1933: /* Build the point SF without overlap */
1934: PetscCall(DMPlexGetPartitionBalance(dm, &balance));
1935: PetscCall(DMPlexSetPartitionBalance(*dmParallel, balance));
1936: PetscCall(DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint));
1937: PetscCall(DMSetPointSF(*dmParallel, sfPoint));
1938: PetscCall(DMPlexMigrateIsoperiodicFaceSF_Internal(dm, *dmParallel, sfMigration));
1939: PetscCall(DMGetCoordinateDM(*dmParallel, &dmCoord));
1940: if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
1941: if (flg) PetscCall(PetscSFView(sfPoint, NULL));
1943: if (overlap > 0) {
1944: DM dmOverlap;
1945: PetscSF sfOverlap, sfOverlapPoint;
1947: /* Add the partition overlap to the distributed DM */
1948: PetscCall(DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap));
1949: PetscCall(DMDestroy(dmParallel));
1950: *dmParallel = dmOverlap;
1951: if (flg) {
1952: PetscCall(PetscPrintf(comm, "Overlap Migration SF:\n"));
1953: PetscCall(PetscSFView(sfOverlap, NULL));
1954: }
1956: /* Re-map the migration SF to establish the full migration pattern */
1957: PetscCall(DMPlexRemapMigrationSF(sfOverlap, sfMigration, &sfOverlapPoint));
1958: PetscCall(PetscSFDestroy(&sfOverlap));
1959: PetscCall(PetscSFDestroy(&sfMigration));
1960: sfMigration = sfOverlapPoint;
1961: }
1962: /* Cleanup Partition */
1963: PetscCall(DMLabelDestroy(&lblPartition));
1964: PetscCall(DMLabelDestroy(&lblMigration));
1965: PetscCall(PetscSectionDestroy(&cellPartSection));
1966: PetscCall(ISDestroy(&cellPart));
1967: PetscCall(DMPlexCopy_Internal(dm, PETSC_TRUE, PETSC_FALSE, *dmParallel));
1968: // Create sfNatural, need discretization information
1969: PetscCall(DMCopyDisc(dm, *dmParallel));
1970: if (dm->localSection) {
1971: PetscSection psection;
1972: PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)dm), &psection));
1973: PetscCall(PetscSFDistributeSection(sfMigration, dm->localSection, NULL, psection));
1974: PetscCall(DMSetLocalSection(*dmParallel, psection));
1975: PetscCall(PetscSectionDestroy(&psection));
1976: }
1977: if (dm->useNatural) {
1978: PetscSection section;
1980: PetscCall(DMSetUseNatural(*dmParallel, PETSC_TRUE));
1981: PetscCall(DMGetLocalSection(dm, §ion));
1983: /* If DM has useNatural = PETSC_TRUE, but no sfNatural is set, the DM's Section is considered natural */
1984: /* sfMigration and sfNatural are respectively the point and dofs SFs mapping to this natural DM */
1985: /* Compose with a previous sfNatural if present */
1986: if (dm->sfNatural) {
1987: PetscCall(DMPlexMigrateGlobalToNaturalSF(dm, *dmParallel, dm->sfNatural, sfMigration, &(*dmParallel)->sfNatural));
1988: } else {
1989: PetscCall(DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural));
1990: }
1991: /* Compose with a previous sfMigration if present */
1992: if (dm->sfMigration) {
1993: PetscSF naturalPointSF;
1995: PetscCall(PetscSFCompose(dm->sfMigration, sfMigration, &naturalPointSF));
1996: PetscCall(PetscSFDestroy(&sfMigration));
1997: sfMigration = naturalPointSF;
1998: }
1999: }
2000: /* Cleanup */
2001: if (sf) {
2002: *sf = sfMigration;
2003: } else PetscCall(PetscSFDestroy(&sfMigration));
2004: PetscCall(PetscSFDestroy(&sfPoint));
2005: PetscCall(PetscLogEventEnd(DMPLEX_Distribute, dm, 0, 0, 0));
2006: PetscFunctionReturn(PETSC_SUCCESS);
2007: }
2009: PetscErrorCode DMPlexDistributeOverlap_Internal(DM dm, PetscInt overlap, MPI_Comm newcomm, const char *ovlboundary, PetscSF *sf, DM *dmOverlap)
2010: {
2011: DM_Plex *mesh = (DM_Plex *)dm->data;
2012: MPI_Comm comm;
2013: PetscMPIInt size, rank;
2014: PetscSection rootSection, leafSection;
2015: IS rootrank, leafrank;
2016: DM dmCoord;
2017: DMLabel lblOverlap;
2018: PetscSF sfOverlap, sfStratified, sfPoint;
2019: PetscBool clear_ovlboundary;
2021: PetscFunctionBegin;
2022: if (sf) *sf = NULL;
2023: *dmOverlap = NULL;
2024: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
2025: PetscCallMPI(MPI_Comm_size(comm, &size));
2026: PetscCallMPI(MPI_Comm_rank(comm, &rank));
2027: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
2028: {
2029: // We need to get options for the _already_distributed mesh, so it must be done here
2030: PetscInt overlap;
2031: const char *prefix;
2032: char oldPrefix[PETSC_MAX_PATH_LEN];
2034: oldPrefix[0] = '\0';
2035: PetscCall(PetscObjectGetOptionsPrefix((PetscObject)dm, &prefix));
2036: PetscCall(PetscStrncpy(oldPrefix, prefix, sizeof(oldPrefix)));
2037: PetscCall(PetscObjectAppendOptionsPrefix((PetscObject)dm, "dist_"));
2038: PetscCall(DMPlexGetOverlap(dm, &overlap));
2039: PetscObjectOptionsBegin((PetscObject)dm);
2040: PetscCall(DMSetFromOptions_Overlap_Plex(dm, PetscOptionsObject, &overlap));
2041: PetscOptionsEnd();
2042: PetscCall(PetscObjectSetOptionsPrefix((PetscObject)dm, oldPrefix[0] == '\0' ? NULL : oldPrefix));
2043: }
2044: if (ovlboundary) {
2045: PetscBool flg;
2046: PetscCall(DMHasLabel(dm, ovlboundary, &flg));
2047: if (!flg) {
2048: DMLabel label;
2050: PetscCall(DMCreateLabel(dm, ovlboundary));
2051: PetscCall(DMGetLabel(dm, ovlboundary, &label));
2052: PetscCall(DMPlexMarkBoundaryFaces(dm, 1, label));
2053: clear_ovlboundary = PETSC_TRUE;
2054: }
2055: }
2056: PetscCall(PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0));
2057: /* Compute point overlap with neighbouring processes on the distributed DM */
2058: PetscCall(PetscLogEventBegin(DMPLEX_Partition, dm, 0, 0, 0));
2059: PetscCall(PetscSectionCreate(newcomm, &rootSection));
2060: PetscCall(PetscSectionCreate(newcomm, &leafSection));
2061: PetscCall(DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank));
2062: if (mesh->numOvLabels) PetscCall(DMPlexCreateOverlapLabelFromLabels(dm, mesh->numOvLabels, mesh->ovLabels, mesh->ovValues, mesh->numOvExLabels, mesh->ovExLabels, mesh->ovExValues, rootSection, rootrank, leafSection, leafrank, &lblOverlap));
2063: else PetscCall(DMPlexCreateOverlapLabel(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap));
2064: /* Convert overlap label to stratified migration SF */
2065: PetscCall(DMPlexPartitionLabelCreateSF(dm, lblOverlap, PETSC_FALSE, &sfOverlap));
2066: PetscCall(DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified));
2067: PetscCall(PetscSFDestroy(&sfOverlap));
2068: sfOverlap = sfStratified;
2069: PetscCall(PetscObjectSetName((PetscObject)sfOverlap, "Overlap SF"));
2070: PetscCall(PetscSFSetFromOptions(sfOverlap));
2072: PetscCall(PetscSectionDestroy(&rootSection));
2073: PetscCall(PetscSectionDestroy(&leafSection));
2074: PetscCall(ISDestroy(&rootrank));
2075: PetscCall(ISDestroy(&leafrank));
2076: PetscCall(PetscLogEventEnd(DMPLEX_Partition, dm, 0, 0, 0));
2078: /* Build the overlapping DM */
2079: PetscCall(DMPlexCreate(newcomm, dmOverlap));
2080: PetscCall(PetscObjectSetName((PetscObject)*dmOverlap, "Parallel Mesh"));
2081: PetscCall(DMPlexMigrate(dm, sfOverlap, *dmOverlap));
2082: /* Store the overlap in the new DM */
2083: PetscCall(DMPlexSetOverlap_Plex(*dmOverlap, dm, overlap));
2084: /* Build the new point SF */
2085: PetscCall(DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint));
2086: PetscCall(DMSetPointSF(*dmOverlap, sfPoint));
2087: PetscCall(DMGetCoordinateDM(*dmOverlap, &dmCoord));
2088: if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
2089: PetscCall(DMGetCellCoordinateDM(*dmOverlap, &dmCoord));
2090: if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
2091: PetscCall(PetscSFDestroy(&sfPoint));
2092: PetscCall(DMPlexCopy_Internal(dm, PETSC_TRUE, PETSC_FALSE, *dmOverlap));
2093: /* TODO: labels stored inside the DS for regions should be handled here */
2094: PetscCall(DMCopyDisc(dm, *dmOverlap));
2095: /* Cleanup overlap partition */
2096: PetscCall(DMLabelDestroy(&lblOverlap));
2097: if (sf) *sf = sfOverlap;
2098: else PetscCall(PetscSFDestroy(&sfOverlap));
2099: if (ovlboundary) {
2100: DMLabel label;
2101: PetscBool flg;
2103: if (clear_ovlboundary) PetscCall(DMRemoveLabel(dm, ovlboundary, NULL));
2104: PetscCall(DMHasLabel(*dmOverlap, ovlboundary, &flg));
2105: PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Missing %s label", ovlboundary);
2106: PetscCall(DMGetLabel(*dmOverlap, ovlboundary, &label));
2107: PetscCall(DMPlexMarkBoundaryFaces_Internal(*dmOverlap, 1, 0, label, PETSC_TRUE));
2108: }
2109: PetscCall(PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0));
2110: PetscFunctionReturn(PETSC_SUCCESS);
2111: }
/*@
  DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping `DM`.

  Collective

  Input Parameters:
+ dm      - The non-overlapping distributed `DMPLEX` object
- overlap - The overlap of partitions (the same on all ranks)

  Output Parameters:
+ sf        - The `PetscSF` used for point distribution, or pass `NULL` if not needed
- dmOverlap - The overlapping distributed `DMPLEX` object

  Options Database Keys:
+ -dm_plex_overlap_labels name1,name2,... - List of overlap label names
. -dm_plex_overlap_values int1,int2,...   - List of overlap label values
. -dm_plex_overlap_exclude_label label    - Label used to exclude points from overlap
- -dm_plex_overlap_exclude_value value    - Label value used to exclude points from overlap

  Level: advanced

  Notes:
  If the mesh was not distributed, the return value is `NULL`.

  The user can control the definition of adjacency for the mesh using `DMSetAdjacency()`. They should choose the combination appropriate for the function
  representation on the mesh.

.seealso: `DMPLEX`, `PetscSF`, `DM`, `DMPlexCreate()`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexCreateOverlapLabel()`, `DMPlexGetOverlap()`
@*/
PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PeOp PetscSF *sf, DM *dmOverlap)
{
  PetscFunctionBegin;
  if (sf) PetscAssertPointer(sf, 3);
  PetscAssertPointer(dmOverlap, 4);
  /* Delegate to the internal routine on the DM's own communicator, with no boundary label */
  PetscCall(DMPlexDistributeOverlap_Internal(dm, overlap, PetscObjectComm((PetscObject)dm), NULL, sf, dmOverlap));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Plex implementation of DMPlexGetOverlap(): return the overlap width stored on the mesh */
PetscErrorCode DMPlexGetOverlap_Plex(DM dm, PetscInt *overlap)
{
  DM_Plex *mesh = (DM_Plex *)dm->data;

  PetscFunctionBegin;
  *overlap = mesh->overlap;
  PetscFunctionReturn(PETSC_SUCCESS);
}
2162: PetscErrorCode DMPlexSetOverlap_Plex(DM dm, DM dmSrc, PetscInt overlap)
2163: {
2164: DM_Plex *mesh = NULL;
2165: DM_Plex *meshSrc = NULL;
2167: PetscFunctionBegin;
2169: mesh = (DM_Plex *)dm->data;
2170: if (dmSrc) {
2172: meshSrc = (DM_Plex *)dmSrc->data;
2173: mesh->overlap = meshSrc->overlap;
2174: } else {
2175: mesh->overlap = 0;
2176: }
2177: mesh->overlap += overlap;
2178: PetscFunctionReturn(PETSC_SUCCESS);
2179: }
/*@
  DMPlexGetOverlap - Get the width of the cell overlap

  Not Collective

  Input Parameter:
. dm - The `DM`

  Output Parameter:
. overlap - the width of the cell overlap

  Level: intermediate

.seealso: `DMPLEX`, `DMPlexSetOverlap()`, `DMPlexDistribute()`
@*/
PetscErrorCode DMPlexGetOverlap(DM dm, PetscInt *overlap)
{
  PetscFunctionBegin;
  PetscAssertPointer(overlap, 2);
  /* Dispatch to the type-specific implementation (errors if the DM type does not provide one) */
  PetscUseMethod(dm, "DMPlexGetOverlap_C", (DM, PetscInt *), (dm, overlap));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/*@
  DMPlexSetOverlap - Set the width of the cell overlap

  Logically Collective

  Input Parameters:
+ dm      - The `DM`
. dmSrc   - The `DM` that produced this one, or `NULL`
- overlap - the width of the cell overlap

  Level: intermediate

  Note:
  The overlap from `dmSrc` is added to `dm`

.seealso: `DMPLEX`, `DMPlexGetOverlap()`, `DMPlexDistribute()`
@*/
PetscErrorCode DMPlexSetOverlap(DM dm, DM dmSrc, PetscInt overlap)
{
  PetscFunctionBegin;
  /* TryMethod: silently a no-op for DM types that do not implement it */
  PetscTryMethod(dm, "DMPlexSetOverlap_C", (DM, DM, PetscInt), (dm, dmSrc, overlap));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Plex implementation of DMPlexDistributeSetDefault(): store the default-distribution flag */
PetscErrorCode DMPlexDistributeSetDefault_Plex(DM dm, PetscBool dist)
{
  DM_Plex *mesh = (DM_Plex *)dm->data;

  PetscFunctionBegin;
  mesh->distDefault = dist;
  PetscFunctionReturn(PETSC_SUCCESS);
}
/*@
  DMPlexDistributeSetDefault - Set flag indicating whether the `DM` should be distributed by default

  Logically Collective

  Input Parameters:
+ dm   - The `DM`
- dist - Flag for distribution

  Level: intermediate

.seealso: `DMPLEX`, `DMPlexDistributeGetDefault()`, `DMPlexDistribute()`
@*/
PetscErrorCode DMPlexDistributeSetDefault(DM dm, PetscBool dist)
{
  PetscFunctionBegin;
  /* TryMethod: silently a no-op for DM types that do not implement it */
  PetscTryMethod(dm, "DMPlexDistributeSetDefault_C", (DM, PetscBool), (dm, dist));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Plex implementation of DMPlexDistributeGetDefault(): return the stored default-distribution flag */
PetscErrorCode DMPlexDistributeGetDefault_Plex(DM dm, PetscBool *dist)
{
  DM_Plex *mesh = (DM_Plex *)dm->data;

  PetscFunctionBegin;
  *dist = mesh->distDefault;
  PetscFunctionReturn(PETSC_SUCCESS);
}
/*@
  DMPlexDistributeGetDefault - Get flag indicating whether the `DM` should be distributed by default

  Not Collective

  Input Parameter:
. dm - The `DM`

  Output Parameter:
. dist - Flag for distribution

  Level: intermediate

.seealso: `DMPLEX`, `DM`, `DMPlexDistributeSetDefault()`, `DMPlexDistribute()`
@*/
PetscErrorCode DMPlexDistributeGetDefault(DM dm, PetscBool *dist)
{
  PetscFunctionBegin;
  PetscAssertPointer(dist, 2);
  /* Dispatch to the type-specific implementation (errors if the DM type does not provide one) */
  PetscUseMethod(dm, "DMPlexDistributeGetDefault_C", (DM, PetscBool *), (dm, dist));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/*@
  DMPlexGetGatherDM - Get a copy of the `DMPLEX` that gathers all points on the
  root process of the original's communicator.

  Collective

  Input Parameter:
. dm - the original `DMPLEX` object

  Output Parameters:
+ sf         - the `PetscSF` used for point distribution (optional)
- gatherMesh - the gathered `DM` object, or `NULL`

  Level: intermediate

.seealso: `DMPLEX`, `DM`, `PetscSF`, `DMPlexDistribute()`, `DMPlexGetRedundantDM()`
@*/
PetscErrorCode DMPlexGetGatherDM(DM dm, PetscSF *sf, PeOp DM *gatherMesh)
{
  MPI_Comm         comm;
  PetscMPIInt      size;
  PetscPartitioner oldPart, gatherPart;

  PetscFunctionBegin;
  PetscAssertPointer(gatherMesh, 3);
  *gatherMesh = NULL;
  if (sf) *sf = NULL;
  comm = PetscObjectComm((PetscObject)dm);
  PetscCallMPI(MPI_Comm_size(comm, &size));
  /* On one process there is nothing to gather; gatherMesh stays NULL */
  if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
  /* Temporarily swap in a GATHER partitioner, distribute, then restore the original.
     The extra reference keeps oldPart alive while the DM holds gatherPart. */
  PetscCall(DMPlexGetPartitioner(dm, &oldPart));
  PetscCall(PetscObjectReference((PetscObject)oldPart));
  PetscCall(PetscPartitionerCreate(comm, &gatherPart));
  PetscCall(PetscPartitionerSetType(gatherPart, PETSCPARTITIONERGATHER));
  PetscCall(DMPlexSetPartitioner(dm, gatherPart));
  PetscCall(DMPlexDistribute(dm, 0, sf, gatherMesh));

  PetscCall(DMPlexSetPartitioner(dm, oldPart));
  PetscCall(PetscPartitionerDestroy(&gatherPart));
  PetscCall(PetscPartitionerDestroy(&oldPart));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/*@
  DMPlexGetRedundantDM - Get a copy of the `DMPLEX` that is completely copied on each process.

  Collective

  Input Parameter:
. dm - the original `DMPLEX` object

  Output Parameters:
+ sf            - the `PetscSF` used for point distribution (optional)
- redundantMesh - the redundant `DM` object, or `NULL`

  Level: intermediate

.seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexGetGatherDM()`
@*/
PetscErrorCode DMPlexGetRedundantDM(DM dm, PetscSF *sf, PeOp DM *redundantMesh)
{
  MPI_Comm     comm;
  PetscMPIInt  size, rank;
  PetscInt     pStart, pEnd, p;
  PetscInt     numPoints = -1;
  PetscSF      migrationSF, sfPoint, gatherSF;
  DM           gatherDM, dmCoord;
  PetscSFNode *points;

  PetscFunctionBegin;
  PetscAssertPointer(redundantMesh, 3);
  *redundantMesh = NULL;
  comm = PetscObjectComm((PetscObject)dm);
  PetscCallMPI(MPI_Comm_size(comm, &size));
  /* Serial case: the mesh is already fully redundant; hand back a new reference to dm itself */
  if (size == 1) {
    PetscCall(PetscObjectReference((PetscObject)dm));
    *redundantMesh = dm;
    if (sf) *sf = NULL;
    PetscFunctionReturn(PETSC_SUCCESS);
  }
  /* Step 1: gather the whole mesh onto rank 0 */
  PetscCall(DMPlexGetGatherDM(dm, &gatherSF, &gatherDM));
  if (!gatherDM) PetscFunctionReturn(PETSC_SUCCESS);
  /* NOTE(review): rank is computed here but not used below — confirm whether it can be removed */
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCall(DMPlexGetChart(gatherDM, &pStart, &pEnd));
  numPoints = pEnd - pStart;
  /* Only rank 0 holds the gathered points; broadcast the count so every rank can size its leaf array */
  PetscCallMPI(MPI_Bcast(&numPoints, 1, MPIU_INT, 0, comm));
  PetscCall(PetscMalloc1(numPoints, &points));
  PetscCall(PetscSFCreate(comm, &migrationSF));
  /* Step 2: build a migration SF in which every rank receives every point from rank 0 */
  for (p = 0; p < numPoints; p++) {
    points[p].index = p;
    points[p].rank = 0;
  }
  /* nroots = pEnd - pStart is nonzero only on rank 0, where the gathered mesh lives */
  PetscCall(PetscSFSetGraph(migrationSF, pEnd - pStart, numPoints, NULL, PETSC_OWN_POINTER, points, PETSC_OWN_POINTER));
  PetscCall(DMPlexCreate(comm, redundantMesh));
  PetscCall(PetscObjectSetName((PetscObject)*redundantMesh, "Redundant Mesh"));
  /* Step 3: migrate the gathered mesh to every rank */
  PetscCall(DMPlexMigrate(gatherDM, migrationSF, *redundantMesh));
  /* This is to express that all point are in overlap */
  PetscCall(DMPlexSetOverlap_Plex(*redundantMesh, NULL, PETSC_INT_MAX));
  PetscCall(DMPlexCreatePointSF(*redundantMesh, migrationSF, PETSC_FALSE, &sfPoint));
  PetscCall(DMSetPointSF(*redundantMesh, sfPoint));
  PetscCall(DMGetCoordinateDM(*redundantMesh, &dmCoord));
  if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
  PetscCall(PetscSFDestroy(&sfPoint));
  if (sf) {
    PetscSF tsf;

    /* Compose original->gathered with gathered->redundant, then restratify for the caller */
    PetscCall(PetscSFCompose(gatherSF, migrationSF, &tsf));
    PetscCall(DMPlexStratifyMigrationSF(dm, tsf, sf));
    PetscCall(PetscSFDestroy(&tsf));
  }
  PetscCall(PetscSFDestroy(&migrationSF));
  PetscCall(PetscSFDestroy(&gatherSF));
  PetscCall(DMDestroy(&gatherDM));
  /* Carry the discretization and related metadata over from the original mesh */
  PetscCall(DMCopyDisc(dm, *redundantMesh));
  PetscCall(DMPlexCopy_Internal(dm, PETSC_TRUE, PETSC_FALSE, *redundantMesh));
  PetscFunctionReturn(PETSC_SUCCESS);
}
2415: /*@
2416: DMPlexIsDistributed - Find out whether this `DM` is distributed, i.e. more than one rank owns some points.
2418: Collective
2420: Input Parameter:
2421: . dm - The `DM` object
2423: Output Parameter:
2424: . distributed - Flag whether the `DM` is distributed
2426: Level: intermediate
2428: Notes:
2429: This currently finds out whether at least two ranks have any DAG points.
2430: This involves `MPI_Allreduce()` with one integer.
2431: The result is currently not stashed so every call to this routine involves this global communication.
2433: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexGetOverlap()`, `DMPlexIsInterpolated()`
2434: @*/
2435: PetscErrorCode DMPlexIsDistributed(DM dm, PetscBool *distributed)
2436: {
2437: PetscInt pStart, pEnd, count;
2438: MPI_Comm comm;
2439: PetscMPIInt size;
2441: PetscFunctionBegin;
2443: PetscAssertPointer(distributed, 2);
2444: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
2445: PetscCallMPI(MPI_Comm_size(comm, &size));
2446: if (size == 1) {
2447: *distributed = PETSC_FALSE;
2448: PetscFunctionReturn(PETSC_SUCCESS);
2449: }
2450: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
2451: count = (pEnd - pStart) > 0 ? 1 : 0;
2452: PetscCallMPI(MPIU_Allreduce(MPI_IN_PLACE, &count, 1, MPIU_INT, MPI_SUM, comm));
2453: *distributed = count > 1 ? PETSC_TRUE : PETSC_FALSE;
2454: PetscFunctionReturn(PETSC_SUCCESS);
2455: }
2457: /*@
2458: DMPlexDistributionSetName - Set the name of the specific parallel distribution
2460: Input Parameters:
2461: + dm - The `DM`
2462: - name - The name of the specific parallel distribution
2464: Level: developer
2466: Note:
2467: If distribution name is set when saving, `DMPlexTopologyView()` saves the plex's
2468: parallel distribution (i.e., partition, ownership, and local ordering of points) under
2469: this name. Conversely, if distribution name is set when loading, `DMPlexTopologyLoad()`
2470: loads the parallel distribution stored in file under this name.
2472: .seealso: `DMPLEX`, `DMPlexDistributionGetName()`, `DMPlexTopologyView()`, `DMPlexTopologyLoad()`
2473: @*/
2474: PetscErrorCode DMPlexDistributionSetName(DM dm, const char name[])
2475: {
2476: DM_Plex *mesh = (DM_Plex *)dm->data;
2478: PetscFunctionBegin;
2480: if (name) PetscAssertPointer(name, 2);
2481: PetscCall(PetscFree(mesh->distributionName));
2482: PetscCall(PetscStrallocpy(name, &mesh->distributionName));
2483: PetscFunctionReturn(PETSC_SUCCESS);
2484: }
2486: /*@
2487: DMPlexDistributionGetName - Retrieve the name of the specific parallel distribution
2489: Input Parameter:
2490: . dm - The `DM`
2492: Output Parameter:
2493: . name - The name of the specific parallel distribution
2495: Level: developer
2497: Note:
2498: If distribution name is set when saving, `DMPlexTopologyView()` saves the plex's
2499: parallel distribution (i.e., partition, ownership, and local ordering of points) under
2500: this name. Conversely, if distribution name is set when loading, `DMPlexTopologyLoad()`
2501: loads the parallel distribution stored in file under this name.
2503: .seealso: `DMPLEX`, `DMPlexDistributionSetName()`, `DMPlexTopologyView()`, `DMPlexTopologyLoad()`
2504: @*/
2505: PetscErrorCode DMPlexDistributionGetName(DM dm, const char *name[])
2506: {
2507: DM_Plex *mesh = (DM_Plex *)dm->data;
2509: PetscFunctionBegin;
2511: PetscAssertPointer(name, 2);
2512: *name = mesh->distributionName;
2513: PetscFunctionReturn(PETSC_SUCCESS);
2514: }