Actual source code: plexdistribute.c
1: #include <petsc/private/dmpleximpl.h>
2: #include <petsc/private/dmlabelimpl.h>
3: #include <petsc/private/partitionerimpl.h>
5: /*@C
6: DMPlexSetAdjacencyUser - Define adjacency in the mesh using a user-provided callback
8: Input Parameters:
9: + dm - The DM object
10: . user - The user callback, may be `NULL` (to clear the callback)
11: - ctx - context for callback evaluation, may be `NULL`
13: Level: advanced
15: Notes:
16: The caller of `DMPlexGetAdjacency()` may need to arrange that a large enough array is available for the adjacency.
18: Any setting here overrides other configuration of `DMPLEX` adjacency determination.
20: .seealso: `DMPLEX`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexGetAdjacency()`, `DMPlexGetAdjacencyUser()`
21: @*/
22: PetscErrorCode DMPlexSetAdjacencyUser(DM dm, PetscErrorCode (*user)(DM, PetscInt, PetscInt *, PetscInt[], void *), void *ctx)
23: {
24: DM_Plex *mesh = (DM_Plex *)dm->data;
26: PetscFunctionBegin;
28: mesh->useradjacency = user;
29: mesh->useradjacencyctx = ctx;
30: PetscFunctionReturn(PETSC_SUCCESS);
31: }
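/*
  A minimal sketch (not part of this file) of a user adjacency callback matching the signature
  expected by DMPlexSetAdjacencyUser(): it reports a point as adjacent only to itself and its cone.
  The name MyConeAdjacency is hypothetical. On entry *adjSize holds the capacity of adj[]; on exit
  it must hold the number of entries written.
*/
static PetscErrorCode MyConeAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[], void *ctx)
{
  const PetscInt *cone;
  PetscInt        coneSize, c, n = 0;

  PetscFunctionBegin;
  (void)ctx; /* unused in this sketch */
  PetscCall(DMPlexGetConeSize(dm, p, &coneSize));
  PetscCall(DMPlexGetCone(dm, p, &cone));
  PetscCheck(coneSize + 1 <= *adjSize, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Adjacency array too small");
  adj[n++] = p;                                      /* the point itself */
  for (c = 0; c < coneSize; ++c) adj[n++] = cone[c]; /* plus its cone */
  *adjSize = n;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Registered with, e.g., PetscCall(DMPlexSetAdjacencyUser(dm, MyConeAdjacency, NULL)); */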
33: /*@C
34: DMPlexGetAdjacencyUser - Get the user-defined adjacency callback
36: Input Parameter:
37: . dm - The `DM` object
39: Output Parameters:
40: + user - The callback
41: - ctx - context for callback evaluation
43: Level: advanced
45: .seealso: `DMPLEX`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexGetAdjacency()`, `DMPlexSetAdjacencyUser()`
46: @*/
47: PetscErrorCode DMPlexGetAdjacencyUser(DM dm, PetscErrorCode (**user)(DM, PetscInt, PetscInt *, PetscInt[], void *), void **ctx)
48: {
49: DM_Plex *mesh = (DM_Plex *)dm->data;
51: PetscFunctionBegin;
53: if (user) *user = mesh->useradjacency;
54: if (ctx) *ctx = mesh->useradjacencyctx;
55: PetscFunctionReturn(PETSC_SUCCESS);
56: }
58: /*@
59: DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.
61: Input Parameters:
62: + dm - The `DM` object
63: - useAnchors - Flag to use the constraints. If `PETSC_TRUE`, then constrained points are omitted from `DMPlexGetAdjacency()`, and their anchor points appear in their place.
65: Level: intermediate
67: .seealso: `DMPLEX`, `DMGetAdjacency()`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexSetAnchors()`
68: @*/
69: PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors)
70: {
71: DM_Plex *mesh = (DM_Plex *)dm->data;
73: PetscFunctionBegin;
75: mesh->useAnchors = useAnchors;
76: PetscFunctionReturn(PETSC_SUCCESS);
77: }
79: /*@
80: DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.
82: Input Parameter:
83: . dm - The `DM` object
85: Output Parameter:
86: . useAnchors - Flag to use the constraints. If `PETSC_TRUE`, then constrained points are omitted from `DMPlexGetAdjacency()`, and their anchor points appear in their place.
88: Level: intermediate
90: .seealso: `DMPLEX`, `DMPlexSetAdjacencyUseAnchors()`, `DMSetAdjacency()`, `DMGetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexSetAnchors()`
91: @*/
92: PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors)
93: {
94: DM_Plex *mesh = (DM_Plex *)dm->data;
96: PetscFunctionBegin;
98: PetscAssertPointer(useAnchors, 2);
99: *useAnchors = mesh->useAnchors;
100: PetscFunctionReturn(PETSC_SUCCESS);
101: }
103: static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
104: {
105: const PetscInt *cone = NULL;
106: PetscInt numAdj = 0, maxAdjSize = *adjSize, coneSize, c;
108: PetscFunctionBeginHot;
109: PetscCall(DMPlexGetConeSize(dm, p, &coneSize));
110: PetscCall(DMPlexGetCone(dm, p, &cone));
111: for (c = 0; c <= coneSize; ++c) {
112: const PetscInt point = !c ? p : cone[c - 1];
113: const PetscInt *support = NULL;
114: PetscInt supportSize, s, q;
116: PetscCall(DMPlexGetSupportSize(dm, point, &supportSize));
117: PetscCall(DMPlexGetSupport(dm, point, &support));
118: for (s = 0; s < supportSize; ++s) {
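      /* Linear search for support[s] among the entries gathered so far; if the scan finishes
         without a match, the comma expression in the loop condition appends it to adj[] (and
         evaluates to 0, terminating the loop), so every point is recorded exactly once. */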
119: for (q = 0; q < numAdj || ((void)(adj[numAdj++] = support[s]), 0); ++q) {
120: if (support[s] == adj[q]) break;
121: }
122: PetscCheck(numAdj <= maxAdjSize, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
123: }
124: }
125: *adjSize = numAdj;
126: PetscFunctionReturn(PETSC_SUCCESS);
127: }
129: static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
130: {
131: const PetscInt *support = NULL;
132: PetscInt numAdj = 0, maxAdjSize = *adjSize, supportSize, s;
134: PetscFunctionBeginHot;
135: PetscCall(DMPlexGetSupportSize(dm, p, &supportSize));
136: PetscCall(DMPlexGetSupport(dm, p, &support));
137: for (s = 0; s <= supportSize; ++s) {
138: const PetscInt point = !s ? p : support[s - 1];
139: const PetscInt *cone = NULL;
140: PetscInt coneSize, c, q;
142: PetscCall(DMPlexGetConeSize(dm, point, &coneSize));
143: PetscCall(DMPlexGetCone(dm, point, &cone));
144: for (c = 0; c < coneSize; ++c) {
145: for (q = 0; q < numAdj || ((void)(adj[numAdj++] = cone[c]), 0); ++q) {
146: if (cone[c] == adj[q]) break;
147: }
148: PetscCheck(numAdj <= maxAdjSize, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
149: }
150: }
151: *adjSize = numAdj;
152: PetscFunctionReturn(PETSC_SUCCESS);
153: }
155: static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[])
156: {
157: PetscInt *star = NULL;
158: PetscInt numAdj = 0, maxAdjSize = *adjSize, starSize, s;
160: PetscFunctionBeginHot;
161: PetscCall(DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star));
162: for (s = 0; s < starSize * 2; s += 2) {
163: const PetscInt *closure = NULL;
164: PetscInt closureSize, c, q;
166: PetscCall(DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt **)&closure));
167: for (c = 0; c < closureSize * 2; c += 2) {
168: for (q = 0; q < numAdj || ((void)(adj[numAdj++] = closure[c]), 0); ++q) {
169: if (closure[c] == adj[q]) break;
170: }
171: PetscCheck(numAdj <= maxAdjSize, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
172: }
173: PetscCall(DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt **)&closure));
174: }
175: PetscCall(DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star));
176: *adjSize = numAdj;
177: PetscFunctionReturn(PETSC_SUCCESS);
178: }
180: // Returns the maximum number of adjacent points in the DMPlex
181: PetscErrorCode DMPlexGetMaxAdjacencySize_Internal(DM dm, PetscBool useAnchors, PetscInt *max_adjacency_size)
182: {
183: PetscInt depth, maxC, maxS, maxP, pStart, pEnd, asiz, maxAnchors = 1;
185: PetscFunctionBeginUser;
186: if (useAnchors) {
187: PetscSection aSec = NULL;
188: IS aIS = NULL;
189: PetscInt aStart, aEnd;
190: PetscCall(DMPlexGetAnchors(dm, &aSec, &aIS));
191: if (aSec) {
192: PetscCall(PetscSectionGetMaxDof(aSec, &maxAnchors));
193: maxAnchors = PetscMax(1, maxAnchors);
194: PetscCall(PetscSectionGetChart(aSec, &aStart, &aEnd));
195: }
196: }
198: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
199: PetscCall(DMPlexGetDepth(dm, &depth));
200: depth = PetscMax(depth, -depth);
201: PetscCall(DMPlexGetMaxSizes(dm, &maxC, &maxS));
202: maxP = maxS * maxC;
203: /* Adjacency can be as large as supp(cl(cell)) or cl(supp(vertex)),
204: supp(cell) + supp(maxC faces) + supp(maxC^2 edges) + supp(maxC^3 vertices)
205: = 0 + maxS*maxC + maxS^2*maxC^2 + maxS^3*maxC^3
206: = \sum^d_{i=0} (maxS*maxC)^i - 1
207: = ((maxS*maxC)^{d+1} - 1) / (maxS*maxC - 1) - 1
208: We could improve this by getting the max by strata:
209: supp[d](cell) + supp[d-1](maxC[d] faces) + supp[1](maxC[d]*maxC[d-1] edges) + supp[0](maxC[d]*maxC[d-1]*maxC[d-2] vertices)
210: = 0 + maxS[d-1]*maxC[d] + maxS[1]*maxC[d]*maxC[d-1] + maxS[0]*maxC[d]*maxC[d-1]*maxC[d-2]
211: and the same with S and C reversed
212: */
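  /* Illustrative arithmetic (numbers are not from the source): with maxC = 4 and maxS = 5 we get
     maxP = 20, so at depth 3 the geometric-series bound is (20^4 - 1)/(20 - 1) = 8421 points,
     before scaling by maxAnchors and clamping to the chart size below. */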
213: if ((depth == 3 && maxP > 200) || (depth == 2 && maxP > 580)) asiz = pEnd - pStart;
214: else asiz = (maxP > 1) ? ((PetscPowInt(maxP, depth + 1) - 1) / (maxP - 1)) : depth + 1;
215: asiz *= maxAnchors;
216: *max_adjacency_size = PetscMin(asiz, pEnd - pStart);
217: PetscFunctionReturn(PETSC_SUCCESS);
218: }
220: // Returns the mesh points adjacent to the selected point, given specific criteria
221: //
222: // + adjSize - Number of adjacent points
223: // - adj - Array of the adjacent points
224: PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[])
225: {
226: static PetscInt asiz = 0;
227: PetscInt aStart = -1, aEnd = -1;
228: PetscInt maxAdjSize;
229: PetscSection aSec = NULL;
230: IS aIS = NULL;
231: const PetscInt *anchors;
232: DM_Plex *mesh = (DM_Plex *)dm->data;
234: PetscFunctionBeginHot;
235: if (useAnchors) {
236: PetscCall(DMPlexGetAnchors(dm, &aSec, &aIS));
237: if (aSec) {
238: PetscCall(PetscSectionGetChart(aSec, &aStart, &aEnd));
239: PetscCall(ISGetIndices(aIS, &anchors));
240: }
241: }
242: if (!*adj) {
243: PetscCall(DMPlexGetMaxAdjacencySize_Internal(dm, useAnchors, &asiz));
244: PetscCall(PetscMalloc1(asiz, adj));
245: }
246: if (*adjSize < 0) *adjSize = asiz;
247: maxAdjSize = *adjSize;
248: if (mesh->useradjacency) {
249: PetscCall((*mesh->useradjacency)(dm, p, adjSize, *adj, mesh->useradjacencyctx));
250: } else if (useTransitiveClosure) {
251: PetscCall(DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj));
252: } else if (useCone) {
253: PetscCall(DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj));
254: } else {
255: PetscCall(DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj));
256: }
257: if (useAnchors && aSec) {
258: PetscInt origSize = *adjSize;
259: PetscInt numAdj = origSize;
260: PetscInt i = 0, j;
261: PetscInt *orig = *adj;
263: while (i < origSize) {
264: PetscInt p = orig[i];
265: PetscInt aDof = 0;
267: if (p >= aStart && p < aEnd) PetscCall(PetscSectionGetDof(aSec, p, &aDof));
268: if (aDof) {
269: PetscInt aOff;
270: PetscInt s, q;
272: for (j = i + 1; j < numAdj; j++) orig[j - 1] = orig[j];
273: origSize--;
274: numAdj--;
275: PetscCall(PetscSectionGetOffset(aSec, p, &aOff));
276: for (s = 0; s < aDof; ++s) {
277: for (q = 0; q < numAdj || ((void)(orig[numAdj++] = anchors[aOff + s]), 0); ++q) {
278: if (anchors[aOff + s] == orig[q]) break;
279: }
280: PetscCheck(numAdj <= maxAdjSize, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
281: }
282: } else {
283: i++;
284: }
285: }
286: *adjSize = numAdj;
287: PetscCall(ISRestoreIndices(aIS, &anchors));
288: }
289: PetscFunctionReturn(PETSC_SUCCESS);
290: }
292: /*@
293: DMPlexGetAdjacency - Return all points adjacent to the given point
295: Input Parameters:
296: + dm - The `DM` object
297: - p - The point
299: Input/Output Parameters:
300: + adjSize - The maximum size of `adj` if it is non-`NULL`, or `PETSC_DETERMINE`;
301: on output the number of adjacent points
302: - adj - Either `NULL` so that the array is allocated, or an existing array with size `adjSize`;
303: on output contains the adjacent points
305: Level: advanced
307: Notes:
308: The user must `PetscFree()` the `adj` array if it was not passed in.
310: .seealso: `DMPLEX`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMCreateMatrix()`, `DMPlexPreallocateOperator()`
311: @*/
312: PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[])
313: {
314: PetscBool useCone, useClosure, useAnchors;
316: PetscFunctionBeginHot;
318: PetscAssertPointer(adjSize, 3);
319: PetscAssertPointer(adj, 4);
320: PetscCall(DMGetBasicAdjacency(dm, &useCone, &useClosure));
321: PetscCall(DMPlexGetAdjacencyUseAnchors(dm, &useAnchors));
322: PetscCall(DMPlexGetAdjacency_Internal(dm, p, useCone, useClosure, useAnchors, adjSize, adj));
323: PetscFunctionReturn(PETSC_SUCCESS);
324: }
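/*
  A minimal usage sketch (assuming an existing DMPLEX dm and point p): passing adj = NULL and
  adjSize = PETSC_DETERMINE lets DMPlexGetAdjacency() allocate the array, which the caller must
  then free, as the note above says. The helper name PrintAdjacency is hypothetical.
*/
static PetscErrorCode PrintAdjacency(DM dm, PetscInt p)
{
  PetscInt *adj = NULL, adjSize = PETSC_DETERMINE, a;

  PetscFunctionBegin;
  PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
  for (a = 0; a < adjSize; ++a) PetscCall(PetscPrintf(PETSC_COMM_SELF, "point %" PetscInt_FMT " ~ %" PetscInt_FMT "\n", p, adj[a]));
  PetscCall(PetscFree(adj)); /* required because the array was allocated by the call */
  PetscFunctionReturn(PETSC_SUCCESS);
}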
326: /*@
327: DMPlexCreateTwoSidedProcessSF - Create a `PetscSF` which has only process connectivity
329: Collective
331: Input Parameters:
332: + dm - The `DM`
333: . sfPoint - The `PetscSF` which encodes point connectivity
334: . rootRankSection - to be documented
335: . rootRanks - to be documented
336: . leafRankSection - to be documented
337: - leafRanks - to be documented
339: Output Parameters:
340: + processRanks - A list of process neighbors, or `NULL`
341: - sfProcess - A `PetscSF` encoding the two-sided process connectivity, or `NULL`
343: Level: developer
345: .seealso: `DMPLEX`, `PetscSFCreate()`, `DMPlexCreateProcessSF()`
346: @*/
347: PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, PeOp IS *processRanks, PeOp PetscSF *sfProcess)
348: {
349: const PetscSFNode *remotePoints;
350: PetscInt *localPointsNew;
351: PetscSFNode *remotePointsNew;
352: const PetscInt *nranks;
353: PetscInt *ranksNew;
354: PetscBT neighbors;
355: PetscInt pStart, pEnd, p, numLeaves, l, numNeighbors, n;
356: PetscMPIInt size, proc, rank;
358: PetscFunctionBegin;
361: if (processRanks) PetscAssertPointer(processRanks, 7);
362: if (sfProcess) PetscAssertPointer(sfProcess, 8);
363: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)dm), &size));
364: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)dm), &rank));
365: PetscCall(PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints));
366: PetscCall(PetscBTCreate(size, &neighbors));
367: PetscCall(PetscBTMemzero(size, neighbors));
368: /* Compute root-to-leaf process connectivity */
369: PetscCall(PetscSectionGetChart(rootRankSection, &pStart, &pEnd));
370: PetscCall(ISGetIndices(rootRanks, &nranks));
371: for (p = pStart; p < pEnd; ++p) {
372: PetscInt ndof, noff, n;
374: PetscCall(PetscSectionGetDof(rootRankSection, p, &ndof));
375: PetscCall(PetscSectionGetOffset(rootRankSection, p, &noff));
376: for (n = 0; n < ndof; ++n) PetscCall(PetscBTSet(neighbors, nranks[noff + n]));
377: }
378: PetscCall(ISRestoreIndices(rootRanks, &nranks));
379: /* Compute leaf-to-neighbor process connectivity */
380: PetscCall(PetscSectionGetChart(leafRankSection, &pStart, &pEnd));
381: PetscCall(ISGetIndices(leafRanks, &nranks));
382: for (p = pStart; p < pEnd; ++p) {
383: PetscInt ndof, noff, n;
385: PetscCall(PetscSectionGetDof(leafRankSection, p, &ndof));
386: PetscCall(PetscSectionGetOffset(leafRankSection, p, &noff));
387: for (n = 0; n < ndof; ++n) PetscCall(PetscBTSet(neighbors, nranks[noff + n]));
388: }
389: PetscCall(ISRestoreIndices(leafRanks, &nranks));
390: /* Compute leaf-to-root process connectivity */
391: for (l = 0; l < numLeaves; ++l) PetscCall(PetscBTSet(neighbors, remotePoints[l].rank));
392: /* Calculate edges */
393: PetscCall(PetscBTClear(neighbors, rank));
394: for (proc = 0, numNeighbors = 0; proc < size; ++proc) {
395: if (PetscBTLookup(neighbors, proc)) ++numNeighbors;
396: }
397: PetscCall(PetscMalloc1(numNeighbors, &ranksNew));
398: PetscCall(PetscMalloc1(numNeighbors, &localPointsNew));
399: PetscCall(PetscMalloc1(numNeighbors, &remotePointsNew));
400: for (proc = 0, n = 0; proc < size; ++proc) {
401: if (PetscBTLookup(neighbors, proc)) {
402: ranksNew[n] = proc;
403: localPointsNew[n] = proc;
404: remotePointsNew[n].index = rank;
405: remotePointsNew[n].rank = proc;
406: ++n;
407: }
408: }
409: PetscCall(PetscBTDestroy(&neighbors));
410: if (processRanks) PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks));
411: else PetscCall(PetscFree(ranksNew));
412: if (sfProcess) {
413: PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess));
414: PetscCall(PetscObjectSetName((PetscObject)*sfProcess, "Two-Sided Process SF"));
415: PetscCall(PetscSFSetFromOptions(*sfProcess));
416: PetscCall(PetscSFSetGraph(*sfProcess, size, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER));
417: }
418: PetscFunctionReturn(PETSC_SUCCESS);
419: }
421: /*@
422: DMPlexDistributeOwnership - Compute owner information for shared points, essentially making the one-sided sharing information in the point `PetscSF` two-sided.
424: Collective
426: Input Parameter:
427: . dm - The `DM`
429: Output Parameters:
430: + rootSection - The number of leaves for a given root point
431: . rootrank - The rank of each edge into the root point
432: . leafSection - The number of processes sharing a given leaf point
433: - leafrank - The rank of each process sharing a leaf point
435: Level: developer
437: .seealso: `DMPLEX`, `DMPlexCreateOverlapLabel()`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
438: @*/
439: PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank)
440: {
441: MPI_Comm comm;
442: PetscSF sfPoint;
443: const PetscInt *rootdegree;
444: PetscInt *myrank, *remoterank;
445: PetscInt pStart, pEnd, p, nedges;
446: PetscMPIInt rank;
448: PetscFunctionBegin;
449: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
450: PetscCallMPI(MPI_Comm_rank(comm, &rank));
451: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
452: PetscCall(DMGetPointSF(dm, &sfPoint));
453: /* Compute number of leaves for each root */
454: PetscCall(PetscObjectSetName((PetscObject)rootSection, "Root Section"));
455: PetscCall(PetscSectionSetChart(rootSection, pStart, pEnd));
456: PetscCall(PetscSFComputeDegreeBegin(sfPoint, &rootdegree));
457: PetscCall(PetscSFComputeDegreeEnd(sfPoint, &rootdegree));
458: for (p = pStart; p < pEnd; ++p) PetscCall(PetscSectionSetDof(rootSection, p, rootdegree[p - pStart]));
459: PetscCall(PetscSectionSetUp(rootSection));
460: /* Gather rank of each leaf to root */
461: PetscCall(PetscSectionGetStorageSize(rootSection, &nedges));
462: PetscCall(PetscMalloc1(pEnd - pStart, &myrank));
463: PetscCall(PetscMalloc1(nedges, &remoterank));
464: for (p = 0; p < pEnd - pStart; ++p) myrank[p] = rank;
465: PetscCall(PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank));
466: PetscCall(PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank));
467: PetscCall(PetscFree(myrank));
468: PetscCall(ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank));
469: /* Distribute remote ranks to leaves */
470: PetscCall(PetscObjectSetName((PetscObject)leafSection, "Leaf Section"));
471: PetscCall(DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank));
472: PetscFunctionReturn(PETSC_SUCCESS);
473: }
475: /*@
476: DMPlexCreateOverlapLabel - Compute a label indicating what overlap points should be sent to new processes
478: Collective
480: Input Parameters:
481: + dm - The `DM`
482: . levels - Number of overlap levels
483: . rootSection - The number of leaves for a given root point
484: . rootrank - The rank of each edge into the root point
485: . leafSection - The number of processes sharing a given leaf point
486: - leafrank - The rank of each process sharing a leaf point
488: Output Parameter:
489: . ovLabel - `DMLabel` containing remote overlap contributions as point/rank pairings
491: Level: developer
493: .seealso: `DMPLEX`, `DMPlexCreateOverlapLabelFromLabels()`, `DMPlexGetAdjacency()`, `DMPlexDistributeOwnership()`, `DMPlexDistribute()`
494: @*/
495: PetscErrorCode DMPlexCreateOverlapLabel(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
496: {
497: MPI_Comm comm;
498: DMLabel ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
499: PetscSF sfPoint;
500: const PetscSFNode *remote;
501: const PetscInt *local;
502: const PetscInt *nrank, *rrank;
503: PetscInt *adj = NULL;
504: PetscInt pStart, pEnd, p, sStart, sEnd, nleaves, l;
505: PetscMPIInt rank, size;
506: PetscBool flg;
508: PetscFunctionBegin;
509: *ovLabel = NULL;
510: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
511: PetscCallMPI(MPI_Comm_size(comm, &size));
512: PetscCallMPI(MPI_Comm_rank(comm, &rank));
513: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
514: PetscCall(DMGetPointSF(dm, &sfPoint));
515: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
516: if (!levels) {
517: /* Add owned points */
518: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel));
519: for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank));
520: PetscFunctionReturn(PETSC_SUCCESS);
521: }
522: PetscCall(PetscSectionGetChart(leafSection, &sStart, &sEnd));
523: PetscCall(PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote));
524: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank));
525: /* Handle leaves: shared with the root point */
526: for (l = 0; l < nleaves; ++l) {
527: PetscInt adjSize = PETSC_DETERMINE, a;
529: PetscCall(DMPlexGetAdjacency(dm, local ? local[l] : l, &adjSize, &adj));
530: for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank));
531: }
532: PetscCall(ISGetIndices(rootrank, &rrank));
533: PetscCall(ISGetIndices(leafrank, &nrank));
534: /* Handle roots */
535: for (p = pStart; p < pEnd; ++p) {
536: PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a;
538: if ((p >= sStart) && (p < sEnd)) {
539: /* Some leaves share a root with other leaves on different processes */
540: PetscCall(PetscSectionGetDof(leafSection, p, &neighbors));
541: if (neighbors) {
542: PetscCall(PetscSectionGetOffset(leafSection, p, &noff));
543: PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
544: for (n = 0; n < neighbors; ++n) {
545: const PetscInt remoteRank = nrank[noff + n];
547: if (remoteRank == rank) continue;
548: for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
549: }
550: }
551: }
552: /* Roots are shared with leaves */
553: PetscCall(PetscSectionGetDof(rootSection, p, &neighbors));
554: if (!neighbors) continue;
555: PetscCall(PetscSectionGetOffset(rootSection, p, &noff));
556: PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
557: for (n = 0; n < neighbors; ++n) {
558: const PetscInt remoteRank = rrank[noff + n];
560: if (remoteRank == rank) continue;
561: for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
562: }
563: }
564: PetscCall(PetscFree(adj));
565: PetscCall(ISRestoreIndices(rootrank, &rrank));
566: PetscCall(ISRestoreIndices(leafrank, &nrank));
567: /* Add additional overlap levels */
568: for (l = 1; l < levels; l++) {
569: /* Propagate point donations over SF to capture remote connections */
570: PetscCall(DMPlexPartitionLabelPropagate(dm, ovAdjByRank));
571: /* Add next level of point donations to the label */
572: PetscCall(DMPlexPartitionLabelAdjacency(dm, ovAdjByRank));
573: }
574: /* We require the closure in the overlap */
575: PetscCall(DMPlexPartitionLabelClosure(dm, ovAdjByRank));
576: PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-overlap_view", &flg));
577: if (flg) {
578: PetscViewer viewer;
579: PetscCall(PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer));
580: PetscCall(DMLabelView(ovAdjByRank, viewer));
581: }
582: /* Invert sender to receiver label */
583: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel));
584: PetscCall(DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel));
585: /* Add owned points, except for shared local points */
586: for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank));
587: for (l = 0; l < nleaves; ++l) {
588: PetscCall(DMLabelClearValue(*ovLabel, local[l], rank));
589: PetscCall(DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank));
590: }
591: /* Clean up */
592: PetscCall(DMLabelDestroy(&ovAdjByRank));
593: PetscFunctionReturn(PETSC_SUCCESS);
594: }
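/*
  A condensed sketch of how DMPlexDistributeOwnership() and DMPlexCreateOverlapLabel() are
  typically chained (assuming an existing distributed DMPLEX dm and an overlap depth nlevels);
  it is not a verbatim excerpt of DMPlexDistributeOverlap(), and the helper name BuildOverlapLabel
  is hypothetical. The caller owns and must destroy the sections, index sets, and label.
*/
static PetscErrorCode BuildOverlapLabel(DM dm, PetscInt nlevels, DMLabel *ovLabel)
{
  PetscSection rootSection, leafSection;
  IS           rootrank, leafrank;
  MPI_Comm     comm;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCall(PetscSectionCreate(comm, &rootSection));
  PetscCall(PetscSectionCreate(comm, &leafSection));
  PetscCall(DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank));
  PetscCall(DMPlexCreateOverlapLabel(dm, nlevels, rootSection, rootrank, leafSection, leafrank, ovLabel));
  PetscCall(ISDestroy(&rootrank));
  PetscCall(ISDestroy(&leafrank));
  PetscCall(PetscSectionDestroy(&rootSection));
  PetscCall(PetscSectionDestroy(&leafSection));
  PetscFunctionReturn(PETSC_SUCCESS);
}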
596: static PetscErrorCode HandlePoint_Private(DM dm, PetscInt p, PetscSection section, const PetscInt ranks[], PetscInt numExLabels, const DMLabel exLabel[], const PetscInt exValue[], DMLabel ovAdjByRank)
597: {
598: PetscInt neighbors, el;
600: PetscFunctionBegin;
601: PetscCall(PetscSectionGetDof(section, p, &neighbors));
602: if (neighbors) {
603: PetscInt *adj = NULL;
604: PetscInt adjSize = PETSC_DETERMINE, noff, n, a;
605: PetscMPIInt rank;
607: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)dm), &rank));
608: PetscCall(PetscSectionGetOffset(section, p, &noff));
609: PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
610: for (n = 0; n < neighbors; ++n) {
611: const PetscInt remoteRank = ranks[noff + n];
613: if (remoteRank == rank) continue;
614: for (a = 0; a < adjSize; ++a) {
615: PetscBool insert = PETSC_TRUE;
617: for (el = 0; el < numExLabels; ++el) {
618: PetscInt exVal;
619: PetscCall(DMLabelGetValue(exLabel[el], adj[a], &exVal));
620: if (exVal == exValue[el]) {
621: insert = PETSC_FALSE;
622: break;
623: }
624: }
625: if (insert) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
626: }
627: }
628: PetscCall(PetscFree(adj));
629: }
630: PetscFunctionReturn(PETSC_SUCCESS);
631: }
633: /*@C
634: DMPlexCreateOverlapLabelFromLabels - Compute a label indicating what overlap points should be sent to new processes
636: Collective
638: Input Parameters:
639: + dm - The `DM`
640: . numLabels - The number of labels to draw candidate points from
641: . label - An array of labels containing candidate points
642: . value - An array of label values marking the candidate points
643: . numExLabels - The number of labels to use for exclusion
644: . exLabel - An array of labels indicating points to be excluded, or `NULL`
645: . exValue - An array of label values to be excluded, or `NULL`
646: . rootSection - The number of leaves for a given root point
647: . rootrank - The rank of each edge into the root point
648: . leafSection - The number of processes sharing a given leaf point
649: - leafrank - The rank of each process sharing a leaf point
651: Output Parameter:
652: . ovLabel - `DMLabel` containing remote overlap contributions as point/rank pairings
654: Level: developer
656: Note:
657: The candidate points are only added to the overlap if they are adjacent to a shared point
659: .seealso: `DMPLEX`, `DMPlexCreateOverlapLabel()`, `DMPlexGetAdjacency()`, `DMPlexDistributeOwnership()`, `DMPlexDistribute()`
660: @*/
661: PetscErrorCode DMPlexCreateOverlapLabelFromLabels(DM dm, PetscInt numLabels, const DMLabel label[], const PetscInt value[], PetscInt numExLabels, const DMLabel exLabel[], const PetscInt exValue[], PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
662: {
663: MPI_Comm comm;
664: DMLabel ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
665: PetscSF sfPoint;
666: const PetscSFNode *remote;
667: const PetscInt *local;
668: const PetscInt *nrank, *rrank;
669: PetscInt *adj = NULL;
670: PetscInt pStart, pEnd, p, sStart, sEnd, nleaves, l, el;
671: PetscMPIInt rank, size;
672: PetscBool flg;
674: PetscFunctionBegin;
675: *ovLabel = NULL;
676: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
677: PetscCallMPI(MPI_Comm_size(comm, &size));
678: PetscCallMPI(MPI_Comm_rank(comm, &rank));
679: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
680: PetscCall(DMGetPointSF(dm, &sfPoint));
681: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
682: PetscCall(PetscSectionGetChart(leafSection, &sStart, &sEnd));
683: PetscCall(PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote));
684: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank));
685: PetscCall(ISGetIndices(rootrank, &rrank));
686: PetscCall(ISGetIndices(leafrank, &nrank));
687: for (l = 0; l < numLabels; ++l) {
688: IS valIS;
689: const PetscInt *points;
690: PetscInt n;
692: PetscCall(DMLabelGetStratumIS(label[l], value[l], &valIS));
693: if (!valIS) continue;
694: PetscCall(ISGetIndices(valIS, &points));
695: PetscCall(ISGetLocalSize(valIS, &n));
696: for (PetscInt i = 0; i < n; ++i) {
697: const PetscInt p = points[i];
699: if ((p >= sStart) && (p < sEnd)) {
700: PetscInt loc, adjSize = PETSC_DETERMINE;
702: /* Handle leaves: shared with the root point */
703: if (local) PetscCall(PetscFindInt(p, nleaves, local, &loc));
704: else loc = (p >= 0 && p < nleaves) ? p : -1;
705: if (loc >= 0) {
706: const PetscInt remoteRank = remote[loc].rank;
708: PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
709: for (PetscInt a = 0; a < adjSize; ++a) {
710: PetscBool insert = PETSC_TRUE;
712: for (el = 0; el < numExLabels; ++el) {
713: PetscInt exVal;
714: PetscCall(DMLabelGetValue(exLabel[el], adj[a], &exVal));
715: if (exVal == exValue[el]) {
716: insert = PETSC_FALSE;
717: break;
718: }
719: }
720: if (insert) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
721: }
722: }
723: /* Some leaves share a root with other leaves on different processes */
724: PetscCall(HandlePoint_Private(dm, p, leafSection, nrank, numExLabels, exLabel, exValue, ovAdjByRank));
725: }
726: /* Roots are shared with leaves */
727: PetscCall(HandlePoint_Private(dm, p, rootSection, rrank, numExLabels, exLabel, exValue, ovAdjByRank));
728: }
729: PetscCall(ISRestoreIndices(valIS, &points));
730: PetscCall(ISDestroy(&valIS));
731: }
732: PetscCall(PetscFree(adj));
733: PetscCall(ISRestoreIndices(rootrank, &rrank));
734: PetscCall(ISRestoreIndices(leafrank, &nrank));
735: /* We require the closure in the overlap */
736: PetscCall(DMPlexPartitionLabelClosure(dm, ovAdjByRank));
737: PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-overlap_view", &flg));
738: if (flg) {
739: PetscViewer viewer;
740: PetscCall(PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer));
741: PetscCall(DMLabelView(ovAdjByRank, viewer));
742: }
743: /* Invert sender to receiver label */
744: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel));
745: PetscCall(DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel));
746: /* Add owned points, except for shared local points */
747: for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank));
748: for (l = 0; l < nleaves; ++l) {
749: PetscCall(DMLabelClearValue(*ovLabel, local[l], rank));
750: PetscCall(DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank));
751: }
752: /* Clean up */
753: PetscCall(DMLabelDestroy(&ovAdjByRank));
754: PetscFunctionReturn(PETSC_SUCCESS);
755: }
757: /*@
758: DMPlexCreateOverlapMigrationSF - Create a `PetscSF` describing the new mesh distribution that realizes the overlap described by the input `PetscSF`
760: Collective
762: Input Parameters:
763: + dm - The `DM`
764: - overlapSF - The `PetscSF` mapping ghost points in overlap to owner points on other processes
766: Output Parameter:
767: . migrationSF - A `PetscSF` that maps original points in old locations to points in new locations
769: Level: developer
771: .seealso: `DMPLEX`, `DMPlexCreateOverlapLabel()`, `DMPlexDistributeOverlap()`, `DMPlexDistribute()`
772: @*/
773: PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF)
774: {
775: MPI_Comm comm;
776: PetscMPIInt rank, size;
777: PetscInt d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
778: PetscInt *pointDepths, *remoteDepths, *ilocal;
779: PetscInt *depthRecv, *depthShift, *depthIdx;
780: PetscSFNode *iremote;
781: PetscSF pointSF;
782: const PetscInt *sharedLocal;
783: const PetscSFNode *overlapRemote, *sharedRemote;
785: PetscFunctionBegin;
787: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
788: PetscCallMPI(MPI_Comm_rank(comm, &rank));
789: PetscCallMPI(MPI_Comm_size(comm, &size));
790: PetscCall(DMGetDimension(dm, &dim));
792: /* Before building the migration SF we need to know the new stratum offsets */
793: PetscCall(PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote));
794: PetscCall(PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths));
795: for (d = 0; d < dim + 1; d++) {
796: PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
797: for (p = pStart; p < pEnd; p++) pointDepths[p] = d;
798: }
799: for (p = 0; p < nleaves; p++) remoteDepths[p] = -1;
800: PetscCall(PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths, MPI_REPLACE));
801: PetscCall(PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths, MPI_REPLACE));
803: /* Count received points in each stratum and compute the internal strata shift */
804: PetscCall(PetscMalloc3(dim + 1, &depthRecv, dim + 1, &depthShift, dim + 1, &depthIdx));
805: for (d = 0; d < dim + 1; d++) depthRecv[d] = 0;
806: for (p = 0; p < nleaves; p++) depthRecv[remoteDepths[p]]++;
807: depthShift[dim] = 0;
808: for (d = 0; d < dim; d++) depthShift[d] = depthRecv[dim];
809: for (d = 1; d < dim; d++) depthShift[d] += depthRecv[0];
810: for (d = dim - 2; d > 0; d--) depthShift[d] += depthRecv[d + 1];
811: for (d = 0; d < dim + 1; d++) {
812: PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
813: depthIdx[d] = pStart + depthShift[d];
814: }
816: /* From the overlap SF, build an SF that describes the full overlap migration */
817: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
818: newLeaves = pEnd - pStart + nleaves;
819: PetscCall(PetscMalloc1(newLeaves, &ilocal));
820: PetscCall(PetscMalloc1(newLeaves, &iremote));
821: /* First map local points to themselves */
822: for (d = 0; d < dim + 1; d++) {
823: PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
824: for (p = pStart; p < pEnd; p++) {
825: point = p + depthShift[d];
826: ilocal[point] = point;
827: iremote[point].index = p;
828: iremote[point].rank = rank;
829: depthIdx[d]++;
830: }
831: }
833: /* Add in the remote roots for currently shared points */
834: PetscCall(DMGetPointSF(dm, &pointSF));
835: PetscCall(PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote));
836: for (d = 0; d < dim + 1; d++) {
837: PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
838: for (p = 0; p < numSharedPoints; p++) {
839: if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
840: point = sharedLocal[p] + depthShift[d];
841: iremote[point].index = sharedRemote[p].index;
842: iremote[point].rank = sharedRemote[p].rank;
843: }
844: }
845: }
847: /* Now add the incoming overlap points */
848: for (p = 0; p < nleaves; p++) {
849: point = depthIdx[remoteDepths[p]];
850: ilocal[point] = point;
851: iremote[point].index = overlapRemote[p].index;
852: iremote[point].rank = overlapRemote[p].rank;
853: depthIdx[remoteDepths[p]]++;
854: }
855: PetscCall(PetscFree2(pointDepths, remoteDepths));
857: PetscCall(PetscSFCreate(comm, migrationSF));
858: PetscCall(PetscObjectSetName((PetscObject)*migrationSF, "Overlap Migration SF"));
859: PetscCall(PetscSFSetFromOptions(*migrationSF));
860: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
861: PetscCall(PetscSFSetGraph(*migrationSF, pEnd - pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER));
863: PetscCall(PetscFree3(depthRecv, depthShift, depthIdx));
864: PetscFunctionReturn(PETSC_SUCCESS);
865: }
867: /*@
868: DMPlexStratifyMigrationSF - Rearrange the leaves of a migration `PetscSF` for stratification.
870: Input Parameters:
871: + dm - The `DM`
872: - sf - A star forest with unordered leaves, usually defining a `DM` point migration
874: Output Parameter:
875: . migrationSF - A star forest with added leaf indirection that ensures the resulting DM is stratified
877: Level: developer
879: Note:
880: This lexicographically sorts by (depth, cellType)
882: .seealso: `DMPLEX`, `DMPlexPartitionLabelCreateSF()`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
883: @*/
884: PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF)
885: {
886: MPI_Comm comm;
887: PetscMPIInt rank, size;
888: PetscInt d, ldepth, depth, dim, p, pStart, pEnd, nroots, nleaves;
889: PetscSFNode *pointDepths, *remoteDepths;
890: PetscInt *ilocal;
891: PetscInt *depthRecv, *depthShift, *depthIdx;
892: PetscInt *ctRecv, *ctShift, *ctIdx;
893: const PetscSFNode *iremote;
895: PetscFunctionBegin;
897: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
898: PetscCallMPI(MPI_Comm_rank(comm, &rank));
899: PetscCallMPI(MPI_Comm_size(comm, &size));
900: PetscCall(DMPlexGetDepth(dm, &ldepth));
901: PetscCall(DMGetDimension(dm, &dim));
902: PetscCallMPI(MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm));
903: PetscCheck(!(ldepth >= 0) || !(depth != ldepth), PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %" PetscInt_FMT " != %" PetscInt_FMT, ldepth, depth);
904: PetscCall(PetscLogEventBegin(DMPLEX_PartStratSF, dm, 0, 0, 0));
906: /* Before building the migration SF we need to know the new stratum offsets */
907: PetscCall(PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote));
908: PetscCall(PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths));
909: for (d = 0; d < depth + 1; ++d) {
910: PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
911: for (p = pStart; p < pEnd; ++p) {
912: DMPolytopeType ct;
914: PetscCall(DMPlexGetCellType(dm, p, &ct));
915: pointDepths[p].index = d;
916: pointDepths[p].rank = ct;
917: }
918: }
919: for (p = 0; p < nleaves; ++p) {
920: remoteDepths[p].index = -1;
921: remoteDepths[p].rank = -1;
922: }
923: PetscCall(PetscSFBcastBegin(sf, MPIU_SF_NODE, pointDepths, remoteDepths, MPI_REPLACE));
924: PetscCall(PetscSFBcastEnd(sf, MPIU_SF_NODE, pointDepths, remoteDepths, MPI_REPLACE));
925: /* Count received points in each stratum and compute the internal strata shift */
926: PetscCall(PetscCalloc6(depth + 1, &depthRecv, depth + 1, &depthShift, depth + 1, &depthIdx, DM_NUM_POLYTOPES, &ctRecv, DM_NUM_POLYTOPES, &ctShift, DM_NUM_POLYTOPES, &ctIdx));
927: for (p = 0; p < nleaves; ++p) {
928: if (remoteDepths[p].rank < 0) {
929: ++depthRecv[remoteDepths[p].index];
930: } else {
931: ++ctRecv[remoteDepths[p].rank];
932: }
933: }
934: {
935: PetscInt depths[4], dims[4], shift = 0, i, c;
937: /* Cells (depth), Vertices (0), Faces (depth-1), Edges (1)
938: Consider DM_POLYTOPE_FV_GHOST, DM_POLYTOPE_INTERIOR_GHOST and DM_POLYTOPE_UNKNOWN_CELL as cells
939: */
940: depths[0] = depth;
941: depths[1] = 0;
942: depths[2] = depth - 1;
943: depths[3] = 1;
944: dims[0] = dim;
945: dims[1] = 0;
946: dims[2] = dim - 1;
947: dims[3] = 1;
948: for (i = 0; i <= depth; ++i) {
949: const PetscInt dep = depths[i];
950: const PetscInt dim = dims[i];
952: for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
953: if (DMPolytopeTypeGetDim((DMPolytopeType)c) != dim && !(i == 0 && (c == DM_POLYTOPE_FV_GHOST || c == DM_POLYTOPE_INTERIOR_GHOST || c == DM_POLYTOPE_UNKNOWN_CELL))) continue;
954: ctShift[c] = shift;
955: shift += ctRecv[c];
956: }
957: depthShift[dep] = shift;
958: shift += depthRecv[dep];
959: }
960: for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
961: const PetscInt ctDim = DMPolytopeTypeGetDim((DMPolytopeType)c);
963: if ((ctDim < 0 || ctDim > dim) && c != DM_POLYTOPE_FV_GHOST && c != DM_POLYTOPE_INTERIOR_GHOST && c != DM_POLYTOPE_UNKNOWN_CELL) {
964: ctShift[c] = shift;
965: shift += ctRecv[c];
966: }
967: }
968: }
969: /* Derive a new local permutation based on stratified indices */
970: PetscCall(PetscMalloc1(nleaves, &ilocal));
971: for (p = 0; p < nleaves; ++p) {
972: const DMPolytopeType ct = (DMPolytopeType)remoteDepths[p].rank;
974: ilocal[p] = ctShift[ct] + ctIdx[ct];
975: ++ctIdx[ct];
976: }
977: PetscCall(PetscSFCreate(comm, migrationSF));
978: PetscCall(PetscObjectSetName((PetscObject)*migrationSF, "Migration SF"));
979: PetscCall(PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, (PetscSFNode *)iremote, PETSC_COPY_VALUES));
980: PetscCall(PetscFree2(pointDepths, remoteDepths));
981: PetscCall(PetscFree6(depthRecv, depthShift, depthIdx, ctRecv, ctShift, ctIdx));
982: PetscCall(PetscLogEventEnd(DMPLEX_PartStratSF, dm, 0, 0, 0));
983: PetscFunctionReturn(PETSC_SUCCESS);
984: }
986: /*@
987: DMPlexDistributeField - Distribute field data to match a given `PetscSF`, usually the `PetscSF` from mesh distribution
989: Collective
991: Input Parameters:
992: + dm - The `DMPLEX` object
993: . pointSF - The `PetscSF` describing the communication pattern
994: . originalSection - The `PetscSection` for existing data layout
995: - originalVec - The existing data in a local vector
997: Output Parameters:
998: + newSection - The `PetscSection` describing the new data layout
999: - newVec - The new data in a local vector
1001: Level: developer
1003: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexDistributeFieldIS()`, `DMPlexDistributeData()`
1004: @*/
1005: PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec)
1006: {
1007: PetscSF fieldSF;
1008: PetscInt *remoteOffsets, fieldSize;
1009: PetscScalar *originalValues, *newValues;
1011: PetscFunctionBegin;
1012: PetscCall(PetscLogEventBegin(DMPLEX_DistributeField, dm, 0, 0, 0));
1013: PetscCall(PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection));
1015: PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize));
1016: PetscCall(VecSetSizes(newVec, fieldSize, PETSC_DETERMINE));
1017: PetscCall(VecSetType(newVec, dm->vectype));
1019: PetscCall(VecGetArray(originalVec, &originalValues));
1020: PetscCall(VecGetArray(newVec, &newValues));
1021: PetscCall(PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF));
1022: PetscCall(PetscFree(remoteOffsets));
1023: PetscCall(PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues, MPI_REPLACE));
1024: PetscCall(PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues, MPI_REPLACE));
1025: PetscCall(PetscSFDestroy(&fieldSF));
1026: PetscCall(VecRestoreArray(newVec, &newValues));
1027: PetscCall(VecRestoreArray(originalVec, &originalValues));
1028: PetscCall(PetscLogEventEnd(DMPLEX_DistributeField, dm, 0, 0, 0));
1029: PetscFunctionReturn(PETSC_SUCCESS);
1030: }
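/*
  A minimal sketch of distributing a point-wise field (assuming migrationSF and the serial
  section/vector already exist); the caller creates an empty section and vector, which
  DMPlexDistributeField() sizes and fills, as done for coordinates later in this file.
  The helper name MoveField is hypothetical.
*/
static PetscErrorCode MoveField(DM dm, PetscSF migrationSF, PetscSection oldSection, Vec oldVec, PetscSection *newSection, Vec *newVec)
{
  PetscFunctionBegin;
  PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)dm), newSection));
  PetscCall(VecCreate(PETSC_COMM_SELF, newVec)); /* local vector; sizes and type set inside */
  PetscCall(DMPlexDistributeField(dm, migrationSF, oldSection, oldVec, *newSection, *newVec));
  PetscFunctionReturn(PETSC_SUCCESS);
}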
1032: /*@
1033: DMPlexDistributeFieldIS - Distribute field data to match a given `PetscSF`, usually the `PetscSF` from mesh distribution
1035: Collective
1037: Input Parameters:
1038: + dm - The `DMPLEX` object
1039: . pointSF - The `PetscSF` describing the communication pattern
1040: . originalSection - The `PetscSection` for existing data layout
1041: - originalIS - The existing data
1043: Output Parameters:
1044: + newSection - The `PetscSection` describing the new data layout
1045: - newIS - The new data
1047: Level: developer
1049: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexDistributeField()`, `DMPlexDistributeData()`
1050: @*/
1051: PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS)
1052: {
1053: PetscSF fieldSF;
1054: PetscInt *newValues, *remoteOffsets, fieldSize;
1055: const PetscInt *originalValues;
1057: PetscFunctionBegin;
1058: PetscCall(PetscLogEventBegin(DMPLEX_DistributeField, dm, 0, 0, 0));
1059: PetscCall(PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection));
1061: PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize));
1062: PetscCall(PetscMalloc1(fieldSize, &newValues));
1064: PetscCall(ISGetIndices(originalIS, &originalValues));
1065: PetscCall(PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF));
1066: PetscCall(PetscFree(remoteOffsets));
1067: PetscCall(PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *)originalValues, newValues, MPI_REPLACE));
1068: PetscCall(PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *)originalValues, newValues, MPI_REPLACE));
1069: PetscCall(PetscSFDestroy(&fieldSF));
1070: PetscCall(ISRestoreIndices(originalIS, &originalValues));
1071: PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS));
1072: PetscCall(PetscLogEventEnd(DMPLEX_DistributeField, dm, 0, 0, 0));
1073: PetscFunctionReturn(PETSC_SUCCESS);
1074: }
1076: /*@
1077: DMPlexDistributeData - Distribute field data to match a given `PetscSF`, usually the `PetscSF` from mesh distribution
1079: Collective
1081: Input Parameters:
1082: + dm - The `DMPLEX` object
1083: . pointSF - The `PetscSF` describing the communication pattern
1084: . originalSection - The `PetscSection` for existing data layout
1085: . datatype - The type of data
1086: - originalData - The existing data
1088: Output Parameters:
1089: + newSection - The `PetscSection` describing the new data layout
1090: - newData - The new data
1092: Level: developer
1094: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexDistributeField()`
1095: @*/
1096: PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData)
1097: {
1098: PetscSF fieldSF;
1099: PetscInt *remoteOffsets, fieldSize;
1100: PetscMPIInt dataSize;
1102: PetscFunctionBegin;
1103: PetscCall(PetscLogEventBegin(DMPLEX_DistributeData, dm, 0, 0, 0));
1104: PetscCall(PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection));
1106: PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize));
1107: PetscCallMPI(MPI_Type_size(datatype, &dataSize));
1108: PetscCall(PetscMalloc(fieldSize * dataSize, newData));
1110: PetscCall(PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF));
1111: PetscCall(PetscFree(remoteOffsets));
1112: PetscCall(PetscSFBcastBegin(fieldSF, datatype, originalData, *newData, MPI_REPLACE));
1113: PetscCall(PetscSFBcastEnd(fieldSF, datatype, originalData, *newData, MPI_REPLACE));
1114: PetscCall(PetscSFDestroy(&fieldSF));
1115: PetscCall(PetscLogEventEnd(DMPLEX_DistributeData, dm, 0, 0, 0));
1116: PetscFunctionReturn(PETSC_SUCCESS);
1117: }
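/*
  A small hedged sketch of DMPlexDistributeData() for raw integer data (assuming migrationSF, an
  existing layout section, and an array sized by its storage); the new section is created by the
  caller, while the output buffer is allocated inside and must later be freed with PetscFree().
  The helper name MoveIntData is hypothetical.
*/
static PetscErrorCode MoveIntData(DM dm, PetscSF migrationSF, PetscSection oldSection, PetscInt *oldData, PetscSection *newSection, PetscInt **newData)
{
  PetscFunctionBegin;
  PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)dm), newSection));
  PetscCall(DMPlexDistributeData(dm, migrationSF, oldSection, MPIU_INT, oldData, *newSection, (void **)newData));
  PetscFunctionReturn(PETSC_SUCCESS);
}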
1119: static PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
1120: {
1121: DM_Plex *pmesh = (DM_Plex *)dmParallel->data;
1122: MPI_Comm comm;
1123: PetscSF coneSF;
1124: PetscSection originalConeSection, newConeSection;
1125: PetscInt *remoteOffsets, *cones, *globCones, *newCones, newConesSize;
1126: PetscBool flg;
1128: PetscFunctionBegin;
1131: PetscCall(PetscLogEventBegin(DMPLEX_DistributeCones, dm, 0, 0, 0));
1132: /* Distribute cone section */
1133: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1134: PetscCall(DMPlexGetConeSection(dm, &originalConeSection));
1135: PetscCall(DMPlexGetConeSection(dmParallel, &newConeSection));
1136: PetscCall(PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection));
1137: PetscCall(DMSetUp(dmParallel));
1138: /* Communicate and renumber cones */
1139: PetscCall(PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF));
1140: PetscCall(PetscFree(remoteOffsets));
1141: PetscCall(DMPlexGetCones(dm, &cones));
1142: if (original) {
1143: PetscInt numCones;
1145: PetscCall(PetscSectionGetStorageSize(originalConeSection, &numCones));
1146: PetscCall(PetscMalloc1(numCones, &globCones));
1147: PetscCall(ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones));
1148: } else {
1149: globCones = cones;
1150: }
1151: PetscCall(DMPlexGetCones(dmParallel, &newCones));
1152: PetscCall(PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones, MPI_REPLACE));
1153: PetscCall(PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones, MPI_REPLACE));
1154: if (original) PetscCall(PetscFree(globCones));
1155: PetscCall(PetscSectionGetStorageSize(newConeSection, &newConesSize));
1156: PetscCall(ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones));
1157: if (PetscDefined(USE_DEBUG)) {
1158: PetscInt p;
1159: PetscBool valid = PETSC_TRUE;
1160: for (p = 0; p < newConesSize; ++p) {
1161: if (newCones[p] < 0) {
1162: valid = PETSC_FALSE;
1163: PetscCall(PetscPrintf(PETSC_COMM_SELF, "[%d] Point %" PetscInt_FMT " not in overlap SF\n", PetscGlobalRank, p));
1164: }
1165: }
1166: PetscCheck(valid, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
1167: }
1168: PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-cones_view", &flg));
1169: if (flg) {
1170: PetscCall(PetscPrintf(comm, "Serial Cone Section:\n"));
1171: PetscCall(PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_(comm)));
1172: PetscCall(PetscPrintf(comm, "Parallel Cone Section:\n"));
1173: PetscCall(PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_(comm)));
1174: PetscCall(PetscSFView(coneSF, NULL));
1175: }
1176: PetscCall(DMPlexGetConeOrientations(dm, &cones));
1177: PetscCall(DMPlexGetConeOrientations(dmParallel, &newCones));
1178: PetscCall(PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones, MPI_REPLACE));
1179: PetscCall(PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones, MPI_REPLACE));
1180: PetscCall(PetscSFDestroy(&coneSF));
1181: PetscCall(PetscLogEventEnd(DMPLEX_DistributeCones, dm, 0, 0, 0));
1182: /* Create supports and stratify DMPlex */
1183: {
1184: PetscInt pStart, pEnd;
1186: PetscCall(PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd));
1187: PetscCall(PetscSectionSetChart(pmesh->supportSection, pStart, pEnd));
1188: }
1189: PetscCall(DMPlexSymmetrize(dmParallel));
1190: PetscCall(DMPlexStratify(dmParallel));
1191: {
1192: PetscBool useCone, useClosure, useAnchors;
1194: PetscCall(DMGetBasicAdjacency(dm, &useCone, &useClosure));
1195: PetscCall(DMSetBasicAdjacency(dmParallel, useCone, useClosure));
1196: PetscCall(DMPlexGetAdjacencyUseAnchors(dm, &useAnchors));
1197: PetscCall(DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors));
1198: }
1199: PetscFunctionReturn(PETSC_SUCCESS);
1200: }
1202: static PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel)
1203: {
1204: MPI_Comm comm;
1205: DM cdm, cdmParallel;
1206: PetscSection originalCoordSection, newCoordSection;
1207: Vec originalCoordinates, newCoordinates;
1208: PetscInt bs;
1209: const char *name;
1210: const PetscReal *maxCell, *Lstart, *L;
1212: PetscFunctionBegin;
1216: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1217: PetscCall(DMGetCoordinateDM(dm, &cdm));
1218: PetscCall(DMGetCoordinateDM(dmParallel, &cdmParallel));
1219: PetscCall(DMCopyDisc(cdm, cdmParallel));
1220: PetscCall(DMGetCoordinateSection(dm, &originalCoordSection));
1221: PetscCall(DMGetCoordinateSection(dmParallel, &newCoordSection));
1222: PetscCall(DMGetCoordinatesLocal(dm, &originalCoordinates));
1223: if (originalCoordinates) {
1224: PetscCall(VecCreate(PETSC_COMM_SELF, &newCoordinates));
1225: PetscCall(PetscObjectGetName((PetscObject)originalCoordinates, &name));
1226: PetscCall(PetscObjectSetName((PetscObject)newCoordinates, name));
1228: PetscCall(DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates));
1229: PetscCall(DMSetCoordinatesLocal(dmParallel, newCoordinates));
1230: PetscCall(VecGetBlockSize(originalCoordinates, &bs));
1231: PetscCall(VecSetBlockSize(newCoordinates, bs));
1232: PetscCall(VecDestroy(&newCoordinates));
1233: }
1235: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1236: PetscCall(DMGetPeriodicity(dm, &maxCell, &Lstart, &L));
1237: PetscCall(DMSetPeriodicity(dmParallel, maxCell, Lstart, L));
1238: PetscCall(DMGetCellCoordinateDM(dm, &cdm));
1239: if (cdm) {
1240: PetscCall(DMClone(dmParallel, &cdmParallel));
1241: PetscCall(DMSetCellCoordinateDM(dmParallel, cdmParallel));
1242: PetscCall(DMCopyDisc(cdm, cdmParallel));
1243: PetscCall(DMDestroy(&cdmParallel));
1244: PetscCall(DMGetCellCoordinateSection(dm, &originalCoordSection));
1245: PetscCall(DMGetCellCoordinatesLocal(dm, &originalCoordinates));
1246: PetscCall(PetscSectionCreate(comm, &newCoordSection));
1247: if (originalCoordinates) {
1248: PetscCall(VecCreate(PETSC_COMM_SELF, &newCoordinates));
1249: PetscCall(PetscObjectGetName((PetscObject)originalCoordinates, &name));
1250: PetscCall(PetscObjectSetName((PetscObject)newCoordinates, name));
1252: PetscCall(DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates));
1253: PetscCall(VecGetBlockSize(originalCoordinates, &bs));
1254: PetscCall(VecSetBlockSize(newCoordinates, bs));
1255: PetscCall(DMSetCellCoordinateSection(dmParallel, bs, newCoordSection));
1256: PetscCall(DMSetCellCoordinatesLocal(dmParallel, newCoordinates));
1257: PetscCall(VecDestroy(&newCoordinates));
1258: }
1259: PetscCall(PetscSectionDestroy(&newCoordSection));
1260: }
1261: PetscFunctionReturn(PETSC_SUCCESS);
1262: }
1264: static PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel)
1265: {
1266: DM_Plex *mesh = (DM_Plex *)dm->data;
1267: MPI_Comm comm;
1268: DMLabel depthLabel;
1269: PetscMPIInt rank;
1270: PetscInt depth, d, numLabels, numLocalLabels, l;
1271: PetscBool hasLabels = PETSC_FALSE, lsendDepth, sendDepth;
1272: PetscObjectState depthState = -1;
1274: PetscFunctionBegin;
1278: PetscCall(PetscLogEventBegin(DMPLEX_DistributeLabels, dm, 0, 0, 0));
1279: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1280: PetscCallMPI(MPI_Comm_rank(comm, &rank));
1282: /* If the user has changed the depth label, communicate it instead */
1283: PetscCall(DMPlexGetDepth(dm, &depth));
1284: PetscCall(DMPlexGetDepthLabel(dm, &depthLabel));
1285: if (depthLabel) PetscCall(PetscObjectStateGet((PetscObject)depthLabel, &depthState));
1286: lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE;
1287: PetscCallMPI(MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPI_C_BOOL, MPI_LOR, comm));
1288: if (sendDepth) {
1289: PetscCall(DMPlexGetDepthLabel(dmParallel, &dmParallel->depthLabel));
1290: PetscCall(DMRemoveLabelBySelf(dmParallel, &dmParallel->depthLabel, PETSC_FALSE));
1291: }
1292: /* Everyone must have either the same number of labels, or none */
1293: PetscCall(DMGetNumLabels(dm, &numLocalLabels));
1294: numLabels = numLocalLabels;
1295: PetscCallMPI(MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm));
1296: if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
1297: for (l = 0; l < numLabels; ++l) {
1298: DMLabel label = NULL, labelNew = NULL;
1299: PetscBool isDepth, lisOutput = PETSC_TRUE, isOutput;
1300: const char *name = NULL;
1302: if (hasLabels) {
1303: PetscCall(DMGetLabelByNum(dm, l, &label));
1304: /* Skip "depth" because it is recreated */
1305: PetscCall(PetscObjectGetName((PetscObject)label, &name));
1306: PetscCall(PetscStrcmp(name, "depth", &isDepth));
1307: } else {
1308: isDepth = PETSC_FALSE;
1309: }
1310: PetscCallMPI(MPI_Bcast(&isDepth, 1, MPI_C_BOOL, 0, comm));
1311: if (isDepth && !sendDepth) continue;
1312: PetscCall(DMLabelDistribute(label, migrationSF, &labelNew));
1313: if (isDepth) {
1314: /* Put in any missing strata which can occur if users are managing the depth label themselves */
1315: PetscInt gdepth;
1317: PetscCallMPI(MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm));
1318: PetscCheck(!(depth >= 0) || !(gdepth != depth), PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %" PetscInt_FMT " != %" PetscInt_FMT, depth, gdepth);
1319: for (d = 0; d <= gdepth; ++d) {
1320: PetscBool has;
1322: PetscCall(DMLabelHasStratum(labelNew, d, &has));
1323: if (!has) PetscCall(DMLabelAddStratum(labelNew, d));
1324: }
1325: }
1326: PetscCall(DMAddLabel(dmParallel, labelNew));
1327: /* Put the output flag in the new label */
1328: if (hasLabels) PetscCall(DMGetLabelOutput(dm, name, &lisOutput));
1329: PetscCallMPI(MPIU_Allreduce(&lisOutput, &isOutput, 1, MPI_C_BOOL, MPI_LAND, comm));
1330: PetscCall(PetscObjectGetName((PetscObject)labelNew, &name));
1331: PetscCall(DMSetLabelOutput(dmParallel, name, isOutput));
1332: PetscCall(DMLabelDestroy(&labelNew));
1333: }
1334: {
1335: DMLabel ctLabel;
1337: // Reset label for fast lookup
1338: PetscCall(DMPlexGetCellTypeLabel(dmParallel, &ctLabel));
1339: PetscCall(DMLabelMakeAllInvalid_Internal(ctLabel));
1340: }
1341: PetscCall(PetscLogEventEnd(DMPLEX_DistributeLabels, dm, 0, 0, 0));
1342: PetscFunctionReturn(PETSC_SUCCESS);
1343: }
1345: static PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
1346: {
1347: DM_Plex *mesh = (DM_Plex *)dm->data;
1348: DM_Plex *pmesh = (DM_Plex *)dmParallel->data;
1349: MPI_Comm comm;
1350: DM refTree;
1351: PetscSection origParentSection, newParentSection;
1352: PetscInt *origParents, *origChildIDs;
1353: PetscBool flg;
1355: PetscFunctionBegin;
1358: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1360: /* Set up tree */
1361: PetscCall(DMPlexGetReferenceTree(dm, &refTree));
1362: PetscCall(DMPlexSetReferenceTree(dmParallel, refTree));
1363: PetscCall(DMPlexGetTree(dm, &origParentSection, &origParents, &origChildIDs, NULL, NULL));
1364: if (origParentSection) {
1365: PetscInt pStart, pEnd;
1366: PetscInt *newParents, *newChildIDs, *globParents;
1367: PetscInt *remoteOffsetsParents, newParentSize;
1368: PetscSF parentSF;
1370: PetscCall(DMPlexGetChart(dmParallel, &pStart, &pEnd));
1371: PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel), &newParentSection));
1372: PetscCall(PetscSectionSetChart(newParentSection, pStart, pEnd));
1373: PetscCall(PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection));
1374: PetscCall(PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF));
1375: PetscCall(PetscFree(remoteOffsetsParents));
1376: PetscCall(PetscSectionGetStorageSize(newParentSection, &newParentSize));
1377: PetscCall(PetscMalloc2(newParentSize, &newParents, newParentSize, &newChildIDs));
1378: if (original) {
1379: PetscInt numParents;
1381: PetscCall(PetscSectionGetStorageSize(origParentSection, &numParents));
1382: PetscCall(PetscMalloc1(numParents, &globParents));
1383: PetscCall(ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents));
1384: } else {
1385: globParents = origParents;
1386: }
1387: PetscCall(PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents, MPI_REPLACE));
1388: PetscCall(PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents, MPI_REPLACE));
1389: if (original) PetscCall(PetscFree(globParents));
1390: PetscCall(PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs, MPI_REPLACE));
1391: PetscCall(PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs, MPI_REPLACE));
1392: PetscCall(ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents));
1393: if (PetscDefined(USE_DEBUG)) {
1394: PetscInt p;
1395: PetscBool valid = PETSC_TRUE;
1396: for (p = 0; p < newParentSize; ++p) {
1397: if (newParents[p] < 0) {
1398: valid = PETSC_FALSE;
1399: PetscCall(PetscPrintf(PETSC_COMM_SELF, "Point %" PetscInt_FMT " not in overlap SF\n", p));
1400: }
1401: }
1402: PetscCheck(valid, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
1403: }
1404: PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-parents_view", &flg));
1405: if (flg) {
1406: PetscCall(PetscPrintf(comm, "Serial Parent Section: \n"));
1407: PetscCall(PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_(comm)));
1408: PetscCall(PetscPrintf(comm, "Parallel Parent Section: \n"));
1409: PetscCall(PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_(comm)));
1410: PetscCall(PetscSFView(parentSF, NULL));
1411: }
1412: PetscCall(DMPlexSetTree(dmParallel, newParentSection, newParents, newChildIDs));
1413: PetscCall(PetscSectionDestroy(&newParentSection));
1414: PetscCall(PetscFree2(newParents, newChildIDs));
1415: PetscCall(PetscSFDestroy(&parentSF));
1416: }
1417: pmesh->useAnchors = mesh->useAnchors;
1418: PetscFunctionReturn(PETSC_SUCCESS);
1419: }
1421: /*@
1422: DMPlexSetPartitionBalance - Should distribution of the `DM` attempt to balance the shared point partition?
1424: Input Parameters:
1425: + dm - The `DMPLEX` object
1426: - flg - Balance the partition?
1428: Level: intermediate
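Example Usage:
A minimal sketch, assuming `dm` is a `DMPLEX` about to be distributed; the balanced ownership vote is then used when the point `PetscSF` of the distributed mesh is built:
.vb
  DM dmDist;

  PetscCall(DMPlexSetPartitionBalance(dm, PETSC_TRUE));
  PetscCall(DMPlexDistribute(dm, 0, NULL, &dmDist));
.ve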
1430: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexGetPartitionBalance()`
1431: @*/
1432: PetscErrorCode DMPlexSetPartitionBalance(DM dm, PetscBool flg)
1433: {
1434: DM_Plex *mesh = (DM_Plex *)dm->data;
1436: PetscFunctionBegin;
1437: mesh->partitionBalance = flg;
1438: PetscFunctionReturn(PETSC_SUCCESS);
1439: }
1441: /*@
1442: DMPlexGetPartitionBalance - Does distribution of the `DM` attempt to balance the shared point partition?
1444: Input Parameter:
1445: . dm - The `DMPLEX` object
1447: Output Parameter:
1448: . flg - Balance the partition?
1450: Level: intermediate
1452: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexSetPartitionBalance()`
1453: @*/
1454: PetscErrorCode DMPlexGetPartitionBalance(DM dm, PetscBool *flg)
1455: {
1456: DM_Plex *mesh = (DM_Plex *)dm->data;
1458: PetscFunctionBegin;
1460: PetscAssertPointer(flg, 2);
1461: *flg = mesh->partitionBalance;
1462: PetscFunctionReturn(PETSC_SUCCESS);
1463: }
1465: typedef struct {
1466: PetscInt vote, rank, index;
1467: } Petsc3Int;
1469: /* MaxLoc, but carry a third piece of information around */
1470: static void MPIAPI MaxLocCarry(void *in_, void *inout_, PetscMPIInt *len_, MPI_Datatype *dtype)
1471: {
1472: Petsc3Int *a = (Petsc3Int *)inout_;
1473: Petsc3Int *b = (Petsc3Int *)in_;
1474: PetscInt i, len = *len_;
1475: for (i = 0; i < len; i++) {
1476: if (a[i].vote < b[i].vote) {
1477: a[i].vote = b[i].vote;
1478: a[i].rank = b[i].rank;
1479: a[i].index = b[i].index;
1480: } else if (a[i].vote <= b[i].vote) {
1481: if (a[i].rank >= b[i].rank) {
1482: a[i].rank = b[i].rank;
1483: a[i].index = b[i].index;
1484: }
1485: }
1486: }
1487: }
1489: /*@
1490: DMPlexCreatePointSF - Build a point `PetscSF` from a `PetscSF` describing a point migration
1492: Input Parameters:
1493: + dm - The source `DMPLEX` object
1494: . migrationSF - The star forest that describes the parallel point remapping
1495: - ownership - Flag causing a vote to determine point ownership
1497: Output Parameter:
1498: . pointSF - The star forest describing the point overlap in the remapped `DM`
1500: Level: developer
1502: Note:
1503: Output `pointSF` is guaranteed to have the array of local indices (leaves) sorted.
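Example Usage:
A sketch mirroring the internal use in `DMPlexDistribute()`: the stratified migration `PetscSF` is turned into the point `PetscSF` of the distributed mesh `dmDist` (both assumed to exist already):
.vb
  PetscSF pointSF;

  PetscCall(DMPlexCreatePointSF(dmDist, sfMigration, PETSC_TRUE, &pointSF));
  PetscCall(DMSetPointSF(dmDist, pointSF));
  PetscCall(PetscSFDestroy(&pointSF));
.ve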
1505: .seealso: `DMPLEX`, `PetscSF`, `DM`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
1506: @*/
1507: PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF)
1508: {
1509: PetscMPIInt rank, size;
1510: PetscInt p, nroots, nleaves, idx, npointLeaves;
1511: PetscInt *pointLocal;
1512: const PetscInt *leaves;
1513: const PetscSFNode *roots;
1514: PetscSFNode *rootNodes, *leafNodes, *pointRemote;
1515: Vec shifts;
1516: const PetscInt numShifts = 13759;
1517: const PetscScalar *shift = NULL;
1518: const PetscBool shiftDebug = PETSC_FALSE;
1519: PetscBool balance;
1521: PetscFunctionBegin;
1523: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)dm), &rank));
1524: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)dm), &size));
1525: PetscCall(PetscLogEventBegin(DMPLEX_CreatePointSF, dm, 0, 0, 0));
1526: PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)dm), pointSF));
1527: PetscCall(PetscSFSetFromOptions(*pointSF));
1528: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
1530: PetscCall(DMPlexGetPartitionBalance(dm, &balance));
1531: PetscCall(PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots));
1532: PetscCall(PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes));
1533: if (ownership) {
1534: MPI_Op op;
1535: MPI_Datatype datatype;
1536: Petsc3Int *rootVote = NULL, *leafVote = NULL;
1538: /* If balancing, we compute a random cyclic shift of the rank for each remote point. That way, the max will evenly distribute among ranks. */
1539: if (balance) {
1540: PetscRandom r;
1542: PetscCall(PetscRandomCreate(PETSC_COMM_SELF, &r));
1543: PetscCall(PetscRandomSetInterval(r, 0, 2467 * size));
1544: PetscCall(VecCreate(PETSC_COMM_SELF, &shifts));
1545: PetscCall(VecSetSizes(shifts, numShifts, numShifts));
1546: PetscCall(VecSetType(shifts, VECSTANDARD));
1547: PetscCall(VecSetRandom(shifts, r));
1548: PetscCall(PetscRandomDestroy(&r));
1549: PetscCall(VecGetArrayRead(shifts, &shift));
1550: }
1552: PetscCall(PetscMalloc1(nroots, &rootVote));
1553: PetscCall(PetscMalloc1(nleaves, &leafVote));
1554: /* Point ownership vote: Process with highest rank owns shared points */
1555: for (p = 0; p < nleaves; ++p) {
1556: if (shiftDebug) {
1557: PetscCall(PetscSynchronizedPrintf(PetscObjectComm((PetscObject)dm), "[%d] Point %" PetscInt_FMT " RemotePoint %" PetscInt_FMT " Shift %" PetscInt_FMT " MyRank %" PetscInt_FMT "\n", rank, leaves ? leaves[p] : p, roots[p].index,
1558: (PetscInt)PetscRealPart(shift[roots[p].index % numShifts]), (rank + (shift ? (PetscInt)PetscRealPart(shift[roots[p].index % numShifts]) : 0)) % size));
1559: }
1560: /* Either put in a bid or we know we own it */
1561: leafVote[p].vote = (rank + (shift ? (PetscInt)PetscRealPart(shift[roots[p].index % numShifts]) : 0)) % size;
1562: leafVote[p].rank = rank;
1563: leafVote[p].index = p;
1564: }
1565: for (p = 0; p < nroots; p++) {
1566: /* Roots must not participate in the reduction; flag them so that MAXLOC ignores them */
1567: rootVote[p].vote = -3;
1568: rootVote[p].rank = -3;
1569: rootVote[p].index = -3;
1570: }
1571: PetscCallMPI(MPI_Type_contiguous(3, MPIU_INT, &datatype));
1572: PetscCallMPI(MPI_Type_commit(&datatype));
1573: PetscCallMPI(MPI_Op_create(&MaxLocCarry, 1, &op));
1574: PetscCall(PetscSFReduceBegin(migrationSF, datatype, leafVote, rootVote, op));
1575: PetscCall(PetscSFReduceEnd(migrationSF, datatype, leafVote, rootVote, op));
1576: PetscCallMPI(MPI_Op_free(&op));
1577: PetscCallMPI(MPI_Type_free(&datatype));
1578: for (p = 0; p < nroots; p++) {
1579: rootNodes[p].rank = rootVote[p].rank;
1580: rootNodes[p].index = rootVote[p].index;
1581: }
1582: PetscCall(PetscFree(leafVote));
1583: PetscCall(PetscFree(rootVote));
1584: } else {
1585: for (p = 0; p < nroots; p++) {
1586: rootNodes[p].index = -1;
1587: rootNodes[p].rank = rank;
1588: }
1589: for (p = 0; p < nleaves; p++) {
1590: /* Write new local id into old location */
1591: if (roots[p].rank == rank) rootNodes[roots[p].index].index = leaves ? leaves[p] : p;
1592: }
1593: }
1594: PetscCall(PetscSFBcastBegin(migrationSF, MPIU_SF_NODE, rootNodes, leafNodes, MPI_REPLACE));
1595: PetscCall(PetscSFBcastEnd(migrationSF, MPIU_SF_NODE, rootNodes, leafNodes, MPI_REPLACE));
1597: for (npointLeaves = 0, p = 0; p < nleaves; p++) {
1598: if (leafNodes[p].rank != rank) npointLeaves++;
1599: }
1600: PetscCall(PetscMalloc1(npointLeaves, &pointLocal));
1601: PetscCall(PetscMalloc1(npointLeaves, &pointRemote));
1602: for (idx = 0, p = 0; p < nleaves; p++) {
1603: if (leafNodes[p].rank != rank) {
1604: /* Note that pointLocal is automatically sorted as it is a sublist of 0, ..., nleaves-1 */
1605: pointLocal[idx] = p;
1606: pointRemote[idx] = leafNodes[p];
1607: idx++;
1608: }
1609: }
1610: if (shift) {
1611: PetscCall(VecRestoreArrayRead(shifts, &shift));
1612: PetscCall(VecDestroy(&shifts));
1613: }
1614: if (shiftDebug) PetscCall(PetscSynchronizedFlush(PetscObjectComm((PetscObject)dm), PETSC_STDOUT));
1615: PetscCall(PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER));
1616: PetscCall(PetscFree2(rootNodes, leafNodes));
1617: PetscCall(PetscLogEventEnd(DMPLEX_CreatePointSF, dm, 0, 0, 0));
1618: if (PetscDefined(USE_DEBUG)) PetscCall(DMPlexCheckPointSF(dm, *pointSF, PETSC_FALSE));
1619: PetscFunctionReturn(PETSC_SUCCESS);
1620: }
1622: /*@
1623: DMPlexMigrate - Migrates internal `DM` data over the supplied star forest
1625: Collective
1627: Input Parameters:
1628: + dm - The source `DMPLEX` object
1629: - sf - The star forest communication context describing the migration pattern
1631: Output Parameter:
1632: . targetDM - The target `DMPLEX` object
1634: Level: intermediate
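Example Usage:
A minimal sketch, assuming `sfMigration` is a point migration `PetscSF` (for instance the one built internally by `DMPlexDistribute()`):
.vb
  DM dmTarget;

  PetscCall(DMPlexCreate(PetscObjectComm((PetscObject)dm), &dmTarget));
  PetscCall(DMPlexMigrate(dm, sfMigration, dmTarget));
.ve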
1636: .seealso: `DMPLEX`, `PetscSF`, `DM`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
1637: @*/
1638: PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM)
1639: {
1640: MPI_Comm comm;
1641: PetscInt dim, cdim, nroots;
1642: PetscSF sfPoint;
1643: ISLocalToGlobalMapping ltogMigration;
1644: ISLocalToGlobalMapping ltogOriginal = NULL;
1646: PetscFunctionBegin;
1648: PetscCall(PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0));
1649: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1650: PetscCall(DMGetDimension(dm, &dim));
1651: PetscCall(DMSetDimension(targetDM, dim));
1652: PetscCall(DMGetCoordinateDim(dm, &cdim));
1653: PetscCall(DMSetCoordinateDim(targetDM, cdim));
1655: /* Check for a one-to-all distribution pattern */
1656: PetscCall(DMGetPointSF(dm, &sfPoint));
1657: PetscCall(PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL));
1658: if (nroots >= 0) {
1659: IS isOriginal;
1660: PetscInt n, size, nleaves;
1661: PetscInt *numbering_orig, *numbering_new;
1663: /* Get the original point numbering */
1664: PetscCall(DMPlexCreatePointNumbering(dm, &isOriginal));
1665: PetscCall(ISLocalToGlobalMappingCreateIS(isOriginal, &ltogOriginal));
1666: PetscCall(ISLocalToGlobalMappingGetSize(ltogOriginal, &size));
1667: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt **)&numbering_orig));
1668: /* Convert to positive global numbers */
1669: for (n = 0; n < size; n++) {
1670: if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n] + 1);
1671: }
1672: /* Derive the new local-to-global mapping from the old one */
1673: PetscCall(PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL));
1674: PetscCall(PetscMalloc1(nleaves, &numbering_new));
1675: PetscCall(PetscSFBcastBegin(sf, MPIU_INT, numbering_orig, numbering_new, MPI_REPLACE));
1676: PetscCall(PetscSFBcastEnd(sf, MPIU_INT, numbering_orig, numbering_new, MPI_REPLACE));
1677: PetscCall(ISLocalToGlobalMappingCreate(comm, 1, nleaves, numbering_new, PETSC_OWN_POINTER, &ltogMigration));
1678: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltogOriginal, (const PetscInt **)&numbering_orig));
1679: PetscCall(ISDestroy(&isOriginal));
1680: } else {
1681: /* One-to-all distribution pattern: We can derive LToG from SF */
1682: PetscCall(ISLocalToGlobalMappingCreateSF(sf, 0, &ltogMigration));
1683: }
1684: PetscCall(PetscObjectSetName((PetscObject)ltogMigration, "Point renumbering for DM migration"));
1685: PetscCall(ISLocalToGlobalMappingViewFromOptions(ltogMigration, (PetscObject)dm, "-partition_view"));
1686: /* Migrate DM data to target DM */
1687: PetscCall(DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM));
1688: PetscCall(DMPlexDistributeLabels(dm, sf, targetDM));
1689: PetscCall(DMPlexDistributeCoordinates(dm, sf, targetDM));
1690: PetscCall(DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM));
1691: PetscCall(ISLocalToGlobalMappingDestroy(&ltogOriginal));
1692: PetscCall(ISLocalToGlobalMappingDestroy(&ltogMigration));
1693: PetscCall(PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0));
1694: PetscFunctionReturn(PETSC_SUCCESS);
1695: }
1697: /*@
1698: DMPlexRemapMigrationSF - Rewrite the distribution SF to account for overlap
1700: Collective
1702: Input Parameters:
1703: + sfOverlap - The `PetscSF` object just for the overlap
1704: - sfMigration - The original distribution `PetscSF` object
1706: Output Parameter:
1707: . sfMigrationNew - A rewritten `PetscSF` object that incorporates the overlap
1709: Level: developer
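Example Usage:
A sketch mirroring the use in `DMPlexDistribute()`: the overlap migration is composed with the original migration so that the resulting `PetscSF` maps directly from the serial mesh to the overlapped parallel mesh:
.vb
  PetscSF sfComplete;

  PetscCall(DMPlexRemapMigrationSF(sfOverlap, sfMigration, &sfComplete));
  PetscCall(PetscSFDestroy(&sfMigration));
  sfMigration = sfComplete;
.ve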
1711: .seealso: `DMPLEX`, `DM`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`, `DMPlexGetOverlap()`
1712: @*/
1713: PetscErrorCode DMPlexRemapMigrationSF(PetscSF sfOverlap, PetscSF sfMigration, PetscSF *sfMigrationNew)
1714: {
1715: PetscSFNode *newRemote, *permRemote = NULL;
1716: const PetscInt *oldLeaves;
1717: const PetscSFNode *oldRemote;
1718: PetscInt nroots, nleaves, noldleaves;
1720: PetscFunctionBegin;
1721: PetscCall(PetscSFGetGraph(sfMigration, &nroots, &noldleaves, &oldLeaves, &oldRemote));
1722: PetscCall(PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL));
1723: PetscCall(PetscMalloc1(nleaves, &newRemote));
1724: /* oldRemote: original root point mapping to original leaf point
1725: newRemote: original leaf point mapping to overlapped leaf point */
1726: if (oldLeaves) {
1727: /* After stratification, the migration remotes may not be in root (canonical) order, so we reorder using the leaf numbering */
1728: PetscCall(PetscMalloc1(noldleaves, &permRemote));
1729: for (PetscInt l = 0; l < noldleaves; ++l) permRemote[oldLeaves[l]] = oldRemote[l];
1730: oldRemote = permRemote;
1731: }
1732: PetscCall(PetscSFBcastBegin(sfOverlap, MPIU_SF_NODE, oldRemote, newRemote, MPI_REPLACE));
1733: PetscCall(PetscSFBcastEnd(sfOverlap, MPIU_SF_NODE, oldRemote, newRemote, MPI_REPLACE));
1734: PetscCall(PetscFree(permRemote));
1735: PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)sfOverlap), sfMigrationNew));
1736: PetscCall(PetscSFSetGraph(*sfMigrationNew, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER));
1737: PetscFunctionReturn(PETSC_SUCCESS);
1738: }
1740: /* For DG-like methods, the code below is equivalent to (but faster than) calling
1741: DMPlexCreateClosureIndex(dm, section) */
1742: static PetscErrorCode DMPlexCreateClosureIndex_CELL(DM dm, PetscSection section)
1743: {
1744: PetscSection clSection;
1745: IS clPoints;
1746: PetscInt pStart, pEnd, point;
1747: PetscInt *closure, pos = 0;
1749: PetscFunctionBegin;
1750: if (!section) PetscCall(DMGetLocalSection(dm, &section));
1751: PetscCall(DMPlexGetHeightStratum(dm, 0, &pStart, &pEnd));
1752: PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)dm), &clSection));
1753: PetscCall(PetscSectionSetChart(clSection, pStart, pEnd));
1754: PetscCall(PetscMalloc1((2 * (pEnd - pStart)), &closure));
1755: for (point = pStart; point < pEnd; point++) {
1756: PetscCall(PetscSectionSetDof(clSection, point, 2));
1757: closure[pos++] = point; /* point */
1758: closure[pos++] = 0; /* orientation */
1759: }
1760: PetscCall(PetscSectionSetUp(clSection));
1761: PetscCall(ISCreateGeneral(PETSC_COMM_SELF, 2 * (pEnd - pStart), closure, PETSC_OWN_POINTER, &clPoints));
1762: PetscCall(PetscSectionSetClosureIndex(section, (PetscObject)dm, clSection, clPoints));
1763: PetscCall(PetscSectionDestroy(&clSection));
1764: PetscCall(ISDestroy(&clPoints));
1765: PetscFunctionReturn(PETSC_SUCCESS);
1766: }
1768: static PetscErrorCode DMPlexDistribute_Multistage(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel)
1769: {
1770: MPI_Comm comm = PetscObjectComm((PetscObject)dm);
1771: PetscPartitioner part;
1772: PetscBool balance, printHeader;
1773: PetscInt nl = 0;
1775: PetscFunctionBegin;
1776: if (sf) *sf = NULL;
1777: *dmParallel = NULL;
1779: PetscCall(DMPlexGetPartitioner(dm, &part));
1780: printHeader = part->printHeader;
1781: PetscCall(DMPlexGetPartitionBalance(dm, &balance));
1782: PetscCall(PetscPartitionerSetUp(part));
1783: PetscCall(PetscLogEventBegin(DMPLEX_DistributeMultistage, dm, 0, 0, 0));
1784: PetscCall(PetscPartitionerMultistageGetStages_Multistage(part, &nl, NULL));
1785: for (PetscInt l = 0; l < nl; l++) {
1786: PetscInt ovl = (l < nl - 1) ? 0 : overlap;
1787: PetscSF sfDist;
1788: DM dmDist;
1790: PetscCall(DMPlexSetPartitionBalance(dm, balance));
1791: PetscCall(DMViewFromOptions(dm, (PetscObject)part, "-petscpartitioner_multistage_dm_view"));
1792: PetscCall(PetscPartitionerMultistageSetStage_Multistage(part, l, (PetscObject)dm));
1793: PetscCall(DMPlexSetPartitioner(dm, part));
1794: PetscCall(DMPlexDistribute(dm, ovl, &sfDist, &dmDist));
1795: PetscCheck(dmDist, comm, PETSC_ERR_PLIB, "No distributed DM generated (stage %" PetscInt_FMT ")", l);
1796: PetscCheck(sfDist, comm, PETSC_ERR_PLIB, "No SF generated (stage %" PetscInt_FMT ")", l);
1797: part->printHeader = PETSC_FALSE;
1799: /* Propagate cell weights to the next level (if any, and if not the final dm) */
1800: if (part->usevwgt && dm->localSection && l != nl - 1) {
1801: PetscSection oldSection, newSection;
1803: PetscCall(DMGetLocalSection(dm, &oldSection));
1804: PetscCall(DMGetLocalSection(dmDist, &newSection));
1805: PetscCall(PetscSFDistributeSection(sfDist, oldSection, NULL, newSection));
1806: PetscCall(DMPlexCreateClosureIndex_CELL(dmDist, newSection));
1807: }
1808: if (!sf) PetscCall(PetscSFDestroy(&sfDist));
1809: if (l > 0) PetscCall(DMDestroy(&dm));
1811: if (sf && *sf) {
1812: PetscSF sfA = *sf, sfB = sfDist;
1813: PetscCall(PetscSFCompose(sfA, sfB, &sfDist));
1814: PetscCall(PetscSFDestroy(&sfA));
1815: PetscCall(PetscSFDestroy(&sfB));
1816: }
1818: if (sf) *sf = sfDist;
1819: dm = *dmParallel = dmDist;
1820: }
1821: PetscCall(PetscPartitionerMultistageSetStage_Multistage(part, 0, NULL)); /* reset */
1822: PetscCall(PetscLogEventEnd(DMPLEX_DistributeMultistage, dm, 0, 0, 0));
1823: part->printHeader = printHeader;
1824: PetscFunctionReturn(PETSC_SUCCESS);
1825: }
1827: /*@
1828: DMPlexDistribute - Distributes the mesh and any associated sections.
1830: Collective
1832: Input Parameters:
1833: + dm - The original `DMPLEX` object
1834: - overlap - The overlap of partitions, 0 is the default
1836: Output Parameters:
1837: + sf - The `PetscSF` used for point distribution; pass `NULL` if not needed
1838: - dmParallel - The distributed `DMPLEX` object
1840: Level: intermediate
1842: Notes:
1843: If the mesh was not distributed, the output `dmParallel` will be `NULL`.
1845: The user can control the definition of adjacency for the mesh using `DMSetAdjacency()`. They should choose the combination appropriate for the function
1846: representation on the mesh.
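Example Usage:
A minimal sketch, assuming `comm` is the communicator for the mesh; the mesh itself is configured from the options database:
.vb
  DM      dm, dmDist;
  PetscSF sfDist;

  PetscCall(DMCreate(comm, &dm));
  PetscCall(DMSetType(dm, DMPLEX));
  PetscCall(DMSetFromOptions(dm));
  PetscCall(DMPlexDistribute(dm, 0, &sfDist, &dmDist));
  if (dmDist) { /* NULL when running on a single rank */
    PetscCall(DMDestroy(&dm));
    dm = dmDist;
  }
  PetscCall(PetscSFDestroy(&sfDist));
.ve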
1848: .seealso: `DMPLEX`, `DM`, `DMPlexCreate()`, `DMSetAdjacency()`, `DMPlexGetOverlap()`
1849: @*/
1850: PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PeOp PetscSF *sf, DM *dmParallel)
1851: {
1852: MPI_Comm comm;
1853: PetscPartitioner partitioner;
1854: IS cellPart;
1855: PetscSection cellPartSection;
1856: DM dmCoord;
1857: DMLabel lblPartition, lblMigration;
1858: PetscSF sfMigration, sfStratified, sfPoint;
1859: PetscBool flg, balance, isms;
1860: PetscMPIInt rank, size;
1862: PetscFunctionBegin;
1865: if (sf) PetscAssertPointer(sf, 3);
1866: PetscAssertPointer(dmParallel, 4);
1868: if (sf) *sf = NULL;
1869: *dmParallel = NULL;
1870: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1871: PetscCallMPI(MPI_Comm_rank(comm, &rank));
1872: PetscCallMPI(MPI_Comm_size(comm, &size));
1873: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
1875: /* Handle multistage partitioner */
1876: PetscCall(DMPlexGetPartitioner(dm, &partitioner));
1877: PetscCall(PetscObjectTypeCompare((PetscObject)partitioner, PETSCPARTITIONERMULTISTAGE, &isms));
1878: if (isms) {
1879: PetscObject stagedm;
1881: PetscCall(PetscPartitionerMultistageGetStage_Multistage(partitioner, NULL, &stagedm));
1882: if (!stagedm) { /* No stage dm present, start the multistage algorithm */
1883: PetscCall(DMPlexDistribute_Multistage(dm, overlap, sf, dmParallel));
1884: PetscFunctionReturn(PETSC_SUCCESS);
1885: }
1886: }
1887: PetscCall(PetscLogEventBegin(DMPLEX_Distribute, dm, 0, 0, 0));
1888: /* Create cell partition */
1889: PetscCall(PetscLogEventBegin(DMPLEX_Partition, dm, 0, 0, 0));
1890: PetscCall(PetscSectionCreate(comm, &cellPartSection));
1891: PetscCall(PetscPartitionerDMPlexPartition(partitioner, dm, NULL, cellPartSection, &cellPart));
1892: PetscCall(PetscLogEventBegin(DMPLEX_PartSelf, dm, 0, 0, 0));
1893: {
1894: /* Convert partition to DMLabel */
1895: IS is;
1896: PetscHSetI ht;
1897: const PetscInt *points;
1898: PetscInt *iranks;
1899: PetscInt pStart, pEnd, proc, npoints, poff = 0, nranks;
1901: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Point Partition", &lblPartition));
1902: /* Preallocate strata */
1903: PetscCall(PetscHSetICreate(&ht));
1904: PetscCall(PetscSectionGetChart(cellPartSection, &pStart, &pEnd));
1905: for (proc = pStart; proc < pEnd; proc++) {
1906: PetscCall(PetscSectionGetDof(cellPartSection, proc, &npoints));
1907: if (npoints) PetscCall(PetscHSetIAdd(ht, proc));
1908: }
1909: PetscCall(PetscHSetIGetSize(ht, &nranks));
1910: PetscCall(PetscMalloc1(nranks, &iranks));
1911: PetscCall(PetscHSetIGetElems(ht, &poff, iranks));
1912: PetscCall(PetscHSetIDestroy(&ht));
1913: PetscCall(DMLabelAddStrata(lblPartition, nranks, iranks));
1914: PetscCall(PetscFree(iranks));
1915: /* Inline DMPlexPartitionLabelClosure() */
1916: PetscCall(ISGetIndices(cellPart, &points));
1917: PetscCall(PetscSectionGetChart(cellPartSection, &pStart, &pEnd));
1918: for (proc = pStart; proc < pEnd; proc++) {
1919: PetscCall(PetscSectionGetDof(cellPartSection, proc, &npoints));
1920: if (!npoints) continue;
1921: PetscCall(PetscSectionGetOffset(cellPartSection, proc, &poff));
1922: PetscCall(DMPlexClosurePoints_Private(dm, npoints, points + poff, &is));
1923: PetscCall(DMLabelSetStratumIS(lblPartition, proc, is));
1924: PetscCall(ISDestroy(&is));
1925: }
1926: PetscCall(ISRestoreIndices(cellPart, &points));
1927: }
1928: PetscCall(PetscLogEventEnd(DMPLEX_PartSelf, dm, 0, 0, 0));
1930: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Point migration", &lblMigration));
1931: PetscCall(DMPlexPartitionLabelInvert(dm, lblPartition, NULL, lblMigration));
1932: PetscCall(DMPlexPartitionLabelCreateSF(dm, lblMigration, PETSC_TRUE, &sfMigration));
1933: PetscCall(DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified));
1934: PetscCall(PetscSFDestroy(&sfMigration));
1935: sfMigration = sfStratified;
1936: PetscCall(PetscSFSetUp(sfMigration));
1937: PetscCall(PetscLogEventEnd(DMPLEX_Partition, dm, 0, 0, 0));
1938: PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-partition_view", &flg));
1939: if (flg) {
1940: PetscCall(DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_(comm)));
1941: PetscCall(PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_(comm)));
1942: }
1944: /* Create non-overlapping parallel DM and migrate internal data */
1945: PetscCall(DMPlexCreate(comm, dmParallel));
1946: PetscCall(PetscObjectSetName((PetscObject)*dmParallel, "Parallel Mesh"));
1947: PetscCall(DMPlexMigrate(dm, sfMigration, *dmParallel));
1949: /* Build the point SF without overlap */
1950: PetscCall(DMPlexGetPartitionBalance(dm, &balance));
1951: PetscCall(DMPlexSetPartitionBalance(*dmParallel, balance));
1952: PetscCall(DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint));
1953: PetscCall(DMSetPointSF(*dmParallel, sfPoint));
1954: PetscCall(DMPlexMigrateIsoperiodicFaceSF_Internal(dm, *dmParallel, sfMigration));
1955: PetscCall(DMGetCoordinateDM(*dmParallel, &dmCoord));
1956: if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
1957: if (flg) PetscCall(PetscSFView(sfPoint, NULL));
1959: if (overlap > 0) {
1960: DM dmOverlap;
1961: PetscSF sfOverlap, sfOverlapPoint;
1963: /* Add the partition overlap to the distributed DM */
1964: PetscCall(DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap));
1965: PetscCall(DMDestroy(dmParallel));
1966: *dmParallel = dmOverlap;
1967: if (flg) {
1968: PetscCall(PetscPrintf(comm, "Overlap Migration SF:\n"));
1969: PetscCall(PetscSFView(sfOverlap, NULL));
1970: }
1972: /* Re-map the migration SF to establish the full migration pattern */
1973: PetscCall(DMPlexRemapMigrationSF(sfOverlap, sfMigration, &sfOverlapPoint));
1974: PetscCall(PetscSFDestroy(&sfOverlap));
1975: PetscCall(PetscSFDestroy(&sfMigration));
1976: sfMigration = sfOverlapPoint;
1977: }
1978: /* Cleanup Partition */
1979: PetscCall(DMLabelDestroy(&lblPartition));
1980: PetscCall(DMLabelDestroy(&lblMigration));
1981: PetscCall(PetscSectionDestroy(&cellPartSection));
1982: PetscCall(ISDestroy(&cellPart));
1983: PetscCall(DMPlexCopy_Internal(dm, PETSC_TRUE, PETSC_FALSE, *dmParallel));
1984: // Create sfNatural, need discretization information
1985: PetscCall(DMCopyDisc(dm, *dmParallel));
1986: if (dm->localSection) {
1987: PetscSection psection;
1988: PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)dm), &psection));
1989: PetscCall(PetscSFDistributeSection(sfMigration, dm->localSection, NULL, psection));
1990: PetscCall(DMSetLocalSection(*dmParallel, psection));
1991: PetscCall(PetscSectionDestroy(&psection));
1992: }
1993: if (dm->useNatural) {
1994: PetscSection section;
1996: PetscCall(DMSetUseNatural(*dmParallel, PETSC_TRUE));
1997: PetscCall(DMGetLocalSection(dm, &section));
1999: /* If DM has useNatural = PETSC_TRUE, but no sfNatural is set, the DM's Section is considered natural */
2000: /* sfMigration and sfNatural are respectively the point and dofs SFs mapping to this natural DM */
2001: /* Compose with a previous sfNatural if present */
2002: if (dm->sfNatural) {
2003: PetscCall(DMPlexMigrateGlobalToNaturalSF(dm, *dmParallel, dm->sfNatural, sfMigration, &(*dmParallel)->sfNatural));
2004: } else {
2005: PetscCall(DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural));
2006: }
2007: /* Compose with a previous sfMigration if present */
2008: if (dm->sfMigration) {
2009: PetscSF naturalPointSF;
2011: PetscCall(PetscSFCompose(dm->sfMigration, sfMigration, &naturalPointSF));
2012: PetscCall(PetscSFDestroy(&sfMigration));
2013: sfMigration = naturalPointSF;
2014: }
2015: }
2016: /* Cleanup */
2017: if (sf) {
2018: *sf = sfMigration;
2019: } else PetscCall(PetscSFDestroy(&sfMigration));
2020: PetscCall(PetscSFDestroy(&sfPoint));
2021: PetscCall(PetscLogEventEnd(DMPLEX_Distribute, dm, 0, 0, 0));
2022: PetscFunctionReturn(PETSC_SUCCESS);
2023: }
2025: PetscErrorCode DMPlexDistributeOverlap_Internal(DM dm, PetscInt overlap, MPI_Comm newcomm, const char *ovlboundary, PetscSF *sf, DM *dmOverlap)
2026: {
2027: DM_Plex *mesh = (DM_Plex *)dm->data;
2028: MPI_Comm comm;
2029: PetscMPIInt size, rank;
2030: PetscSection rootSection, leafSection;
2031: IS rootrank, leafrank;
2032: DM dmCoord;
2033: DMLabel lblOverlap;
2034: PetscSF sfOverlap, sfStratified, sfPoint;
2035: PetscBool clear_ovlboundary = PETSC_FALSE;
2037: PetscFunctionBegin;
2038: if (sf) *sf = NULL;
2039: *dmOverlap = NULL;
2040: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
2041: PetscCallMPI(MPI_Comm_size(comm, &size));
2042: PetscCallMPI(MPI_Comm_rank(comm, &rank));
2043: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
2044: {
2045: // We need to get options for the already-distributed mesh, so it must be done here
2046: PetscInt overlap;
2047: const char *prefix;
2048: char oldPrefix[PETSC_MAX_PATH_LEN];
2050: oldPrefix[0] = '\0';
2051: PetscCall(PetscObjectGetOptionsPrefix((PetscObject)dm, &prefix));
2052: PetscCall(PetscStrncpy(oldPrefix, prefix, sizeof(oldPrefix)));
2053: PetscCall(PetscObjectAppendOptionsPrefix((PetscObject)dm, "dist_"));
2054: PetscCall(DMPlexGetOverlap(dm, &overlap));
2055: PetscObjectOptionsBegin((PetscObject)dm);
2056: PetscCall(DMSetFromOptions_Overlap_Plex(dm, PetscOptionsObject, &overlap));
2057: PetscOptionsEnd();
2058: PetscCall(PetscObjectSetOptionsPrefix((PetscObject)dm, oldPrefix[0] == '\0' ? NULL : oldPrefix));
2059: }
2060: if (ovlboundary) {
2061: PetscBool flg;
2062: PetscCall(DMHasLabel(dm, ovlboundary, &flg));
2063: if (!flg) {
2064: DMLabel label;
2066: PetscCall(DMCreateLabel(dm, ovlboundary));
2067: PetscCall(DMGetLabel(dm, ovlboundary, &label));
2068: PetscCall(DMPlexMarkBoundaryFaces(dm, 1, label));
2069: clear_ovlboundary = PETSC_TRUE;
2070: }
2071: }
2072: PetscCall(PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0));
2073: /* Compute point overlap with neighbouring processes on the distributed DM */
2074: PetscCall(PetscLogEventBegin(DMPLEX_Partition, dm, 0, 0, 0));
2075: PetscCall(PetscSectionCreate(newcomm, &rootSection));
2076: PetscCall(PetscSectionCreate(newcomm, &leafSection));
2077: PetscCall(DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank));
2078: if (mesh->numOvLabels) PetscCall(DMPlexCreateOverlapLabelFromLabels(dm, mesh->numOvLabels, mesh->ovLabels, mesh->ovValues, mesh->numOvExLabels, mesh->ovExLabels, mesh->ovExValues, rootSection, rootrank, leafSection, leafrank, &lblOverlap));
2079: else PetscCall(DMPlexCreateOverlapLabel(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap));
2080: /* Convert overlap label to stratified migration SF */
2081: PetscCall(DMPlexPartitionLabelCreateSF(dm, lblOverlap, PETSC_FALSE, &sfOverlap));
2082: PetscCall(DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified));
2083: PetscCall(PetscSFDestroy(&sfOverlap));
2084: sfOverlap = sfStratified;
2085: PetscCall(PetscObjectSetName((PetscObject)sfOverlap, "Overlap SF"));
2086: PetscCall(PetscSFSetFromOptions(sfOverlap));
2088: PetscCall(PetscSectionDestroy(&rootSection));
2089: PetscCall(PetscSectionDestroy(&leafSection));
2090: PetscCall(ISDestroy(&rootrank));
2091: PetscCall(ISDestroy(&leafrank));
2092: PetscCall(PetscLogEventEnd(DMPLEX_Partition, dm, 0, 0, 0));
2094: /* Build the overlapping DM */
2095: PetscCall(DMPlexCreate(newcomm, dmOverlap));
2096: PetscCall(PetscObjectSetName((PetscObject)*dmOverlap, "Parallel Mesh"));
2097: PetscCall(DMPlexMigrate(dm, sfOverlap, *dmOverlap));
2098: /* Store the overlap in the new DM */
2099: PetscCall(DMPlexSetOverlap_Plex(*dmOverlap, dm, overlap));
2100: /* Build the new point SF */
2101: PetscCall(DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint));
2102: PetscCall(DMSetPointSF(*dmOverlap, sfPoint));
2103: PetscCall(DMGetCoordinateDM(*dmOverlap, &dmCoord));
2104: if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
2105: PetscCall(DMGetCellCoordinateDM(*dmOverlap, &dmCoord));
2106: if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
2107: PetscCall(PetscSFDestroy(&sfPoint));
2108: PetscCall(DMPlexCopy_Internal(dm, PETSC_TRUE, PETSC_FALSE, *dmOverlap));
2109: /* TODO: labels stored inside the DS for regions should be handled here */
2110: PetscCall(DMCopyDisc(dm, *dmOverlap));
2111: /* Cleanup overlap partition */
2112: PetscCall(DMLabelDestroy(&lblOverlap));
2113: if (sf) *sf = sfOverlap;
2114: else PetscCall(PetscSFDestroy(&sfOverlap));
2115: if (ovlboundary) {
2116: DMLabel label;
2117: PetscBool flg;
2119: if (clear_ovlboundary) PetscCall(DMRemoveLabel(dm, ovlboundary, NULL));
2120: PetscCall(DMHasLabel(*dmOverlap, ovlboundary, &flg));
2121: PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Missing %s label", ovlboundary);
2122: PetscCall(DMGetLabel(*dmOverlap, ovlboundary, &label));
2123: PetscCall(DMPlexMarkBoundaryFaces_Internal(*dmOverlap, 1, 0, label, PETSC_TRUE));
2124: }
2125: PetscCall(PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0));
2126: PetscFunctionReturn(PETSC_SUCCESS);
2127: }
2129: /*@
2130: DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping `DM`.
2132: Collective
2134: Input Parameters:
2135: + dm - The non-overlapping distributed `DMPLEX` object
2136: - overlap - The overlap of partitions (the same on all ranks)
2138: Output Parameters:
2139: + sf - The `PetscSF` used for point distribution, or pass `NULL` if not needed
2140: - dmOverlap - The overlapping distributed `DMPLEX` object
2142: Options Database Keys:
2143: + -dm_plex_overlap_labels <name1,name2,...> - List of overlap label names
2144: . -dm_plex_overlap_values <int1,int2,...> - List of overlap label values
2145: . -dm_plex_overlap_exclude_label <name> - Label used to exclude points from overlap
2146: - -dm_plex_overlap_exclude_value <int> - Label value used to exclude points from overlap
2148: Level: advanced
2150: Notes:
2151: If the mesh was not distributed, the output `dmOverlap` will be `NULL`.
2153: The user can control the definition of adjacency for the mesh using `DMSetAdjacency()`. They should choose the combination appropriate for the function
2154: representation on the mesh.
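Example Usage:
A minimal sketch adding one layer of cell overlap to an already distributed mesh `dmDist` (assumed to exist):
.vb
  DM      dmOverlap;
  PetscSF sfOverlap;

  PetscCall(DMPlexDistributeOverlap(dmDist, 1, &sfOverlap, &dmOverlap));
  if (dmOverlap) { /* NULL when running on a single rank */
    PetscCall(DMDestroy(&dmDist));
    dmDist = dmOverlap;
  }
  PetscCall(PetscSFDestroy(&sfOverlap));
.ve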
2156: .seealso: `DMPLEX`, `PetscSF`, `DM`, `DMPlexCreate()`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexCreateOverlapLabel()`, `DMPlexGetOverlap()`
2157: @*/
2158: PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PeOp PetscSF *sf, DM *dmOverlap)
2159: {
2160: PetscFunctionBegin;
2163: if (sf) PetscAssertPointer(sf, 3);
2164: PetscAssertPointer(dmOverlap, 4);
2165: PetscCall(DMPlexDistributeOverlap_Internal(dm, overlap, PetscObjectComm((PetscObject)dm), NULL, sf, dmOverlap));
2166: PetscFunctionReturn(PETSC_SUCCESS);
2167: }
2169: PetscErrorCode DMPlexGetOverlap_Plex(DM dm, PetscInt *overlap)
2170: {
2171: DM_Plex *mesh = (DM_Plex *)dm->data;
2173: PetscFunctionBegin;
2174: *overlap = mesh->overlap;
2175: PetscFunctionReturn(PETSC_SUCCESS);
2176: }
2178: PetscErrorCode DMPlexSetOverlap_Plex(DM dm, DM dmSrc, PetscInt overlap)
2179: {
2180: DM_Plex *mesh = NULL;
2181: DM_Plex *meshSrc = NULL;
2183: PetscFunctionBegin;
2185: mesh = (DM_Plex *)dm->data;
2186: if (dmSrc) {
2188: meshSrc = (DM_Plex *)dmSrc->data;
2189: mesh->overlap = meshSrc->overlap;
2190: } else {
2191: mesh->overlap = 0;
2192: }
2193: mesh->overlap += overlap;
2194: PetscFunctionReturn(PETSC_SUCCESS);
2195: }
2197: /*@
2198: DMPlexGetOverlap - Get the width of the cell overlap
2200: Not Collective
2202: Input Parameter:
2203: . dm - The `DM`
2205: Output Parameter:
2206: . overlap - the width of the cell overlap
2208: Level: intermediate
2210: .seealso: `DMPLEX`, `DMPlexSetOverlap()`, `DMPlexDistribute()`
2211: @*/
2212: PetscErrorCode DMPlexGetOverlap(DM dm, PetscInt *overlap)
2213: {
2214: PetscFunctionBegin;
2216: PetscAssertPointer(overlap, 2);
2217: PetscUseMethod(dm, "DMPlexGetOverlap_C", (DM, PetscInt *), (dm, overlap));
2218: PetscFunctionReturn(PETSC_SUCCESS);
2219: }
2221: /*@
2222: DMPlexSetOverlap - Set the width of the cell overlap
2224: Logically Collective
2226: Input Parameters:
2227: + dm - The `DM`
2228: . dmSrc - The `DM` that produced this one, or `NULL`
2229: - overlap - the width of the cell overlap
2231: Level: intermediate
2233: Note:
2234: The overlap from `dmSrc` is added to `dm`
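Example Usage:
A minimal sketch recording that `dmNew` (assumed to be derived from `dm`) carries one more layer of overlap:
.vb
  PetscInt ovl;

  PetscCall(DMPlexSetOverlap(dmNew, dm, 1));
  PetscCall(DMPlexGetOverlap(dmNew, &ovl)); /* overlap of dm plus one */
.ve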
2236: .seealso: `DMPLEX`, `DMPlexGetOverlap()`, `DMPlexDistribute()`
2237: @*/
2238: PetscErrorCode DMPlexSetOverlap(DM dm, DM dmSrc, PetscInt overlap)
2239: {
2240: PetscFunctionBegin;
2243: PetscTryMethod(dm, "DMPlexSetOverlap_C", (DM, DM, PetscInt), (dm, dmSrc, overlap));
2244: PetscFunctionReturn(PETSC_SUCCESS);
2245: }
2247: PetscErrorCode DMPlexDistributeSetDefault_Plex(DM dm, PetscBool dist)
2248: {
2249: DM_Plex *mesh = (DM_Plex *)dm->data;
2251: PetscFunctionBegin;
2252: mesh->distDefault = dist;
2253: PetscFunctionReturn(PETSC_SUCCESS);
2254: }
2256: /*@
2257: DMPlexDistributeSetDefault - Set flag indicating whether the `DM` should be distributed by default
2259: Logically Collective
2261: Input Parameters:
2262: + dm - The `DM`
2263: - dist - Flag for distribution
2265: Level: intermediate
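Example Usage:
A minimal sketch that keeps the mesh on its original ranks when `DMSetFromOptions()` would otherwise distribute it by default:
.vb
  PetscCall(DMPlexDistributeSetDefault(dm, PETSC_FALSE));
  PetscCall(DMSetFromOptions(dm));
.ve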
2267: .seealso: `DMPLEX`, `DMPlexDistributeGetDefault()`, `DMPlexDistribute()`
2268: @*/
2269: PetscErrorCode DMPlexDistributeSetDefault(DM dm, PetscBool dist)
2270: {
2271: PetscFunctionBegin;
2274: PetscTryMethod(dm, "DMPlexDistributeSetDefault_C", (DM, PetscBool), (dm, dist));
2275: PetscFunctionReturn(PETSC_SUCCESS);
2276: }
2278: PetscErrorCode DMPlexDistributeGetDefault_Plex(DM dm, PetscBool *dist)
2279: {
2280: DM_Plex *mesh = (DM_Plex *)dm->data;
2282: PetscFunctionBegin;
2283: *dist = mesh->distDefault;
2284: PetscFunctionReturn(PETSC_SUCCESS);
2285: }
2287: /*@
2288: DMPlexDistributeGetDefault - Get flag indicating whether the `DM` should be distributed by default
2290: Not Collective
2292: Input Parameter:
2293: . dm - The `DM`
2295: Output Parameter:
2296: . dist - Flag for distribution
2298: Level: intermediate
2300: .seealso: `DMPLEX`, `DM`, `DMPlexDistributeSetDefault()`, `DMPlexDistribute()`
2301: @*/
2302: PetscErrorCode DMPlexDistributeGetDefault(DM dm, PetscBool *dist)
2303: {
2304: PetscFunctionBegin;
2306: PetscAssertPointer(dist, 2);
2307: PetscUseMethod(dm, "DMPlexDistributeGetDefault_C", (DM, PetscBool *), (dm, dist));
2308: PetscFunctionReturn(PETSC_SUCCESS);
2309: }
2311: /*@
2312: DMPlexGetGatherDM - Get a copy of the `DMPLEX` that gathers all points on the
2313: root process of the original's communicator.
2315: Collective
2317: Input Parameter:
2318: . dm - the original `DMPLEX` object
2320: Output Parameters:
2321: + sf - the `PetscSF` used for point distribution (optional)
2322: - gatherMesh - the gathered `DM` object, or `NULL`
2324: Level: intermediate
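Example Usage:
A minimal sketch gathering a distributed mesh onto the root process and viewing it there through a (hypothetical) options key:
.vb
  DM gatherDM;

  PetscCall(DMPlexGetGatherDM(dm, NULL, &gatherDM));
  if (gatherDM) {
    PetscCall(DMViewFromOptions(gatherDM, NULL, "-gather_dm_view"));
    PetscCall(DMDestroy(&gatherDM));
  }
.ve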
2326: .seealso: `DMPLEX`, `DM`, `PetscSF`, `DMPlexDistribute()`, `DMPlexGetRedundantDM()`
2327: @*/
2328: PetscErrorCode DMPlexGetGatherDM(DM dm, PetscSF *sf, PeOp DM *gatherMesh)
2329: {
2330: MPI_Comm comm;
2331: PetscMPIInt size;
2332: PetscPartitioner oldPart, gatherPart;
2334: PetscFunctionBegin;
2336: PetscAssertPointer(gatherMesh, 3);
2337: *gatherMesh = NULL;
2338: if (sf) *sf = NULL;
2339: comm = PetscObjectComm((PetscObject)dm);
2340: PetscCallMPI(MPI_Comm_size(comm, &size));
2341: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
2342: PetscCall(DMPlexGetPartitioner(dm, &oldPart));
2343: PetscCall(PetscObjectReference((PetscObject)oldPart));
2344: PetscCall(PetscPartitionerCreate(comm, &gatherPart));
2345: PetscCall(PetscPartitionerSetType(gatherPart, PETSCPARTITIONERGATHER));
2346: PetscCall(DMPlexSetPartitioner(dm, gatherPart));
2347: PetscCall(DMPlexDistribute(dm, 0, sf, gatherMesh));
2349: PetscCall(DMPlexSetPartitioner(dm, oldPart));
2350: PetscCall(PetscPartitionerDestroy(&gatherPart));
2351: PetscCall(PetscPartitionerDestroy(&oldPart));
2352: PetscFunctionReturn(PETSC_SUCCESS);
2353: }
2355: /*@
2356: DMPlexGetRedundantDM - Get a copy of the `DMPLEX` that is replicated in full on each process.
2358: Collective
2360: Input Parameter:
2361: . dm - the original `DMPLEX` object
2363: Output Parameters:
2364: + sf - the `PetscSF` used for point distribution (optional)
2365: - redundantMesh - the redundant `DM` object, or `NULL`
2367: Level: intermediate
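Example Usage:
A minimal sketch giving every rank a complete copy of the mesh:
.vb
  DM redundantDM;

  PetscCall(DMPlexGetRedundantDM(dm, NULL, &redundantDM));
  if (redundantDM) {
    /* every rank now holds the full mesh */
    PetscCall(DMDestroy(&redundantDM));
  }
.ve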
2369: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexGetGatherDM()`
2370: @*/
2371: PetscErrorCode DMPlexGetRedundantDM(DM dm, PetscSF *sf, PeOp DM *redundantMesh)
2372: {
2373: MPI_Comm comm;
2374: PetscMPIInt size, rank;
2375: PetscInt pStart, pEnd, p;
2376: PetscInt numPoints = -1;
2377: PetscSF migrationSF, sfPoint, gatherSF;
2378: DM gatherDM, dmCoord;
2379: PetscSFNode *points;
2381: PetscFunctionBegin;
2383: PetscAssertPointer(redundantMesh, 3);
2384: *redundantMesh = NULL;
2385: comm = PetscObjectComm((PetscObject)dm);
2386: PetscCallMPI(MPI_Comm_size(comm, &size));
2387: if (size == 1) {
2388: PetscCall(PetscObjectReference((PetscObject)dm));
2389: *redundantMesh = dm;
2390: if (sf) *sf = NULL;
2391: PetscFunctionReturn(PETSC_SUCCESS);
2392: }
2393: PetscCall(DMPlexGetGatherDM(dm, &gatherSF, &gatherDM));
2394: if (!gatherDM) PetscFunctionReturn(PETSC_SUCCESS);
2395: PetscCallMPI(MPI_Comm_rank(comm, &rank));
2396: PetscCall(DMPlexGetChart(gatherDM, &pStart, &pEnd));
2397: numPoints = pEnd - pStart;
2398: PetscCallMPI(MPI_Bcast(&numPoints, 1, MPIU_INT, 0, comm));
2399: PetscCall(PetscMalloc1(numPoints, &points));
2400: PetscCall(PetscSFCreate(comm, &migrationSF));
2401: for (p = 0; p < numPoints; p++) {
2402: points[p].index = p;
2403: points[p].rank = 0;
2404: }
2405: PetscCall(PetscSFSetGraph(migrationSF, pEnd - pStart, numPoints, NULL, PETSC_OWN_POINTER, points, PETSC_OWN_POINTER));
2406: PetscCall(DMPlexCreate(comm, redundantMesh));
2407: PetscCall(PetscObjectSetName((PetscObject)*redundantMesh, "Redundant Mesh"));
2408: PetscCall(DMPlexMigrate(gatherDM, migrationSF, *redundantMesh));
2409: /* This expresses that all points are in the overlap */
2410: PetscCall(DMPlexSetOverlap_Plex(*redundantMesh, NULL, PETSC_INT_MAX));
2411: PetscCall(DMPlexCreatePointSF(*redundantMesh, migrationSF, PETSC_FALSE, &sfPoint));
2412: PetscCall(DMSetPointSF(*redundantMesh, sfPoint));
2413: PetscCall(DMGetCoordinateDM(*redundantMesh, &dmCoord));
2414: if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
2415: PetscCall(PetscSFDestroy(&sfPoint));
2416: if (sf) {
2417: PetscSF tsf;
2419: PetscCall(PetscSFCompose(gatherSF, migrationSF, &tsf));
2420: PetscCall(DMPlexStratifyMigrationSF(dm, tsf, sf));
2421: PetscCall(PetscSFDestroy(&tsf));
2422: }
2423: PetscCall(PetscSFDestroy(&migrationSF));
2424: PetscCall(PetscSFDestroy(&gatherSF));
2425: PetscCall(DMDestroy(&gatherDM));
2426: PetscCall(DMCopyDisc(dm, *redundantMesh));
2427: PetscCall(DMPlexCopy_Internal(dm, PETSC_TRUE, PETSC_FALSE, *redundantMesh));
2428: PetscFunctionReturn(PETSC_SUCCESS);
2429: }
2431: /*@
2432: DMPlexIsDistributed - Find out whether this `DM` is distributed, i.e. more than one rank owns some points.
2434: Collective
2436: Input Parameter:
2437: . dm - The `DM` object
2439: Output Parameter:
2440: . distributed - Flag whether the `DM` is distributed
2442: Level: intermediate
2444: Notes:
2445: This currently finds out whether at least two ranks have any DAG points.
2446: This involves `MPI_Allreduce()` with one integer.
2447: The result is currently not stashed so every call to this routine involves this global communication.
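Example Usage:
A minimal sketch that distributes the mesh only if it is still held by a single rank:
.vb
  PetscBool distributed;

  PetscCall(DMPlexIsDistributed(dm, &distributed));
  if (!distributed) {
    DM dmDist;

    PetscCall(DMPlexDistribute(dm, 0, NULL, &dmDist));
    if (dmDist) {
      PetscCall(DMDestroy(&dm));
      dm = dmDist;
    }
  }
.ve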
2449: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexGetOverlap()`, `DMPlexIsInterpolated()`
2450: @*/
2451: PetscErrorCode DMPlexIsDistributed(DM dm, PetscBool *distributed)
2452: {
2453: PetscInt pStart, pEnd, count;
2454: MPI_Comm comm;
2455: PetscMPIInt size;
2457: PetscFunctionBegin;
2459: PetscAssertPointer(distributed, 2);
2460: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
2461: PetscCallMPI(MPI_Comm_size(comm, &size));
2462: if (size == 1) {
2463: *distributed = PETSC_FALSE;
2464: PetscFunctionReturn(PETSC_SUCCESS);
2465: }
2466: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
2467: count = (pEnd - pStart) > 0 ? 1 : 0;
2468: PetscCallMPI(MPIU_Allreduce(MPI_IN_PLACE, &count, 1, MPIU_INT, MPI_SUM, comm));
2469: *distributed = count > 1 ? PETSC_TRUE : PETSC_FALSE;
2470: PetscFunctionReturn(PETSC_SUCCESS);
2471: }
2473: /*@
2474: DMPlexDistributionSetName - Set the name of the specific parallel distribution
2476: Input Parameters:
2477: + dm - The `DM`
2478: - name - The name of the specific parallel distribution
2480: Level: developer
2482: Note:
2483: If distribution name is set when saving, `DMPlexTopologyView()` saves the plex's
2484: parallel distribution (i.e., partition, ownership, and local ordering of points) under
2485: this name. Conversely, if distribution name is set when loading, `DMPlexTopologyLoad()`
2486: loads the parallel distribution stored in file under this name.
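Example Usage:
A sketch storing the current parallel distribution under a chosen name; the HDF5 viewer and the file name are illustrative and assume PETSc was configured with HDF5:
.vb
  PetscViewer viewer;

  PetscCall(PetscViewerHDF5Open(PetscObjectComm((PetscObject)dm), "mesh.h5", FILE_MODE_WRITE, &viewer));
  PetscCall(DMPlexDistributionSetName(dm, "my-distribution"));
  PetscCall(DMPlexTopologyView(dm, viewer));
  PetscCall(PetscViewerDestroy(&viewer));
.ve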
2488: .seealso: `DMPLEX`, `DMPlexDistributionGetName()`, `DMPlexTopologyView()`, `DMPlexTopologyLoad()`
2489: @*/
2490: PetscErrorCode DMPlexDistributionSetName(DM dm, const char name[])
2491: {
2492: DM_Plex *mesh = (DM_Plex *)dm->data;
2494: PetscFunctionBegin;
2496: if (name) PetscAssertPointer(name, 2);
2497: PetscCall(PetscFree(mesh->distributionName));
2498: PetscCall(PetscStrallocpy(name, &mesh->distributionName));
2499: PetscFunctionReturn(PETSC_SUCCESS);
2500: }
2502: /*@
2503: DMPlexDistributionGetName - Retrieve the name of the specific parallel distribution
2505: Input Parameter:
2506: . dm - The `DM`
2508: Output Parameter:
2509: . name - The name of the specific parallel distribution
2511: Level: developer
2513: Note:
2514: If distribution name is set when saving, `DMPlexTopologyView()` saves the plex's
2515: parallel distribution (i.e., partition, ownership, and local ordering of points) under
2516: this name. Conversely, if distribution name is set when loading, `DMPlexTopologyLoad()`
2517: loads the parallel distribution stored in file under this name.
2519: .seealso: `DMPLEX`, `DMPlexDistributionSetName()`, `DMPlexTopologyView()`, `DMPlexTopologyLoad()`
2520: @*/
2521: PetscErrorCode DMPlexDistributionGetName(DM dm, const char *name[])
2522: {
2523: DM_Plex *mesh = (DM_Plex *)dm->data;
2525: PetscFunctionBegin;
2527: PetscAssertPointer(name, 2);
2528: *name = mesh->distributionName;
2529: PetscFunctionReturn(PETSC_SUCCESS);
2530: }