Actual source code: plexdistribute.c
1: #include <petsc/private/dmpleximpl.h>
2: #include <petsc/private/dmlabelimpl.h>
3: #include <petsc/private/partitionerimpl.h>
5: /*@C
6: DMPlexSetAdjacencyUser - Define adjacency in the mesh using a user-provided callback
8: Input Parameters:
9: + dm - The DM object
10: . user - The user callback, may be `NULL` (to clear the callback)
11: - ctx - context for callback evaluation, may be `NULL`
13: Level: advanced
15: Notes:
16: The caller of `DMPlexGetAdjacency()` may need to arrange that a large enough array is available for the adjacency.
18: Any setting here overrides other configuration of `DMPLEX` adjacency determination.
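   Example usage (a minimal sketch; the callback name `MyAdjacency` is illustrative, not part of PETSc):
.vb
  static PetscErrorCode MyAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[], void *ctx)
  {
    PetscFunctionBegin;
    PetscCheck(*adjSize >= 1, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Adjacency array too small");
    adj[0]   = p; // report the point as adjacent only to itself
    *adjSize = 1; // on input *adjSize is the array capacity, on output the number of points
    PetscFunctionReturn(PETSC_SUCCESS);
  }

  PetscCall(DMPlexSetAdjacencyUser(dm, MyAdjacency, NULL));
.ve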
20: .seealso: `DMPLEX`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexGetAdjacency()`, `DMPlexGetAdjacencyUser()`
21: @*/
22: PetscErrorCode DMPlexSetAdjacencyUser(DM dm, PetscErrorCode (*user)(DM, PetscInt, PetscInt *, PetscInt[], void *), void *ctx)
23: {
24: DM_Plex *mesh = (DM_Plex *)dm->data;
26: PetscFunctionBegin;
28: mesh->useradjacency = user;
29: mesh->useradjacencyctx = ctx;
30: PetscFunctionReturn(PETSC_SUCCESS);
31: }
33: /*@C
34: DMPlexGetAdjacencyUser - get the user-defined adjacency callback
36: Input Parameter:
37: . dm - The `DM` object
39: Output Parameters:
40: + user - The callback
41: - ctx - context for callback evaluation
43: Level: advanced
45: .seealso: `DMPLEX`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexGetAdjacency()`, `DMPlexSetAdjacencyUser()`
46: @*/
47: PetscErrorCode DMPlexGetAdjacencyUser(DM dm, PetscErrorCode (**user)(DM, PetscInt, PetscInt *, PetscInt[], void *), void **ctx)
48: {
49: DM_Plex *mesh = (DM_Plex *)dm->data;
51: PetscFunctionBegin;
53: if (user) *user = mesh->useradjacency;
54: if (ctx) *ctx = mesh->useradjacencyctx;
55: PetscFunctionReturn(PETSC_SUCCESS);
56: }
58: /*@
59: DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.
61: Input Parameters:
62: + dm - The `DM` object
63: - useAnchors - Flag to use the constraints. If `PETSC_TRUE`, constrained points are omitted from `DMPlexGetAdjacency()` and their anchor points appear in their place.
65: Level: intermediate
67: .seealso: `DMPLEX`, `DMGetAdjacency()`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexSetAnchors()`
68: @*/
69: PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors)
70: {
71: DM_Plex *mesh = (DM_Plex *)dm->data;
73: PetscFunctionBegin;
75: mesh->useAnchors = useAnchors;
76: PetscFunctionReturn(PETSC_SUCCESS);
77: }
79: /*@
80: DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.
82: Input Parameter:
83: . dm - The `DM` object
85: Output Parameter:
86: . useAnchors - Flag to use the constraints. If `PETSC_TRUE`, constrained points are omitted from `DMPlexGetAdjacency()` and their anchor points appear in their place.
88: Level: intermediate
90: .seealso: `DMPLEX`, `DMPlexSetAdjacencyUseAnchors()`, `DMSetAdjacency()`, `DMGetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexSetAnchors()`
91: @*/
92: PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors)
93: {
94: DM_Plex *mesh = (DM_Plex *)dm->data;
96: PetscFunctionBegin;
98: PetscAssertPointer(useAnchors, 2);
99: *useAnchors = mesh->useAnchors;
100: PetscFunctionReturn(PETSC_SUCCESS);
101: }
103: static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
104: {
105: const PetscInt *cone = NULL;
106: PetscInt numAdj = 0, maxAdjSize = *adjSize, coneSize, c;
108: PetscFunctionBeginHot;
109: PetscCall(DMPlexGetConeSize(dm, p, &coneSize));
110: PetscCall(DMPlexGetCone(dm, p, &cone));
111: for (c = 0; c <= coneSize; ++c) {
112: const PetscInt point = !c ? p : cone[c - 1];
113: const PetscInt *support = NULL;
114: PetscInt supportSize, s, q;
116: PetscCall(DMPlexGetSupportSize(dm, point, &supportSize));
117: PetscCall(DMPlexGetSupport(dm, point, &support));
118: for (s = 0; s < supportSize; ++s) {
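    /* Linear-scan deduplication: while q < numAdj we look for support[s] among the current entries
       (breaking below on a match); once q reaches numAdj, the comma expression in the loop bound
       appends support[s] and evaluates to 0, ending the scan */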
119: for (q = 0; q < numAdj || ((void)(adj[numAdj++] = support[s]), 0); ++q) {
120: if (support[s] == adj[q]) break;
121: }
122: PetscCheck(numAdj <= maxAdjSize, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
123: }
124: }
125: *adjSize = numAdj;
126: PetscFunctionReturn(PETSC_SUCCESS);
127: }
129: static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
130: {
131: const PetscInt *support = NULL;
132: PetscInt numAdj = 0, maxAdjSize = *adjSize, supportSize, s;
134: PetscFunctionBeginHot;
135: PetscCall(DMPlexGetSupportSize(dm, p, &supportSize));
136: PetscCall(DMPlexGetSupport(dm, p, &support));
137: for (s = 0; s <= supportSize; ++s) {
138: const PetscInt point = !s ? p : support[s - 1];
139: const PetscInt *cone = NULL;
140: PetscInt coneSize, c, q;
142: PetscCall(DMPlexGetConeSize(dm, point, &coneSize));
143: PetscCall(DMPlexGetCone(dm, point, &cone));
144: for (c = 0; c < coneSize; ++c) {
145: for (q = 0; q < numAdj || ((void)(adj[numAdj++] = cone[c]), 0); ++q) {
146: if (cone[c] == adj[q]) break;
147: }
148: PetscCheck(numAdj <= maxAdjSize, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
149: }
150: }
151: *adjSize = numAdj;
152: PetscFunctionReturn(PETSC_SUCCESS);
153: }
155: static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[])
156: {
157: PetscInt *star = NULL;
158: PetscInt numAdj = 0, maxAdjSize = *adjSize, starSize, s;
160: PetscFunctionBeginHot;
161: PetscCall(DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star));
162: for (s = 0; s < starSize * 2; s += 2) {
163: const PetscInt *closure = NULL;
164: PetscInt closureSize, c, q;
166: PetscCall(DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt **)&closure));
167: for (c = 0; c < closureSize * 2; c += 2) {
168: for (q = 0; q < numAdj || ((void)(adj[numAdj++] = closure[c]), 0); ++q) {
169: if (closure[c] == adj[q]) break;
170: }
171: PetscCheck(numAdj <= maxAdjSize, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
172: }
173: PetscCall(DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt **)&closure));
174: }
175: PetscCall(DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star));
176: *adjSize = numAdj;
177: PetscFunctionReturn(PETSC_SUCCESS);
178: }
180: // Returns the maximum number of adjacent points in the DMPlex
181: PetscErrorCode DMPlexGetMaxAdjacencySize_Internal(DM dm, PetscBool useAnchors, PetscInt *max_adjacency_size)
182: {
183: PetscInt depth, maxC, maxS, maxP, pStart, pEnd, asiz, maxAnchors = 1;
185: PetscFunctionBegin;
186: if (useAnchors) {
187: PetscSection aSec = NULL;
188: IS aIS = NULL;
189: PetscInt aStart, aEnd;
190: PetscCall(DMPlexGetAnchors(dm, &aSec, &aIS));
191: if (aSec) {
192: PetscCall(PetscSectionGetMaxDof(aSec, &maxAnchors));
193: maxAnchors = PetscMax(1, maxAnchors);
194: PetscCall(PetscSectionGetChart(aSec, &aStart, &aEnd));
195: }
196: }
198: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
199: PetscCall(DMPlexGetDepth(dm, &depth));
200: depth = PetscMax(depth, -depth);
201: PetscCall(DMPlexGetMaxSizes(dm, &maxC, &maxS));
202: maxP = maxS * maxC;
203: /* Adjacency can be as large as supp(cl(cell)) or cl(supp(vertex)),
204: supp(cell) + supp(maxC faces) + supp(maxC^2 edges) + supp(maxC^3 vertices)
205: = 0 + maxS*maxC + maxS^2*maxC^2 + maxS^3*maxC^3
206: = \sum^d_{i=0} (maxS*maxC)^i - 1
207: = ((maxS*maxC)^{d+1} - 1) / (maxS*maxC - 1) - 1
208: We could improve this by getting the max by strata:
209: supp[d](cell) + supp[d-1](maxC[d] faces) + supp[1](maxC[d]*maxC[d-1] edges) + supp[0](maxC[d]*maxC[d-1]*maxC[d-2] vertices)
210: = 0 + maxS[d-1]*maxC[d] + maxS[1]*maxC[d]*maxC[d-1] + maxS[0]*maxC[d]*maxC[d-1]*maxC[d-2]
211: and the same with S and C reversed
212: */
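   /* For example, with depth = 2 and maxC = maxS = 4 (a quadrilateral mesh), maxP = 16 and the
      geometric-series bound below evaluates to (16^3 - 1)/15 = 273 points */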
213: if ((depth == 3 && maxP > 200) || (depth == 2 && maxP > 580)) asiz = pEnd - pStart;
214: else asiz = (maxP > 1) ? ((PetscPowInt(maxP, depth + 1) - 1) / (maxP - 1)) : depth + 1;
215: asiz *= maxAnchors;
216: *max_adjacency_size = PetscMin(asiz, pEnd - pStart);
217: PetscFunctionReturn(PETSC_SUCCESS);
218: }
220: // Returns the mesh points adjacent to the selected point, given specific criteria
221: //
222: // + adjSize - Number of adjacent points
223: // - adj - Array of the adjacent points
224: PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[])
225: {
226: static PetscInt asiz = 0;
227: PetscInt aStart = -1, aEnd = -1;
228: PetscInt maxAdjSize;
229: PetscSection aSec = NULL;
230: IS aIS = NULL;
231: const PetscInt *anchors;
232: DM_Plex *mesh = (DM_Plex *)dm->data;
234: PetscFunctionBeginHot;
235: if (useAnchors) {
236: PetscCall(DMPlexGetAnchors(dm, &aSec, &aIS));
237: if (aSec) {
238: PetscCall(PetscSectionGetChart(aSec, &aStart, &aEnd));
239: PetscCall(ISGetIndices(aIS, &anchors));
240: }
241: }
242: if (!*adj) {
243: PetscCall(DMPlexGetMaxAdjacencySize_Internal(dm, useAnchors, &asiz));
244: PetscCall(PetscMalloc1(asiz, adj));
245: }
246: if (*adjSize < 0) *adjSize = asiz;
247: maxAdjSize = *adjSize;
248: if (mesh->useradjacency) {
249: PetscCall((*mesh->useradjacency)(dm, p, adjSize, *adj, mesh->useradjacencyctx));
250: } else if (useTransitiveClosure) {
251: PetscCall(DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj));
252: } else if (useCone) {
253: PetscCall(DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj));
254: } else {
255: PetscCall(DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj));
256: }
257: if (useAnchors && aSec) {
258: PetscInt origSize = *adjSize;
259: PetscInt numAdj = origSize;
260: PetscInt i = 0, j;
261: PetscInt *orig = *adj;
263: while (i < origSize) {
264: PetscInt p = orig[i];
265: PetscInt aDof = 0;
267: if (p >= aStart && p < aEnd) PetscCall(PetscSectionGetDof(aSec, p, &aDof));
268: if (aDof) {
269: PetscInt aOff;
271: for (j = i + 1; j < numAdj; j++) orig[j - 1] = orig[j];
272: origSize--;
273: numAdj--;
274: PetscCall(PetscSectionGetOffset(aSec, p, &aOff));
275: for (PetscInt s = 0; s < aDof; ++s) {
276: for (PetscInt q = 0; q < numAdj || ((void)(orig[numAdj++] = anchors[aOff + s]), 0); ++q) {
277: if (anchors[aOff + s] == orig[q]) break;
278: }
279: PetscCheck(numAdj <= maxAdjSize, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
280: }
281: } else {
282: i++;
283: }
284: }
285: *adjSize = numAdj;
286: PetscCall(ISRestoreIndices(aIS, &anchors));
287: }
288: PetscFunctionReturn(PETSC_SUCCESS);
289: }
291: /*@
292: DMPlexGetAdjacency - Return all points adjacent to the given point
294: Input Parameters:
295: + dm - The `DM` object
296: - p - The point
298: Input/Output Parameters:
299: + adjSize - The maximum size of `adj` if it is non-`NULL`, or `PETSC_DETERMINE`;
300: on output the number of adjacent points
301: - adj - Either `NULL` so that the array is allocated, or an existing array with size `adjSize`;
302: on output contains the adjacent points
304: Level: advanced
306: Notes:
307: The user must `PetscFree()` the `adj` array if it was not passed in.
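   Example usage (a minimal sketch, assuming a `DMPLEX` dm and a point p in its chart):
.vb
  PetscInt *adj = NULL, adjSize = PETSC_DETERMINE;

  PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
  for (PetscInt a = 0; a < adjSize; ++a) PetscCall(PetscPrintf(PETSC_COMM_SELF, "%" PetscInt_FMT "\n", adj[a]));
  PetscCall(PetscFree(adj)); // adj was allocated by the call since it was passed in as NULL
.ve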
309: .seealso: `DMPLEX`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMCreateMatrix()`, `DMPlexPreallocateOperator()`
310: @*/
311: PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[])
312: {
313: PetscBool useCone, useClosure, useAnchors;
315: PetscFunctionBeginHot;
317: PetscAssertPointer(adjSize, 3);
318: PetscAssertPointer(adj, 4);
319: PetscCall(DMGetBasicAdjacency(dm, &useCone, &useClosure));
320: PetscCall(DMPlexGetAdjacencyUseAnchors(dm, &useAnchors));
321: PetscCall(DMPlexGetAdjacency_Internal(dm, p, useCone, useClosure, useAnchors, adjSize, adj));
322: PetscFunctionReturn(PETSC_SUCCESS);
323: }
325: /*@
326: DMPlexCreateTwoSidedProcessSF - Create a `PetscSF` which just has process connectivity
328: Collective
330: Input Parameters:
331: + dm - The `DM`
332: . sfPoint - The `PetscSF` which encodes point connectivity
333: . rootRankSection - The number of leaves for a given root point, as produced by `DMPlexDistributeOwnership()`
334: . rootRanks - The rank of each edge into the root point
335: . leafRankSection - The number of processes sharing a given leaf point
336: - leafRanks - The rank of each process sharing a leaf point
338: Output Parameters:
339: + processRanks - A list of process neighbors, or `NULL`
340: - sfProcess - A `PetscSF` encoding the two-sided process connectivity, or `NULL`
342: Level: developer
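   Example usage (a minimal sketch; dm, sfPoint, and the section/rank arguments, typically those produced by `DMPlexDistributeOwnership()`, are assumed to exist):
.vb
  IS      processRanks;
  PetscSF sfProcess;

  PetscCall(DMPlexCreateTwoSidedProcessSF(dm, sfPoint, rootSection, rootrank, leafSection, leafrank, &processRanks, &sfProcess));
.ve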
344: .seealso: `DMPLEX`, `PetscSFCreate()`, `DMPlexCreateProcessSF()`
345: @*/
346: PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, PeOp IS *processRanks, PeOp PetscSF *sfProcess)
347: {
348: const PetscSFNode *remotePoints;
349: PetscInt *localPointsNew;
350: PetscSFNode *remotePointsNew;
351: const PetscInt *nranks;
352: PetscInt *ranksNew;
353: PetscBT neighbors;
354: PetscInt pStart, pEnd, p, numLeaves, l, numNeighbors, n;
355: PetscMPIInt size, proc, rank;
357: PetscFunctionBegin;
360: if (processRanks) PetscAssertPointer(processRanks, 7);
361: if (sfProcess) PetscAssertPointer(sfProcess, 8);
362: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)dm), &size));
363: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)dm), &rank));
364: PetscCall(PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints));
365: PetscCall(PetscBTCreate(size, &neighbors));
366: PetscCall(PetscBTMemzero(size, neighbors));
367: /* Compute root-to-leaf process connectivity */
368: PetscCall(PetscSectionGetChart(rootRankSection, &pStart, &pEnd));
369: PetscCall(ISGetIndices(rootRanks, &nranks));
370: for (p = pStart; p < pEnd; ++p) {
371: PetscInt ndof, noff, n;
373: PetscCall(PetscSectionGetDof(rootRankSection, p, &ndof));
374: PetscCall(PetscSectionGetOffset(rootRankSection, p, &noff));
375: for (n = 0; n < ndof; ++n) PetscCall(PetscBTSet(neighbors, nranks[noff + n]));
376: }
377: PetscCall(ISRestoreIndices(rootRanks, &nranks));
378: /* Compute leaf-to-neighbor process connectivity */
379: PetscCall(PetscSectionGetChart(leafRankSection, &pStart, &pEnd));
380: PetscCall(ISGetIndices(leafRanks, &nranks));
381: for (p = pStart; p < pEnd; ++p) {
382: PetscInt ndof, noff, n;
384: PetscCall(PetscSectionGetDof(leafRankSection, p, &ndof));
385: PetscCall(PetscSectionGetOffset(leafRankSection, p, &noff));
386: for (n = 0; n < ndof; ++n) PetscCall(PetscBTSet(neighbors, nranks[noff + n]));
387: }
388: PetscCall(ISRestoreIndices(leafRanks, &nranks));
389: /* Compute leaf-to-root process connectivity */
390: for (l = 0; l < numLeaves; ++l) PetscCall(PetscBTSet(neighbors, remotePoints[l].rank));
391: /* Calculate edges */
392: PetscCall(PetscBTClear(neighbors, rank));
393: for (proc = 0, numNeighbors = 0; proc < size; ++proc) {
394: if (PetscBTLookup(neighbors, proc)) ++numNeighbors;
395: }
396: PetscCall(PetscMalloc1(numNeighbors, &ranksNew));
397: PetscCall(PetscMalloc1(numNeighbors, &localPointsNew));
398: PetscCall(PetscMalloc1(numNeighbors, &remotePointsNew));
399: for (proc = 0, n = 0; proc < size; ++proc) {
400: if (PetscBTLookup(neighbors, proc)) {
401: ranksNew[n] = proc;
402: localPointsNew[n] = proc;
403: remotePointsNew[n].index = rank;
404: remotePointsNew[n].rank = proc;
405: ++n;
406: }
407: }
408: PetscCall(PetscBTDestroy(&neighbors));
409: if (processRanks) PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks));
410: else PetscCall(PetscFree(ranksNew));
411: if (sfProcess) {
412: PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess));
413: PetscCall(PetscObjectSetName((PetscObject)*sfProcess, "Two-Sided Process SF"));
414: PetscCall(PetscSFSetFromOptions(*sfProcess));
415: PetscCall(PetscSFSetGraph(*sfProcess, size, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER));
416: }
417: PetscFunctionReturn(PETSC_SUCCESS);
418: }
420: /*@
421: DMPlexDistributeOwnership - Compute owner information for shared points. This essentially makes the one-sided information in the point `PetscSF` two-sided.
423: Collective
425: Input Parameter:
426: . dm - The `DM`
428: Output Parameters:
429: + rootSection - The number of leaves for a given root point
430: . rootrank - The rank of each edge into the root point
431: . leafSection - The number of processes sharing a given leaf point
432: - leafrank - The rank of each process sharing a leaf point
434: Level: developer
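   Example usage (a minimal sketch of the usual pairing with `DMPlexCreateOverlapLabel()`, assuming an existing `DM` dm on communicator comm):
.vb
  PetscSection rootSection, leafSection;
  IS           rootrank, leafrank;
  DMLabel      ovLabel;

  PetscCall(PetscSectionCreate(comm, &rootSection));
  PetscCall(PetscSectionCreate(comm, &leafSection));
  PetscCall(DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank));
  PetscCall(DMPlexCreateOverlapLabel(dm, 1, rootSection, rootrank, leafSection, leafrank, &ovLabel));
.ve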
436: .seealso: `DMPLEX`, `DMPlexCreateOverlapLabel()`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
437: @*/
438: PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank)
439: {
440: MPI_Comm comm;
441: PetscSF sfPoint;
442: const PetscInt *rootdegree;
443: PetscInt *myrank, *remoterank;
444: PetscInt pStart, pEnd, p, nedges;
445: PetscMPIInt rank;
447: PetscFunctionBegin;
448: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
449: PetscCallMPI(MPI_Comm_rank(comm, &rank));
450: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
451: PetscCall(DMGetPointSF(dm, &sfPoint));
452: /* Compute number of leaves for each root */
453: PetscCall(PetscObjectSetName((PetscObject)rootSection, "Root Section"));
454: PetscCall(PetscSectionSetChart(rootSection, pStart, pEnd));
455: PetscCall(PetscSFComputeDegreeBegin(sfPoint, &rootdegree));
456: PetscCall(PetscSFComputeDegreeEnd(sfPoint, &rootdegree));
457: for (p = pStart; p < pEnd; ++p) PetscCall(PetscSectionSetDof(rootSection, p, rootdegree[p - pStart]));
458: PetscCall(PetscSectionSetUp(rootSection));
459: /* Gather rank of each leaf to root */
460: PetscCall(PetscSectionGetStorageSize(rootSection, &nedges));
461: PetscCall(PetscMalloc1(pEnd - pStart, &myrank));
462: PetscCall(PetscMalloc1(nedges, &remoterank));
463: for (p = 0; p < pEnd - pStart; ++p) myrank[p] = rank;
464: PetscCall(PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank));
465: PetscCall(PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank));
466: PetscCall(PetscFree(myrank));
467: PetscCall(ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank));
468: /* Distribute remote ranks to leaves */
469: PetscCall(PetscObjectSetName((PetscObject)leafSection, "Leaf Section"));
470: PetscCall(DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank));
471: PetscFunctionReturn(PETSC_SUCCESS);
472: }
474: /*@
475: DMPlexCreateOverlapLabel - Compute a label indicating what overlap points should be sent to new processes
477: Collective
479: Input Parameters:
480: + dm - The `DM`
481: . levels - Number of overlap levels
482: . rootSection - The number of leaves for a given root point
483: . rootrank - The rank of each edge into the root point
484: . leafSection - The number of processes sharing a given leaf point
485: - leafrank - The rank of each process sharing a leaf point
487: Output Parameter:
488: . ovLabel - `DMLabel` containing remote overlap contributions as point/rank pairings
490: Level: developer
492: .seealso: `DMPLEX`, `DMPlexCreateOverlapLabelFromLabels()`, `DMPlexGetAdjacency()`, `DMPlexDistributeOwnership()`, `DMPlexDistribute()`
493: @*/
494: PetscErrorCode DMPlexCreateOverlapLabel(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
495: {
496: MPI_Comm comm;
497: DMLabel ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
498: PetscSF sfPoint;
499: const PetscSFNode *remote;
500: const PetscInt *local;
501: const PetscInt *nrank, *rrank;
502: PetscInt *adj = NULL;
503: PetscInt pStart, pEnd, p, sStart, sEnd, nleaves, l;
504: PetscMPIInt rank, size;
505: PetscBool flg;
507: PetscFunctionBegin;
508: *ovLabel = NULL;
509: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
510: PetscCallMPI(MPI_Comm_size(comm, &size));
511: PetscCallMPI(MPI_Comm_rank(comm, &rank));
512: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
513: PetscCall(DMGetPointSF(dm, &sfPoint));
514: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
515: if (!levels) {
516: /* Add owned points */
517: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel));
518: for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank));
519: PetscFunctionReturn(PETSC_SUCCESS);
520: }
521: PetscCall(PetscSectionGetChart(leafSection, &sStart, &sEnd));
522: PetscCall(PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote));
523: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank));
524: /* Handle leaves: shared with the root point */
525: for (l = 0; l < nleaves; ++l) {
526: PetscInt adjSize = PETSC_DETERMINE, a;
528: PetscCall(DMPlexGetAdjacency(dm, local ? local[l] : l, &adjSize, &adj));
529: for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank));
530: }
531: PetscCall(ISGetIndices(rootrank, &rrank));
532: PetscCall(ISGetIndices(leafrank, &nrank));
533: /* Handle roots */
534: for (p = pStart; p < pEnd; ++p) {
535: PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a;
537: if ((p >= sStart) && (p < sEnd)) {
538: /* Some leaves share a root with other leaves on different processes */
539: PetscCall(PetscSectionGetDof(leafSection, p, &neighbors));
540: if (neighbors) {
541: PetscCall(PetscSectionGetOffset(leafSection, p, &noff));
542: PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
543: for (n = 0; n < neighbors; ++n) {
544: const PetscInt remoteRank = nrank[noff + n];
546: if (remoteRank == rank) continue;
547: for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
548: }
549: }
550: }
551: /* Roots are shared with leaves */
552: PetscCall(PetscSectionGetDof(rootSection, p, &neighbors));
553: if (!neighbors) continue;
554: PetscCall(PetscSectionGetOffset(rootSection, p, &noff));
555: PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
556: for (n = 0; n < neighbors; ++n) {
557: const PetscInt remoteRank = rrank[noff + n];
559: if (remoteRank == rank) continue;
560: for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
561: }
562: }
563: PetscCall(PetscFree(adj));
564: PetscCall(ISRestoreIndices(rootrank, &rrank));
565: PetscCall(ISRestoreIndices(leafrank, &nrank));
566: /* Add additional overlap levels */
567: for (l = 1; l < levels; l++) {
568: /* Propagate point donations over SF to capture remote connections */
569: PetscCall(DMPlexPartitionLabelPropagate(dm, ovAdjByRank));
570: /* Add next level of point donations to the label */
571: PetscCall(DMPlexPartitionLabelAdjacency(dm, ovAdjByRank));
572: }
573: /* We require the closure in the overlap */
574: PetscCall(DMPlexPartitionLabelClosure(dm, ovAdjByRank));
575: PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-overlap_view", &flg));
576: if (flg) {
577: PetscViewer viewer;
578: PetscCall(PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer));
579: PetscCall(DMLabelView(ovAdjByRank, viewer));
580: }
581: /* Invert sender to receiver label */
582: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel));
583: PetscCall(DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel));
584: /* Add owned points, except for shared local points */
585: for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank));
586: for (l = 0; l < nleaves; ++l) {
587: PetscCall(DMLabelClearValue(*ovLabel, local[l], rank));
588: PetscCall(DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank));
589: }
590: /* Clean up */
591: PetscCall(DMLabelDestroy(&ovAdjByRank));
592: PetscFunctionReturn(PETSC_SUCCESS);
593: }
595: static PetscErrorCode HandlePoint_Private(DM dm, PetscInt p, PetscSection section, const PetscInt ranks[], PetscInt numExLabels, const DMLabel exLabel[], const PetscInt exValue[], DMLabel ovAdjByRank)
596: {
597: PetscInt neighbors, el;
599: PetscFunctionBegin;
600: PetscCall(PetscSectionGetDof(section, p, &neighbors));
601: if (neighbors) {
602: PetscInt *adj = NULL;
603: PetscInt adjSize = PETSC_DETERMINE, noff, n, a;
604: PetscMPIInt rank;
606: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)dm), &rank));
607: PetscCall(PetscSectionGetOffset(section, p, &noff));
608: PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
609: for (n = 0; n < neighbors; ++n) {
610: const PetscInt remoteRank = ranks[noff + n];
612: if (remoteRank == rank) continue;
613: for (a = 0; a < adjSize; ++a) {
614: PetscBool insert = PETSC_TRUE;
616: for (el = 0; el < numExLabels; ++el) {
617: PetscInt exVal;
618: PetscCall(DMLabelGetValue(exLabel[el], adj[a], &exVal));
619: if (exVal == exValue[el]) {
620: insert = PETSC_FALSE;
621: break;
622: }
623: }
624: if (insert) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
625: }
626: }
627: PetscCall(PetscFree(adj));
628: }
629: PetscFunctionReturn(PETSC_SUCCESS);
630: }
632: /*@C
633: DMPlexCreateOverlapLabelFromLabels - Compute a label indicating what overlap points should be sent to new processes
635: Collective
637: Input Parameters:
638: + dm - The `DM`
639: . numLabels - The number of labels to draw candidate points from
640: . label - An array of labels containing candidate points
641: . value - An array of label values marking the candidate points
642: . numExLabels - The number of labels to use for exclusion
643: . exLabel - An array of labels indicating points to be excluded, or `NULL`
644: . exValue - An array of label values to be excluded, or `NULL`
645: . rootSection - The number of leaves for a given root point
646: . rootrank - The rank of each edge into the root point
647: . leafSection - The number of processes sharing a given leaf point
648: - leafrank - The rank of each process sharing a leaf point
650: Output Parameter:
651: . ovLabel - `DMLabel` containing remote overlap contributions as point/rank pairings
653: Level: developer
655: Note:
656: The candidate points are only added to the overlap if they are adjacent to a shared point
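   Example usage (a minimal sketch; candLabel is an illustrative label whose value-1 points are the candidates, and the section/rank arguments come from `DMPlexDistributeOwnership()`):
.vb
  DMLabel  labels[1] = {candLabel};
  PetscInt values[1] = {1};
  DMLabel  ovLabel;

  PetscCall(DMPlexCreateOverlapLabelFromLabels(dm, 1, labels, values, 0, NULL, NULL, rootSection, rootrank, leafSection, leafrank, &ovLabel));
.ve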
658: .seealso: `DMPLEX`, `DMPlexCreateOverlapLabel()`, `DMPlexGetAdjacency()`, `DMPlexDistributeOwnership()`, `DMPlexDistribute()`
659: @*/
660: PetscErrorCode DMPlexCreateOverlapLabelFromLabels(DM dm, PetscInt numLabels, const DMLabel label[], const PetscInt value[], PetscInt numExLabels, const DMLabel exLabel[], const PetscInt exValue[], PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
661: {
662: MPI_Comm comm;
663: DMLabel ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
664: PetscSF sfPoint;
665: const PetscSFNode *remote;
666: const PetscInt *local;
667: const PetscInt *nrank, *rrank;
668: PetscInt *adj = NULL;
669: PetscInt pStart, pEnd, p, sStart, sEnd, nleaves, l, el;
670: PetscMPIInt rank, size;
671: PetscBool flg;
673: PetscFunctionBegin;
674: *ovLabel = NULL;
675: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
676: PetscCallMPI(MPI_Comm_size(comm, &size));
677: PetscCallMPI(MPI_Comm_rank(comm, &rank));
678: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
679: PetscCall(DMGetPointSF(dm, &sfPoint));
680: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
681: PetscCall(PetscSectionGetChart(leafSection, &sStart, &sEnd));
682: PetscCall(PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote));
683: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank));
684: PetscCall(ISGetIndices(rootrank, &rrank));
685: PetscCall(ISGetIndices(leafrank, &nrank));
686: for (l = 0; l < numLabels; ++l) {
687: IS valIS;
688: const PetscInt *points;
689: PetscInt n;
691: PetscCall(DMLabelGetStratumIS(label[l], value[l], &valIS));
692: if (!valIS) continue;
693: PetscCall(ISGetIndices(valIS, &points));
694: PetscCall(ISGetLocalSize(valIS, &n));
695: for (PetscInt i = 0; i < n; ++i) {
696: const PetscInt p = points[i];
698: if ((p >= sStart) && (p < sEnd)) {
699: PetscInt loc, adjSize = PETSC_DETERMINE;
701: /* Handle leaves: shared with the root point */
702: if (local) PetscCall(PetscFindInt(p, nleaves, local, &loc));
703: else loc = (p >= 0 && p < nleaves) ? p : -1;
704: if (loc >= 0) {
705: const PetscInt remoteRank = remote[loc].rank;
707: PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
708: for (PetscInt a = 0; a < adjSize; ++a) {
709: PetscBool insert = PETSC_TRUE;
711: for (el = 0; el < numExLabels; ++el) {
712: PetscInt exVal;
713: PetscCall(DMLabelGetValue(exLabel[el], adj[a], &exVal));
714: if (exVal == exValue[el]) {
715: insert = PETSC_FALSE;
716: break;
717: }
718: }
719: if (insert) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
720: }
721: }
722: /* Some leaves share a root with other leaves on different processes */
723: PetscCall(HandlePoint_Private(dm, p, leafSection, nrank, numExLabels, exLabel, exValue, ovAdjByRank));
724: }
725: /* Roots are shared with leaves */
726: PetscCall(HandlePoint_Private(dm, p, rootSection, rrank, numExLabels, exLabel, exValue, ovAdjByRank));
727: }
728: PetscCall(ISRestoreIndices(valIS, &points));
729: PetscCall(ISDestroy(&valIS));
730: }
731: PetscCall(PetscFree(adj));
732: PetscCall(ISRestoreIndices(rootrank, &rrank));
733: PetscCall(ISRestoreIndices(leafrank, &nrank));
734: /* We require the closure in the overlap */
735: PetscCall(DMPlexPartitionLabelClosure(dm, ovAdjByRank));
736: PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-overlap_view", &flg));
737: if (flg) {
738: PetscViewer viewer;
739: PetscCall(PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer));
740: PetscCall(DMLabelView(ovAdjByRank, viewer));
741: }
742: /* Invert sender to receiver label */
743: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel));
744: PetscCall(DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel));
745: /* Add owned points, except for shared local points */
746: for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank));
747: for (l = 0; l < nleaves; ++l) {
748: PetscCall(DMLabelClearValue(*ovLabel, local[l], rank));
749: PetscCall(DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank));
750: }
751: /* Clean up */
752: PetscCall(DMLabelDestroy(&ovAdjByRank));
753: PetscFunctionReturn(PETSC_SUCCESS);
754: }
756: /*@
757: DMPlexCreateOverlapMigrationSF - Create a `PetscSF` describing the new mesh distribution to make the overlap described by the input `PetscSF`
759: Collective
761: Input Parameters:
762: + dm - The `DM`
763: - overlapSF - The `PetscSF` mapping ghost points in overlap to owner points on other processes
765: Output Parameter:
766: . migrationSF - A `PetscSF` that maps original points in old locations to points in new locations
768: Level: developer
770: .seealso: `DMPLEX`, `DMPlexCreateOverlapLabel()`, `DMPlexDistributeOverlap()`, `DMPlexDistribute()`
771: @*/
772: PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF)
773: {
774: MPI_Comm comm;
775: PetscMPIInt rank, size;
776: PetscInt d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
777: PetscInt *pointDepths, *remoteDepths, *ilocal;
778: PetscInt *depthRecv, *depthShift, *depthIdx;
779: PetscSFNode *iremote;
780: PetscSF pointSF;
781: const PetscInt *sharedLocal;
782: const PetscSFNode *overlapRemote, *sharedRemote;
784: PetscFunctionBegin;
786: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
787: PetscCallMPI(MPI_Comm_rank(comm, &rank));
788: PetscCallMPI(MPI_Comm_size(comm, &size));
789: PetscCall(DMGetDimension(dm, &dim));
791: /* Before building the migration SF we need to know the new stratum offsets */
792: PetscCall(PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote));
793: PetscCall(PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths));
794: for (d = 0; d < dim + 1; d++) {
795: PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
796: for (p = pStart; p < pEnd; p++) pointDepths[p] = d;
797: }
798: for (p = 0; p < nleaves; p++) remoteDepths[p] = -1;
799: PetscCall(PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths, MPI_REPLACE));
800: PetscCall(PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths, MPI_REPLACE));
802: /* Count received points in each stratum and compute the internal strata shift */
803: PetscCall(PetscMalloc3(dim + 1, &depthRecv, dim + 1, &depthShift, dim + 1, &depthIdx));
804: for (d = 0; d < dim + 1; d++) depthRecv[d] = 0;
805: for (p = 0; p < nleaves; p++) depthRecv[remoteDepths[p]]++;
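  /* The shifts below order received points as cells (depth dim), vertices (0), faces (dim-1), and
     then lower strata, matching the convention described in DMPlexStratifyMigrationSF() */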
806: depthShift[dim] = 0;
807: for (d = 0; d < dim; d++) depthShift[d] = depthRecv[dim];
808: for (d = 1; d < dim; d++) depthShift[d] += depthRecv[0];
809: for (d = dim - 2; d > 0; d--) depthShift[d] += depthRecv[d + 1];
810: for (d = 0; d < dim + 1; d++) {
811: PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
812: depthIdx[d] = pStart + depthShift[d];
813: }
815: /* Using the overlap SF, build an SF that describes the full overlap migration */
816: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
817: newLeaves = pEnd - pStart + nleaves;
818: PetscCall(PetscMalloc1(newLeaves, &ilocal));
819: PetscCall(PetscMalloc1(newLeaves, &iremote));
820: /* First map local points to themselves */
821: for (d = 0; d < dim + 1; d++) {
822: PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
823: for (p = pStart; p < pEnd; p++) {
824: point = p + depthShift[d];
825: ilocal[point] = point;
826: iremote[point].index = p;
827: iremote[point].rank = rank;
828: depthIdx[d]++;
829: }
830: }
832: /* Add in the remote roots for currently shared points */
833: PetscCall(DMGetPointSF(dm, &pointSF));
834: PetscCall(PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote));
835: for (d = 0; d < dim + 1; d++) {
836: PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
837: for (p = 0; p < numSharedPoints; p++) {
838: if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
839: point = sharedLocal[p] + depthShift[d];
840: iremote[point].index = sharedRemote[p].index;
841: iremote[point].rank = sharedRemote[p].rank;
842: }
843: }
844: }
846: /* Now add the incoming overlap points */
847: for (p = 0; p < nleaves; p++) {
848: point = depthIdx[remoteDepths[p]];
849: ilocal[point] = point;
850: iremote[point].index = overlapRemote[p].index;
851: iremote[point].rank = overlapRemote[p].rank;
852: depthIdx[remoteDepths[p]]++;
853: }
854: PetscCall(PetscFree2(pointDepths, remoteDepths));
856: PetscCall(PetscSFCreate(comm, migrationSF));
857: PetscCall(PetscObjectSetName((PetscObject)*migrationSF, "Overlap Migration SF"));
858: PetscCall(PetscSFSetFromOptions(*migrationSF));
859: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
860: PetscCall(PetscSFSetGraph(*migrationSF, pEnd - pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER));
862: PetscCall(PetscFree3(depthRecv, depthShift, depthIdx));
863: PetscFunctionReturn(PETSC_SUCCESS);
864: }
866: /*@
867: DMPlexStratifyMigrationSF - Rearrange the leaves of a migration `PetscSF` for stratification.
869: Input Parameters:
870: + dm - The `DM`
871: - sf - A star forest with non-ordered leaves, usually defining a `DM` point migration
873: Output Parameter:
874: . migrationSF - A star forest with added leaf indirection that ensures the resulting `DM` is stratified
876: Level: developer
878: Note:
879: This lexicographically sorts by (depth, cellType)
881: .seealso: `DMPLEX`, `DMPlexPartitionLabelCreateSF()`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
882: @*/
883: PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF)
884: {
885: MPI_Comm comm;
886: PetscMPIInt rank, size;
887: PetscInt d, ldepth, depth, dim, p, pStart, pEnd, nroots, nleaves;
888: PetscSFNode *pointDepths, *remoteDepths;
889: PetscInt *ilocal;
890: PetscInt *depthRecv, *depthShift, *depthIdx;
891: PetscInt *ctRecv, *ctShift, *ctIdx;
892: const PetscSFNode *iremote;
894: PetscFunctionBegin;
896: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
897: PetscCallMPI(MPI_Comm_rank(comm, &rank));
898: PetscCallMPI(MPI_Comm_size(comm, &size));
899: PetscCall(DMPlexGetDepth(dm, &ldepth));
900: PetscCall(DMGetDimension(dm, &dim));
901: PetscCallMPI(MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm));
902: PetscCheck(!(ldepth >= 0) || !(depth != ldepth), PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %" PetscInt_FMT " != %" PetscInt_FMT, ldepth, depth);
903: PetscCall(PetscLogEventBegin(DMPLEX_PartStratSF, dm, 0, 0, 0));
905: /* Before building the migration SF we need to know the new stratum offsets */
906: PetscCall(PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote));
907: PetscCall(PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths));
908: for (d = 0; d < depth + 1; ++d) {
909: PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
910: for (p = pStart; p < pEnd; ++p) {
911: DMPolytopeType ct;
913: PetscCall(DMPlexGetCellType(dm, p, &ct));
914: pointDepths[p].index = d;
915: pointDepths[p].rank = ct;
916: }
917: }
918: for (p = 0; p < nleaves; ++p) {
919: remoteDepths[p].index = -1;
920: remoteDepths[p].rank = -1;
921: }
922: PetscCall(PetscSFBcastBegin(sf, MPIU_SF_NODE, pointDepths, remoteDepths, MPI_REPLACE));
923: PetscCall(PetscSFBcastEnd(sf, MPIU_SF_NODE, pointDepths, remoteDepths, MPI_REPLACE));
924: /* Count received points in each stratum and compute the internal strata shift */
925: PetscCall(PetscCalloc6(depth + 1, &depthRecv, depth + 1, &depthShift, depth + 1, &depthIdx, DM_NUM_POLYTOPES, &ctRecv, DM_NUM_POLYTOPES, &ctShift, DM_NUM_POLYTOPES, &ctIdx));
926: for (p = 0; p < nleaves; ++p) {
927: if (remoteDepths[p].rank < 0) {
928: ++depthRecv[remoteDepths[p].index];
929: } else {
930: ++ctRecv[remoteDepths[p].rank];
931: }
932: }
933: {
934: PetscInt depths[4], dims[4], shift = 0, i, c;
936: /* Cells (depth), Vertices (0), Faces (depth-1), Edges (1)
937: Consider DM_POLYTOPE_FV_GHOST, DM_POLYTOPE_INTERIOR_GHOST and DM_POLYTOPE_UNKNOWN_CELL as cells
938: */
939: depths[0] = depth;
940: depths[1] = 0;
941: depths[2] = depth - 1;
942: depths[3] = 1;
943: dims[0] = dim;
944: dims[1] = 0;
945: dims[2] = dim - 1;
946: dims[3] = 1;
947: for (i = 0; i <= depth; ++i) {
948: const PetscInt dep = depths[i];
949: const PetscInt dim = dims[i];
951: for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
952: if (DMPolytopeTypeGetDim((DMPolytopeType)c) != dim && !(i == 0 && (c == DM_POLYTOPE_FV_GHOST || c == DM_POLYTOPE_INTERIOR_GHOST || c == DM_POLYTOPE_UNKNOWN_CELL))) continue;
953: ctShift[c] = shift;
954: shift += ctRecv[c];
955: }
956: depthShift[dep] = shift;
957: shift += depthRecv[dep];
958: }
959: for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
960: const PetscInt ctDim = DMPolytopeTypeGetDim((DMPolytopeType)c);
962: if ((ctDim < 0 || ctDim > dim) && c != DM_POLYTOPE_FV_GHOST && c != DM_POLYTOPE_INTERIOR_GHOST && c != DM_POLYTOPE_UNKNOWN_CELL) {
963: ctShift[c] = shift;
964: shift += ctRecv[c];
965: }
966: }
967: }
968: /* Derive a new local permutation based on stratified indices */
969: PetscCall(PetscMalloc1(nleaves, &ilocal));
970: for (p = 0; p < nleaves; ++p) {
971: const DMPolytopeType ct = (DMPolytopeType)remoteDepths[p].rank;
973: ilocal[p] = ctShift[ct] + ctIdx[ct];
974: ++ctIdx[ct];
975: }
976: PetscCall(PetscSFCreate(comm, migrationSF));
977: PetscCall(PetscObjectSetName((PetscObject)*migrationSF, "Migration SF"));
978: PetscCall(PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, (PetscSFNode *)iremote, PETSC_COPY_VALUES));
979: PetscCall(PetscFree2(pointDepths, remoteDepths));
980: PetscCall(PetscFree6(depthRecv, depthShift, depthIdx, ctRecv, ctShift, ctIdx));
981: PetscCall(PetscLogEventEnd(DMPLEX_PartStratSF, dm, 0, 0, 0));
982: PetscFunctionReturn(PETSC_SUCCESS);
983: }
985: /*@
986: DMPlexDistributeField - Distribute field data to match a given `PetscSF`, usually the `PetscSF` from mesh distribution
988: Collective
990: Input Parameters:
991: + dm - The `DMPLEX` object
992: . pointSF - The `PetscSF` describing the communication pattern
993: . originalSection - The `PetscSection` for existing data layout
994: - originalVec - The existing data in a local vector
996: Output Parameters:
997: + newSection - The `PetscSection` describing the new data layout
998: - newVec - The new data in a local vector
1000: Level: developer
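   Example usage (a minimal sketch; migrationSF, origSection, origVec, and newSection are assumed, mirroring the coordinate distribution done later in this file):
.vb
  Vec newVec;

  PetscCall(VecCreate(PETSC_COMM_SELF, &newVec));
  PetscCall(DMPlexDistributeField(dm, migrationSF, origSection, origVec, newSection, newVec));
.ve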
1002: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexDistributeFieldIS()`, `DMPlexDistributeData()`, `PetscSectionMigrateData()`
1003: @*/
1004: PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec)
1005: {
1006: PetscSF fieldSF;
1007: PetscInt *remoteOffsets, fieldSize;
1008: PetscScalar *originalValues, *newValues;
1010: PetscFunctionBegin;
1011: PetscCall(PetscLogEventBegin(DMPLEX_DistributeField, dm, 0, 0, 0));
1012: PetscCall(PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection));
1014: PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize));
1015: PetscCall(VecSetSizes(newVec, fieldSize, PETSC_DETERMINE));
1016: PetscCall(VecSetType(newVec, dm->vectype));
1018: PetscCall(VecGetArray(originalVec, &originalValues));
1019: PetscCall(VecGetArray(newVec, &newValues));
1020: PetscCall(PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF));
1021: PetscCall(PetscFree(remoteOffsets));
1022: PetscCall(PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues, MPI_REPLACE));
1023: PetscCall(PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues, MPI_REPLACE));
1024: PetscCall(PetscSFDestroy(&fieldSF));
1025: PetscCall(VecRestoreArray(newVec, &newValues));
1026: PetscCall(VecRestoreArray(originalVec, &originalValues));
1027: PetscCall(PetscLogEventEnd(DMPLEX_DistributeField, dm, 0, 0, 0));
1028: PetscFunctionReturn(PETSC_SUCCESS);
1029: }
1031: /*@
1032: DMPlexDistributeFieldIS - Distribute field data to match a given `PetscSF`, usually the `PetscSF` from mesh distribution
1034: Collective
1036: Input Parameters:
1037: + dm - The `DMPLEX` object
1038: . pointSF - The `PetscSF` describing the communication pattern
1039: . originalSection - The `PetscSection` for existing data layout
1040: - originalIS - The existing data
1042: Output Parameters:
1043: + newSection - The `PetscSection` describing the new data layout
1044: - newIS - The new data
1046: Level: developer
1048: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexDistributeField()`, `DMPlexDistributeData()`, `PetscSectionMigrateData()`
1049: @*/
1050: PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS)
1051: {
1052: PetscInt *newValues, fieldSize;
1053: const PetscInt *originalValues;
1055: PetscFunctionBegin;
1056: PetscCall(PetscLogEventBegin(DMPLEX_DistributeField, dm, 0, 0, 0));
1057: PetscCall(ISGetIndices(originalIS, &originalValues));
1058: PetscCall(PetscSectionMigrateData(pointSF, MPIU_INT, originalSection, originalValues, newSection, (void **)&newValues, NULL));
1059: PetscCall(ISRestoreIndices(originalIS, &originalValues));
1061: PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize));
1062: PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS));
1063: PetscCall(PetscLogEventEnd(DMPLEX_DistributeField, dm, 0, 0, 0));
1064: PetscFunctionReturn(PETSC_SUCCESS);
1065: }
1067: /*@
1068: DMPlexDistributeData - Distribute field data to match a given `PetscSF`, usually the `PetscSF` from mesh distribution
1070: Collective
1072: Input Parameters:
1073: + dm - The `DMPLEX` object
1074: . pointSF - The `PetscSF` describing the communication pattern
1075: . originalSection - The `PetscSection` for existing data layout
1076: . datatype - The type of data
1077: - originalData - The existing data
1079: Output Parameters:
1080: + newSection - The `PetscSection` describing the new data layout
1081: - newData - The new data
1083: Level: developer
1085: Note:
1086: This is simply a wrapper around `PetscSectionMigrateData()`, but includes DM-specific logging.
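   Example usage (a minimal sketch migrating integer data; pointSF, origSection, origData, and newSection are assumed, and newData is allocated by the call):
.vb
  PetscInt *newData = NULL;

  PetscCall(DMPlexDistributeData(dm, pointSF, origSection, MPIU_INT, origData, newSection, (void **)&newData));
.ve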
1088: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexDistributeField()`, `PetscSectionMigrateData()`
1089: @*/
1090: PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData)
1091: {
1092: PetscFunctionBegin;
1093: PetscCall(PetscLogEventBegin(DMPLEX_DistributeData, dm, 0, 0, 0));
1094: PetscCall(PetscSectionMigrateData(pointSF, datatype, originalSection, originalData, newSection, newData, NULL));
1095: PetscCall(PetscLogEventEnd(DMPLEX_DistributeData, dm, 0, 0, 0));
1096: PetscFunctionReturn(PETSC_SUCCESS);
1097: }
1099: static PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
1100: {
1101: DM_Plex *pmesh = (DM_Plex *)dmParallel->data;
1102: MPI_Comm comm;
1103: PetscSF coneSF;
1104: PetscSection originalConeSection, newConeSection;
1105: PetscInt *remoteOffsets, *cones, *globCones, *newCones, newConesSize;
1106: PetscBool flg;
1108: PetscFunctionBegin;
1111: PetscCall(PetscLogEventBegin(DMPLEX_DistributeCones, dm, 0, 0, 0));
1112: /* Distribute cone section */
1113: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1114: PetscCall(DMPlexGetConeSection(dm, &originalConeSection));
1115: PetscCall(DMPlexGetConeSection(dmParallel, &newConeSection));
1116: PetscCall(PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection));
1117: PetscCall(DMSetUp(dmParallel));
1118: /* Communicate and renumber cones */
1119: PetscCall(PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF));
1120: PetscCall(PetscFree(remoteOffsets));
1121: PetscCall(DMPlexGetCones(dm, &cones));
1122: if (original) {
1123: PetscInt numCones;
1125: PetscCall(PetscSectionGetStorageSize(originalConeSection, &numCones));
1126: PetscCall(PetscMalloc1(numCones, &globCones));
1127: PetscCall(ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones));
1128: } else {
1129: globCones = cones;
1130: }
1131: PetscCall(DMPlexGetCones(dmParallel, &newCones));
1132: PetscCall(PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones, MPI_REPLACE));
1133: PetscCall(PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones, MPI_REPLACE));
1134: if (original) PetscCall(PetscFree(globCones));
1135: PetscCall(PetscSectionGetStorageSize(newConeSection, &newConesSize));
1136: PetscCall(ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones));
1137: if (PetscDefined(USE_DEBUG)) {
1138: PetscBool valid = PETSC_TRUE;
1139: for (PetscInt p = 0; p < newConesSize; ++p) {
1140: if (newCones[p] < 0) {
1141: valid = PETSC_FALSE;
1142: PetscCall(PetscPrintf(PETSC_COMM_SELF, "[%d] Point %" PetscInt_FMT " not in overlap SF\n", PetscGlobalRank, p));
1143: }
1144: }
1145: PetscCheck(valid, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
1146: }
1147: PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-cones_view", &flg));
1148: if (flg) {
1149: PetscCall(PetscPrintf(comm, "Serial Cone Section:\n"));
1150: PetscCall(PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_(comm)));
1151: PetscCall(PetscPrintf(comm, "Parallel Cone Section:\n"));
1152: PetscCall(PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_(comm)));
1153: PetscCall(PetscPrintf(comm, "Migration SF:\n"));
1154: PetscCall(PetscSFView(coneSF, NULL));
1155: if (original) {
1156: PetscViewer viewer;
1158: PetscCall(PetscPrintf(comm, "Serial Renumbering:\n"));
1159: PetscCall(PetscViewerGetSubViewer(PETSC_VIEWER_STDOUT_(comm), PETSC_COMM_SELF, &viewer));
1160: PetscCall(ISLocalToGlobalMappingView(original, viewer));
1161: PetscCall(PetscViewerRestoreSubViewer(PETSC_VIEWER_STDOUT_(comm), PETSC_COMM_SELF, &viewer));
1162: }
1163: PetscCall(PetscPrintf(comm, "Parallel Renumbering:\n"));
1164: PetscCall(ISLocalToGlobalMappingView(renumbering, PETSC_VIEWER_STDOUT_(comm)));
1165: }
1166: PetscCall(DMPlexGetConeOrientations(dm, &cones));
1167: PetscCall(DMPlexGetConeOrientations(dmParallel, &newCones));
1168: PetscCall(PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones, MPI_REPLACE));
1169: PetscCall(PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones, MPI_REPLACE));
1170: PetscCall(PetscSFDestroy(&coneSF));
1171: PetscCall(PetscLogEventEnd(DMPLEX_DistributeCones, dm, 0, 0, 0));
1172: /* Create supports and stratify DMPlex */
1173: {
1174: PetscInt pStart, pEnd;
1176: PetscCall(PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd));
1177: PetscCall(PetscSectionSetChart(pmesh->supportSection, pStart, pEnd));
1178: }
1179: PetscCall(DMPlexSymmetrize(dmParallel));
1180: PetscCall(DMPlexStratify(dmParallel));
1181: {
1182: PetscBool useCone, useClosure, useAnchors;
1184: PetscCall(DMGetBasicAdjacency(dm, &useCone, &useClosure));
1185: PetscCall(DMSetBasicAdjacency(dmParallel, useCone, useClosure));
1186: PetscCall(DMPlexGetAdjacencyUseAnchors(dm, &useAnchors));
1187: PetscCall(DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors));
1188: }
1189: PetscFunctionReturn(PETSC_SUCCESS);
1190: }
1192: static PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel)
1193: {
1194: MPI_Comm comm;
1195: DM cdm, cdmParallel;
1196: PetscSection originalCoordSection, newCoordSection;
1197: Vec originalCoordinates, newCoordinates;
1198: PetscInt bs;
1199: PetscBool sparse;
1200: const char *name;
1201: const PetscReal *maxCell, *Lstart, *L;
1203: PetscFunctionBegin;
1207: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1208: PetscCall(DMGetCoordinateDM(dm, &cdm));
1209: PetscCall(DMGetCoordinateDM(dmParallel, &cdmParallel));
1210: PetscCall(DMCopyDisc(cdm, cdmParallel));
1211: PetscCall(DMGetCoordinateSection(dm, &originalCoordSection));
1212: PetscCall(DMGetCoordinateSection(dmParallel, &newCoordSection));
1213: PetscCall(DMGetCoordinatesLocal(dm, &originalCoordinates));
1214: if (originalCoordinates) {
1215: PetscCall(VecCreate(PETSC_COMM_SELF, &newCoordinates));
1216: PetscCall(PetscObjectGetName((PetscObject)originalCoordinates, &name));
1217: PetscCall(PetscObjectSetName((PetscObject)newCoordinates, name));
1219: PetscCall(DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates));
1220: PetscCall(DMSetCoordinatesLocal(dmParallel, newCoordinates));
1221: PetscCall(VecGetBlockSize(originalCoordinates, &bs));
1222: PetscCall(VecSetBlockSize(newCoordinates, bs));
1223: PetscCall(VecDestroy(&newCoordinates));
1224: }
1226: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1227: PetscCall(DMGetPeriodicity(dm, &maxCell, &Lstart, &L));
1228: PetscCall(DMSetPeriodicity(dmParallel, maxCell, Lstart, L));
1229: PetscCall(DMGetSparseLocalize(dm, &sparse));
1230: PetscCall(DMSetSparseLocalize(dmParallel, sparse));
1231: PetscCall(DMGetCellCoordinateDM(dm, &cdm));
1232: if (cdm) {
1233: PetscCall(DMClone(dmParallel, &cdmParallel));
1234: PetscCall(DMSetCellCoordinateDM(dmParallel, cdmParallel));
1235: PetscCall(DMCopyDisc(cdm, cdmParallel));
1236: PetscCall(DMDestroy(&cdmParallel));
1237: PetscCall(DMGetCellCoordinateSection(dm, &originalCoordSection));
1238: PetscCall(DMGetCellCoordinatesLocal(dm, &originalCoordinates));
1239: PetscCall(PetscSectionCreate(comm, &newCoordSection));
1240: if (originalCoordinates) {
1241: PetscCall(VecCreate(PETSC_COMM_SELF, &newCoordinates));
1242: PetscCall(PetscObjectGetName((PetscObject)originalCoordinates, &name));
1243: PetscCall(PetscObjectSetName((PetscObject)newCoordinates, name));
1245: PetscCall(DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates));
1246: PetscCall(VecGetBlockSize(originalCoordinates, &bs));
1247: PetscCall(VecSetBlockSize(newCoordinates, bs));
1248: PetscCall(DMSetCellCoordinateSection(dmParallel, bs, newCoordSection));
1249: PetscCall(DMSetCellCoordinatesLocal(dmParallel, newCoordinates));
1250: PetscCall(VecDestroy(&newCoordinates));
1251: }
1252: PetscCall(PetscSectionDestroy(&newCoordSection));
1253: }
1254: PetscFunctionReturn(PETSC_SUCCESS);
1255: }
1257: static PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel)
1258: {
1259: DM_Plex *mesh = (DM_Plex *)dm->data;
1260: MPI_Comm comm;
1261: DMLabel depthLabel;
1262: PetscMPIInt rank;
1263: PetscInt depth, d, numLabels, numLocalLabels, l;
1264: PetscBool hasLabels = PETSC_FALSE, lsendDepth, sendDepth;
1265: PetscObjectState depthState = -1;
1267: PetscFunctionBegin;
1271: PetscCall(PetscLogEventBegin(DMPLEX_DistributeLabels, dm, 0, 0, 0));
1272: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1273: PetscCallMPI(MPI_Comm_rank(comm, &rank));
1275: /* If the user has changed the depth label, communicate it instead */
1276: PetscCall(DMPlexGetDepth(dm, &depth));
1277: PetscCall(DMPlexGetDepthLabel(dm, &depthLabel));
1278: if (depthLabel) PetscCall(PetscObjectStateGet((PetscObject)depthLabel, &depthState));
1279: lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE;
1280: PetscCallMPI(MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPIU_BOOL, MPI_LOR, comm));
1281: if (sendDepth) {
1282: PetscCall(DMPlexGetDepthLabel(dmParallel, &dmParallel->depthLabel));
1283: PetscCall(DMRemoveLabelBySelf(dmParallel, &dmParallel->depthLabel, PETSC_FALSE));
1284: }
1285: /* Everyone must have either the same number of labels, or none */
1286: PetscCall(DMGetNumLabels(dm, &numLocalLabels));
1287: numLabels = numLocalLabels;
1288: PetscCallMPI(MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm));
1289: if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
1290: for (l = 0; l < numLabels; ++l) {
1291: DMLabel label = NULL, labelNew = NULL;
1292: PetscBool isDepth, lisOutput = PETSC_TRUE, isOutput;
1293: const char *name = NULL;
1295: if (hasLabels) {
1296: PetscCall(DMGetLabelByNum(dm, l, &label));
1297: /* Skip "depth" because it is recreated */
1298: PetscCall(PetscObjectGetName((PetscObject)label, &name));
1299: PetscCall(PetscStrcmp(name, "depth", &isDepth));
1300: } else {
1301: isDepth = PETSC_FALSE;
1302: }
1303: PetscCallMPI(MPI_Bcast(&isDepth, 1, MPIU_BOOL, 0, comm));
1304: if (isDepth && !sendDepth) continue;
1305: PetscCall(DMLabelDistribute(label, migrationSF, &labelNew));
1306: if (isDepth) {
1307: /* Put in any missing strata which can occur if users are managing the depth label themselves */
1308: PetscInt gdepth;
1310: PetscCallMPI(MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm));
1311: PetscCheck(!(depth >= 0) || !(gdepth != depth), PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %" PetscInt_FMT " != %" PetscInt_FMT, depth, gdepth);
1312: for (d = 0; d <= gdepth; ++d) {
1313: PetscBool has;
1315: PetscCall(DMLabelHasStratum(labelNew, d, &has));
1316: if (!has) PetscCall(DMLabelAddStratum(labelNew, d));
1317: }
1318: }
1319: PetscCall(DMAddLabel(dmParallel, labelNew));
1320: /* Put the output flag in the new label */
1321: if (hasLabels) PetscCall(DMGetLabelOutput(dm, name, &lisOutput));
1322: PetscCallMPI(MPIU_Allreduce(&lisOutput, &isOutput, 1, MPIU_BOOL, MPI_LAND, comm));
1323: PetscCall(PetscObjectGetName((PetscObject)labelNew, &name));
1324: PetscCall(DMSetLabelOutput(dmParallel, name, isOutput));
1325: PetscCall(DMLabelDestroy(&labelNew));
1326: }
1327: {
1328: DMLabel ctLabel;
1330: PetscCall(DMPlexGetCellTypeLabel(dmParallel, &ctLabel));
1331: // Check that each point has a valid cell type
1332: if (PetscDefined(USE_DEBUG)) {
1333: PetscInt pStart, pEnd;
1334: PetscBool defined = PETSC_TRUE, gdefined;
1336: PetscCall(DMPlexGetChart(dmParallel, &pStart, &pEnd));
1337: for (PetscInt p = pStart; p < pEnd; ++p) {
1338: PetscInt val;
1340: PetscCall(DMLabelGetValue(ctLabel, p, &val));
1341: if (val < 0) {
1342: defined = PETSC_FALSE;
1343: PetscCall(PetscPrintf(PETSC_COMM_SELF, "[%d]Point %" PetscInt_FMT " has no cell type\n", rank, p));
1344: }
1345: }
1346: PetscCallMPI(MPIU_Allreduce(&defined, &gdefined, 1, MPIU_BOOL, MPI_LAND, comm));
1347: PetscCheck(gdefined, comm, PETSC_ERR_PLIB, "Not all points have a valid cell type");
1348: }
1349: // Reset label for fast lookup
1350: PetscCall(DMLabelMakeAllInvalid_Internal(ctLabel));
1351: }
1352: PetscCall(PetscLogEventEnd(DMPLEX_DistributeLabels, dm, 0, 0, 0));
1353: PetscFunctionReturn(PETSC_SUCCESS);
1354: }
1356: static PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
1357: {
1358: DM_Plex *mesh = (DM_Plex *)dm->data;
1359: DM_Plex *pmesh = (DM_Plex *)dmParallel->data;
1360: MPI_Comm comm;
1361: DM refTree;
1362: PetscSection origParentSection, newParentSection;
1363: PetscInt *origParents, *origChildIDs;
1364: PetscBool flg;
1366: PetscFunctionBegin;
1369: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1371: /* Set up tree */
1372: PetscCall(DMPlexGetReferenceTree(dm, &refTree));
1373: PetscCall(DMPlexSetReferenceTree(dmParallel, refTree));
1374: PetscCall(DMPlexGetTree(dm, &origParentSection, &origParents, &origChildIDs, NULL, NULL));
1375: if (origParentSection) {
1376: PetscInt pStart, pEnd;
1377: PetscInt *newParents, *newChildIDs, *globParents;
1378: PetscInt *remoteOffsetsParents, newParentSize;
1379: PetscSF parentSF;
1381: PetscCall(DMPlexGetChart(dmParallel, &pStart, &pEnd));
1382: PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel), &newParentSection));
1383: PetscCall(PetscSectionSetChart(newParentSection, pStart, pEnd));
1384: PetscCall(PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection));
1385: PetscCall(PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF));
1386: PetscCall(PetscFree(remoteOffsetsParents));
1387: PetscCall(PetscSectionGetStorageSize(newParentSection, &newParentSize));
1388: PetscCall(PetscMalloc2(newParentSize, &newParents, newParentSize, &newChildIDs));
1389: if (original) {
1390: PetscInt numParents;
1392: PetscCall(PetscSectionGetStorageSize(origParentSection, &numParents));
1393: PetscCall(PetscMalloc1(numParents, &globParents));
1394: PetscCall(ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents));
1395: } else {
1396: globParents = origParents;
1397: }
1398: PetscCall(PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents, MPI_REPLACE));
1399: PetscCall(PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents, MPI_REPLACE));
1400: if (original) PetscCall(PetscFree(globParents));
1401: PetscCall(PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs, MPI_REPLACE));
1402: PetscCall(PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs, MPI_REPLACE));
1403: PetscCall(ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents));
1404: if (PetscDefined(USE_DEBUG)) {
1405: PetscInt p;
1406: PetscBool valid = PETSC_TRUE;
1407: for (p = 0; p < newParentSize; ++p) {
1408: if (newParents[p] < 0) {
1409: valid = PETSC_FALSE;
1410: PetscCall(PetscPrintf(PETSC_COMM_SELF, "Point %" PetscInt_FMT " not in overlap SF\n", p));
1411: }
1412: }
1413: PetscCheck(valid, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
1414: }
1415: PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-parents_view", &flg));
1416: if (flg) {
1417: PetscCall(PetscPrintf(comm, "Serial Parent Section: \n"));
1418: PetscCall(PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_(comm)));
1419: PetscCall(PetscPrintf(comm, "Parallel Parent Section: \n"));
1420: PetscCall(PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_(comm)));
1421: PetscCall(PetscSFView(parentSF, NULL));
1422: }
1423: PetscCall(DMPlexSetTree(dmParallel, newParentSection, newParents, newChildIDs));
1424: PetscCall(PetscSectionDestroy(&newParentSection));
1425: PetscCall(PetscFree2(newParents, newChildIDs));
1426: PetscCall(PetscSFDestroy(&parentSF));
1427: }
1428: pmesh->useAnchors = mesh->useAnchors;
1429: PetscFunctionReturn(PETSC_SUCCESS);
1430: }
1432: /*@
1433: DMPlexSetPartitionBalance - Should distribution of the `DM` attempt to balance the shared point partition?
1435: Input Parameters:
1436: + dm - The `DMPLEX` object
1437: - flg - Balance the partition?
1439: Level: intermediate
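  Example Usage:
  A minimal sketch (assuming an existing serial `DM` named `dm`) that enables balancing before distribution:
.vb
  DM      dmDist;
  PetscSF sfDist;

  PetscCall(DMPlexSetPartitionBalance(dm, PETSC_TRUE));
  PetscCall(DMPlexDistribute(dm, 0, &sfDist, &dmDist));
.ve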
1441: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexGetPartitionBalance()`
1442: @*/
1443: PetscErrorCode DMPlexSetPartitionBalance(DM dm, PetscBool flg)
1444: {
1445: DM_Plex *mesh = (DM_Plex *)dm->data;
1447: PetscFunctionBegin;
1448: mesh->partitionBalance = flg;
1449: PetscFunctionReturn(PETSC_SUCCESS);
1450: }
1452: /*@
1453: DMPlexGetPartitionBalance - Does distribution of the `DM` attempt to balance the shared point partition?
1455: Input Parameter:
1456: . dm - The `DMPLEX` object
1458: Output Parameter:
1459: . flg - Balance the partition?
1461: Level: intermediate
1463: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexSetPartitionBalance()`
1464: @*/
1465: PetscErrorCode DMPlexGetPartitionBalance(DM dm, PetscBool *flg)
1466: {
1467: DM_Plex *mesh = (DM_Plex *)dm->data;
1469: PetscFunctionBegin;
1471: PetscAssertPointer(flg, 2);
1472: *flg = mesh->partitionBalance;
1473: PetscFunctionReturn(PETSC_SUCCESS);
1474: }
1476: typedef struct {
1477: PetscInt vote, rank, index;
1478: } Petsc3Int;
1480: /* MaxLoc, but carry a third piece of information around */
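/* Semantics: the entry with the larger vote wins; on a vote tie, the entry with the
   smaller rank wins and carries its index along. Callers initialize root entries with
   negative fields (see DMPlexCreatePointSF()) so a root never beats a real leaf bid. */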
1481: static void MPIAPI MaxLocCarry(void *in_, void *inout_, PetscMPIInt *len_, MPI_Datatype *dtype)
1482: {
1483: Petsc3Int *a = (Petsc3Int *)inout_;
1484: Petsc3Int *b = (Petsc3Int *)in_;
1485: PetscInt i, len = *len_;
1486: for (i = 0; i < len; i++) {
1487: if (a[i].vote < b[i].vote) {
1488: a[i].vote = b[i].vote;
1489: a[i].rank = b[i].rank;
1490: a[i].index = b[i].index;
1491: } else if (a[i].vote <= b[i].vote) {
1492: if (a[i].rank >= b[i].rank) {
1493: a[i].rank = b[i].rank;
1494: a[i].index = b[i].index;
1495: }
1496: }
1497: }
1498: }
1500: /*@
1501: DMPlexCreatePointSF - Build a point `PetscSF` from a `PetscSF` describing a point migration
1503: Input Parameters:
1504: + dm - The source `DMPLEX` object
1505: . migrationSF - The star forest that describes the parallel point remapping
1506: - ownership - Flag causing a vote to determine point ownership
1508: Output Parameter:
1509: . pointSF - The star forest describing the point overlap in the remapped `DM`
1511: Level: developer
1513: Note:
1514: Output `pointSF` is guaranteed to have the array of local indices (leaves) sorted.
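  Example Usage:
  A sketch mirroring how this routine is used inside `DMPlexDistribute()` (the migration `PetscSF` `sfMigration` and the distributed `DM` `dmParallel` are assumed to exist):
.vb
  PetscSF sfPoint;

  PetscCall(DMPlexCreatePointSF(dmParallel, sfMigration, PETSC_TRUE, &sfPoint));
  PetscCall(DMSetPointSF(dmParallel, sfPoint));
  PetscCall(PetscSFDestroy(&sfPoint));
.ve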
1516: .seealso: `DMPLEX`, `PetscSF`, `DM`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
1517: @*/
1518: PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF)
1519: {
1520: PetscMPIInt rank, size;
1521: PetscInt p, nroots, nleaves, idx, npointLeaves;
1522: PetscInt *pointLocal;
1523: const PetscInt *leaves;
1524: const PetscSFNode *roots;
1525: PetscSFNode *rootNodes, *leafNodes, *pointRemote;
1526: Vec shifts;
1527: const PetscInt numShifts = 13759;
1528: const PetscScalar *shift = NULL;
1529: const PetscBool shiftDebug = PETSC_FALSE;
1530: PetscBool balance;
1532: PetscFunctionBegin;
1534: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)dm), &rank));
1535: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)dm), &size));
1536: PetscCall(PetscLogEventBegin(DMPLEX_CreatePointSF, dm, 0, 0, 0));
1537: PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)dm), pointSF));
1538: PetscCall(PetscSFSetFromOptions(*pointSF));
1539: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
1541: PetscCall(DMPlexGetPartitionBalance(dm, &balance));
1542: PetscCall(PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots));
1543: PetscCall(PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes));
1544: if (ownership) {
1545: MPI_Op op;
1546: MPI_Datatype datatype;
1547: Petsc3Int *rootVote = NULL, *leafVote = NULL;
1549: /* If balancing, we compute a random cyclic shift of the rank for each remote point. That way, the max will evenly distribute among ranks. */
1550: if (balance) {
1551: PetscRandom r;
1553: PetscCall(PetscRandomCreate(PETSC_COMM_SELF, &r));
1554: PetscCall(PetscRandomSetInterval(r, 0, 2467 * size));
1555: PetscCall(VecCreate(PETSC_COMM_SELF, &shifts));
1556: PetscCall(VecSetSizes(shifts, numShifts, numShifts));
1557: PetscCall(VecSetType(shifts, VECSTANDARD));
1558: PetscCall(VecSetRandom(shifts, r));
1559: PetscCall(PetscRandomDestroy(&r));
1560: PetscCall(VecGetArrayRead(shifts, &shift));
1561: }
1563: PetscCall(PetscMalloc1(nroots, &rootVote));
1564: PetscCall(PetscMalloc1(nleaves, &leafVote));
1565: /* Point ownership vote: Process with highest rank owns shared points */
1566: for (p = 0; p < nleaves; ++p) {
1567: if (shiftDebug) {
1568: PetscCall(PetscSynchronizedPrintf(PetscObjectComm((PetscObject)dm), "[%d] Point %" PetscInt_FMT " RemotePoint %" PetscInt_FMT " Shift %" PetscInt_FMT " MyRank %" PetscInt_FMT "\n", rank, leaves ? leaves[p] : p, roots[p].index,
1569: (PetscInt)PetscRealPart(shift[roots[p].index % numShifts]), (rank + (shift ? (PetscInt)PetscRealPart(shift[roots[p].index % numShifts]) : 0)) % size));
1570: }
1571: /* Either put in a bid or we know we own it */
1572: leafVote[p].vote = (rank + (shift ? (PetscInt)PetscRealPart(shift[roots[p].index % numShifts]) : 0)) % size;
1573: leafVote[p].rank = rank;
1574: leafVote[p].index = p;
1575: }
1576: for (p = 0; p < nroots; p++) {
1577: /* Roots must not participate in the reduction; flag them so that MAXLOC does not use them */
1578: rootVote[p].vote = -3;
1579: rootVote[p].rank = -3;
1580: rootVote[p].index = -3;
1581: }
1582: PetscCallMPI(MPI_Type_contiguous(3, MPIU_INT, &datatype));
1583: PetscCallMPI(MPI_Type_commit(&datatype));
1584: PetscCallMPI(MPI_Op_create(&MaxLocCarry, 1, &op));
1585: PetscCall(PetscSFReduceBegin(migrationSF, datatype, leafVote, rootVote, op));
1586: PetscCall(PetscSFReduceEnd(migrationSF, datatype, leafVote, rootVote, op));
1587: PetscCallMPI(MPI_Op_free(&op));
1588: PetscCallMPI(MPI_Type_free(&datatype));
1589: for (p = 0; p < nroots; p++) {
1590: rootNodes[p].rank = rootVote[p].rank;
1591: rootNodes[p].index = rootVote[p].index;
1592: }
1593: PetscCall(PetscFree(leafVote));
1594: PetscCall(PetscFree(rootVote));
1595: } else {
1596: for (p = 0; p < nroots; p++) {
1597: rootNodes[p].index = -1;
1598: rootNodes[p].rank = rank;
1599: }
1600: for (p = 0; p < nleaves; p++) {
1601: /* Write new local id into old location */
1602: if (roots[p].rank == rank) rootNodes[roots[p].index].index = leaves ? leaves[p] : p;
1603: }
1604: }
1605: PetscCall(PetscSFBcastBegin(migrationSF, MPIU_SF_NODE, rootNodes, leafNodes, MPI_REPLACE));
1606: PetscCall(PetscSFBcastEnd(migrationSF, MPIU_SF_NODE, rootNodes, leafNodes, MPI_REPLACE));
1608: for (npointLeaves = 0, p = 0; p < nleaves; p++) {
1609: if (leafNodes[p].rank != rank) npointLeaves++;
1610: }
1611: PetscCall(PetscMalloc1(npointLeaves, &pointLocal));
1612: PetscCall(PetscMalloc1(npointLeaves, &pointRemote));
1613: for (idx = 0, p = 0; p < nleaves; p++) {
1614: if (leafNodes[p].rank != rank) {
1615: /* Note that pointLocal is automatically sorted as it is a sublist of 0, ..., nleaves-1 */
1616: pointLocal[idx] = p;
1617: pointRemote[idx] = leafNodes[p];
1618: idx++;
1619: }
1620: }
1621: if (shift) {
1622: PetscCall(VecRestoreArrayRead(shifts, &shift));
1623: PetscCall(VecDestroy(&shifts));
1624: }
1625: if (shiftDebug) PetscCall(PetscSynchronizedFlush(PetscObjectComm((PetscObject)dm), PETSC_STDOUT));
1626: PetscCall(PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER));
1627: PetscCall(PetscFree2(rootNodes, leafNodes));
1628: PetscCall(PetscLogEventEnd(DMPLEX_CreatePointSF, dm, 0, 0, 0));
1629: if (PetscDefined(USE_DEBUG)) PetscCall(DMPlexCheckPointSF(dm, *pointSF, PETSC_FALSE));
1630: PetscFunctionReturn(PETSC_SUCCESS);
1631: }
1633: /*@
1634: DMPlexMigrate - Migrates internal `DM` data over the supplied star forest
1636: Collective
1638: Input Parameters:
1639: + dm - The source `DMPLEX` object
1640: - sf - The star forest communication context describing the migration pattern
1642: Output Parameter:
1643: . targetDM - The target `DMPLEX` object
1645: Level: intermediate
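  Example Usage:
  A sketch of migrating mesh data over a given migration `PetscSF` (assuming `dm` and `sfMigration` already exist):
.vb
  DM dmTarget;

  PetscCall(DMPlexCreate(PetscObjectComm((PetscObject)dm), &dmTarget));
  PetscCall(DMPlexMigrate(dm, sfMigration, dmTarget));
.ve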
1647: .seealso: `DMPLEX`, `PetscSF`, `DM`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
1648: @*/
1649: PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM)
1650: {
1651: MPI_Comm comm;
1652: PetscInt dim, cdim, nroots;
1653: PetscSF sfPoint;
1654: ISLocalToGlobalMapping ltogMigration;
1655: ISLocalToGlobalMapping ltogOriginal = NULL;
1657: PetscFunctionBegin;
1659: PetscCall(PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0));
1660: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1661: PetscCall(DMGetDimension(dm, &dim));
1662: PetscCall(DMSetDimension(targetDM, dim));
1663: PetscCall(DMGetCoordinateDim(dm, &cdim));
1664: PetscCall(DMSetCoordinateDim(targetDM, cdim));
1666: /* Check for a one-to-all distribution pattern */
1667: PetscCall(DMGetPointSF(dm, &sfPoint));
1668: PetscCall(PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL));
1669: if (nroots >= 0) {
1670: IS isOriginal;
1671: PetscInt n, size, nleaves;
1672: PetscInt *numbering_orig, *numbering_new;
1674: /* Get the original point numbering */
1675: PetscCall(DMPlexCreatePointNumbering(dm, &isOriginal));
1676: PetscCall(ISLocalToGlobalMappingCreateIS(isOriginal, &ltogOriginal));
1677: PetscCall(ISLocalToGlobalMappingGetSize(ltogOriginal, &size));
1678: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt **)&numbering_orig));
1679: /* Convert to positive global numbers */
1680: for (n = 0; n < size; n++) {
1681: if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n] + 1);
1682: }
1683: /* Derive the new local-to-global mapping from the old one */
1684: PetscCall(PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL));
1685: PetscCall(PetscMalloc1(nleaves, &numbering_new));
1686: PetscCall(PetscSFBcastBegin(sf, MPIU_INT, numbering_orig, numbering_new, MPI_REPLACE));
1687: PetscCall(PetscSFBcastEnd(sf, MPIU_INT, numbering_orig, numbering_new, MPI_REPLACE));
1688: PetscCall(ISLocalToGlobalMappingCreate(comm, 1, nleaves, numbering_new, PETSC_OWN_POINTER, &ltogMigration));
1689: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltogOriginal, (const PetscInt **)&numbering_orig));
1690: PetscCall(ISDestroy(&isOriginal));
1691: } else {
1692: /* One-to-all distribution pattern: We can derive LToG from SF */
1693: PetscCall(ISLocalToGlobalMappingCreateSF(sf, 0, &ltogMigration));
1694: }
1695: PetscCall(PetscObjectSetName((PetscObject)ltogMigration, "Point renumbering for DM migration"));
1696: PetscCall(ISLocalToGlobalMappingViewFromOptions(ltogMigration, (PetscObject)dm, "-partition_view"));
1697: /* Migrate DM data to target DM */
1698: PetscCall(DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM));
1699: PetscCall(DMPlexDistributeLabels(dm, sf, targetDM));
1700: PetscCall(DMPlexDistributeCoordinates(dm, sf, targetDM));
1701: PetscCall(DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM));
1702: PetscCall(ISLocalToGlobalMappingDestroy(&ltogOriginal));
1703: PetscCall(ISLocalToGlobalMappingDestroy(&ltogMigration));
1704: PetscCall(PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0));
1705: PetscFunctionReturn(PETSC_SUCCESS);
1706: }
1708: /*@
1709: DMPlexRemapMigrationSF - Rewrite the distribution SF to account for overlap
1711: Collective
1713: Input Parameters:
1714: + sfOverlap - The `PetscSF` object just for the overlap
1715: - sfMigration - The original distribution `PetscSF` object
1717: Output Parameter:
1718: . sfMigrationNew - A rewritten `PetscSF` object that incorporates the overlap
1720: Level: developer
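  Example Usage:
  A sketch mirroring how `DMPlexDistribute()` folds an overlap `PetscSF` into the original migration `PetscSF` (both SFs are assumed to exist):
.vb
  PetscSF sfMigrationNew;

  PetscCall(DMPlexRemapMigrationSF(sfOverlap, sfMigration, &sfMigrationNew));
  PetscCall(PetscSFDestroy(&sfMigration));
  sfMigration = sfMigrationNew;
.ve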
1722: .seealso: `DMPLEX`, `DM`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`, `DMPlexGetOverlap()`
1723: @*/
1724: PetscErrorCode DMPlexRemapMigrationSF(PetscSF sfOverlap, PetscSF sfMigration, PetscSF *sfMigrationNew)
1725: {
1726: PetscSFNode *newRemote, *permRemote = NULL;
1727: const PetscInt *oldLeaves;
1728: const PetscSFNode *oldRemote;
1729: PetscInt nroots, nleaves, noldleaves;
1731: PetscFunctionBegin;
1732: PetscCall(PetscSFGetGraph(sfMigration, &nroots, &noldleaves, &oldLeaves, &oldRemote));
1733: PetscCall(PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL));
1734: PetscCall(PetscMalloc1(nleaves, &newRemote));
1735: /* oldRemote: original root point mapping to original leaf point
1736: newRemote: original leaf point mapping to overlapped leaf point */
1737: if (oldLeaves) {
1738: /* After stratification, the migration remotes may not be in root (canonical) order, so we reorder using the leaf numbering */
1739: PetscCall(PetscMalloc1(noldleaves, &permRemote));
1740: for (PetscInt l = 0; l < noldleaves; ++l) permRemote[oldLeaves[l]] = oldRemote[l];
1741: oldRemote = permRemote;
1742: }
1743: PetscCall(PetscSFBcastBegin(sfOverlap, MPIU_SF_NODE, oldRemote, newRemote, MPI_REPLACE));
1744: PetscCall(PetscSFBcastEnd(sfOverlap, MPIU_SF_NODE, oldRemote, newRemote, MPI_REPLACE));
1745: PetscCall(PetscFree(permRemote));
1746: PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)sfOverlap), sfMigrationNew));
1747: PetscCall(PetscSFSetGraph(*sfMigrationNew, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER));
1748: PetscFunctionReturn(PETSC_SUCCESS);
1749: }
1751: /* For DG-like methods, the code below is equivalent to (but faster than) calling
1752: DMPlexCreateClosureIndex(dm, section) */
1753: static PetscErrorCode DMPlexCreateClosureIndex_CELL(DM dm, PetscSection section)
1754: {
1755: PetscSection clSection;
1756: IS clPoints;
1757: PetscInt pStart, pEnd, point;
1758: PetscInt *closure, pos = 0;
1760: PetscFunctionBegin;
1761: if (!section) PetscCall(DMGetLocalSection(dm, &section));
1762: PetscCall(DMPlexGetHeightStratum(dm, 0, &pStart, &pEnd));
1763: PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)dm), &clSection));
1764: PetscCall(PetscSectionSetChart(clSection, pStart, pEnd));
1765: PetscCall(PetscMalloc1((2 * (pEnd - pStart)), &closure));
1766: for (point = pStart; point < pEnd; point++) {
1767: PetscCall(PetscSectionSetDof(clSection, point, 2));
1768: closure[pos++] = point; /* point */
1769: closure[pos++] = 0; /* orientation */
1770: }
1771: PetscCall(PetscSectionSetUp(clSection));
1772: PetscCall(ISCreateGeneral(PETSC_COMM_SELF, 2 * (pEnd - pStart), closure, PETSC_OWN_POINTER, &clPoints));
1773: PetscCall(PetscSectionSetClosureIndex(section, (PetscObject)dm, clSection, clPoints));
1774: PetscCall(PetscSectionDestroy(&clSection));
1775: PetscCall(ISDestroy(&clPoints));
1776: PetscFunctionReturn(PETSC_SUCCESS);
1777: }
1779: static PetscErrorCode DMPlexDistribute_Multistage(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel)
1780: {
1781: MPI_Comm comm = PetscObjectComm((PetscObject)dm);
1782: PetscPartitioner part;
1783: PetscBool balance, printHeader;
1784: PetscInt nl = 0;
1786: PetscFunctionBegin;
1787: if (sf) *sf = NULL;
1788: *dmParallel = NULL;
1790: PetscCall(DMPlexGetPartitioner(dm, &part));
1791: printHeader = part->printHeader;
1792: PetscCall(DMPlexGetPartitionBalance(dm, &balance));
1793: PetscCall(PetscPartitionerSetUp(part));
1794: PetscCall(PetscLogEventBegin(DMPLEX_DistributeMultistage, dm, 0, 0, 0));
1795: PetscCall(PetscPartitionerMultistageGetStages_Multistage(part, &nl, NULL));
1796: for (PetscInt l = 0; l < nl; l++) {
1797: PetscInt ovl = (l < nl - 1) ? 0 : overlap;
1798: PetscSF sfDist;
1799: DM dmDist;
1801: PetscCall(DMPlexSetPartitionBalance(dm, balance));
1802: PetscCall(DMViewFromOptions(dm, (PetscObject)part, "-petscpartitioner_multistage_dm_view"));
1803: PetscCall(PetscPartitionerMultistageSetStage_Multistage(part, l, (PetscObject)dm));
1804: PetscCall(DMPlexSetPartitioner(dm, part));
1805: PetscCall(DMPlexDistribute(dm, ovl, &sfDist, &dmDist));
1806: PetscCheck(dmDist, comm, PETSC_ERR_PLIB, "No distributed DM generated (stage %" PetscInt_FMT ")", l);
1807: PetscCheck(sfDist, comm, PETSC_ERR_PLIB, "No SF generated (stage %" PetscInt_FMT ")", l);
1808: part->printHeader = PETSC_FALSE;
1810: /* Propagate cell weights to the next level (if any, and if not the final dm) */
1811: if (part->usevwgt && dm->localSection && l != nl - 1) {
1812: PetscSection oldSection, newSection;
1814: PetscCall(DMGetLocalSection(dm, &oldSection));
1815: PetscCall(DMGetLocalSection(dmDist, &newSection));
1816: PetscCall(PetscSFDistributeSection(sfDist, oldSection, NULL, newSection));
1817: PetscCall(DMPlexCreateClosureIndex_CELL(dmDist, newSection));
1818: }
1819: if (!sf) PetscCall(PetscSFDestroy(&sfDist));
1820: if (l > 0) PetscCall(DMDestroy(&dm));
1822: if (sf && *sf) {
1823: PetscSF sfA = *sf, sfB = sfDist;
1824: PetscCall(PetscSFCompose(sfA, sfB, &sfDist));
1825: PetscCall(PetscSFDestroy(&sfA));
1826: PetscCall(PetscSFDestroy(&sfB));
1827: }
1829: if (sf) *sf = sfDist;
1830: dm = *dmParallel = dmDist;
1831: }
1832: PetscCall(PetscPartitionerMultistageSetStage_Multistage(part, 0, NULL)); /* reset */
1833: PetscCall(PetscLogEventEnd(DMPLEX_DistributeMultistage, dm, 0, 0, 0));
1834: part->printHeader = printHeader;
1835: PetscFunctionReturn(PETSC_SUCCESS);
1836: }
1838: /*@
1839: DMPlexDistribute - Distributes the mesh and any associated sections.
1841: Collective
1843: Input Parameters:
1844: + dm - The original `DMPLEX` object
1845: - overlap - The overlap of partitions, 0 is the default
1847: Output Parameters:
1848: + sf - The `PetscSF` used for point distribution, or `NULL` if not needed
1849: - dmParallel - The distributed `DMPLEX` object
1851: Level: intermediate
1853: Note:
1854: If the mesh was not distributed, the output `dmParallel` will be `NULL`.
1856: The user can control the definition of adjacency for the mesh using `DMSetAdjacency()`. They should choose the combination appropriate for the function
1857: representation on the mesh.
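  Example Usage:
  The common redistribution idiom, replacing a serial mesh with its distributed counterpart (a sketch; error handling elided):
.vb
  DM dmDist;

  PetscCall(DMPlexDistribute(dm, 0, NULL, &dmDist));
  if (dmDist) {
    PetscCall(DMDestroy(&dm));
    dm = dmDist;
  }
.ve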
1859: .seealso: `DMPLEX`, `DM`, `DMPlexCreate()`, `DMSetAdjacency()`, `DMPlexGetOverlap()`
1860: @*/
1861: PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PeOp PetscSF *sf, DM *dmParallel)
1862: {
1863: MPI_Comm comm;
1864: PetscPartitioner partitioner;
1865: IS cellPart;
1866: PetscSection cellPartSection;
1867: DM dmCoord;
1868: DMLabel lblPartition, lblMigration;
1869: PetscSF sfMigration, sfStratified, sfPoint;
1870: PetscBool flg, balance, isms;
1871: PetscMPIInt rank, size;
1873: PetscFunctionBegin;
1876: if (sf) PetscAssertPointer(sf, 3);
1877: PetscAssertPointer(dmParallel, 4);
1879: if (sf) *sf = NULL;
1880: *dmParallel = NULL;
1881: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1882: PetscCallMPI(MPI_Comm_rank(comm, &rank));
1883: PetscCallMPI(MPI_Comm_size(comm, &size));
1884: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
1886: /* Handle multistage partitioner */
1887: PetscCall(DMPlexGetPartitioner(dm, &partitioner));
1888: PetscCall(PetscObjectTypeCompare((PetscObject)partitioner, PETSCPARTITIONERMULTISTAGE, &isms));
1889: if (isms) {
1890: PetscObject stagedm;
1892: PetscCall(PetscPartitionerMultistageGetStage_Multistage(partitioner, NULL, &stagedm));
1893: if (!stagedm) { /* No stage dm present, start the multistage algorithm */
1894: PetscCall(DMPlexDistribute_Multistage(dm, overlap, sf, dmParallel));
1895: PetscFunctionReturn(PETSC_SUCCESS);
1896: }
1897: }
1898: PetscCall(PetscLogEventBegin(DMPLEX_Distribute, dm, 0, 0, 0));
1899: /* Create cell partition */
1900: PetscCall(PetscLogEventBegin(DMPLEX_Partition, dm, 0, 0, 0));
1901: PetscCall(PetscSectionCreate(comm, &cellPartSection));
1902: PetscCall(PetscPartitionerDMPlexPartition(partitioner, dm, NULL, cellPartSection, &cellPart));
1903: PetscCall(PetscLogEventBegin(DMPLEX_PartSelf, dm, 0, 0, 0));
1904: {
1905: /* Convert partition to DMLabel */
1906: IS is;
1907: PetscHSetI ht;
1908: const PetscInt *points;
1909: PetscInt *iranks;
1910: PetscInt pStart, pEnd, proc, npoints, poff = 0, nranks;
1912: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Point Partition", &lblPartition));
1913: /* Preallocate strata */
1914: PetscCall(PetscHSetICreate(&ht));
1915: PetscCall(PetscSectionGetChart(cellPartSection, &pStart, &pEnd));
1916: for (proc = pStart; proc < pEnd; proc++) {
1917: PetscCall(PetscSectionGetDof(cellPartSection, proc, &npoints));
1918: if (npoints) PetscCall(PetscHSetIAdd(ht, proc));
1919: }
1920: PetscCall(PetscHSetIGetSize(ht, &nranks));
1921: PetscCall(PetscMalloc1(nranks, &iranks));
1922: PetscCall(PetscHSetIGetElems(ht, &poff, iranks));
1923: PetscCall(PetscHSetIDestroy(&ht));
1924: PetscCall(DMLabelAddStrata(lblPartition, nranks, iranks));
1925: PetscCall(PetscFree(iranks));
1926: /* Inline DMPlexPartitionLabelClosure() */
1927: PetscCall(ISGetIndices(cellPart, &points));
1928: PetscCall(PetscSectionGetChart(cellPartSection, &pStart, &pEnd));
1929: for (proc = pStart; proc < pEnd; proc++) {
1930: PetscCall(PetscSectionGetDof(cellPartSection, proc, &npoints));
1931: if (!npoints) continue;
1932: PetscCall(PetscSectionGetOffset(cellPartSection, proc, &poff));
1933: PetscCall(DMPlexClosurePoints_Private(dm, npoints, points + poff, &is));
1934: PetscCall(DMLabelSetStratumIS(lblPartition, proc, is));
1935: PetscCall(ISDestroy(&is));
1936: }
1937: PetscCall(ISRestoreIndices(cellPart, &points));
1938: }
1939: PetscCall(PetscLogEventEnd(DMPLEX_PartSelf, dm, 0, 0, 0));
1941: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Point migration", &lblMigration));
1942: PetscCall(DMPlexPartitionLabelInvert(dm, lblPartition, NULL, lblMigration));
1943: PetscCall(DMPlexPartitionLabelCreateSF(dm, lblMigration, PETSC_TRUE, &sfMigration));
1944: PetscCall(DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified));
1945: PetscCall(PetscSFDestroy(&sfMigration));
1946: sfMigration = sfStratified;
1947: PetscCall(PetscSFSetUp(sfMigration));
1948: PetscCall(PetscLogEventEnd(DMPLEX_Partition, dm, 0, 0, 0));
1949: PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-partition_view", &flg));
1950: if (flg) {
1951: PetscCall(DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_(comm)));
1952: PetscCall(PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_(comm)));
1953: }
1955: /* Create non-overlapping parallel DM and migrate internal data */
1956: PetscCall(DMPlexCreate(comm, dmParallel));
1957: PetscCall(PetscObjectSetName((PetscObject)*dmParallel, "Parallel Mesh"));
1958: PetscCall(DMPlexMigrate(dm, sfMigration, *dmParallel));
1960: /* Build the point SF without overlap */
1961: PetscCall(DMPlexGetPartitionBalance(dm, &balance));
1962: PetscCall(DMPlexSetPartitionBalance(*dmParallel, balance));
1963: PetscCall(DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint));
1964: PetscCall(DMSetPointSF(*dmParallel, sfPoint));
1965: PetscCall(DMPlexMigrateIsoperiodicFaceSF_Internal(dm, *dmParallel, sfMigration));
1966: PetscCall(DMGetCoordinateDM(*dmParallel, &dmCoord));
1967: if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
1968: if (flg) PetscCall(PetscSFView(sfPoint, NULL));
1970: if (overlap > 0) {
1971: DM dmOverlap;
1972: PetscSF sfOverlap, sfOverlapPoint;
1974: /* Add the partition overlap to the distributed DM */
1975: PetscCall(DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap));
1976: PetscCall(DMDestroy(dmParallel));
1977: *dmParallel = dmOverlap;
1978: if (flg) {
1979: PetscCall(PetscPrintf(comm, "Overlap Migration SF:\n"));
1980: PetscCall(PetscSFView(sfOverlap, NULL));
1981: }
1983: /* Re-map the migration SF to establish the full migration pattern */
1984: PetscCall(DMPlexRemapMigrationSF(sfOverlap, sfMigration, &sfOverlapPoint));
1985: PetscCall(PetscSFDestroy(&sfOverlap));
1986: PetscCall(PetscSFDestroy(&sfMigration));
1987: sfMigration = sfOverlapPoint;
1988: }
1989: /* Cleanup Partition */
1990: PetscCall(DMLabelDestroy(&lblPartition));
1991: PetscCall(DMLabelDestroy(&lblMigration));
1992: PetscCall(PetscSectionDestroy(&cellPartSection));
1993: PetscCall(ISDestroy(&cellPart));
1994: PetscCall(DMPlexCopy_Internal(dm, PETSC_TRUE, PETSC_FALSE, *dmParallel));
1995: // Create sfNatural, need discretization information
1996: PetscCall(DMCopyDisc(dm, *dmParallel));
1997: if (dm->localSection) {
1998: PetscSection psection;
1999: PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)dm), &psection));
2000: PetscCall(PetscSFDistributeSection(sfMigration, dm->localSection, NULL, psection));
2001: PetscCall(DMSetLocalSection(*dmParallel, psection));
2002: PetscCall(PetscSectionDestroy(&psection));
2003: }
2004: if (dm->useNatural) {
2005: PetscSection section;
2007: PetscCall(DMSetUseNatural(*dmParallel, PETSC_TRUE));
2008: PetscCall(DMGetLocalSection(dm, &section));
2010: /* If DM has useNatural = PETSC_TRUE, but no sfNatural is set, the DM's Section is considered natural */
2011: /* sfMigration and sfNatural are respectively the point and dofs SFs mapping to this natural DM */
2012: /* Compose with a previous sfNatural if present */
2013: if (dm->sfNatural) {
2014: PetscCall(DMPlexMigrateGlobalToNaturalSF(dm, *dmParallel, dm->sfNatural, sfMigration, &(*dmParallel)->sfNatural));
2015: } else {
2016: PetscCall(DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural));
2017: }
2018: /* Compose with a previous sfMigration if present */
2019: if (dm->sfMigration) {
2020: PetscSF naturalPointSF;
2022: PetscCall(PetscSFCompose(dm->sfMigration, sfMigration, &naturalPointSF));
2023: PetscCall(PetscSFDestroy(&sfMigration));
2024: sfMigration = naturalPointSF;
2025: }
2026: }
2027: PetscCall(DMPlexCopyFlags(dm, *dmParallel));
2028: /* Cleanup */
2029: if (sf) {
2030: *sf = sfMigration;
2031: } else PetscCall(PetscSFDestroy(&sfMigration));
2032: PetscCall(PetscSFDestroy(&sfPoint));
2033: PetscCall(PetscLogEventEnd(DMPLEX_Distribute, dm, 0, 0, 0));
2034: PetscFunctionReturn(PETSC_SUCCESS);
2035: }
2037: PetscErrorCode DMPlexDistributeOverlap_Internal(DM dm, PetscInt overlap, MPI_Comm newcomm, const char *ovlboundary, PetscSF *sf, DM *dmOverlap)
2038: {
2039: DM_Plex *mesh = (DM_Plex *)dm->data;
2040: MPI_Comm comm;
2041: PetscMPIInt size, rank;
2042: PetscSection rootSection, leafSection;
2043: IS rootrank, leafrank;
2044: DM dmCoord;
2045: DMLabel lblOverlap;
2046: PetscSF sfOverlap, sfStratified, sfPoint;
2047: PetscBool clear_ovlboundary;
2049: PetscFunctionBegin;
2050: if (sf) *sf = NULL;
2051: *dmOverlap = NULL;
2052: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
2053: PetscCallMPI(MPI_Comm_size(comm, &size));
2054: PetscCallMPI(MPI_Comm_rank(comm, &rank));
2055: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
2056: {
2057: // We need to get options for the already-distributed mesh, so it must be done here
2058: PetscInt overlap;
2059: const char *prefix;
2060: char oldPrefix[PETSC_MAX_PATH_LEN];
2062: oldPrefix[0] = '\0';
2063: PetscCall(PetscObjectGetOptionsPrefix((PetscObject)dm, &prefix));
2064: PetscCall(PetscStrncpy(oldPrefix, prefix, sizeof(oldPrefix)));
2065: PetscCall(PetscObjectAppendOptionsPrefix((PetscObject)dm, "dist_"));
2066: PetscCall(DMPlexGetOverlap(dm, &overlap));
2067: PetscObjectOptionsBegin((PetscObject)dm);
2068: PetscCall(DMSetFromOptions_Overlap_Plex(dm, PetscOptionsObject, &overlap));
2069: PetscOptionsEnd();
2070: PetscCall(PetscObjectSetOptionsPrefix((PetscObject)dm, oldPrefix[0] == '\0' ? NULL : oldPrefix));
2071: }
2072: if (ovlboundary) {
2073: PetscBool flg;
2074: PetscCall(DMHasLabel(dm, ovlboundary, &flg));
2075: if (!flg) {
2076: DMLabel label;
2078: PetscCall(DMCreateLabel(dm, ovlboundary));
2079: PetscCall(DMGetLabel(dm, ovlboundary, &label));
2080: PetscCall(DMPlexMarkBoundaryFaces(dm, 1, label));
2081: clear_ovlboundary = PETSC_TRUE;
2082: }
2083: }
2084: PetscCall(PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0));
2085: /* Compute point overlap with neighbouring processes on the distributed DM */
2086: PetscCall(PetscLogEventBegin(DMPLEX_Partition, dm, 0, 0, 0));
2087: PetscCall(PetscSectionCreate(newcomm, &rootSection));
2088: PetscCall(PetscSectionCreate(newcomm, &leafSection));
2089: PetscCall(DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank));
2090: if (mesh->numOvLabels) PetscCall(DMPlexCreateOverlapLabelFromLabels(dm, mesh->numOvLabels, mesh->ovLabels, mesh->ovValues, mesh->numOvExLabels, mesh->ovExLabels, mesh->ovExValues, rootSection, rootrank, leafSection, leafrank, &lblOverlap));
2091: else PetscCall(DMPlexCreateOverlapLabel(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap));
2092: /* Convert overlap label to stratified migration SF */
2093: PetscCall(DMPlexPartitionLabelCreateSF(dm, lblOverlap, PETSC_FALSE, &sfOverlap));
2094: PetscCall(DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified));
2095: PetscCall(PetscSFDestroy(&sfOverlap));
2096: sfOverlap = sfStratified;
2097: PetscCall(PetscObjectSetName((PetscObject)sfOverlap, "Overlap SF"));
2098: PetscCall(PetscSFSetFromOptions(sfOverlap));
2100: PetscCall(PetscSectionDestroy(&rootSection));
2101: PetscCall(PetscSectionDestroy(&leafSection));
2102: PetscCall(ISDestroy(&rootrank));
2103: PetscCall(ISDestroy(&leafrank));
2104: PetscCall(PetscLogEventEnd(DMPLEX_Partition, dm, 0, 0, 0));
2106: /* Build the overlapping DM */
2107: PetscCall(DMPlexCreate(newcomm, dmOverlap));
2108: PetscCall(PetscObjectSetName((PetscObject)*dmOverlap, "Parallel Mesh"));
2109: PetscCall(DMPlexMigrate(dm, sfOverlap, *dmOverlap));
2110: /* Store the overlap in the new DM */
2111: PetscCall(DMPlexSetOverlap_Plex(*dmOverlap, dm, overlap));
2112: /* Build the new point SF */
2113: PetscCall(DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint));
2114: PetscCall(DMSetPointSF(*dmOverlap, sfPoint));
2115: PetscCall(DMGetCoordinateDM(*dmOverlap, &dmCoord));
2116: if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
2117: PetscCall(DMGetCellCoordinateDM(*dmOverlap, &dmCoord));
2118: if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
2119: PetscCall(PetscSFDestroy(&sfPoint));
2120: PetscCall(DMPlexCopy_Internal(dm, PETSC_TRUE, PETSC_FALSE, *dmOverlap));
2121: /* TODO: labels stored inside the DS for regions should be handled here */
2122: PetscCall(DMCopyDisc(dm, *dmOverlap));
2123: PetscCall(DMPlexCopyFlags(dm, *dmOverlap));
2124: /* Cleanup overlap partition */
2125: PetscCall(DMLabelDestroy(&lblOverlap));
2126: if (sf) *sf = sfOverlap;
2127: else PetscCall(PetscSFDestroy(&sfOverlap));
2128: if (ovlboundary) {
2129: DMLabel label;
2130: PetscBool flg;
2132: if (clear_ovlboundary) PetscCall(DMRemoveLabel(dm, ovlboundary, NULL));
2133: PetscCall(DMHasLabel(*dmOverlap, ovlboundary, &flg));
2134: PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Missing %s label", ovlboundary);
2135: PetscCall(DMGetLabel(*dmOverlap, ovlboundary, &label));
2136: PetscCall(DMPlexMarkBoundaryFaces_Internal(*dmOverlap, 1, 0, label, PETSC_TRUE));
2137: }
2138: PetscCall(PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0));
2139: PetscFunctionReturn(PETSC_SUCCESS);
2140: }
2142: /*@
2143: DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping `DM`.
2145: Collective
2147: Input Parameters:
2148: + dm - The non-overlapping distributed `DMPLEX` object
2149: - overlap - The overlap of partitions (the same on all ranks)
2151: Output Parameters:
2152: + sf - The `PetscSF` used for point distribution, or pass `NULL` if not needed
2153: - dmOverlap - The overlapping distributed `DMPLEX` object
2155: Options Database Keys:
2156: + -dm_plex_overlap_labels name1,name2,... - List of overlap label names
2157: . -dm_plex_overlap_values int1,int2,... - List of overlap label values
2158: . -dm_plex_overlap_exclude_label label - Label used to exclude points from overlap
2159: - -dm_plex_overlap_exclude_value value - Label value used to exclude points from overlap
2161: Level: advanced
2163: Notes:
2164: If the mesh was not distributed, the output `dmOverlap` will be `NULL`.
2166: The user can control the definition of adjacency for the mesh using `DMSetAdjacency()`. They should choose the combination appropriate for the function
2167: representation on the mesh.
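  Example Usage:
  A sketch adding one level of cell overlap to an already distributed mesh:
.vb
  DM dmOver;

  PetscCall(DMPlexDistributeOverlap(dm, 1, NULL, &dmOver));
  if (dmOver) {
    PetscCall(DMDestroy(&dm));
    dm = dmOver;
  }
.ve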
2169: .seealso: `DMPLEX`, `PetscSF`, `DM`, `DMPlexCreate()`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexCreateOverlapLabel()`, `DMPlexGetOverlap()`
2170: @*/
2171: PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PeOp PetscSF *sf, DM *dmOverlap)
2172: {
2173: PetscFunctionBegin;
2176: if (sf) PetscAssertPointer(sf, 3);
2177: PetscAssertPointer(dmOverlap, 4);
2178: PetscCall(DMPlexDistributeOverlap_Internal(dm, overlap, PetscObjectComm((PetscObject)dm), NULL, sf, dmOverlap));
2179: PetscFunctionReturn(PETSC_SUCCESS);
2180: }
2182: PetscErrorCode DMPlexGetOverlap_Plex(DM dm, PetscInt *overlap)
2183: {
2184: DM_Plex *mesh = (DM_Plex *)dm->data;
2186: PetscFunctionBegin;
2187: *overlap = mesh->overlap;
2188: PetscFunctionReturn(PETSC_SUCCESS);
2189: }
2191: PetscErrorCode DMPlexSetOverlap_Plex(DM dm, DM dmSrc, PetscInt overlap)
2192: {
2193: DM_Plex *mesh = NULL;
2194: DM_Plex *meshSrc = NULL;
2196: PetscFunctionBegin;
2198: mesh = (DM_Plex *)dm->data;
2199: if (dmSrc) {
2201: meshSrc = (DM_Plex *)dmSrc->data;
2202: mesh->overlap = meshSrc->overlap;
2203: } else {
2204: mesh->overlap = 0;
2205: }
2206: mesh->overlap += overlap;
2207: PetscFunctionReturn(PETSC_SUCCESS);
2208: }
2210: /*@
2211: DMPlexGetOverlap - Get the width of the cell overlap
2213: Not Collective
2215: Input Parameter:
2216: . dm - The `DM`
2218: Output Parameter:
2219: . overlap - the width of the cell overlap
2221: Level: intermediate
2223: .seealso: `DMPLEX`, `DMPlexSetOverlap()`, `DMPlexDistribute()`
2224: @*/
2225: PetscErrorCode DMPlexGetOverlap(DM dm, PetscInt *overlap)
2226: {
2227: PetscFunctionBegin;
2229: PetscAssertPointer(overlap, 2);
2230: PetscUseMethod(dm, "DMPlexGetOverlap_C", (DM, PetscInt *), (dm, overlap));
2231: PetscFunctionReturn(PETSC_SUCCESS);
2232: }
2234: /*@
2235: DMPlexSetOverlap - Set the width of the cell overlap
2237: Logically Collective
2239: Input Parameters:
2240: + dm - The `DM`
2241: . dmSrc - The `DM` that produced this one, or `NULL`
2242: - overlap - the width of the cell overlap
2244: Level: intermediate
2246: Note:
2247: The overlap from `dmSrc` is added to `dm`
2249: .seealso: `DMPLEX`, `DMPlexGetOverlap()`, `DMPlexDistribute()`
2250: @*/
2251: PetscErrorCode DMPlexSetOverlap(DM dm, DM dmSrc, PetscInt overlap)
2252: {
2253: PetscFunctionBegin;
2256: PetscTryMethod(dm, "DMPlexSetOverlap_C", (DM, DM, PetscInt), (dm, dmSrc, overlap));
2257: PetscFunctionReturn(PETSC_SUCCESS);
2258: }
2260: PetscErrorCode DMPlexDistributeSetDefault_Plex(DM dm, PetscBool dist)
2261: {
2262: DM_Plex *mesh = (DM_Plex *)dm->data;
2264: PetscFunctionBegin;
2265: mesh->distDefault = dist;
2266: PetscFunctionReturn(PETSC_SUCCESS);
2267: }
2269: /*@
2270: DMPlexDistributeSetDefault - Set flag indicating whether the `DM` should be distributed by default
2272: Logically Collective
2274: Input Parameters:
2275: + dm - The `DM`
2276: - dist - Flag for distribution
2278: Level: intermediate
2280: .seealso: `DMPLEX`, `DMPlexDistributeGetDefault()`, `DMPlexDistribute()`
2281: @*/
2282: PetscErrorCode DMPlexDistributeSetDefault(DM dm, PetscBool dist)
2283: {
2284: PetscFunctionBegin;
2287: PetscTryMethod(dm, "DMPlexDistributeSetDefault_C", (DM, PetscBool), (dm, dist));
2288: PetscFunctionReturn(PETSC_SUCCESS);
2289: }
2291: PetscErrorCode DMPlexDistributeGetDefault_Plex(DM dm, PetscBool *dist)
2292: {
2293: DM_Plex *mesh = (DM_Plex *)dm->data;
2295: PetscFunctionBegin;
2296: *dist = mesh->distDefault;
2297: PetscFunctionReturn(PETSC_SUCCESS);
2298: }
2300: /*@
2301: DMPlexDistributeGetDefault - Get flag indicating whether the `DM` should be distributed by default
2303: Not Collective
2305: Input Parameter:
2306: . dm - The `DM`
2308: Output Parameter:
2309: . dist - Flag for distribution
2311: Level: intermediate
2313: .seealso: `DMPLEX`, `DM`, `DMPlexDistributeSetDefault()`, `DMPlexDistribute()`
2314: @*/
2315: PetscErrorCode DMPlexDistributeGetDefault(DM dm, PetscBool *dist)
2316: {
2317: PetscFunctionBegin;
2319: PetscAssertPointer(dist, 2);
2320: PetscUseMethod(dm, "DMPlexDistributeGetDefault_C", (DM, PetscBool *), (dm, dist));
2321: PetscFunctionReturn(PETSC_SUCCESS);
2322: }
2324: /*@
2325: DMPlexGetGatherDM - Get a copy of the `DMPLEX` that gathers all points on the
2326: root process of the original's communicator.
2328: Collective
2330: Input Parameter:
2331: . dm - the original `DMPLEX` object
2333: Output Parameters:
2334: + sf - the `PetscSF` used for point distribution (optional)
2335: - gatherMesh - the gathered `DM` object, or `NULL`
2337: Level: intermediate
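  Example Usage:
  A sketch gathering the mesh on the root process while ignoring the migration `PetscSF`:
.vb
  DM gatherMesh;

  PetscCall(DMPlexGetGatherDM(dm, NULL, &gatherMesh));
  if (gatherMesh) {
    /* use the gathered mesh, e.g. view it from rank 0 */
    PetscCall(DMDestroy(&gatherMesh));
  }
.ve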
2339: .seealso: `DMPLEX`, `DM`, `PetscSF`, `DMPlexDistribute()`, `DMPlexGetRedundantDM()`
2340: @*/
2341: PetscErrorCode DMPlexGetGatherDM(DM dm, PetscSF *sf, PeOp DM *gatherMesh)
2342: {
2343: MPI_Comm comm;
2344: PetscMPIInt size;
2345: PetscPartitioner oldPart, gatherPart;
2347: PetscFunctionBegin;
2349: PetscAssertPointer(gatherMesh, 3);
2350: *gatherMesh = NULL;
2351: if (sf) *sf = NULL;
2352: comm = PetscObjectComm((PetscObject)dm);
2353: PetscCallMPI(MPI_Comm_size(comm, &size));
2354: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
2355: PetscCall(DMPlexGetPartitioner(dm, &oldPart));
2356: PetscCall(PetscObjectReference((PetscObject)oldPart));
2357: PetscCall(PetscPartitionerCreate(comm, &gatherPart));
2358: PetscCall(PetscPartitionerSetType(gatherPart, PETSCPARTITIONERGATHER));
2359: PetscCall(DMPlexSetPartitioner(dm, gatherPart));
2360: PetscCall(DMPlexDistribute(dm, 0, sf, gatherMesh));
2362: PetscCall(DMPlexSetPartitioner(dm, oldPart));
2363: PetscCall(PetscPartitionerDestroy(&gatherPart));
2364: PetscCall(PetscPartitionerDestroy(&oldPart));
2365: PetscFunctionReturn(PETSC_SUCCESS);
2366: }
2368: /*@
2369: DMPlexGetRedundantDM - Get a copy of the `DMPLEX` that is completely copied on each process.
2371: Collective
2373: Input Parameter:
2374: . dm - the original `DMPLEX` object
2376: Output Parameters:
2377: + sf - the `PetscSF` used for point distribution (optional)
2378: - redundantMesh - the redundant `DM` object, or `NULL`
2380: Level: intermediate
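  Example Usage:
  A sketch creating a copy of the mesh that is complete on every process:
.vb
  DM redundantMesh;

  PetscCall(DMPlexGetRedundantDM(dm, NULL, &redundantMesh));
  if (redundantMesh) {
    /* every rank now holds the full mesh */
    PetscCall(DMDestroy(&redundantMesh));
  }
.ve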
2382: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexGetGatherDM()`
2383: @*/
2384: PetscErrorCode DMPlexGetRedundantDM(DM dm, PetscSF *sf, PeOp DM *redundantMesh)
2385: {
2386: MPI_Comm comm;
2387: PetscMPIInt size, rank;
2388: PetscInt pStart, pEnd, p;
2389: PetscInt numPoints = -1;
2390: PetscSF migrationSF, sfPoint, gatherSF;
2391: DM gatherDM, dmCoord;
2392: PetscSFNode *points;
2394: PetscFunctionBegin;
2396: PetscAssertPointer(redundantMesh, 3);
2397: *redundantMesh = NULL;
2398: comm = PetscObjectComm((PetscObject)dm);
2399: PetscCallMPI(MPI_Comm_size(comm, &size));
2400: if (size == 1) {
2401: PetscCall(PetscObjectReference((PetscObject)dm));
2402: *redundantMesh = dm;
2403: if (sf) *sf = NULL;
2404: PetscFunctionReturn(PETSC_SUCCESS);
2405: }
2406: PetscCall(DMPlexGetGatherDM(dm, &gatherSF, &gatherDM));
2407: if (!gatherDM) PetscFunctionReturn(PETSC_SUCCESS);
2408: PetscCallMPI(MPI_Comm_rank(comm, &rank));
2409: PetscCall(DMPlexGetChart(gatherDM, &pStart, &pEnd));
2410: numPoints = pEnd - pStart;
2411: PetscCallMPI(MPI_Bcast(&numPoints, 1, MPIU_INT, 0, comm));
2412: PetscCall(PetscMalloc1(numPoints, &points));
2413: PetscCall(PetscSFCreate(comm, &migrationSF));
2414: for (p = 0; p < numPoints; p++) {
2415: points[p].index = p;
2416: points[p].rank = 0;
2417: }
2418: PetscCall(PetscSFSetGraph(migrationSF, pEnd - pStart, numPoints, NULL, PETSC_OWN_POINTER, points, PETSC_OWN_POINTER));
2419: PetscCall(DMPlexCreate(comm, redundantMesh));
2420: PetscCall(PetscObjectSetName((PetscObject)*redundantMesh, "Redundant Mesh"));
2421: PetscCall(DMPlexMigrate(gatherDM, migrationSF, *redundantMesh));
2422: /* This expresses that all points are in the overlap */
2423: PetscCall(DMPlexSetOverlap_Plex(*redundantMesh, NULL, PETSC_INT_MAX));
2424: PetscCall(DMPlexCreatePointSF(*redundantMesh, migrationSF, PETSC_FALSE, &sfPoint));
2425: PetscCall(DMSetPointSF(*redundantMesh, sfPoint));
2426: PetscCall(DMGetCoordinateDM(*redundantMesh, &dmCoord));
2427: if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
2428: PetscCall(PetscSFDestroy(&sfPoint));
2429: if (sf) {
2430: PetscSF tsf;
2432: PetscCall(PetscSFCompose(gatherSF, migrationSF, &tsf));
2433: PetscCall(DMPlexStratifyMigrationSF(dm, tsf, sf));
2434: PetscCall(PetscSFDestroy(&tsf));
2435: }
2436: PetscCall(PetscSFDestroy(&migrationSF));
2437: PetscCall(PetscSFDestroy(&gatherSF));
2438: PetscCall(DMDestroy(&gatherDM));
2439: PetscCall(DMCopyDisc(dm, *redundantMesh));
2440: PetscCall(DMPlexCopy_Internal(dm, PETSC_TRUE, PETSC_FALSE, *redundantMesh));
2441: PetscFunctionReturn(PETSC_SUCCESS);
2442: }
2444: /*@
2445: DMPlexIsDistributed - Find out whether this `DM` is distributed, i.e. more than one rank owns some points.
2447: Collective
2449: Input Parameter:
2450: . dm - The `DM` object
2452: Output Parameter:
2453: . distributed - Flag whether the `DM` is distributed
2455: Level: intermediate
2457: Notes:
2458: This currently finds out whether at least two ranks have any DAG points.
2459: This involves `MPI_Allreduce()` with one integer.
2460: The result is currently not stashed so every call to this routine involves this global communication.
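  Example Usage:
  A sketch distributing the mesh only when it is not already distributed:
.vb
  PetscBool distributed;

  PetscCall(DMPlexIsDistributed(dm, &distributed));
  if (!distributed) {
    DM dmDist;

    PetscCall(DMPlexDistribute(dm, 0, NULL, &dmDist));
    if (dmDist) {
      PetscCall(DMDestroy(&dm));
      dm = dmDist;
    }
  }
.ve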
2462: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexGetOverlap()`, `DMPlexIsInterpolated()`
2463: @*/
2464: PetscErrorCode DMPlexIsDistributed(DM dm, PetscBool *distributed)
2465: {
2466: PetscInt pStart, pEnd, count;
2467: MPI_Comm comm;
2468: PetscMPIInt size;
2470: PetscFunctionBegin;
2472: PetscAssertPointer(distributed, 2);
2473: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
2474: PetscCallMPI(MPI_Comm_size(comm, &size));
2475: if (size == 1) {
2476: *distributed = PETSC_FALSE;
2477: PetscFunctionReturn(PETSC_SUCCESS);
2478: }
2479: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
2480: count = (pEnd - pStart) > 0 ? 1 : 0;
2481: PetscCallMPI(MPIU_Allreduce(MPI_IN_PLACE, &count, 1, MPIU_INT, MPI_SUM, comm));
2482: *distributed = count > 1 ? PETSC_TRUE : PETSC_FALSE;
2483: PetscFunctionReturn(PETSC_SUCCESS);
2484: }
2486: /*@
2487: DMPlexDistributionSetName - Set the name of the specific parallel distribution
2489: Input Parameters:
2490: + dm - The `DM`
2491: - name - The name of the specific parallel distribution
2493: Level: developer
2495: Note:
2496: If distribution name is set when saving, `DMPlexTopologyView()` saves the plex's
2497: parallel distribution (i.e., partition, ownership, and local ordering of points) under
2498: this name. Conversely, if distribution name is set when loading, `DMPlexTopologyLoad()`
2499: loads the parallel distribution stored in file under this name.
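  Example Usage:
  A sketch saving the current parallel distribution under a chosen name; the name "my_distribution" and the `PetscViewer` `viewer` (e.g. an HDF5 viewer) are placeholders, not taken from this file:
.vb
  PetscCall(DMPlexDistributionSetName(dm, "my_distribution"));
  PetscCall(DMPlexTopologyView(dm, viewer));
.ve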
2501: .seealso: `DMPLEX`, `DMPlexDistributionGetName()`, `DMPlexTopologyView()`, `DMPlexTopologyLoad()`
2502: @*/
2503: PetscErrorCode DMPlexDistributionSetName(DM dm, const char name[])
2504: {
2505: DM_Plex *mesh = (DM_Plex *)dm->data;
2507: PetscFunctionBegin;
2509: if (name) PetscAssertPointer(name, 2);
2510: PetscCall(PetscFree(mesh->distributionName));
2511: PetscCall(PetscStrallocpy(name, &mesh->distributionName));
2512: PetscFunctionReturn(PETSC_SUCCESS);
2513: }
2515: /*@
2516: DMPlexDistributionGetName - Retrieve the name of the specific parallel distribution
2518: Input Parameter:
2519: . dm - The `DM`
2521: Output Parameter:
2522: . name - The name of the specific parallel distribution
2524: Level: developer
2526: Note:
2527: If distribution name is set when saving, `DMPlexTopologyView()` saves the plex's
2528: parallel distribution (i.e., partition, ownership, and local ordering of points) under
2529: this name. Conversely, if distribution name is set when loading, `DMPlexTopologyLoad()`
2530: loads the parallel distribution stored in file under this name.
2532: .seealso: `DMPLEX`, `DMPlexDistributionSetName()`, `DMPlexTopologyView()`, `DMPlexTopologyLoad()`
2533: @*/
2534: PetscErrorCode DMPlexDistributionGetName(DM dm, const char *name[])
2535: {
2536: DM_Plex *mesh = (DM_Plex *)dm->data;
2538: PetscFunctionBegin;
2540: PetscAssertPointer(name, 2);
2541: *name = mesh->distributionName;
2542: PetscFunctionReturn(PETSC_SUCCESS);
2543: }