Actual source code: sfallgatherv.c
#include <../src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.h>

/* PetscSFGetGraph is non-collective. An implementation should not have collective calls */
PETSC_INTERN PetscErrorCode PetscSFGetGraph_Allgatherv(PetscSF sf, PetscInt *nroots, PetscInt *nleaves, const PetscInt **ilocal, const PetscSFNode **iremote)
{
  PetscInt        j, k;
  const PetscInt *range;
  PetscMPIInt     size;

  PetscFunctionBegin;
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)sf), &size));
  if (nroots) *nroots = sf->nroots;
  if (nleaves) *nleaves = sf->nleaves;
  if (ilocal) *ilocal = NULL; /* Contiguous leaves */
  if (iremote) {
    if (!sf->remote && sf->nleaves) { /* The && sf->nleaves makes sfgatherv able to inherit this routine */
      PetscCall(PetscLayoutGetRanges(sf->map, &range));
      PetscCall(PetscMalloc1(sf->nleaves, &sf->remote));
      sf->remote_alloc = sf->remote;
      for (PetscMPIInt i = 0; i < size; i++) {
        for (j = range[i], k = 0; j < range[i + 1]; j++, k++) {
          sf->remote[j].rank  = i;
          sf->remote[j].index = k;
        }
      }
    }
    *iremote = sf->remote;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
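
/*
  For illustration: with 3 ranks and a root layout of {2, 1, 3} roots per rank, every rank owns
  nleaves = 6 contiguous leaves (ilocal = NULL), and the lazily built iremote array reads

    leaf  :  0  1  2  3  4  5
    rank  :  0  0  1  2  2  2
    index :  0  1  0  0  1  2

  i.e., leaf j on every process is connected to the root (rank, index) determined by the layout ranges.
*/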

PETSC_INTERN PetscErrorCode PetscSFSetUp_Allgatherv(PetscSF sf)
{
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;
  PetscMPIInt         size;
  PetscInt            i;
  const PetscInt     *range;
  MPI_Comm            comm;

  PetscFunctionBegin;
  PetscCall(PetscSFSetUp_Allgather(sf));
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  if (sf->nleaves) { /* This if (sf->nleaves) test makes sfgatherv able to inherit this routine */
    PetscBool isallgatherv = PETSC_FALSE;

    PetscCall(PetscMalloc1(size, &dat->recvcounts));
    PetscCall(PetscMalloc1(size, &dat->displs));
    PetscCall(PetscLayoutGetRanges(sf->map, &range));

    for (i = 0; i < size; i++) {
      PetscCall(PetscMPIIntCast(range[i], &dat->displs[i]));
      PetscCall(PetscMPIIntCast(range[i + 1] - range[i], &dat->recvcounts[i]));
    }

    /* check if we actually have a one-to-all pattern */
    PetscCall(PetscObjectTypeCompare((PetscObject)sf, PETSCSFALLGATHERV, &isallgatherv));
    if (isallgatherv) {
      PetscMPIInt rank, nRanksWithZeroRoots;

      nRanksWithZeroRoots = (sf->nroots == 0) ? 1 : 0; /* I have no roots */
      PetscCallMPI(MPIU_Allreduce(MPI_IN_PLACE, &nRanksWithZeroRoots, 1, MPI_INT, MPI_SUM, comm));
      if (nRanksWithZeroRoots == size - 1) { /* Only one rank has roots, which indicates a bcast pattern */
        dat->bcast_pattern = PETSC_TRUE;
        PetscCallMPI(MPI_Comm_rank(comm, &rank));
        dat->bcast_root = sf->nroots > 0 ? rank : -1;
        PetscCallMPI(MPIU_Allreduce(MPI_IN_PLACE, &dat->bcast_root, 1, MPI_INT, MPI_MAX, comm));
      }
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
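
/*
  Example of the bcast-pattern detection above: with size = 4 and nroots = {7, 0, 0, 0}, the sum of
  the "I have no roots" flags is 3 = size - 1, so the graph degenerates to a broadcast. Each rank
  proposes bcast_root = rank (if it has roots) or -1 (if not), and the MPI_MAX reduction leaves
  bcast_root = 0 everywhere. Later Bcast/Reduce calls can then use MPI_Ibcast/MPI_(I)reduce instead
  of the more expensive MPI_Iallgatherv or MPI_Reduce + MPI_Iscatterv combinations.
*/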

PETSC_INTERN PetscErrorCode PetscSFReset_Allgatherv(PetscSF sf)
{
  PetscSF_Allgatherv *dat  = (PetscSF_Allgatherv *)sf->data;
  PetscSFLink         link = dat->avail, next;

  PetscFunctionBegin;
  PetscCall(PetscFree(dat->iranks));
  PetscCall(PetscFree(dat->ioffset));
  PetscCall(PetscFree(dat->irootloc));
  PetscCall(PetscFree(dat->recvcounts));
  PetscCall(PetscFree(dat->displs));
  PetscCheck(!dat->inuse, PetscObjectComm((PetscObject)sf), PETSC_ERR_ARG_WRONGSTATE, "Outstanding operation has not been completed");
  for (; link; link = next) {
    next = link->next;
    PetscCall(PetscSFLinkDestroy(sf, link));
  }
  dat->avail = NULL;
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode PetscSFDestroy_Allgatherv(PetscSF sf)
{
  PetscFunctionBegin;
  PetscCall(PetscSFReset_Allgatherv(sf));
  PetscCall(PetscFree(sf->data));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PetscSFBcastBegin_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, void *leafdata, MPI_Op op)
{
  PetscSFLink         link;
  PetscMPIInt         sendcount, rank, nleaves;
  MPI_Comm            comm;
  void               *rootbuf = NULL, *leafbuf = NULL;
  MPI_Request        *req     = NULL;
  PetscSF_Allgatherv *dat     = (PetscSF_Allgatherv *)sf->data;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_BCAST, &link));
  PetscCall(PetscSFLinkPackRootData(sf, link, PETSCSF_REMOTE, rootdata));
  PetscCall(PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCall(PetscMPIIntCast(sf->nroots, &sendcount));
  PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_ROOT2LEAF, &rootbuf, &leafbuf, &req, NULL));

  if (dat->bcast_pattern && rank == dat->bcast_root) PetscCall((*link->Memcpy)(link, link->leafmtype_mpi, leafbuf, link->rootmtype_mpi, rootbuf, (size_t)sendcount * link->unitbytes));
  /* Ready the buffers for MPI */
  PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link));
  PetscCall(PetscMPIIntCast(sf->nleaves, &nleaves));
  if (dat->bcast_pattern) PetscCallMPI(MPIU_Ibcast(leafbuf, nleaves, unit, dat->bcast_root, comm, req));
  else PetscCallMPI(MPIU_Iallgatherv(rootbuf, sendcount, unit, leafbuf, dat->recvcounts, dat->displs, unit, comm, req));
  PetscFunctionReturn(PETSC_SUCCESS);
}
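
/*
  A minimal calling sequence from user code (a sketch; `sf` is assumed to already be set up with an
  allgather(v) graph, and rootdata/leafdata sized according to PetscSFGetGraph()):

    PetscScalar *rootdata, *leafdata;  // nroots and nleaves entries, respectively
    PetscCall(PetscSFBcastBegin(sf, MPIU_SCALAR, rootdata, leafdata, MPI_REPLACE));
    // ...overlap local work here...
    PetscCall(PetscSFBcastEnd(sf, MPIU_SCALAR, rootdata, leafdata, MPI_REPLACE));

  which effectively performs an MPI_Allgatherv of the root values, or an MPI_Bcast when only one
  rank has roots (the bcast pattern detected in PetscSFSetUp_Allgatherv()).
*/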

static PetscErrorCode PetscSFReduceBegin_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType leafmtype, const void *leafdata, PetscMemType rootmtype, void *rootdata, MPI_Op op)
{
  PetscSFLink         link;
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;
  PetscInt            rstart;
  PetscMPIInt         rank, count, recvcount;
  MPI_Comm            comm;
  void               *rootbuf = NULL, *leafbuf = NULL;
  MPI_Request        *req     = NULL;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_REDUCE, &link));
  if (op == MPI_REPLACE) {
    /* REPLACE is only meaningful when all processes have the same leafdata to reduce, so copying from the local leafdata is fine */
    PetscCall(PetscLayoutGetRange(sf->map, &rstart, NULL));
    PetscCall((*link->Memcpy)(link, rootmtype, rootdata, leafmtype, (const char *)leafdata + (size_t)rstart * link->unitbytes, (size_t)sf->nroots * link->unitbytes));
    if (PetscMemTypeDevice(leafmtype) && PetscMemTypeHost(rootmtype)) PetscCall((*link->SyncStream)(link));
  } else {
    PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
    PetscCall(PetscSFLinkPackLeafData(sf, link, PETSCSF_REMOTE, leafdata));
    PetscCall(PetscSFLinkCopyLeafBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
    PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_LEAF2ROOT, &rootbuf, &leafbuf, &req, NULL));
    PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link));
    if (dat->bcast_pattern) {
      PetscInt     nleaves = sf->nleaves;
      PetscInt     nreal;
      PetscMPIInt  nleavesi;
      MPI_Datatype baseunit = unit;

      PetscCall(MPIPetsc_Type_compare_contig(unit, MPIU_REAL, &nreal));
      if (nreal > 0) {
        baseunit = MPIU_REAL;
        nleaves *= nreal;
#if PetscDefined(HAVE_COMPLEX)
      } else {
        PetscInt ncomplex;

        PetscCall(MPIPetsc_Type_compare_contig(unit, MPIU_COMPLEX, &ncomplex));
        if (ncomplex > 0) {
          baseunit = MPIU_COMPLEX;
          nleaves *= ncomplex;
        }
#endif
      }
      PetscCall(PetscMPIIntCast(nleaves, &nleavesi));
#if defined(PETSC_HAVE_OPENMPI) /* Workaround: cuda-aware Open MPI 4.1.3 does not support MPI_Ireduce() with device buffers */
      *req = MPI_REQUEST_NULL; /* Set the request to MPI_REQUEST_NULL so that the later MPI_Wait(req) is safe */
      PetscCallMPI(MPIU_Reduce(leafbuf, rootbuf, nleavesi, baseunit, op, dat->bcast_root, comm));
#else
      PetscCallMPI(MPIU_Ireduce(leafbuf, rootbuf, nleavesi, baseunit, op, dat->bcast_root, comm, req));
#endif
    } else { /* Reduce leafdata, then scatter to rootdata */
      PetscCallMPI(MPI_Comm_rank(comm, &rank));
      PetscCall(PetscMPIIntCast(dat->rootbuflen[PETSCSF_REMOTE], &recvcount));
      /* Allocate a separate leaf buffer on rank 0 */
      if (rank == 0 && !link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]) {
        PetscCall(PetscSFMalloc(sf, link->leafmtype_mpi, sf->leafbuflen[PETSCSF_REMOTE] * link->unitbytes, (void **)&link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]));
      }
      /* In case we already copied leafdata from device to host (i.e., no use_gpu_aware_mpi), we need to adjust leafbuf on rank 0 */
      if (rank == 0 && link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi] == leafbuf) leafbuf = MPI_IN_PLACE;
      PetscCall(PetscMPIIntCast(sf->nleaves * link->bs, &count));
      PetscCallMPI(MPI_Reduce(leafbuf, link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi], count, link->basicunit, op, 0, comm)); /* The reduce must use the MPI builtin datatype basicunit */
      PetscCallMPI(MPIU_Iscatterv(link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi], dat->recvcounts, dat->displs, unit, rootbuf, recvcount, unit, 0, comm, req));
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
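
/*
  Summary of the two code paths above: with a bcast pattern, the reduction is a single MPI_(I)reduce
  of the leaf buffer onto dat->bcast_root, decomposed into MPIU_REAL or MPIU_COMPLEX units when the
  datatype allows it. In the general allgatherv case every rank holds a full copy of the leaves, so
  the leaves are first reduced onto a scratch buffer on rank 0 using the builtin basicunit, and the
  result is then scattered back to the owning ranks with MPI_Iscatterv according to the root layout
  (dat->recvcounts/dat->displs).
*/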

PETSC_INTERN PetscErrorCode PetscSFReduceEnd_Allgatherv(PetscSF sf, MPI_Datatype unit, const void *leafdata, void *rootdata, MPI_Op op)
{
  PetscSFLink link;

  PetscFunctionBegin;
  if (op == MPI_REPLACE) {
    /* A rare case: op is MPI_REPLACE, GPUs are used, but there is no GPU-aware MPI. In PetscSFReduceBegin_Allgather(v)
       we did a device-to-device copy that in effect already finished the communication. But PetscSFLinkFinishCommunication()
       in PetscSFReduceEnd_Basic() would see a root buffer and call PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(),
       which does a host-to-device copy on rootbuf and wrongly overwrites the results. So we do not defer to
       PetscSFReduceEnd_Basic() in this case and just reclaim the link.
    */
    PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));
    PetscCall(PetscSFLinkReclaim(sf, &link));
  } else {
    PetscCall(PetscSFReduceEnd_Basic(sf, unit, leafdata, rootdata, op));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PetscSFBcastToZero_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, void *leafdata)
{
  PetscSFLink         link;
  PetscMPIInt         rank;
  PetscMPIInt         sendcount;
  MPI_Comm            comm;
  PetscSF_Allgatherv *dat     = (PetscSF_Allgatherv *)sf->data;
  void               *rootbuf = NULL, *leafbuf = NULL; /* buffers seen by MPI */
  MPI_Request        *req     = NULL;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, MPI_REPLACE, PETSCSF_BCAST, &link));
  PetscCall(PetscSFLinkPackRootData(sf, link, PETSCSF_REMOTE, rootdata));
  PetscCall(PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCall(PetscMPIIntCast(sf->nroots, &sendcount));
  PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_ROOT2LEAF, &rootbuf, &leafbuf, &req, NULL));
  PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link));
  PetscCallMPI(MPIU_Igatherv(rootbuf, sendcount, unit, leafbuf, dat->recvcounts, dat->displs, unit, 0 /* rank 0 */, comm, req));

  PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));
  PetscCall(PetscSFLinkFinishCommunication(sf, link, PETSCSF_ROOT2LEAF));
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)sf), &rank));
  if (rank == 0 && PetscMemTypeDevice(leafmtype) && !sf->use_gpu_aware_mpi) PetscCall((*link->Memcpy)(link, PETSC_MEMTYPE_DEVICE, leafdata, PETSC_MEMTYPE_HOST, link->leafbuf[PETSC_MEMTYPE_HOST], sf->leafbuflen[PETSCSF_REMOTE] * link->unitbytes));
  PetscCall(PetscSFLinkReclaim(sf, &link));
  PetscFunctionReturn(PETSC_SUCCESS);
}
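
/*
  Unlike the other Begin routines above, PetscSFBcastToZero_Allgatherv completes its communication
  before returning: it posts an MPI_Igatherv of the root buffers to rank 0, waits for it to finish,
  copies the result back to device memory on rank 0 when no GPU-aware MPI is used, and reclaims the
  link. It backs PetscSFBcastToZero_Private(), used for example by PetscSFFetchAndOpBegin_Allgatherv()
  below.
*/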

/* This routine is very tricky (I believe it is rarely used with this kind of graph, so we just provide a simple but non-optimal implementation).

   Suppose we have three ranks. Rank 0 has a root with value 1. Ranks 0,1,2 each have a leaf with value 2,3,4 respectively. The leaves are connected
   to the root on rank 0. Suppose op=MPI_SUM and ranks 0,1,2 access the root state in rank order. By definition of this routine, rank 0 sees 1
   in the root, fetches it into its leafupdate, then updates the root to 1 + 2 = 3; rank 1 sees 3 in the root, fetches it into its leafupdate, then
   updates the root to 3 + 3 = 6; rank 2 sees 6 in the root, fetches it into its leafupdate, then updates the root to 6 + 4 = 10. At the end,
   leafupdate on ranks 0,1,2 is 1,3,6 respectively, and the root is 10.

   We use a simpler implementation. From the same initial state, we copy leafdata to leafupdate
               rank-0   rank-1   rank-2
   Root          1
   Leaf          2        3        4
   Leafupdate    2        3        4

   Do MPI_Exscan on leafupdate,
               rank-0   rank-1   rank-2
   Root          1
   Leaf          2        3        4
   Leafupdate    2        2        5

   BcastAndOp from root to leafupdate,
               rank-0   rank-1   rank-2
   Root          1
   Leaf          2        3        4
   Leafupdate    3        3        6

   Copy root to leafupdate on rank-0
               rank-0   rank-1   rank-2
   Root          1
   Leaf          2        3        4
   Leafupdate    1        3        6

   Reduce from leaf to root,
               rank-0   rank-1   rank-2
   Root         10
   Leaf          2        3        4
   Leafupdate    1        3        6
*/
PETSC_INTERN PetscErrorCode PetscSFFetchAndOpBegin_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, void *rootdata, PetscMemType leafmtype, const void *leafdata, void *leafupdate, MPI_Op op)
{
  PetscSFLink link;
  MPI_Comm    comm;
  PetscMPIInt count;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCheck(!PetscMemTypeDevice(rootmtype) && !PetscMemTypeDevice(leafmtype), comm, PETSC_ERR_SUP, "FetchAndOp on device memory is not supported");
  /* Copy leafdata to leafupdate */
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_FETCH, &link));
  PetscCall(PetscSFLinkPackLeafData(sf, link, PETSCSF_REMOTE, leafdata)); /* Sync the device */
  PetscCall((*link->Memcpy)(link, leafmtype, leafupdate, leafmtype, leafdata, sf->nleaves * link->unitbytes));
  PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));

  /* Exscan on leafupdate and then BcastAndOp rootdata to leafupdate */
  if (op == MPI_REPLACE) {
    PetscMPIInt size, rank, prev, next;
    PetscCallMPI(MPI_Comm_rank(comm, &rank));
    PetscCallMPI(MPI_Comm_size(comm, &size));
    prev = rank ? rank - 1 : MPI_PROC_NULL;
    next = (rank < size - 1) ? rank + 1 : MPI_PROC_NULL;
    PetscCall(PetscMPIIntCast(sf->nleaves, &count));
    PetscCallMPI(MPI_Sendrecv_replace(leafupdate, count, unit, next, link->tag, prev, link->tag, comm, MPI_STATUS_IGNORE));
  } else {
    PetscCall(PetscMPIIntCast(sf->nleaves * link->bs, &count));
    PetscCallMPI(MPI_Exscan(MPI_IN_PLACE, leafupdate, count, link->basicunit, op, comm));
  }
  PetscCall(PetscSFLinkReclaim(sf, &link));
  PetscCall(PetscSFBcastBegin(sf, unit, rootdata, leafupdate, op));
  PetscCall(PetscSFBcastEnd(sf, unit, rootdata, leafupdate, op));

  /* Bcast roots to rank 0's leafupdate */
  PetscCall(PetscSFBcastToZero_Private(sf, unit, rootdata, leafupdate)); /* Using this private routine lets allgather-type SFs inherit this implementation */

  /* Reduce leafdata to rootdata */
  PetscCall(PetscSFReduceBegin(sf, unit, leafdata, rootdata, op));
  PetscFunctionReturn(PETSC_SUCCESS);
}
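
/*
  The routine above realizes the steps sketched in the leading comment: (1) leafdata is copied into
  leafupdate and either shifted by one rank with MPI_Sendrecv_replace (op == MPI_REPLACE) or
  exclusively scanned in place with MPI_Exscan (other ops); (2) PetscSFBcastBegin/End folds the root
  values into leafupdate with op; (3) PetscSFBcastToZero_Private overwrites rank 0's leafupdate with
  the plain root values; (4) PetscSFReduceBegin starts accumulating leafdata into rootdata, which
  PetscSFFetchAndOpEnd_Allgatherv below completes.
*/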

PETSC_INTERN PetscErrorCode PetscSFFetchAndOpEnd_Allgatherv(PetscSF sf, MPI_Datatype unit, void *rootdata, const void *leafdata, void *leafupdate, MPI_Op op)
{
  PetscFunctionBegin;
  PetscCall(PetscSFReduceEnd(sf, unit, leafdata, rootdata, op));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Get root ranks accessing my leaves */
PETSC_INTERN PetscErrorCode PetscSFGetRootRanks_Allgatherv(PetscSF sf, PetscMPIInt *nranks, const PetscMPIInt **ranks, const PetscInt **roffset, const PetscInt **rmine, const PetscInt **rremote)
{
  PetscInt        j, k, size;
  const PetscInt *range;

  PetscFunctionBegin;
  /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
  if (sf->nranks && !sf->ranks) { /* On rank != 0, sf->nranks = 0. The sf->nranks test makes this routine also work for sfgatherv */
    size = sf->nranks;
    PetscCall(PetscLayoutGetRanges(sf->map, &range));
    PetscCall(PetscMalloc4(size, &sf->ranks, size + 1, &sf->roffset, sf->nleaves, &sf->rmine, sf->nleaves, &sf->rremote));
    for (PetscMPIInt i = 0; i < size; i++) sf->ranks[i] = i;
    PetscCall(PetscArraycpy(sf->roffset, range, size + 1));
    for (PetscInt i = 0; i < sf->nleaves; i++) sf->rmine[i] = i; /* rmine is never NULL, even for contiguous leaves */
    for (PetscMPIInt i = 0; i < size; i++) {
      for (j = range[i], k = 0; j < range[i + 1]; j++, k++) sf->rremote[j] = k;
    }
  }

  if (nranks) *nranks = sf->nranks;
  if (ranks) *ranks = sf->ranks;
  if (roffset) *roffset = sf->roffset;
  if (rmine) *rmine = sf->rmine;
  if (rremote) *rremote = sf->rremote;
  PetscFunctionReturn(PETSC_SUCCESS);
}
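
/*
  For the 3-rank layout {2, 1, 3} used as an example above, the lazily built arrays are
    ranks   = {0, 1, 2}            (all ranks, in order)
    roffset = {0, 2, 3, 6}         (a copy of the layout ranges)
    rmine   = {0, 1, 2, 3, 4, 5}   (leaves are contiguous)
    rremote = {0, 1, 0, 0, 1, 2}   (root index within the owning rank)
  on every process that has leaves.
*/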

/* Get leaf ranks accessing my roots */
PETSC_INTERN PetscErrorCode PetscSFGetLeafRanks_Allgatherv(PetscSF sf, PetscMPIInt *niranks, const PetscMPIInt **iranks, const PetscInt **ioffset, const PetscInt **irootloc)
{
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;
  MPI_Comm            comm;
  PetscMPIInt         size, rank;

  PetscFunctionBegin;
  /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  if (niranks) *niranks = size;

  /* PetscSF_Basic has distinguished incoming ranks. Here we do not need that, but we must put self first and
     sort the other ranks. See the comments in PetscSFSetUp_Basic about MatGetBrowsOfAoCols_MPIAIJ for why.
  */
  if (iranks) {
    if (!dat->iranks) {
      PetscCall(PetscMalloc1(size, &dat->iranks));
      dat->iranks[0] = rank;
      for (PetscMPIInt i = 0, j = 1; i < size; i++) {
        if (i == rank) continue;
        dat->iranks[j++] = i;
      }
    }
    *iranks = dat->iranks; /* dat->iranks was initialized to NULL by PetscNew() */
  }

  if (ioffset) {
    if (!dat->ioffset) {
      PetscCall(PetscMalloc1(size + 1, &dat->ioffset));
      for (PetscMPIInt i = 0; i <= size; i++) dat->ioffset[i] = i * sf->nroots;
    }
    *ioffset = dat->ioffset;
  }

  if (irootloc) {
    if (!dat->irootloc) {
      PetscCall(PetscMalloc1(sf->nleaves, &dat->irootloc));
      for (PetscMPIInt i = 0; i < size; i++) {
        for (PetscInt j = 0; j < sf->nroots; j++) dat->irootloc[i * sf->nroots + j] = j;
      }
    }
    *irootloc = dat->irootloc;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
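
/*
  Example with a uniform layout of 2 roots per rank on 3 ranks: rank 1 reports
    iranks   = {1, 0, 2}            (self first, the remaining ranks in ascending order)
    ioffset  = {0, 2, 4, 6}
    irootloc = {0, 1, 0, 1, 0, 1}
  since every leaf rank accesses all of this rank's local roots.
*/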

PETSC_INTERN PetscErrorCode PetscSFCreateLocalSF_Allgatherv(PetscSF sf, PetscSF *out)
{
  PetscInt     i, nroots, nleaves, rstart, *ilocal;
  PetscSFNode *iremote;
  PetscSF      lsf;

  PetscFunctionBegin;
  nleaves = sf->nleaves ? sf->nroots : 0; /* sf->nleaves can be zero with SFGather(v) */
  nroots  = nleaves;
  PetscCall(PetscMalloc1(nleaves, &ilocal));
  PetscCall(PetscMalloc1(nleaves, &iremote));
  PetscCall(PetscLayoutGetRange(sf->map, &rstart, NULL));

  for (i = 0; i < nleaves; i++) {
    ilocal[i]        = rstart + i; /* lsf does not change leaf indices */
    iremote[i].rank  = 0;          /* rank in PETSC_COMM_SELF */
    iremote[i].index = i;          /* root index */
  }

  PetscCall(PetscSFCreate(PETSC_COMM_SELF, &lsf));
  PetscCall(PetscSFSetGraph(lsf, nroots, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER));
  PetscCall(PetscSFSetUp(lsf));
  *out = lsf;
  PetscFunctionReturn(PETSC_SUCCESS);
}
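
/*
  Example: with the {2, 1, 3} layout, rank 2 owns global roots 3, 4, 5, so its local SF on
  PETSC_COMM_SELF has nroots = nleaves = 3, ilocal = {3, 4, 5} (the global leaf numbering is kept)
  and iremote = {(0,0), (0,1), (0,2)}.
*/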

PETSC_INTERN PetscErrorCode PetscSFCreate_Allgatherv(PetscSF sf)
{
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;

  PetscFunctionBegin;
  sf->ops->BcastEnd  = PetscSFBcastEnd_Basic;
  sf->ops->ReduceEnd = PetscSFReduceEnd_Allgatherv;

  sf->ops->SetUp           = PetscSFSetUp_Allgatherv;
  sf->ops->Reset           = PetscSFReset_Allgatherv;
  sf->ops->Destroy         = PetscSFDestroy_Allgatherv;
  sf->ops->GetRootRanks    = PetscSFGetRootRanks_Allgatherv;
  sf->ops->GetLeafRanks    = PetscSFGetLeafRanks_Allgatherv;
  sf->ops->GetGraph        = PetscSFGetGraph_Allgatherv;
  sf->ops->BcastBegin      = PetscSFBcastBegin_Allgatherv;
  sf->ops->ReduceBegin     = PetscSFReduceBegin_Allgatherv;
  sf->ops->FetchAndOpBegin = PetscSFFetchAndOpBegin_Allgatherv;
  sf->ops->FetchAndOpEnd   = PetscSFFetchAndOpEnd_Allgatherv;
  sf->ops->CreateLocalSF   = PetscSFCreateLocalSF_Allgatherv;
  sf->ops->BcastToZero     = PetscSFBcastToZero_Allgatherv;

  sf->collective = PETSC_TRUE;

  PetscCall(PetscNew(&dat));
  dat->bcast_root = -1;
  sf->data        = (void *)dat;
  PetscFunctionReturn(PETSC_SUCCESS);
}
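
/*
  A typical way to end up with this implementation from user code is sketched below (assuming
  PetscSFSetGraphWithPattern() with PETSCSF_PATTERN_ALLGATHER, which selects an allgather(v)-type
  SF for the given layout; comm and nlocal are illustrative placeholders):

    PetscSF     sf;
    PetscLayout map;
    PetscCall(PetscLayoutCreateFromSizes(comm, nlocal, PETSC_DECIDE, 1, &map));
    PetscCall(PetscSFCreate(comm, &sf));
    PetscCall(PetscSFSetGraphWithPattern(sf, map, PETSCSF_PATTERN_ALLGATHER));
    PetscCall(PetscSFSetUp(sf));
    // ...PetscSFBcastBegin/End, PetscSFReduceBegin/End, etc....
    PetscCall(PetscSFDestroy(&sf));
    PetscCall(PetscLayoutDestroy(&map));
*/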