Actual source code: sfallgather.c
#include <../src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.h>

/* Reuse the Allgatherv type. The difference is that some fields (i.e., displs, recvcounts) are not used in Allgather on rank != 0, which is not a big deal */
typedef PetscSF_Allgatherv PetscSF_Allgather;
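
/* Set up buffer metadata for the allgather pattern: every rank's roots are delivered to all
   ranks, so the remote leaf/root buffers simply cover the whole leaf/root spaces and are
   contiguous with no duplicates. Each collective below issues a single MPI request, which is
   treated as a root request. */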
PetscErrorCode PetscSFSetUp_Allgather(PetscSF sf)
{
  PetscSF_Allgather *dat = (PetscSF_Allgather *)sf->data;

  PetscFunctionBegin;
  for (PetscInt i = PETSCSF_LOCAL; i <= PETSCSF_REMOTE; i++) {
    sf->leafbuflen[i]  = 0;
    sf->leafstart[i]   = 0;
    sf->leafcontig[i]  = PETSC_TRUE;
    sf->leafdups[i]    = PETSC_FALSE;
    dat->rootbuflen[i] = 0;
    dat->rootstart[i]  = 0;
    dat->rootcontig[i] = PETSC_TRUE;
    dat->rootdups[i]   = PETSC_FALSE;
  }

  sf->leafbuflen[PETSCSF_REMOTE]  = sf->nleaves;
  dat->rootbuflen[PETSCSF_REMOTE] = sf->nroots;
  sf->persistent                  = PETSC_FALSE;
  sf->nleafreqs                   = 0; /* MPI collectives only need one request. We treat it as a root request. */
  dat->nrootreqs                  = 1;
  PetscFunctionReturn(PETSC_SUCCESS);
}
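
/* Broadcast root data to leaves (root-to-leaf): pack the local roots and start an MPI_Iallgather
   so that every rank receives the concatenation of all ranks' roots in its leaf buffer. The
   operation is completed later by PetscSFBcastEnd_Basic. */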
static PetscErrorCode PetscSFBcastBegin_Allgather(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, void *leafdata, MPI_Op op)
{
  PetscSFLink  link;
  PetscMPIInt  sendcount;
  MPI_Comm     comm;
  void        *rootbuf = NULL, *leafbuf = NULL; /* buffer seen by MPI */
  MPI_Request *req = NULL;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_BCAST, &link));
  PetscCall(PetscSFLinkPackRootData(sf, link, PETSCSF_REMOTE, rootdata));
  PetscCall(PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCall(PetscMPIIntCast(sf->nroots, &sendcount));
  PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_ROOT2LEAF, &rootbuf, &leafbuf, &req, NULL));
  PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link));
  PetscCallMPI(MPIU_Iallgather(rootbuf, sendcount, unit, leafbuf, sendcount, unit, comm, req));
  PetscFunctionReturn(PETSC_SUCCESS);
}
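
/* Reduce leaf data onto roots (leaf-to-root). With MPI_REPLACE every rank holds the same leafdata,
   so each rank just copies the slice that maps onto its own roots. For other ops, the leaf buffers
   are reduced onto rank 0 with MPI_Reduce and the reduced segments are then scattered back to the
   owning ranks' root buffers with MPIU_Iscatter; the scatter is completed by
   PetscSFReduceEnd_Allgatherv. */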
static PetscErrorCode PetscSFReduceBegin_Allgather(PetscSF sf, MPI_Datatype unit, PetscMemType leafmtype, const void *leafdata, PetscMemType rootmtype, void *rootdata, MPI_Op op)
{
  PetscSFLink        link;
  PetscInt           rstart;
  MPI_Comm           comm;
  PetscMPIInt        rank, count, recvcount;
  void              *rootbuf = NULL, *leafbuf = NULL; /* buffer seen by MPI */
  PetscSF_Allgather *dat = (PetscSF_Allgather *)sf->data;
  MPI_Request       *req = NULL;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_REDUCE, &link));
  if (op == MPI_REPLACE) {
    /* REPLACE is only meaningful when all processes have the same leafdata to reduce, so copying from the local leafdata is fine */
    PetscCall(PetscLayoutGetRange(sf->map, &rstart, NULL));
    PetscCall((*link->Memcpy)(link, rootmtype, rootdata, leafmtype, (const char *)leafdata + (size_t)rstart * link->unitbytes, (size_t)sf->nroots * link->unitbytes));
    if (PetscMemTypeDevice(leafmtype) && PetscMemTypeHost(rootmtype)) PetscCall((*link->SyncStream)(link)); /* Sync the device-to-host memcpy */
  } else {
    PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
    PetscCallMPI(MPI_Comm_rank(comm, &rank));
    PetscCall(PetscSFLinkPackLeafData(sf, link, PETSCSF_REMOTE, leafdata));
    PetscCall(PetscSFLinkCopyLeafBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
    PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_LEAF2ROOT, &rootbuf, &leafbuf, &req, NULL));
    PetscCall(PetscMPIIntCast(dat->rootbuflen[PETSCSF_REMOTE], &recvcount));
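    /* Rank 0 receives the MPI_Reduce result in its remote leaf buffer; allocate that buffer on
       demand, and pass MPI_IN_PLACE when the packed leaf data already lives in it */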
    if (rank == 0 && !link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]) PetscCall(PetscSFMalloc(sf, link->leafmtype_mpi, sf->leafbuflen[PETSCSF_REMOTE] * link->unitbytes, (void **)&link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]));
    if (rank == 0 && link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi] == leafbuf) leafbuf = MPI_IN_PLACE;
    PetscCall(PetscMPIIntCast(sf->nleaves * link->bs, &count));
    PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link));
    PetscCallMPI(MPI_Reduce(leafbuf, link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi], count, link->basicunit, op, 0, comm)); /* Must do reduce with MPI builtin datatype basicunit */
    PetscCallMPI(MPIU_Iscatter(link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi], recvcount, unit, rootbuf, recvcount, unit, 0 /*rank 0*/, comm, req));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
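
/* Broadcast to rank 0 only: gather root data onto the leaves of rank 0 via MPI_Igather. Unlike
   the Begin routines above, the communication is completed within this routine. */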
static PetscErrorCode PetscSFBcastToZero_Allgather(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, void *leafdata)
{
  PetscSFLink  link;
  PetscMPIInt  rank;
  PetscMPIInt  sendcount;
  MPI_Comm     comm;
  void        *rootbuf = NULL, *leafbuf = NULL;
  MPI_Request *req = NULL;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, MPI_REPLACE, PETSCSF_BCAST, &link));
  PetscCall(PetscSFLinkPackRootData(sf, link, PETSCSF_REMOTE, rootdata));
  PetscCall(PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCall(PetscMPIIntCast(sf->nroots, &sendcount));
  PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_ROOT2LEAF, &rootbuf, &leafbuf, &req, NULL));
  PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link));
  PetscCallMPI(MPIU_Igather(rootbuf == leafbuf ? MPI_IN_PLACE : rootbuf, sendcount, unit, leafbuf, sendcount, unit, 0 /*rank 0*/, comm, req));
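  /* Wait for the Igather to complete, then, on rank 0, copy the result from the host leaf buffer
     to the device leafdata if GPU-aware MPI was not used */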
  PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));
  PetscCall(PetscSFLinkFinishCommunication(sf, link, PETSCSF_ROOT2LEAF));
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)sf), &rank));
  if (rank == 0 && PetscMemTypeDevice(leafmtype) && !sf->use_gpu_aware_mpi) {
    PetscCall((*link->Memcpy)(link, PETSC_MEMTYPE_DEVICE, leafdata, PETSC_MEMTYPE_HOST, link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_HOST], sf->leafbuflen[PETSCSF_REMOTE] * link->unitbytes));
  }
  PetscCall(PetscSFLinkReclaim(sf, &link));
  PetscFunctionReturn(PETSC_SUCCESS);
}
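
/* Register the Allgather implementation: most operations are inherited from Allgatherv; only
   SetUp, BcastBegin, ReduceBegin and BcastToZero are specialized to exploit the uniform counts
   of the allgather pattern. */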
PETSC_INTERN PetscErrorCode PetscSFCreate_Allgather(PetscSF sf)
{
  PetscSF_Allgather *dat = (PetscSF_Allgather *)sf->data;

  PetscFunctionBegin;
  sf->ops->BcastEnd  = PetscSFBcastEnd_Basic;
  sf->ops->ReduceEnd = PetscSFReduceEnd_Allgatherv;

  /* Inherit from Allgatherv */
  sf->ops->Reset           = PetscSFReset_Allgatherv;
  sf->ops->Destroy         = PetscSFDestroy_Allgatherv;
  sf->ops->FetchAndOpBegin = PetscSFFetchAndOpBegin_Allgatherv;
  sf->ops->FetchAndOpEnd   = PetscSFFetchAndOpEnd_Allgatherv;
  sf->ops->GetRootRanks    = PetscSFGetRootRanks_Allgatherv;
  sf->ops->CreateLocalSF   = PetscSFCreateLocalSF_Allgatherv;
  sf->ops->GetGraph        = PetscSFGetGraph_Allgatherv;
  sf->ops->GetLeafRanks    = PetscSFGetLeafRanks_Allgatherv;

  /* Allgather stuff */
  sf->ops->SetUp       = PetscSFSetUp_Allgather;
  sf->ops->BcastBegin  = PetscSFBcastBegin_Allgather;
  sf->ops->ReduceBegin = PetscSFReduceBegin_Allgather;
  sf->ops->BcastToZero = PetscSFBcastToZero_Allgather;

  sf->collective = PETSC_TRUE;

  PetscCall(PetscNew(&dat));
  sf->data = (void *)dat;
  PetscFunctionReturn(PETSC_SUCCESS);
}
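
/* Illustrative usage (a minimal sketch, not part of this source file): an SF backed by this
   implementation is normally obtained by describing the communication graph with the built-in
   allgather pattern rather than by setting the type directly. Assuming a PetscLayout "map" that
   describes how the roots are distributed across the communicator:

     PetscSF sf;
     PetscCall(PetscSFCreate(PETSC_COMM_WORLD, &sf));
     PetscCall(PetscSFSetGraphWithPattern(sf, map, PETSCSF_PATTERN_ALLGATHER));
     PetscCall(PetscSFSetUp(sf));
     // PetscSFBcastBegin/End and PetscSFReduceBegin/End on sf now use the routines above
     PetscCall(PetscSFDestroy(&sf));
*/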