Actual source code: sfallgather.c

#include <../src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.h>

/* Reuse the Allgatherv type. The difference is that some fields (i.e., displs, recvcounts) are not used in Allgather on rank != 0, which is not a big deal */
typedef PetscSF_Allgatherv PetscSF_Allgather;

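/* All leaf/root buffers of this SF are contiguous and duplicate-free; the remote buffer lengths are simply
   nleaves and nroots. The one nonblocking MPI collective started by a Begin routine is tracked as a single
   root request. */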
PetscErrorCode PetscSFSetUp_Allgather(PetscSF sf)
{
  PetscInt           i;
  PetscSF_Allgather *dat = (PetscSF_Allgather *)sf->data;

  PetscFunctionBegin;
  for (i = PETSCSF_LOCAL; i <= PETSCSF_REMOTE; i++) {
    sf->leafbuflen[i]  = 0;
    sf->leafstart[i]   = 0;
    sf->leafcontig[i]  = PETSC_TRUE;
    sf->leafdups[i]    = PETSC_FALSE;
    dat->rootbuflen[i] = 0;
    dat->rootstart[i]  = 0;
    dat->rootcontig[i] = PETSC_TRUE;
    dat->rootdups[i]   = PETSC_FALSE;
  }

  sf->leafbuflen[PETSCSF_REMOTE]  = sf->nleaves;
  dat->rootbuflen[PETSCSF_REMOTE] = sf->nroots;
  sf->persistent                  = PETSC_FALSE;
  sf->nleafreqs                   = 0; /* MPI collectives only need one request. We treat it as a root request. */
  dat->nrootreqs                  = 1;
  PetscFunctionReturn(PETSC_SUCCESS);
}

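/* Bcast roots to leaves: each rank contributes its nroots entries to an MPI_Iallgather, so every rank's
   leaf buffer receives the concatenation of all roots */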
static PetscErrorCode PetscSFBcastBegin_Allgather(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, void *leafdata, MPI_Op op)
{
  PetscSFLink  link;
  PetscMPIInt  sendcount;
  MPI_Comm     comm;
  void        *rootbuf = NULL, *leafbuf = NULL; /* buffer seen by MPI */
  MPI_Request *req = NULL;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_BCAST, &link));
  PetscCall(PetscSFLinkPackRootData(sf, link, PETSCSF_REMOTE, rootdata));
  PetscCall(PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCall(PetscMPIIntCast(sf->nroots, &sendcount));
  PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_ROOT2LEAF, &rootbuf, &leafbuf, &req, NULL));
  PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link, PETSCSF_ROOT2LEAF));
  PetscCallMPI(MPIU_Iallgather(rootbuf, sendcount, unit, leafbuf, sendcount, unit, comm, req));
  PetscFunctionReturn(PETSC_SUCCESS);
}

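/* Reduce leaves to roots: with MPI_REPLACE each rank copies its own slice of the (replicated) leafdata;
   otherwise the leaves are combined onto rank 0 with MPI_Reduce and the result is scattered back so that
   each rank receives the values for its nroots entries */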
static PetscErrorCode PetscSFReduceBegin_Allgather(PetscSF sf, MPI_Datatype unit, PetscMemType leafmtype, const void *leafdata, PetscMemType rootmtype, void *rootdata, MPI_Op op)
{
  PetscSFLink        link;
  PetscInt           rstart;
  MPI_Comm           comm;
  PetscMPIInt        rank, count, recvcount;
  void              *rootbuf = NULL, *leafbuf = NULL; /* buffer seen by MPI */
  PetscSF_Allgather *dat = (PetscSF_Allgather *)sf->data;
  MPI_Request       *req = NULL;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_REDUCE, &link));
  if (op == MPI_REPLACE) {
    /* REPLACE is only meaningful when all processes have the same leafdata to reduce. Therefore copying from the local leafdata is fine */
    PetscCall(PetscLayoutGetRange(sf->map, &rstart, NULL));
    PetscCall((*link->Memcpy)(link, rootmtype, rootdata, leafmtype, (const char *)leafdata + (size_t)rstart * link->unitbytes, (size_t)sf->nroots * link->unitbytes));
    if (PetscMemTypeDevice(leafmtype) && PetscMemTypeHost(rootmtype)) PetscCall((*link->SyncStream)(link)); /* Sync the device-to-host memcpy */
  } else {
    PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
    PetscCallMPI(MPI_Comm_rank(comm, &rank));
    PetscCall(PetscSFLinkPackLeafData(sf, link, PETSCSF_REMOTE, leafdata));
    PetscCall(PetscSFLinkCopyLeafBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
    PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_LEAF2ROOT, &rootbuf, &leafbuf, &req, NULL));
    PetscCall(PetscMPIIntCast(dat->rootbuflen[PETSCSF_REMOTE], &recvcount));
    if (rank == 0 && !link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]) {
      PetscCall(PetscSFMalloc(sf, link->leafmtype_mpi, sf->leafbuflen[PETSCSF_REMOTE] * link->unitbytes, (void **)&link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]));
    }
    if (rank == 0 && link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi] == leafbuf) leafbuf = MPI_IN_PLACE;
    PetscCall(PetscMPIIntCast(sf->nleaves * link->bs, &count));
    PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link, PETSCSF_LEAF2ROOT));
    PetscCallMPI(MPI_Reduce(leafbuf, link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi], count, link->basicunit, op, 0, comm)); /* Must do the reduce with the MPI builtin datatype basicunit */
    PetscCallMPI(MPIU_Iscatter(link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi], recvcount, unit, rootbuf, recvcount, unit, 0 /*rank 0*/, comm, req));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

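/* Gather all roots onto the leaves of rank 0 only (MPI_Igather to rank 0). Unlike the Begin routines above,
   the communication is completed and the link reclaimed before returning */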
static PetscErrorCode PetscSFBcastToZero_Allgather(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, void *leafdata)
{
  PetscSFLink  link;
  PetscMPIInt  rank;
  PetscMPIInt  sendcount;
  MPI_Comm     comm;
  void        *rootbuf = NULL, *leafbuf = NULL;
  MPI_Request *req = NULL;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, MPI_REPLACE, PETSCSF_BCAST, &link));
  PetscCall(PetscSFLinkPackRootData(sf, link, PETSCSF_REMOTE, rootdata));
  PetscCall(PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCall(PetscMPIIntCast(sf->nroots, &sendcount));
  PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_ROOT2LEAF, &rootbuf, &leafbuf, &req, NULL));
  PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link, PETSCSF_ROOT2LEAF));
  PetscCallMPI(MPIU_Igather(rootbuf == leafbuf ? MPI_IN_PLACE : rootbuf, sendcount, unit, leafbuf, sendcount, unit, 0 /*rank 0*/, comm, req));
  PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));
  PetscCall(PetscSFLinkFinishCommunication(sf, link, PETSCSF_ROOT2LEAF));
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)sf), &rank));
  if (rank == 0 && PetscMemTypeDevice(leafmtype) && !sf->use_gpu_aware_mpi) {
    PetscCall((*link->Memcpy)(link, PETSC_MEMTYPE_DEVICE, leafdata, PETSC_MEMTYPE_HOST, link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_HOST], sf->leafbuflen[PETSCSF_REMOTE] * link->unitbytes));
  }
  PetscCall(PetscSFLinkReclaim(sf, &link));
  PetscFunctionReturn(PETSC_SUCCESS);
}

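/* Build the ops table: the End, Reset/Destroy and graph-query routines are inherited from Allgatherv,
   while SetUp and the Begin routines use the Allgather-specific implementations above */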
PETSC_INTERN PetscErrorCode PetscSFCreate_Allgather(PetscSF sf)
{
  PetscSF_Allgather *dat = (PetscSF_Allgather *)sf->data;

  PetscFunctionBegin;
  sf->ops->BcastEnd  = PetscSFBcastEnd_Basic;
  sf->ops->ReduceEnd = PetscSFReduceEnd_Allgatherv;

  /* Inherit from Allgatherv */
  sf->ops->Reset           = PetscSFReset_Allgatherv;
  sf->ops->Destroy         = PetscSFDestroy_Allgatherv;
  sf->ops->FetchAndOpBegin = PetscSFFetchAndOpBegin_Allgatherv;
  sf->ops->FetchAndOpEnd   = PetscSFFetchAndOpEnd_Allgatherv;
  sf->ops->GetRootRanks    = PetscSFGetRootRanks_Allgatherv;
  sf->ops->CreateLocalSF   = PetscSFCreateLocalSF_Allgatherv;
  sf->ops->GetGraph        = PetscSFGetGraph_Allgatherv;
  sf->ops->GetLeafRanks    = PetscSFGetLeafRanks_Allgatherv;

  /* Allgather stuff */
  sf->ops->SetUp       = PetscSFSetUp_Allgather;
  sf->ops->BcastBegin  = PetscSFBcastBegin_Allgather;
  sf->ops->ReduceBegin = PetscSFReduceBegin_Allgather;
  sf->ops->BcastToZero = PetscSFBcastToZero_Allgather;

  sf->collective = PETSC_TRUE;

  PetscCall(PetscNew(&dat));
  sf->data = (void *)dat;
  PetscFunctionReturn(PETSC_SUCCESS);
}
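
/* A minimal usage sketch (illustration only, excluded from compilation), assuming the standard public SF
   API: an SF with this implementation is normally obtained through PetscSFSetGraphWithPattern() with
   PETSCSF_PATTERN_ALLGATHER, after which PetscSFBcastBegin/End dispatch to the routines above. The names
   nlocal, rootdata and leafdata are placeholders supplied by the caller. */
#if 0
#include <petscsf.h>
static PetscErrorCode ExampleAllgatherBcast(MPI_Comm comm, PetscInt nlocal, const PetscScalar *rootdata, PetscScalar *leafdata)
{
  PetscSF     sf;
  PetscLayout map;

  PetscFunctionBegin;
  /* Layout of roots: nlocal entries per rank; leafdata must hold the global number of roots on every rank */
  PetscCall(PetscLayoutCreateFromSizes(comm, nlocal, PETSC_DECIDE, 1, &map));
  PetscCall(PetscSFCreate(comm, &sf));
  PetscCall(PetscSFSetGraphWithPattern(sf, map, PETSCSF_PATTERN_ALLGATHER)); /* selects the Allgather SF */
  PetscCall(PetscSFBcastBegin(sf, MPIU_SCALAR, rootdata, leafdata, MPI_REPLACE));
  PetscCall(PetscSFBcastEnd(sf, MPIU_SCALAR, rootdata, leafdata, MPI_REPLACE));
  PetscCall(PetscSFDestroy(&sf));
  PetscCall(PetscLayoutDestroy(&map));
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif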