Actual source code: sfallgather.c

#include <../src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.h>

/* Reuse the Allgatherv type. The difference is that some fields (i.e., displs, recvcounts) are not used in Allgather on nonzero ranks, which is not a big deal */
typedef PetscSF_Allgatherv PetscSF_Allgather;

PETSC_INTERN PetscErrorCode PetscSFBcastBegin_Gather(PetscSF, MPI_Datatype, PetscMemType, const void *, PetscMemType, void *, MPI_Op);

PetscErrorCode PetscSFSetUp_Allgather(PetscSF sf)
{
  PetscInt           i;
  PetscSF_Allgather *dat = (PetscSF_Allgather *)sf->data;

  PetscFunctionBegin;
  for (i = PETSCSF_LOCAL; i <= PETSCSF_REMOTE; i++) {
    sf->leafbuflen[i]  = 0;
    sf->leafstart[i]   = 0;
    sf->leafcontig[i]  = PETSC_TRUE;
    sf->leafdups[i]    = PETSC_FALSE;
    dat->rootbuflen[i] = 0;
    dat->rootstart[i]  = 0;
    dat->rootcontig[i] = PETSC_TRUE;
    dat->rootdups[i]   = PETSC_FALSE;
  }

  sf->leafbuflen[PETSCSF_REMOTE]  = sf->nleaves;
  dat->rootbuflen[PETSCSF_REMOTE] = sf->nroots;
  sf->persistent                  = PETSC_FALSE;
  sf->nleafreqs                   = 0; /* MPI collectives only need one request. We treat it as a root request. */
  dat->nrootreqs                  = 1;
  PetscFunctionReturn(PETSC_SUCCESS);
}
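/*
   A hedged usage sketch (not part of this file): an SF of this type is
   typically obtained by describing an allgather pattern over a PetscLayout,
   for example

     PetscSF     sf;
     PetscLayout map; // assumed already set up; each rank owns some roots

     PetscCall(PetscSFCreate(comm, &sf));
     PetscCall(PetscSFSetGraphWithPattern(sf, map, PETSCSF_PATTERN_ALLGATHER));
     PetscCall(PetscSFSetUp(sf)); // reaches PetscSFSetUp_Allgather() above

   after which every rank has sf->nleaves equal to the global number of roots,
   matching the buffer lengths computed above.
*/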

static PetscErrorCode PetscSFBcastBegin_Allgather(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, void *leafdata, MPI_Op op)
{
  PetscSFLink  link;
  PetscMPIInt  sendcount;
  MPI_Comm     comm;
  void        *rootbuf = NULL, *leafbuf = NULL; /* buffers seen by MPI */
  MPI_Request *req;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_BCAST, &link));
  PetscCall(PetscSFLinkPackRootData(sf, link, PETSCSF_REMOTE, rootdata));
  PetscCall(PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCall(PetscMPIIntCast(sf->nroots, &sendcount));
  PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_ROOT2LEAF, &rootbuf, &leafbuf, &req, NULL));
  PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link, PETSCSF_ROOT2LEAF));
  PetscCallMPI(MPIU_Iallgather(rootbuf, sendcount, unit, leafbuf, sendcount, unit, comm, req));
  PetscFunctionReturn(PETSC_SUCCESS);
}
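/*
   Illustrative call sequence (a sketch, not part of this file): a user-level
   broadcast that lands in the routine above looks like

     // rootdata has sf->nroots entries; leafdata has sf->nleaves entries
     PetscCall(PetscSFBcastBegin(sf, MPIU_SCALAR, rootdata, leafdata, MPI_REPLACE));
     PetscCall(PetscSFBcastEnd(sf, MPIU_SCALAR, rootdata, leafdata, MPI_REPLACE));

   The Begin posts the MPIU_Iallgather() above; the matching End
   (PetscSFBcastEnd_Basic, see PetscSFCreate_Allgather below) waits on the
   request and unpacks the leaf buffer.
*/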

static PetscErrorCode PetscSFReduceBegin_Allgather(PetscSF sf, MPI_Datatype unit, PetscMemType leafmtype, const void *leafdata, PetscMemType rootmtype, void *rootdata, MPI_Op op)
{
  PetscSFLink        link;
  PetscInt           rstart;
  MPI_Comm           comm;
  PetscMPIInt        rank, count, recvcount;
  void              *rootbuf = NULL, *leafbuf = NULL; /* buffers seen by MPI */
  PetscSF_Allgather *dat = (PetscSF_Allgather *)sf->data;
  MPI_Request       *req;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_REDUCE, &link));
  if (op == MPI_REPLACE) {
    /* REPLACE is only meaningful when all processes have the same leafdata to reduce, so copying from the local segment of leafdata is fine */
    PetscCall(PetscLayoutGetRange(sf->map, &rstart, NULL));
    PetscCall((*link->Memcpy)(link, rootmtype, rootdata, leafmtype, (const char *)leafdata + (size_t)rstart * link->unitbytes, (size_t)sf->nroots * link->unitbytes));
    if (PetscMemTypeDevice(leafmtype) && PetscMemTypeHost(rootmtype)) PetscCall((*link->SyncStream)(link)); /* wait for the device-to-host memcpy */
  } else {
    PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
    PetscCallMPI(MPI_Comm_rank(comm, &rank));
    PetscCall(PetscSFLinkPackLeafData(sf, link, PETSCSF_REMOTE, leafdata));
    PetscCall(PetscSFLinkCopyLeafBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
    PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_LEAF2ROOT, &rootbuf, &leafbuf, &req, NULL));
    PetscCall(PetscMPIIntCast(dat->rootbuflen[PETSCSF_REMOTE], &recvcount));
    /* Allocate a separate leaf buffer on rank 0 so the reduction result does not overwrite the send buffer */
    if (rank == 0 && !link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]) {
      PetscCall(PetscSFMalloc(sf, link->leafmtype_mpi, sf->leafbuflen[PETSCSF_REMOTE] * link->unitbytes, (void **)&link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]));
    }
    if (rank == 0 && link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi] == leafbuf) leafbuf = MPI_IN_PLACE;
    PetscCall(PetscMPIIntCast(sf->nleaves * link->bs, &count));
    PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link, PETSCSF_LEAF2ROOT));
    PetscCallMPI(MPI_Reduce(leafbuf, link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi], count, link->basicunit, op, 0, comm)); /* the reduction must use the MPI builtin datatype basicunit */
    PetscCallMPI(MPIU_Iscatter(link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi], recvcount, unit, rootbuf, recvcount, unit, 0 /* rank 0 */, comm, req));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
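/*
   Illustrative counterpart for the reverse direction (a sketch, not part of
   this file): summing every rank's leaves back onto the owning roots,

     PetscCall(PetscSFReduceBegin(sf, MPIU_SCALAR, leafdata, rootdata, MPIU_SUM));
     PetscCall(PetscSFReduceEnd(sf, MPIU_SCALAR, leafdata, rootdata, MPIU_SUM));

   takes the else-branch above: MPI_Reduce() accumulates onto rank 0, then
   MPIU_Iscatter() hands each rank the segment it owns.
*/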

static PetscErrorCode PetscSFBcastToZero_Allgather(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, void *leafdata)
{
  PetscSFLink link;
  PetscMPIInt rank;

  PetscFunctionBegin;
  PetscCall(PetscSFBcastBegin_Gather(sf, unit, rootmtype, rootdata, leafmtype, leafdata, MPI_REPLACE));
  PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));
  PetscCall(PetscSFLinkFinishCommunication(sf, link, PETSCSF_ROOT2LEAF));
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)sf), &rank));
  if (rank == 0 && PetscMemTypeDevice(leafmtype) && !sf->use_gpu_aware_mpi) {
    PetscCall((*link->Memcpy)(link, PETSC_MEMTYPE_DEVICE, leafdata, PETSC_MEMTYPE_HOST, link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_HOST], sf->leafbuflen[PETSCSF_REMOTE] * link->unitbytes));
  }
  PetscCall(PetscSFLinkReclaim(sf, &link));
  PetscFunctionReturn(PETSC_SUCCESS);
}
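/*
   Note (inferred from the code above, not stated in this file): only rank 0
   ends up with valid leafdata here; PetscSFBcastBegin_Gather() funnels all
   root data to rank 0, and the device copy-back is likewise done on rank 0
   only.
*/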

PETSC_INTERN PetscErrorCode PetscSFCreate_Allgather(PetscSF sf)
{
  PetscSF_Allgather *dat;

  PetscFunctionBegin;
  sf->ops->BcastEnd  = PetscSFBcastEnd_Basic;
  sf->ops->ReduceEnd = PetscSFReduceEnd_Allgatherv;

  /* Inherit from Allgatherv */
  sf->ops->Reset           = PetscSFReset_Allgatherv;
  sf->ops->Destroy         = PetscSFDestroy_Allgatherv;
  sf->ops->FetchAndOpBegin = PetscSFFetchAndOpBegin_Allgatherv;
  sf->ops->FetchAndOpEnd   = PetscSFFetchAndOpEnd_Allgatherv;
  sf->ops->GetRootRanks    = PetscSFGetRootRanks_Allgatherv;
  sf->ops->CreateLocalSF   = PetscSFCreateLocalSF_Allgatherv;
  sf->ops->GetGraph        = PetscSFGetGraph_Allgatherv;
  sf->ops->GetLeafRanks    = PetscSFGetLeafRanks_Allgatherv;

  /* Allgather stuff */
  sf->ops->SetUp       = PetscSFSetUp_Allgather;
  sf->ops->BcastBegin  = PetscSFBcastBegin_Allgather;
  sf->ops->ReduceBegin = PetscSFReduceBegin_Allgather;
  sf->ops->BcastToZero = PetscSFBcastToZero_Allgather;

  PetscCall(PetscNew(&dat));
  sf->data = (void *)dat;
  PetscFunctionReturn(PETSC_SUCCESS);
}
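/*
   End-to-end sketch (a hypothetical standalone program, not part of this
   file; the local root count of 2 is an arbitrary choice for illustration):

     #include <petscsf.h>

     int main(int argc, char **argv)
     {
       PetscSF      sf;
       PetscLayout  map;
       PetscInt     i, nroots = 2, nleaves, rstart;
       PetscScalar *rootdata, *leafdata;

       PetscCall(PetscInitialize(&argc, &argv, NULL, NULL));
       PetscCall(PetscLayoutCreate(PETSC_COMM_WORLD, &map));
       PetscCall(PetscLayoutSetLocalSize(map, nroots));
       PetscCall(PetscLayoutSetUp(map));
       PetscCall(PetscLayoutGetRange(map, &rstart, NULL));
       PetscCall(PetscLayoutGetSize(map, &nleaves)); // every rank has a leaf for each global root

       PetscCall(PetscSFCreate(PETSC_COMM_WORLD, &sf));
       PetscCall(PetscSFSetGraphWithPattern(sf, map, PETSCSF_PATTERN_ALLGATHER));

       PetscCall(PetscMalloc2(nroots, &rootdata, nleaves, &leafdata));
       for (i = 0; i < nroots; i++) rootdata[i] = (PetscScalar)(rstart + i);

       PetscCall(PetscSFBcastBegin(sf, MPIU_SCALAR, rootdata, leafdata, MPI_REPLACE));
       PetscCall(PetscSFBcastEnd(sf, MPIU_SCALAR, rootdata, leafdata, MPI_REPLACE));
       // leafdata now holds 0, 1, ..., N-1 on every rank

       PetscCall(PetscFree2(rootdata, leafdata));
       PetscCall(PetscSFDestroy(&sf));
       PetscCall(PetscLayoutDestroy(&map));
       PetscCall(PetscFinalize());
       return 0;
     }
*/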