Actual source code: garbage.c

  1: #include <petsc/private/garbagecollector.h>

  3: /* Fetches the garbage hash map from a communicator, creating it if absent */
  4: static PetscErrorCode GarbageGetHMap_Private(MPI_Comm comm, PetscGarbage *garbage)
  5: {
  6:   PetscMPIInt  flag;
  7:   PetscHMapObj garbage_map;

  9:   PetscFunctionBegin;
 10:   PetscCallMPI(MPI_Comm_get_attr(comm, Petsc_Garbage_HMap_keyval, garbage, &flag));
 11:   if (!flag) {
  12:     /* No garbage, create one */
 13:     PetscCall(PetscHMapObjCreate(&garbage_map));
 14:     garbage->map = garbage_map;
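    /* PetscGarbage is a union (declared in the garbagecollector private header included above),
       so garbage->ptr aliases garbage->map and the map can be cached directly as the
       attribute value below */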
 15:     PetscCallMPI(MPI_Comm_set_attr(comm, Petsc_Garbage_HMap_keyval, garbage->ptr));
 16:   }
 17:   PetscFunctionReturn(PETSC_SUCCESS);
 18: }

 20: /*@C
 21:   PetscObjectDelayedDestroy - Adds an object to a data structure for
 22:   later destruction.

 24:   Not Collective

 26:   Input Parameter:
 27: . obj - object to be destroyed

 29:   Level: developer

 31:   Notes:
 32:   Analogue to `PetscObjectDestroy()` for use in managed languages.

 34:   A PETSc object is given a creation index at initialisation based on
 35:   the communicator it was created on and the order in which it is
 36:   created. When this function is passed a PETSc object, a pointer to
 37:   the object is stashed on a garbage dictionary (`PetscHMapObj`) which is
 38:   keyed by its creation index.

 40:   Objects stashed on this garbage dictionary can later be destroyed
 41:   with a call to `PetscGarbageCleanup()`.

 43:   This function is intended for use with managed languages such as
 44:   Python or Julia, which may not destroy objects in a deterministic
 45:   order.

 47:   Serial objects (that have a communicator with size 1) are destroyed
 48:   eagerly since deadlocks cannot occur.

 50: .seealso: `PetscGarbageCleanup()`, `PetscObjectDestroy()`
 51: @*/
 52: PetscErrorCode PetscObjectDelayedDestroy(PetscObject *obj)
 53: {
 54:   MPI_Comm     comm;
 55:   PetscMPIInt  size;
 56:   PetscInt     count;
 57:   PetscGarbage garbage;

 59:   PetscFunctionBegin;
 60:   PetscAssertPointer(obj, 1);
 61:   /* Don't stash NULL pointers */
 62:   if (*obj != NULL) {
 63:     /* Elaborate check for getting non-cyclic reference counts */
 64:     if (!(*obj)->non_cyclic_references) {
 65:       count = --(*obj)->refct;
 66:     } else {
 67:       PetscCall((*obj)->non_cyclic_references(*obj, &count));
 68:       --count;
 69:       --(*obj)->refct;
 70:     }
 71:     /* Only stash if the (non-cyclic) reference count hits 0 */
 72:     if (count == 0) {
 73:       (*obj)->refct = 1;
 74:       PetscCall(PetscObjectGetComm(*obj, &comm));
 75:       PetscCallMPI(MPI_Comm_size(comm, &size));
 76:       /* Eagerly destroy serial objects */
 77:       if (size == 1) {
 78:         PetscCall(PetscObjectDestroy(obj));
 79:       } else {
 80:         PetscCall(GarbageGetHMap_Private(comm, &garbage));
 81:         PetscCall(PetscHMapObjSet(garbage.map, (*obj)->cidx, *obj));
 82:       }
 83:     }
 84:   }
 85:   *obj = NULL;
 86:   PetscFunctionReturn(PETSC_SUCCESS);
 87: }

 89: /* Performs the intersection of 2 sorted arrays seta and setb of lengths
  90:    lena and lenb respectively, returning the result in seta and lena.
 91:    This is an O(n) operation */
 92: static PetscErrorCode GarbageKeySortedIntersect_Private(PetscInt64 seta[], PetscInt *lena, PetscInt64 setb[], PetscInt lenb)
 93: {
 94:   /* The arrays seta and setb MUST be sorted! */
 95:   PetscInt ii, jj = 0, counter = 0;

 97:   PetscFunctionBegin;
 98:   if (PetscDefined(USE_DEBUG)) {
 99:     PetscBool sorted = PETSC_FALSE;
100:     /* In debug mode, check whether the arrays are sorted */
101:     PetscCall(PetscSortedInt64(*lena, seta, &sorted));
102:     PetscCheck(sorted, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Provided array in argument 1 is not sorted");
103:     PetscCall(PetscSortedInt64(lenb, setb, &sorted));
104:     PetscCheck(sorted, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Provided array in argument 3 is not sorted");
105:   }
106:   for (ii = 0; ii < *lena; ii++) {
107:     while (jj < lenb && seta[ii] > setb[jj]) jj++;
108:     if (jj >= lenb) break;
109:     if (seta[ii] == setb[jj]) {
110:       seta[counter] = seta[ii];
111:       counter++;
112:     }
113:   }

115:   *lena = counter;
116:   PetscFunctionReturn(PETSC_SUCCESS);
117: }
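
/* A small worked illustration of the routine above (hypothetical values, not part of the API):

     PetscInt64 seta[] = {1, 4, 7, 9};
     PetscInt   lena   = 4;
     PetscInt64 setb[] = {2, 4, 9, 11};

     PetscCall(GarbageKeySortedIntersect_Private(seta, &lena, setb, 4));
     // afterwards lena == 2 and seta begins {4, 9, ...}

   Both inputs must already be sorted; the two-pointer sweep writes the common keys
   back into the front of seta. */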

119: /* Wrapper to create MPI reduce operator for set intersection */
120: void PetscGarbageKeySortedIntersect(void *inset, void *inoutset, PetscMPIInt *length, MPI_Datatype *dtype)
121: {
122:   PetscInt64 *seta, *setb;

124:   seta = (PetscInt64 *)inoutset;
125:   setb = (PetscInt64 *)inset;

127:   PetscCallAbort(PETSC_COMM_SELF, GarbageKeySortedIntersect_Private(&seta[1], (PetscInt *)&seta[0], &setb[1], (PetscInt)setb[0]));
128: }
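
/* Note: this wrapper has the MPI_User_function signature, so it can be registered as a
   commutative reduction operator, presumably along the lines of (a sketch; the actual
   registration happens elsewhere in PETSc):

     PetscCallMPI(MPI_Op_create(PetscGarbageKeySortedIntersect, 1, &Petsc_Garbage_SetIntersectOp));

   which is the operator used by the Allreduce in GarbageKeyAllReduceIntersect_Private() below. */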

130: /* Performs a collective allreduce intersection of one array per rank */
131: PetscErrorCode GarbageKeyAllReduceIntersect_Private(MPI_Comm comm, PetscInt64 *set, PetscInt *entries)
132: {
133:   PetscInt     ii, max_entries;
134:   PetscInt64  *sendset, *recvset;
135:   MPI_Datatype keyset_type;

137:   PetscFunctionBegin;
138:   /* Sort keys first for use with `GarbageKeySortedIntersect_Private()` */
139:   PetscCall(PetscSortInt64(*entries, set));

141:   /* Get the maximum size of all key sets */
142:   PetscCall(MPIU_Allreduce(entries, &max_entries, 1, MPIU_INT, MPI_MAX, comm));
143:   PetscCall(PetscMalloc1(max_entries + 1, &sendset));
144:   PetscCall(PetscMalloc1(max_entries + 1, &recvset));
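  /* Pack the local keys as [count, key_0, ..., key_{count-1}]; slots beyond
     count are never read by the intersection operator */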
145:   sendset[0] = (PetscInt64)*entries;
146:   for (ii = 1; ii < *entries + 1; ii++) sendset[ii] = set[ii - 1];

148:   /* Create a custom data type to hold the set */
149:   PetscCallMPI(MPI_Type_contiguous(max_entries + 1, MPIU_INT64, &keyset_type));
150:   /* PetscCallMPI(MPI_Type_set_name(keyset_type,"PETSc garbage key set type")); */
151:   PetscCallMPI(MPI_Type_commit(&keyset_type));

153:   /* Perform custom intersect reduce operation over sets */
154:   PetscCallMPI(MPI_Allreduce(sendset, recvset, 1, keyset_type, Petsc_Garbage_SetIntersectOp, comm));

156:   PetscCallMPI(MPI_Type_free(&keyset_type));

158:   *entries = (PetscInt)recvset[0];
159:   for (ii = 0; ii < *entries; ii++) set[ii] = recvset[ii + 1];

161:   PetscCall(PetscFree(sendset));
162:   PetscCall(PetscFree(recvset));
163:   PetscFunctionReturn(PETSC_SUCCESS);
164: }
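
/* As an illustration (hypothetical values): with two ranks whose local key arrays are
   {1, 4, 7} and {4, 7, 9}, a call such as

     PetscCall(GarbageKeyAllReduceIntersect_Private(comm, set, &entries));

   returns on every rank with entries == 2 and set beginning {4, 7, ...}. */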

166: /*@C
167:   PetscGarbageCleanup - Destroys objects placed in the garbage by
168:   `PetscObjectDelayedDestroy()`.

170:   Collective

172:   Input Parameter:
173: . comm - MPI communicator over which to perform collective cleanup

175:   Level: developer

177:   Notes:
178:   Implements a collective garbage collection.
179:   A per-MPI-communicator garbage dictionary is created to store
180:   references to objects destroyed using `PetscObjectDelayedDestroy()`.
181:   Objects that appear in this dictionary on all MPI processes can be destroyed
182:   by calling `PetscGarbageCleanup()`.

184:   This is done as follows\:
185:   1.  Keys of the garbage dictionary, which correspond to the creation
186:   indices of the objects stashed, are sorted.
187:   2.  A collective intersection of dictionary keys is performed by all
188:   ranks in the communicator.
189:   3.  The intersection is broadcast back to all ranks in the
190:   communicator.
191:   4.  The objects on the dictionary are collectively destroyed in
192:   creation index order using a call to `PetscObjectDestroy()`.

194:   This function is intended for use with managed languages such as
195:   Python or Julia, which may not destroy objects in a deterministic
196:   order.

198: .seealso: `PetscObjectDelayedDestroy()`
199: @*/
200: PetscErrorCode PetscGarbageCleanup(MPI_Comm comm)
201: {
202:   PetscInt     ii, entries, offset;
203:   PetscInt64  *keys;
204:   PetscObject  obj;
205:   PetscGarbage garbage;

207:   PetscFunctionBegin;
208:   /* Duplicate comm to prevent it being cleaned up by PetscObjectDestroy() */
209:   PetscCall(PetscCommDuplicate(comm, &comm, NULL));

211:   /* Grab garbage from comm and remove it;
212:    this avoids calling PetscCommDestroy() and endlessly recursing */
213:   PetscCall(GarbageGetHMap_Private(comm, &garbage));
214:   PetscCallMPI(MPI_Comm_delete_attr(comm, Petsc_Garbage_HMap_keyval));

216:   /* Get keys from garbage hash map */
217:   PetscCall(PetscHMapObjGetSize(garbage.map, &entries));
218:   PetscCall(PetscMalloc1(entries, &keys));
219:   offset = 0;
220:   PetscCall(PetscHMapObjGetKeys(garbage.map, &offset, keys));

222:   /* Gather and intersect */
223:   PetscCall(GarbageKeyAllReduceIntersect_Private(comm, keys, &entries));

225:   /* Collectively destroy objects that appear in garbage in
226:      creation index order */
227:   for (ii = 0; ii < entries; ii++) {
228:     PetscCall(PetscHMapObjGet(garbage.map, keys[ii], &obj));
229:     PetscCall(PetscObjectDestroy(&obj));
230:     PetscCall(PetscFree(obj));
231:     PetscCall(PetscHMapObjDel(garbage.map, keys[ii]));
232:   }
233:   PetscCall(PetscFree(keys));
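
  /* Objects whose keys were not in the global intersection remain in garbage.map;
     the map is re-attached below so they can be destroyed by a later cleanup */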

235:   /* Put garbage back */
236:   PetscCallMPI(MPI_Comm_set_attr(comm, Petsc_Garbage_HMap_keyval, garbage.ptr));
237:   PetscCall(PetscCommDestroy(&comm));
238:   PetscFunctionReturn(PETSC_SUCCESS);
239: }

241: /* Utility function for printing the contents of the garbage on a given comm */
242: PetscErrorCode PetscGarbageView(MPI_Comm comm, PetscViewer viewer)
243: {
244:   char         text[64];
245:   PetscInt     ii, entries, offset;
246:   PetscInt64  *keys;
247:   PetscObject  obj;
248:   PetscGarbage garbage;
249:   PetscMPIInt  rank;

251:   PetscFunctionBegin;
252:   PetscCall(PetscPrintf(comm, "PETSc garbage on "));
253:   if (comm == PETSC_COMM_WORLD) {
254:     PetscCall(PetscPrintf(comm, "PETSC_COMM_WORLD\n"));
255:   } else if (comm == PETSC_COMM_SELF) {
256:     PetscCall(PetscPrintf(comm, "PETSC_COMM_SELF\n"));
257:   } else {
258:     PetscCall(PetscPrintf(comm, "UNKNOWN_COMM\n"));
259:   }
260:   PetscCall(PetscCommDuplicate(comm, &comm, NULL));
261:   PetscCall(GarbageGetHMap_Private(comm, &garbage));

263:   /* Get keys from garbage hash map and sort */
264:   PetscCall(PetscHMapObjGetSize(garbage.map, &entries));
265:   PetscCall(PetscMalloc1(entries, &keys));
266:   offset = 0;
267:   PetscCall(PetscHMapObjGetKeys(garbage.map, &offset, keys));

269:   /* Pretty print entries in a table */
270:   PetscCallMPI(MPI_Comm_rank(comm, &rank));
271:   PetscCall(PetscSynchronizedPrintf(comm, "Rank %i:: ", rank));
272:   PetscCall(PetscFormatConvert("Total entries: %" PetscInt_FMT "\n", text));
273:   PetscCall(PetscSynchronizedPrintf(comm, text, entries));
274:   if (entries) {
275:     PetscCall(PetscSynchronizedPrintf(comm, "| Key   | Type                   | Name                             | Object ID |\n"));
276:     PetscCall(PetscSynchronizedPrintf(comm, "|-------|------------------------|----------------------------------|-----------|\n"));
277:   }
278:   for (ii = 0; ii < entries; ii++) {
279:     PetscCall(PetscHMapObjGet(garbage.map, keys[ii], &obj));
280:     PetscCall(PetscFormatConvert("| %5" PetscInt64_FMT " | %-22s | %-32s | %6" PetscInt_FMT "    |\n", text));
281:     PetscCall(PetscSynchronizedPrintf(comm, text, keys[ii], obj->class_name, obj->description, obj->id));
282:   }
283:   PetscCall(PetscSynchronizedFlush(comm, PETSC_STDOUT));

285:   PetscCall(PetscFree(keys));
286:   PetscCall(PetscCommDestroy(&comm));
287:   PetscFunctionReturn(PETSC_SUCCESS);
288: }
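
/* Example (illustrative) use of the viewer above, e.g. when debugging leftover garbage
   before PetscFinalize():

     PetscCall(PetscGarbageView(PETSC_COMM_WORLD, NULL));

   Each rank prints its entry count followed by one table row per stashed object
   (key, class name, description and object id). */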