Actual source code: aomemscalable.c
/*
  The memory scalable AO application ordering routines. These store the
  orderings on each process for that process' range of values, which is more memory-efficient than `AOBASIC`.
*/

#include <../src/vec/is/ao/aoimpl.h>

typedef struct {
  PetscInt   *app_loc;   /* app_loc[i] is the partner for the ith local PETSc slot */
  PetscInt   *petsc_loc; /* petsc_loc[j] is the partner for the jth local app slot */
  PetscLayout map;       /* determines the local sizes of ao */
} AO_MemoryScalable;
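
/*
  For illustration: with N = 6 entries split over two processes as 3 + 3, process 1 owns the global
  PETSc indices 3,4,5 and the global application indices 3,4,5. On that process, app_loc[0] holds the
  application value paired with PETSc index 3, and petsc_loc[0] holds the PETSc value paired with
  application index 3; `map` stores the ownership ranges used to compute these local offsets.
*/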

/*
  All processors ship the data to process 0 to be printed; note that this is not scalable because
  process 0 allocates space for all the ordering entries across all the processes
*/
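
/*
  For example, viewing a two-entry ordering on one process produces output of the form (values are
  illustrative):
    Number of elements in ordering 2
    PETSc->App App->PETSc
    Process [0]
      0   1   0   1
      1   0   1   0
*/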

static PetscErrorCode AOView_MemoryScalable(AO ao, PetscViewer viewer)
{
  PetscMPIInt        rank, size;
  AO_MemoryScalable *aomems = (AO_MemoryScalable *)ao->data;
  PetscBool          iascii;
  PetscMPIInt        tag_app, tag_petsc;
  PetscLayout        map = aomems->map;
  PetscInt          *app, *app_loc, *petsc, *petsc_loc, len, j;
  MPI_Status         status;

  PetscFunctionBegin;
  PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &iascii));
  PetscCheck(iascii, PetscObjectComm((PetscObject)viewer), PETSC_ERR_SUP, "Viewer type %s not supported for AO MemoryScalable", ((PetscObject)viewer)->type_name);

  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)ao), &rank));
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)ao), &size));

  PetscCall(PetscObjectGetNewTag((PetscObject)ao, &tag_app));
  PetscCall(PetscObjectGetNewTag((PetscObject)ao, &tag_petsc));

  if (rank == 0) {
    PetscCall(PetscViewerASCIIPrintf(viewer, "Number of elements in ordering %" PetscInt_FMT "\n", ao->N));
    PetscCall(PetscViewerASCIIPrintf(viewer, "PETSc->App App->PETSc\n"));

    PetscCall(PetscMalloc2(map->N, &app, map->N, &petsc));
    len = map->n;
    /* print local AO */
    PetscCall(PetscViewerASCIIPrintf(viewer, "Process [%d]\n", rank));
    for (PetscInt i = 0; i < len; i++) PetscCall(PetscViewerASCIIPrintf(viewer, "%3" PetscInt_FMT " %3" PetscInt_FMT " %3" PetscInt_FMT " %3" PetscInt_FMT "\n", i, aomems->app_loc[i], i, aomems->petsc_loc[i]));

    /* recv and print off-processor's AO */
    for (PetscMPIInt i = 1; i < size; i++) {
      len       = map->range[i + 1] - map->range[i];
      app_loc   = app + map->range[i];
      petsc_loc = petsc + map->range[i];
      PetscCallMPI(MPIU_Recv(app_loc, len, MPIU_INT, i, tag_app, PetscObjectComm((PetscObject)ao), &status));
      PetscCallMPI(MPIU_Recv(petsc_loc, len, MPIU_INT, i, tag_petsc, PetscObjectComm((PetscObject)ao), &status));
      PetscCall(PetscViewerASCIIPrintf(viewer, "Process [%d]\n", i));
      for (j = 0; j < len; j++) PetscCall(PetscViewerASCIIPrintf(viewer, "%3" PetscInt_FMT " %3" PetscInt_FMT " %3" PetscInt_FMT " %3" PetscInt_FMT "\n", map->range[i] + j, app_loc[j], map->range[i] + j, petsc_loc[j]));
    }
    PetscCall(PetscFree2(app, petsc));

  } else {
    /* send values */
    PetscCallMPI(MPIU_Send((void *)aomems->app_loc, map->n, MPIU_INT, 0, tag_app, PetscObjectComm((PetscObject)ao)));
    PetscCallMPI(MPIU_Send((void *)aomems->petsc_loc, map->n, MPIU_INT, 0, tag_petsc, PetscObjectComm((PetscObject)ao)));
  }
  PetscCall(PetscViewerFlush(viewer));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode AODestroy_MemoryScalable(AO ao)
{
  AO_MemoryScalable *aomems = (AO_MemoryScalable *)ao->data;

  PetscFunctionBegin;
  PetscCall(PetscFree2(aomems->app_loc, aomems->petsc_loc));
  PetscCall(PetscLayoutDestroy(&aomems->map));
  PetscCall(PetscFree(aomems));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
  Input Parameters:
+ ao - the application ordering context
. n - the number of integers in ia[]
. ia - the integers; these are replaced with their mapped value
- maploc - app_loc or petsc_loc in struct "AO_MemoryScalable"

  Output Parameter:
. ia - the mapped integers
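
  Notes:
  A rough sketch of the communication performed below: each process uses map->range to find the owner
  of every entry of ia[], sends the entries it does not own to the owning processes (first round of
  messages, tag1), the owners translate them through maploc[] and send the results back (second round,
  tag2), and the replies are unpacked into ia[] in their original positions. Locally owned entries are
  translated in place; negative entries are left unchanged and out-of-range entries become -1.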
*/
static PetscErrorCode AOMap_MemoryScalable_private(AO ao, PetscInt n, PetscInt *ia, const PetscInt *maploc)
{
  AO_MemoryScalable *aomems = (AO_MemoryScalable *)ao->data;
  MPI_Comm           comm;
  PetscMPIInt        rank, size, tag1, tag2, count;
  PetscMPIInt       *owner, j;
  PetscInt           nmax, *sindices, *rindices, idx, lastidx, *sindices2, *rindices2, *sizes, *start;
  PetscMPIInt        nreceives, nsends;
  const PetscInt    *owners = aomems->map->range;
  MPI_Request       *send_waits, *recv_waits, *send_waits2, *recv_waits2;
  MPI_Status         recv_status;
  PetscMPIInt        source, widx;
  PetscCount         nindices;
  PetscInt          *rbuf, *sbuf, inreceives;
  MPI_Status        *send_status, *send_status2;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)ao, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));

  /* first count number of contributors to each processor */
  PetscCall(PetscMalloc1(size, &start));
  PetscCall(PetscCalloc2(2 * size, &sizes, n, &owner));

  j       = 0;
  lastidx = -1;
  for (PetscInt i = 0; i < n; i++) {
    if (ia[i] < 0) owner[i] = -1;      /* mark negative entries (which are not to be mapped) with a special negative value */
    if (ia[i] >= ao->N) owner[i] = -2; /* mark out of range entries with special negative value */
    else {
      /* if indices are NOT locally sorted, need to start search at the beginning */
      if (lastidx > (idx = ia[i])) j = 0;
      lastidx = idx;
      for (; j < size; j++) {
        if (idx >= owners[j] && idx < owners[j + 1]) {
          sizes[2 * j]++;       /* num of indices to be sent */
          sizes[2 * j + 1] = 1; /* send to proc[j] */
          owner[i]         = j;
          break;
        }
      }
    }
  }
  sizes[2 * rank] = sizes[2 * rank + 1] = 0; /* do not receive from self! */
  nsends          = 0;
  for (PetscMPIInt i = 0; i < size; i++) nsends += sizes[2 * i + 1];

  /* inform other processors of the number of messages and the maximum length */
  PetscCall(PetscMaxSum(comm, sizes, &nmax, &inreceives));
  PetscCall(PetscMPIIntCast(inreceives, &nreceives));

  /* allocate arrays */
  PetscCall(PetscObjectGetNewTag((PetscObject)ao, &tag1));
  PetscCall(PetscObjectGetNewTag((PetscObject)ao, &tag2));

  PetscCall(PetscMalloc2(nreceives * nmax, &rindices, nreceives, &recv_waits));
  PetscCall(PetscMalloc2(nsends * nmax, &rindices2, nsends, &recv_waits2));

  PetscCall(PetscMalloc3(n, &sindices, nsends, &send_waits, nsends, &send_status));
  PetscCall(PetscMalloc3(n, &sindices2, nreceives, &send_waits2, nreceives, &send_status2));

  /* post 1st receives: receive others' requests.
     Since we don't know how long each individual message is, we
     allocate the largest needed buffer for each receive. Potentially
     this is a lot of wasted space.
  */
  for (PetscMPIInt i = 0; i < nreceives; i++) PetscCallMPI(MPIU_Irecv(rindices + nmax * i, nmax, MPIU_INT, MPI_ANY_SOURCE, tag1, comm, recv_waits + i));

  /* do 1st sends:
     1) start[i] gives the starting index in sindices[] for the entries going to
        the ith processor
  */
  start[0] = 0;
  for (PetscMPIInt i = 1; i < size; i++) start[i] = start[i - 1] + sizes[2 * i - 2];
  for (PetscInt i = 0; i < n; i++) {
    j = owner[i];
    if (j == -1) continue; /* do not remap negative entries in ia[] */
    else if (j == -2) {    /* out of range entries get mapped to -1 */
      ia[i] = -1;
      continue;
    } else if (j != rank) {
      sindices[start[j]++] = ia[i];
    } else { /* compute my own map */
      ia[i] = maploc[ia[i] - owners[rank]];
    }
  }

  start[0] = 0;
  for (PetscMPIInt i = 1; i < size; i++) start[i] = start[i - 1] + sizes[2 * i - 2];
  count = 0;
  for (PetscMPIInt i = 0; i < size; i++) {
    if (sizes[2 * i + 1]) {
      /* send my request to others */
      PetscCallMPI(MPIU_Isend(sindices + start[i], sizes[2 * i], MPIU_INT, i, tag1, comm, send_waits + count));
      /* post receive for the answer of my request */
      PetscCallMPI(MPIU_Irecv(sindices2 + start[i], sizes[2 * i], MPIU_INT, i, tag2, comm, recv_waits2 + count));
      count++;
    }
  }
  PetscCheck(nsends == count, comm, PETSC_ERR_SUP, "nsends %d != count %d", nsends, count);

  /* wait on 1st sends */
  if (nsends) PetscCallMPI(MPI_Waitall(nsends, send_waits, send_status));

  /* 1st recvs: others' requests */
  for (j = 0; j < nreceives; j++) {
    PetscCallMPI(MPI_Waitany(nreceives, recv_waits, &widx, &recv_status)); /* widx: index of the receive that completed */
    PetscCallMPI(MPIU_Get_count(&recv_status, MPIU_INT, &nindices));
    rbuf   = rindices + nmax * widx; /* global index */
    source = recv_status.MPI_SOURCE;

    /* compute mapping */
    sbuf = rbuf;
    for (PetscCount i = 0; i < nindices; i++) sbuf[i] = maploc[rbuf[i] - owners[rank]];

    /* send mapping back to the sender */
    PetscCallMPI(MPIU_Isend(sbuf, nindices, MPIU_INT, source, tag2, comm, send_waits2 + widx));
  }

  /* wait on 2nd sends */
  if (nreceives) PetscCallMPI(MPI_Waitall(nreceives, send_waits2, send_status2));

  /* 2nd recvs: the answers to my requests */
  for (j = 0; j < nsends; j++) {
    PetscCallMPI(MPI_Waitany(nsends, recv_waits2, &widx, &recv_status));
    PetscCallMPI(MPIU_Get_count(&recv_status, MPIU_INT, &nindices));
    source = recv_status.MPI_SOURCE;
    /* pack output ia[] */
    rbuf  = sindices2 + start[source];
    count = 0;
    for (PetscCount i = 0; i < n; i++) {
      if (source == owner[i]) ia[i] = rbuf[count++];
    }
  }

  /* free arrays */
  PetscCall(PetscFree(start));
  PetscCall(PetscFree2(sizes, owner));
  PetscCall(PetscFree2(rindices, recv_waits));
  PetscCall(PetscFree2(rindices2, recv_waits2));
  PetscCall(PetscFree3(sindices, send_waits, send_status));
  PetscCall(PetscFree3(sindices2, send_waits2, send_status2));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode AOPetscToApplication_MemoryScalable(AO ao, PetscInt n, PetscInt *ia)
{
  AO_MemoryScalable *aomems  = (AO_MemoryScalable *)ao->data;
  PetscInt          *app_loc = aomems->app_loc;

  PetscFunctionBegin;
  PetscCall(AOMap_MemoryScalable_private(ao, n, ia, app_loc));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode AOApplicationToPetsc_MemoryScalable(AO ao, PetscInt n, PetscInt *ia)
{
  AO_MemoryScalable *aomems    = (AO_MemoryScalable *)ao->data;
  PetscInt          *petsc_loc = aomems->petsc_loc;

  PetscFunctionBegin;
  PetscCall(AOMap_MemoryScalable_private(ao, n, ia, petsc_loc));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static const struct _AOOps AOOps_MemoryScalable = {
  PetscDesignatedInitializer(view, AOView_MemoryScalable),
  PetscDesignatedInitializer(destroy, AODestroy_MemoryScalable),
  PetscDesignatedInitializer(petsctoapplication, AOPetscToApplication_MemoryScalable),
  PetscDesignatedInitializer(applicationtopetsc, AOApplicationToPetsc_MemoryScalable),
  PetscDesignatedInitializer(petsctoapplicationpermuteint, NULL),
  PetscDesignatedInitializer(applicationtopetscpermuteint, NULL),
  PetscDesignatedInitializer(petsctoapplicationpermutereal, NULL),
  PetscDesignatedInitializer(applicationtopetscpermutereal, NULL),
};

static PetscErrorCode AOCreateMemoryScalable_private(MPI_Comm comm, PetscInt napp, const PetscInt from_array[], const PetscInt to_array[], AO ao, PetscInt *aomap_loc)
{
  AO_MemoryScalable *aomems  = (AO_MemoryScalable *)ao->data;
  PetscLayout        map     = aomems->map;
  PetscInt           n_local = map->n, i, j;
  PetscMPIInt        rank, size, tag;
  PetscInt          *owner, *start, *sizes, nsends, nreceives;
  PetscInt           nmax, count, *sindices, *rindices, idx, lastidx;
  PetscInt          *owners = aomems->map->range;
  MPI_Request       *send_waits, *recv_waits;
  MPI_Status         recv_status;
  PetscMPIInt        widx;
  PetscInt          *rbuf;
  PetscInt           n = napp, ip, ia;
  MPI_Status        *send_status;
  PetscCount         nindices;
  PetscMPIInt        nsends_i, nreceives_i;

  PetscFunctionBegin;
  PetscCall(PetscArrayzero(aomap_loc, n_local));

  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));

  /* first count number of contributors (of from_array[]) to each processor */
  PetscCall(PetscCalloc1(2 * size, &sizes));
  PetscCall(PetscMalloc1(n, &owner));

  j       = 0;
  lastidx = -1;
  for (i = 0; i < n; i++) {
    /* if indices are NOT locally sorted, need to start search at the beginning */
    if (lastidx > (idx = from_array[i])) j = 0;
    lastidx = idx;
    for (; j < size; j++) {
      if (idx >= owners[j] && idx < owners[j + 1]) {
        sizes[2 * j] += 2;    /* num of indices to be sent - in pairs (ip,ia) */
        sizes[2 * j + 1] = 1; /* send to proc[j] */
        owner[i]         = j;
        break;
      }
    }
  }
  sizes[2 * rank] = sizes[2 * rank + 1] = 0; /* do not receive from self! */
  nsends          = 0;
  for (i = 0; i < size; i++) nsends += sizes[2 * i + 1];

  /* inform other processors of the number of messages and the maximum length */
  PetscCall(PetscMaxSum(comm, sizes, &nmax, &nreceives));

  /* allocate arrays */
  PetscCall(PetscObjectGetNewTag((PetscObject)ao, &tag));
  PetscCall(PetscMalloc2(nreceives * nmax, &rindices, nreceives, &recv_waits));
  PetscCall(PetscMalloc3(2 * n, &sindices, nsends, &send_waits, nsends, &send_status));
  PetscCall(PetscMalloc1(size, &start));

  /* post receives: */
  for (i = 0; i < nreceives; i++) PetscCallMPI(MPIU_Irecv(rindices + nmax * i, nmax, MPIU_INT, MPI_ANY_SOURCE, tag, comm, recv_waits + i));

  /* do sends:
     1) start[i] gives the starting index in sindices[] for the pairs going to
        the ith processor
  */
  start[0] = 0;
  for (i = 1; i < size; i++) start[i] = start[i - 1] + sizes[2 * i - 2];
  for (i = 0; i < n; i++) {
    j = owner[i];
    if (j != rank) {
      ip                   = from_array[i];
      ia                   = to_array[i];
      sindices[start[j]++] = ip;
      sindices[start[j]++] = ia;
    } else { /* compute my own map */
      ip            = from_array[i] - owners[rank];
      ia            = to_array[i];
      aomap_loc[ip] = ia;
    }
  }

  start[0] = 0;
  for (PetscMPIInt i = 1; i < size; i++) start[i] = start[i - 1] + sizes[2 * i - 2];
  count = 0;
  for (PetscMPIInt i = 0; i < size; i++) {
    if (sizes[2 * i + 1]) {
      PetscCallMPI(MPIU_Isend(sindices + start[i], sizes[2 * i], MPIU_INT, i, tag, comm, send_waits + count));
      count++;
    }
  }
  PetscCheck(nsends == count, comm, PETSC_ERR_SUP, "nsends %" PetscInt_FMT " != count %" PetscInt_FMT, nsends, count);
  PetscCall(PetscMPIIntCast(nsends, &nsends_i));
  PetscCall(PetscMPIIntCast(nreceives, &nreceives_i));

  /* wait on sends */
  if (nsends) PetscCallMPI(MPI_Waitall(nsends_i, send_waits, send_status));

  /* recvs */
  count = 0;
  for (j = nreceives; j > 0; j--) {
    PetscCallMPI(MPI_Waitany(nreceives_i, recv_waits, &widx, &recv_status));
    PetscCallMPI(MPIU_Get_count(&recv_status, MPIU_INT, &nindices));
    rbuf = rindices + nmax * widx; /* global index */

    /* compute local mapping */
    for (i = 0; i < nindices; i += 2) {       /* pack aomap_loc */
      ip            = rbuf[i] - owners[rank]; /* local index */
      ia            = rbuf[i + 1];
      aomap_loc[ip] = ia;
    }
    count++;
  }

  PetscCall(PetscFree(start));
  PetscCall(PetscFree3(sindices, send_waits, send_status));
  PetscCall(PetscFree2(rindices, recv_waits));
  PetscCall(PetscFree(owner));
  PetscCall(PetscFree(sizes));
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode AOCreate_MemoryScalable(AO ao)
{
  IS                 isapp = ao->isapp, ispetsc = ao->ispetsc;
  const PetscInt    *mypetsc, *myapp;
  PetscInt           napp, n_local, N, i, start, *petsc, *lens, *disp;
  MPI_Comm           comm;
  AO_MemoryScalable *aomems;
  PetscLayout        map;
  PetscMPIInt        size, rank;

  PetscFunctionBegin;
  PetscCheck(isapp, PetscObjectComm((PetscObject)ao), PETSC_ERR_ARG_WRONGSTATE, "AOSetIS() must be called before AOSetType()");
  /* create special struct aomems */
  PetscCall(PetscNew(&aomems));
  ao->data   = (void *)aomems;
  ao->ops[0] = AOOps_MemoryScalable;
  PetscCall(PetscObjectChangeTypeName((PetscObject)ao, AOMEMORYSCALABLE));

  /* transmit all local lengths of isapp to all processors */
  PetscCall(PetscObjectGetComm((PetscObject)isapp, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCall(PetscMalloc2(size, &lens, size, &disp));
  PetscCall(ISGetLocalSize(isapp, &napp));
  PetscCallMPI(MPI_Allgather(&napp, 1, MPIU_INT, lens, 1, MPIU_INT, comm));

  N = 0;
  for (i = 0; i < size; i++) {
    disp[i] = N;
    N += lens[i];
  }

  /* If ispetsc is NULL then use the "natural" numbering */
  if (napp) {
    if (!ispetsc) {
      start = disp[rank];
      PetscCall(PetscMalloc1(napp + 1, &petsc));
      for (i = 0; i < napp; i++) petsc[i] = start + i;
    } else {
      PetscCall(ISGetIndices(ispetsc, &mypetsc));
      petsc = (PetscInt *)mypetsc;
    }
  } else {
    petsc = NULL;
  }

  /* create a map with global size N - used to determine the local sizes of ao - shall we use local napp instead of N? */
  PetscCall(PetscLayoutCreate(comm, &map));
  map->bs = 1;
  map->N  = N;
  PetscCall(PetscLayoutSetUp(map));

  ao->N       = N;
  ao->n       = map->n;
  aomems->map = map;

  /* create distributed indices app_loc: petsc->app and petsc_loc: app->petsc */
  n_local = map->n;
  PetscCall(PetscCalloc2(n_local, &aomems->app_loc, n_local, &aomems->petsc_loc));
  PetscCall(ISGetIndices(isapp, &myapp));

  PetscCall(AOCreateMemoryScalable_private(comm, napp, petsc, myapp, ao, aomems->app_loc));
  PetscCall(AOCreateMemoryScalable_private(comm, napp, myapp, petsc, ao, aomems->petsc_loc));

  PetscCall(ISRestoreIndices(isapp, &myapp));
  if (napp) {
    if (ispetsc) {
      PetscCall(ISRestoreIndices(ispetsc, &mypetsc));
    } else {
      PetscCall(PetscFree(petsc));
    }
  }
  PetscCall(PetscFree2(lens, disp));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  AOCreateMemoryScalable - Creates a memory scalable application ordering using two integer arrays.

  Collective

  Input Parameters:
+ comm - MPI communicator that is to share the `AO`
. napp - size of `myapp` and `mypetsc`
. myapp - integer array that defines an ordering
- mypetsc - integer array that defines another ordering (may be `NULL` to indicate the natural ordering, that is 0,1,2,3,...)

  Output Parameter:
. aoout - the new application ordering

  Level: beginner

  Note:
  The arrays `myapp` and `mypetsc` must contain all the integers 0 to `napp`-1 with no duplicates; that is, there cannot be any "holes"
  in the indices. Use `AOCreateMapping()` or `AOCreateMappingIS()` if you wish to have "holes" in the indices.
  Compared to `AOCreateBasic()`, this routine trades extra message communication for lower memory usage.
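
  Example Usage:
  A minimal illustrative sketch (the values are chosen for the example and assume a single MPI process):
.vb
  AO       ao;
  PetscInt app[] = {3, 2, 1, 0}; // application ordering; the PETSc ordering is the natural 0,1,2,3
  PetscInt idx[] = {0, 3};       // application indices to translate

  PetscCall(AOCreateMemoryScalable(PETSC_COMM_WORLD, 4, app, NULL, &ao));
  PetscCall(AOApplicationToPetsc(ao, 2, idx)); // idx now contains {3, 0}
  PetscCall(AODestroy(&ao));
.ve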

.seealso: [](sec_ao), [](sec_scatter), `AO`, `AOCreateMemoryScalableIS()`, `AODestroy()`, `AOPetscToApplication()`, `AOApplicationToPetsc()`
@*/
PetscErrorCode AOCreateMemoryScalable(MPI_Comm comm, PetscInt napp, const PetscInt myapp[], const PetscInt mypetsc[], AO *aoout)
{
  IS              isapp, ispetsc;
  const PetscInt *app = myapp, *petsc = mypetsc;

  PetscFunctionBegin;
  PetscCall(ISCreateGeneral(comm, napp, app, PETSC_USE_POINTER, &isapp));
  if (mypetsc) {
    PetscCall(ISCreateGeneral(comm, napp, petsc, PETSC_USE_POINTER, &ispetsc));
  } else {
    ispetsc = NULL;
  }
  PetscCall(AOCreateMemoryScalableIS(isapp, ispetsc, aoout));
  PetscCall(ISDestroy(&isapp));
  if (mypetsc) PetscCall(ISDestroy(&ispetsc));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  AOCreateMemoryScalableIS - Creates a memory scalable application ordering using two index sets.

  Collective

  Input Parameters:
+ isapp - index set that defines an ordering
- ispetsc - index set that defines another ordering (may be `NULL` to use the natural ordering)

  Output Parameter:
. aoout - the new application ordering

  Level: beginner

  Notes:
  The index sets `isapp` and `ispetsc` must contain all the integers 0 to napp-1 (where napp is the length of the index sets) with no duplicates;
  that is, there cannot be any "holes".

  Compared to `AOCreateBasicIS()`, this routine trades extra message communication for lower memory usage.
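
  Example Usage:
  A minimal illustrative sketch (the index values are chosen for the example and assume a single MPI process):
.vb
  IS       isapp;
  AO       ao;
  PetscInt app[] = {2, 0, 1};

  PetscCall(ISCreateGeneral(PETSC_COMM_WORLD, 3, app, PETSC_COPY_VALUES, &isapp));
  PetscCall(AOCreateMemoryScalableIS(isapp, NULL, &ao)); // NULL selects the natural PETSc ordering 0,1,2
  PetscCall(AOView(ao, PETSC_VIEWER_STDOUT_WORLD));
  PetscCall(AODestroy(&ao));
  PetscCall(ISDestroy(&isapp));
.ve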

.seealso: [](sec_ao), [](sec_scatter), `AO`, `AOCreateBasicIS()`, `AOCreateMemoryScalable()`, `AODestroy()`
@*/
PetscErrorCode AOCreateMemoryScalableIS(IS isapp, IS ispetsc, AO *aoout)
{
  MPI_Comm comm;
  AO       ao;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)isapp, &comm));
  PetscCall(AOCreate(comm, &ao));
  PetscCall(AOSetIS(ao, isapp, ispetsc));
  PetscCall(AOSetType(ao, AOMEMORYSCALABLE));
  PetscCall(AOViewFromOptions(ao, NULL, "-ao_view"));
  *aoout = ao;
  PetscFunctionReturn(PETSC_SUCCESS);
}