Actual source code: baijov.c

  1: /*
  2:    Routines to compute overlapping regions of a parallel MPI matrix
  3:    and to find submatrices that are shared across processors.
  4: */
  5: #include <../src/mat/impls/baij/mpi/mpibaij.h>
  6: #include <petscbt.h>

  8: static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Local(Mat, PetscInt, char **, PetscInt *, PetscInt **);
  9: static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Receive(Mat, PetscInt, PetscInt **, PetscInt **, PetscInt *);
 10: extern PetscErrorCode MatGetRow_MPIBAIJ(Mat, PetscInt, PetscInt *, PetscInt **, PetscScalar **);
 11: extern PetscErrorCode MatRestoreRow_MPIBAIJ(Mat, PetscInt, PetscInt *, PetscInt **, PetscScalar **);

 13: PetscErrorCode MatIncreaseOverlap_MPIBAIJ(Mat C, PetscInt imax, IS is[], PetscInt ov)
 14: {
 15:   PetscInt        i, N = C->cmap->N, bs = C->rmap->bs, n;
 16:   const PetscInt *idx;
 17:   IS             *is_new;

 19:   PetscFunctionBegin;
 20:   PetscCall(PetscMalloc1(imax, &is_new));
 21:   /* Convert the indices into block format */
 22:   PetscCall(ISCompressIndicesGeneral(N, C->rmap->n, bs, imax, is, is_new));
 23:   PetscCheck(ov >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Negative overlap specified");
 24:   for (i = 0; i < ov; ++i) PetscCall(MatIncreaseOverlap_MPIBAIJ_Once(C, imax, is_new));
 25:   for (i = 0; i < imax; i++) {
 26:     PetscCall(ISDestroy(&is[i]));
 27:     PetscCall(ISGetLocalSize(is_new[i], &n));
 28:     PetscCall(ISGetIndices(is_new[i], &idx));
 29:     PetscCall(ISCreateBlock(PetscObjectComm((PetscObject)is_new[i]), bs, n, idx, PETSC_COPY_VALUES, &is[i]));
 30:     PetscCall(ISDestroy(&is_new[i]));
 31:   }
 32:   PetscCall(PetscFree(is_new));
 33:   PetscFunctionReturn(PETSC_SUCCESS);
 34: }
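/*
  Usage sketch: this routine is normally reached through the generic
  MatIncreaseOverlap() interface, e.g. while building an overlapping additive
  Schwarz preconditioner. A minimal caller, assuming a parallel BAIJ matrix A
  and hypothetical values nlocal/first describing the locally owned rows:

    IS is;
    PetscCall(ISCreateStride(PETSC_COMM_SELF, nlocal, first, 1, &is));
    PetscCall(MatIncreaseOverlap(A, 1, &is, 2)); // grow the index set by 2 levels of overlap
    PetscCall(ISDestroy(&is));
*/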

 36: /*
 37:   Sample message format:
 38:   If a processor A wants processor B to process some elements corresponding
 39:   to index sets is[1], is[5]
 40:   mesg [0] = 2   (no of index sets in the mesg)
 41:   -----------
 42:   mesg [1] = 1 => is[1]
 43:   mesg [2] = sizeof(is[1]);
 44:   -----------
 45:   mesg [3] = 5  => is[5]
 46:   mesg [4] = sizeof(is[5]);
 47:   -----------
 48:   mesg [5]
 49:   mesg [n]  data(is[1])
 50:   -----------
 51:   mesg[n+1]
 52:   mesg[m]  data(is[5])
 53:   -----------
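  Concrete example: if is[1] = {4,7,9} and is[5] = {2,8}, then
     mesg = { 2,  1,3,  5,2,  4,7,9,  2,8 }
  i.e. a header of 1 + 2*(no of IS) = 5 entries followed by the data.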

 55:   Notes:
 56:   nrqs - no of requests sent (or to be sent out)
 57:   nrqr - no of requests received (still to be processed, or already processed)
 58: */
 59: PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Once(Mat C, PetscInt imax, IS is[])
 60: {
 61:   Mat_MPIBAIJ     *c = (Mat_MPIBAIJ *)C->data;
 62:   const PetscInt **idx, *idx_i;
 63:   PetscInt        *n, *w3, *w4, **data, len;
 64:   PetscMPIInt      size, rank, tag1, tag2, *w2, *w1, nrqs, nrqr, *pa;
 65:   PetscInt         Mbs, **rbuf, row, msz, **outdat, **ptr;
 66:   PetscInt        *ctr, *tmp, *isz, *isz1, **xdata, **rbuf2, *d_p;
 67:   PetscMPIInt     *onodes1, *olengths1, *onodes2, *olengths2, proc = -1;
 68:   PetscBT         *table;
 69:   MPI_Comm         comm, *iscomms;
 70:   MPI_Request     *s_waits1, *r_waits1, *s_waits2, *r_waits2;
 71:   char            *t_p;

 73:   PetscFunctionBegin;
 74:   PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
 75:   size = c->size;
 76:   rank = c->rank;
 77:   Mbs  = c->Mbs;

 79:   PetscCall(PetscObjectGetNewTag((PetscObject)C, &tag1));
 80:   PetscCall(PetscObjectGetNewTag((PetscObject)C, &tag2));

 82:   PetscCall(PetscMalloc2(imax, (PetscInt ***)&idx, imax, &n));

 84:   for (PetscInt i = 0; i < imax; i++) {
 85:     PetscCall(ISGetIndices(is[i], &idx[i]));
 86:     PetscCall(ISGetLocalSize(is[i], &n[i]));
 87:   }

 89:   /* evaluate communication - mesg to whom, length of mesg, and buffer space
 90:      required. Based on this, buffers are allocated, and data copied into them */
 91:   PetscCall(PetscCalloc4(size, &w1, size, &w2, size, &w3, size, &w4));
 92:   for (PetscInt i = 0; i < imax; i++) {
 93:     PetscCall(PetscArrayzero(w4, size)); /* initialise work vector*/
 94:     idx_i = idx[i];
 95:     len   = n[i];
 96:     for (PetscInt j = 0; j < len; j++) {
 97:       row = idx_i[j];
 98:       PetscCheck(row >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Index set cannot have negative entries");
 99:       PetscCall(PetscLayoutFindOwner(C->rmap, row * C->rmap->bs, &proc));
100:       w4[proc]++;
101:     }
102:     for (PetscMPIInt j = 0; j < size; j++) {
103:       if (w4[j]) {
104:         w1[j] += w4[j];
105:         w3[j]++;
106:       }
107:     }
108:   }

110:   nrqs     = 0; /* no of outgoing messages */
111:   msz      = 0; /* total mesg length (for all procs) */
112:   w1[rank] = 0; /* no mesg sent to itself */
113:   w3[rank] = 0;
114:   for (PetscMPIInt i = 0; i < size; i++) {
115:     if (w1[i]) {
116:       w2[i] = 1;
117:       nrqs++;
118:     } /* there exists a message to proc i */
119:   }
120:   /* pa is the list of processors to communicate with */
121:   PetscCall(PetscMalloc1(nrqs, &pa));
122:   for (PetscMPIInt i = 0, j = 0; i < size; i++) {
123:     if (w1[i]) {
124:       pa[j] = i;
125:       j++;
126:     }
127:   }

129:   /* Each message would have a header = 1 + 2*(no of IS) + data */
130:   for (PetscMPIInt i = 0; i < nrqs; i++) {
131:     PetscMPIInt j = pa[i];
132:     w1[j] += w2[j] + 2 * w3[j];
133:     msz += w1[j];
134:   }

136:   /* Determine the number of messages to expect, their lengths, and their from-ids */
137:   PetscCall(PetscGatherNumberOfMessages(comm, w2, w1, &nrqr));
138:   PetscCall(PetscGatherMessageLengths(comm, nrqs, nrqr, w1, &onodes1, &olengths1));

140:   /* Now post the Irecvs corresponding to these messages */
141:   PetscCall(PetscPostIrecvInt(comm, tag1, nrqr, onodes1, olengths1, &rbuf, &r_waits1));

143:   /* Allocate Memory for outgoing messages */
144:   PetscCall(PetscMalloc4(size, &outdat, size, &ptr, msz, &tmp, size, &ctr));
145:   PetscCall(PetscArrayzero(outdat, size));
146:   PetscCall(PetscArrayzero(ptr, size));
147:   {
148:     PetscInt *iptr = tmp, ict = 0;
149:     for (PetscMPIInt i = 0; i < nrqs; i++) {
150:       PetscMPIInt j = pa[i];
151:       iptr += ict;
152:       outdat[j] = iptr;
153:       ict       = w1[j];
154:     }
155:   }

157:   /* Form the outgoing messages */
158:   /*plug in the headers*/
159:   for (PetscMPIInt i = 0; i < nrqs; i++) {
160:     PetscMPIInt j = pa[i];
161:     outdat[j][0]  = 0;
162:     PetscCall(PetscArrayzero(outdat[j] + 1, 2 * w3[j]));
163:     ptr[j] = outdat[j] + 2 * w3[j] + 1;
164:   }

166:   /* Memory for doing local proc's work*/
167:   {
168:     PetscCall(PetscCalloc5(imax, &table, imax, &data, imax, &isz, Mbs * imax, &d_p, (Mbs / PETSC_BITS_PER_BYTE + 1) * imax, &t_p));

170:     for (PetscInt i = 0; i < imax; i++) {
171:       table[i] = t_p + (Mbs / PETSC_BITS_PER_BYTE + 1) * i;
172:       data[i]  = d_p + (Mbs)*i;
173:     }
174:   }

176:   /* Parse the IS and update local tables and the outgoing buf with the data*/
177:   {
178:     PetscInt n_i, *data_i, isz_i, *outdat_j, ctr_j, k;
179:     PetscBT  table_i;

181:     for (PetscInt i = 0; i < imax; i++) {
182:       PetscCall(PetscArrayzero(ctr, size));
183:       n_i     = n[i];
184:       table_i = table[i];
185:       idx_i   = idx[i];
186:       data_i  = data[i];
187:       isz_i   = isz[i];
188:       for (PetscInt j = 0; j < n_i; j++) { /* parse the indices of each IS */
189:         row = idx_i[j];
190:         PetscCall(PetscLayoutFindOwner(C->rmap, row * C->rmap->bs, &proc));
191:         if (proc != rank) { /* copy to the outgoing buffer */
192:           ctr[proc]++;
193:           *ptr[proc] = row;
194:           ptr[proc]++;
195:         } else { /* Update the local table */
196:           if (!PetscBTLookupSet(table_i, row)) data_i[isz_i++] = row;
197:         }
198:       }
199:       /* Update the headers for the current IS */
200:       for (PetscMPIInt j = 0; j < size; j++) { /* Can Optimise this loop by using pa[] */
201:         if ((ctr_j = ctr[j])) {
202:           outdat_j            = outdat[j];
203:           k                   = ++outdat_j[0];
204:           outdat_j[2 * k]     = ctr_j;
205:           outdat_j[2 * k - 1] = i;
206:         }
207:       }
208:       isz[i] = isz_i;
209:     }
210:   }

212:   /*  Now  post the sends */
213:   PetscCall(PetscMalloc1(nrqs, &s_waits1));
214:   for (PetscMPIInt i = 0; i < nrqs; ++i) {
215:     PetscMPIInt j = pa[i];
216:     PetscCallMPI(MPIU_Isend(outdat[j], w1[j], MPIU_INT, j, tag1, comm, s_waits1 + i));
217:   }

219:   /* No longer need the original indices*/
220:   for (PetscInt i = 0; i < imax; ++i) PetscCall(ISRestoreIndices(is[i], idx + i));
221:   PetscCall(PetscFree2(*(PetscInt ***)&idx, n));

223:   PetscCall(PetscMalloc1(imax, &iscomms));
224:   for (PetscInt i = 0; i < imax; ++i) {
225:     PetscCall(PetscCommDuplicate(PetscObjectComm((PetscObject)is[i]), &iscomms[i], NULL));
226:     PetscCall(ISDestroy(&is[i]));
227:   }

229:   /* Do Local work*/
230:   PetscCall(MatIncreaseOverlap_MPIBAIJ_Local(C, imax, table, isz, data));

232:   /* Receive messages*/
233:   PetscCallMPI(MPI_Waitall(nrqr, r_waits1, MPI_STATUSES_IGNORE));
234:   PetscCallMPI(MPI_Waitall(nrqs, s_waits1, MPI_STATUSES_IGNORE));

236:   /* Phase 1 sends are complete - deallocate buffers */
237:   PetscCall(PetscFree4(outdat, ptr, tmp, ctr));
238:   PetscCall(PetscFree4(w1, w2, w3, w4));

240:   PetscCall(PetscMalloc1(nrqr, &xdata));
241:   PetscCall(PetscMalloc1(nrqr, &isz1));
242:   PetscCall(MatIncreaseOverlap_MPIBAIJ_Receive(C, nrqr, rbuf, xdata, isz1));
243:   if (rbuf) {
244:     PetscCall(PetscFree(rbuf[0]));
245:     PetscCall(PetscFree(rbuf));
246:   }

248:   /* Send the data back*/
249:   /* Do a global reduction to know the buffer space req for incoming messages*/
250:   {
251:     PetscMPIInt *rw1;

253:     PetscCall(PetscCalloc1(size, &rw1));
254:     for (PetscMPIInt i = 0; i < nrqr; ++i) {
255:       proc = onodes1[i];
256:       PetscCall(PetscMPIIntCast(isz1[i], &rw1[proc]));
257:     }

 259:     /* Determine the number of messages to expect, their lengths, and their from-ids */
260:     PetscCall(PetscGatherMessageLengths(comm, nrqr, nrqs, rw1, &onodes2, &olengths2));
261:     PetscCall(PetscFree(rw1));
262:   }
263:   /* Now post the Irecvs corresponding to these messages */
264:   PetscCall(PetscPostIrecvInt(comm, tag2, nrqs, onodes2, olengths2, &rbuf2, &r_waits2));

266:   /*  Now  post the sends */
267:   PetscCall(PetscMalloc1(nrqr, &s_waits2));
268:   for (PetscMPIInt i = 0; i < nrqr; ++i) {
269:     PetscMPIInt j = onodes1[i];
270:     PetscCallMPI(MPIU_Isend(xdata[i], isz1[i], MPIU_INT, j, tag2, comm, s_waits2 + i));
271:   }

273:   PetscCall(PetscFree(onodes1));
274:   PetscCall(PetscFree(olengths1));

276:   /* receive work done on other processors*/
277:   {
278:     PetscMPIInt idex;
279:     PetscInt    is_no, ct1, max, *rbuf2_i, isz_i, *data_i, jmax;
280:     PetscBT     table_i;

282:     for (PetscMPIInt i = 0; i < nrqs; ++i) {
283:       PetscCallMPI(MPI_Waitany(nrqs, r_waits2, &idex, MPI_STATUS_IGNORE));
284:       /* Process the message*/
285:       rbuf2_i = rbuf2[idex];
286:       ct1     = 2 * rbuf2_i[0] + 1;
287:       jmax    = rbuf2[idex][0];
288:       for (PetscInt j = 1; j <= jmax; j++) {
289:         max     = rbuf2_i[2 * j];
290:         is_no   = rbuf2_i[2 * j - 1];
291:         isz_i   = isz[is_no];
292:         data_i  = data[is_no];
293:         table_i = table[is_no];
294:         for (PetscInt k = 0; k < max; k++, ct1++) {
295:           row = rbuf2_i[ct1];
296:           if (!PetscBTLookupSet(table_i, row)) data_i[isz_i++] = row;
297:         }
298:         isz[is_no] = isz_i;
299:       }
300:     }
301:     PetscCallMPI(MPI_Waitall(nrqr, s_waits2, MPI_STATUSES_IGNORE));
302:   }

304:   for (PetscInt i = 0; i < imax; ++i) {
305:     PetscCall(ISCreateGeneral(iscomms[i], isz[i], data[i], PETSC_COPY_VALUES, is + i));
306:     PetscCall(PetscCommDestroy(&iscomms[i]));
307:   }

309:   PetscCall(PetscFree(iscomms));
310:   PetscCall(PetscFree(onodes2));
311:   PetscCall(PetscFree(olengths2));

313:   PetscCall(PetscFree(pa));
314:   if (rbuf2) {
315:     PetscCall(PetscFree(rbuf2[0]));
316:     PetscCall(PetscFree(rbuf2));
317:   }
318:   PetscCall(PetscFree(s_waits1));
319:   PetscCall(PetscFree(r_waits1));
320:   PetscCall(PetscFree(s_waits2));
321:   PetscCall(PetscFree(r_waits2));
322:   PetscCall(PetscFree5(table, data, isz, d_p, t_p));
323:   if (xdata) {
324:     PetscCall(PetscFree(xdata[0]));
325:     PetscCall(PetscFree(xdata));
326:   }
327:   PetscCall(PetscFree(isz1));
328:   PetscFunctionReturn(PETSC_SUCCESS);
329: }
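/*
  Both helper routines below build duplicate-free index lists with the PetscBT
  bit-table idiom: PetscBTLookupSet() returns the bit's previous value, so an
  index is appended only on its first sighting. A minimal self-contained sketch
  of the pattern (values made up for illustration):

    PetscBT  seen;
    PetscInt list[16], nlist = 0, vals[4] = {3, 5, 3, 9};

    PetscCall(PetscBTCreate(16, &seen));
    for (PetscInt k = 0; k < 4; k++) {
      if (!PetscBTLookupSet(seen, vals[k])) list[nlist++] = vals[k];
    }
    PetscCall(PetscBTDestroy(&seen)); // list = {3, 5, 9}, nlist = 3
*/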

331: /*
332:    MatIncreaseOverlap_MPIBAIJ_Local - Called by MatIncreaseOverlap, to do
333:        the work on the local processor.

335:      Inputs:
336:       C      - MAT_MPIBAIJ;
337:       imax - total no of index sets processed at a time;
 338:       table  - an array of PetscBT bit tables, each of size Mbs bits.

340:      Output:
341:       isz    - array containing the count of the solution elements corresponding
342:                to each index set;
343:       data   - pointer to the solutions
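
      Example (single-process view, so rstart = 0): if on entry data[i] = {3}
      with isz[i] = 1, and block row 3 has nonzero block columns {3,5,9}, then
      on return data[i] = {3,5,9} and isz[i] = 3; column 3 is not duplicated
      because it is already set in table[i].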
344: */
345: static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Local(Mat C, PetscInt imax, PetscBT *table, PetscInt *isz, PetscInt **data)
346: {
347:   Mat_MPIBAIJ *c = (Mat_MPIBAIJ *)C->data;
348:   Mat          A = c->A, B = c->B;
349:   Mat_SeqBAIJ *a = (Mat_SeqBAIJ *)A->data, *b = (Mat_SeqBAIJ *)B->data;
350:   PetscInt     start, end, val, max, rstart, cstart, *ai, *aj;
351:   PetscInt    *bi, *bj, *garray, i, j, k, row, *data_i, isz_i;
352:   PetscBT      table_i;

354:   PetscFunctionBegin;
355:   rstart = c->rstartbs;
356:   cstart = c->cstartbs;
357:   ai     = a->i;
358:   aj     = a->j;
359:   bi     = b->i;
360:   bj     = b->j;
361:   garray = c->garray;

363:   for (i = 0; i < imax; i++) {
364:     data_i  = data[i];
365:     table_i = table[i];
366:     isz_i   = isz[i];
367:     for (j = 0, max = isz[i]; j < max; j++) {
368:       row   = data_i[j] - rstart;
369:       start = ai[row];
370:       end   = ai[row + 1];
371:       for (k = start; k < end; k++) { /* Amat */
372:         val = aj[k] + cstart;
373:         if (!PetscBTLookupSet(table_i, val)) data_i[isz_i++] = val;
374:       }
375:       start = bi[row];
376:       end   = bi[row + 1];
377:       for (k = start; k < end; k++) { /* Bmat */
378:         val = garray[bj[k]];
379:         if (!PetscBTLookupSet(table_i, val)) data_i[isz_i++] = val;
380:       }
381:     }
382:     isz[i] = isz_i;
383:   }
384:   PetscFunctionReturn(PETSC_SUCCESS);
385: }

387: /*
388:       MatIncreaseOverlap_MPIBAIJ_Receive - Process the received messages,
389:          and return the output

391:          Input:
392:            C    - the matrix
393:            nrqr - no of messages being processed.
394:            rbuf - an array of pointers to the received requests

396:          Output:
397:            xdata - array of messages to be sent back
398:            isz1  - size of each message

 400:   For better efficiency perhaps we should malloc each xdata[i] separately;
 401: then if a realloc is required we would need to copy only the data for that
 402: one message rather than for all the previous ones, as is done now with a
 403: single large chunk of memory.

405: */
406: static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Receive(Mat C, PetscInt nrqr, PetscInt **rbuf, PetscInt **xdata, PetscInt *isz1)
407: {
408:   Mat_MPIBAIJ *c = (Mat_MPIBAIJ *)C->data;
409:   Mat          A = c->A, B = c->B;
410:   Mat_SeqBAIJ *a = (Mat_SeqBAIJ *)A->data, *b = (Mat_SeqBAIJ *)B->data;
411:   PetscInt     rstart, cstart, *ai, *aj, *bi, *bj, *garray, i, j, k;
412:   PetscInt     row, total_sz, ct, ct1, ct2, ct3, mem_estimate, oct2, l, start, end;
413:   PetscInt     val, max1, max2, Mbs, no_malloc = 0, *tmp, new_estimate, ctr;
414:   PetscInt    *rbuf_i, kmax, rbuf_0;
415:   PetscBT      xtable;

417:   PetscFunctionBegin;
418:   Mbs    = c->Mbs;
419:   rstart = c->rstartbs;
420:   cstart = c->cstartbs;
421:   ai     = a->i;
422:   aj     = a->j;
423:   bi     = b->i;
424:   bj     = b->j;
425:   garray = c->garray;

427:   for (i = 0, ct = 0, total_sz = 0; i < nrqr; ++i) {
428:     rbuf_i = rbuf[i];
429:     rbuf_0 = rbuf_i[0];
430:     ct += rbuf_0;
431:     for (j = 1; j <= rbuf_0; j++) total_sz += rbuf_i[2 * j];
432:   }

434:   if (c->Mbs) max1 = ct * (a->nz + b->nz) / c->Mbs;
435:   else max1 = 1;
436:   mem_estimate = 3 * ((total_sz > max1 ? total_sz : max1) + 1);
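  /* Worked example of this estimate (hypothetical sizes): ct = 10 requested rows
     and a->nz + b->nz = 500 block nonzeros over Mbs = 100 block rows give the
     average-density guess max1 = 10*500/100 = 50; with total_sz = 40 this yields
     mem_estimate = 3*(50+1) = 153 PetscInt entries shared by all xdata[i]. */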
437:   if (nrqr) {
438:     PetscCall(PetscMalloc1(mem_estimate, &xdata[0]));
439:     ++no_malloc;
440:   }
441:   PetscCall(PetscBTCreate(Mbs, &xtable));
442:   PetscCall(PetscArrayzero(isz1, nrqr));
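  /* All xdata[i] share the single allocation anchored at xdata[0]; whenever the
     running count ct3 reaches mem_estimate, the chunk is regrown by 1.5x, the
     old contents are copied over, and the pointers xdata[1..i] are rebased onto
     the new chunk. */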

444:   ct3 = 0;
 445:   for (i = 0; i < nrqr; i++) { /* for each mesg from proc i */
446:     rbuf_i = rbuf[i];
447:     rbuf_0 = rbuf_i[0];
448:     ct1    = 2 * rbuf_0 + 1;
449:     ct2    = ct1;
450:     ct3 += ct1;
451:     for (j = 1; j <= rbuf_0; j++) { /* for each IS from proc i*/
452:       PetscCall(PetscBTMemzero(Mbs, xtable));
453:       oct2 = ct2;
454:       kmax = rbuf_i[2 * j];
455:       for (k = 0; k < kmax; k++, ct1++) {
456:         row = rbuf_i[ct1];
457:         if (!PetscBTLookupSet(xtable, row)) {
458:           if (!(ct3 < mem_estimate)) {
459:             new_estimate = (PetscInt)(1.5 * mem_estimate) + 1;
460:             PetscCall(PetscMalloc1(new_estimate, &tmp));
461:             PetscCall(PetscArraycpy(tmp, xdata[0], mem_estimate));
462:             PetscCall(PetscFree(xdata[0]));
463:             xdata[0]     = tmp;
464:             mem_estimate = new_estimate;
465:             ++no_malloc;
466:             for (ctr = 1; ctr <= i; ctr++) xdata[ctr] = xdata[ctr - 1] + isz1[ctr - 1];
467:           }
468:           xdata[i][ct2++] = row;
469:           ct3++;
470:         }
471:       }
472:       for (k = oct2, max2 = ct2; k < max2; k++) {
473:         row   = xdata[i][k] - rstart;
474:         start = ai[row];
475:         end   = ai[row + 1];
476:         for (l = start; l < end; l++) {
477:           val = aj[l] + cstart;
478:           if (!PetscBTLookupSet(xtable, val)) {
479:             if (!(ct3 < mem_estimate)) {
480:               new_estimate = (PetscInt)(1.5 * mem_estimate) + 1;
481:               PetscCall(PetscMalloc1(new_estimate, &tmp));
482:               PetscCall(PetscArraycpy(tmp, xdata[0], mem_estimate));
483:               PetscCall(PetscFree(xdata[0]));
484:               xdata[0]     = tmp;
485:               mem_estimate = new_estimate;
486:               ++no_malloc;
487:               for (ctr = 1; ctr <= i; ctr++) xdata[ctr] = xdata[ctr - 1] + isz1[ctr - 1];
488:             }
489:             xdata[i][ct2++] = val;
490:             ct3++;
491:           }
492:         }
493:         start = bi[row];
494:         end   = bi[row + 1];
495:         for (l = start; l < end; l++) {
496:           val = garray[bj[l]];
497:           if (!PetscBTLookupSet(xtable, val)) {
498:             if (!(ct3 < mem_estimate)) {
499:               new_estimate = (PetscInt)(1.5 * mem_estimate) + 1;
500:               PetscCall(PetscMalloc1(new_estimate, &tmp));
501:               PetscCall(PetscArraycpy(tmp, xdata[0], mem_estimate));
502:               PetscCall(PetscFree(xdata[0]));
503:               xdata[0]     = tmp;
504:               mem_estimate = new_estimate;
505:               ++no_malloc;
506:               for (ctr = 1; ctr <= i; ctr++) xdata[ctr] = xdata[ctr - 1] + isz1[ctr - 1];
507:             }
508:             xdata[i][ct2++] = val;
509:             ct3++;
510:           }
511:         }
512:       }
513:       /* Update the header*/
514:       xdata[i][2 * j]     = ct2 - oct2; /* Undo the vector isz1 and use only a var*/
515:       xdata[i][2 * j - 1] = rbuf_i[2 * j - 1];
516:     }
517:     xdata[i][0] = rbuf_0;
518:     if (i + 1 < nrqr) xdata[i + 1] = xdata[i] + ct2;
519:     isz1[i] = ct2; /* size of each message */
520:   }
521:   PetscCall(PetscBTDestroy(&xtable));
522:   PetscCall(PetscInfo(C, "Allocated %" PetscInt_FMT " bytes, required %" PetscInt_FMT ", no of mallocs = %" PetscInt_FMT "\n", mem_estimate, ct3, no_malloc));
523:   PetscFunctionReturn(PETSC_SUCCESS);
524: }

526: PetscErrorCode MatCreateSubMatrices_MPIBAIJ(Mat C, PetscInt ismax, const IS isrow[], const IS iscol[], MatReuse scall, Mat *submat[])
527: {
528:   IS          *isrow_block, *iscol_block;
529:   Mat_MPIBAIJ *c = (Mat_MPIBAIJ *)C->data;
530:   PetscInt     nmax, nstages_local, nstages, i, pos, max_no, N = C->cmap->N, bs = C->rmap->bs;
531:   Mat_SeqBAIJ *subc;
532:   Mat_SubSppt *smat;
533:   PetscBool    sym = PETSC_FALSE, flg[2];

535:   PetscFunctionBegin;
536:   PetscCall(PetscObjectTypeCompare((PetscObject)C, MATMPISBAIJ, flg));
537:   if (flg[0]) {
538:     if (isrow == iscol) sym = PETSC_TRUE;
539:     else {
540:       flg[0] = flg[1] = PETSC_TRUE;
541:       for (i = 0; i < ismax; i++) {
542:         if (isrow[i] != iscol[i]) flg[0] = PETSC_FALSE;
543:         PetscCall(ISGetLocalSize(iscol[0], &nmax));
544:         if (nmax == C->cmap->N && flg[1]) PetscCall(ISIdentity(iscol[0], flg + 1));
545:       }
546:       sym = (PetscBool)(flg[0] || flg[1]);
547:     }
548:   }
 549:   /* The compression and expansion should be avoided. It does not flag
 550:      errors and might change the indices, hence buggy */
551:   PetscCall(PetscMalloc2(ismax, &isrow_block, ismax, &iscol_block));
552:   PetscCall(ISCompressIndicesGeneral(C->rmap->N, C->rmap->n, bs, ismax, isrow, isrow_block));
553:   if (isrow == iscol) {
554:     for (i = 0; i < ismax; i++) {
555:       iscol_block[i] = isrow_block[i];
556:       PetscCall(PetscObjectReference((PetscObject)iscol_block[i]));
557:     }
558:   } else PetscCall(ISCompressIndicesGeneral(N, C->cmap->n, bs, ismax, iscol, iscol_block));

 560:   /* Determine the number of stages in which the submatrices are constructed */
561:   if (!C->cmap->N) nmax = 20 * 1000000 / sizeof(PetscInt);
562:   else nmax = 20 * 1000000 / (c->Nbs * sizeof(PetscInt));
563:   if (!nmax) nmax = 1;
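  /* Example (hypothetical sizes): with c->Nbs = 100000 block columns and an
     8-byte PetscInt, nmax = 20000000/(100000*8) = 25 index sets per stage,
     which caps the per-stage work buffers at roughly 20 MB. */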

565:   if (scall == MAT_INITIAL_MATRIX) {
566:     nstages_local = ismax / nmax + ((ismax % nmax) ? 1 : 0); /* local nstages */

568:     /* Make sure every processor loops through the nstages */
569:     PetscCallMPI(MPIU_Allreduce(&nstages_local, &nstages, 1, MPIU_INT, MPI_MAX, PetscObjectComm((PetscObject)C)));

571:     /* Allocate memory to hold all the submatrices and dummy submatrices */
572:     PetscCall(PetscCalloc1(ismax + nstages, submat));
573:   } else { /* MAT_REUSE_MATRIX */
574:     if (ismax) {
575:       subc = (Mat_SeqBAIJ *)((*submat)[0]->data);
576:       smat = subc->submatis1;
577:     } else { /* (*submat)[0] is a dummy matrix */
578:       smat = (Mat_SubSppt *)(*submat)[0]->data;
579:     }
580:     PetscCheck(smat, PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "MatCreateSubMatrices(...,MAT_REUSE_MATRIX,...) requires submat");
581:     nstages = smat->nstages;
582:   }

584:   for (i = 0, pos = 0; i < nstages; i++) {
585:     if (pos + nmax <= ismax) max_no = nmax;
586:     else if (pos >= ismax) max_no = 0;
587:     else max_no = ismax - pos;

589:     PetscCall(MatCreateSubMatrices_MPIBAIJ_local(C, max_no, isrow_block + pos, iscol_block + pos, scall, *submat + pos, sym));
590:     if (!max_no) {
591:       if (scall == MAT_INITIAL_MATRIX) { /* submat[pos] is a dummy matrix */
592:         smat          = (Mat_SubSppt *)(*submat)[pos]->data;
593:         smat->nstages = nstages;
594:       }
595:       pos++; /* advance to next dummy matrix if any */
596:     } else pos += max_no;
597:   }

599:   if (scall == MAT_INITIAL_MATRIX && ismax) {
600:     /* save nstages for reuse */
601:     subc          = (Mat_SeqBAIJ *)((*submat)[0]->data);
602:     smat          = subc->submatis1;
603:     smat->nstages = nstages;
604:   }

606:   for (i = 0; i < ismax; i++) {
607:     PetscCall(ISDestroy(&isrow_block[i]));
608:     PetscCall(ISDestroy(&iscol_block[i]));
609:   }
610:   PetscCall(PetscFree2(isrow_block, iscol_block));
611:   PetscFunctionReturn(PETSC_SUCCESS);
612: }
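/*
  Usage sketch: the routine above backs the generic MatCreateSubMatrices()
  interface for MPIBAIJ. A minimal caller, assuming index sets isrow and iscol
  have already been created:

    Mat *submat;
    PetscCall(MatCreateSubMatrices(C, 1, &isrow, &iscol, MAT_INITIAL_MATRIX, &submat));
    // ... use submat[0]; the same sets may be re-extracted in place later:
    PetscCall(MatCreateSubMatrices(C, 1, &isrow, &iscol, MAT_REUSE_MATRIX, &submat));
    PetscCall(MatDestroySubMatrices(1, &submat));
*/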

614: /* This code is used for BAIJ and SBAIJ matrices (unfortunate dependency) */
615: PetscErrorCode MatCreateSubMatrices_MPIBAIJ_local(Mat C, PetscInt ismax, const IS isrow[], const IS iscol[], MatReuse scall, Mat *submats, PetscBool sym)
616: {
617:   Mat_MPIBAIJ     *c = (Mat_MPIBAIJ *)C->data;
618:   Mat              A = c->A;
619:   Mat_SeqBAIJ     *a = (Mat_SeqBAIJ *)A->data, *b = (Mat_SeqBAIJ *)c->B->data, *subc;
620:   const PetscInt **icol, **irow;
621:   PetscInt        *nrow, *ncol, start;
622:   PetscMPIInt     *pa, **row2proc, *row2proc_i, proc = -1;
623:   PetscMPIInt      rank, size, tag0, tag2, tag3, tag4, *w1, *w2, *w3, *w4, nrqr, nrqs = 0, *req_source1 = NULL, *req_source2;
624:   PetscInt       **sbuf1, **sbuf2, *sbuf2_i, ct1, ct2, **rbuf1, row;
625:   PetscInt         msz, **ptr = NULL, *req_size = NULL, *ctr = NULL, *tmp = NULL, tcol;
626:   PetscInt       **rbuf3 = NULL, **sbuf_aj, **rbuf2 = NULL, max1, max2;
627:   PetscInt       **lens, is_no, ncols, *cols, mat_i, *mat_j, tmp2, jmax;
628: #if defined(PETSC_USE_CTABLE)
629:   PetscHMapI *cmap, cmap_i = NULL, *rmap, rmap_i;
630: #else
631:   PetscInt **cmap, *cmap_i = NULL, **rmap, *rmap_i;
632: #endif
633:   const PetscInt *irow_i, *icol_i;
634:   PetscInt        ctr_j, *sbuf1_j, *sbuf_aj_i, *rbuf1_i, kmax, *lens_i, jcnt;
635:   MPI_Request    *s_waits1, *r_waits1, *s_waits2, *r_waits2, *r_waits3;
636:   MPI_Request    *r_waits4, *s_waits3, *s_waits4;
637:   MPI_Comm        comm;
638:   PetscScalar   **rbuf4, *rbuf4_i = NULL, **sbuf_aa, *vals, *mat_a = NULL, *imat_a = NULL, *sbuf_aa_i;
639:   PetscMPIInt    *onodes1, *olengths1, end;
640:   PetscInt       *imat_ilen, *imat_j, *imat_i;
641:   Mat_SubSppt    *smat_i;
642:   PetscBool      *issorted, colflag, iscsorted = PETSC_TRUE;
643:   PetscInt       *sbuf1_i, *rbuf2_i, *rbuf3_i, ilen;
644:   PetscInt        bs = C->rmap->bs, bs2 = c->bs2, rstart = c->rstartbs;
645:   PetscBool       ijonly = c->ijonly; /* private flag indicates only matrix data structures are requested */
646:   PetscInt        nzA, nzB, *a_i = a->i, *b_i = b->i, *a_j = a->j, *b_j = b->j, ctmp, imark, *cworkA, *cworkB;
647:   PetscScalar    *vworkA = NULL, *vworkB = NULL, *a_a = a->a, *b_a = b->a;
648:   PetscInt        cstart = c->cstartbs, *bmap = c->garray;
649:   PetscBool      *allrows, *allcolumns;

651:   PetscFunctionBegin;
652:   PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
653:   size = c->size;
654:   rank = c->rank;

656:   PetscCall(PetscMalloc5(ismax, &row2proc, ismax, &cmap, ismax, &rmap, ismax + 1, &allcolumns, ismax, &allrows));
657:   PetscCall(PetscMalloc5(ismax, (PetscInt ***)&irow, ismax, (PetscInt ***)&icol, ismax, &nrow, ismax, &ncol, ismax, &issorted));

659:   for (PetscInt i = 0; i < ismax; i++) {
660:     PetscCall(ISSorted(iscol[i], &issorted[i]));
661:     if (!issorted[i]) iscsorted = issorted[i]; /* columns are not sorted! */
662:     PetscCall(ISSorted(isrow[i], &issorted[i]));

664:     /* Check for special case: allcolumns */
665:     PetscCall(ISIdentity(iscol[i], &colflag));
666:     PetscCall(ISGetLocalSize(iscol[i], &ncol[i]));

668:     if (colflag && ncol[i] == c->Nbs) {
669:       allcolumns[i] = PETSC_TRUE;
670:       icol[i]       = NULL;
671:     } else {
672:       allcolumns[i] = PETSC_FALSE;
673:       PetscCall(ISGetIndices(iscol[i], &icol[i]));
674:     }

676:     /* Check for special case: allrows */
677:     PetscCall(ISIdentity(isrow[i], &colflag));
678:     PetscCall(ISGetLocalSize(isrow[i], &nrow[i]));
679:     if (colflag && nrow[i] == c->Mbs) {
680:       allrows[i] = PETSC_TRUE;
681:       irow[i]    = NULL;
682:     } else {
683:       allrows[i] = PETSC_FALSE;
684:       PetscCall(ISGetIndices(isrow[i], &irow[i]));
685:     }
686:   }

688:   if (scall == MAT_REUSE_MATRIX) {
689:     /* Assumes new rows are same length as the old rows */
690:     for (PetscInt i = 0; i < ismax; i++) {
691:       subc = (Mat_SeqBAIJ *)submats[i]->data;

 693:       PetscCheck(subc->mbs == nrow[i] && subc->nbs == ncol[i], PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Cannot reuse matrix, wrong size");

695:       /* Initial matrix as if empty */
696:       PetscCall(PetscArrayzero(subc->ilen, subc->mbs));

 698:       /* Inherit the factorization type of C */
699:       submats[i]->factortype = C->factortype;

701:       smat_i = subc->submatis1;

703:       nrqs        = smat_i->nrqs;
704:       nrqr        = smat_i->nrqr;
705:       rbuf1       = smat_i->rbuf1;
706:       rbuf2       = smat_i->rbuf2;
707:       rbuf3       = smat_i->rbuf3;
708:       req_source2 = smat_i->req_source2;

710:       sbuf1 = smat_i->sbuf1;
711:       sbuf2 = smat_i->sbuf2;
712:       ptr   = smat_i->ptr;
713:       tmp   = smat_i->tmp;
714:       ctr   = smat_i->ctr;

716:       pa          = smat_i->pa;
717:       req_size    = smat_i->req_size;
718:       req_source1 = smat_i->req_source1;

720:       allcolumns[i] = smat_i->allcolumns;
721:       allrows[i]    = smat_i->allrows;
722:       row2proc[i]   = smat_i->row2proc;
723:       rmap[i]       = smat_i->rmap;
724:       cmap[i]       = smat_i->cmap;
725:     }

727:     if (!ismax) { /* Get dummy submatrices and retrieve struct submatis1 */
728:       PetscCheck(submats[0], PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "submats are null, cannot reuse");
729:       smat_i = (Mat_SubSppt *)submats[0]->data;

731:       nrqs        = smat_i->nrqs;
732:       nrqr        = smat_i->nrqr;
733:       rbuf1       = smat_i->rbuf1;
734:       rbuf2       = smat_i->rbuf2;
735:       rbuf3       = smat_i->rbuf3;
736:       req_source2 = smat_i->req_source2;

738:       sbuf1 = smat_i->sbuf1;
739:       sbuf2 = smat_i->sbuf2;
740:       ptr   = smat_i->ptr;
741:       tmp   = smat_i->tmp;
742:       ctr   = smat_i->ctr;

744:       pa          = smat_i->pa;
745:       req_size    = smat_i->req_size;
746:       req_source1 = smat_i->req_source1;

748:       allcolumns[0] = PETSC_FALSE;
749:     }
750:   } else { /* scall == MAT_INITIAL_MATRIX */
751:     /* Get some new tags to keep the communication clean */
752:     PetscCall(PetscObjectGetNewTag((PetscObject)C, &tag2));
753:     PetscCall(PetscObjectGetNewTag((PetscObject)C, &tag3));

755:     /* evaluate communication - mesg to who, length of mesg, and buffer space
756:      required. Based on this, buffers are allocated, and data copied into them*/
757:     PetscCall(PetscCalloc4(size, &w1, size, &w2, size, &w3, size, &w4)); /* mesg size, initialize work vectors */

759:     for (PetscInt i = 0; i < ismax; i++) {
760:       jmax   = nrow[i];
761:       irow_i = irow[i];

763:       PetscCall(PetscMalloc1(jmax, &row2proc_i));
764:       row2proc[i] = row2proc_i;

766:       if (issorted[i]) proc = 0;
767:       for (PetscInt j = 0; j < jmax; j++) {
768:         if (!issorted[i]) proc = 0;
769:         if (allrows[i]) row = j;
770:         else row = irow_i[j];

772:         while (row >= c->rangebs[proc + 1]) proc++;
773:         w4[proc]++;
774:         row2proc_i[j] = proc; /* map row index to proc */
775:       }
776:       for (PetscMPIInt j = 0; j < size; j++) {
777:         if (w4[j]) {
778:           w1[j] += w4[j];
779:           w3[j]++;
780:           w4[j] = 0;
781:         }
782:       }
783:     }
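    /* Example: with c->rangebs = {0, 4, 9, 12} on three processes, block rows
       0..3 map to proc 0, 4..8 to proc 1, and 9..11 to proc 2. For a sorted
       isrow the while-scan above advances proc monotonically; for an unsorted
       one, proc restarts at 0 for every row. */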

785:     nrqs     = 0; /* no of outgoing messages */
786:     msz      = 0; /* total mesg length (for all procs) */
787:     w1[rank] = 0; /* no mesg sent to self */
788:     w3[rank] = 0;
789:     for (PetscMPIInt i = 0; i < size; i++) {
790:       if (w1[i]) {
791:         w2[i] = 1;
792:         nrqs++;
793:       } /* there exists a message to proc i */
794:     }
795:     PetscCall(PetscMalloc1(nrqs, &pa)); /*(proc -array)*/
796:     for (PetscMPIInt i = 0, j = 0; i < size; i++) {
797:       if (w1[i]) { pa[j++] = i; }
798:     }

800:     /* Each message would have a header = 1 + 2*(no of IS) + data */
801:     for (PetscMPIInt i = 0; i < nrqs; i++) {
802:       PetscMPIInt j = pa[i];
803:       w1[j] += w2[j] + 2 * w3[j];
804:       msz += w1[j];
805:     }
 806:     PetscCall(PetscInfo(0, "Number of outgoing messages %d, total message length %" PetscInt_FMT "\n", nrqs, msz));

 808:     /* Determine the number of messages to expect, their lengths, and their from-ids */
809:     PetscCall(PetscGatherNumberOfMessages(comm, w2, w1, &nrqr));
810:     PetscCall(PetscGatherMessageLengths(comm, nrqs, nrqr, w1, &onodes1, &olengths1));

812:     /* Now post the Irecvs corresponding to these messages */
813:     PetscCall(PetscObjectGetNewTag((PetscObject)C, &tag0));
814:     PetscCall(PetscPostIrecvInt(comm, tag0, nrqr, onodes1, olengths1, &rbuf1, &r_waits1));

816:     /* Allocate Memory for outgoing messages */
817:     PetscCall(PetscMalloc4(size, &sbuf1, size, &ptr, 2 * msz, &tmp, size, &ctr));
818:     PetscCall(PetscArrayzero(sbuf1, size));
819:     PetscCall(PetscArrayzero(ptr, size));

821:     {
822:       PetscInt   *iptr = tmp;
823:       PetscMPIInt k    = 0;
824:       for (PetscMPIInt i = 0; i < nrqs; i++) {
825:         PetscMPIInt j = pa[i];
826:         iptr += k;
827:         sbuf1[j] = iptr;
828:         k        = w1[j];
829:       }
830:     }

832:     /* Form the outgoing messages. Initialize the header space */
833:     for (PetscMPIInt i = 0; i < nrqs; i++) {
834:       PetscMPIInt j = pa[i];

836:       sbuf1[j][0] = 0;
837:       PetscCall(PetscArrayzero(sbuf1[j] + 1, 2 * w3[j]));
838:       ptr[j] = sbuf1[j] + 2 * w3[j] + 1;
839:     }

841:     /* Parse the isrow and copy data into outbuf */
842:     for (PetscInt i = 0; i < ismax; i++) {
843:       row2proc_i = row2proc[i];
844:       PetscCall(PetscArrayzero(ctr, size));
845:       irow_i = irow[i];
846:       jmax   = nrow[i];
847:       for (PetscInt j = 0; j < jmax; j++) { /* parse the indices of each IS */
848:         proc = row2proc_i[j];
849:         if (allrows[i]) row = j;
850:         else row = irow_i[j];

852:         if (proc != rank) { /* copy to the outgoing buf*/
853:           ctr[proc]++;
854:           *ptr[proc] = row;
855:           ptr[proc]++;
856:         }
857:       }
858:       /* Update the headers for the current IS */
859:       for (PetscMPIInt j = 0; j < size; j++) { /* Can Optimise this loop too */
860:         if ((ctr_j = ctr[j])) {
861:           PetscInt k;

863:           sbuf1_j            = sbuf1[j];
864:           k                  = ++sbuf1_j[0];
865:           sbuf1_j[2 * k]     = ctr_j;
866:           sbuf1_j[2 * k - 1] = i;
867:         }
868:       }
869:     }

871:     /*  Now  post the sends */
872:     PetscCall(PetscMalloc1(nrqs, &s_waits1));
873:     for (PetscMPIInt i = 0; i < nrqs; ++i) {
874:       PetscMPIInt j = pa[i];
875:       PetscCallMPI(MPIU_Isend(sbuf1[j], w1[j], MPIU_INT, j, tag0, comm, s_waits1 + i));
876:     }

878:     /* Post Receives to capture the buffer size */
879:     PetscCall(PetscMalloc1(nrqs, &r_waits2));
880:     PetscCall(PetscMalloc3(nrqs, &req_source2, nrqs, &rbuf2, nrqs, &rbuf3));
881:     if (nrqs) rbuf2[0] = tmp + msz;
882:     for (PetscMPIInt i = 1; i < nrqs; ++i) rbuf2[i] = rbuf2[i - 1] + w1[pa[i - 1]];
883:     for (PetscMPIInt i = 0; i < nrqs; ++i) {
884:       PetscMPIInt j = pa[i];
885:       PetscCallMPI(MPIU_Irecv(rbuf2[i], w1[j], MPIU_INT, j, tag2, comm, r_waits2 + i));
886:     }

888:     /* Send to other procs the buf size they should allocate */
889:     /* Receive messages*/
890:     PetscCall(PetscMalloc1(nrqr, &s_waits2));
891:     PetscCall(PetscMalloc3(nrqr, &sbuf2, nrqr, &req_size, nrqr, &req_source1));

893:     PetscCallMPI(MPI_Waitall(nrqr, r_waits1, MPI_STATUSES_IGNORE));
894:     for (PetscMPIInt i = 0; i < nrqr; ++i) {
895:       req_size[i] = 0;
896:       rbuf1_i     = rbuf1[i];
897:       start       = 2 * rbuf1_i[0] + 1;
898:       end         = olengths1[i];
899:       PetscCall(PetscMalloc1(end, &sbuf2[i]));
900:       sbuf2_i = sbuf2[i];
901:       for (PetscInt j = start; j < end; j++) {
902:         row        = rbuf1_i[j] - rstart;
903:         ncols      = a_i[row + 1] - a_i[row] + b_i[row + 1] - b_i[row];
904:         sbuf2_i[j] = ncols;
905:         req_size[i] += ncols;
906:       }
907:       req_source1[i] = onodes1[i];
908:       /* form the header */
909:       sbuf2_i[0] = req_size[i];
910:       for (PetscInt j = 1; j < start; j++) sbuf2_i[j] = rbuf1_i[j];

912:       PetscCallMPI(MPIU_Isend(sbuf2_i, end, MPIU_INT, req_source1[i], tag2, comm, s_waits2 + i));
913:     }
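    /* Each reply sbuf2[i] echoes the request header and records, for every
       requested block row, its number of block columns; sbuf2_i[0] carries the
       total (req_size[i]) so the requester can size rbuf3 before posting the
       tag3 receives below. */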
914:     PetscCall(PetscFree(onodes1));
915:     PetscCall(PetscFree(olengths1));

917:     PetscCall(PetscFree(r_waits1));
918:     PetscCall(PetscFree4(w1, w2, w3, w4));

920:     /* Receive messages*/
921:     PetscCall(PetscMalloc1(nrqs, &r_waits3));

923:     PetscCallMPI(MPI_Waitall(nrqs, r_waits2, MPI_STATUSES_IGNORE));
924:     for (PetscMPIInt i = 0; i < nrqs; ++i) {
925:       PetscCall(PetscMalloc1(rbuf2[i][0], &rbuf3[i]));
926:       req_source2[i] = pa[i];
927:       PetscCallMPI(MPIU_Irecv(rbuf3[i], rbuf2[i][0], MPIU_INT, req_source2[i], tag3, comm, r_waits3 + i));
928:     }
929:     PetscCall(PetscFree(r_waits2));

931:     /* Wait on sends1 and sends2 */
932:     PetscCallMPI(MPI_Waitall(nrqs, s_waits1, MPI_STATUSES_IGNORE));
933:     PetscCallMPI(MPI_Waitall(nrqr, s_waits2, MPI_STATUSES_IGNORE));
934:     PetscCall(PetscFree(s_waits1));
935:     PetscCall(PetscFree(s_waits2));

937:     /* Now allocate sending buffers for a->j, and send them off */
938:     PetscCall(PetscMalloc1(nrqr, &sbuf_aj));
939:     jcnt = 0;
940:     for (PetscMPIInt i = 0; i < nrqr; i++) jcnt += req_size[i];
941:     if (nrqr) PetscCall(PetscMalloc1(jcnt, &sbuf_aj[0]));
942:     for (PetscMPIInt i = 1; i < nrqr; i++) sbuf_aj[i] = sbuf_aj[i - 1] + req_size[i - 1];

944:     PetscCall(PetscMalloc1(nrqr, &s_waits3));
945:     {
946:       for (PetscMPIInt i = 0; i < nrqr; i++) {
947:         rbuf1_i   = rbuf1[i];
948:         sbuf_aj_i = sbuf_aj[i];
949:         ct1       = 2 * rbuf1_i[0] + 1;
950:         ct2       = 0;
951:         for (PetscInt j = 1, max1 = rbuf1_i[0]; j <= max1; j++) {
952:           kmax = rbuf1[i][2 * j];
953:           for (PetscInt k = 0; k < kmax; k++, ct1++) {
954:             PetscInt l;
955:             row    = rbuf1_i[ct1] - rstart;
956:             nzA    = a_i[row + 1] - a_i[row];
957:             nzB    = b_i[row + 1] - b_i[row];
958:             ncols  = nzA + nzB;
959:             cworkA = PetscSafePointerPlusOffset(a_j, a_i[row]);
960:             cworkB = PetscSafePointerPlusOffset(b_j, b_i[row]);

962:             /* load the column indices for this row into cols */
963:             cols = sbuf_aj_i + ct2;
964:             for (l = 0; l < nzB; l++) {
965:               if ((ctmp = bmap[cworkB[l]]) < cstart) cols[l] = ctmp;
966:               else break;
967:             }
968:             imark = l;
969:             for (l = 0; l < nzA; l++) cols[imark + l] = cstart + cworkA[l];
970:             for (l = imark; l < nzB; l++) cols[nzA + l] = bmap[cworkB[l]];
971:             ct2 += ncols;
972:           }
973:         }
974:         PetscCallMPI(MPIU_Isend(sbuf_aj_i, req_size[i], MPIU_INT, req_source1[i], tag3, comm, s_waits3 + i));
975:       }
976:     }

978:     /* create col map: global col of C -> local col of submatrices */
979: #if defined(PETSC_USE_CTABLE)
980:     for (PetscInt i = 0; i < ismax; i++) {
981:       if (!allcolumns[i]) {
982:         PetscCall(PetscHMapICreateWithSize(ncol[i], cmap + i));

984:         jmax   = ncol[i];
985:         icol_i = icol[i];
986:         cmap_i = cmap[i];
987:         for (PetscInt j = 0; j < jmax; j++) PetscCall(PetscHMapISet(cmap[i], icol_i[j] + 1, j + 1));
988:       } else cmap[i] = NULL;
989:     }
990: #else
991:     for (PetscInt i = 0; i < ismax; i++) {
992:       if (!allcolumns[i]) {
993:         PetscCall(PetscCalloc1(c->Nbs, &cmap[i]));
994:         jmax   = ncol[i];
995:         icol_i = icol[i];
996:         cmap_i = cmap[i];
997:         for (PetscInt j = 0; j < jmax; j++) cmap_i[icol_i[j]] = j + 1;
998:       } else cmap[i] = NULL;
999:     }
1000: #endif
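    /* Example: icol[i] = {7,2,9} produces cmap entries 7->1, 2->2, 9->3; the
       shift to 1-based local column numbers lets 0 double as "column not
       selected in this submatrix". */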

1002:     /* Create lens which is required for MatCreate... */
1003:     jcnt = 0;
1004:     for (PetscInt i = 0; i < ismax; i++) jcnt += nrow[i];
1005:     PetscCall(PetscMalloc1(ismax, &lens));
1006:     if (ismax) PetscCall(PetscCalloc1(jcnt, &lens[0]));
1007:     for (PetscInt i = 1; i < ismax; i++) lens[i] = PetscSafePointerPlusOffset(lens[i - 1], nrow[i - 1]);

1009:     /* Update lens from local data */
1010:     for (PetscInt i = 0; i < ismax; i++) {
1011:       row2proc_i = row2proc[i];
1012:       jmax       = nrow[i];
1013:       if (!allcolumns[i]) cmap_i = cmap[i];
1014:       irow_i = irow[i];
1015:       lens_i = lens[i];
1016:       for (PetscInt j = 0; j < jmax; j++) {
1017:         if (allrows[i]) row = j;
1018:         else row = irow_i[j]; /* global blocked row of C */

1020:         proc = row2proc_i[j];
1021:         if (proc == rank) {
1022:           /* Get indices from matA and then from matB */
1023: #if defined(PETSC_USE_CTABLE)
1024:           PetscInt tt;
1025: #endif
1026:           row    = row - rstart;
1027:           nzA    = a_i[row + 1] - a_i[row];
1028:           nzB    = b_i[row + 1] - b_i[row];
1029:           cworkA = a_j + a_i[row];
1030:           cworkB = PetscSafePointerPlusOffset(b_j, b_i[row]);

1032:           if (!allcolumns[i]) {
1033: #if defined(PETSC_USE_CTABLE)
1034:             for (PetscInt k = 0; k < nzA; k++) {
1035:               PetscCall(PetscHMapIGetWithDefault(cmap_i, cstart + cworkA[k] + 1, 0, &tt));
1036:               if (tt) lens_i[j]++;
1037:             }
1038:             for (PetscInt k = 0; k < nzB; k++) {
1039:               PetscCall(PetscHMapIGetWithDefault(cmap_i, bmap[cworkB[k]] + 1, 0, &tt));
1040:               if (tt) lens_i[j]++;
1041:             }

1043: #else
1044:             for (PetscInt k = 0; k < nzA; k++) {
1045:               if (cmap_i[cstart + cworkA[k]]) lens_i[j]++;
1046:             }
1047:             for (PetscInt k = 0; k < nzB; k++) {
1048:               if (cmap_i[bmap[cworkB[k]]]) lens_i[j]++;
1049:             }
1050: #endif
1051:           } else { /* allcolumns */
1052:             lens_i[j] = nzA + nzB;
1053:           }
1054:         }
1055:       }
1056:     }

1058:     /* Create row map: global row of C -> local row of submatrices */
1059:     for (PetscInt i = 0; i < ismax; i++) {
1060:       if (!allrows[i]) {
1061: #if defined(PETSC_USE_CTABLE)
1062:         PetscCall(PetscHMapICreateWithSize(nrow[i], rmap + i));
1063:         irow_i = irow[i];
1064:         jmax   = nrow[i];
1065:         for (PetscInt j = 0; j < jmax; j++) {
1066:           if (allrows[i]) {
1067:             PetscCall(PetscHMapISet(rmap[i], j + 1, j + 1));
1068:           } else {
1069:             PetscCall(PetscHMapISet(rmap[i], irow_i[j] + 1, j + 1));
1070:           }
1071:         }
1072: #else
1073:         PetscCall(PetscCalloc1(c->Mbs, &rmap[i]));
1074:         rmap_i = rmap[i];
1075:         irow_i = irow[i];
1076:         jmax   = nrow[i];
1077:         for (PetscInt j = 0; j < jmax; j++) {
1078:           if (allrows[i]) rmap_i[j] = j;
1079:           else rmap_i[irow_i[j]] = j;
1080:         }
1081: #endif
1082:       } else rmap[i] = NULL;
1083:     }

1085:     /* Update lens from offproc data */
1086:     {
1087:       PetscInt *rbuf2_i, *rbuf3_i, *sbuf1_i;

1089:       PetscCallMPI(MPI_Waitall(nrqs, r_waits3, MPI_STATUSES_IGNORE));
1090:       for (tmp2 = 0; tmp2 < nrqs; tmp2++) {
1091:         sbuf1_i = sbuf1[pa[tmp2]];
1092:         jmax    = sbuf1_i[0];
1093:         ct1     = 2 * jmax + 1;
1094:         ct2     = 0;
1095:         rbuf2_i = rbuf2[tmp2];
1096:         rbuf3_i = rbuf3[tmp2];
1097:         for (PetscInt j = 1; j <= jmax; j++) {
1098:           is_no  = sbuf1_i[2 * j - 1];
1099:           max1   = sbuf1_i[2 * j];
1100:           lens_i = lens[is_no];
1101:           if (!allcolumns[is_no]) cmap_i = cmap[is_no];
1102:           rmap_i = rmap[is_no];
1103:           for (PetscInt k = 0; k < max1; k++, ct1++) {
1104:             if (allrows[is_no]) {
1105:               row = sbuf1_i[ct1];
1106:             } else {
1107: #if defined(PETSC_USE_CTABLE)
1108:               PetscCall(PetscHMapIGetWithDefault(rmap_i, sbuf1_i[ct1] + 1, 0, &row));
1109:               row--;
1110:               PetscCheck(row >= 0, PETSC_COMM_SELF, PETSC_ERR_PLIB, "row not found in table");
1111: #else
1112:               row = rmap_i[sbuf1_i[ct1]]; /* the row index in the new matrix */
1113: #endif
1114:             }
1115:             max2 = rbuf2_i[ct1];
1116:             for (PetscInt l = 0; l < max2; l++, ct2++) {
1117:               if (!allcolumns[is_no]) {
1118: #if defined(PETSC_USE_CTABLE)
1119:                 PetscCall(PetscHMapIGetWithDefault(cmap_i, rbuf3_i[ct2] + 1, 0, &tcol));
1120: #else
1121:                 tcol = cmap_i[rbuf3_i[ct2]];
1122: #endif
1123:                 if (tcol) lens_i[row]++;
1124:               } else {         /* allcolumns */
1125:                 lens_i[row]++; /* lens_i[row] += max2 ? */
1126:               }
1127:             }
1128:           }
1129:         }
1130:       }
1131:     }
1132:     PetscCall(PetscFree(r_waits3));
1133:     PetscCallMPI(MPI_Waitall(nrqr, s_waits3, MPI_STATUSES_IGNORE));
1134:     PetscCall(PetscFree(s_waits3));

1136:     /* Create the submatrices */
1137:     for (PetscInt i = 0; i < ismax; i++) {
1138:       PetscInt bs_tmp;
1139:       if (ijonly) bs_tmp = 1;
1140:       else bs_tmp = bs;

1142:       PetscCall(MatCreate(PETSC_COMM_SELF, submats + i));
1143:       PetscCall(MatSetSizes(submats[i], nrow[i] * bs_tmp, ncol[i] * bs_tmp, PETSC_DETERMINE, PETSC_DETERMINE));

1145:       PetscCall(MatSetType(submats[i], sym ? ((PetscObject)A)->type_name : MATSEQBAIJ));
1146:       PetscCall(MatSeqBAIJSetPreallocation(submats[i], bs_tmp, 0, lens[i]));
1147:       PetscCall(MatSeqSBAIJSetPreallocation(submats[i], bs_tmp, 0, lens[i])); /* this subroutine is used by SBAIJ routines */

1149:       /* create struct Mat_SubSppt and attach it to submat */
1150:       PetscCall(PetscNew(&smat_i));
1151:       subc            = (Mat_SeqBAIJ *)submats[i]->data;
1152:       subc->submatis1 = smat_i;

1154:       smat_i->destroy          = submats[i]->ops->destroy;
1155:       submats[i]->ops->destroy = MatDestroySubMatrix_SeqBAIJ;
1156:       submats[i]->factortype   = C->factortype;

1158:       smat_i->id          = i;
1159:       smat_i->nrqs        = nrqs;
1160:       smat_i->nrqr        = nrqr;
1161:       smat_i->rbuf1       = rbuf1;
1162:       smat_i->rbuf2       = rbuf2;
1163:       smat_i->rbuf3       = rbuf3;
1164:       smat_i->sbuf2       = sbuf2;
1165:       smat_i->req_source2 = req_source2;

1167:       smat_i->sbuf1 = sbuf1;
1168:       smat_i->ptr   = ptr;
1169:       smat_i->tmp   = tmp;
1170:       smat_i->ctr   = ctr;

1172:       smat_i->pa          = pa;
1173:       smat_i->req_size    = req_size;
1174:       smat_i->req_source1 = req_source1;

1176:       smat_i->allcolumns = allcolumns[i];
1177:       smat_i->allrows    = allrows[i];
1178:       smat_i->singleis   = PETSC_FALSE;
1179:       smat_i->row2proc   = row2proc[i];
1180:       smat_i->rmap       = rmap[i];
1181:       smat_i->cmap       = cmap[i];
1182:     }

1184:     if (!ismax) { /* Create dummy submats[0] so struct Mat_SubSppt can be reused */
1185:       PetscCall(MatCreate(PETSC_COMM_SELF, &submats[0]));
1186:       PetscCall(MatSetSizes(submats[0], 0, 0, PETSC_DETERMINE, PETSC_DETERMINE));
1187:       PetscCall(MatSetType(submats[0], MATDUMMY));

1189:       /* create struct Mat_SubSppt and attach it to submat */
1190:       PetscCall(PetscNew(&smat_i));
1191:       submats[0]->data = (void *)smat_i;

1193:       smat_i->destroy          = submats[0]->ops->destroy;
1194:       submats[0]->ops->destroy = MatDestroySubMatrix_Dummy;
1195:       submats[0]->factortype   = C->factortype;

1197:       smat_i->id          = 0;
1198:       smat_i->nrqs        = nrqs;
1199:       smat_i->nrqr        = nrqr;
1200:       smat_i->rbuf1       = rbuf1;
1201:       smat_i->rbuf2       = rbuf2;
1202:       smat_i->rbuf3       = rbuf3;
1203:       smat_i->sbuf2       = sbuf2;
1204:       smat_i->req_source2 = req_source2;

1206:       smat_i->sbuf1 = sbuf1;
1207:       smat_i->ptr   = ptr;
1208:       smat_i->tmp   = tmp;
1209:       smat_i->ctr   = ctr;

1211:       smat_i->pa          = pa;
1212:       smat_i->req_size    = req_size;
1213:       smat_i->req_source1 = req_source1;

1215:       smat_i->allcolumns = PETSC_FALSE;
1216:       smat_i->singleis   = PETSC_FALSE;
1217:       smat_i->row2proc   = NULL;
1218:       smat_i->rmap       = NULL;
1219:       smat_i->cmap       = NULL;
1220:     }

1222:     if (ismax) PetscCall(PetscFree(lens[0]));
1223:     PetscCall(PetscFree(lens));
1224:     if (sbuf_aj) {
1225:       PetscCall(PetscFree(sbuf_aj[0]));
1226:       PetscCall(PetscFree(sbuf_aj));
1227:     }

1229:   } /* end of scall == MAT_INITIAL_MATRIX */

1231:   /* Post recv matrix values */
1232:   if (!ijonly) {
1233:     PetscCall(PetscObjectGetNewTag((PetscObject)C, &tag4));
1234:     PetscCall(PetscMalloc1(nrqs, &rbuf4));
1235:     PetscCall(PetscMalloc1(nrqs, &r_waits4));
1236:     for (PetscMPIInt i = 0; i < nrqs; ++i) {
1237:       PetscCall(PetscMalloc1(rbuf2[i][0] * bs2, &rbuf4[i]));
1238:       PetscCallMPI(MPIU_Irecv(rbuf4[i], rbuf2[i][0] * bs2, MPIU_SCALAR, req_source2[i], tag4, comm, r_waits4 + i));
1239:     }

1241:     /* Allocate sending buffers for a->a, and send them off */
1242:     PetscCall(PetscMalloc1(nrqr, &sbuf_aa));
1243:     jcnt = 0;
1244:     for (PetscMPIInt i = 0; i < nrqr; i++) jcnt += req_size[i];
1245:     if (nrqr) PetscCall(PetscMalloc1(jcnt * bs2, &sbuf_aa[0]));
1246:     for (PetscMPIInt i = 1; i < nrqr; i++) sbuf_aa[i] = sbuf_aa[i - 1] + req_size[i - 1] * bs2;

1248:     PetscCall(PetscMalloc1(nrqr, &s_waits4));

1250:     for (PetscMPIInt i = 0; i < nrqr; i++) {
1251:       rbuf1_i   = rbuf1[i];
1252:       sbuf_aa_i = sbuf_aa[i];
1253:       ct1       = 2 * rbuf1_i[0] + 1;
1254:       ct2       = 0;
1255:       for (PetscInt j = 1, max1 = rbuf1_i[0]; j <= max1; j++) {
1256:         kmax = rbuf1_i[2 * j];
1257:         for (PetscInt k = 0; k < kmax; k++, ct1++) {
1258:           PetscInt l;

1260:           row    = rbuf1_i[ct1] - rstart;
1261:           nzA    = a_i[row + 1] - a_i[row];
1262:           nzB    = b_i[row + 1] - b_i[row];
1263:           ncols  = nzA + nzB;
1264:           cworkB = PetscSafePointerPlusOffset(b_j, b_i[row]);
1265:           vworkA = PetscSafePointerPlusOffset(a_a, a_i[row] * bs2);
1266:           vworkB = PetscSafePointerPlusOffset(b_a, b_i[row] * bs2);

1268:           /* load the column values for this row into vals*/
1269:           vals = sbuf_aa_i + ct2 * bs2;
1270:           for (l = 0; l < nzB; l++) {
1271:             if ((bmap[cworkB[l]]) < cstart) {
1272:               PetscCall(PetscArraycpy(vals + l * bs2, vworkB + l * bs2, bs2));
1273:             } else break;
1274:           }
1275:           imark = l;
1276:           for (l = 0; l < nzA; l++) PetscCall(PetscArraycpy(vals + (imark + l) * bs2, vworkA + l * bs2, bs2));
1277:           for (l = imark; l < nzB; l++) PetscCall(PetscArraycpy(vals + (nzA + l) * bs2, vworkB + l * bs2, bs2));

1279:           ct2 += ncols;
1280:         }
1281:       }
1282:       PetscCallMPI(MPIU_Isend(sbuf_aa_i, req_size[i] * bs2, MPIU_SCALAR, req_source1[i], tag4, comm, s_waits4 + i));
1283:     }
1284:   }

1286:   /* Assemble the matrices */
1287:   /* First assemble the local rows */
1288:   for (PetscInt i = 0; i < ismax; i++) {
1289:     row2proc_i = row2proc[i];
1290:     subc       = (Mat_SeqBAIJ *)submats[i]->data;
1291:     imat_ilen  = subc->ilen;
1292:     imat_j     = subc->j;
1293:     imat_i     = subc->i;
1294:     imat_a     = subc->a;

1296:     if (!allcolumns[i]) cmap_i = cmap[i];
1297:     rmap_i = rmap[i];
1298:     irow_i = irow[i];
1299:     jmax   = nrow[i];
1300:     for (PetscInt j = 0; j < jmax; j++) {
1301:       if (allrows[i]) row = j;
1302:       else row = irow_i[j];
1303:       proc = row2proc_i[j];

1305:       if (proc == rank) {
1306:         row    = row - rstart;
1307:         nzA    = a_i[row + 1] - a_i[row];
1308:         nzB    = b_i[row + 1] - b_i[row];
1309:         cworkA = a_j + a_i[row];
1310:         cworkB = PetscSafePointerPlusOffset(b_j, b_i[row]);
1311:         if (!ijonly) {
1312:           vworkA = a_a + a_i[row] * bs2;
1313:           vworkB = PetscSafePointerPlusOffset(b_a, b_i[row] * bs2);
1314:         }

1316:         if (allrows[i]) {
1317:           row = row + rstart;
1318:         } else {
1319: #if defined(PETSC_USE_CTABLE)
1320:           PetscCall(PetscHMapIGetWithDefault(rmap_i, row + rstart + 1, 0, &row));
1321:           row--;

1323:           PetscCheck(row >= 0, PETSC_COMM_SELF, PETSC_ERR_PLIB, "row not found in table");
1324: #else
1325:           row = rmap_i[row + rstart];
1326: #endif
1327:         }
1328:         mat_i = imat_i[row];
1329:         if (!ijonly) mat_a = PetscSafePointerPlusOffset(imat_a, mat_i * bs2);
1330:         mat_j = PetscSafePointerPlusOffset(imat_j, mat_i);
1331:         ilen  = imat_ilen[row];

1333:         /* load the column indices for this row into cols*/
1334:         if (!allcolumns[i]) {
1335:           PetscInt l;

1337:           for (l = 0; l < nzB; l++) {
1338:             if ((ctmp = bmap[cworkB[l]]) < cstart) {
1339: #if defined(PETSC_USE_CTABLE)
1340:               PetscCall(PetscHMapIGetWithDefault(cmap_i, ctmp + 1, 0, &tcol));
1341:               if (tcol) {
1342: #else
1343:               if ((tcol = cmap_i[ctmp])) {
1344: #endif
1345:                 *mat_j++ = tcol - 1;
1346:                 PetscCall(PetscArraycpy(mat_a, vworkB + l * bs2, bs2));
1347:                 mat_a += bs2;
1348:                 ilen++;
1349:               }
1350:             } else break;
1351:           }
1352:           imark = l;
1353:           for (PetscInt l = 0; l < nzA; l++) {
1354: #if defined(PETSC_USE_CTABLE)
1355:             PetscCall(PetscHMapIGetWithDefault(cmap_i, cstart + cworkA[l] + 1, 0, &tcol));
1356:             if (tcol) {
1357: #else
1358:             if ((tcol = cmap_i[cstart + cworkA[l]])) {
1359: #endif
1360:               *mat_j++ = tcol - 1;
1361:               if (!ijonly) {
1362:                 PetscCall(PetscArraycpy(mat_a, vworkA + l * bs2, bs2));
1363:                 mat_a += bs2;
1364:               }
1365:               ilen++;
1366:             }
1367:           }
1368:           for (l = imark; l < nzB; l++) {
1369: #if defined(PETSC_USE_CTABLE)
1370:             PetscCall(PetscHMapIGetWithDefault(cmap_i, bmap[cworkB[l]] + 1, 0, &tcol));
1371:             if (tcol) {
1372: #else
1373:             if ((tcol = cmap_i[bmap[cworkB[l]]])) {
1374: #endif
1375:               *mat_j++ = tcol - 1;
1376:               if (!ijonly) {
1377:                 PetscCall(PetscArraycpy(mat_a, vworkB + l * bs2, bs2));
1378:                 mat_a += bs2;
1379:               }
1380:               ilen++;
1381:             }
1382:           }
1383:         } else { /* allcolumns */
1384:           PetscInt l;
1385:           for (l = 0; l < nzB; l++) {
1386:             if ((ctmp = bmap[cworkB[l]]) < cstart) {
1387:               *mat_j++ = ctmp;
1388:               PetscCall(PetscArraycpy(mat_a, vworkB + l * bs2, bs2));
1389:               mat_a += bs2;
1390:               ilen++;
1391:             } else break;
1392:           }
1393:           imark = l;
1394:           for (l = 0; l < nzA; l++) {
1395:             *mat_j++ = cstart + cworkA[l];
1396:             if (!ijonly) {
1397:               PetscCall(PetscArraycpy(mat_a, vworkA + l * bs2, bs2));
1398:               mat_a += bs2;
1399:             }
1400:             ilen++;
1401:           }
1402:           for (l = imark; l < nzB; l++) {
1403:             *mat_j++ = bmap[cworkB[l]];
1404:             if (!ijonly) {
1405:               PetscCall(PetscArraycpy(mat_a, vworkB + l * bs2, bs2));
1406:               mat_a += bs2;
1407:             }
1408:             ilen++;
1409:           }
1410:         }
1411:         imat_ilen[row] = ilen;
1412:       }
1413:     }
1414:   }

1416:   /* Now assemble the off-proc rows */
1417:   if (!ijonly) PetscCallMPI(MPI_Waitall(nrqs, r_waits4, MPI_STATUSES_IGNORE));
1418:   for (tmp2 = 0; tmp2 < nrqs; tmp2++) {
1419:     sbuf1_i = sbuf1[pa[tmp2]];
1420:     jmax    = sbuf1_i[0];
1421:     ct1     = 2 * jmax + 1;
1422:     ct2     = 0;
1423:     rbuf2_i = rbuf2[tmp2];
1424:     rbuf3_i = rbuf3[tmp2];
1425:     if (!ijonly) rbuf4_i = rbuf4[tmp2];
1426:     for (PetscInt j = 1; j <= jmax; j++) {
1427:       is_no  = sbuf1_i[2 * j - 1];
1428:       rmap_i = rmap[is_no];
1429:       if (!allcolumns[is_no]) cmap_i = cmap[is_no];
1430:       subc      = (Mat_SeqBAIJ *)submats[is_no]->data;
1431:       imat_ilen = subc->ilen;
1432:       imat_j    = subc->j;
1433:       imat_i    = subc->i;
1434:       if (!ijonly) imat_a = subc->a;
1435:       max1 = sbuf1_i[2 * j];
1436:       for (PetscInt k = 0; k < max1; k++, ct1++) { /* for each received block row */
1437:         row = sbuf1_i[ct1];

1439:         if (allrows[is_no]) {
1440:           row = sbuf1_i[ct1];
1441:         } else {
1442: #if defined(PETSC_USE_CTABLE)
1443:           PetscCall(PetscHMapIGetWithDefault(rmap_i, row + 1, 0, &row));
1444:           row--;
1445:           PetscCheck(row >= 0, PETSC_COMM_SELF, PETSC_ERR_PLIB, "row not found in table");
1446: #else
1447:           row = rmap_i[row];
1448: #endif
1449:         }
1450:         ilen  = imat_ilen[row];
1451:         mat_i = imat_i[row];
1452:         if (!ijonly) mat_a = imat_a + mat_i * bs2;
1453:         mat_j = imat_j + mat_i;
1454:         max2  = rbuf2_i[ct1];
1455:         if (!allcolumns[is_no]) {
1456:           for (PetscInt l = 0; l < max2; l++, ct2++) {
1457: #if defined(PETSC_USE_CTABLE)
1458:             PetscCall(PetscHMapIGetWithDefault(cmap_i, rbuf3_i[ct2] + 1, 0, &tcol));
1459: #else
1460:             tcol = cmap_i[rbuf3_i[ct2]];
1461: #endif
1462:             if (tcol) {
1463:               *mat_j++ = tcol - 1;
1464:               if (!ijonly) {
1465:                 PetscCall(PetscArraycpy(mat_a, rbuf4_i + ct2 * bs2, bs2));
1466:                 mat_a += bs2;
1467:               }
1468:               ilen++;
1469:             }
1470:           }
1471:         } else { /* allcolumns */
1472:           for (PetscInt l = 0; l < max2; l++, ct2++) {
1473:             *mat_j++ = rbuf3_i[ct2]; /* same global column index of C */
1474:             if (!ijonly) {
1475:               PetscCall(PetscArraycpy(mat_a, rbuf4_i + ct2 * bs2, bs2));
1476:               mat_a += bs2;
1477:             }
1478:             ilen++;
1479:           }
1480:         }
1481:         imat_ilen[row] = ilen;
1482:       }
1483:     }
1484:   }

1486:   if (!iscsorted) { /* sort column indices of the rows */
1487:     MatScalar *work;

1489:     PetscCall(PetscMalloc1(bs2, &work));
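    /* PetscSortIntWithDataArray() keeps each bs2-sized block of values attached
       to its column index while sorting, e.g. columns {9,2} with blocks {B9,B2}
       become {2,9} with {B2,B9}; work is the one-block temporary used for swaps. */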
1490:     for (PetscInt i = 0; i < ismax; i++) {
1491:       subc      = (Mat_SeqBAIJ *)submats[i]->data;
1492:       imat_ilen = subc->ilen;
1493:       imat_j    = subc->j;
1494:       imat_i    = subc->i;
1495:       if (!ijonly) imat_a = subc->a;
1496:       if (allcolumns[i]) continue;

1498:       jmax = nrow[i];
1499:       for (PetscInt j = 0; j < jmax; j++) {
1500:         mat_i = imat_i[j];
1501:         mat_j = imat_j + mat_i;
1502:         ilen  = imat_ilen[j];
1503:         if (ijonly) {
1504:           PetscCall(PetscSortInt(ilen, mat_j));
1505:         } else {
1506:           mat_a = imat_a + mat_i * bs2;
1507:           PetscCall(PetscSortIntWithDataArray(ilen, mat_j, mat_a, bs2 * sizeof(MatScalar), work));
1508:         }
1509:       }
1510:     }
1511:     PetscCall(PetscFree(work));
1512:   }

1514:   if (!ijonly) {
1515:     PetscCall(PetscFree(r_waits4));
1516:     PetscCallMPI(MPI_Waitall(nrqr, s_waits4, MPI_STATUSES_IGNORE));
1517:     PetscCall(PetscFree(s_waits4));
1518:   }

1520:   /* Restore the indices */
1521:   for (PetscInt i = 0; i < ismax; i++) {
1522:     if (!allrows[i]) PetscCall(ISRestoreIndices(isrow[i], irow + i));
1523:     if (!allcolumns[i]) PetscCall(ISRestoreIndices(iscol[i], icol + i));
1524:   }

1526:   for (PetscInt i = 0; i < ismax; i++) {
1527:     PetscCall(MatAssemblyBegin(submats[i], MAT_FINAL_ASSEMBLY));
1528:     PetscCall(MatAssemblyEnd(submats[i], MAT_FINAL_ASSEMBLY));
1529:   }

1531:   PetscCall(PetscFree5(*(PetscInt ***)&irow, *(PetscInt ***)&icol, nrow, ncol, issorted));
1532:   PetscCall(PetscFree5(row2proc, cmap, rmap, allcolumns, allrows));

1534:   if (!ijonly) {
1535:     if (sbuf_aa) {
1536:       PetscCall(PetscFree(sbuf_aa[0]));
1537:       PetscCall(PetscFree(sbuf_aa));
1538:     }

1540:     for (PetscMPIInt i = 0; i < nrqs; ++i) PetscCall(PetscFree(rbuf4[i]));
1541:     PetscCall(PetscFree(rbuf4));
1542:   }
1543:   c->ijonly = PETSC_FALSE; /* set back to the default */
1544:   PetscFunctionReturn(PETSC_SUCCESS);
1545: }