Actual source code: baijov.c


  2: /*
  3:    Routines to compute overlapping regions of a parallel MPI matrix
  4:    and to find submatrices that are shared across processors.
  5: */
  6: #include <../src/mat/impls/baij/mpi/mpibaij.h>
  7: #include <petscbt.h>

  9: static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Local(Mat,PetscInt,char**,PetscInt*,PetscInt**);
 10: static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Receive(Mat,PetscInt,PetscInt**,PetscInt**,PetscInt*);
 11: extern PetscErrorCode MatGetRow_MPIBAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**);
 12: extern PetscErrorCode MatRestoreRow_MPIBAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**);

 14: PetscErrorCode MatIncreaseOverlap_MPIBAIJ(Mat C,PetscInt imax,IS is[],PetscInt ov)
 15: {
 17:   PetscInt       i,N=C->cmap->N, bs=C->rmap->bs;
 18:   IS             *is_new;

 21:   PetscMalloc1(imax,&is_new);
 22:   /* Convert the indices into block format */
 23:   ISCompressIndicesGeneral(N,C->rmap->n,bs,imax,is,is_new);
 24:   if (ov < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified");
 25:   for (i=0; i<ov; ++i) {
 26:     MatIncreaseOverlap_MPIBAIJ_Once(C,imax,is_new);
 27:   }
 28:   for (i=0; i<imax; i++) {ISDestroy(&is[i]);}
 29:   ISExpandIndicesGeneral(N,N,bs,imax,is_new,is);
 30:   for (i=0; i<imax; i++) {ISDestroy(&is_new[i]);}
 31:   PetscFree(is_new);
 32:   return(0);
 33: }
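
/*
   Illustrative usage: a minimal sketch of how a caller might grow an index
   set by two levels of overlap through the public MatIncreaseOverlap()
   interface, which dispatches to the routine above for MPIBAIJ matrices.
   The matrix C and the chosen rows are assumptions of the example; error
   checking is elided as elsewhere in this listing.

     IS       is;
     PetscInt rows[] = {0,1,2};                  // global rows of interest
     ISCreateGeneral(PETSC_COMM_WORLD,3,rows,PETSC_COPY_VALUES,&is);
     MatIncreaseOverlap(C,1,&is,2);              // is now holds the 2-overlap
     ISDestroy(&is);
*/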

 35: /*
 36:   Sample message format:
 37:   If a processor A wants processor B to process some elements corresponding
 38:   to index sets is[1], is[5]
 39:   mesg [0] = 2   (no of index sets in the mesg)
 40:   -----------
 41:   mesg [1] = 1 => is[1]
 42:   mesg [2] = sizeof(is[1]);
 43:   -----------
 44:   mesg [3] = 5  => is[5]
 45:   mesg [4] = sizeof(is[5]);
 46:   -----------
 47:   mesg [5]
 48:   mesg [n]  data(is[1])
 49:   -----------
 50:   mesg[n+1]
 51:   mesg[m]  data(is[5])
 52:   -----------
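
  A concrete instance: for is[1] = {3,7} and is[5] = {2}, the packed message
  is mesg[] = {2, 1,2, 5,1, 3,7, 2}: the count, two (index-set id, length)
  header pairs, then the data of is[1] followed by the data of is[5].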

 54:   Notes:
 55:   nrqs - number of requests sent (or to be sent out)
 56:   nrqr - number of requests received (which have to be, or already have been, processed)
 57: */
 58: PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Once(Mat C,PetscInt imax,IS is[])
 59: {
 60:   Mat_MPIBAIJ    *c = (Mat_MPIBAIJ*)C->data;
 61:   const PetscInt **idx,*idx_i;
 62:   PetscInt       *n,*w3,*w4,**data,len;
 64:   PetscMPIInt    size,rank,tag1,tag2,*w2,*w1,nrqr;
 65:   PetscInt       Mbs,i,j,k,**rbuf,row,nrqs,msz,**outdat,**ptr;
 66:   PetscInt       *ctr,*pa,*tmp,*isz,*isz1,**xdata,**rbuf2,*d_p;
 67:   PetscMPIInt    *onodes1,*olengths1,*onodes2,*olengths2,proc=-1;
 68:   PetscBT        *table;
 69:   MPI_Comm       comm,*iscomms;
 70:   MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2;
 71:   MPI_Status     *s_status,*recv_status;
 72:   char           *t_p;

 75:   PetscObjectGetComm((PetscObject)C,&comm);
 76:   size = c->size;
 77:   rank = c->rank;
 78:   Mbs  = c->Mbs;

 80:   PetscObjectGetNewTag((PetscObject)C,&tag1);
 81:   PetscObjectGetNewTag((PetscObject)C,&tag2);

 83:   PetscMalloc2(imax+1,(PetscInt***)&idx,imax,&n);

 85:   for (i=0; i<imax; i++) {
 86:     ISGetIndices(is[i],&idx[i]);
 87:     ISGetLocalSize(is[i],&n[i]);
 88:   }

 90:   /* evaluate communication - mesg to whom, length of mesg, and buffer space
 91:      required. Based on this, buffers are allocated, and data copied into them */
 92:   PetscCalloc4(size,&w1,size,&w2,size,&w3,size,&w4);
 93:   for (i=0; i<imax; i++) {
 94:     PetscArrayzero(w4,size); /* initialise work vector*/
 95:     idx_i = idx[i];
 96:     len   = n[i];
 97:     for (j=0; j<len; j++) {
 98:       row = idx_i[j];
 99:       if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Index set cannot have negative entries");
100:       PetscLayoutFindOwner(C->rmap,row*C->rmap->bs,&proc);
101:       w4[proc]++;
102:     }
103:     for (j=0; j<size; j++) {
104:       if (w4[j]) { w1[j] += w4[j]; w3[j]++;}
105:     }
106:   }

108:   nrqs     = 0;              /* no of outgoing messages */
109:   msz      = 0;              /* total mesg length (for all procs) */
110:   w1[rank] = 0;              /* no mesg sent to itself */
111:   w3[rank] = 0;
112:   for (i=0; i<size; i++) {
113:     if (w1[i])  {w2[i] = 1; nrqs++;} /* there exists a message to proc i */
114:   }
115:   /* pa - list of processors to communicate with */
116:   PetscMalloc1(nrqs+1,&pa);
117:   for (i=0,j=0; i<size; i++) {
118:     if (w1[i]) {pa[j] = i; j++;}
119:   }

121:   /* Each message would have a header = 1 + 2*(no of IS) + data */
122:   for (i=0; i<nrqs; i++) {
123:     j      = pa[i];
124:     w1[j] += w2[j] + 2*w3[j];
125:     msz   += w1[j];
126:   }
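
  /* For example, a destination receiving rows from 3 different index sets,
     10 block rows in total, gets a message of length 1 + 2*3 + 10 = 17 */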

128:   /* Determine the number of messages to expect, their lengths, and their sources (from-ids) */
129:   PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);
130:   PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);

132:   /* Now post the Irecvs corresponding to these messages */
133:   PetscPostIrecvInt(comm,tag1,nrqr,onodes1,olengths1,&rbuf,&r_waits1);
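
  /*
     The three calls above are PETSc's standard sparse all-to-all handshake.
     A minimal sketch of the pattern in isolation (comm, tag, nsends and the
     w1/w2 sender-side bookkeeping arrays are placeholders):

       PetscMPIInt nrecvs,*sources,*lengths;
       PetscInt    **recvbuf;
       MPI_Request *recv_waits;
       // w2[p] = 1 if this rank sends to p, w1[p] = length of that message
       PetscGatherNumberOfMessages(comm,w2,w1,&nrecvs);
       PetscGatherMessageLengths(comm,nsends,nrecvs,w1,&sources,&lengths);
       PetscPostIrecvInt(comm,tag,nrecvs,sources,lengths,&recvbuf,&recv_waits);
       // ... MPI_Isend() each outgoing buffer, then wait on recv_waits ...
  */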

135:   /* Allocate Memory for outgoing messages */
136:   PetscMalloc4(size,&outdat,size,&ptr,msz,&tmp,size,&ctr);
137:   PetscArrayzero(outdat,size);
138:   PetscArrayzero(ptr,size);
139:   {
140:     PetscInt *iptr = tmp,ict  = 0;
141:     for (i=0; i<nrqs; i++) {
142:       j         = pa[i];
143:       iptr     +=  ict;
144:       outdat[j] = iptr;
145:       ict       = w1[j];
146:     }
147:   }

149:   /* Form the outgoing messages */
150:   /*plug in the headers*/
151:   for (i=0; i<nrqs; i++) {
152:     j            = pa[i];
153:     outdat[j][0] = 0;
154:     PetscArrayzero(outdat[j]+1,2*w3[j]);
155:     ptr[j]       = outdat[j] + 2*w3[j] + 1;
156:   }

158:   /* Memory for doing local proc's work*/
159:   {
160:     PetscCalloc5(imax,&table, imax,&data, imax,&isz, Mbs*imax,&d_p, (Mbs/PETSC_BITS_PER_BYTE+1)*imax,&t_p);

162:     for (i=0; i<imax; i++) {
163:       table[i] = t_p + (Mbs/PETSC_BITS_PER_BYTE+1)*i;
164:       data[i]  = d_p + (Mbs)*i;
165:     }
166:   }
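
  /* Sizing example: with Mbs = 1000 block rows, each bit table occupies
     1000/8 + 1 = 126 chars and each data slot 1000 PetscInts, so the single
     allocation above serves all imax index sets contiguously */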

168:   /* Parse the IS and update local tables and the outgoing buf with the data*/
169:   {
170:     PetscInt n_i,*data_i,isz_i,*outdat_j,ctr_j;
171:     PetscBT  table_i;

173:     for (i=0; i<imax; i++) {
174:       PetscArrayzero(ctr,size);
175:       n_i     = n[i];
176:       table_i = table[i];
177:       idx_i   = idx[i];
178:       data_i  = data[i];
179:       isz_i   = isz[i];
180:       for (j=0; j<n_i; j++) {   /* parse the indices of each IS */
181:         row  = idx_i[j];
182:         PetscLayoutFindOwner(C->rmap,row*C->rmap->bs,&proc);
183:         if (proc != rank) { /* copy to the outgoing buffer */
184:           ctr[proc]++;
185:           *ptr[proc] = row;
186:           ptr[proc]++;
187:         } else { /* Update the local table */
188:           if (!PetscBTLookupSet(table_i,row)) data_i[isz_i++] = row;
189:         }
190:       }
191:       /* Update the headers for the current IS */
192:       for (j=0; j<size; j++) { /* Can Optimise this loop by using pa[] */
193:         if ((ctr_j = ctr[j])) {
194:           outdat_j        = outdat[j];
195:           k               = ++outdat_j[0];
196:           outdat_j[2*k]   = ctr_j;
197:           outdat_j[2*k-1] = i;
198:         }
199:       }
200:       isz[i] = isz_i;
201:     }
202:   }

204:   /* Now post the sends */
205:   PetscMalloc1(nrqs+1,&s_waits1);
206:   for (i=0; i<nrqs; ++i) {
207:     j    = pa[i];
208:     MPI_Isend(outdat[j],w1[j],MPIU_INT,j,tag1,comm,s_waits1+i);
209:   }

211:   /* No longer need the original indices*/
212:   for (i=0; i<imax; ++i) {
213:     ISRestoreIndices(is[i],idx+i);
214:   }
215:   PetscFree2(*(PetscInt***)&idx,n);

217:   PetscMalloc1(imax,&iscomms);
218:   for (i=0; i<imax; ++i) {
219:     PetscCommDuplicate(PetscObjectComm((PetscObject)is[i]),&iscomms[i],NULL);
220:     ISDestroy(&is[i]);
221:   }

223:   /* Do Local work*/
224:   MatIncreaseOverlap_MPIBAIJ_Local(C,imax,table,isz,data);

226:   /* Receive messages*/
227:   PetscMalloc1(nrqr+1,&recv_status);
228:   if (nrqr) {MPI_Waitall(nrqr,r_waits1,recv_status);}

230:   PetscMalloc1(nrqs+1,&s_status);
231:   if (nrqs) {MPI_Waitall(nrqs,s_waits1,s_status);}

233:   /* Phase 1 sends are complete - deallocate buffers */
234:   PetscFree4(outdat,ptr,tmp,ctr);
235:   PetscFree4(w1,w2,w3,w4);

237:   PetscMalloc1(nrqr+1,&xdata);
238:   PetscMalloc1(nrqr+1,&isz1);
239:   MatIncreaseOverlap_MPIBAIJ_Receive(C,nrqr,rbuf,xdata,isz1);
240:   PetscFree(rbuf[0]);
241:   PetscFree(rbuf);

243:   /* Send the data back*/
244:   /* Do a global reduction to know the buffer space req for incoming messages*/
245:   {
246:     PetscMPIInt *rw1;

248:     PetscCalloc1(size,&rw1);

250:     for (i=0; i<nrqr; ++i) {
251:       proc = recv_status[i].MPI_SOURCE;
252:       if (proc != onodes1[i]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MPI_SOURCE mismatch");
253:       rw1[proc] = isz1[i];
254:     }

256:     PetscFree(onodes1);
257:     PetscFree(olengths1);

259:     /* Determine the number of messages to expect, their lengths, and their sources (from-ids) */
260:     PetscGatherMessageLengths(comm,nrqr,nrqs,rw1,&onodes2,&olengths2);
261:     PetscFree(rw1);
262:   }
263:   /* Now post the Irecvs corresponding to these messages */
264:   PetscPostIrecvInt(comm,tag2,nrqs,onodes2,olengths2,&rbuf2,&r_waits2);

266:   /* Now post the sends */
267:   PetscMalloc1(nrqr+1,&s_waits2);
268:   for (i=0; i<nrqr; ++i) {
269:     j    = recv_status[i].MPI_SOURCE;
270:     MPI_Isend(xdata[i],isz1[i],MPIU_INT,j,tag2,comm,s_waits2+i);
271:   }

273:   /* receive work done on other processors*/
274:   {
275:     PetscMPIInt idex;
276:     PetscInt    is_no,ct1,max,*rbuf2_i,isz_i,*data_i,jmax;
277:     PetscBT     table_i;
278:     MPI_Status  *status2;

280:     PetscMalloc1(PetscMax(nrqr,nrqs)+1,&status2);
281:     for (i=0; i<nrqs; ++i) {
282:       MPI_Waitany(nrqs,r_waits2,&idex,status2+i);
283:       /* Process the message*/
284:       rbuf2_i = rbuf2[idex];
285:       ct1     = 2*rbuf2_i[0]+1;
286:       jmax    = rbuf2[idex][0];
287:       for (j=1; j<=jmax; j++) {
288:         max     = rbuf2_i[2*j];
289:         is_no   = rbuf2_i[2*j-1];
290:         isz_i   = isz[is_no];
291:         data_i  = data[is_no];
292:         table_i = table[is_no];
293:         for (k=0; k<max; k++,ct1++) {
294:           row = rbuf2_i[ct1];
295:           if (!PetscBTLookupSet(table_i,row)) data_i[isz_i++] = row;
296:         }
297:         isz[is_no] = isz_i;
298:       }
299:     }
300:     if (nrqr) {MPI_Waitall(nrqr,s_waits2,status2);}
301:     PetscFree(status2);
302:   }

304:   for (i=0; i<imax; ++i) {
305:     ISCreateGeneral(iscomms[i],isz[i],data[i],PETSC_COPY_VALUES,is+i);
306:     PetscCommDestroy(&iscomms[i]);
307:   }

309:   PetscFree(iscomms);
310:   PetscFree(onodes2);
311:   PetscFree(olengths2);

313:   PetscFree(pa);
314:   PetscFree(rbuf2[0]);
315:   PetscFree(rbuf2);
316:   PetscFree(s_waits1);
317:   PetscFree(r_waits1);
318:   PetscFree(s_waits2);
319:   PetscFree(r_waits2);
320:   PetscFree5(table,data,isz,d_p,t_p);
321:   PetscFree(s_status);
322:   PetscFree(recv_status);
323:   PetscFree(xdata[0]);
324:   PetscFree(xdata);
325:   PetscFree(isz1);
326:   return(0);
327: }
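
/*
   The duplicate filtering used throughout the routine above relies on PetscBT,
   a compact bit table. A minimal stand-alone sketch of the idiom (all names
   are local to the example):

     PetscBT  seen;
     PetscInt vals[] = {4,2,4,7,2},uniq[5],nuniq = 0,i;
     PetscBTCreate(10,&seen);                    // indices range over 0..9
     for (i=0; i<5; i++) {
       if (!PetscBTLookupSet(seen,vals[i])) uniq[nuniq++] = vals[i];
     }                                           // uniq = {4,2,7}
     PetscBTDestroy(&seen);
*/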

329: /*
330:    MatIncreaseOverlap_MPIBAIJ_Local - Called by MatIncreaseOverlap, to do
331:        the work on the local processor.

333:      Inputs:
334:       C      - MAT_MPIBAIJ;
335:       imax - total no of index sets processed at a time;
336:       table  - an array of bit tables (PetscBT), one per index set, each Mbs bits in size.

338:      Output:
339:       isz    - array containing the count of the solution elements corresponding
340:                to each index set;
341:       data   - pointer to the solutions
342: */
343: static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Local(Mat C,PetscInt imax,PetscBT *table,PetscInt *isz,PetscInt **data)
344: {
345:   Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data;
346:   Mat         A  = c->A,B = c->B;
347:   Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)B->data;
348:   PetscInt    start,end,val,max,rstart,cstart,*ai,*aj;
349:   PetscInt    *bi,*bj,*garray,i,j,k,row,*data_i,isz_i;
350:   PetscBT     table_i;

353:   rstart = c->rstartbs;
354:   cstart = c->cstartbs;
355:   ai     = a->i;
356:   aj     = a->j;
357:   bi     = b->i;
358:   bj     = b->j;
359:   garray = c->garray;

361:   for (i=0; i<imax; i++) {
362:     data_i  = data[i];
363:     table_i = table[i];
364:     isz_i   = isz[i];
365:     for (j=0,max=isz[i]; j<max; j++) {
366:       row   = data_i[j] - rstart;
367:       start = ai[row];
368:       end   = ai[row+1];
369:       for (k=start; k<end; k++) { /* Amat */
370:         val = aj[k] + cstart;
371:         if (!PetscBTLookupSet(table_i,val)) data_i[isz_i++] = val;
372:       }
373:       start = bi[row];
374:       end   = bi[row+1];
375:       for (k=start; k<end; k++) { /* Bmat */
376:         val = garray[bj[k]];
377:         if (!PetscBTLookupSet(table_i,val)) data_i[isz_i++] = val;
378:       }
379:     }
380:     isz[i] = isz_i;
381:   }
382:   return(0);
383: }
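
/*
   The loops above reflect how an MPIBAIJ matrix stores each local block row in
   two SeqBAIJ pieces: A holds the diagonal portion, whose local block columns
   become global after adding cstart, and B the off-diagonal portion, whose
   compressed column indices are translated through garray. Schematically, for
   one local block row r:

     for (k=ai[r]; k<ai[r+1]; k++) col = aj[k] + cstart;   // A (diagonal) part
     for (k=bi[r]; k<bi[r+1]; k++) col = garray[bj[k]];    // B (off-diagonal) part
*/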
384: /*
385:       MatIncreaseOverlap_MPIBAIJ_Receive - Process the received messages,
386:          and return the output

388:          Input:
389:            C    - the matrix
390:            nrqr - no of messages being processed.
391:            rbuf - an array of pointers to the received requests

393:          Output:
394:            xdata - array of messages to be sent back
395:            isz1  - size of each message

397:   For better efficiency perhaps we should malloc each xdata[i] separately;
398: then if a realloc is required, we need only copy the data for that one row
399: rather than for all previous rows, as happens now where a single large chunk
400: of memory is used.

402: */
403: static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Receive(Mat C,PetscInt nrqr,PetscInt **rbuf,PetscInt **xdata,PetscInt * isz1)
404: {
405:   Mat_MPIBAIJ    *c = (Mat_MPIBAIJ*)C->data;
406:   Mat            A  = c->A,B = c->B;
407:   Mat_SeqBAIJ    *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)B->data;
409:   PetscInt       rstart,cstart,*ai,*aj,*bi,*bj,*garray,i,j,k;
410:   PetscInt       row,total_sz,ct,ct1,ct2,ct3,mem_estimate,oct2,l,start,end;
411:   PetscInt       val,max1,max2,Mbs,no_malloc =0,*tmp,new_estimate,ctr;
412:   PetscInt       *rbuf_i,kmax,rbuf_0;
413:   PetscBT        xtable;

416:   Mbs    = c->Mbs;
417:   rstart = c->rstartbs;
418:   cstart = c->cstartbs;
419:   ai     = a->i;
420:   aj     = a->j;
421:   bi     = b->i;
422:   bj     = b->j;
423:   garray = c->garray;

425:   for (i=0,ct=0,total_sz=0; i<nrqr; ++i) {
426:     rbuf_i =  rbuf[i];
427:     rbuf_0 =  rbuf_i[0];
428:     ct    += rbuf_0;
429:     for (j=1; j<=rbuf_0; j++) total_sz += rbuf_i[2*j];
430:   }

432:   if (c->Mbs) max1 = ct*(a->nz +b->nz)/c->Mbs;
433:   else        max1 = 1;
434:   mem_estimate = 3*((total_sz > max1 ? total_sz : max1)+1);
435:   PetscMalloc1(mem_estimate,&xdata[0]);
436:   ++no_malloc;
437:   PetscBTCreate(Mbs,&xtable);
438:   PetscArrayzero(isz1,nrqr);

440:   ct3 = 0;
441:   for (i=0; i<nrqr; i++) { /* for each mesg from proc i */
442:     rbuf_i =  rbuf[i];
443:     rbuf_0 =  rbuf_i[0];
444:     ct1    =  2*rbuf_0+1;
445:     ct2    =  ct1;
446:     ct3   += ct1;
447:     for (j=1; j<=rbuf_0; j++) { /* for each IS from proc i*/
448:       PetscBTMemzero(Mbs,xtable);
449:       oct2 = ct2;
450:       kmax = rbuf_i[2*j];
451:       for (k=0; k<kmax; k++,ct1++) {
452:         row = rbuf_i[ct1];
453:         if (!PetscBTLookupSet(xtable,row)) {
454:           if (!(ct3 < mem_estimate)) {
455:             new_estimate = (PetscInt)(1.5*mem_estimate)+1;
456:             PetscMalloc1(new_estimate,&tmp);
457:             PetscArraycpy(tmp,xdata[0],mem_estimate);
458:             PetscFree(xdata[0]);
459:             xdata[0]     = tmp;
460:             mem_estimate = new_estimate; ++no_malloc;
461:             for (ctr=1; ctr<=i; ctr++) xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];
462:           }
463:           xdata[i][ct2++] = row;
464:           ct3++;
465:         }
466:       }
467:       for (k=oct2,max2=ct2; k<max2; k++)  {
468:         row   = xdata[i][k] - rstart;
469:         start = ai[row];
470:         end   = ai[row+1];
471:         for (l=start; l<end; l++) {
472:           val = aj[l] + cstart;
473:           if (!PetscBTLookupSet(xtable,val)) {
474:             if (!(ct3 < mem_estimate)) {
475:               new_estimate = (PetscInt)(1.5*mem_estimate)+1;
476:               PetscMalloc1(new_estimate,&tmp);
477:               PetscArraycpy(tmp,xdata[0],mem_estimate);
478:               PetscFree(xdata[0]);
479:               xdata[0]     = tmp;
480:               mem_estimate = new_estimate; ++no_malloc;
481:               for (ctr=1; ctr<=i; ctr++) xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];
482:             }
483:             xdata[i][ct2++] = val;
484:             ct3++;
485:           }
486:         }
487:         start = bi[row];
488:         end   = bi[row+1];
489:         for (l=start; l<end; l++) {
490:           val = garray[bj[l]];
491:           if (!PetscBTLookupSet(xtable,val)) {
492:             if (!(ct3 < mem_estimate)) {
493:               new_estimate = (PetscInt)(1.5*mem_estimate)+1;
494:               PetscMalloc1(new_estimate,&tmp);
495:               PetscArraycpy(tmp,xdata[0],mem_estimate);
496:               PetscFree(xdata[0]);
497:               xdata[0]     = tmp;
498:               mem_estimate = new_estimate; ++no_malloc;
499:               for (ctr =1; ctr <=i; ctr++) xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];
500:             }
501:             xdata[i][ct2++] = val;
502:             ct3++;
503:           }
504:         }
505:       }
506:       /* Update the header*/
507:       xdata[i][2*j]   = ct2 - oct2; /* Undo the vector isz1 and use only a var*/
508:       xdata[i][2*j-1] = rbuf_i[2*j-1];
509:     }
510:     xdata[i][0] = rbuf_0;
511:     xdata[i+1]  = xdata[i] + ct2;
512:     isz1[i]     = ct2; /* size of each message */
513:   }
514:   PetscBTDestroy(&xtable);
515:   PetscInfo3(C,"Allocated %D ints, required %D ints, number of mallocs = %D\n",mem_estimate,ct3,no_malloc);
516:   return(0);
517: }
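
/*
   When the mem_estimate above proves too small, the routine grows the shared
   xdata[0] arena by a factor of 1.5 and rebases every xdata[i] pointer into
   the new arena. The same growth pattern in isolation (buf, len and cap are
   placeholder names):

     if (len == cap) {
       PetscInt *tmp,newcap = (PetscInt)(1.5*cap)+1;
       PetscMalloc1(newcap,&tmp);
       PetscArraycpy(tmp,buf,cap);
       PetscFree(buf);
       buf = tmp; cap = newcap;
     }
*/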

519: PetscErrorCode MatCreateSubMatrices_MPIBAIJ(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[])
520: {
521:   IS             *isrow_block,*iscol_block;
522:   Mat_MPIBAIJ    *c = (Mat_MPIBAIJ*)C->data;
524:   PetscInt       nmax,nstages_local,nstages,i,pos,max_no,N=C->cmap->N,bs=C->rmap->bs;
525:   Mat_SeqBAIJ    *subc;
526:   Mat_SubSppt    *smat;

529:   /* The compression and expansion should be avoided. They do not flag
530:      errors and may change the indices, hence they are buggy */
531:   PetscMalloc2(ismax+1,&isrow_block,ismax+1,&iscol_block);
532:   ISCompressIndicesGeneral(N,C->rmap->n,bs,ismax,isrow,isrow_block);
533:   ISCompressIndicesGeneral(N,C->cmap->n,bs,ismax,iscol,iscol_block);

535:   /* Determine the number of stages through which submatrices are done */
536:   if (!C->cmap->N) nmax=20*1000000/sizeof(PetscInt);
537:   else nmax = 20*1000000 / (c->Nbs * sizeof(PetscInt));
538:   if (!nmax) nmax = 1;

540:   if (scall == MAT_INITIAL_MATRIX) {
541:     nstages_local = ismax/nmax + ((ismax % nmax) ? 1 : 0); /* local nstages */

543:     /* Make sure every processor loops through the nstages */
544:     MPIU_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,PetscObjectComm((PetscObject)C));

546:     /* Allocate memory to hold all the submatrices and dummy submatrices */
547:     PetscCalloc1(ismax+nstages,submat);
548:   } else { /* MAT_REUSE_MATRIX */
549:     if (ismax) {
550:       subc = (Mat_SeqBAIJ*)((*submat)[0]->data);
551:       smat   = subc->submatis1;
552:     } else { /* (*submat)[0] is a dummy matrix */
553:       smat = (Mat_SubSppt*)(*submat)[0]->data;
554:     }
555:     if (!smat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"MatCreateSubMatrices(...,MAT_REUSE_MATRIX,...) requires submat");
556:     nstages = smat->nstages;
557:   }

559:   for (i=0,pos=0; i<nstages; i++) {
560:     if (pos+nmax <= ismax) max_no = nmax;
561:     else if (pos == ismax) max_no = 0;
562:     else                   max_no = ismax-pos;

564:     MatCreateSubMatrices_MPIBAIJ_local(C,max_no,isrow_block+pos,iscol_block+pos,scall,*submat+pos);
565:     if (!max_no && scall == MAT_INITIAL_MATRIX) { /* submat[pos] is a dummy matrix */
566:       smat = (Mat_SubSppt*)(*submat)[pos]->data;
567:       smat->nstages = nstages;
568:     }
569:     pos += max_no;
570:   }

572:   if (scall == MAT_INITIAL_MATRIX && ismax) {
573:     /* save nstages for reuse */
574:     subc = (Mat_SeqBAIJ*)((*submat)[0]->data);
575:     smat = subc->submatis1;
576:     smat->nstages = nstages;
577:   }

579:   for (i=0; i<ismax; i++) {
580:     ISDestroy(&isrow_block[i]);
581:     ISDestroy(&iscol_block[i]);
582:   }
583:   PetscFree2(isrow_block,iscol_block);
584:   return(0);
585: }
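
/*
   Illustrative usage, assuming a parallel BAIJ matrix C and suitable index
   sets isrow[]/iscol[] of length n already exist: extract the submatrices
   once with MAT_INITIAL_MATRIX, then refill the same matrices on later calls,
   as the MatCreateSubMatrices() interface requires.

     Mat *submat;
     MatCreateSubMatrices(C,n,isrow,iscol,MAT_INITIAL_MATRIX,&submat);
     // ... change the values (not the pattern) of C ...
     MatCreateSubMatrices(C,n,isrow,iscol,MAT_REUSE_MATRIX,&submat);
     MatDestroySubMatrices(n,&submat);
*/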

587: #if defined(PETSC_USE_CTABLE)
588: PetscErrorCode PetscGetProc(const PetscInt row, const PetscMPIInt size, const PetscInt proc_gnode[], PetscMPIInt *rank)
589: {
590:   PetscInt       nGlobalNd = proc_gnode[size];
591:   PetscMPIInt    fproc;

595:   PetscMPIIntCast((PetscInt)(((float)row * (float)size / (float)nGlobalNd + 0.5)),&fproc);
596:   if (fproc > size) fproc = size;
597:   while (row < proc_gnode[fproc] || row >= proc_gnode[fproc+1]) {
598:     if (row < proc_gnode[fproc]) fproc--;
599:     else                         fproc++;
600:   }
601:   *rank = fproc;
602:   return(0);
603: }
604: #endif
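
/*
   Worked example for the guess-and-walk search above: with size = 3,
   proc_gnode = {0,4,8,12} and row = 9, the initial guess is
   (PetscInt)(9*3/12.0 + 0.5) = 2, and since proc_gnode[2] <= 9 < proc_gnode[3]
   the while loop exits immediately with *rank = 2.
*/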

606: /* -------------------------------------------------------------------------*/
607: /* This code is used for BAIJ and SBAIJ matrices (unfortunate dependency) */
608: PetscErrorCode MatCreateSubMatrices_MPIBAIJ_local(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submats)
609: {
610:   Mat_MPIBAIJ    *c = (Mat_MPIBAIJ*)C->data;
611:   Mat            A  = c->A;
612:   Mat_SeqBAIJ    *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)c->B->data,*subc;
613:   const PetscInt **icol,**irow;
614:   PetscInt       *nrow,*ncol,start;
616:   PetscMPIInt    rank,size,tag0,tag2,tag3,tag4,*w1,*w2,*w3,*w4,nrqr;
617:   PetscInt       **sbuf1,**sbuf2,*sbuf2_i,i,j,k,l,ct1,ct2,**rbuf1,row,proc=-1;
618:   PetscInt       nrqs=0,msz,**ptr=NULL,*req_size=NULL,*ctr=NULL,*pa,*tmp=NULL,tcol;
619:   PetscInt       **rbuf3=NULL,*req_source1=NULL,*req_source2,**sbuf_aj,**rbuf2=NULL,max1,max2;
620:   PetscInt       **lens,is_no,ncols,*cols,mat_i,*mat_j,tmp2,jmax;
621: #if defined(PETSC_USE_CTABLE)
622:   PetscTable     *cmap,cmap_i=NULL,*rmap,rmap_i;
623: #else
624:   PetscInt       **cmap,*cmap_i=NULL,**rmap,*rmap_i;
625: #endif
626:   const PetscInt *irow_i,*icol_i;
627:   PetscInt       ctr_j,*sbuf1_j,*sbuf_aj_i,*rbuf1_i,kmax,*lens_i;
628:   MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2,*r_waits3;
629:   MPI_Request    *r_waits4,*s_waits3,*s_waits4;
630:   MPI_Status     *r_status1,*r_status2,*s_status1,*s_status3,*s_status2;
631:   MPI_Status     *r_status3,*r_status4,*s_status4;
632:   MPI_Comm       comm;
633:   PetscScalar    **rbuf4,*rbuf4_i=NULL,**sbuf_aa,*vals,*mat_a=NULL,*imat_a=NULL,*sbuf_aa_i;
634:   PetscMPIInt    *onodes1,*olengths1,end;
635:   PetscInt       **row2proc,*row2proc_i,*imat_ilen,*imat_j,*imat_i;
636:   Mat_SubSppt    *smat_i;
637:   PetscBool      *issorted,colflag,iscsorted=PETSC_TRUE;
638:   PetscInt       *sbuf1_i,*rbuf2_i,*rbuf3_i,ilen;
639:   PetscInt       bs=C->rmap->bs,bs2=c->bs2,rstart = c->rstartbs;
640:   PetscBool      ijonly=c->ijonly; /* private flag indicates only matrix data structures are requested */
641:   PetscInt       nzA,nzB,*a_i=a->i,*b_i=b->i,*a_j = a->j,*b_j = b->j,ctmp,imark,*cworkA,*cworkB;
642:   PetscScalar    *vworkA=NULL,*vworkB=NULL,*a_a = a->a,*b_a = b->a;
643:   PetscInt       cstart = c->cstartbs,*bmap = c->garray;
644:   PetscBool      *allrows,*allcolumns;

647:   PetscObjectGetComm((PetscObject)C,&comm);
648:   size = c->size;
649:   rank = c->rank;

651:   PetscMalloc5(ismax,&row2proc,ismax,&cmap,ismax,&rmap,ismax+1,&allcolumns,ismax,&allrows);
652:   PetscMalloc5(ismax,(PetscInt***)&irow,ismax,(PetscInt***)&icol,ismax,&nrow,ismax,&ncol,ismax,&issorted);

654:   for (i=0; i<ismax; i++) {
655:     ISSorted(iscol[i],&issorted[i]);
656:     if (!issorted[i]) iscsorted = issorted[i]; /* columns are not sorted! */
657:     ISSorted(isrow[i],&issorted[i]);

659:     /* Check for special case: allcolumns */
660:     ISIdentity(iscol[i],&colflag);
661:     ISGetLocalSize(iscol[i],&ncol[i]);

663:     if (colflag && ncol[i] == c->Nbs) {
664:       allcolumns[i] = PETSC_TRUE;
665:       icol[i]       = NULL;
666:     } else {
667:       allcolumns[i] = PETSC_FALSE;
668:       ISGetIndices(iscol[i],&icol[i]);
669:     }

671:     /* Check for special case: allrows */
672:     ISIdentity(isrow[i],&colflag);
673:     ISGetLocalSize(isrow[i],&nrow[i]);
674:     if (colflag && nrow[i] == c->Mbs) {
675:       allrows[i] = PETSC_TRUE;
676:       irow[i]    = NULL;
677:     } else {
678:       allrows[i] = PETSC_FALSE;
679:       ISGetIndices(isrow[i],&irow[i]);
680:     }
681:   }

683:   if (scall == MAT_REUSE_MATRIX) {
684:     /* Assumes new rows are same length as the old rows */
685:     for (i=0; i<ismax; i++) {
686:       subc = (Mat_SeqBAIJ*)(submats[i]->data);
687:       if (subc->mbs != nrow[i] || subc->nbs != ncol[i]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Cannot reuse matrix, wrong size");

689:       /* Initialize the matrix as if empty */
690:       PetscArrayzero(subc->ilen,subc->mbs);

692:       /* Inherit the factor type of the original matrix */
693:       submats[i]->factortype = C->factortype;

695:       smat_i   = subc->submatis1;

697:       nrqs        = smat_i->nrqs;
698:       nrqr        = smat_i->nrqr;
699:       rbuf1       = smat_i->rbuf1;
700:       rbuf2       = smat_i->rbuf2;
701:       rbuf3       = smat_i->rbuf3;
702:       req_source2 = smat_i->req_source2;

704:       sbuf1     = smat_i->sbuf1;
705:       sbuf2     = smat_i->sbuf2;
706:       ptr       = smat_i->ptr;
707:       tmp       = smat_i->tmp;
708:       ctr       = smat_i->ctr;

710:       pa          = smat_i->pa;
711:       req_size    = smat_i->req_size;
712:       req_source1 = smat_i->req_source1;

714:       allcolumns[i] = smat_i->allcolumns;
715:       allrows[i]    = smat_i->allrows;
716:       row2proc[i]   = smat_i->row2proc;
717:       rmap[i]       = smat_i->rmap;
718:       cmap[i]       = smat_i->cmap;
719:     }

721:     if (!ismax){ /* Get dummy submatrices and retrieve struct submatis1 */
722:       if (!submats[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"submats are null, cannot reuse");
723:       smat_i = (Mat_SubSppt*)submats[0]->data;

725:       nrqs        = smat_i->nrqs;
726:       nrqr        = smat_i->nrqr;
727:       rbuf1       = smat_i->rbuf1;
728:       rbuf2       = smat_i->rbuf2;
729:       rbuf3       = smat_i->rbuf3;
730:       req_source2 = smat_i->req_source2;

732:       sbuf1       = smat_i->sbuf1;
733:       sbuf2       = smat_i->sbuf2;
734:       ptr         = smat_i->ptr;
735:       tmp         = smat_i->tmp;
736:       ctr         = smat_i->ctr;

738:       pa          = smat_i->pa;
739:       req_size    = smat_i->req_size;
740:       req_source1 = smat_i->req_source1;

742:       allcolumns[0] = PETSC_FALSE;
743:     }
744:   } else { /* scall == MAT_INITIAL_MATRIX */
745:     /* Get some new tags to keep the communication clean */
746:     PetscObjectGetNewTag((PetscObject)C,&tag2);
747:     PetscObjectGetNewTag((PetscObject)C,&tag3);

749:     /* evaluate communication - mesg to whom, length of mesg, and buffer space
750:      required. Based on this, buffers are allocated, and data copied into them */
751:     PetscCalloc4(size,&w1,size,&w2,size,&w3,size,&w4);   /* mesg size, initialize work vectors */

753:     for (i=0; i<ismax; i++) {
754:       jmax   = nrow[i];
755:       irow_i = irow[i];

757:       PetscMalloc1(jmax,&row2proc_i);
758:       row2proc[i] = row2proc_i;

760:       if (issorted[i]) proc = 0;
761:       for (j=0; j<jmax; j++) {
762:         if (!issorted[i]) proc = 0;
763:         if (allrows[i]) row = j;
764:         else row = irow_i[j];

766:         while (row >= c->rangebs[proc+1]) proc++;
767:         w4[proc]++;
768:         row2proc_i[j] = proc; /* map row index to proc */
769:       }
770:       for (j=0; j<size; j++) {
771:         if (w4[j]) { w1[j] += w4[j];  w3[j]++; w4[j] = 0;}
772:       }
773:     }

775:     nrqs     = 0;              /* no of outgoing messages */
776:     msz      = 0;              /* total mesg length (for all procs) */
777:     w1[rank] = 0;              /* no mesg sent to self */
778:     w3[rank] = 0;
779:     for (i=0; i<size; i++) {
780:       if (w1[i])  { w2[i] = 1; nrqs++;} /* there exists a message to proc i */
781:     }
782:     PetscMalloc1(nrqs+1,&pa); /*(proc -array)*/
783:     for (i=0,j=0; i<size; i++) {
784:       if (w1[i]) { pa[j] = i; j++; }
785:     }

787:     /* Each message would have a header = 1 + 2*(no of IS) + data */
788:     for (i=0; i<nrqs; i++) {
789:       j      = pa[i];
790:       w1[j] += w2[j] + 2* w3[j];
791:       msz   += w1[j];
792:     }
793:     PetscInfo2(0,"Number of outgoing messages %D, total message length %D\n",nrqs,msz);

795:     /* Determine the number of messages to expect, their lengths, and their sources (from-ids) */
796:     PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);
797:     PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);

799:     /* Now post the Irecvs corresponding to these messages */
800:     tag0 = ((PetscObject)C)->tag;
801:     PetscPostIrecvInt(comm,tag0,nrqr,onodes1,olengths1,&rbuf1,&r_waits1);

803:     PetscFree(onodes1);
804:     PetscFree(olengths1);

806:     /* Allocate Memory for outgoing messages */
807:     PetscMalloc4(size,&sbuf1,size,&ptr,2*msz,&tmp,size,&ctr);
808:     PetscArrayzero(sbuf1,size);
809:     PetscArrayzero(ptr,size);

811:     {
812:       PetscInt *iptr = tmp;
813:       k    = 0;
814:       for (i=0; i<nrqs; i++) {
815:         j        = pa[i];
816:         iptr    += k;
817:         sbuf1[j] = iptr;
818:         k        = w1[j];
819:       }
820:     }

822:     /* Form the outgoing messages. Initialize the header space */
823:     for (i=0; i<nrqs; i++) {
824:       j           = pa[i];
825:       sbuf1[j][0] = 0;
826:       PetscArrayzero(sbuf1[j]+1,2*w3[j]);
827:       ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
828:     }

830:     /* Parse the isrow and copy data into outbuf */
831:     for (i=0; i<ismax; i++) {
832:       row2proc_i = row2proc[i];
833:       PetscArrayzero(ctr,size);
834:       irow_i = irow[i];
835:       jmax   = nrow[i];
836:       for (j=0; j<jmax; j++) {  /* parse the indices of each IS */
837:         proc = row2proc_i[j];
838:         if (allrows[i]) row = j;
839:         else row = irow_i[j];

841:         if (proc != rank) { /* copy to the outgoing buf*/
842:           ctr[proc]++;
843:           *ptr[proc] = row;
844:           ptr[proc]++;
845:         }
846:       }
847:       /* Update the headers for the current IS */
848:       for (j=0; j<size; j++) { /* Can Optimise this loop too */
849:         if ((ctr_j = ctr[j])) {
850:           sbuf1_j        = sbuf1[j];
851:           k              = ++sbuf1_j[0];
852:           sbuf1_j[2*k]   = ctr_j;
853:           sbuf1_j[2*k-1] = i;
854:         }
855:       }
856:     }

858:     /* Now post the sends */
859:     PetscMalloc1(nrqs+1,&s_waits1);
860:     for (i=0; i<nrqs; ++i) {
861:       j    = pa[i];
862:       MPI_Isend(sbuf1[j],w1[j],MPIU_INT,j,tag0,comm,s_waits1+i);
863:     }

865:     /* Post Receives to capture the buffer size */
866:     PetscMalloc1(nrqs+1,&r_waits2);
867:     PetscMalloc3(nrqs+1,&req_source2,nrqs+1,&rbuf2,nrqs+1,&rbuf3);
868:     rbuf2[0] = tmp + msz;
869:     for (i=1; i<nrqs; ++i) {
870:       rbuf2[i] = rbuf2[i-1]+w1[pa[i-1]];
871:     }
872:     for (i=0; i<nrqs; ++i) {
873:       j    = pa[i];
874:       MPI_Irecv(rbuf2[i],w1[j],MPIU_INT,j,tag2,comm,r_waits2+i);
875:     }

877:     /* Send to other procs the buf size they should allocate */
878:     /* Receive messages*/
879:     PetscMalloc1(nrqr+1,&s_waits2);
880:     PetscMalloc1(nrqr+1,&r_status1);
881:     PetscMalloc3(nrqr,&sbuf2,nrqr,&req_size,nrqr,&req_source1);

883:     MPI_Waitall(nrqr,r_waits1,r_status1);
884:     for (i=0; i<nrqr; ++i) {
885:       req_size[i] = 0;
886:       rbuf1_i        = rbuf1[i];
887:       start          = 2*rbuf1_i[0] + 1;
888:       MPI_Get_count(r_status1+i,MPIU_INT,&end);
889:       PetscMalloc1(end+1,&sbuf2[i]);
890:       sbuf2_i        = sbuf2[i];
891:       for (j=start; j<end; j++) {
892:         row             = rbuf1_i[j] - rstart;
893:         ncols           = a_i[row+1] - a_i[row] + b_i[row+1] - b_i[row];
894:         sbuf2_i[j]      = ncols;
895:         req_size[i] += ncols;
896:       }
897:       req_source1[i] = r_status1[i].MPI_SOURCE;
898:       /* form the header */
899:       sbuf2_i[0] = req_size[i];
900:       for (j=1; j<start; j++) sbuf2_i[j] = rbuf1_i[j];

902:       MPI_Isend(sbuf2_i,end,MPIU_INT,req_source1[i],tag2,comm,s_waits2+i);
903:     }

905:     PetscFree(r_status1);
906:     PetscFree(r_waits1);
907:     PetscFree4(w1,w2,w3,w4);

909:     /* Receive messages*/
910:     PetscMalloc1(nrqs+1,&r_waits3);
911:     PetscMalloc1(nrqs+1,&r_status2);

913:     MPI_Waitall(nrqs,r_waits2,r_status2);
914:     for (i=0; i<nrqs; ++i) {
915:       PetscMalloc1(rbuf2[i][0]+1,&rbuf3[i]);
916:       req_source2[i] = r_status2[i].MPI_SOURCE;
917:       MPI_Irecv(rbuf3[i],rbuf2[i][0],MPIU_INT,req_source2[i],tag3,comm,r_waits3+i);
918:     }
919:     PetscFree(r_status2);
920:     PetscFree(r_waits2);

922:     /* Wait on sends1 and sends2 */
923:     PetscMalloc1(nrqs+1,&s_status1);
924:     PetscMalloc1(nrqr+1,&s_status2);

926:     if (nrqs) {MPI_Waitall(nrqs,s_waits1,s_status1);}
927:     if (nrqr) {MPI_Waitall(nrqr,s_waits2,s_status2);}
928:     PetscFree(s_status1);
929:     PetscFree(s_status2);
930:     PetscFree(s_waits1);
931:     PetscFree(s_waits2);

933:     /* Now allocate sending buffers for a->j, and send them off */
934:     PetscMalloc1(nrqr+1,&sbuf_aj);
935:     for (i=0,j=0; i<nrqr; i++) j += req_size[i];
936:     PetscMalloc1(j+1,&sbuf_aj[0]);
937:     for (i=1; i<nrqr; i++) sbuf_aj[i] = sbuf_aj[i-1] + req_size[i-1];

939:     PetscMalloc1(nrqr+1,&s_waits3);
940:     {

942:       for (i=0; i<nrqr; i++) {
943:         rbuf1_i   = rbuf1[i];
944:         sbuf_aj_i = sbuf_aj[i];
945:         ct1       = 2*rbuf1_i[0] + 1;
946:         ct2       = 0;
947:         for (j=1,max1=rbuf1_i[0]; j<=max1; j++) {
948:           kmax = rbuf1[i][2*j];
949:           for (k=0; k<kmax; k++,ct1++) {
950:             row    = rbuf1_i[ct1] - rstart;
951:             nzA    = a_i[row+1] - a_i[row]; nzB = b_i[row+1] - b_i[row];
952:             ncols  = nzA + nzB;
953:             cworkA = a_j + a_i[row]; cworkB = b_j + b_i[row];

955:             /* load the column indices for this row into cols */
956:             cols = sbuf_aj_i + ct2;
957:             for (l=0; l<nzB; l++) {
958:               if ((ctmp = bmap[cworkB[l]]) < cstart) cols[l] = ctmp;
959:               else break;
960:             }
961:             imark = l;
962:             for (l=0; l<nzA; l++) {cols[imark+l] = cstart + cworkA[l];}
963:             for (l=imark; l<nzB; l++) cols[nzA+l] = bmap[cworkB[l]];
964:             ct2 += ncols;
965:           }
966:         }
967:         MPI_Isend(sbuf_aj_i,req_size[i],MPIU_INT,req_source1[i],tag3,comm,s_waits3+i);
968:       }
969:     }
970:     PetscMalloc2(nrqs+1,&r_status3,nrqr+1,&s_status3);

972:     /* create col map: global col of C -> local col of submatrices */
973: #if defined(PETSC_USE_CTABLE)
974:     for (i=0; i<ismax; i++) {
975:       if (!allcolumns[i]) {
976:         PetscTableCreate(ncol[i]+1,c->Nbs+1,&cmap[i]);

978:         jmax   = ncol[i];
979:         icol_i = icol[i];
980:         cmap_i = cmap[i];
981:         for (j=0; j<jmax; j++) {
982:           PetscTableAdd(cmap[i],icol_i[j]+1,j+1,INSERT_VALUES);
983:         }
984:       } else cmap[i] = NULL;
985:     }
986: #else
987:     for (i=0; i<ismax; i++) {
988:       if (!allcolumns[i]) {
989:         PetscCalloc1(c->Nbs,&cmap[i]);
990:         jmax   = ncol[i];
991:         icol_i = icol[i];
992:         cmap_i = cmap[i];
993:         for (j=0; j<jmax; j++) cmap_i[icol_i[j]] = j+1;
994:       } else cmap[i] = NULL;
995:     }
996: #endif
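
    /* Both variants store the local column as j+1 rather than j so that a
       lookup result of 0 can mean "column not present": e.g. with
       icol[i] = {5,9}, column 5 maps to 1 and column 9 to 2, and consumers
       later undo the shift with tcol-1 */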

998:     /* Create lens which is required for MatCreate... */
999:     for (i=0,j=0; i<ismax; i++) j += nrow[i];
1000:     PetscMalloc1(ismax,&lens);

1002:     if (ismax) {
1003:       PetscCalloc1(j,&lens[0]);
1004:     }
1005:     for (i=1; i<ismax; i++) lens[i] = lens[i-1] + nrow[i-1];

1007:     /* Update lens from local data */
1008:     for (i=0; i<ismax; i++) {
1009:       row2proc_i = row2proc[i];
1010:       jmax = nrow[i];
1011:       if (!allcolumns[i]) cmap_i = cmap[i];
1012:       irow_i = irow[i];
1013:       lens_i = lens[i];
1014:       for (j=0; j<jmax; j++) {
1015:         if (allrows[i]) row = j;
1016:         else row = irow_i[j]; /* global blocked row of C */

1018:         proc = row2proc_i[j];
1019:         if (proc == rank) {
1020:           /* Get indices from matA and then from matB */
1021: #if defined(PETSC_USE_CTABLE)
1022:           PetscInt   tt;
1023: #endif
1024:           row    = row - rstart;
1025:           nzA    = a_i[row+1] - a_i[row];
1026:           nzB    = b_i[row+1] - b_i[row];
1027:           cworkA =  a_j + a_i[row];
1028:           cworkB = b_j + b_i[row];

1030:           if (!allcolumns[i]) {
1031: #if defined(PETSC_USE_CTABLE)
1032:             for (k=0; k<nzA; k++) {
1033:               PetscTableFind(cmap_i,cstart+cworkA[k]+1,&tt);
1034:               if (tt) lens_i[j]++;
1035:             }
1036:             for (k=0; k<nzB; k++) {
1037:               PetscTableFind(cmap_i,bmap[cworkB[k]]+1,&tt);
1038:               if (tt) lens_i[j]++;
1039:             }

1041: #else
1042:             for (k=0; k<nzA; k++) {
1043:               if (cmap_i[cstart + cworkA[k]]) lens_i[j]++;
1044:             }
1045:             for (k=0; k<nzB; k++) {
1046:               if (cmap_i[bmap[cworkB[k]]]) lens_i[j]++;
1047:             }
1048: #endif
1049:           } else { /* allcolumns */
1050:             lens_i[j] = nzA + nzB;
1051:           }
1052:         }
1053:       }
1054:     }

1056:     /* Create row map: global row of C -> local row of submatrices */
1057:     for (i=0; i<ismax; i++) {
1058:       if (!allrows[i]) {
1059: #if defined(PETSC_USE_CTABLE)
1060:         PetscTableCreate(nrow[i]+1,c->Mbs+1,&rmap[i]);
1061:         irow_i = irow[i];
1062:         jmax   = nrow[i];
1063:         for (j=0; j<jmax; j++) {
1064:           if (allrows[i]) {
1065:             PetscTableAdd(rmap[i],j+1,j+1,INSERT_VALUES);
1066:           } else {
1067:             PetscTableAdd(rmap[i],irow_i[j]+1,j+1,INSERT_VALUES);
1068:           }
1069:         }
1070: #else
1071:         PetscCalloc1(c->Mbs,&rmap[i]);
1072:         rmap_i = rmap[i];
1073:         irow_i = irow[i];
1074:         jmax   = nrow[i];
1075:         for (j=0; j<jmax; j++) {
1076:           if (allrows[i]) rmap_i[j] = j;
1077:           else rmap_i[irow_i[j]] = j;
1078:         }
1079: #endif
1080:       } else rmap[i] = NULL;
1081:     }

1083:     /* Update lens from offproc data */
1084:     {
1085:       PetscInt *rbuf2_i,*rbuf3_i,*sbuf1_i;

1087:       MPI_Waitall(nrqs,r_waits3,r_status3);
1088:       for (tmp2=0; tmp2<nrqs; tmp2++) {
1089:         sbuf1_i = sbuf1[pa[tmp2]];
1090:         jmax    = sbuf1_i[0];
1091:         ct1     = 2*jmax+1;
1092:         ct2     = 0;
1093:         rbuf2_i = rbuf2[tmp2];
1094:         rbuf3_i = rbuf3[tmp2];
1095:         for (j=1; j<=jmax; j++) {
1096:           is_no  = sbuf1_i[2*j-1];
1097:           max1   = sbuf1_i[2*j];
1098:           lens_i = lens[is_no];
1099:           if (!allcolumns[is_no]) cmap_i = cmap[is_no];
1100:           rmap_i = rmap[is_no];
1101:           for (k=0; k<max1; k++,ct1++) {
1102:             if (allrows[is_no]) {
1103:               row = sbuf1_i[ct1];
1104:             } else {
1105: #if defined(PETSC_USE_CTABLE)
1106:               PetscTableFind(rmap_i,sbuf1_i[ct1]+1,&row);
1107:               row--;
1108:               if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table");
1109: #else
1110:               row = rmap_i[sbuf1_i[ct1]]; /* the val in the new matrix to be */
1111: #endif
1112:             }
1113:             max2 = rbuf2_i[ct1];
1114:             for (l=0; l<max2; l++,ct2++) {
1115:               if (!allcolumns[is_no]) {
1116: #if defined(PETSC_USE_CTABLE)
1117:                 PetscTableFind(cmap_i,rbuf3_i[ct2]+1,&tcol);
1118: #else
1119:                 tcol = cmap_i[rbuf3_i[ct2]];
1120: #endif
1121:                 if (tcol) lens_i[row]++;
1122:               } else { /* allcolumns */
1123:                 lens_i[row]++; /* lens_i[row] += max2 ? */
1124:               }
1125:             }
1126:           }
1127:         }
1128:       }
1129:     }
1130:     PetscFree(r_waits3);
1131:     if (nrqr) {MPI_Waitall(nrqr,s_waits3,s_status3);}
1132:     PetscFree2(r_status3,s_status3);
1133:     PetscFree(s_waits3);

1135:     /* Create the submatrices */
1136:     for (i=0; i<ismax; i++) {
1137:       PetscInt bs_tmp;
1138:       if (ijonly) bs_tmp = 1;
1139:       else        bs_tmp = bs;

1141:       MatCreate(PETSC_COMM_SELF,submats+i);
1142:       MatSetSizes(submats[i],nrow[i]*bs_tmp,ncol[i]*bs_tmp,PETSC_DETERMINE,PETSC_DETERMINE);

1144:       MatSetType(submats[i],((PetscObject)A)->type_name);
1145:       MatSeqBAIJSetPreallocation(submats[i],bs_tmp,0,lens[i]);
1146:       MatSeqSBAIJSetPreallocation(submats[i],bs_tmp,0,lens[i]); /* this subroutine is used by SBAIJ routines */

1148:       /* create struct Mat_SubSppt and attach it to submat */
1149:       PetscNew(&smat_i);
1150:       subc = (Mat_SeqBAIJ*)submats[i]->data;
1151:       subc->submatis1 = smat_i;

1153:       smat_i->destroy          = submats[i]->ops->destroy;
1154:       submats[i]->ops->destroy = MatDestroySubMatrix_SeqBAIJ;
1155:       submats[i]->factortype   = C->factortype;

1157:       smat_i->id          = i;
1158:       smat_i->nrqs        = nrqs;
1159:       smat_i->nrqr        = nrqr;
1160:       smat_i->rbuf1       = rbuf1;
1161:       smat_i->rbuf2       = rbuf2;
1162:       smat_i->rbuf3       = rbuf3;
1163:       smat_i->sbuf2       = sbuf2;
1164:       smat_i->req_source2 = req_source2;

1166:       smat_i->sbuf1       = sbuf1;
1167:       smat_i->ptr         = ptr;
1168:       smat_i->tmp         = tmp;
1169:       smat_i->ctr         = ctr;

1171:       smat_i->pa           = pa;
1172:       smat_i->req_size     = req_size;
1173:       smat_i->req_source1  = req_source1;

1175:       smat_i->allcolumns  = allcolumns[i];
1176:       smat_i->allrows     = allrows[i];
1177:       smat_i->singleis    = PETSC_FALSE;
1178:       smat_i->row2proc    = row2proc[i];
1179:       smat_i->rmap        = rmap[i];
1180:       smat_i->cmap        = cmap[i];
1181:     }

1183:     if (!ismax) { /* Create a dummy submats[0] to carry the Mat_SubSppt reuse struct */
1184:       MatCreate(PETSC_COMM_SELF,&submats[0]);
1185:       MatSetSizes(submats[0],0,0,PETSC_DETERMINE,PETSC_DETERMINE);
1186:       MatSetType(submats[0],MATDUMMY);

1188:       /* create struct Mat_SubSppt and attach it to submat */
1189:       PetscNewLog(submats[0],&smat_i);
1190:       submats[0]->data = (void*)smat_i;

1192:       smat_i->destroy          = submats[0]->ops->destroy;
1193:       submats[0]->ops->destroy = MatDestroySubMatrix_Dummy;
1194:       submats[0]->factortype   = C->factortype;

1196:       smat_i->id          = 0;
1197:       smat_i->nrqs        = nrqs;
1198:       smat_i->nrqr        = nrqr;
1199:       smat_i->rbuf1       = rbuf1;
1200:       smat_i->rbuf2       = rbuf2;
1201:       smat_i->rbuf3       = rbuf3;
1202:       smat_i->sbuf2       = sbuf2;
1203:       smat_i->req_source2 = req_source2;

1205:       smat_i->sbuf1       = sbuf1;
1206:       smat_i->ptr         = ptr;
1207:       smat_i->tmp         = tmp;
1208:       smat_i->ctr         = ctr;

1210:       smat_i->pa           = pa;
1211:       smat_i->req_size     = req_size;
1212:       smat_i->req_source1  = req_source1;

1214:       smat_i->allcolumns  = PETSC_FALSE;
1215:       smat_i->singleis    = PETSC_FALSE;
1216:       smat_i->row2proc    = NULL;
1217:       smat_i->rmap        = NULL;
1218:       smat_i->cmap        = NULL;
1219:     }

1221:     if (ismax) {PetscFree(lens[0]);}
1222:     PetscFree(lens);
1223:     PetscFree(sbuf_aj[0]);
1224:     PetscFree(sbuf_aj);

1226:   } /* end of scall == MAT_INITIAL_MATRIX */

1228:   /* Post recv matrix values */
1229:   if (!ijonly) {
1230:     PetscObjectGetNewTag((PetscObject)C,&tag4);
1231:     PetscMalloc1(nrqs+1,&rbuf4);
1232:     PetscMalloc1(nrqs+1,&r_waits4);
1233:     PetscMalloc1(nrqs+1,&r_status4);
1234:     PetscMalloc1(nrqr+1,&s_status4);
1235:     for (i=0; i<nrqs; ++i) {
1236:       PetscMalloc1(rbuf2[i][0]*bs2,&rbuf4[i]);
1237:       MPI_Irecv(rbuf4[i],rbuf2[i][0]*bs2,MPIU_SCALAR,req_source2[i],tag4,comm,r_waits4+i);
1238:     }

1240:     /* Allocate sending buffers for a->a, and send them off */
1241:     PetscMalloc1(nrqr+1,&sbuf_aa);
1242:     for (i=0,j=0; i<nrqr; i++) j += req_size[i];

1244:     PetscMalloc1((j+1)*bs2,&sbuf_aa[0]);
1245:     for (i=1; i<nrqr; i++) sbuf_aa[i] = sbuf_aa[i-1] + req_size[i-1]*bs2;

1247:     PetscMalloc1(nrqr+1,&s_waits4);

1249:     for (i=0; i<nrqr; i++) {
1250:       rbuf1_i   = rbuf1[i];
1251:       sbuf_aa_i = sbuf_aa[i];
1252:       ct1       = 2*rbuf1_i[0]+1;
1253:       ct2       = 0;
1254:       for (j=1,max1=rbuf1_i[0]; j<=max1; j++) {
1255:         kmax = rbuf1_i[2*j];
1256:         for (k=0; k<kmax; k++,ct1++) {
1257:           row    = rbuf1_i[ct1] - rstart;
1258:           nzA    = a_i[row+1] - a_i[row];
1259:           nzB    = b_i[row+1] - b_i[row];
1260:           ncols  = nzA + nzB;
1261:           cworkB = b_j + b_i[row];
1262:           vworkA = a_a + a_i[row]*bs2;
1263:           vworkB = b_a + b_i[row]*bs2;

1265:           /* load the column values for this row into vals*/
1266:           vals = sbuf_aa_i+ct2*bs2;
1267:           for (l=0; l<nzB; l++) {
1268:             if ((bmap[cworkB[l]]) < cstart) {
1269:               PetscArraycpy(vals+l*bs2,vworkB+l*bs2,bs2);
1270:             } else break;
1271:           }
1272:           imark = l;
1273:           for (l=0; l<nzA; l++) {
1274:             PetscArraycpy(vals+(imark+l)*bs2,vworkA+l*bs2,bs2);
1275:           }
1276:           for (l=imark; l<nzB; l++) {
1277:             PetscArraycpy(vals+(nzA+l)*bs2,vworkB+l*bs2,bs2);
1278:           }

1280:           ct2 += ncols;
1281:         }
1282:       }
1283:       MPI_Isend(sbuf_aa_i,req_size[i]*bs2,MPIU_SCALAR,req_source1[i],tag4,comm,s_waits4+i);
1284:     }
1285:   }

1287:   /* Assemble the matrices */
1288:   /* First assemble the local rows */
1289:   for (i=0; i<ismax; i++) {
1290:     row2proc_i = row2proc[i];
1291:     subc      = (Mat_SeqBAIJ*)submats[i]->data;
1292:     imat_ilen = subc->ilen;
1293:     imat_j    = subc->j;
1294:     imat_i    = subc->i;
1295:     imat_a    = subc->a;

1297:     if (!allcolumns[i]) cmap_i = cmap[i];
1298:     rmap_i = rmap[i];
1299:     irow_i = irow[i];
1300:     jmax   = nrow[i];
1301:     for (j=0; j<jmax; j++) {
1302:       if (allrows[i]) row = j;
1303:       else row  = irow_i[j];
1304:       proc = row2proc_i[j];

1306:       if (proc == rank) {

1308:         row    = row - rstart;
1309:         nzA    = a_i[row+1] - a_i[row];
1310:         nzB    = b_i[row+1] - b_i[row];
1311:         cworkA = a_j + a_i[row];
1312:         cworkB = b_j + b_i[row];
1313:         if (!ijonly) {
1314:           vworkA = a_a + a_i[row]*bs2;
1315:           vworkB = b_a + b_i[row]*bs2;
1316:         }

1318:         if (allrows[i]) {
1319:           row = row+rstart;
1320:         } else {
1321: #if defined(PETSC_USE_CTABLE)
1322:           PetscTableFind(rmap_i,row+rstart+1,&row);
1323:           row--;

1325:           if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table");
1326: #else
1327:           row = rmap_i[row + rstart];
1328: #endif
1329:         }
1330:         mat_i = imat_i[row];
1331:         if (!ijonly) mat_a = imat_a + mat_i*bs2;
1332:         mat_j    = imat_j + mat_i;
1333:         ilen = imat_ilen[row];

1335:         /* load the column indices for this row into cols*/
1336:         if (!allcolumns[i]) {
1337:           for (l=0; l<nzB; l++) {
1338:             if ((ctmp = bmap[cworkB[l]]) < cstart) {
1339: #if defined(PETSC_USE_CTABLE)
1340:               PetscTableFind(cmap_i,ctmp+1,&tcol);
1341:               if (tcol) {
1342: #else
1343:               if ((tcol = cmap_i[ctmp])) {
1344: #endif
1345:                 *mat_j++ = tcol - 1;
1346:                 PetscArraycpy(mat_a,vworkB+l*bs2,bs2);
1347:                 mat_a   += bs2;
1348:                 ilen++;
1349:               }
1350:             } else break;
1351:           }
1352:           imark = l;
1353:           for (l=0; l<nzA; l++) {
1354: #if defined(PETSC_USE_CTABLE)
1355:             PetscTableFind(cmap_i,cstart+cworkA[l]+1,&tcol);
1356:             if (tcol) {
1357: #else
1358:             if ((tcol = cmap_i[cstart + cworkA[l]])) {
1359: #endif
1360:               *mat_j++ = tcol - 1;
1361:               if (!ijonly) {
1362:                 PetscArraycpy(mat_a,vworkA+l*bs2,bs2);
1363:                 mat_a += bs2;
1364:               }
1365:               ilen++;
1366:             }
1367:           }
1368:           for (l=imark; l<nzB; l++) {
1369: #if defined(PETSC_USE_CTABLE)
1370:             PetscTableFind(cmap_i,bmap[cworkB[l]]+1,&tcol);
1371:             if (tcol) {
1372: #else
1373:             if ((tcol = cmap_i[bmap[cworkB[l]]])) {
1374: #endif
1375:               *mat_j++ = tcol - 1;
1376:               if (!ijonly) {
1377:                 PetscArraycpy(mat_a,vworkB+l*bs2,bs2);
1378:                 mat_a += bs2;
1379:               }
1380:               ilen++;
1381:             }
1382:           }
1383:         } else { /* allcolumns */
1384:           for (l=0; l<nzB; l++) {
1385:             if ((ctmp = bmap[cworkB[l]]) < cstart) {
1386:               *mat_j++ = ctmp;
1387:               PetscArraycpy(mat_a,vworkB+l*bs2,bs2);
1388:               mat_a   += bs2;
1389:               ilen++;
1390:             } else break;
1391:           }
1392:           imark = l;
1393:           for (l=0; l<nzA; l++) {
1394:             *mat_j++ = cstart+cworkA[l];
1395:             if (!ijonly) {
1396:               PetscArraycpy(mat_a,vworkA+l*bs2,bs2);
1397:               mat_a += bs2;
1398:             }
1399:             ilen++;
1400:           }
1401:           for (l=imark; l<nzB; l++) {
1402:             *mat_j++ = bmap[cworkB[l]];
1403:             if (!ijonly) {
1404:               PetscArraycpy(mat_a,vworkB+l*bs2,bs2);
1405:               mat_a += bs2;
1406:             }
1407:             ilen++;
1408:           }
1409:         }
1410:         imat_ilen[row] = ilen;
1411:       }
1412:     }
1413:   }

1415:   /* Now assemble the off-proc rows */
1416:   if (!ijonly) {
1417:     MPI_Waitall(nrqs,r_waits4,r_status4);
1418:   }
1419:   for (tmp2=0; tmp2<nrqs; tmp2++) {
1420:     sbuf1_i = sbuf1[pa[tmp2]];
1421:     jmax    = sbuf1_i[0];
1422:     ct1     = 2*jmax + 1;
1423:     ct2     = 0;
1424:     rbuf2_i = rbuf2[tmp2];
1425:     rbuf3_i = rbuf3[tmp2];
1426:     if (!ijonly) rbuf4_i = rbuf4[tmp2];
1427:     for (j=1; j<=jmax; j++) {
1428:       is_no     = sbuf1_i[2*j-1];
1429:       rmap_i    = rmap[is_no];
1430:       if (!allcolumns[is_no]) cmap_i = cmap[is_no];
1431:       subc      = (Mat_SeqBAIJ*)submats[is_no]->data;
1432:       imat_ilen = subc->ilen;
1433:       imat_j    = subc->j;
1434:       imat_i    = subc->i;
1435:       if (!ijonly) imat_a    = subc->a;
1436:       max1      = sbuf1_i[2*j];
1437:       for (k=0; k<max1; k++,ct1++) { /* for each recved block row */
1438:         row = sbuf1_i[ct1];

1440:         if (allrows[is_no]) {
1441:           row = sbuf1_i[ct1];
1442:         } else {
1443: #if defined(PETSC_USE_CTABLE)
1444:           PetscTableFind(rmap_i,row+1,&row);
1445:           row--;
1446:           if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table");
1447: #else
1448:           row = rmap_i[row];
1449: #endif
1450:         }
1451:         ilen  = imat_ilen[row];
1452:         mat_i = imat_i[row];
1453:         if (!ijonly) mat_a = imat_a + mat_i*bs2;
1454:         mat_j = imat_j + mat_i;
1455:         max2  = rbuf2_i[ct1];
1456:         if (!allcolumns[is_no]) {
1457:           for (l=0; l<max2; l++,ct2++) {
1458: #if defined(PETSC_USE_CTABLE)
1459:             PetscTableFind(cmap_i,rbuf3_i[ct2]+1,&tcol);
1460: #else
1461:             tcol = cmap_i[rbuf3_i[ct2]];
1462: #endif
1463:             if (tcol) {
1464:               *mat_j++ = tcol - 1;
1465:               if (!ijonly) {
1466:                 PetscArraycpy(mat_a,rbuf4_i+ct2*bs2,bs2);
1467:                 mat_a += bs2;
1468:               }
1469:               ilen++;
1470:             }
1471:           }
1472:         } else { /* allcolumns */
1473:           for (l=0; l<max2; l++,ct2++) {
1474:             *mat_j++ = rbuf3_i[ct2]; /* same global column index of C */
1475:             if (!ijonly) {
1476:               PetscArraycpy(mat_a,rbuf4_i+ct2*bs2,bs2);
1477:               mat_a += bs2;
1478:             }
1479:             ilen++;
1480:           }
1481:         }
1482:         imat_ilen[row] = ilen;
1483:       }
1484:     }
1485:   }

1487:   if (!iscsorted) { /* sort column indices of the rows */
1488:     MatScalar *work;

1490:     PetscMalloc1(bs2,&work);
1491:     for (i=0; i<ismax; i++) {
1492:       subc      = (Mat_SeqBAIJ*)submats[i]->data;
1493:       imat_ilen = subc->ilen;
1494:       imat_j    = subc->j;
1495:       imat_i    = subc->i;
1496:       if (!ijonly) imat_a = subc->a;
1497:       if (allcolumns[i]) continue;

1499:       jmax = nrow[i];
1500:       for (j=0; j<jmax; j++) {
1501:         mat_i = imat_i[j];
1502:         mat_j = imat_j + mat_i;
1503:         ilen  = imat_ilen[j];
1504:         if (ijonly) {
1505:           PetscSortInt(ilen,mat_j);
1506:         } else {
1507:           mat_a = imat_a + mat_i*bs2;
1508:           PetscSortIntWithDataArray(ilen,mat_j,mat_a,bs2*sizeof(MatScalar),work);
1509:         }
1510:       }
1511:     }
1512:     PetscFree(work);
1513:   }
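
  /* PetscSortIntWithDataArray() keeps each bs2-sized block of values attached
     to its column index while sorting. A small illustrative call with
     stand-alone names:

       PetscInt  cols[] = {7,2,5};
       MatScalar vals[12] = {0},work[4];          // bs2 = 4
       PetscSortIntWithDataArray(3,cols,vals,4*sizeof(MatScalar),work);
       // cols = {2,5,7}; the vals blocks are permuted identically
  */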

1515:   if (!ijonly) {
1516:     PetscFree(r_status4);
1517:     PetscFree(r_waits4);
1518:     if (nrqr) {MPI_Waitall(nrqr,s_waits4,s_status4);}
1519:     PetscFree(s_waits4);
1520:     PetscFree(s_status4);
1521:   }

1523:   /* Restore the indices */
1524:   for (i=0; i<ismax; i++) {
1525:     if (!allrows[i]) {
1526:       ISRestoreIndices(isrow[i],irow+i);
1527:     }
1528:     if (!allcolumns[i]) {
1529:       ISRestoreIndices(iscol[i],icol+i);
1530:     }
1531:   }

1533:   for (i=0; i<ismax; i++) {
1534:     MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);
1535:     MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);
1536:   }

1538:   PetscFree5(*(PetscInt***)&irow,*(PetscInt***)&icol,nrow,ncol,issorted);
1539:   PetscFree5(row2proc,cmap,rmap,allcolumns,allrows);

1541:   if (!ijonly) {
1542:     PetscFree(sbuf_aa[0]);
1543:     PetscFree(sbuf_aa);

1545:     for (i=0; i<nrqs; ++i) {
1546:       PetscFree(rbuf4[i]);
1547:     }
1548:     PetscFree(rbuf4);
1549:   }
1550:   c->ijonly = PETSC_FALSE; /* set back to the default */
1551:   return(0);
1552: }