Actual source code: iscoloring.c


  2: #include <petsc/private/isimpl.h>
  3: #include <petscviewer.h>
  4: #include <petscsf.h>

  6: const char *const ISColoringTypes[] = {"global","ghosted","ISColoringType","IS_COLORING_",NULL};

  8: PetscErrorCode ISColoringReference(ISColoring coloring)
  9: {
 11:   coloring->refct++;
 12:   return(0);
 13: }

 15: /*@C

 17:    ISColoringSetType - specifies whether the coloring is for the local representation (including ghost points) or the global representation

 19:    Collective on coloring

 21:    Input Parameters:
 22: +    coloring - the coloring object
 23: -    type - either IS_COLORING_LOCAL or IS_COLORING_GLOBAL

 25:    Notes:
 26:      With IS_COLORING_LOCAL the coloring is in the numbering of the local vector; for IS_COLORING_GLOBAL it is in the numbering of the global vector

 28:    Level: intermediate

 30: .seealso: MatFDColoringCreate(), ISColoring, ISColoringCreate(), IS_COLORING_LOCAL, IS_COLORING_GLOBAL, ISColoringGetType()

 32: @*/
 33: PetscErrorCode ISColoringSetType(ISColoring coloring,ISColoringType type)
 34: {
 36:   coloring->ctype = type;
 37:   return(0);
 38: }

 40: /*@C

 42:    ISColoringGetType - returns whether the coloring is for the local representation (including ghost points) or the global representation

 44:    Collective on coloring

 46:    Input Parameter:
 47: .   coloring - the coloring object

 49:    Output Parameter:
 50: .    type - either IS_COLORING_LOCAL or IS_COLORING_GLOBAL

 52:    Level: intermediate

 54: .seealso: MatFDColoringCreate(), ISColoring, ISColoringCreate(), IS_COLORING_LOCAL, IS_COLORING_GLOBAL, ISColoringSetType()

 56: @*/
 57: PetscErrorCode ISColoringGetType(ISColoring coloring,ISColoringType *type)
 58: {
 60:   *type = coloring->ctype;
 61:   return(0);
 62: }
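
A minimal usage sketch for the two routines above (not part of iscoloring.c; the helper name is illustrative), assuming a coloring created earlier by, e.g., ISColoringCreate():

#include <petscis.h>

/* Sketch: switch a coloring to the local (ghosted) numbering and verify it */
PetscErrorCode ToggleColoringType(ISColoring coloring)
{
  ISColoringType type;
  PetscErrorCode ierr;

  ierr = ISColoringSetType(coloring,IS_COLORING_LOCAL);CHKERRQ(ierr);
  ierr = ISColoringGetType(coloring,&type);CHKERRQ(ierr);
  if (type != IS_COLORING_LOCAL) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Unexpected coloring type");
  return 0;
}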

 64: /*@
 65:    ISColoringDestroy - Destroys a coloring context.

 67:    Collective on ISColoring

 69:    Input Parameter:
 70: .  iscoloring - the coloring context

 72:    Level: advanced

 74: .seealso: ISColoringView(), MatColoring
 75: @*/
 76: PetscErrorCode  ISColoringDestroy(ISColoring *iscoloring)
 77: {
 78:   PetscInt       i;

 82:   if (!*iscoloring) return(0);
 84:   if (--(*iscoloring)->refct > 0) {*iscoloring = NULL; return(0);}

 86:   if ((*iscoloring)->is) {
 87:     for (i=0; i<(*iscoloring)->n; i++) {
 88:       ISDestroy(&(*iscoloring)->is[i]);
 89:     }
 90:     PetscFree((*iscoloring)->is);
 91:   }
 92:   if ((*iscoloring)->allocated) {PetscFree((*iscoloring)->colors);}
 93:   PetscCommDestroy(&(*iscoloring)->comm);
 94:   PetscFree((*iscoloring));
 95:   return(0);
 96: }

 98: /*
 99:   ISColoringViewFromOptions - Processes command line options to determine if/how an ISColoring object is to be viewed.

101:   Collective on ISColoring

103:   Input Parameters:
104: + obj   - the ISColoring object
105: . bobj - optional object that supplies the options prefix to use, or NULL
106: - optionname - option to activate viewing

108:   Level: intermediate

110:   Developer Note: This cannot use PetscObjectViewFromOptions() because ISColoring is not a PetscObject

112: */
113: PetscErrorCode ISColoringViewFromOptions(ISColoring obj,PetscObject bobj,const char optionname[])
114: {
115:   PetscErrorCode    ierr;
116:   PetscViewer       viewer;
117:   PetscBool         flg;
118:   PetscViewerFormat format;
119:   char              *prefix;

122:   prefix = bobj ? bobj->prefix : NULL;
123:   PetscOptionsGetViewer(obj->comm,NULL,prefix,optionname,&viewer,&format,&flg);
124:   if (flg) {
125:     PetscViewerPushFormat(viewer,format);
126:     ISColoringView(obj,viewer);
127:     PetscViewerPopFormat(viewer);
128:     PetscViewerDestroy(&viewer);
129:   }
130:   return(0);
131: }

133: /*@C
134:    ISColoringView - Views a coloring context.

136:    Collective on ISColoring

138:    Input Parameters:
139: +  iscoloring - the coloring context
140: -  viewer - the viewer

142:    Level: advanced

144: .seealso: ISColoringDestroy(), ISColoringGetIS(), MatColoring
145: @*/
146: PetscErrorCode  ISColoringView(ISColoring iscoloring,PetscViewer viewer)
147: {
148:   PetscInt       i;
150:   PetscBool      iascii;
151:   IS             *is;

155:   if (!viewer) {
156:     PetscViewerASCIIGetStdout(iscoloring->comm,&viewer);
157:   }

160:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
161:   if (iascii) {
162:     MPI_Comm    comm;
163:     PetscMPIInt size,rank;

165:     PetscObjectGetComm((PetscObject)viewer,&comm);
166:     MPI_Comm_size(comm,&size);
167:     MPI_Comm_rank(comm,&rank);
168:     PetscViewerASCIIPrintf(viewer,"ISColoring Object: %d MPI processes\n",size);
169:     PetscViewerASCIIPrintf(viewer,"ISColoringType: %s\n",ISColoringTypes[iscoloring->ctype]);
170:     PetscViewerASCIIPushSynchronized(viewer);
171:     PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Number of colors %D\n",rank,iscoloring->n);
172:     PetscViewerFlush(viewer);
173:     PetscViewerASCIIPopSynchronized(viewer);
174:   }

176:   ISColoringGetIS(iscoloring,PETSC_USE_POINTER,PETSC_IGNORE,&is);
177:   for (i=0; i<iscoloring->n; i++) {
178:     ISView(iscoloring->is[i],viewer);
179:   }
180:   ISColoringRestoreIS(iscoloring,PETSC_USE_POINTER,&is);
181:   return(0);
182: }
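
A short usage sketch (not part of iscoloring.c): as the code above shows, passing NULL as the viewer falls back to the ASCII viewer on standard output.

#include <petscis.h>

/* Sketch: print a coloring to stdout; NULL selects PetscViewerASCIIGetStdout() */
PetscErrorCode PrintColoring(ISColoring iscoloring)
{
  PetscErrorCode ierr;

  ierr = ISColoringView(iscoloring,NULL);CHKERRQ(ierr);
  return 0;
}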

184: /*@C
185:    ISColoringGetColors - Returns an array with the color for each node

187:    Not Collective

189:    Input Parameter:
190: .  iscoloring - the coloring context

192:    Output Parameters:
193: +  n - number of nodes
194: .  nc - number of colors
195: -  colors - color for each node

197:    Level: advanced

199: .seealso: ISColoringRestoreIS(), ISColoringView(), ISColoringGetIS()
200: @*/
201: PetscErrorCode  ISColoringGetColors(ISColoring iscoloring,PetscInt *n,PetscInt *nc,const ISColoringValue **colors)
202: {

206:   if (n) *n = iscoloring->N;
207:   if (nc) *nc = iscoloring->n;
208:   if (colors) *colors = iscoloring->colors;
209:   return(0);
210: }
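
A usage sketch (not part of iscoloring.c; the helper is illustrative): the returned colors array is owned by the coloring, so the caller frees nothing.

#include <petscis.h>

/* Sketch: count how many local nodes carry color 0 */
PetscErrorCode CountColorZero(ISColoring iscoloring,PetscInt *count)
{
  PetscInt               n,nc,i;
  const ISColoringValue *colors;
  PetscErrorCode         ierr;

  ierr   = ISColoringGetColors(iscoloring,&n,&nc,&colors);CHKERRQ(ierr);
  *count = 0;
  for (i=0; i<n; i++) if (colors[i] == 0) (*count)++;
  return 0;
}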

212: /*@C
213:    ISColoringGetIS - Extracts index sets from the coloring context. Each IS contains the nodes of one color

215:    Collective on ISColoring

217:    Input Parameters:
218: +  iscoloring - the coloring context
219: -  mode - if this value is PETSC_OWN_POINTER then the caller owns the pointer and must free the array of IS and each IS in the array

221:    Output Parameters:
222: +  nn - number of index sets in the coloring context
223: -  is - array of index sets

225:    Level: advanced

227: .seealso: ISColoringRestoreIS(), ISColoringView(), ISColoringGetColoring()
228: @*/
229: PetscErrorCode  ISColoringGetIS(ISColoring iscoloring,PetscCopyMode mode, PetscInt *nn,IS *isis[])
230: {


236:   if (nn) *nn = iscoloring->n;
237:   if (isis) {
238:     if (!iscoloring->is) {
239:       PetscInt        *mcolors,**ii,nc = iscoloring->n,i,base, n = iscoloring->N;
240:       ISColoringValue *colors = iscoloring->colors;
241:       IS              *is;

243:       if (PetscDefined(USE_DEBUG)) {
244:         for (i=0; i<n; i++) {
245:           if (((PetscInt)colors[i]) >= nc) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coloring is out of range: index %d value %d number of colors %d",(int)i,(int)colors[i],(int)nc);
246:         }
247:       }

249:       /* generate the lists of nodes for each color */
250:       PetscCalloc1(nc,&mcolors);
251:       for (i=0; i<n; i++) mcolors[colors[i]]++;

253:       PetscMalloc1(nc,&ii);
254:       PetscMalloc1(n,&ii[0]);
255:       for (i=1; i<nc; i++) ii[i] = ii[i-1] + mcolors[i-1];
256:       PetscArrayzero(mcolors,nc);

258:       if (iscoloring->ctype == IS_COLORING_GLOBAL) {
259:         MPI_Scan(&iscoloring->N,&base,1,MPIU_INT,MPI_SUM,iscoloring->comm);
260:         base -= iscoloring->N;
261:         for (i=0; i<n; i++) ii[colors[i]][mcolors[colors[i]]++] = i + base; /* global idx */
262:       } else if (iscoloring->ctype == IS_COLORING_LOCAL) {
263:         for (i=0; i<n; i++) ii[colors[i]][mcolors[colors[i]]++] = i;   /* local idx */
264:       } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not supported for this ISColoringType");

266:       PetscMalloc1(nc,&is);
267:       for (i=0; i<nc; i++) {
268:         ISCreateGeneral(iscoloring->comm,mcolors[i],ii[i],PETSC_COPY_VALUES,is+i);
269:       }

271:       if (mode != PETSC_OWN_POINTER) iscoloring->is = is;
272:       *isis = is;
273:       PetscFree(ii[0]);
274:       PetscFree(ii);
275:       PetscFree(mcolors);
276:     } else {
277:       *isis = iscoloring->is;
278:     }
279:   }
280:   return(0);
281: }

283: /*@C
284:    ISColoringRestoreIS - Restores the index sets extracted from the coloring context

286:    Collective on ISColoring

288:    Input Parameters:
289: +  iscoloring - the coloring context
290: .  mode - who retains ownership of the array of IS
291: -  is - array of index sets

293:    Level: advanced

295: .seealso: ISColoringGetIS(), ISColoringView()
296: @*/
297: PetscErrorCode  ISColoringRestoreIS(ISColoring iscoloring,PetscCopyMode mode,IS *is[])
298: {

302:   /* currently nothing is done here */
303:   return(0);
304: }
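
A usage sketch for the get/restore pair (not part of iscoloring.c): with PETSC_USE_POINTER the coloring keeps ownership of the IS array, so only the restore call is needed afterwards.

#include <petscis.h>
#include <petscviewer.h>

/* Sketch: visit the index set of every color */
PetscErrorCode ViewEachColor(ISColoring iscoloring,PetscViewer viewer)
{
  PetscInt       nn,i;
  IS             *isa;
  PetscErrorCode ierr;

  ierr = ISColoringGetIS(iscoloring,PETSC_USE_POINTER,&nn,&isa);CHKERRQ(ierr);
  for (i=0; i<nn; i++) {
    ierr = ISView(isa[i],viewer);CHKERRQ(ierr);
  }
  ierr = ISColoringRestoreIS(iscoloring,PETSC_USE_POINTER,&isa);CHKERRQ(ierr);
  return 0;
}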

306: /*@
307:     ISColoringCreate - Generates an ISColoring context from lists (provided
308:     by each processor) of colors for each node.

310:     Collective

312:     Input Parameters:
313: +   comm - communicator for the processors creating the coloring
314: .   ncolors - max color value
315: .   n - number of nodes on this processor
316: .   colors - array containing the colors for this processor; color numbers begin at 0.
317: -   mode - see PetscCopyMode for meaning of this flag.

319:     Output Parameter:
320: .   iscoloring - the resulting coloring data structure

322:     Options Database Key:
323: .   -is_coloring_view - Activates ISColoringView()

325:    Level: advanced

327:     Notes:
328:     By default the coloring type is set to IS_COLORING_GLOBAL

330: .seealso: MatColoringCreate(), ISColoringView(), ISColoringDestroy(), ISColoringSetType()

332: @*/
333: PetscErrorCode  ISColoringCreate(MPI_Comm comm,PetscInt ncolors,PetscInt n,const ISColoringValue colors[],PetscCopyMode mode,ISColoring *iscoloring)
334: {
336:   PetscMPIInt    size,rank,tag;
337:   PetscInt       base,top,i;
338:   PetscInt       nc,ncwork;
339:   MPI_Status     status;

342:   if (ncolors != PETSC_DECIDE && ncolors > PETSC_IS_COLORING_MAX) {
343:     if (ncolors > PETSC_MAX_UINT16) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Max color value exceeds %d limit. This number is unrealistic. Perhaps a bug in code?\nCurrent max: %d user requested: %D",PETSC_MAX_UINT16,PETSC_IS_COLORING_MAX,ncolors);
344:     else                 SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Max color value exceeds limit. Perhaps reconfigure PETSc with --with-is-color-value-type=short?\n Current max: %d user requested: %D",PETSC_IS_COLORING_MAX,ncolors);
345:   }
346:   PetscNew(iscoloring);
347:   PetscCommDuplicate(comm,&(*iscoloring)->comm,&tag);
348:   comm = (*iscoloring)->comm;

350:   /* compute the number of the first node on my processor */
351:   MPI_Comm_size(comm,&size);

353:   /* should use MPI_Scan() */
354:   MPI_Comm_rank(comm,&rank);
355:   if (rank == 0) {
356:     base = 0;
357:     top  = n;
358:   } else {
359:     MPI_Recv(&base,1,MPIU_INT,rank-1,tag,comm,&status);
360:     top  = base+n;
361:   }
362:   if (rank < size-1) {
363:     MPI_Send(&top,1,MPIU_INT,rank+1,tag,comm);
364:   }

366:   /* compute the total number of colors */
367:   ncwork = 0;
368:   for (i=0; i<n; i++) {
369:     if (ncwork < colors[i]) ncwork = colors[i];
370:   }
371:   ncwork++;
372:   MPIU_Allreduce(&ncwork,&nc,1,MPIU_INT,MPI_MAX,comm);
373:   if (nc > ncolors) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of colors passed in %D is less than the actual number of colors in array %D",ncolors,nc);
374:   (*iscoloring)->n      = nc;
375:   (*iscoloring)->is     = NULL;
376:   (*iscoloring)->N      = n;
377:   (*iscoloring)->refct  = 1;
378:   (*iscoloring)->ctype  = IS_COLORING_GLOBAL;
379:   if (mode == PETSC_COPY_VALUES) {
380:     PetscMalloc1(n,&(*iscoloring)->colors);
381:     PetscLogObjectMemory((PetscObject)(*iscoloring),n*sizeof(ISColoringValue));
382:     PetscArraycpy((*iscoloring)->colors,colors,n);
383:     (*iscoloring)->allocated = PETSC_TRUE;
384:   } else if (mode == PETSC_OWN_POINTER) {
385:     (*iscoloring)->colors    = (ISColoringValue*)colors;
386:     (*iscoloring)->allocated = PETSC_TRUE;
387:   } else {
388:     (*iscoloring)->colors    = (ISColoringValue*)colors;
389:     (*iscoloring)->allocated = PETSC_FALSE;
390:   }
391:   ISColoringViewFromOptions(*iscoloring,NULL,"-is_coloring_view");
392:   PetscInfo1(0,"Number of colors %D\n",nc);
393:   return(0);
394: }
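
A creation sketch (not part of iscoloring.c; the helper is illustrative): six local nodes striped with two colors. With PETSC_COPY_VALUES the stack array may safely go out of scope.

#include <petscis.h>

/* Sketch: build a two-color striped coloring of 6 local nodes */
PetscErrorCode MakeStripedColoring(MPI_Comm comm,ISColoring *iscoloring)
{
  ISColoringValue colors[6];
  PetscInt        i;
  PetscErrorCode  ierr;

  for (i=0; i<6; i++) colors[i] = (ISColoringValue)(i % 2);
  ierr = ISColoringCreate(comm,2,6,colors,PETSC_COPY_VALUES,iscoloring);CHKERRQ(ierr);
  return 0;
}

When the coloring is no longer needed, destroy it with ISColoringDestroy(iscoloring).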

396: /*@
397:     ISBuildTwoSided - Takes an IS that describes where each local entry will be sent. Generates an IS that contains
398:     the indices this process receives, from remote or local senders.

400:     Collective on IS

402:     Input Parameters:
403: +   ito - an IS giving the target rank for each local entry; negative target ranks are ignored
404: -   toindx - an IS giving the indices to send; NULL means the natural (global) numbering is sent

406:     Output Parameter:
407: .   rows - the indices received by this process, sorted in increasing order

409:    Level: advanced

411: .seealso: MatPartitioningCreate(), ISPartitioningToNumbering(), ISPartitioningCount()

413: @*/
414: PetscErrorCode  ISBuildTwoSided(IS ito,IS toindx, IS *rows)
415: {
416:    const PetscInt *ito_indices,*toindx_indices;
417:    PetscInt       *send_indices,rstart,*recv_indices,nrecvs,nsends;
418:    PetscInt       *tosizes,*fromsizes,i,j,*tosizes_tmp,*tooffsets_tmp,ito_ln;
419:    PetscMPIInt    *toranks,*fromranks,size,target_rank,*fromperm_newtoold,nto,nfrom;
420:    PetscLayout     isrmap;
421:    MPI_Comm        comm;
422:    PetscSF         sf;
423:    PetscSFNode    *iremote;
424:    PetscErrorCode  ierr;

427:    PetscObjectGetComm((PetscObject)ito,&comm);
428:    MPI_Comm_size(comm,&size);
429:    ISGetLocalSize(ito,&ito_ln);
430:    /* why do we not have ISGetLayout()? */
431:    isrmap = ito->map;
432:    PetscLayoutGetRange(isrmap,&rstart,NULL);
433:    ISGetIndices(ito,&ito_indices);
434:    PetscCalloc2(size,&tosizes_tmp,size+1,&tooffsets_tmp);
435:    for (i=0; i<ito_ln; i++) {
436:      if (ito_indices[i]<0) continue;
437:      if (ito_indices[i]>=size) SETERRQ2(comm,PETSC_ERR_ARG_OUTOFRANGE,"target rank %D is larger than communicator size %d",ito_indices[i],size);
438:      tosizes_tmp[ito_indices[i]]++;
439:    }
440:    nto = 0;
441:    for (i=0; i<size; i++) {
442:      tooffsets_tmp[i+1] = tooffsets_tmp[i]+tosizes_tmp[i];
443:      if (tosizes_tmp[i]>0) nto++;
444:    }
445:    PetscCalloc2(nto,&toranks,2*nto,&tosizes);
446:    nto  = 0;
447:    for (i=0; i<size; i++) {
448:      if (tosizes_tmp[i]>0) {
449:        toranks[nto]     = i;
450:        tosizes[2*nto]   = tosizes_tmp[i];/* size */
451:        tosizes[2*nto+1] = tooffsets_tmp[i];/* offset */
452:        nto++;
453:      }
454:    }
455:    nsends = tooffsets_tmp[size];
456:    PetscCalloc1(nsends,&send_indices);
457:    if (toindx) {
458:      ISGetIndices(toindx,&toindx_indices);
459:    }
460:    for (i=0; i<ito_ln; i++) {
461:      if (ito_indices[i]<0) continue;
462:      target_rank = ito_indices[i];
463:      send_indices[tooffsets_tmp[target_rank]] = toindx? toindx_indices[i]:(i+rstart);
464:      tooffsets_tmp[target_rank]++;
465:    }
466:    if (toindx) {
467:      ISRestoreIndices(toindx,&toindx_indices);
468:    }
469:    ISRestoreIndices(ito,&ito_indices);
470:    PetscFree2(tosizes_tmp,tooffsets_tmp);
471:    PetscCommBuildTwoSided(comm,2,MPIU_INT,nto,toranks,tosizes,&nfrom,&fromranks,&fromsizes);
472:    PetscFree2(toranks,tosizes);
473:    PetscMalloc1(nfrom,&fromperm_newtoold);
474:    for (i=0; i<nfrom; i++) fromperm_newtoold[i] = i;
475:    PetscSortMPIIntWithArray(nfrom,fromranks,fromperm_newtoold);
476:    nrecvs = 0;
477:    for (i=0; i<nfrom; i++) nrecvs += fromsizes[i*2];
478:    PetscCalloc1(nrecvs,&recv_indices);
479:    PetscMalloc1(nrecvs,&iremote);
480:    nrecvs = 0;
481:    for (i=0; i<nfrom; i++) {
482:      for (j=0; j<fromsizes[2*fromperm_newtoold[i]]; j++) {
483:        iremote[nrecvs].rank    = fromranks[i];
484:        iremote[nrecvs++].index = fromsizes[2*fromperm_newtoold[i]+1]+j;
485:      }
486:    }
487:    PetscSFCreate(comm,&sf);
488:    PetscSFSetGraph(sf,nsends,nrecvs,NULL,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);
489:    PetscSFSetType(sf,PETSCSFBASIC);
490:    /* how to attach an options prefix? */
491:    PetscSFSetFromOptions(sf);
492:    PetscSFBcastBegin(sf,MPIU_INT,send_indices,recv_indices,MPI_REPLACE);
493:    PetscSFBcastEnd(sf,MPIU_INT,send_indices,recv_indices,MPI_REPLACE);
494:    PetscSFDestroy(&sf);
495:    PetscFree(fromranks);
496:    PetscFree(fromsizes);
497:    PetscFree(fromperm_newtoold);
498:    PetscFree(send_indices);
499:    if (rows) {
500:      PetscSortInt(nrecvs,recv_indices);
501:      ISCreateGeneral(comm,nrecvs,recv_indices,PETSC_OWN_POINTER,rows);
502:    } else {
503:      PetscFree(recv_indices);
504:    }
505:    return(0);
506: }
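
A usage sketch (not part of iscoloring.c; the helper is illustrative): every rank addresses all of its entries to the next rank. With toindx = NULL the natural global numbering is sent, and rows returns the sorted indices this rank received.

#include <petscis.h>

/* Sketch: ship each rank's indices to rank+1 (mod size) */
PetscErrorCode SendToNextRank(MPI_Comm comm,PetscInt nlocal,IS *rows)
{
  IS             ito;
  PetscInt       i,*targets;
  PetscMPIInt    rank,size;
  PetscErrorCode ierr;

  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = PetscMalloc1(nlocal,&targets);CHKERRQ(ierr);
  for (i=0; i<nlocal; i++) targets[i] = (rank+1) % size;
  ierr = ISCreateGeneral(comm,nlocal,targets,PETSC_OWN_POINTER,&ito);CHKERRQ(ierr);
  ierr = ISBuildTwoSided(ito,NULL,rows);CHKERRQ(ierr);
  ierr = ISDestroy(&ito);CHKERRQ(ierr);
  return 0;
}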

508: /*@
509:     ISPartitioningToNumbering - Takes an ISPartitioning and on each processor
510:     generates an IS that contains a new global node number for each index based
511:     on the partitioning.

513:     Collective on IS

515:     Input Parameters:
516: .   partitioning - a partitioning as generated by MatPartitioningApply()
517:                    or MatPartitioningApplyND()

519:     Output Parameter:
520: .   is - on each processor the index set that defines the global numbers
521:          (in the new numbering) for all the nodes currently (before the partitioning)
522:          on that processor

524:    Level: advanced

526: .seealso: MatPartitioningCreate(), AOCreateBasic(), ISPartitioningCount()

528: @*/
529: PetscErrorCode  ISPartitioningToNumbering(IS part,IS *is)
530: {
531:   MPI_Comm       comm;
532:   IS             ndorder;
533:   PetscInt       i,np,npt,n,*starts = NULL,*sums = NULL,*lsizes = NULL,*newi = NULL;
534:   const PetscInt *indices = NULL;

540:   /* see if the partitioning comes from nested dissection */
541:   PetscObjectQuery((PetscObject)part,"_petsc_matpartitioning_ndorder",(PetscObject*)&ndorder);
542:   if (ndorder) {
543:     PetscObjectReference((PetscObject)ndorder);
544:     *is  = ndorder;
545:     return(0);
546:   }

548:   PetscObjectGetComm((PetscObject)part,&comm);
549:   /* count the number of partitions, i.e., virtual processors */
550:   ISGetLocalSize(part,&n);
551:   ISGetIndices(part,&indices);
552:   np   = 0;
553:   for (i=0; i<n; i++) np = PetscMax(np,indices[i]);
554:   MPIU_Allreduce(&np,&npt,1,MPIU_INT,MPI_MAX,comm);
555:   np   = npt+1; /* so that it looks like an MPI_Comm_size output */

557:   /*
558:         lsizes - number of elements of each partition on this particular processor
559:         sums - total number of "previous" nodes for any particular partition
560:         starts - global number of first element in each partition on this processor
561:   */
562:   PetscMalloc3(np,&lsizes,np,&starts,np,&sums);
563:   PetscArrayzero(lsizes,np);
564:   for (i=0; i<n; i++) lsizes[indices[i]]++;
565:   MPIU_Allreduce(lsizes,sums,np,MPIU_INT,MPI_SUM,comm);
566:   MPI_Scan(lsizes,starts,np,MPIU_INT,MPI_SUM,comm);
567:   for (i=0; i<np; i++) starts[i] -= lsizes[i];
568:   for (i=1; i<np; i++) {
569:     sums[i]   += sums[i-1];
570:     starts[i] += sums[i-1];
571:   }

573:   /*
574:       For each local index give it the new global number
575:   */
576:   PetscMalloc1(n,&newi);
577:   for (i=0; i<n; i++) newi[i] = starts[indices[i]]++;
578:   PetscFree3(lsizes,starts,sums);

580:   ISRestoreIndices(part,&indices);
581:   ISCreateGeneral(comm,n,newi,PETSC_OWN_POINTER,is);
582:   ISSetPermutation(*is);
583:   return(0);
584: }
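
A usage sketch (not part of iscoloring.c), assuming part was produced by MatPartitioningApply():

#include <petscis.h>
#include <petscviewer.h>

/* Sketch: convert a partitioning into the new global numbering and print it */
PetscErrorCode ShowNewNumbering(IS part)
{
  IS             newnum;
  PetscErrorCode ierr;

  ierr = ISPartitioningToNumbering(part,&newnum);CHKERRQ(ierr);
  ierr = ISView(newnum,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  ierr = ISDestroy(&newnum);CHKERRQ(ierr);
  return 0;
}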

586: /*@
587:     ISPartitioningCount - Takes a ISPartitioning and determines the number of
588:     resulting elements on each (partition) process

590:     Collective on IS

592:     Input Parameters:
593: +   partitioning - a partitioning as generated by MatPartitioningApply() or
594:                    MatPartitioningApplyND()
595: -   len - length of the array count; this is the total number of partitions

597:     Output Parameter:
598: .   count - array of length len, to contain the number of elements assigned
599:         to each partition, where len is the number of partitions generated
600:         (see Notes below).

602:    Level: advanced

604:     Notes:
605:         By default the number of partitions generated (and thus the length
606:         of count) is the size of the communicator associated with IS,
607:         but it can be set by MatPartitioningSetNParts(). The resulting array
608:         of lengths can, for instance, serve as input to PCBJacobiSetTotalBlocks().
609:         If the partitioning has been obtained by MatPartitioningApplyND(),
610:         the returned count does not include the separators.

612: .seealso: MatPartitioningCreate(), AOCreateBasic(), ISPartitioningToNumbering(),
613:         MatPartitioningSetNParts(), MatPartitioningApply(), MatPartitioningApplyND()

615: @*/
616: PetscErrorCode  ISPartitioningCount(IS part,PetscInt len,PetscInt count[])
617: {
618:   MPI_Comm       comm;
619:   PetscInt       i,n,*lsizes;
620:   const PetscInt *indices;
622:   PetscMPIInt    npp;

625:   PetscObjectGetComm((PetscObject)part,&comm);
626:   if (len == PETSC_DEFAULT) {
627:     PetscMPIInt size;
628:     MPI_Comm_size(comm,&size);
629:     len  = (PetscInt) size;
630:   }

632:   /* count the number of partitions */
633:   ISGetLocalSize(part,&n);
634:   ISGetIndices(part,&indices);
635:   if (PetscDefined(USE_DEBUG)) {
636:     PetscInt np = 0,npt;
637:     for (i=0; i<n; i++) np = PetscMax(np,indices[i]);
638:     MPIU_Allreduce(&np,&npt,1,MPIU_INT,MPI_MAX,comm);
639:     np   = npt+1; /* so that it looks like an MPI_Comm_size output */
640:     if (np > len) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Length of count array %D is less than number of partitions %D",len,np);
641:   }

643:   /*
644:         lsizes - number of elements of each partition on this particular processor
645:         sums - total number of "previous" nodes for any particular partition
646:         starts - global number of first element in each partition on this processor
647:   */
648:   PetscCalloc1(len,&lsizes);
649:   for (i=0; i<n; i++) {
650:     if (indices[i] > -1) lsizes[indices[i]]++;
651:   }
652:   ISRestoreIndices(part,&indices);
653:   PetscMPIIntCast(len,&npp);
654:   MPIU_Allreduce(lsizes,count,npp,MPIU_INT,MPI_SUM,comm);
655:   PetscFree(lsizes);
656:   return(0);
657: }
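
A usage sketch (not part of iscoloring.c; the helper is illustrative), where nparts is assumed to be the number of partitions, by default the communicator size:

#include <petscis.h>

/* Sketch: count the elements assigned to each of nparts partitions */
PetscErrorCode ShowPartitionSizes(IS part,PetscInt nparts)
{
  PetscInt       p,*count;
  PetscErrorCode ierr;

  ierr = PetscMalloc1(nparts,&count);CHKERRQ(ierr);
  ierr = ISPartitioningCount(part,nparts,count);CHKERRQ(ierr);
  for (p=0; p<nparts; p++) {
    ierr = PetscPrintf(PETSC_COMM_SELF,"partition %D: %D elements\n",p,count[p]);CHKERRQ(ierr);
  }
  ierr = PetscFree(count);CHKERRQ(ierr);
  return 0;
}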

659: /*@
660:     ISAllGather - Given an index set (IS) on each processor, generates a large
661:     index set (same on each processor) by concatenating together each
662:     processor's index set.

664:     Collective on IS

666:     Input Parameter:
667: .   is - the distributed index set

669:     Output Parameter:
670: .   isout - the concatenated index set (same on all processors)

672:     Notes:
673:     ISAllGather() is clearly not scalable for large index sets.

675:     The IS created on each processor must be created with a common
676:     communicator (e.g., PETSC_COMM_WORLD). If the index sets were created
677:     with PETSC_COMM_SELF, this routine will not work as expected, since
678:     each process will generate its own new IS that consists only of
679:     itself.

681:     The communicator for this new IS is PETSC_COMM_SELF

683:     Level: intermediate

685: .seealso: ISCreateGeneral(), ISCreateStride(), ISCreateBlock()
686: @*/
687: PetscErrorCode  ISAllGather(IS is,IS *isout)
688: {
690:   PetscInt       *indices,n,i,N,step,first;
691:   const PetscInt *lindices;
692:   MPI_Comm       comm;
693:   PetscMPIInt    size,*sizes = NULL,*offsets = NULL,nn;
694:   PetscBool      stride;


700:   PetscObjectGetComm((PetscObject)is,&comm);
701:   MPI_Comm_size(comm,&size);
702:   ISGetLocalSize(is,&n);
703:   PetscObjectTypeCompare((PetscObject)is,ISSTRIDE,&stride);
704:   if (size == 1 && stride) { /* should handle parallel ISStride also */
705:     ISStrideGetInfo(is,&first,&step);
706:     ISCreateStride(PETSC_COMM_SELF,n,first,step,isout);
707:   } else {
708:     PetscMalloc2(size,&sizes,size,&offsets);

710:     PetscMPIIntCast(n,&nn);
711:     MPI_Allgather(&nn,1,MPI_INT,sizes,1,MPI_INT,comm);
712:     offsets[0] = 0;
713:     for (i=1; i<size; i++) {
714:       PetscInt s = offsets[i-1] + sizes[i-1];
715:       PetscMPIIntCast(s,&offsets[i]);
716:     }
717:     N = offsets[size-1] + sizes[size-1];

719:     PetscMalloc1(N,&indices);
720:     ISGetIndices(is,&lindices);
721:     MPI_Allgatherv((void*)lindices,nn,MPIU_INT,indices,sizes,offsets,MPIU_INT,comm);
722:     ISRestoreIndices(is,&lindices);
723:     PetscFree2(sizes,offsets);

725:     ISCreateGeneral(PETSC_COMM_SELF,N,indices,PETSC_OWN_POINTER,isout);
726:   }
727:   return(0);
728: }
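
A usage sketch (not part of iscoloring.c): note that the result always lives on PETSC_COMM_SELF, whatever the communicator of the input IS.

#include <petscis.h>

/* Sketch: gather a distributed IS so every rank holds the full set */
PetscErrorCode GatherWholeIS(IS is,IS *isall)
{
  PetscInt       N;
  PetscErrorCode ierr;

  ierr = ISAllGather(is,isall);CHKERRQ(ierr);
  ierr = ISGetLocalSize(*isall,&N);CHKERRQ(ierr); /* total length, identical on every rank */
  return 0;
}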

730: /*@C
731:     ISAllGatherColors - Given a set of colors on each processor, generates a large
732:     set (same on each processor) by concatenating together each processor's colors

734:     Collective

736:     Input Parameters:
737: +   comm - communicator to share the indices
738: .   n - local size of set
739: -   lindices - local colors

741:     Output Parameters:
742: +   outN - total number of indices
743: -   outindices - all of the colors

745:     Notes:
746:     ISAllGatherColors() is clearly not scalable for large index sets.

748:     Level: intermediate

750: .seealso: ISCreateGeneral(), ISCreateStride(), ISCreateBlock(), ISAllGather()
751: @*/
752: PetscErrorCode  ISAllGatherColors(MPI_Comm comm,PetscInt n,ISColoringValue *lindices,PetscInt *outN,ISColoringValue *outindices[])
753: {
754:   ISColoringValue *indices;
755:   PetscErrorCode  ierr;
756:   PetscInt        i,N;
757:   PetscMPIInt     size,*offsets = NULL,*sizes = NULL, nn = n;

760:   MPI_Comm_size(comm,&size);
761:   PetscMalloc2(size,&sizes,size,&offsets);

763:   MPI_Allgather(&nn,1,MPI_INT,sizes,1,MPI_INT,comm);
764:   offsets[0] = 0;
765:   for (i=1; i<size; i++) offsets[i] = offsets[i-1] + sizes[i-1];
766:   N    = offsets[size-1] + sizes[size-1];

767:   PetscMalloc1(N+1,&indices);
768:   MPI_Allgatherv(lindices,(PetscMPIInt)n,MPIU_COLORING_VALUE,indices,sizes,offsets,MPIU_COLORING_VALUE,comm);
769:   /* sizes and offsets must remain allocated until MPI_Allgatherv() has used them */
770:   PetscFree2(sizes,offsets);

772:   *outindices = indices;
773:   if (outN) *outN = N;
774:   return(0);
775: }
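
A usage sketch (not part of iscoloring.c; the helper is illustrative): unlike ISColoringGetColors(), here the caller owns the output array and must PetscFree() it.

#include <petscis.h>

/* Sketch: collect every rank's n local colors into one global array */
PetscErrorCode GatherAllColors(MPI_Comm comm,PetscInt n,ISColoringValue *mycolors)
{
  PetscInt         N;
  ISColoringValue *allcolors;
  PetscErrorCode   ierr;

  ierr = ISAllGatherColors(comm,n,mycolors,&N,&allcolors);CHKERRQ(ierr);
  /* ... use the N gathered colors ... */
  ierr = PetscFree(allcolors);CHKERRQ(ierr);
  return 0;
}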

777: /*@
778:     ISComplement - Given an index set (IS), generates the complement index set. That is,
779:        all indices that are NOT in the given set.

781:     Collective on IS

783:     Input Parameters:
784: +   is - the index set
785: .   nmin - the first index desired in the local part of the complement
786: -   nmax - the largest index desired in the local part of the complement (note that all indices in is must be greater than or equal to nmin and less than nmax)

788:     Output Parameter:
789: .   isout - the complement

791:     Notes:
792:     The communicator for this new IS is the same as for the input IS

794:       For a parallel IS, this will generate the local part of the complement on each process

796:       To generate the entire complement (on each process) of a parallel IS, first call ISAllGather() and then
797:     call this routine.

799:     Level: intermediate

801: .seealso: ISCreateGeneral(), ISCreateStride(), ISCreateBlock(), ISAllGather()
802: @*/
803: PetscErrorCode  ISComplement(IS is,PetscInt nmin,PetscInt nmax,IS *isout)
804: {
806:   const PetscInt *indices;
807:   PetscInt       n,i,j,unique,cnt,*nindices;
808:   PetscBool      sorted;

813:   if (nmin < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"nmin %D cannot be negative",nmin);
814:   if (nmin > nmax) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"nmin %D cannot be greater than nmax %D",nmin,nmax);
815:   ISSorted(is,&sorted);
816:   if (!sorted) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Index set must be sorted");

818:   ISGetLocalSize(is,&n);
819:   ISGetIndices(is,&indices);
820:   if (PetscDefined(USE_DEBUG)) {
821:     for (i=0; i<n; i++) {
822:       if (indices[i] <  nmin) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Index %D's value %D is smaller than minimum given %D",i,indices[i],nmin);
823:       if (indices[i] >= nmax) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Index %D's value %D is larger than maximum given %D",i,indices[i],nmax);
824:     }
825:   }
826:   /* Count number of unique entries */
827:   unique = (n>0);
828:   for (i=0; i<n-1; i++) {
829:     if (indices[i+1] != indices[i]) unique++;
830:   }
831:   PetscMalloc1(nmax-nmin-unique,&nindices);
832:   cnt  = 0;
833:   for (i=nmin,j=0; i<nmax; i++) {
834:     if (j<n && i==indices[j]) do { j++; } while (j<n && i==indices[j]);
835:     else nindices[cnt++] = i;
836:   }
837:   if (cnt != nmax-nmin-unique) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Number of entries found in complement %D does not match expected %D",cnt,nmax-nmin-unique);
838:   ISCreateGeneral(PetscObjectComm((PetscObject)is),cnt,nindices,PETSC_OWN_POINTER,isout);
839:   ISRestoreIndices(is,&indices);
840:   return(0);
841: }
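
A sequential usage sketch (not part of iscoloring.c): the complement of {1,3} within [0,5) is {0,2,4}. The input must be sorted and lie in [nmin,nmax).

#include <petscis.h>

/* Sketch: complement of a small sorted index set */
PetscErrorCode ComplementExample(void)
{
  IS             is,isc;
  const PetscInt idx[2] = {1,3};
  PetscErrorCode ierr;

  ierr = ISCreateGeneral(PETSC_COMM_SELF,2,idx,PETSC_COPY_VALUES,&is);CHKERRQ(ierr);
  ierr = ISComplement(is,0,5,&isc);CHKERRQ(ierr); /* isc = {0,2,4} */
  ierr = ISDestroy(&is);CHKERRQ(ierr);
  ierr = ISDestroy(&isc);CHKERRQ(ierr);
  return 0;
}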