Actual source code: ex9.c


static char help[] = "The solution of 2 different linear systems with different linear solvers.\n\
Also, this example illustrates the repeated\n\
solution of linear systems, while reusing matrix, vector, and solver data\n\
structures throughout the process.  Note the various stages of event logging.\n\n";

/*T
   Concepts: KSP^repeatedly solving linear systems;
   Concepts: PetscLog^profiling multiple stages of code;
   Concepts: PetscLog^user-defined event profiling;
   Processors: n
T*/

/*
  Include "petscksp.h" so that we can use KSP solvers.  Note that this file
  automatically includes:
     petscsys.h    - base PETSc routines     petscvec.h - vectors
     petscmat.h    - matrices
     petscis.h     - index sets              petscksp.h - Krylov subspace methods
     petscviewer.h - viewers                 petscpc.h  - preconditioners
*/
#include <petscksp.h>

/*
   Declare user-defined routines
*/
extern PetscErrorCode CheckError(Vec,Vec,Vec,PetscInt,PetscReal,PetscLogEvent);
extern PetscErrorCode MyKSPMonitor(KSP,PetscInt,PetscReal,void*);

int main(int argc,char **args)
{
  Vec            x1,b1,x2,b2;    /* solution and RHS vectors for systems #1 and #2 */
  Vec            u;              /* exact solution vector */
  Mat            C1,C2;          /* matrices for systems #1 and #2 */
  KSP            ksp1,ksp2;      /* KSP contexts for systems #1 and #2 */
  PetscInt       ntimes = 3;     /* number of times to solve the linear systems */
  PetscLogEvent  CHECK_ERROR;    /* event number for error checking */
  PetscInt       ldim,low,high,iglobal,Istart,Iend,Istart2,Iend2;
  PetscInt       Ii,J,i,j,m = 3,n = 2,its,t;
  PetscErrorCode ierr;           /* error code returned by PETSc routines */
  PetscBool      flg = PETSC_FALSE, unsym = PETSC_TRUE;
  PetscScalar    v;
  PetscMPIInt    rank,size;
#if defined(PETSC_USE_LOG)
  PetscLogStage  stages[3];
#endif

  ierr = PetscInitialize(&argc,&args,(char*)0,help);if (ierr) return ierr;
  PetscOptionsGetInt(NULL,NULL,"-m",&m,NULL);
  PetscOptionsGetInt(NULL,NULL,"-t",&ntimes,NULL);
  PetscOptionsGetBool(NULL,NULL,"-unsym",&unsym,NULL);
  MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
  MPI_Comm_size(PETSC_COMM_WORLD,&size);
  n    = 2*size;

  /*
     Register various stages for profiling
  */
  PetscLogStageRegister("Prelim setup",&stages[0]);
  PetscLogStageRegister("Linear System 1",&stages[1]);
  PetscLogStageRegister("Linear System 2",&stages[2]);

  /*
     Register a user-defined event for profiling (error checking).
  */
  CHECK_ERROR = 0;
  PetscLogEventRegister("Check Error",KSP_CLASSID,&CHECK_ERROR);
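
  /*
     The stages and the event registered above appear in the performance
     summary that PETSc prints when the program is run with the standard
     option -log_view, for example (typical invocation):

         mpiexec -n 2 ./ex9 -t 2 -log_view

     Each stage gets its own section of the summary, and "Check Error"
     appears as a separate event within it.
  */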

  /* - - - - - - - - - - - - Stage 0: - - - - - - - - - - - - - -
                        Preliminary Setup
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  PetscLogStagePush(stages[0]);

  /*
     Create data structures for first linear system.
      - Create parallel matrix, specifying only its global dimensions.
        When using MatCreate(), the matrix format can be specified at
        runtime. Also, the parallel partitioning of the matrix is
        determined by PETSc at runtime.
      - Create parallel vectors.
        - When using VecSetSizes(), we specify only the vector's global
          dimension; the parallel partitioning is determined at runtime.
        - Note: We form 1 vector from scratch and then duplicate as needed.
  */
  MatCreate(PETSC_COMM_WORLD,&C1);
  MatSetSizes(C1,PETSC_DECIDE,PETSC_DECIDE,m*n,m*n);
  MatSetFromOptions(C1);
  MatSetUp(C1);
  MatGetOwnershipRange(C1,&Istart,&Iend);
  VecCreate(PETSC_COMM_WORLD,&u);
  VecSetSizes(u,PETSC_DECIDE,m*n);
  VecSetFromOptions(u);
  VecDuplicate(u,&b1);
  VecDuplicate(u,&x1);

  /*
     Create first linear solver context.
     Set runtime options (e.g., -pc_type <type>).
     Note that the first linear system uses the default option
     names, while the second linear system uses a different
     options prefix.
  */
  KSPCreate(PETSC_COMM_WORLD,&ksp1);
  KSPSetFromOptions(ksp1);

  /*
     Set user-defined monitoring routine for first linear system.
  */
  PetscOptionsGetBool(NULL,NULL,"-my_ksp_monitor",&flg,NULL);
  if (flg) {KSPMonitorSet(ksp1,MyKSPMonitor,NULL,0);}
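
  /*
     For example, the monitor installed above is activated at run time by

         ./ex9 -my_ksp_monitor

     and may be combined with the usual KSP options.  It applies only to
     the first linear system, since it is attached only to ksp1.
  */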

  /*
     Create data structures for second linear system.
  */
  MatCreate(PETSC_COMM_WORLD,&C2);
  MatSetSizes(C2,PETSC_DECIDE,PETSC_DECIDE,m*n,m*n);
  MatSetFromOptions(C2);
  MatSetUp(C2);
  MatGetOwnershipRange(C2,&Istart2,&Iend2);
  VecDuplicate(u,&b2);
  VecDuplicate(u,&x2);

  /*
     Create second linear solver context
  */
  KSPCreate(PETSC_COMM_WORLD,&ksp2);

  /*
     Set different options prefix for second linear system.
     Set runtime options (e.g., -s2_pc_type <type>)
  */
  KSPAppendOptionsPrefix(ksp2,"s2_");
  KSPSetFromOptions(ksp2);
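
  /*
     With the "s2_" prefix set above, every option for this solver is
     spelled with that prefix at the command line, e.g. (typical invocation)

         ./ex9 -ksp_type gmres -s2_ksp_type bcgs -s2_pc_type jacobi

     so the two systems can be controlled independently.
  */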

  /*
     Assemble exact solution vector in parallel.  Note that each
     processor needs to set only its local part of the vector.
  */
  VecGetLocalSize(u,&ldim);
  VecGetOwnershipRange(u,&low,&high);
  for (i=0; i<ldim; i++) {
    iglobal = i + low;
    v       = (PetscScalar)(i + 100*rank);
    VecSetValues(u,1,&iglobal,&v,ADD_VALUES);
  }
  VecAssemblyBegin(u);
  VecAssemblyEnd(u);
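
  /*
     Note: each entry above is set exactly once, so INSERT_VALUES would
     work equally well here.  With either mode, no entry of u may be used
     until the VecAssemblyBegin()/VecAssemblyEnd() pair has completed, and
     the two insert modes must not be mixed without an intervening assembly.
  */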

  /*
     Log the number of flops for computing vector entries
  */
  PetscLogFlops(2.0*ldim);

  /*
     End current profiling stage
  */
  PetscLogStagePop();

  /* --------------------------------------------------------------
                        Linear solver loop:
      Solve 2 different linear systems several times in succession
     -------------------------------------------------------------- */

  for (t=0; t<ntimes; t++) {

    /* - - - - - - - - - - - - Stage 1: - - - - - - - - - - - - - -
                 Assemble and solve first linear system
       - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

    /*
       Begin profiling stage #1
    */
    PetscLogStagePush(stages[1]);

    /*
       Initialize all matrix entries to zero.  MatZeroEntries() retains
       the nonzero structure of the matrix for sparse formats.
    */
    if (t > 0) {MatZeroEntries(C1);}

    /*
       Set matrix entries in parallel.  Also, log the number of flops
       for computing matrix entries.
        - Each processor needs to insert only elements that it owns
          locally (but any non-local elements will be sent to the
          appropriate processor during matrix assembly).
        - Always specify global rows and columns of matrix entries.
    */
    for (Ii=Istart; Ii<Iend; Ii++) {
      v = -1.0; i = Ii/n; j = Ii - i*n;
      if (i>0)   {J = Ii - n; MatSetValues(C1,1,&Ii,1,&J,&v,ADD_VALUES);}
      if (i<m-1) {J = Ii + n; MatSetValues(C1,1,&Ii,1,&J,&v,ADD_VALUES);}
      if (j>0)   {J = Ii - 1; MatSetValues(C1,1,&Ii,1,&J,&v,ADD_VALUES);}
      if (j<n-1) {J = Ii + 1; MatSetValues(C1,1,&Ii,1,&J,&v,ADD_VALUES);}
      v = 4.0; MatSetValues(C1,1,&Ii,1,&Ii,&v,ADD_VALUES);
    }
    if (unsym) {
      for (Ii=Istart; Ii<Iend; Ii++) { /* Make matrix nonsymmetric */
        v = -1.0*(t+0.5); i = Ii/n;
        if (i>0)   {J = Ii - n; MatSetValues(C1,1,&Ii,1,&J,&v,ADD_VALUES);}
      }
      PetscLogFlops(2.0*(Iend-Istart));
    }

    /*
       Assemble matrix, using the 2-step process:
         MatAssemblyBegin(), MatAssemblyEnd()
       Computations can be done while messages are in transition
       by placing code between these two statements.
    */
    MatAssemblyBegin(C1,MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(C1,MAT_FINAL_ASSEMBLY);
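
    /*
       If there were independent local work to do, it could be overlapped
       with the assembly communication; a sketch of the pattern (purely
       illustrative, since this example has no such work):

         MatAssemblyBegin(C1,MAT_FINAL_ASSEMBLY);
         ... purely local computations that do not touch C1 ...
         MatAssemblyEnd(C1,MAT_FINAL_ASSEMBLY);
    */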

    /*
       Keep the default behavior for C1, allowing new nonzero locations
       to be inserted on later assemblies if needed.  (Contrast with the
       second linear system below, where the nonzero pattern is frozen.)
    */
    MatSetOption(C1,MAT_NEW_NONZERO_LOCATIONS,PETSC_TRUE);

    /*
       Compute right-hand-side vector
    */
    MatMult(C1,u,b1);

    /*
       Set operators. Here the matrix that defines the linear system
       also serves as the preconditioning matrix.
    */
    KSPSetOperators(ksp1,C1,C1);

    /*
       Use the previous solution of linear system #1 as the initial
       guess for the next solve of linear system #1.  The user MUST
       call KSPSetInitialGuessNonzero() to indicate use of an initial
       guess vector; otherwise, an initial guess of zero is used.
    */
    if (t>0) {
      KSPSetInitialGuessNonzero(ksp1,PETSC_TRUE);
    }
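
    /*
       The nonzero initial guess is taken from the solution vector passed
       to KSPSolve(); here x1 still holds the result of the previous solve,
       which is exactly the guess we want.
    */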

    /*
       Solve the first linear system.  Here we explicitly call
       KSPSetUp() for more detailed performance monitoring of
       certain preconditioners, such as ICC and ILU.  This call
       is optional, as KSPSetUp() will automatically be called
       within KSPSolve() if it hasn't been called already.
    */
    KSPSetUp(ksp1);
    KSPSolve(ksp1,b1,x1);
    KSPGetIterationNumber(ksp1,&its);
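
    /*
       Beyond the iteration count, one can also query why the iteration
       stopped; a minimal sketch using the standard PETSc API:

         KSPConvergedReason reason;
         KSPGetConvergedReason(ksp1,&reason);
         if (reason < 0) PetscPrintf(PETSC_COMM_WORLD,"System 1 diverged.\n");
    */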

    /*
       Check error of solution to first linear system
    */
    CheckError(u,x1,b1,its,1.e-4,CHECK_ERROR);

    /* - - - - - - - - - - - - Stage 2: - - - - - - - - - - - - - -
                 Assemble and solve second linear system
       - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

    /*
       Conclude profiling stage #1; begin profiling stage #2
    */
    PetscLogStagePop();
    PetscLogStagePush(stages[2]);

    /*
       Initialize all matrix entries to zero
    */
    if (t > 0) {MatZeroEntries(C2);}

    /*
       Assemble matrix in parallel. Also, log the number of flops
       for computing matrix entries.
        - To illustrate the features of parallel matrix assembly, we
          intentionally set the values differently from the way in
          which the matrix is distributed across the processors.  Each
          entry that is not owned locally will be sent to the appropriate
          processor during MatAssemblyBegin() and MatAssemblyEnd().
        - For best efficiency the user should strive to set as many
          entries locally as possible.
    */
    for (i=0; i<m; i++) {
      for (j=2*rank; j<2*rank+2; j++) {
        v = -1.0;  Ii = j + n*i;
        if (i>0)   {J = Ii - n; MatSetValues(C2,1,&Ii,1,&J,&v,ADD_VALUES);}
        if (i<m-1) {J = Ii + n; MatSetValues(C2,1,&Ii,1,&J,&v,ADD_VALUES);}
        if (j>0)   {J = Ii - 1; MatSetValues(C2,1,&Ii,1,&J,&v,ADD_VALUES);}
        if (j<n-1) {J = Ii + 1; MatSetValues(C2,1,&Ii,1,&J,&v,ADD_VALUES);}
        v = 6.0 + t*0.5; MatSetValues(C2,1,&Ii,1,&Ii,&v,ADD_VALUES);
      }
    }
    if (unsym) {
      for (Ii=Istart2; Ii<Iend2; Ii++) { /* Make matrix nonsymmetric */
        v = -1.0*(t+0.5); i = Ii/n;
        if (i>0)   {J = Ii - n; MatSetValues(C2,1,&Ii,1,&J,&v,ADD_VALUES);}
      }
    }
    MatAssemblyBegin(C2,MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(C2,MAT_FINAL_ASSEMBLY);
    PetscLogFlops(2.0*(Iend-Istart));

    /*
       Indicate same nonzero structure of successive linear system matrices
    */
    MatSetOption(C2,MAT_NEW_NONZERO_LOCATIONS,PETSC_FALSE);
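
    /*
       With MAT_NEW_NONZERO_LOCATIONS set to PETSC_FALSE, PETSc will not
       allocate new nonzero locations for later insertions, so the pattern
       established on the first pass through the loop is kept fixed on all
       subsequent passes.
    */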

    /*
       Compute right-hand-side vector
    */
    MatMult(C2,u,b2);

    /*
       Set operators. Here the matrix that defines the linear system
       also serves as the preconditioning matrix.
    */
    KSPSetOperators(ksp2,C2,C2);

    /*
       Solve the second linear system
    */
    KSPSetUp(ksp2);
    KSPSolve(ksp2,b2,x2);
    KSPGetIterationNumber(ksp2,&its);

    /*
       Check error of solution to second linear system
    */
    CheckError(u,x2,b2,its,1.e-4,CHECK_ERROR);

    /*
       Conclude profiling stage #2
    */
    PetscLogStagePop();
  }
  /* --------------------------------------------------------------
                       End of linear solver loop
     -------------------------------------------------------------- */

  /*
     Free work space.  All PETSc objects should be destroyed when they
     are no longer needed.
  */
  KSPDestroy(&ksp1); KSPDestroy(&ksp2);
  VecDestroy(&x1);   VecDestroy(&x2);
  VecDestroy(&b1);   VecDestroy(&b2);
  MatDestroy(&C1);   MatDestroy(&C2);
  VecDestroy(&u);

  ierr = PetscFinalize();
  return ierr;
}
/* ------------------------------------------------------------- */
/*
    CheckError - Checks the error of the solution.

    Input Parameters:
    u - exact solution
    x - approximate solution
    b - work vector
    its - number of iterations for convergence
    tol - tolerance
    CHECK_ERROR - the event number for error checking
                  (for use with profiling)

    Notes:
    In order to profile this section of code separately from the
    rest of the program, we register it as an "event" with
    PetscLogEventRegister() in the main program.  Then, we indicate
    the start and end of this event by respectively calling
        PetscLogEventBegin(CHECK_ERROR,u,x,b,0);
        PetscLogEventEnd(CHECK_ERROR,u,x,b,0);
    Here, we specify the objects most closely associated with
    the event (the vectors u,x,b).  Such information is optional;
    we could instead use 0 for all objects.
*/
PetscErrorCode CheckError(Vec u,Vec x,Vec b,PetscInt its,PetscReal tol,PetscLogEvent CHECK_ERROR)
{
  PetscScalar    none = -1.0;
  PetscReal      norm;

  PetscLogEventBegin(CHECK_ERROR,u,x,b,0);

  /*
     Compute error of the solution, using b as a work vector.
  */
  VecCopy(x,b);
  VecAXPY(b,none,u);
  VecNorm(b,NORM_2,&norm);
  if (norm > tol) {
    PetscPrintf(PETSC_COMM_WORLD,"Norm of error %g, Iterations %D\n",(double)norm,its);
  }
  PetscLogEventEnd(CHECK_ERROR,u,x,b,0);
  return 0;
}
/* ------------------------------------------------------------- */
/*
   MyKSPMonitor - This is a user-defined routine for monitoring
   the KSP iterative solvers.

   Input Parameters:
     ksp   - iterative context
     n     - iteration number
     rnorm - 2-norm (preconditioned) residual value (may be estimated)
     dummy - optional user-defined monitor context (unused here)
*/
PetscErrorCode MyKSPMonitor(KSP ksp,PetscInt n,PetscReal rnorm,void *dummy)
{
  Vec            x;

  /*
     Build the solution vector
  */
  KSPBuildSolution(ksp,NULL,&x);

  /*
     Write the solution vector and residual norm to stdout.
      - PetscPrintf() handles output for multiprocessor jobs
        by printing from only one processor in the communicator.
      - The parallel viewer PETSC_VIEWER_STDOUT_WORLD handles
        data from multiple processors so that the output
        is not jumbled.
  */
  PetscPrintf(PETSC_COMM_WORLD,"iteration %D solution vector:\n",n);
  VecView(x,PETSC_VIEWER_STDOUT_WORLD);
  PetscPrintf(PETSC_COMM_WORLD,"iteration %D KSP Residual norm %14.12e \n",n,(double)rnorm);
  return 0;
}

/*TEST

   test:
      args: -t 2 -pc_type jacobi -ksp_monitor_short -ksp_type gmres -ksp_gmres_cgs_refinement_type refine_always -s2_ksp_type bcgs -s2_pc_type jacobi -s2_ksp_monitor_short

   test:
      requires: hpddm
      suffix: hpddm
      args: -t 2 -pc_type jacobi -ksp_monitor_short -ksp_type {{gmres hpddm}} -s2_ksp_type {{gmres hpddm}} -s2_pc_type jacobi -s2_ksp_monitor_short

   test:
      requires: hpddm
      suffix: hpddm_2
      args: -t 2 -pc_type jacobi -ksp_monitor_short -ksp_type gmres -s2_ksp_type hpddm -s2_ksp_hpddm_type gcrodr -s2_ksp_hpddm_recycle 10 -s2_pc_type jacobi -s2_ksp_monitor_short

   testset:
      requires: hpddm
      output_file: output/ex9_hpddm_cg.out
      args: -unsym 0 -t 2 -pc_type jacobi -ksp_monitor_short -s2_pc_type jacobi -s2_ksp_monitor_short -ksp_rtol 1.e-2 -s2_ksp_rtol 1.e-2
      test:
         suffix: hpddm_cg_p_p
         args: -ksp_type cg -s2_ksp_type cg
      test:
         suffix: hpddm_cg_p_h
         args: -ksp_type cg -s2_ksp_type hpddm -s2_ksp_hpddm_type {{cg bcg bfbcg}shared output}
      test:
         suffix: hpddm_cg_h_h
         args: -ksp_type hpddm -ksp_hpddm_type cg -s2_ksp_type hpddm -s2_ksp_hpddm_type {{cg bcg bfbcg}shared output}

TEST*/