Actual source code: ex5f.F90

      !Solves two linear systems in parallel with KSP.  The code
      !illustrates repeated solution of linear systems with the same preconditioner
      !method but different matrices (having the same nonzero structure).  The code
      !also uses multiple profiling stages.  Input arguments are
      !  -m <size> : problem size
      !  -mat_nonsym : use nonsymmetric matrix (default is symmetric)
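      !  -orthog <1|2> : use modified (1) or classical (2) Gram-Schmidt in GMRES
      !  -test_scaledMat : scale two diagonal entries and use a random exact solution for the second solve
      !  -test_newMat : duplicate the matrix into a new Mat object before the second solve
      !
      !Example invocation (a sketch only: it assumes the executable has been built
      !as ex5f in the current directory and that mpiexec launches MPI jobs):
      !  mpiexec -n 2 ./ex5f -m 4 -mat_nonsym -log_view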

program main
#include <petsc/finclude/petscksp.h>
      use petscksp

      implicit none
      KSP            :: ksp              ! linear solver context
      Mat            :: C,Ctmp           ! matrices (Ctmp is used only by -test_newMat)
      Vec            :: x,u,b            ! approx solution, exact solution, RHS
      PetscReal      :: norm,bnorm       ! norm of error and of right-hand side
      PetscScalar    :: v
      PetscScalar, parameter :: myNone = -1.0
      PetscInt       :: Ii,JJ,ldim,low,high,iglobal,Istart,Iend
      PetscErrorCode :: ierr
      PetscInt       :: i,j,its,n
      PetscInt       :: m = 3, orthog = 0
      PetscMPIInt    :: size,rank
      PetscBool :: &
        testnewC         = PETSC_FALSE, &
        testscaledMat    = PETSC_FALSE, &
        mat_nonsymmetric = PETSC_FALSE
      PetscBool      :: flg
      PetscRandom    :: rctx
      PetscLogStage,dimension(0:1) :: stages
      character(len=PETSC_MAX_PATH_LEN) :: outputString
      PetscInt,parameter :: one = 1
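      ! 'one' is passed wherever MatSetValues/VecSetValues expect a PetscInt
      ! count; a named PetscInt constant (rather than a literal 1) keeps those
      ! calls correct when PETSc is configured with 64-bit indices.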

      PetscCallA(PetscInitialize(ierr))

      PetscCallA(PetscOptionsGetInt(PETSC_NULL_OPTIONS,PETSC_NULL_CHARACTER,'-orthog',orthog,flg,ierr))
      PetscCallA(PetscOptionsGetInt(PETSC_NULL_OPTIONS,PETSC_NULL_CHARACTER,'-m',m,flg,ierr))
      PetscCallMPIA(MPI_Comm_rank(PETSC_COMM_WORLD,rank,ierr))
      PetscCallMPIA(MPI_Comm_size(PETSC_COMM_WORLD,size,ierr))
      n=2*size
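      ! The grid has m rows and n = 2*size columns (two per MPI process), so the
      ! global linear system has m*n unknowns.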

      ! Set flag if we are doing a nonsymmetric problem; the default is symmetric.

      PetscCallA(PetscOptionsGetBool(PETSC_NULL_OPTIONS,PETSC_NULL_CHARACTER,'-mat_nonsym',mat_nonsymmetric,flg,ierr))
      PetscCallA(PetscOptionsGetBool(PETSC_NULL_OPTIONS,PETSC_NULL_CHARACTER,'-test_scaledMat',testscaledMat,flg,ierr))

      ! Register two stages for separate profiling of the two linear solves.
      ! Use the runtime option -log_view for a printout of performance
      ! statistics at the program's conclusion.

      PetscCallA(PetscLogStageRegister('Original Solve',stages(0),ierr))
      PetscCallA(PetscLogStageRegister('Second Solve',stages(1),ierr))

      ! -------------- Stage 0: Solve Original System ----------------------
      ! Indicate to PETSc profiling that we're beginning the first stage

      PetscCallA(PetscLogStagePush(stages(0),ierr))

      ! Create parallel matrix, specifying only its global dimensions.
      ! When using MatCreate(), the matrix format can be specified at
      ! runtime. Also, the parallel partitioning of the matrix is
      ! determined by PETSc at runtime.

      PetscCallA(MatCreate(PETSC_COMM_WORLD,C,ierr))
      PetscCallA(MatSetSizes(C,PETSC_DECIDE,PETSC_DECIDE,m*n,m*n,ierr))
      PetscCallA(MatSetFromOptions(C,ierr))
      PetscCallA(MatSetUp(C,ierr))

      ! Currently, all PETSc parallel matrix formats are partitioned by
      ! contiguous chunks of rows across the processors.  Determine which
      ! rows of the matrix are locally owned.

      PetscCallA(MatGetOwnershipRange(C,Istart,Iend,ierr))

      ! Set matrix entries in parallel.
      ! - Each processor needs to insert only elements that it owns
      !   locally (but any non-local elements will be sent to the
      !   appropriate processor during matrix assembly).
      ! - Always specify global rows and columns of matrix entries.
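      ! The loop below builds row Ii of a standard 5-point stencil on the
      ! m x n grid: -1 for each existing neighbour and 4 on the diagonal.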

      initializeC: do Ii=Istart,Iend-1
          v =-1.0; i = Ii/n; j = Ii - i*n
          if (i>0) then
            JJ = Ii - n
            PetscCallA(MatSetValues(C,one,Ii,one,JJ,v,ADD_VALUES,ierr))
          endif

          if (i<m-1) then
            JJ = Ii + n
            PetscCallA(MatSetValues(C,one,Ii,one,JJ,v,ADD_VALUES,ierr))
          endif

          if (j>0) then
            JJ = Ii - 1
            PetscCallA(MatSetValues(C,one,Ii,one,JJ,v,ADD_VALUES,ierr))
          endif

          if (j<n-1) then
            JJ = Ii + 1
            PetscCallA(MatSetValues(C,one,Ii,one,JJ,v,ADD_VALUES,ierr))
          endif

          v=4.0
          PetscCallA(MatSetValues(C,one,Ii,one,Ii,v,ADD_VALUES,ierr))
      enddo initializeC

      ! Make the matrix nonsymmetric if desired
      if (mat_nonsymmetric) then
        do Ii=Istart,Iend-1
          v=-1.5; i=Ii/n
          if (i>1) then
            JJ=Ii-n-1
            PetscCallA(MatSetValues(C,one,Ii,one,JJ,v,ADD_VALUES,ierr))
          endif
        enddo
      else
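        ! Tell PETSc the matrix is symmetric, and (via MAT_SYMMETRY_ETERNAL)
        ! that this property is intended to persist through later changes to
        ! the matrix values.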
        PetscCallA(MatSetOption(C,MAT_SYMMETRIC,PETSC_TRUE,ierr))
        PetscCallA(MatSetOption(C,MAT_SYMMETRY_ETERNAL,PETSC_TRUE,ierr))
      endif

      ! Assemble matrix, using the 2-step process:
      ! MatAssemblyBegin(), MatAssemblyEnd()
      ! Computations can be done while messages are in transition
      ! by placing code between these two statements.

      PetscCallA(MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY,ierr))
      PetscCallA(MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY,ierr))

      ! Create parallel vectors.
      ! - When using VecSetSizes(), we specify only the vector's global
      !   dimension; the parallel partitioning is determined at runtime.
      ! - Note: We form 1 vector from scratch and then duplicate as needed.

      PetscCallA( VecCreate(PETSC_COMM_WORLD,u,ierr))
      PetscCallA( VecSetSizes(u,PETSC_DECIDE,m*n,ierr))
      PetscCallA( VecSetFromOptions(u,ierr))
      PetscCallA( VecDuplicate(u,b,ierr))
      PetscCallA( VecDuplicate(b,x,ierr))

      ! Currently, all parallel PETSc vectors are partitioned by
      ! contiguous chunks across the processors.  Determine which
      ! range of entries is locally owned.

      PetscCallA( VecGetOwnershipRange(x,low,high,ierr))

      ! Set elements within the exact solution vector in parallel.
      ! - Each processor needs to insert only elements that it owns
      !   locally (but any non-local entries will be sent to the
      !   appropriate processor during vector assembly).
      ! - Always specify global locations of vector entries.
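      ! (u, b, and x were created by duplication from one another, so they share
      ! the same parallel layout; the local size and ownership range queried on x
      ! therefore apply equally to u.)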

      PetscCallA(VecGetLocalSize(x,ldim,ierr))
      do i=0,ldim-1
        iglobal = i + low
        v = real(i + 100*rank)
        PetscCallA(VecSetValues(u,one,iglobal,v,INSERT_VALUES,ierr))
      enddo

      ! Assemble vector, using the 2-step process:
      ! VecAssemblyBegin(), VecAssemblyEnd()
      ! Computations can be done while messages are in transition,
      ! by placing code between these two statements.
      PetscCallA( VecAssemblyBegin(u,ierr))
      PetscCallA( VecAssemblyEnd(u,ierr))

      ! Compute right-hand-side vector
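      ! b is formed as C*u, so the vector u filled above serves as the exact
      ! solution against which the computed solution is compared.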

      PetscCallA( MatMult(C,u,b,ierr))

      ! Create linear solver context

      PetscCallA( KSPCreate(PETSC_COMM_WORLD,ksp,ierr))

      ! Set operators. Here the matrix that defines the linear system
      ! also serves as the preconditioning matrix.

      PetscCallA( KSPSetOperators(ksp,C,C,ierr))

      ! Set runtime options (e.g., -ksp_type <type> -pc_type <type>)

      PetscCallA( KSPSetFromOptions(ksp,ierr))

      ! Solve linear system.  Here we explicitly call KSPSetUp() for more
      ! detailed performance monitoring of certain preconditioners, such
      ! as ICC and ILU.  This call is optional, as KSPSetUp() will
      ! automatically be called within KSPSolve() if it hasn't been
      ! called already.

      PetscCallA( KSPSetUp(ksp,ierr))

      ! Do not do this in application code; use the runtime options
      ! -ksp_gmres_modifiedgramschmidt or -ksp_gmres_classicalgramschmidt instead
      if (orthog .eq. 1) then
         PetscCallA(KSPGMRESSetOrthogonalization(ksp,KSPGMRESModifiedGramSchmidtOrthogonalization,ierr))
      else if (orthog .eq. 2) then
         PetscCallA(KSPGMRESSetOrthogonalization(ksp,KSPGMRESClassicalGramSchmidtOrthogonalization,ierr))
      endif

      PetscCallA( KSPSolve(ksp,b,x,ierr))

      ! Check the error: x is overwritten with x - u, the difference between the
      ! computed and exact solutions, and its norm is measured relative to ||b||
      PetscCallA(VecAXPY(x,myNone,u,ierr))
      PetscCallA(VecNorm(x,NORM_2,norm,ierr))
      PetscCallA(VecNorm(b,NORM_2,bnorm,ierr))

      PetscCallA(KSPGetIterationNumber(ksp,its,ierr))
      if (.not. testscaledMat .or. norm/bnorm > PETSC_SMALL) then
        write(outputString,'(a,f11.9,a,i2.2,a)') 'Relative norm of residual ',norm/bnorm,', Iterations ',its,'\n'
        PetscCallA(PetscPrintf(PETSC_COMM_WORLD,outputString,ierr))
      endif

      ! -------------- Stage 1: Solve Second System ----------------------

      ! Solve another linear system with the same method.  We reuse the KSP
      ! context, matrix and vector data structures, and hence save the
      ! overhead of creating new ones.

      ! Indicate to PETSc profiling that we're concluding the first
      ! stage with PetscLogStagePop(), and beginning the second stage with
      ! PetscLogStagePush().

      PetscCallA(PetscLogStagePop(ierr))
      PetscCallA(PetscLogStagePush(stages(1),ierr))

      ! Initialize all matrix entries to zero.  MatZeroEntries() retains the
      ! nonzero structure of the matrix for sparse formats.

      PetscCallA(MatZeroEntries(C,ierr))

      ! Assemble matrix again.  Note that we retain the same matrix data
      ! structure and the same nonzero pattern; we just change the values
      ! of the matrix entries.
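      ! This time each process inserts the entries for grid columns 2*rank and
      ! 2*rank+1, and the diagonal value is 6.0 rather than 4.0, so the numerical
      ! values differ from the first system while the sparsity pattern is identical.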

      do i=0,m-1
        do j=2*rank,2*rank+1
          v =-1.0; Ii=j + n*i
          if (i>0) then
            JJ = Ii - n
            PetscCallA(MatSetValues(C,one,Ii,one,JJ,v,ADD_VALUES,ierr))
          endif

          if (i<m-1) then
            JJ = Ii + n
            PetscCallA(MatSetValues(C,one,Ii,one,JJ,v,ADD_VALUES,ierr))
          endif

          if (j>0) then
            JJ = Ii - 1
            PetscCallA(MatSetValues(C,one,Ii,one,JJ,v,ADD_VALUES,ierr))
          endif

          if (j<n-1) then
            JJ = Ii + 1
            PetscCallA(MatSetValues(C,one,Ii,one,JJ,v,ADD_VALUES,ierr))
          endif

          v=6.0
          PetscCallA(MatSetValues(C,one,Ii,one,Ii,v,ADD_VALUES,ierr))
        enddo
      enddo

      ! Make the matrix nonsymmetric if desired

      if (mat_nonsymmetric) then
        do Ii=Istart,Iend-1
          v=-1.5;  i=Ii/n
          if (i>1) then
            JJ=Ii-n-1
            PetscCallA(MatSetValues(C,one,Ii,one,JJ,v,ADD_VALUES,ierr))
          endif
        enddo
      endif

      ! Assemble matrix, using the 2-step process:
      ! MatAssemblyBegin(), MatAssemblyEnd()
      ! Computations can be done while messages are in transition
      ! by placing code between these two statements.

      PetscCallA(MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY,ierr))
      PetscCallA(MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY,ierr))

      if (testscaledMat) then
        ! Scale a(0,0) and a(M-1,M-1)

        if (rank == 0) then
          v = 6.0*0.00001; Ii = 0; JJ = 0
          PetscCallA(MatSetValues(C,one,Ii,one,JJ,v,INSERT_VALUES,ierr))
        elseif (rank == size-1) then
          v = 6.0*0.00001; Ii = m*n-1; JJ = m*n-1
          PetscCallA(MatSetValues(C,one,Ii,one,JJ,v,INSERT_VALUES,ierr))
        endif

        PetscCallA(MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY,ierr))
        PetscCallA(MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY,ierr))

        ! Compute a new right-hand-side vector from a random exact solution

        PetscCallA( VecDestroy(u,ierr))
        PetscCallA( VecCreate(PETSC_COMM_WORLD,u,ierr))
        PetscCallA( VecSetSizes(u,PETSC_DECIDE,m*n,ierr))
        PetscCallA( VecSetFromOptions(u,ierr))

        PetscCallA( PetscRandomCreate(PETSC_COMM_WORLD,rctx,ierr))
        PetscCallA( PetscRandomSetFromOptions(rctx,ierr))
        PetscCallA( VecSetRandom(u,rctx,ierr))
        PetscCallA( PetscRandomDestroy(rctx,ierr))
        PetscCallA( VecAssemblyBegin(u,ierr))
        PetscCallA( VecAssemblyEnd(u,ierr))

      endif

      PetscCallA(PetscOptionsGetBool(PETSC_NULL_OPTIONS,PETSC_NULL_CHARACTER,'-test_newMat',testnewC,flg,ierr))

      if (testnewC) then
        ! The user may supply a new matrix C with the same nonzero pattern, e.g.
        ! ex5f -ksp_monitor -mat_type sbaij -pc_type cholesky -pc_factor_mat_solver_type mumps -test_newMat
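        ! (The duplicate/destroy/duplicate sequence below leaves C pointing at a
        ! brand-new Mat object holding the same values, so the KSP is handed a
        ! different matrix object than in the first solve.)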

        PetscCallA( MatDuplicate(C,MAT_COPY_VALUES,Ctmp,ierr))
        PetscCallA( MatDestroy(C,ierr))
        PetscCallA( MatDuplicate(Ctmp,MAT_COPY_VALUES,C,ierr))
        PetscCallA( MatDestroy(Ctmp,ierr))
      endif

      PetscCallA(MatMult(C,u,b,ierr))

      ! Set operators. Here the matrix that defines the linear system
      ! also serves as the preconditioning matrix.

      PetscCallA(KSPSetOperators(ksp,C,C,ierr))

      ! Solve linear system
      PetscCallA( KSPSetUp(ksp,ierr))
      PetscCallA( KSPSolve(ksp,b,x,ierr))
      ! Check the error, as above

      PetscCallA(VecAXPY(x,myNone,u,ierr))
      PetscCallA(VecNorm(x,NORM_2,norm,ierr))
      PetscCallA(VecNorm(b,NORM_2,bnorm,ierr))
      PetscCallA(KSPGetIterationNumber(ksp,its,ierr))
      if (.not. testscaledMat .or. norm/bnorm > PETSC_SMALL) then
        write(outputString,'(a,f11.9,a,i2.2,a)') 'Relative norm of residual ',norm/bnorm,', Iterations ',its,'\n'
        PetscCallA(PetscPrintf(PETSC_COMM_WORLD,outputString,ierr))
      endif

      ! Free work space.  All PETSc objects should be destroyed when they
      ! are no longer needed.

      PetscCallA( KSPDestroy(ksp,ierr))
      PetscCallA( VecDestroy(u,ierr))
      PetscCallA( VecDestroy(x,ierr))
      PetscCallA( VecDestroy(b,ierr))
      PetscCallA( MatDestroy(C,ierr))

      ! Indicate to PETSc profiling that we're concluding the second stage

      PetscCallA(PetscLogStagePop(ierr))
      PetscCallA(PetscFinalize(ierr))
end program main
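! The /*TEST ... TEST*/ block below is read by the PETSc test harness; each
! 'test:' entry describes one run configuration (optional suffix, number of MPI
! ranks, command-line arguments, required packages, and expected output file).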

!/*TEST
!
!   test:
!      args: -pc_type jacobi -ksp_monitor_short -ksp_gmres_cgs_refinement_type refine_always
!
!   test:
!      suffix: 2
!      nsize: 2
!      args: -pc_type jacobi -ksp_monitor_short -ksp_gmres_cgs_refinement_type refine_always -ksp_rtol .000001
!
!   test:
!      suffix: 5
!      nsize: 2
!      args: -ksp_gmres_cgs_refinement_type refine_always -ksp_monitor draw::draw_lg -ksp_monitor_true_residual draw::draw_lg
!      output_file: output/ex5f_5.out
!
!   test:
!      suffix: asm
!      nsize: 4
!      args: -pc_type asm
!      output_file: output/ex5f_asm.out
!
!   test:
!      suffix: asm_baij
!      nsize: 4
!      args: -pc_type asm -mat_type baij
!      output_file: output/ex5f_asm.out
!
!   test:
!      suffix: redundant_0
!      args: -m 1000 -pc_type redundant -pc_redundant_number 1 -redundant_ksp_type gmres -redundant_pc_type jacobi
!
!   test:
!      suffix: redundant_1
!      nsize: 5
!      args: -pc_type redundant -pc_redundant_number 1 -redundant_ksp_type gmres -redundant_pc_type jacobi
!
!   test:
!      suffix: redundant_2
!      nsize: 5
!      args: -pc_type redundant -pc_redundant_number 3 -redundant_ksp_type gmres -redundant_pc_type jacobi
!
!   test:
!      suffix: redundant_3
!      nsize: 5
!      args: -pc_type redundant -pc_redundant_number 5 -redundant_ksp_type gmres -redundant_pc_type jacobi
!
!   test:
!      suffix: redundant_4
!      nsize: 5
!      args: -pc_type redundant -pc_redundant_number 3 -redundant_ksp_type gmres -redundant_pc_type jacobi -psubcomm_type interlaced
!
!   test:
!      suffix: superlu_dist
!      nsize: 15
!      requires: superlu_dist
!      args: -pc_type lu -pc_factor_mat_solver_type superlu_dist -mat_superlu_dist_equil false -m 150 -mat_superlu_dist_r 3 -mat_superlu_dist_c 5 -test_scaledMat
!
!   test:
!      suffix: superlu_dist_2
!      nsize: 15
!      requires: superlu_dist
!      args: -pc_type lu -pc_factor_mat_solver_type superlu_dist -mat_superlu_dist_equil false -m 150 -mat_superlu_dist_r 3 -mat_superlu_dist_c 5 -test_scaledMat -mat_superlu_dist_fact SamePattern_SameRowPerm
!      output_file: output/ex5f_superlu_dist.out
!
!   test:
!      suffix: superlu_dist_3
!      nsize: 15
!      requires: superlu_dist
!      args: -pc_type lu -pc_factor_mat_solver_type superlu_dist -mat_superlu_dist_equil false -m 500 -mat_superlu_dist_r 3 -mat_superlu_dist_c 5 -test_scaledMat -mat_superlu_dist_fact DOFACT
!      output_file: output/ex5f_superlu_dist.out
!
!   test:
!      suffix: superlu_dist_0
!      nsize: 1
!      requires: superlu_dist
!      args: -pc_type lu -pc_factor_mat_solver_type superlu_dist -test_scaledMat
!      output_file: output/ex5f_superlu_dist.out
!
!   test:
!      suffix: orthog1
!      args: -orthog 1 -ksp_view
!
!   test:
!      suffix: orthog2
!      args: -orthog 2 -ksp_view
!
!TEST*/