! Actual source code: ex13f90.F90

  1: #include <petsc/finclude/petscksp.h>
  2: module ex13module
  3:   use petscksp
  4:   type User
  5:     Vec x
  6:     Vec b
  7:     Mat A
  8:     KSP ksp
  9:     PetscInt m
 10:     PetscInt n
 11:   end type User

 13: contains
 14: ! ----------------------------------------------------------------
 15:   subroutine UserInitializeLinearSolver(m, n, userctx, ierr)

 17:     PetscInt m, n
 18:     PetscErrorCode ierr
 19:     type(User) userctx

 21:     common/param/hx2, hy2
 22:     PetscReal hx2, hy2

 24: !  Local variable declararions
 25:     Mat A
 26:     Vec b, x
 27:     KSP ksp
 28:     PetscInt Ntot, five, one

 30: !  Here we assume use of a grid of size m x n, with all points on the
 31: !  interior of the domain, i.e., we do not include the points corresponding
 32: !  to homogeneous Dirichlet boundary conditions.  We assume that the domain
 33: !  is [0,1]x[0,1].

 35:     hx2 = (m + 1)*(m + 1)
 36:     hy2 = (n + 1)*(n + 1)
 37:     Ntot = m*n

 39:     five = 5
 40:     one = 1

 42: !  Create the sparse matrix. Preallocate 5 nonzeros per row.

 44:     PetscCall(MatCreateSeqAIJ(PETSC_COMM_SELF, Ntot, Ntot, five, PETSC_NULL_INTEGER_ARRAY, A, ierr))
 45: !
 46: !  Create vectors. Here we create vectors with no memory allocated.
 47: !  This way, we can use the data structures already in the program
 48: !  by using VecPlaceArray() subroutine at a later stage.
 49: !
 50:     PetscCall(VecCreateSeqWithArray(PETSC_COMM_SELF, one, Ntot, PETSC_NULL_SCALAR_ARRAY, b, ierr))
 51:     PetscCall(VecDuplicate(b, x, ierr))

 53: !  Create linear solver context. This will be used repeatedly for all
 54: !  the linear solves needed.

 56:     PetscCall(KSPCreate(PETSC_COMM_SELF, ksp, ierr))

 58:     userctx%x = x
 59:     userctx%b = b
 60:     userctx%A = A
 61:     userctx%ksp = ksp
 62:     userctx%m = m
 63:     userctx%n = n

 65:   end
! -----------------------------------------------------------------------

!   Solves -div (rho grad psi) = F using finite differences.
!   rho is a 2-dimensional array of size m by n, stored in Fortran
!   style by columns. userb is a standard one-dimensional array.
!
!   Assembles the variable-coefficient 5-point operator into userctx%A,
!   then solves with the KSP held in userctx, reading the right-hand side
!   from userb and writing the solution into userx (both plain Fortran
!   arrays of length m*n, grid points ordered by rows of the grid).

  subroutine UserDoLinearSolver(rho, userctx, userb, userx, ierr)

    PetscErrorCode ierr
    type(User) userctx
    PetscScalar rho(*), userb(*), userx(*)

!   hx2 = 1/hx^2 and hy2 = 1/hy^2, set by UserInitializeLinearSolver
    common/param/hx2, hy2
    PetscReal hx2, hy2

    PC pc
    KSP ksp
    Vec b, x
    Mat A
    PetscInt m, n, one
!   II is the 0-based global row index of the current grid point;
!   JJ is the 0-based column index of a stencil neighbor.
    PetscInt i, j, II, JJ
    PetscScalar v

!   Unpack the PETSc objects and grid sizes from the user context.
    one = 1
    x = userctx%x
    b = userctx%b
    A = userctx%A
    ksp = userctx%ksp
    m = userctx%m
    n = userctx%n

!  This is not the most efficient way of generating the matrix,
!  but let's not worry about it.  We should have separate code for
!  the four corners, each edge and then the interior. Then we won't
!  have the slow if-tests inside the loop.
!
!  Compute the operator
!          -div rho grad
!  on an m by n grid with zero Dirichlet boundary conditions. The rho
!  is assumed to be given on the same grid as the finite difference
!  stencil is applied.  For a staggered grid, one would have to change
!  things slightly.
!
!  Note: II/JJ are PETSc's 0-based indices, while rho is a 1-based
!  Fortran array, hence the rho(II + 1) / rho(JJ + 1) offsets below.
!  Each off-diagonal entry uses the arithmetic mean of rho at the two
!  points times 1/h^2 in the corresponding direction.

    II = 0
    do j = 1, n
      do i = 1, m
!       south neighbor (j-1), present unless on the bottom edge
        if (j > 1) then
          JJ = II - m
          v = -0.5*(rho(II + 1) + rho(JJ + 1))*hy2
          PetscCall(MatSetValues(A, one, [II], one, [JJ], [v], INSERT_VALUES, ierr))
        end if
!       north neighbor (j+1), present unless on the top edge
        if (j < n) then
          JJ = II + m
          v = -0.5*(rho(II + 1) + rho(JJ + 1))*hy2
          PetscCall(MatSetValues(A, one, [II], one, [JJ], [v], INSERT_VALUES, ierr))
        end if
!       west neighbor (i-1), present unless on the left edge
        if (i > 1) then
          JJ = II - 1
          v = -0.5*(rho(II + 1) + rho(JJ + 1))*hx2
          PetscCall(MatSetValues(A, one, [II], one, [JJ], [v], INSERT_VALUES, ierr))
        end if
!       east neighbor (i+1), present unless on the right edge
        if (i < m) then
          JJ = II + 1
          v = -0.5*(rho(II + 1) + rho(JJ + 1))*hx2
          PetscCall(MatSetValues(A, one, [II], one, [JJ], [v], INSERT_VALUES, ierr))
        end if
!       diagonal entry
        v = 2*rho(II + 1)*(hx2 + hy2)
        PetscCall(MatSetValues(A, one, [II], one, [II], [v], INSERT_VALUES, ierr))
        II = II + 1
      end do
    end do
!
!     Assemble matrix
!
    PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY, ierr))
    PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY, ierr))

!
!     Set operators. Here the matrix that defines the linear system
!     also serves as the matrix from which the preconditioner is constructed. Since all the matrices
!     will have the same nonzero pattern here, we indicate this so the
!     linear solvers can take advantage of this.
!
    PetscCall(KSPSetOperators(ksp, A, A, ierr))

!
!     Set linear solver defaults for this problem (optional).
!     - Here we set it to use direct LU factorization for the solution
!
    PetscCall(KSPGetPC(ksp, pc, ierr))
    PetscCall(PCSetType(pc, PCLU, ierr))

!
!     Set runtime options, e.g.,
!        -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
!     These options will override those specified above as long as
!     KSPSetFromOptions() is called _after_ any other customization
!     routines.
!
!     Run the program with the option -help to see all the possible
!     linear solver options.
!
    PetscCall(KSPSetFromOptions(ksp, ierr))

!
!     This allows the PETSc linear solvers to compute the solution
!     directly in the user's array rather than in the PETSc vector.
!
!     This is essentially a hack and not highly recommend unless you
!     are quite comfortable with using PETSc. In general, users should
!     write their entire application using PETSc vectors rather than
!     arrays.
!
    PetscCall(VecPlaceArray(x, userx, ierr))
    PetscCall(VecPlaceArray(b, userb, ierr))

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                      Solve the linear system
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    PetscCall(KSPSolve(ksp, b, x, ierr))

!   Detach the user arrays again so the vectors hold no stale pointers.
    PetscCall(VecResetArray(x, ierr))
    PetscCall(VecResetArray(b, ierr))
  end

! ------------------------------------------------------------------------
!  UserFinalizeLinearSolver - Destroys all PETSc objects stored in the
!  user context once no further linear solves are needed.
!
!  Input/Output:
!    userctx - context whose objects are destroyed
!    ierr    - PETSc error code (0 on success)
  subroutine UserFinalizeLinearSolver(userctx, ierr)
    implicit none

    PetscErrorCode ierr
    type(User) userctx

!
!     We are all done and don't need to solve any more linear systems, so
!     we free the work space.  All PETSc objects should be destroyed when
!     they are no longer needed.
!
    PetscCall(VecDestroy(userctx%x, ierr))
    PetscCall(VecDestroy(userctx%b, ierr))
    PetscCall(MatDestroy(userctx%A, ierr))
    PetscCall(KSPDestroy(userctx%ksp, ierr))
  end subroutine UserFinalizeLinearSolver

210: end module

program main
  use ex13module
  implicit none

!    User-defined context that contains all the data structures used
!    in the linear solution process.

!   Vec    x,b      /* solution vector, right-hand side vector and work vector */
!   Mat    A        /* sparse matrix */
!   KSP   ksp     /* linear solver context */
!   int    m,n      /* grid dimensions */
!
!   Since we cannot store Scalars and integers in the same context,
!   we store the integers/pointers in the user-defined context, and
!   the scalar values are carried in the common block.
!   The scalar values in this simplistic example could easily
!   be recalculated in each routine, where they are needed.
!
!   Scalar hx2,hy2  /* (m+1)*(m+1) = 1/hx^2 and (n+1)*(n+1) = 1/hy^2 */

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                   Variable declarations
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

  PetscScalar hx, hy, x, y
  type(User) userctx
  PetscErrorCode ierr
  PetscInt m, n, t, tmax, i, j
  PetscBool flg
  PetscMPIInt size
  PetscReal enorm
  PetscScalar cnorm
! 2-D work arrays on the m x n interior grid: computed solution,
! right-hand side, exact solution, and the coefficient rho(x,y)
  PetscScalar, allocatable :: userx(:, :)
  PetscScalar, allocatable :: userb(:, :)
  PetscScalar, allocatable :: solution(:, :)
  PetscScalar, allocatable :: rho(:, :)

! Shared with the module routines; set in UserInitializeLinearSolver
  PetscReal hx2, hy2
  common/param/hx2, hy2

  tmax = 2
  m = 6
  n = 7

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                 Beginning of program
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

  PetscCallA(PetscInitialize(ierr))
  PetscCallMPIA(MPI_Comm_size(PETSC_COMM_WORLD, size, ierr))
  PetscCheckA(size == 1, PETSC_COMM_WORLD, PETSC_ERR_WRONG_MPI_SIZE, 'This is a uniprocessor example only')

!  The next two lines are for testing only; these allow the user to
!  decide the grid size at runtime.

  PetscCallA(PetscOptionsGetInt(PETSC_NULL_OPTIONS, PETSC_NULL_CHARACTER, '-m', m, flg, ierr))
  PetscCallA(PetscOptionsGetInt(PETSC_NULL_OPTIONS, PETSC_NULL_CHARACTER, '-n', n, flg, ierr))

!  Create the empty sparse matrix and linear solver data structures

  PetscCallA(UserInitializeLinearSolver(m, n, userctx, ierr))

!  Allocate arrays to hold the solution to the linear system.  This
!  approach is not normally done in PETSc programs, but in this case,
!  since we are calling these routines from a non-PETSc program, we
!  would like to reuse the data structures from another code. So in
!  the context of a larger application these would be provided by
!  other (non-PETSc) parts of the application code.

  allocate (userx(m, n), userb(m, n), solution(m, n))

!  Allocate an array to hold the coefficients in the elliptic operator

  allocate (rho(m, n))

!  Fill up the array rho[] with the function rho(x,y) = x; fill the
!  right-hand side b and the solution with a known problem for testing.
!  The exact solution is sin(2*pi*x)*sin(2*pi*y), and b is the
!  corresponding right-hand side -div(x grad psi) evaluated analytically.

  hx = 1.0/real(m + 1)
  hy = 1.0/real(n + 1)
  y = hy
  do j = 1, n
    x = hx
    do i = 1, m
      rho(i, j) = x
      solution(i, j) = sin(2.*PETSC_PI*x)*sin(2.*PETSC_PI*y)
      userb(i, j) = -2.*PETSC_PI*cos(2.*PETSC_PI*x)*sin(2.*PETSC_PI*y) + 8*PETSC_PI*PETSC_PI*x*sin(2.*PETSC_PI*x)*sin(2.*PETSC_PI*y)
      x = x + hx
    end do
    y = y + hy
  end do

!  Loop over a bunch of timesteps, setting up and solving the linear
!  system for each time-step.
!  Note that this loop is somewhat artificial. It is intended to
!  demonstrate how one may reuse the linear solvers in each time-step.

  do t = 1, tmax
    PetscCallA(UserDoLinearSolver(rho, userctx, userb, userx, ierr))

!        Compute error: Note that this could (and usually should) all be done
!        using the PETSc vector operations. Here we demonstrate using more
!        standard programming practices to show how they may be mixed with
!        PETSc.  cnorm accumulates |solution - userx|^2 over the grid
!        (PetscConj makes this correct for complex scalars too).
    cnorm = 0.0
    do j = 1, n
      do i = 1, m
        cnorm = cnorm + PetscConj(solution(i, j) - userx(i, j))*(solution(i, j) - userx(i, j))
      end do
    end do
!        Scale by the cell area hx*hy to get a discrete L2 norm (squared).
    enorm = PetscRealPart(cnorm*hx*hy)
    write (6, 115) m, n, enorm
115 format('m = ', I2, ' n = ', I2, ' error norm = ', 1PE11.4)
  end do

!  We are finished solving linear systems, so we clean up the
!  data structures.

  deallocate (userx, userb, solution, rho)

  PetscCallA(UserFinalizeLinearSolver(userctx, ierr))
  PetscCallA(PetscFinalize(ierr))
end
335: !
336: !/*TEST
337: !
338: !   test:
339: !      args: -m 19 -n 20 -ksp_gmres_cgs_refinement_type refine_always
340: !      output_file: output/ex13f90_1.out
341: !
342: !TEST*/