Actual source code: ex13f90.F90

#include <petsc/finclude/petscksp.h>
module ex13module
  use petscksp

  implicit none
  PetscReal :: hx2, hy2
  type User
    Vec x
    Vec b
    Mat A
    KSP ksp
    PetscInt m
    PetscInt n
  end type User

contains
! ----------------------------------------------------------------
  subroutine UserInitializeLinearSolver(m, n, userctx, ierr)

    PetscInt, intent(in) :: m, n
    PetscErrorCode, intent(out) :: ierr
    type(User) userctx

!  Local variable declarations
    Mat A
    Vec b, x
    KSP ksp
    PetscInt Ntot

!  Here we assume use of a grid of size m x n, with all points on the
!  interior of the domain, i.e., we do not include the points corresponding
!  to homogeneous Dirichlet boundary conditions.  We assume that the domain
!  is [0,1]x[0,1].
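!
!  hx2 and hy2 are the reciprocal squared grid spacings: with hx = 1/(m+1)
!  and hy = 1/(n+1), hx2 = 1/hx^2 = (m+1)^2 and hy2 = 1/hy^2 = (n+1)^2.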

    hx2 = (m + 1)**2
    hy2 = (n + 1)**2
    Ntot = m*n

!  Create the sparse matrix. Preallocate 5 nonzeros per row.
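!  The 5-point stencil used below couples each grid point to itself and
!  at most four of its neighbors, so five nonzeros per row suffice.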

    PetscCall(MatCreateSeqAIJ(PETSC_COMM_SELF, Ntot, Ntot, 5_PETSC_INT_KIND, PETSC_NULL_INTEGER_ARRAY, A, ierr))
!
!  Create vectors. Here we create vectors with no memory allocated.
!  This way, we can use the data structures already in the program
!  by using the VecPlaceArray() subroutine at a later stage.
!
    PetscCall(VecCreateSeqWithArray(PETSC_COMM_SELF, 1_PETSC_INT_KIND, Ntot, PETSC_NULL_SCALAR_ARRAY, b, ierr))
    PetscCall(VecDuplicate(b, x, ierr))

!  Create linear solver context. This will be used repeatedly for all
!  the linear solves needed.

    PetscCall(KSPCreate(PETSC_COMM_SELF, ksp, ierr))

    userctx%x = x
    userctx%b = b
    userctx%A = A
    userctx%ksp = ksp
    userctx%m = m
    userctx%n = n

  end
! -----------------------------------------------------------------------

!   Solves -div (rho grad psi) = F using finite differences.
!   rho is a 2-dimensional array of size m by n, stored in Fortran
!   style by columns. userb is a standard one-dimensional array.

  subroutine UserDoLinearSolver(rho, userctx, userb, userx, ierr)

    PetscErrorCode, intent(out) :: ierr
    type(User) userctx
    PetscScalar rho(*), userb(*), userx(*)

    PC pc
    KSP ksp
    Vec b, x
    Mat A
    PetscInt m, n
    PetscInt i, j, II, JJ
    PetscScalar v

    x = userctx%x
    b = userctx%b
    A = userctx%A
    ksp = userctx%ksp
    m = userctx%m
    n = userctx%n

!  This is not the most efficient way of generating the matrix, but let's
!  not worry about that here.  Ideally there would be separate code for
!  the four corners, each edge, and the interior, so that the slow
!  if-tests are kept out of the loop.
!
!  Compute the operator
!          -div (rho grad)
!  on an m by n grid with zero Dirichlet boundary conditions. rho is
!  assumed to be given on the same grid on which the finite difference
!  stencil is applied.  For a staggered grid, one would have to change
!  things slightly.
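!
!  Specifically, for the interior point (i,j) with 0-based row index
!  II = (j-1)*m + (i-1), the loop below inserts
!      A(II,II)   =  2*rho(i,j)*(hx2 + hy2)
!      A(II,II-1) = -0.5*(rho(i,j) + rho(i-1,j))*hx2    (if i > 1)
!      A(II,II+1) = -0.5*(rho(i,j) + rho(i+1,j))*hx2    (if i < m)
!      A(II,II-m) = -0.5*(rho(i,j) + rho(i,j-1))*hy2    (if j > 1)
!      A(II,II+m) = -0.5*(rho(i,j) + rho(i,j+1))*hy2    (if j < n)
!  Note that MatSetValues() takes 0-based row/column indices, while the
!  Fortran array rho is addressed with the 1-based flat index II+1.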

    II = 0
    do j = 1, n
      do i = 1, m
        if (j > 1) then
          JJ = II - m
          v = -0.5*(rho(II + 1) + rho(JJ + 1))*hy2
          PetscCall(MatSetValues(A, 1_PETSC_INT_KIND, [II], 1_PETSC_INT_KIND, [JJ], [v], INSERT_VALUES, ierr))
        end if
        if (j < n) then
          JJ = II + m
          v = -0.5*(rho(II + 1) + rho(JJ + 1))*hy2
          PetscCall(MatSetValues(A, 1_PETSC_INT_KIND, [II], 1_PETSC_INT_KIND, [JJ], [v], INSERT_VALUES, ierr))
        end if
        if (i > 1) then
          JJ = II - 1
          v = -0.5*(rho(II + 1) + rho(JJ + 1))*hx2
          PetscCall(MatSetValues(A, 1_PETSC_INT_KIND, [II], 1_PETSC_INT_KIND, [JJ], [v], INSERT_VALUES, ierr))
        end if
        if (i < m) then
          JJ = II + 1
          v = -0.5*(rho(II + 1) + rho(JJ + 1))*hx2
          PetscCall(MatSetValues(A, 1_PETSC_INT_KIND, [II], 1_PETSC_INT_KIND, [JJ], [v], INSERT_VALUES, ierr))
        end if
        v = 2*rho(II + 1)*(hx2 + hy2)
        PetscCall(MatSetValues(A, 1_PETSC_INT_KIND, [II], 1_PETSC_INT_KIND, [II], [v], INSERT_VALUES, ierr))
        II = II + 1
      end do
    end do
!
!     Assemble matrix
!
    PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY, ierr))
    PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY, ierr))

!
!     Set operators. Here the matrix that defines the linear system also
!     serves as the matrix from which the preconditioner is constructed.
!     Since the matrix keeps the same nonzero pattern for every solve,
!     the linear solver can reuse its data structures across solves.
!
    PetscCall(KSPSetOperators(ksp, A, A, ierr))

!
!     Set linear solver defaults for this problem (optional).
!     - Here we set it to use direct LU factorization for the solution
!
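!     With an LU preconditioner the (default) Krylov method converges in
!     essentially one iteration, so this amounts to a direct solve.
!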
    PetscCall(KSPGetPC(ksp, pc, ierr))
    PetscCall(PCSetType(pc, PCLU, ierr))

!
!     Set runtime options, e.g.,
!        -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
!     These options will override those specified above as long as
!     KSPSetFromOptions() is called _after_ any other customization
!     routines.
!
!     Run the program with the option -help to see all the possible
!     linear solver options.
!
    PetscCall(KSPSetFromOptions(ksp, ierr))

!
!     This allows the PETSc linear solvers to compute the solution
!     directly in the user's array rather than in the PETSc vector.
!
!     This is essentially a hack and not highly recommended unless you
!     are quite comfortable with using PETSc. In general, users should
!     write their entire application using PETSc vectors rather than
!     arrays.
!
    PetscCall(VecPlaceArray(x, userx, ierr))
    PetscCall(VecPlaceArray(b, userb, ierr))

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                      Solve the linear system
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    PetscCall(KSPSolve(ksp, b, x, ierr))

    PetscCall(VecResetArray(x, ierr))
    PetscCall(VecResetArray(b, ierr))
  end

! ------------------------------------------------------------------------

  subroutine UserFinalizeLinearSolver(userctx, ierr)

    PetscErrorCode, intent(out) :: ierr
    type(User) userctx

!
!     We are all done and don't need to solve any more linear systems, so
!     we free the work space.  All PETSc objects should be destroyed when
!     they are no longer needed.
!
    PetscCall(VecDestroy(userctx%x, ierr))
    PetscCall(VecDestroy(userctx%b, ierr))
    PetscCall(MatDestroy(userctx%A, ierr))
    PetscCall(KSPDestroy(userctx%ksp, ierr))
  end

end module ex13module

program main
  use ex13module
!    User-defined context that contains all the data structures used
!    in the linear solution process.

!   Vec      x,b      /* solution vector and right-hand side vector */
!   Mat      A        /* sparse matrix */
!   KSP      ksp      /* linear solver context */
!   PetscInt m,n      /* grid dimensions */
!
!   The PETSc objects and the integer grid dimensions are stored in the
!   user-defined context, while the scalar grid factors hx2 and hy2 are
!   kept as module variables.  The scalar values in this simplistic
!   example could easily be recalculated in each routine where they are
!   needed.
!
!   PetscReal hx2,hy2  /* 1/hx^2 = (m+1)^2 and 1/hy^2 = (n+1)^2 */

  implicit none
  PetscScalar hx, hy, x, y
  type(User) userctx
  PetscErrorCode ierr
  PetscInt m, n, t, i, j
  PetscInt, parameter :: tmax = 2
  PetscBool flg
  PetscMPIInt size
  PetscReal enorm
  PetscScalar cnorm
  PetscScalar, allocatable :: userx(:, :)
  PetscScalar, allocatable :: userb(:, :)
  PetscScalar, allocatable :: solution(:, :)
  PetscScalar, allocatable :: rho(:, :)

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                 Beginning of program
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

  PetscCallA(PetscInitialize(ierr))
  PetscCallMPIA(MPI_Comm_size(PETSC_COMM_WORLD, size, ierr))
  PetscCheckA(size == 1, PETSC_COMM_WORLD, PETSC_ERR_WRONG_MPI_SIZE, 'This is a uniprocessor example only')

!  The next statements are for testing only; they allow the user to
!  decide the grid size at runtime.
  m = 6
  PetscCallA(PetscOptionsGetInt(PETSC_NULL_OPTIONS, PETSC_NULL_CHARACTER, '-m', m, flg, ierr))
  n = 7
  PetscCallA(PetscOptionsGetInt(PETSC_NULL_OPTIONS, PETSC_NULL_CHARACTER, '-n', n, flg, ierr))

!  Create the empty sparse matrix and linear solver data structures

  PetscCallA(UserInitializeLinearSolver(m, n, userctx, ierr))

!  Allocate arrays to hold the solution to the linear system.  This is
!  not normally done in PETSc programs, but in this case, since we are
!  calling these routines from a non-PETSc program, we would like to
!  reuse data structures that already exist in the calling code.  In the
!  context of a larger application, these arrays would be provided by
!  other (non-PETSc) parts of the application code.
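!
!  The 2-D Fortran arrays are stored by columns, so their memory layout
!  matches the natural grid ordering (i varying fastest) used to number
!  the unknowns in UserDoLinearSolver(); this is what allows them to be
!  attached directly to the PETSc vectors with VecPlaceArray().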

  allocate (userx(m, n), userb(m, n), solution(m, n))

!  Allocate an array to hold the coefficients in the elliptic operator

  allocate (rho(m, n))

!  Fill the array rho() with the function rho(x,y) = x; fill the
!  right-hand side b and the solution with a known problem for testing.
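!
!  The right-hand side below is the forcing that corresponds to the exact
!  solution psi(x,y) = sin(2*pi*x)*sin(2*pi*y) with rho(x,y) = x:
!      F = -div (x grad psi)
!        = -2*pi*cos(2*pi*x)*sin(2*pi*y) + 8*pi^2*x*sin(2*pi*x)*sin(2*pi*y)
!  and psi vanishes on the boundary of [0,1]x[0,1], matching the
!  homogeneous Dirichlet conditions.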

  hx = 1.0/real(m + 1)
  hy = 1.0/real(n + 1)
  y = hy
  do j = 1, n
    x = hx
    do i = 1, m
      rho(i, j) = x
      solution(i, j) = sin(2.*PETSC_PI*x)*sin(2.*PETSC_PI*y)
      userb(i, j) = -2.*PETSC_PI*cos(2.*PETSC_PI*x)*sin(2.*PETSC_PI*y) + 8*PETSC_PI*PETSC_PI*x*sin(2.*PETSC_PI*x)*sin(2.*PETSC_PI*y)
      x = x + hx
    end do
    y = y + hy
  end do

!  Loop over a number of time steps, setting up and solving the linear
!  system for each time step.
!  Note that this loop is somewhat artificial. It is intended to
!  demonstrate how one may reuse the linear solver in each time step.

  do t = 1, tmax
    PetscCallA(UserDoLinearSolver(rho, userctx, userb, userx, ierr))

!   Compute error: Note that this could (and usually should) all be done
!   using the PETSc vector operations. Here we demonstrate using more
!   standard programming practices to show how they may be mixed with
!   PETSc.
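!   cnorm accumulates the sum of |solution - userx|^2 over the grid, so
!   enorm = cnorm*hx*hy approximates the square of the continuous L2 norm
!   of the error over the unit square.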
    cnorm = 0.0
    do j = 1, n
      do i = 1, m
        cnorm = cnorm + PetscConj(solution(i, j) - userx(i, j))*(solution(i, j) - userx(i, j))
      end do
    end do
    enorm = PetscRealPart(cnorm*hx*hy)
    write (6, 115) m, n, enorm
115 format('m = ', I2, ' n = ', I2, ' error norm = ', 1PE11.4)
  end do

!  We are finished solving linear systems, so we clean up the
!  data structures.

  deallocate (userx, userb, solution, rho)

  PetscCallA(UserFinalizeLinearSolver(userctx, ierr))
  PetscCallA(PetscFinalize(ierr))
end
!
!/*TEST
!
!   test:
!      args: -m 19 -n 20 -ksp_gmres_cgs_refinement_type refine_always
!      output_file: output/ex13f90_1.out
!
!TEST*/