Actual source code: ex13f90.F90

!
!
! -----------------------------------------------------------------------

      module UserModule
#include <petsc/finclude/petscksp.h>
        use petscksp
        type User
          Vec x
          Vec b
          Mat A
          KSP ksp
          PetscInt m
          PetscInt n
        end type User
      end module

      program main
      use UserModule
      implicit none

!    User-defined context that contains all the data structures used
!    in the linear solution process.

!   Vec    x,b      /* solution vector and right-hand-side vector */
!   Mat    A        /* sparse matrix */
!   KSP    ksp      /* linear solver context */
!   PetscInt m,n    /* grid dimensions */
!
!   Since we cannot store Scalars and integers in the same context,
!   we store the integers/pointers in the user-defined context, and
!   the scalar values are carried in the common block.
!   The scalar values in this simplistic example could easily
!   be recalculated in each routine, where they are needed.
!
!   Scalar hx2,hy2  /* 1/(hx*hx) and 1/(hy*hy), i.e. (m+1)^2 and (n+1)^2 */

!  Note: Any user-defined Fortran routines MUST be declared as external.

      external UserInitializeLinearSolver
      external UserFinalizeLinearSolver
      external UserDoLinearSolver

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                   Variable declarations
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

      PetscScalar  hx,hy,x,y
      type(User) userctx
      PetscErrorCode ierr
      PetscInt m,n,t,tmax,i,j
      PetscBool  flg
      PetscMPIInt size
      PetscReal  enorm
      PetscScalar cnorm
      PetscScalar,ALLOCATABLE :: userx(:,:)
      PetscScalar,ALLOCATABLE :: userb(:,:)
      PetscScalar,ALLOCATABLE :: solution(:,:)
      PetscScalar,ALLOCATABLE :: rho(:,:)

      PetscReal hx2,hy2
      common /param/ hx2,hy2

      tmax = 2
      m = 6
      n = 7

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                 Beginning of program
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

      call PetscInitialize(PETSC_NULL_CHARACTER,ierr)
      if (ierr .ne. 0) then
        print*,'Unable to initialize PETSc'
        stop
      endif
      call MPI_Comm_size(PETSC_COMM_WORLD,size,ierr)
      if (size .ne. 1) then; SETERRA(PETSC_COMM_WORLD,PETSC_ERR_WRONG_MPI_SIZE,'This is a uniprocessor example only'); endif

!  The next two lines are for testing only; these allow the user to
!  decide the grid size at runtime.

      call PetscOptionsGetInt(PETSC_NULL_OPTIONS,PETSC_NULL_CHARACTER,'-m',m,flg,ierr);CHKERRA(ierr)
      call PetscOptionsGetInt(PETSC_NULL_OPTIONS,PETSC_NULL_CHARACTER,'-n',n,flg,ierr);CHKERRA(ierr)
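!  For example (assuming the executable is named ex13f90), the test at
!  the bottom of this file runs on a 19 x 20 grid via:
!     ./ex13f90 -m 19 -n 20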

!  Create the empty sparse matrix and linear solver data structures

      call UserInitializeLinearSolver(m,n,userctx,ierr);CHKERRA(ierr)

!  Allocate arrays to hold the solution to the linear system.  This
!  approach is not normally done in PETSc programs, but in this case,
!  since we are calling these routines from a non-PETSc program, we
!  would like to reuse the data structures from another code. So in
!  the context of a larger application these would be provided by
!  other (non-PETSc) parts of the application code.

      ALLOCATE (userx(m,n),userb(m,n),solution(m,n))

!  Allocate an array to hold the coefficients in the elliptic operator

      ALLOCATE (rho(m,n))

!  Fill the array rho with the coefficient function rho(x,y) = x; fill
!  the right-hand side userb and the array solution with a known exact
!  solution for testing.
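!  (With the manufactured solution u = sin(2*pi*x)*sin(2*pi*y) and
!  rho(x,y) = x, a short calculation gives the forcing term
!     f = -div(rho grad u)
!       = -2*pi*cos(2*pi*x)*sin(2*pi*y) + 8*pi*pi*x*sin(2*pi*x)*sin(2*pi*y)
!  which is exactly the expression assigned to userb below.)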

      hx = 1.0/real(m+1)
      hy = 1.0/real(n+1)
      y  = hy
      do 20 j=1,n
         x = hx
         do 10 i=1,m
            rho(i,j)      = x
            solution(i,j) = sin(2.*PETSC_PI*x)*sin(2.*PETSC_PI*y)
            userb(i,j)    = -2.*PETSC_PI*cos(2.*PETSC_PI*x)*sin(2.*PETSC_PI*y) +                                &
     &                      8*PETSC_PI*PETSC_PI*x*sin(2.*PETSC_PI*x)*sin(2.*PETSC_PI*y)
            x = x + hx
 10      continue
         y = y + hy
 20   continue

!  Loop over a number of timesteps, setting up and solving the linear
!  system for each timestep.
!  Note that this loop is somewhat artificial. It is intended to
!  demonstrate how one may reuse the linear solvers in each timestep.

      do 100 t=1,tmax
         call UserDoLinearSolver(rho,userctx,userb,userx,ierr);CHKERRA(ierr)

!        Compute error: Note that this could (and usually should) all be done
!        using the PETSc vector operations. Here we demonstrate using more
!        standard programming practices to show how they may be mixed with
!        PETSc.
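!        The quantity enorm computed below is the square of the discrete
!        L2 norm of the error: the sum of |solution - userx|^2 over all
!        grid points, scaled by the cell area hx*hy.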
         cnorm = 0.0
         do 90 j=1,n
            do 80 i=1,m
              cnorm = cnorm + PetscConj(solution(i,j)-userx(i,j))*(solution(i,j)-userx(i,j))
 80         continue
 90      continue
         enorm = PetscRealPart(cnorm*hx*hy)
         write(6,115) m,n,enorm
 115     format ('m = ',I2,' n = ',I2,' error norm = ',1PE11.4)
 100  continue

!  We are finished solving linear systems, so we clean up the
!  data structures.

      DEALLOCATE (userx,userb,solution,rho)

      call UserFinalizeLinearSolver(userctx,ierr);CHKERRA(ierr)
      call PetscFinalize(ierr)
      end

! ----------------------------------------------------------------
      subroutine UserInitializeLinearSolver(m,n,userctx,ierr)
      use UserModule
      implicit none

      PetscInt m,n
      PetscErrorCode ierr
      type(User) userctx

      common /param/ hx2,hy2
      PetscReal hx2,hy2

!  Local variable declarations
      Mat      A
      Vec      b,x
      KSP      ksp
      PetscInt Ntot,five,one

!  Here we assume use of a grid of size m x n, with all points on the
!  interior of the domain, i.e., we do not include the points corresponding
!  to homogeneous Dirichlet boundary conditions.  We assume that the domain
!  is [0,1]x[0,1].
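!  With m interior points in x and n interior points in y, the mesh
!  spacings are hx = 1/(m+1) and hy = 1/(n+1), so the quantities set
!  below are hx2 = 1/hx^2 = (m+1)^2 and hy2 = 1/hy^2 = (n+1)^2.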

      hx2 = (m+1)*(m+1)
      hy2 = (n+1)*(n+1)
      Ntot = m*n

      five = 5
      one = 1

!  Create the sparse matrix. Preallocate 5 nonzeros per row.
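!  (The 5-point stencil assembled in UserDoLinearSolver() couples each
!  grid point to itself and at most four neighbors, hence 5 nonzeros
!  per row.)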

      call MatCreateSeqAIJ(PETSC_COMM_SELF,Ntot,Ntot,five,PETSC_NULL_INTEGER,A,ierr);CHKERRQ(ierr)
!
!  Create vectors. Here we create vectors with no memory allocated.
!  This way, we can use the data structures already in the program
!  by using the VecPlaceArray() subroutine at a later stage.
!
      call VecCreateSeqWithArray(PETSC_COMM_SELF,one,Ntot,PETSC_NULL_SCALAR,b,ierr);CHKERRQ(ierr)
      call VecDuplicate(b,x,ierr);CHKERRQ(ierr)

!  Create linear solver context. This will be used repeatedly for all
!  the linear solves needed.

      call KSPCreate(PETSC_COMM_SELF,ksp,ierr);CHKERRQ(ierr)

      userctx%x = x
      userctx%b = b
      userctx%A = A
      userctx%ksp = ksp
      userctx%m = m
      userctx%n = n

      return
      end
! -----------------------------------------------------------------------

!   Solves -div (rho grad psi) = F using finite differences.
!   rho is a 2-dimensional array of size m by n, stored in Fortran
!   style by columns. userb is a standard one-dimensional array.

      subroutine UserDoLinearSolver(rho,userctx,userb,userx,ierr)
      use UserModule
      implicit none

      PetscErrorCode ierr
      type(User) userctx
      PetscScalar rho(*),userb(*),userx(*)

      common /param/ hx2,hy2
      PetscReal hx2,hy2

      PC   pc
      KSP  ksp
      Vec  b,x
      Mat  A
      PetscInt m,n,one
      PetscInt i,j,II,JJ
      PetscScalar v

      one  = 1
      x    = userctx%x
      b    = userctx%b
      A    = userctx%A
      ksp  = userctx%ksp
      m    = userctx%m
      n    = userctx%n

!  This is not the most efficient way of generating the matrix,
!  but let's not worry about it.  We should have separate code for
!  the four corners, each edge, and then the interior; then we would
!  not have the slow if-tests inside the loop.
!
!  Compute the operator
!          -div rho grad
!  on an m by n grid with zero Dirichlet boundary conditions. rho is
!  assumed to be given at the same grid points where the finite
!  difference stencil is applied.  For a staggered grid, one would
!  have to change things slightly.
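!  In the loop below, the off-diagonal coupling between node II and a
!  neighbor JJ uses the arithmetic average of rho at the two nodes,
!  -0.5*(rho(II+1)+rho(JJ+1)), scaled by 1/hx^2 or 1/hy^2, and the
!  diagonal entry is 2*rho(II+1)*(hx2+hy2); for constant rho this
!  reduces to the standard 5-point Laplacian stencil.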

      II = 0
      do 110 j=1,n
         do 100 i=1,m
            if (j .gt. 1) then
               JJ = II - m
               v = -0.5*(rho(II+1) + rho(JJ+1))*hy2
               call MatSetValues(A,one,II,one,JJ,v,INSERT_VALUES,ierr);CHKERRQ(ierr)
            endif
            if (j .lt. n) then
               JJ = II + m
               v = -0.5*(rho(II+1) + rho(JJ+1))*hy2
               call MatSetValues(A,one,II,one,JJ,v,INSERT_VALUES,ierr);CHKERRQ(ierr)
            endif
            if (i .gt. 1) then
               JJ = II - 1
               v = -0.5*(rho(II+1) + rho(JJ+1))*hx2
               call MatSetValues(A,one,II,one,JJ,v,INSERT_VALUES,ierr);CHKERRQ(ierr)
            endif
            if (i .lt. m) then
               JJ = II + 1
               v = -0.5*(rho(II+1) + rho(JJ+1))*hx2
               call MatSetValues(A,one,II,one,JJ,v,INSERT_VALUES,ierr);CHKERRQ(ierr)
            endif
            v = 2*rho(II+1)*(hx2+hy2)
            call MatSetValues(A,one,II,one,II,v,INSERT_VALUES,ierr);CHKERRQ(ierr)
            II = II+1
 100     continue
 110  continue
!
!     Assemble matrix
!
      call MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY,ierr);CHKERRQ(ierr)
      call MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY,ierr);CHKERRQ(ierr)

!
!     Set operators. Here the matrix that defines the linear system
!     also serves as the matrix from which the preconditioner is
!     constructed.
!
      call KSPSetOperators(ksp,A,A,ierr);CHKERRQ(ierr)

!
!     Set linear solver defaults for this problem (optional).
!     - Here we set it to use direct LU factorization for the solution.
!
      call KSPGetPC(ksp,pc,ierr);CHKERRQ(ierr)
      call PCSetType(pc,PCLU,ierr);CHKERRQ(ierr)

!
!     Set runtime options, e.g.,
!        -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
!     These options will override those specified above as long as
!     KSPSetFromOptions() is called _after_ any other customization
!     routines.
!
!     Run the program with the option -help to see all the possible
!     linear solver options.
!
      call KSPSetFromOptions(ksp,ierr);CHKERRQ(ierr)
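!
!     For example (assuming the executable is named ex13f90), one could
!     replace the LU direct solve set above with GMRES and ILU, and
!     monitor the residual, by running:
!        ./ex13f90 -ksp_type gmres -pc_type ilu -ksp_monitor
!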

!
!     This allows the PETSc linear solvers to compute the solution
!     directly in the user's array rather than in the PETSc vector.
!
!     This is essentially a hack and not highly recommended unless you
!     are quite comfortable with using PETSc. In general, users should
!     write their entire application using PETSc vectors rather than
!     arrays.
!
      call VecPlaceArray(x,userx,ierr);CHKERRQ(ierr)
      call VecPlaceArray(b,userb,ierr);CHKERRQ(ierr)
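!     Note: the matching VecResetArray() calls after the solve restore
!     the vectors' original (empty) arrays, ensuring PETSc no longer
!     references the user arrays once the solve is complete.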

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                      Solve the linear system
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

      call KSPSolve(ksp,b,x,ierr);CHKERRQ(ierr)

      call VecResetArray(x,ierr);CHKERRQ(ierr)
      call VecResetArray(b,ierr);CHKERRQ(ierr)

      return
      end

! ------------------------------------------------------------------------

      subroutine UserFinalizeLinearSolver(userctx,ierr)
      use UserModule
      implicit none

      PetscErrorCode ierr
      type(User) userctx

!
!     We are all done and don't need to solve any more linear systems, so
!     we free the work space.  All PETSc objects should be destroyed when
!     they are no longer needed.
!
      call VecDestroy(userctx%x,ierr);CHKERRQ(ierr)
      call VecDestroy(userctx%b,ierr);CHKERRQ(ierr)
      call MatDestroy(userctx%A,ierr);CHKERRQ(ierr)
      call KSPDestroy(userctx%ksp,ierr);CHKERRQ(ierr)

      return
      end

!
!/*TEST
!
!   test:
!      args: -m 19 -n 20 -ksp_gmres_cgs_refinement_type refine_always
!      output_file: output/ex13f90_1.out
!
!TEST*/