Actual source code: ex13f90.F
!
!/*T
!   Concepts: KSP^basic sequential example
!   Concepts: KSP^Laplacian, 2d
!   Concepts: Laplacian, 2d
!   Processors: 1
!T*/
! -----------------------------------------------------------------------

      program main
      implicit none

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                          Include files
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!
!  The following include statements are required for KSP Fortran programs:
!     petsc.h    - base PETSc routines
!     petscvec.h - vectors
!     petscmat.h - matrices
!     petscksp.h - Krylov subspace methods
!     petscpc.h  - preconditioners
!
#include "include/finclude/petsc.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscksp.h"
#include "include/finclude/petscpc.h"

!  User-defined context that contains all the data structures used
!  in the linear solution process.
!
!   Vec  x,b      /* solution vector and right-hand-side vector */
!   Mat  A        /* sparse matrix */
!   KSP  ksp      /* linear solver context */
!   int  m,n      /* grid dimensions */
!
!  Since we cannot store Scalars and integers in the same context,
!  we store the integers/pointers in the user-defined context, and
!  the scalar values are carried in the common block.
!  The scalar values in this simplistic example could easily
!  be recalculated in each routine, where they are needed.
!
!   Scalar hx2,hy2  /* 1/(hx*hx) = (m+1)*(m+1) and 1/(hy*hy) = (n+1)*(n+1) */
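!
!  For reference, the solver routines below pack and unpack the context
!  by slot (this mirrors exactly what the code does; nothing new is
!  introduced here):
!     userctx(1) = x     userctx(2) = b    userctx(3) = A
!     userctx(4) = ksp   userctx(5) = m    userctx(6) = n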
!
!  Note: Any user-defined Fortran routines MUST be declared as external.
!
      external UserInitializeLinearSolver
      external UserFinalizeLinearSolver
      external UserDoLinearSolver

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                        Variable declarations
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

      PetscScalar hx,hy,x,y
      PetscFortranAddr userctx(6)
      integer ierr,m,n,t,tmax,flg,i,j,Ntot
      integer size,rank
      double precision enorm
      PetscScalar,ALLOCATABLE :: userx(:,:),userb(:,:)
      PetscScalar,ALLOCATABLE :: solution(:,:),rho(:,:)

      double precision hx2,hy2
      common /param/ hx2,hy2

      tmax = 2
      m = 6
      n = 7

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                        Beginning of program
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

      call PetscInitialize(PETSC_NULL_CHARACTER,ierr)
      call MPI_Comm_size(PETSC_COMM_WORLD,size,ierr)
      if (size .ne. 1) then
         call MPI_Comm_rank(PETSC_COMM_WORLD,rank,ierr)
         if (rank .eq. 0) then
            write(6,*) 'This is a uniprocessor example only!'
         endif
         SETERRQ(1,' ',ierr)
      endif

!  The next two lines are for testing only; these allow the user to
!  decide the grid size at runtime.

      call PetscOptionsGetInt(PETSC_NULL_CHARACTER,'-m',m,flg,ierr)
      call PetscOptionsGetInt(PETSC_NULL_CHARACTER,'-n',n,flg,ierr)
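
!  For example, one can override the default grid size at runtime with
!  something like the following (the executable name ex13f90 is an
!  assumption; it depends on how the example is built):
!     ./ex13f90 -m 20 -n 20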

!  Create the empty sparse matrix and linear solver data structures

      call UserInitializeLinearSolver(m,n,userctx,ierr)
      Ntot = m*n

!  Allocate arrays to hold the solution to the linear system.  This
!  approach is not normally done in PETSc programs, but in this case,
!  since we are calling these routines from a non-PETSc program, we
!  would like to reuse the data structures from another code.  So in
!  the context of a larger application these would be provided by
!  other (non-PETSc) parts of the application code.

      ALLOCATE (userx(m,n),userb(m,n),solution(m,n))

!  Allocate an array to hold the coefficients in the elliptic operator

      ALLOCATE (rho(m,n))

!  Fill up the array rho() with the function rho(x,y) = x; fill the
!  right-hand side b() and the solution with a known problem for testing.
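!
!  Where the right-hand side below comes from: with rho(x,y) = x and
!  the exact solution
!     psi(x,y) = sin(2*pi*x)*sin(2*pi*y),
!  working out -div(rho grad psi) by hand gives
!     f(x,y) = -2*pi*cos(2*pi*x)*sin(2*pi*y)
!              + 8*pi*pi*x*sin(2*pi*x)*sin(2*pi*y),
!  which is exactly the expression assigned to userb(i,j) below.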

      hx = 1.0/(m+1)
      hy = 1.0/(n+1)
      y  = hy
      do 20 j=1,n
         x = hx
         do 10 i=1,m
            rho(i,j) = x
            solution(i,j) = sin(2.*PETSC_PI*x)*sin(2.*PETSC_PI*y)
            userb(i,j) = -2.*PETSC_PI*cos(2.*PETSC_PI*x)*             &
     &                   sin(2.*PETSC_PI*y) +                         &
     &                   8*PETSC_PI*PETSC_PI*x*                       &
     &                   sin(2.*PETSC_PI*x)*sin(2.*PETSC_PI*y)
            x = x + hx
 10      continue
         y = y + hy
 20   continue

!  Loop over a bunch of timesteps, setting up and solving the linear
!  system for each timestep.
!
!  Note that this loop is somewhat artificial.  It is intended to
!  demonstrate how one may reuse the linear solver in each timestep.

      do 100 t=1,tmax

         call UserDoLinearSolver(rho,userctx,userb,userx,ierr)

!  Compute error:  Note that this could (and usually should) all be done
!  using the PETSc vector operations.  Here we demonstrate using more
!  standard programming practices to show how they may be mixed with
!  PETSc.
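!
!  (With PETSc vectors one would, for instance, form the difference
!  with VecAXPY() and measure it with VecNorm(); the plain Fortran
!  loop below deliberately avoids that to show the mixed style.)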
         enorm = 0.0
         do 90 j=1,n
            do 80 i=1,m
               enorm = enorm +                                        &
     &            (solution(i,j)-userx(i,j))*(solution(i,j)-userx(i,j))
 80         continue
 90      continue
         enorm = enorm * PetscRealPart(hx*hy)
         write(6,115) m,n,enorm
 115     format ('m = ',I2,' n = ',I2,' error norm = ',1PE10.4)

 100  continue

!  We are finished solving linear systems, so we clean up the
!  data structures.

      DEALLOCATE (userx,userb,solution,rho)

      call UserFinalizeLinearSolver(userctx,ierr)
      call PetscFinalize(ierr)
      end

! ----------------------------------------------------------------
      subroutine UserInitializeLinearSolver(m,n,userctx,ierr)

      implicit none

#include "include/finclude/petsc.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscksp.h"
#include "include/finclude/petscpc.h"

      integer m,n,ierr
      PetscFortranAddr userctx(*)

      common /param/ hx2,hy2
      double precision hx2,hy2

!  Local variable declarations

      Mat     A
      Vec     b,x
      KSP     ksp
      integer Ntot

!  Here we assume use of a grid of size m x n, with all points on the
!  interior of the domain, i.e., we do not include the points corresponding
!  to homogeneous Dirichlet boundary conditions.  We assume that the domain
!  is [0,1] x [0,1].
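!
!  With this uniform grid, hx = 1/(m+1) and hy = 1/(n+1), so hx2 and
!  hy2 below hold 1/(hx*hx) and 1/(hy*hy).  The unknowns are numbered
!  in the natural row-by-row ordering used by the assembly loop in
!  UserDoLinearSolver().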

      hx2 = (m+1)*(m+1)
      hy2 = (n+1)*(n+1)
      Ntot = m*n

!  Create the sparse matrix.  Preallocate 5 nonzeros per row: the
!  5-point stencil couples each node to itself and at most 4 neighbors.

      call MatCreateSeqAIJ(PETSC_COMM_SELF,Ntot,Ntot,5,               &
     &                     PETSC_NULL_INTEGER,A,ierr)
!
!  Create vectors.  Here we create vectors with no memory allocated.
!  This way, we can use the data structures already in the program
!  by calling VecPlaceArray() at a later stage.
!
      call VecCreateSeqWithArray(PETSC_COMM_SELF,Ntot,                &
     &                           PETSC_NULL_SCALAR,b,ierr)
      call VecDuplicate(b,x,ierr)

!  Create the linear solver context.  This will be used repeatedly for
!  all the linear solves needed.

      call KSPCreate(PETSC_COMM_SELF,ksp,ierr)

      userctx(1) = x
      userctx(2) = b
      userctx(3) = A
      userctx(4) = ksp
      userctx(5) = m
      userctx(6) = n

      return
      end
! -----------------------------------------------------------------------

!  Solves -div (rho grad psi) = F using finite differences.
!  rho is a 2-dimensional array of size m by n, stored in Fortran
!  style by columns.  userb is a standard one-dimensional array.

      subroutine UserDoLinearSolver(rho,userctx,userb,userx,ierr)

      implicit none

#include "include/finclude/petsc.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscksp.h"
#include "include/finclude/petscpc.h"

      integer ierr
      PetscFortranAddr userctx(*)
      PetscScalar rho(*),userb(*),userx(*)

      common /param/ hx2,hy2
      double precision hx2,hy2

      PC  pc
      KSP ksp
      Vec b,x
      Mat A
      integer m,n
      integer i,j,II,JJ
      PetscScalar v

!      PetscScalar tmpx(*),tmpb(*)

      x   = userctx(1)
      b   = userctx(2)
      A   = userctx(3)
      ksp = userctx(4)
      m   = userctx(5)
      n   = userctx(6)

!  This is not the most efficient way of generating the matrix,
!  but let's not worry about it.  We should have separate code for
!  the four corners, each edge, and the interior; then we would not
!  need the slow if-tests inside the loop.
!
!  Compute the operator
!          -div rho grad
!  on an m by n grid with zero Dirichlet boundary conditions.  rho is
!  assumed to be given at the same grid points where the finite
!  difference stencil is applied.  For a staggered grid, one would
!  have to change things slightly.
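!
!  For orientation, the loop below assembles the standard 5-point
!  stencil.  With the 0-based row index II = (i-1) + (j-1)*m, the row
!  for node (i,j) receives
!     diagonal:             2*rho(i,j)*(1/hx^2 + 1/hy^2)
!     (i-1,j) and (i+1,j):  -0.5*(rho(i,j)+rho(nbr))/hx^2
!     (i,j-1) and (i,j+1):  -0.5*(rho(i,j)+rho(nbr))/hy^2
!  where neighbor (nbr) terms falling outside the grid are simply
!  dropped, which enforces the zero Dirichlet boundary conditions.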

      II = 0
      do 110 j=1,n
         do 100 i=1,m
!           coupling to node (i,j-1)
            if (j .gt. 1) then
               JJ = II - m
               v = -0.5*(rho(II+1) + rho(JJ+1))*hy2
               call MatSetValues(A,1,II,1,JJ,v,INSERT_VALUES,ierr)
            endif
!           coupling to node (i,j+1)
            if (j .lt. n) then
               JJ = II + m
               v = -0.5*(rho(II+1) + rho(JJ+1))*hy2
               call MatSetValues(A,1,II,1,JJ,v,INSERT_VALUES,ierr)
            endif
!           coupling to node (i-1,j)
            if (i .gt. 1) then
               JJ = II - 1
               v = -0.5*(rho(II+1) + rho(JJ+1))*hx2
               call MatSetValues(A,1,II,1,JJ,v,INSERT_VALUES,ierr)
            endif
!           coupling to node (i+1,j)
            if (i .lt. m) then
               JJ = II + 1
               v = -0.5*(rho(II+1) + rho(JJ+1))*hx2
               call MatSetValues(A,1,II,1,JJ,v,INSERT_VALUES,ierr)
            endif
!           diagonal entry for node (i,j)
            v = 2*rho(II+1)*(hx2+hy2)
            call MatSetValues(A,1,II,1,II,v,INSERT_VALUES,ierr)
            II = II+1
 100     continue
 110  continue
!
!  Assemble matrix
!
      call MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY,ierr)
      call MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY,ierr)

!
!  Set operators.  Here the matrix that defines the linear system
!  also serves as the preconditioning matrix.  Since all the matrices
!  will have the same nonzero pattern here, we indicate this so the
!  linear solvers can take advantage of it.
!
      call KSPSetOperators(ksp,A,A,SAME_NONZERO_PATTERN,ierr)

!
!  Set linear solver defaults for this problem (optional).
!  Here we set the preconditioner to a direct LU factorization.
!
      call KSPGetPC(ksp,pc,ierr)
      call PCSetType(pc,PCLU,ierr)

!
!  Set runtime options, e.g.,
!     -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
!  These options will override those specified above as long as
!  KSPSetFromOptions() is called _after_ any other customization
!  routines.
!
!  Run the program with the option -help to see all the possible
!  linear solver options.
!
      call KSPSetFromOptions(ksp,ierr)

!
!  This allows the PETSc linear solvers to compute the solution
!  directly in the user's array rather than in the PETSc vector.
!
!  This is essentially a hack and not highly recommended unless you
!  are quite comfortable with using PETSc.  In general, users should
!  write their entire application using PETSc vectors rather than
!  arrays.
!
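!  (Each VecPlaceArray() below is paired with a VecResetArray() after
!  the solve, which detaches the user array from the vector again.)
!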
      call VecPlaceArray(x,userx,ierr)
      call VecPlaceArray(b,userb,ierr)

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                      Solve the linear system
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

      call KSPSolve(ksp,b,x,ierr)

      call VecResetArray(x,ierr)
      call VecResetArray(b,ierr)

      return
      end

! ------------------------------------------------------------------------

      subroutine UserFinalizeLinearSolver(userctx,ierr)

      implicit none

#include "include/finclude/petsc.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscksp.h"
#include "include/finclude/petscpc.h"

      integer ierr
      PetscFortranAddr userctx(*)

!  Local variable declarations

      Vec x,b
      Mat A
      KSP ksp
!
!  We are all done and don't need to solve any more linear systems, so
!  we free the work space.  All PETSc objects should be destroyed when
!  they are no longer needed.
!
      x   = userctx(1)
      b   = userctx(2)
      A   = userctx(3)
      ksp = userctx(4)

      call VecDestroy(x,ierr)
      call VecDestroy(b,ierr)
      call MatDestroy(A,ierr)
      call KSPDestroy(ksp,ierr)

      return
      end