Actual source code: mg.c
1: #define PETSCKSP_DLL
3: /*
4: Defines the multigrid preconditioner interface.
5: */
6: #include "src/ksp/pc/impls/mg/mgimpl.h"
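/*
   PCMGMCycle_Private below runs one multiplicative cycle starting at the level *mglevels:
   pre-smooth with smoothd, compute the residual, restrict it to the next coarser level,
   recurse mg->cycles times (once for a V-cycle, twice for a W-cycle), interpolate and add
   the coarse-grid correction, then post-smooth with smoothu. On the finest level it also
   checks the convergence tolerance ttol set by PCApplyRichardson_MG().
*/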
11: PetscErrorCode PCMGMCycle_Private(PC_MG **mglevels,PetscTruth *converged)
12: {
13: PC_MG *mg = *mglevels,*mgc;
15: PetscInt cycles = mg->cycles;
18: if (converged) *converged = PETSC_FALSE;
20: if (mg->eventsolve) {PetscLogEventBegin(mg->eventsolve,0,0,0,0);}
21: KSPSolve(mg->smoothd,mg->b,mg->x);
22: if (mg->eventsolve) {PetscLogEventEnd(mg->eventsolve,0,0,0,0);}
23: if (mg->level) { /* not the coarsest grid */
24: (*mg->residual)(mg->A,mg->b,mg->x,mg->r);
26: /* if on finest level and have convergence criteria set */
27: if (mg->level == mg->levels-1 && mg->ttol) {
28: PetscReal rnorm;
29: VecNorm(mg->r,NORM_2,&rnorm);
30: if (rnorm <= mg->ttol) {
31: *converged = PETSC_TRUE;
32: if (rnorm < mg->abstol) {
33: PetscInfo2(0,"Linear solver has converged. Residual norm %G is less than absolute tolerance %G\n",rnorm,mg->abstol);
34: } else {
35: PetscInfo2(0,"Linear solver has converged. Residual norm %G is less than relative tolerance times initial residual norm %G\n",rnorm,mg->ttol);
36: }
37: return(0);
38: }
39: }
41: mgc = *(mglevels - 1);
42: MatRestrict(mg->restrct,mg->r,mgc->b);
43: VecSet(mgc->x,0.0);
44: while (cycles--) {
45: PCMGMCycle_Private(mglevels-1,converged);
46: }
47: MatInterpolateAdd(mg->interpolate,mgc->x,mg->x,mg->x);
48: if (mg->eventsolve) {PetscLogEventBegin(mg->eventsolve,0,0,0,0);}
49: KSPSolve(mg->smoothu,mg->b,mg->x);
50: if (mg->eventsolve) {PetscLogEventEnd(mg->eventsolve,0,0,0,0);}
51: }
52: return(0);
53: }
55: /*
56: PCMGCreate_Private - Creates a PC_MG structure for use with the
57: multigrid code. Level 0 is the coarsest; the finest
58: level is stored last in the array, at index levels-1.
60: */
63: static PetscErrorCode PCMGCreate_Private(MPI_Comm comm,PetscInt levels,PC pc,MPI_Comm *comms,PC_MG ***result)
64: {
65: PC_MG **mg;
67: PetscInt i;
68: PetscMPIInt size;
69: const char *prefix;
70: PC ipc;
73: PetscMalloc(levels*sizeof(PC_MG*),&mg);
74: PetscLogObjectMemory(pc,levels*(sizeof(PC_MG*)+sizeof(PC_MG)));
76: PCGetOptionsPrefix(pc,&prefix);
78: for (i=0; i<levels; i++) {
79: PetscNew(PC_MG,&mg[i]);
80: mg[i]->level = i;
81: mg[i]->levels = levels;
82: mg[i]->cycles = 1;
83: mg[i]->galerkin = PETSC_FALSE;
84: mg[i]->galerkinused = PETSC_FALSE;
85: mg[i]->default_smoothu = 1;
86: mg[i]->default_smoothd = 1;
88: if (comms) comm = comms[i];
89: KSPCreate(comm,&mg[i]->smoothd);
90: KSPSetTolerances(mg[i]->smoothd,PETSC_DEFAULT,PETSC_DEFAULT,PETSC_DEFAULT, mg[i]->default_smoothd);
91: KSPSetOptionsPrefix(mg[i]->smoothd,prefix);
93: /* special setup for the coarse grid solver */
94: if (!i && levels > 1) {
95: KSPAppendOptionsPrefix(mg[0]->smoothd,"mg_coarse_");
97: /* coarse solve is (redundant) LU by default */
98: KSPSetType(mg[0]->smoothd,KSPPREONLY);
99: KSPGetPC(mg[0]->smoothd,&ipc);
100: MPI_Comm_size(comm,&size);
101: if (size > 1) {
102: PCSetType(ipc,PCREDUNDANT);
103: PCRedundantGetPC(ipc,&ipc);
104: }
105: PCSetType(ipc,PCLU);
107: } else {
108: char tprefix[128];
109: sprintf(tprefix,"mg_levels_%d_",(int)i);
110: KSPAppendOptionsPrefix(mg[i]->smoothd,tprefix);
111: }
112: PetscLogObjectParent(pc,mg[i]->smoothd);
113: mg[i]->smoothu = mg[i]->smoothd;
114: mg[i]->rtol = 0.0;
115: mg[i]->abstol = 0.0;
116: mg[i]->dtol = 0.0;
117: mg[i]->ttol = 0.0;
118: mg[i]->eventsetup = 0;
119: mg[i]->eventsolve = 0;
120: }
121: *result = mg;
122: return(0);
123: }
127: static PetscErrorCode PCDestroy_MG(PC pc)
128: {
129: PC_MG **mg = (PC_MG**)pc->data;
131: PetscInt i,n = mg[0]->levels;
134: if (mg[0]->galerkinused) {
135: Mat B;
136: for (i=0; i<n-1; i++) {
137: KSPGetOperators(mg[i]->smoothd,0,&B,0);
138: MatDestroy(B);
139: }
140: }
142: for (i=0; i<n-1; i++) {
143: if (mg[i+1]->r) {VecDestroy(mg[i+1]->r);}
144: if (mg[i]->b) {VecDestroy(mg[i]->b);}
145: if (mg[i]->x) {VecDestroy(mg[i]->x);}
146: if (mg[i+1]->restrct) {MatDestroy(mg[i+1]->restrct);}
147: if (mg[i+1]->interpolate) {MatDestroy(mg[i+1]->interpolate);}
148: }
150: for (i=0; i<n; i++) {
151: if (mg[i]->smoothd != mg[i]->smoothu) {
152: KSPDestroy(mg[i]->smoothd);
153: }
154: KSPDestroy(mg[i]->smoothu);
155: PetscFree(mg[i]);
156: }
157: PetscFree(mg);
158: return(0);
159: }
163: EXTERN PetscErrorCode PCMGACycle_Private(PC_MG**);
164: EXTERN PetscErrorCode PCMGFCycle_Private(PC_MG**);
165: EXTERN PetscErrorCode PCMGKCycle_Private(PC_MG**);
167: /*
168: PCApply_MG - Runs either an additive, multiplicative, Kaskadic
169: or full cycle of multigrid.
171: Note:
172: A simple wrapper which calls PCMGMCycle_Private(), PCMGACycle_Private(), PCMGKCycle_Private(), or PCMGFCycle_Private().
173: */
176: static PetscErrorCode PCApply_MG(PC pc,Vec b,Vec x)
177: {
178: PC_MG **mg = (PC_MG**)pc->data;
180: PetscInt levels = mg[0]->levels;
183: mg[levels-1]->b = b;
184: mg[levels-1]->x = x;
185: if (!mg[levels-1]->r && mg[0]->am != PC_MG_ADDITIVE && levels > 1) {
186: Vec tvec;
187: VecDuplicate(mg[levels-1]->b,&tvec);
188: PCMGSetR(pc,levels-1,tvec);
189: VecDestroy(tvec);
190: }
191: if (mg[0]->am == PC_MG_MULTIPLICATIVE) {
192: VecSet(x,0.0);
193: PCMGMCycle_Private(mg+levels-1,PETSC_NULL);
194: }
195: else if (mg[0]->am == PC_MG_ADDITIVE) {
196: PCMGACycle_Private(mg);
197: }
198: else if (mg[0]->am == PC_MG_KASKADE) {
199: PCMGKCycle_Private(mg);
200: }
201: else {
202: PCMGFCycle_Private(mg);
203: }
204: return(0);
205: }
209: static PetscErrorCode PCApplyRichardson_MG(PC pc,Vec b,Vec x,Vec w,PetscReal rtol,PetscReal abstol, PetscReal dtol,PetscInt its)
210: {
211: PC_MG **mg = (PC_MG**)pc->data;
213: PetscInt levels = mg[0]->levels;
214: PetscTruth converged = PETSC_FALSE;
217: mg[levels-1]->b = b;
218: mg[levels-1]->x = x;
220: mg[levels-1]->rtol = rtol;
221: mg[levels-1]->abstol = abstol;
222: mg[levels-1]->dtol = dtol;
223: if (rtol) {
224: /* compute initial residual norm for relative convergence test */
225: PetscReal rnorm;
226: (*mg[levels-1]->residual)(mg[levels-1]->A,b,x,w);
227: VecNorm(w,NORM_2,&rnorm);
228: mg[levels-1]->ttol = PetscMax(rtol*rnorm,abstol);
229: } else if (abstol) {
230: mg[levels-1]->ttol = abstol;
231: } else {
232: mg[levels-1]->ttol = 0.0;
233: }
235: while (its-- && !converged) {
236: PCMGMCycle_Private(mg+levels-1,&converged);
237: }
238: return(0);
239: }
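/*
   Note: PCApplyRichardson_MG above sets ttol = max(rtol*||r_0||_2, abstol) on the finest
   level; PCMGMCycle_Private() compares the 2-norm of the residual against this value after
   each pre-smooth on the finest level and stops cycling once it is reached.
*/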
243: PetscErrorCode PCSetFromOptions_MG(PC pc)
244: {
246: PetscInt m,levels = 1;
247: PetscTruth flg;
248: PC_MG **mg = (PC_MG**)pc->data;
249: PCMGType mgtype;
253: PetscOptionsHead("Multigrid options");
254: if (!pc->data) {
255: PetscOptionsInt("-pc_mg_levels","Number of Levels","PCMGSetLevels",levels,&levels,&flg);
256: PCMGSetLevels(pc,levels,PETSC_NULL);
257: }
     /* pc->data may have just been created by PCMGSetLevels() above, so fetch it here */
     mg     = (PC_MG**)pc->data;
     mgtype = mg[0]->am;
258: PetscOptionsInt("-pc_mg_cycles","1 for V cycle, 2 for W-cycle","PCMGSetCycles",1,&m,&flg);
259: if (flg) {
260: PCMGSetCycles(pc,m);
261: }
262: PetscOptionsName("-pc_mg_galerkin","Use Galerkin process to compute coarser operators","PCMGSetGalerkin",&flg);
263: if (flg) {
264: PCMGSetGalerkin(pc);
265: }
266: PetscOptionsInt("-pc_mg_smoothup","Number of post-smoothing steps","PCMGSetNumberSmoothUp",1,&m,&flg);
267: if (flg) {
268: PCMGSetNumberSmoothUp(pc,m);
269: }
270: PetscOptionsInt("-pc_mg_smoothdown","Number of pre-smoothing steps","PCMGSetNumberSmoothDown",1,&m,&flg);
271: if (flg) {
272: PCMGSetNumberSmoothDown(pc,m);
273: }
274: PetscOptionsEnum("-pc_mg_type","Multigrid type","PCMGSetType",PCMGTypes,(PetscEnum)mgtype,(PetscEnum*)&mgtype,&flg);
275: if (flg) {PCMGSetType(pc,mgtype);}
276: PetscOptionsName("-pc_mg_log","Log times for each multigrid level","None",&flg);
277: if (flg) {
278: PetscInt i;
279: char eventname[128];
280: if (!mg) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Must set MG levels before calling");
281: levels = mg[0]->levels;
282: for (i=0; i<levels; i++) {
283: sprintf(eventname,"MGSetup Level %d",(int)i);
284: PetscLogEventRegister(&mg[i]->eventsetup,eventname,pc->cookie);
285: sprintf(eventname,"MGSolve Level %d to 0",(int)i);
286: PetscLogEventRegister(&mg[i]->eventsolve,eventname,pc->cookie);
287: }
288: }
289: PetscOptionsTail();
290: return(0);
291: }
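/*
   Illustrative runtime options handled by PCSetFromOptions_MG above, e.g.

       -pc_mg_levels 3 -pc_mg_cycles 2 -pc_mg_type multiplicative
       -pc_mg_smoothdown 2 -pc_mg_smoothup 2 -pc_mg_galerkin -pc_mg_log

   request a three-level multiplicative W-cycle with two pre- and post-smoothing steps,
   Galerkin coarse-grid operators, and per-level event logging.
*/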
293: const char *PCMGTypes[] = {"MULTIPLICATIVE","ADDITIVE","FULL","KASKADE","PCMGType","PC_MG",0};
297: static PetscErrorCode PCView_MG(PC pc,PetscViewer viewer)
298: {
299: PC_MG **mg = (PC_MG**)pc->data;
301: PetscInt levels = mg[0]->levels,i;
302: PetscTruth iascii;
305: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);
306: if (iascii) {
307: PetscViewerASCIIPrintf(viewer," MG: type is %s, levels=%D cycles=%D, pre-smooths=%D, post-smooths=%D\n",
308: PCMGTypes[mg[0]->am],levels,mg[0]->cycles,mg[0]->default_smoothd,mg[0]->default_smoothu);
309: if (mg[0]->galerkin) {
310: PetscViewerASCIIPrintf(viewer," Using Galerkin computed coarse grid matrices\n");
311: }
312: for (i=0; i<levels; i++) {
313: if (!i) {
314: PetscViewerASCIIPrintf(viewer,"Coarse grid solver -- level %D -------------------------------\n",i);
315: } else {
316: PetscViewerASCIIPrintf(viewer,"Down solver (pre-smoother) on level %D -------------------------------\n",i);
317: }
318: PetscViewerASCIIPushTab(viewer);
319: KSPView(mg[i]->smoothd,viewer);
320: PetscViewerASCIIPopTab(viewer);
321: if (i && mg[i]->smoothd == mg[i]->smoothu) {
322: PetscViewerASCIIPrintf(viewer,"Up solver (post-smoother) same as down solver (pre-smoother)\n");
323: } else if (i){
324: PetscViewerASCIIPrintf(viewer,"Up solver (post-smoother) on level %D -------------------------------\n",i);
325: PetscViewerASCIIPushTab(viewer);
326: KSPView(mg[i]->smoothu,viewer);
327: PetscViewerASCIIPopTab(viewer);
328: }
329: }
330: } else {
331: SETERRQ1(PETSC_ERR_SUP,"Viewer type %s not supported for PCMG",((PetscObject)viewer)->type_name);
332: }
333: return(0);
334: }
336: /*
337: Calls setup for the KSP on each level
338: */
341: static PetscErrorCode PCSetUp_MG(PC pc)
342: {
343: PC_MG **mg = (PC_MG**)pc->data;
345: PetscInt i,n = mg[0]->levels;
346: PC cpc;
347: PetscTruth preonly,lu,redundant,cholesky,monitor = PETSC_FALSE,dump;
348: PetscViewer ascii;
349: MPI_Comm comm;
350: Mat dA,dB;
351: MatStructure uflag;
352: Vec tvec;
355: if (!pc->setupcalled) {
356: PetscOptionsHasName(0,"-pc_mg_monitor",&monitor);
357:
358: for (i=0; i<n; i++) {
359: if (monitor) {
360: PetscObjectGetComm((PetscObject)mg[i]->smoothd,&comm);
361: PetscViewerASCIIOpen(comm,"stdout",&ascii);
362: PetscViewerASCIISetTab(ascii,n-i);
363: KSPSetMonitor(mg[i]->smoothd,KSPDefaultMonitor,ascii,(PetscErrorCode(*)(void*))PetscViewerDestroy);
364: }
365: KSPSetFromOptions(mg[i]->smoothd);
366: }
367: for (i=1; i<n; i++) {
368: if (mg[i]->smoothu && (mg[i]->smoothu != mg[i]->smoothd)) {
369: if (monitor) {
370: PetscObjectGetComm((PetscObject)mg[i]->smoothu,&comm);
371: PetscViewerASCIIOpen(comm,"stdout",&ascii);
372: PetscViewerASCIISetTab(ascii,n-i);
373: KSPSetMonitor(mg[i]->smoothu,KSPDefaultMonitor,ascii,(PetscErrorCode(*)(void*))PetscViewerDestroy);
374: }
375: KSPSetFromOptions(mg[i]->smoothu);
376: }
377: }
378: for (i=1; i<n; i++) {
379: if (!mg[i]->residual) {
380: Mat mat;
381: KSPGetOperators(mg[i]->smoothd,PETSC_NULL,&mat,PETSC_NULL);
382: PCMGSetResidual(pc,i,PCMGDefaultResidual,mat);
383: }
384: if (mg[i]->restrct && !mg[i]->interpolate) {
385: PCMGSetInterpolate(pc,i,mg[i]->restrct);
386: }
387: if (!mg[i]->restrct && mg[i]->interpolate) {
388: PCMGSetRestriction(pc,i,mg[i]->interpolate);
389: }
390: #if defined(PETSC_USE_DEBUG)
391: if (!mg[i]->restrct || !mg[i]->interpolate) {
392: SETERRQ1(PETSC_ERR_ARG_WRONGSTATE,"Need to set restriction or interpolation on level %d",(int)i);
393: }
394: #endif
395: }
396: for (i=0; i<n-1; i++) {
397: if (!mg[i]->b) {
398: Mat mat;
399: Vec vec;
400: KSPGetOperators(mg[i]->smoothd,PETSC_NULL,&mat,PETSC_NULL);
401: MatGetVecs(mat,&vec,PETSC_NULL);
402: PCMGSetRhs(pc,i,vec);
403: }
404: if (!mg[i]->r && i) {
405: VecDuplicate(mg[i]->b,&tvec);
406: PCMGSetR(pc,i,tvec);
407: VecDestroy(tvec);
408: }
409: if (!mg[i]->x) {
410: VecDuplicate(mg[i]->b,&tvec);
411: PCMGSetX(pc,i,tvec);
412: VecDestroy(tvec);
413: }
414: }
415: }
417: /* If user did not provide fine grid operators, use those from PC */
418: /* BUG BUG BUG This will work ONLY the first time it is called; hence, if the user changes
419: the PC matrices between solves, PCMG will continue to use the first set provided */
420: KSPGetOperators(mg[n-1]->smoothd,&dA,&dB,&uflag);
421: if (!dA && !dB) {
422: PetscInfo(pc,"Using outer operators to define finest grid operator \n because PCMGGetSmoother(pc,nlevels-1,&ksp);KSPSetOperators(ksp,...); was not called.\n");
423: KSPSetOperators(mg[n-1]->smoothd,pc->mat,pc->pmat,uflag);
424: }
426: if (mg[0]->galerkin) {
427: Mat B;
428: mg[0]->galerkinused = PETSC_TRUE;
429: /* currently only handle case where mat and pmat are the same on coarser levels */
430: KSPGetOperators(mg[n-1]->smoothd,&dA,&dB,&uflag);
431: if (!pc->setupcalled) {
432: for (i=n-2; i>-1; i--) {
433: MatPtAP(dB,mg[i+1]->interpolate,MAT_INITIAL_MATRIX,1.0,&B);
434: KSPSetOperators(mg[i]->smoothd,B,B,uflag);
435: dB = B;
436: }
437: } else {
438: for (i=n-2; i>-1; i--) {
439: KSPGetOperators(mg[i]->smoothd,0,&B,0);
440: MatPtAP(dB,mg[i+1]->interpolate,MAT_REUSE_MATRIX,1.0,&B);
441: KSPSetOperators(mg[i]->smoothd,B,B,uflag);
442: dB = B;
443: }
444: }
445: }
447: for (i=1; i<n; i++) {
448: if (mg[i]->smoothu == mg[i]->smoothd) {
449: /* smoothd is also used as the post-smoother; the post-smooth must start from the current iterate, so set a nonzero initial guess */
450: KSPSetInitialGuessNonzero(mg[i]->smoothd,PETSC_TRUE);
451: }
452: if (mg[i]->eventsetup) {PetscLogEventBegin(mg[i]->eventsetup,0,0,0,0);}
453: KSPSetUp(mg[i]->smoothd);
454: if (mg[i]->eventsetup) {PetscLogEventEnd(mg[i]->eventsetup,0,0,0,0);}
455: }
456: for (i=1; i<n; i++) {
457: if (mg[i]->smoothu && mg[i]->smoothu != mg[i]->smoothd) {
458: PC uppc,downpc;
459: Mat downmat,downpmat,upmat,uppmat;
460: MatStructure matflag;
462: /* check if operators have been set for up, if not use down operators to set them */
463: KSPGetPC(mg[i]->smoothu,&uppc);
464: PCGetOperators(uppc,&upmat,&uppmat,PETSC_NULL);
465: if (!upmat) {
466: KSPGetPC(mg[i]->smoothd,&downpc);
467: PCGetOperators(downpc,&downmat,&downpmat,&matflag);
468: KSPSetOperators(mg[i]->smoothu,downmat,downpmat,matflag);
469: }
471: KSPSetInitialGuessNonzero(mg[i]->smoothu,PETSC_TRUE);
472: if (mg[i]->eventsetup) {PetscLogEventBegin(mg[i]->eventsetup,0,0,0,0);}
473: KSPSetUp(mg[i]->smoothu);
474: if (mg[i]->eventsetup) {PetscLogEventEnd(mg[i]->eventsetup,0,0,0,0);}
475: }
476: }
478: /*
479: If the coarse solver is not a direct method then DO NOT USE preonly
480: */
481: PetscTypeCompare((PetscObject)mg[0]->smoothd,KSPPREONLY,&preonly);
482: if (preonly) {
483: KSPGetPC(mg[0]->smoothd,&cpc);
484: PetscTypeCompare((PetscObject)cpc,PCLU,&lu);
485: PetscTypeCompare((PetscObject)cpc,PCREDUNDANT,&redundant);
486: PetscTypeCompare((PetscObject)cpc,PCCHOLESKY,&cholesky);
487: if (!lu && !redundant && !cholesky) {
488: KSPSetType(mg[0]->smoothd,KSPGMRES);
489: }
490: }
492: if (!pc->setupcalled) {
493: if (monitor) {
494: PetscObjectGetComm((PetscObject)mg[0]->smoothd,&comm);
495: PetscViewerASCIIOpen(comm,"stdout",&ascii);
496: PetscViewerASCIISetTab(ascii,n);
497: KSPSetMonitor(mg[0]->smoothd,KSPDefaultMonitor,ascii,(PetscErrorCode(*)(void*))PetscViewerDestroy);
498: }
499: KSPSetFromOptions(mg[0]->smoothd);
500: }
502: if (mg[0]->eventsetup) {PetscLogEventBegin(mg[0]->eventsetup,0,0,0,0);}
503: KSPSetUp(mg[0]->smoothd);
504: if (mg[0]->eventsetup) {PetscLogEventEnd(mg[0]->eventsetup,0,0,0,0);}
506: #if defined(PETSC_USE_SOCKET_VIEWER)
507: /*
508: Dump the interpolation/restriction matrices to matlab plus the
509: Jacobian/stiffness on each level. This allows Matlab users to
510: easily check if the Galerkin condition A_c = R A_f R^T is satisfied */
511: PetscOptionsHasName(pc->prefix,"-pc_mg_dump_matlab",&dump);
512: if (dump) {
513: for (i=1; i<n; i++) {
514: MatView(mg[i]->restrct,PETSC_VIEWER_SOCKET_(pc->comm));
515: }
516: for (i=0; i<n; i++) {
517: KSPGetPC(mg[i]->smoothd,&pc);
518: MatView(pc->mat,PETSC_VIEWER_SOCKET_(pc->comm));
519: }
520: }
521: #endif
523: PetscOptionsHasName(pc->prefix,"-pc_mg_dump_binary",&dump);
524: if (dump) {
525: for (i=1; i<n; i++) {
526: MatView(mg[i]->restrct,PETSC_VIEWER_BINARY_(pc->comm));
527: }
528: for (i=0; i<n; i++) {
529: KSPGetPC(mg[i]->smoothd,&pc);
530: MatView(pc->mat,PETSC_VIEWER_BINARY_(pc->comm));
531: }
532: }
533: return(0);
534: }
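/*
   Illustrative sketch: unless -pc_mg_galerkin is used, PCSetUp_MG above expects the caller
   to have supplied an operator for the smoother on every level. The helper name and the
   per-level matrix array A[] below are hypothetical.
*/
#if 0
static PetscErrorCode ExampleSetLevelOperators(PC pc,PetscInt nlevels,Mat *A)
{
  KSP      smoother;
  PetscInt i;
  for (i=0; i<nlevels; i++) {
    /* fetch the smoother on level i and hand it that level's operator */
    PCMGGetSmoother(pc,i,&smoother);
    KSPSetOperators(smoother,A[i],A[i],DIFFERENT_NONZERO_PATTERN);
  }
  return(0);
}
#endif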
536: /* -------------------------------------------------------------------------------------*/
540: /*@C
541: PCMGSetLevels - Sets the number of levels to use with MG.
542: Must be called before any other MG routine.
544: Collective on PC
546: Input Parameters:
547: + pc - the preconditioner context
548: . levels - the number of levels
549: - comms - optional communicators for each level; this is to allow solving the coarser problems
550: on smaller sets of processors. Use PETSC_NULL_OBJECT for default in Fortran
552: Level: intermediate
554: Notes:
555: If the number of levels is one then the multigrid uses the -mg_levels prefix
556: for setting the level options rather than the -mg_coarse prefix.
558: .keywords: MG, set, levels, multigrid
560: .seealso: PCMGSetType(), PCMGGetLevels()
561: @*/
562: PetscErrorCode PETSCKSP_DLLEXPORT PCMGSetLevels(PC pc,PetscInt levels,MPI_Comm *comms)
563: {
565: PC_MG **mg=0;
570: if (pc->data) {
571: SETERRQ(PETSC_ERR_ORDER,"Number levels already set for MG\n\
572: make sure that you call PCMGSetLevels() before KSPSetFromOptions()");
573: }
574: PCMGCreate_Private(pc->comm,levels,pc,comms,&mg);
575: mg[0]->am = PC_MG_MULTIPLICATIVE;
576: pc->data = (void*)mg;
577: pc->ops->applyrichardson = PCApplyRichardson_MG;
578: return(0);
579: }
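/*
   Typical calling order (a sketch; ksp and nlevels are assumed to exist in the caller):

       KSPGetPC(ksp,&pc);
       PCSetType(pc,PCMG);
       PCMGSetLevels(pc,nlevels,PETSC_NULL);  /* or pass an array of per-level communicators */
       KSPSetFromOptions(ksp);                /* call PCMGSetLevels() before KSPSetFromOptions(), see the SETERRQ above */
*/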
583: /*@
584: PCMGGetLevels - Gets the number of levels to use with MG.
586: Not Collective
588: Input Parameter:
589: . pc - the preconditioner context
591: Output parameter:
592: . levels - the number of levels
594: Level: advanced
596: .keywords: MG, get, levels, multigrid
598: .seealso: PCMGSetLevels()
599: @*/
600: PetscErrorCode PETSCKSP_DLLEXPORT PCMGGetLevels(PC pc,PetscInt *levels)
601: {
602: PC_MG **mg;
608: mg = (PC_MG**)pc->data;
609: *levels = mg[0]->levels;
610: return(0);
611: }
615: /*@
616: PCMGSetType - Determines the form of multigrid to use:
617: multiplicative, additive, full, or the Kaskade algorithm.
619: Collective on PC
621: Input Parameters:
622: + pc - the preconditioner context
623: - form - multigrid form, one of PC_MG_MULTIPLICATIVE, PC_MG_ADDITIVE,
624: PC_MG_FULL, PC_MG_KASKADE
626: Options Database Key:
627: . -pc_mg_type <form> - Sets <form>, one of multiplicative,
628: additive, full, kaskade
630: Level: advanced
632: .keywords: MG, set, method, multiplicative, additive, full, Kaskade, multigrid
634: .seealso: PCMGSetLevels()
635: @*/
636: PetscErrorCode PETSCKSP_DLLEXPORT PCMGSetType(PC pc,PCMGType form)
637: {
638: PC_MG **mg;
642: mg = (PC_MG**)pc->data;
644: if (!mg) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Must set MG levels before calling");
645: mg[0]->am = form;
646: if (form == PC_MG_MULTIPLICATIVE) pc->ops->applyrichardson = PCApplyRichardson_MG;
647: else pc->ops->applyrichardson = 0;
648: return(0);
649: }
653: /*@
654: PCMGSetCycles - Sets the type of cycle to use (1 denotes a V-cycle, 2 a W-cycle). Use PCMGSetCyclesOnLevel() for more
655: complicated cycling.
657: Collective on PC
659: Input Parameters:
660: + pc - the multigrid context
661: - n - the number of cycles
663: Options Database Key:
664: $ -pc_mg_cycles n - 1 denotes a V-cycle; 2 denotes a W-cycle.
666: Level: advanced
668: .keywords: MG, set, cycles, V-cycle, W-cycle, multigrid
670: .seealso: PCMGSetCyclesOnLevel()
671: @*/
672: PetscErrorCode PETSCKSP_DLLEXPORT PCMGSetCycles(PC pc,PetscInt n)
673: {
674: PC_MG **mg;
675: PetscInt i,levels;
679: mg = (PC_MG**)pc->data;
680: if (!mg) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Must set MG levels before calling");
681: levels = mg[0]->levels;
683: for (i=0; i<levels; i++) {
684: mg[i]->cycles = n;
685: }
686: return(0);
687: }
691: /*@
692: PCMGSetGalerkin - Causes the coarser grid matrices to be computed from the
693: finest grid via the Galerkin process: A_i-1 = r_i * A_i * r_i^t
695: Collective on PC
697: Input Parameters:
698: . pc - the multigrid context
700: Options Database Key:
701: $ -pc_mg_galerkin
703: Level: intermediate
705: .keywords: MG, set, Galerkin
707: .seealso: PCMGGetGalerkin()
709: @*/
710: PetscErrorCode PETSCKSP_DLLEXPORT PCMGSetGalerkin(PC pc)
711: {
712: PC_MG **mg;
713: PetscInt i,levels;
717: mg = (PC_MG**)pc->data;
718: if (!mg) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Must set MG levels before calling");
719: levels = mg[0]->levels;
721: for (i=0; i<levels; i++) {
722: mg[i]->galerkin = PETSC_TRUE;
723: }
724: return(0);
725: }
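/*
   Note: with PCMGSetGalerkin() the caller only supplies the interpolation/restriction
   operators and the finest-level matrix; PCSetUp_MG() then forms each coarser operator
   as B_coarse = P^T B_fine P with

       MatPtAP(dB,mg[i+1]->interpolate,MAT_INITIAL_MATRIX,1.0,&B);

   as shown in its Galerkin branch above.
*/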
729: /*@
730: PCMGGetGalerkin - Checks if Galerkin multigrid is being used, i.e.
731: A_i-1 = r_i * A_i * r_i^t
733: Not Collective
735: Input Parameter:
736: . pc - the multigrid context
738: Output Parameter:
739: . galerkin - PETSC_TRUE or PETSC_FALSE
741: Options Database Key:
742: $ -pc_mg_galerkin
744: Level: intermediate
746: .keywords: MG, set, Galerkin
748: .seealso: PCMGSetGalerkin()
750: @*/
751: PetscErrorCode PETSCKSP_DLLEXPORT PCMGGetGalerkin(PC pc,PetscTruth *galerkin)
752: {
753: PC_MG **mg;
757: mg = (PC_MG**)pc->data;
758: if (!mg) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Must set MG levels before calling");
759: *galerkin = mg[0]->galerkin;
760: return(0);
761: }
765: /*@
766: PCMGSetNumberSmoothDown - Sets the number of pre-smoothing steps to
767: use on all levels. Use PCMGGetSmootherDown() to set different numbers of
768: pre-smoothing steps on different levels.
770: Collective on PC
772: Input Parameters:
773: + mg - the multigrid context
774: - n - the number of smoothing steps
776: Options Database Key:
777: . -pc_mg_smoothdown <n> - Sets number of pre-smoothing steps
779: Level: advanced
781: .keywords: MG, smooth, down, pre-smoothing, steps, multigrid
783: .seealso: PCMGSetNumberSmoothUp()
784: @*/
785: PetscErrorCode PETSCKSP_DLLEXPORT PCMGSetNumberSmoothDown(PC pc,PetscInt n)
786: {
787: PC_MG **mg;
789: PetscInt i,levels;
793: mg = (PC_MG**)pc->data;
794: if (!mg) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Must set MG levels before calling");
795: levels = mg[0]->levels;
797: for (i=1; i<levels; i++) {
798: /* make sure smoother up and down are different */
799: PCMGGetSmootherUp(pc,i,PETSC_NULL);
800: KSPSetTolerances(mg[i]->smoothd,PETSC_DEFAULT,PETSC_DEFAULT,PETSC_DEFAULT,n);
801: mg[i]->default_smoothd = n;
802: }
803: return(0);
804: }
808: /*@
809: PCMGSetNumberSmoothUp - Sets the number of post-smoothing steps to use
810: on all levels. Use PCMGGetSmootherUp() to set different numbers of
811: post-smoothing steps on different levels.
813: Collective on PC
815: Input Parameters:
816: + mg - the multigrid context
817: - n - the number of smoothing steps
819: Options Database Key:
820: . -pc_mg_smoothup <n> - Sets number of post-smoothing steps
822: Level: advanced
824: Note: this does not set a value on the coarsest grid, since we assume that
825: there is no separate smooth up on the coarsest grid.
827: .keywords: MG, smooth, up, post-smoothing, steps, multigrid
829: .seealso: PCMGSetNumberSmoothDown()
830: @*/
831: PetscErrorCode PETSCKSP_DLLEXPORT PCMGSetNumberSmoothUp(PC pc,PetscInt n)
832: {
833: PC_MG **mg;
835: PetscInt i,levels;
839: mg = (PC_MG**)pc->data;
840: if (!mg) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Must set MG levels before calling");
841: levels = mg[0]->levels;
843: for (i=1; i<levels; i++) {
844: /* make sure smoother up and down are different */
845: PCMGGetSmootherUp(pc,i,PETSC_NULL);
846: KSPSetTolerances(mg[i]->smoothu,PETSC_DEFAULT,PETSC_DEFAULT,PETSC_DEFAULT,n);
847: mg[i]->default_smoothu = n;
848: }
849: return(0);
850: }
852: /* ----------------------------------------------------------------------------------------*/
854: /*MC
855: PCMG - Use geometric multigrid preconditioning. This preconditioner requires that you provide additional
856: information about the coarser grid matrices and restriction/interpolation operators.
858: Options Database Keys:
859: + -pc_mg_levels <nlevels> - number of levels including finest
860: . -pc_mg_cycles 1 or 2 - for V or W-cycle
861: . -pc_mg_smoothup <n> - number of smoothing steps after interpolation
862: . -pc_mg_smoothdown <n> - number of smoothing steps before applying restriction operator
863: . -pc_mg_type <additive,multiplicative,full,kaskade> - multiplicative is the default
864: . -pc_mg_log - log information about time spent on each level of the solver
865: . -pc_mg_monitor - print information on the multigrid convergence
866: . -pc_mg_galerkin - use Galerkin process to compute coarser operators
867: - -pc_mg_dump_matlab - dumps the matrices for each level and the restriction/interpolation matrices
868: to the Socket viewer for reading from Matlab.
870: Notes:
872: Level: intermediate
874: Concepts: multigrid
876: .seealso: PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
877: PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
878: PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
879: PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
880: PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
881: M*/
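/*
   Illustrative usage sketch for PCMG. The helper name and the arguments Afine (finest-level
   matrix) and P[] (interpolation from level i-1 to level i) are hypothetical and assumed to
   have been built by the application, e.g. from a mesh hierarchy.
*/
#if 0
static PetscErrorCode ExampleSolveWithPCMG(MPI_Comm comm,Mat Afine,PetscInt nlevels,Mat *P,Vec b,Vec x)
{
  KSP      ksp;
  PC       pc;
  PetscInt i;

  KSPCreate(comm,&ksp);
  KSPSetOperators(ksp,Afine,Afine,DIFFERENT_NONZERO_PATTERN);
  KSPGetPC(ksp,&pc);
  PCSetType(pc,PCMG);
  PCMGSetLevels(pc,nlevels,PETSC_NULL);      /* must precede the other PCMG calls */
  for (i=1; i<nlevels; i++) {
    PCMGSetInterpolate(pc,i,P[i]);           /* prolongation from level i-1 to level i */
    PCMGSetRestriction(pc,i,P[i]);           /* the same matrix may serve as restriction (see PCSetUp_MG) */
  }
  PCMGSetGalerkin(pc);                       /* let PCSetUp_MG build coarse operators via MatPtAP() */
  KSPSetFromOptions(ksp);                    /* picks up -pc_mg_* and -mg_levels_* options */
  KSPSolve(ksp,b,x);
  KSPDestroy(ksp);
  return(0);
}
#endif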
886: PetscErrorCode PETSCKSP_DLLEXPORT PCCreate_MG(PC pc)
887: {
889: pc->ops->apply = PCApply_MG;
890: pc->ops->setup = PCSetUp_MG;
891: pc->ops->destroy = PCDestroy_MG;
892: pc->ops->setfromoptions = PCSetFromOptions_MG;
893: pc->ops->view = PCView_MG;
895: pc->data = (void*)0;
896: return(0);
897: }