Actual source code: pipelcg.c
#include <petsc/private/kspimpl.h>
#include <petsc/private/vecimpl.h>

#define offset(j)      PetscMax(((j) - (2*l)), 0)
#define shift(i,j)     ((i) - offset((j)))
#define G(i,j)         (plcg->G[((j)*(2*l+1))+(shift((i),(j)))])
#define G_noshift(i,j) (plcg->G[((j)*(2*l+1))+(i)])
#define alpha(i)       (plcg->alpha[(i)])
#define gamma(i)       (plcg->gamma[(i)])
#define delta(i)       (plcg->delta[(i)])
#define sigma(i)       (plcg->sigma[(i)])
#define req(i)         (plcg->req[(i)])
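/*
  Storage of G: only a band of 2*l+1 entries per column is kept (the array is allocated as
  (max_it+1)*(2*l+1) scalars in KSPSolve_PIPELCG). The offset/shift macros map a global row
  index i of column j onto this compressed band, so G(i,j) can be used as if the full matrix
  were stored.
*/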

typedef struct KSP_CG_PIPE_L_s KSP_CG_PIPE_L;
struct KSP_CG_PIPE_L_s {
  PetscInt    l;          /* pipeline depth */
  Vec         *Z;         /* Z vectors (shifted base) */
  Vec         *U;         /* U vectors (unpreconditioned shifted base) */
  Vec         *V;         /* V vectors (original base) */
  Vec         *Q;         /* Q vectors (auxiliary bases) */
  Vec         p;          /* work vector */
  PetscScalar *G;         /* such that Z = VG (band matrix) */
  PetscScalar *gamma,*delta,*alpha;
  PetscReal   lmin,lmax;  /* min and max eigenvalue estimates used to compute the base shifts */
  PetscReal   *sigma;     /* base shifts */
  MPI_Request *req;       /* request array for asynchronous global collective */
  PetscBool   show_rstrt; /* flag to show restart information in output (default: not shown) */
};

/*
  KSPSetUp_PIPELCG - Sets up the workspace needed by the PIPELCG method.

  This is called once, usually automatically by KSPSolve() or KSPSetUp(),
  but can also be called directly by KSPSetUp().
*/
static PetscErrorCode KSPSetUp_PIPELCG(KSP ksp)
{
  KSP_CG_PIPE_L *plcg = (KSP_CG_PIPE_L*)ksp->data;
  PetscInt      l=plcg->l,max_it=ksp->max_it;
  MPI_Comm      comm;

  comm = PetscObjectComm((PetscObject)ksp);

  KSPSetWorkVecs(ksp,1); /* get work vectors needed by PIPELCG */
  plcg->p = ksp->work[0];
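
  /* Vector workspace: Z holds the shifted basis (l+1 vectors, at least 3), U and V each keep a
     sliding window of 3 vectors that is rotated by pointer shifts in the inner loop, and Q holds
     the l-1 auxiliary bases (a window of 3 vectors each, plus one extra vector). */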
  VecDuplicateVecs(plcg->p,PetscMax(3,l+1),&plcg->Z);
  VecDuplicateVecs(plcg->p,3,&plcg->U);
  VecDuplicateVecs(plcg->p,3,&plcg->V);
  VecDuplicateVecs(plcg->p,3*(l-1)+1,&plcg->Q);
  PetscCalloc1(2,&plcg->alpha);
  PetscCalloc1(l,&plcg->sigma);

  return 0;
}

static PetscErrorCode KSPReset_PIPELCG(KSP ksp)
{
  KSP_CG_PIPE_L *plcg = (KSP_CG_PIPE_L*)ksp->data;
  PetscInt      l=plcg->l;

  PetscFree(plcg->sigma);
  PetscFree(plcg->alpha);
  VecDestroyVecs(PetscMax(3,l+1),&plcg->Z);
  VecDestroyVecs(3,&plcg->U);
  VecDestroyVecs(3,&plcg->V);
  VecDestroyVecs(3*(l-1)+1,&plcg->Q);
  return 0;
}

static PetscErrorCode KSPDestroy_PIPELCG(KSP ksp)
{
  KSPReset_PIPELCG(ksp);
  KSPDestroyDefault(ksp);
  return 0;
}

static PetscErrorCode KSPSetFromOptions_PIPELCG(PetscOptionItems *PetscOptionsObject,KSP ksp)
{
  KSP_CG_PIPE_L *plcg = (KSP_CG_PIPE_L*)ksp->data;
  PetscBool     flag=PETSC_FALSE;

  PetscOptionsHead(PetscOptionsObject,"KSP PIPELCG options");
  PetscOptionsInt("-ksp_pipelcg_pipel","Pipeline length","",plcg->l,&plcg->l,&flag);
  if (!flag) plcg->l = 1;
  PetscOptionsReal("-ksp_pipelcg_lmin","Estimate for smallest eigenvalue","",plcg->lmin,&plcg->lmin,&flag);
  if (!flag) plcg->lmin = 0.0;
  PetscOptionsReal("-ksp_pipelcg_lmax","Estimate for largest eigenvalue","",plcg->lmax,&plcg->lmax,&flag);
  if (!flag) plcg->lmax = 0.0;
  PetscOptionsBool("-ksp_pipelcg_monitor","Output information on restarts when they occur? (default: 0)","",plcg->show_rstrt,&plcg->show_rstrt,&flag);
  if (!flag) plcg->show_rstrt = PETSC_FALSE;
  PetscOptionsTail();
  return 0;
}
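
/*
  Wrapper around the non-blocking MPI_Iallreduce: when MPI non-blocking collectives are not
  available, the reduction falls back to a blocking MPIU_Allreduce and the request is set to
  MPI_REQUEST_NULL, so that the MPI_Wait calls issued later in the pipeline return immediately.
*/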
static PetscErrorCode MPIPetsc_Iallreduce(void *sendbuf,void *recvbuf,PetscMPIInt count,MPI_Datatype datatype,MPI_Op op,MPI_Comm comm,MPI_Request *request)
{
#if defined(PETSC_HAVE_MPI_NONBLOCKING_COLLECTIVES)
  MPI_Iallreduce(sendbuf,recvbuf,count,datatype,op,comm,request);
#else
  MPIU_Allreduce(sendbuf,recvbuf,count,datatype,op,comm);
  *request = MPI_REQUEST_NULL;
#endif
  return 0;
}

static PetscErrorCode KSPView_PIPELCG(KSP ksp,PetscViewer viewer)
{
  KSP_CG_PIPE_L *plcg = (KSP_CG_PIPE_L*)ksp->data;
  PetscBool     iascii=PETSC_FALSE,isstring=PETSC_FALSE;

  PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
  PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSTRING,&isstring);
  if (iascii) {
    PetscViewerASCIIPrintf(viewer," Pipeline depth: %D\n", plcg->l);
    PetscViewerASCIIPrintf(viewer," Minimal eigenvalue estimate %g\n",plcg->lmin);
    PetscViewerASCIIPrintf(viewer," Maximal eigenvalue estimate %g\n",plcg->lmax);
  } else if (isstring) {
    PetscViewerStringSPrintf(viewer," Pipeline depth: %D\n", plcg->l);
    PetscViewerStringSPrintf(viewer," Minimal eigenvalue estimate %g\n",plcg->lmin);
    PetscViewerStringSPrintf(viewer," Maximal eigenvalue estimate %g\n",plcg->lmax);
  }
  return 0;
}
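
/*
  KSPSolve_InnerLoop_PIPELCG - One pass of the pipelined CG iteration between restarts.

  Each iteration of the inner for-loop performs, in this order:
    1. a matrix-vector product and preconditioner application (sigma-shifted during the first l
       "pipeline fill" iterations),
    2. the update of the band matrix G with the dot products that were started l iterations
       earlier, including the breakdown check that may trigger a restart,
    3. the recurrences for the V, Q, Z and U basis vectors,
    4. the local dot products for the new column of G, followed by a single non-blocking global
       reduction, and
    5. the update of the solution vector and of the recursively computed residual norm.
*/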
static PetscErrorCode KSPSolve_InnerLoop_PIPELCG(KSP ksp)
{
  KSP_CG_PIPE_L *plcg = (KSP_CG_PIPE_L*)ksp->data;
  Mat           A=NULL,Pmat=NULL;
  PetscInt      it=0,max_it=ksp->max_it,l=plcg->l,i=0,j=0,k=0;
  PetscInt      start=0,middle=0,end=0;
  Vec           *Z=plcg->Z,*U=plcg->U,*V=plcg->V,*Q=plcg->Q;
  Vec           x=NULL,p=NULL,temp=NULL;
  PetscScalar   sum_dummy=0.0,eta=0.0,zeta=0.0,lambda=0.0;
  PetscReal     dp=0.0,tmp=0.0,beta=0.0,invbeta2=0.0;
  MPI_Comm      comm;

  x = ksp->vec_sol;
  p = plcg->p;

  comm = PetscObjectComm((PetscObject)ksp);
  PCGetOperators(ksp->pc,&A,&Pmat);

  for (it = 0; it < max_it+l; ++it) {
    /* ----------------------------------- */
    /* Multiplication z_{it+1} = Az_{it}   */
    /* ----------------------------------- */
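    /* Pipeline fill phase: for the first l iterations the step below applies, in effect, the
       sigma-shifted preconditioned operator (with preconditioner B, Z[l-it-1] = (B*A - sigma(it)*I)*Z[l-it],
       since U[1] holds the unpreconditioned counterpart of Z[l-it]). From iteration l onward the
       plain preconditioned operator is applied to the most recent Z vector. */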
    /* Shift the U vector pointers */
    temp = U[2];
    for (i = 2; i>0; i--) {
      U[i] = U[i-1];
    }
    U[0] = temp;
    if (it < l) {
      /* SpMV and Sigma-shift and Prec */
      MatMult(A,Z[l-it],U[0]);
      VecAXPY(U[0],-sigma(it),U[1]);
      KSP_PCApply(ksp,U[0],Z[l-it-1]);
      if (it < l-1) {
        VecCopy(Z[l-it-1],Q[3*it]);
      }
    } else {
      /* Shift the Z vector pointers */
      temp = Z[PetscMax(l,2)];
      for (i = PetscMax(l,2); i > 0; --i) {
        Z[i] = Z[i-1];
      }
      Z[0] = temp;
      /* SpMV and Prec */
      MatMult(A,Z[1],U[0]);
      KSP_PCApply(ksp,U[0],Z[0]);
    }

    /* ----------------------------------- */
    /* Adjust the G matrix                 */
    /* ----------------------------------- */
    if (it >= l) {
      if (it == l) {
        /* MPI_Wait for G(0,0), then scale V0 and the Z, U and Q vectors with 1/beta */
        MPI_Wait(&req(0),MPI_STATUS_IGNORE);
        beta = PetscSqrtReal(PetscRealPart(G(0,0)));
        G(0,0) = 1.0;
        VecAXPY(V[0],1.0/beta,p); /* this assumes V[0] is zero initially */
        for (j = 0; j <= PetscMax(l,2); ++j) {
          VecScale(Z[j],1.0/beta);
        }
        for (j = 0; j <= 2; ++j) {
          VecScale(U[j],1.0/beta);
        }
        for (j = 0; j < l-1; ++j) {
          VecScale(Q[3*j],1.0/beta);
        }
      }

      /* MPI_Wait until the dot products, started l iterations ago, have completed */
      MPI_Wait(&req(it-l+1),MPI_STATUS_IGNORE);
      if (it >= 2*l) {
        for (j = PetscMax(0,it-3*l+1); j <= it-2*l; j++) {
          G(j,it-l+1) = G(it-2*l+1,j+l); /* exploit symmetry in G matrix */
        }
      }

      if (it <= 2*l-1) {
        invbeta2 = 1.0 / (beta * beta);
        /* Scale columns 1 up to l of G with 1/beta^2 */
        for (j = PetscMax(it-3*l+1,0); j <= it-l+1; ++j) {
          G(j,it-l+1) *= invbeta2;
        }
      }
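
      /* Forward substitution against the previously computed columns extends the Cholesky-like
         factorization of the Gram matrix by one column; a negative value of 'tmp' below means
         the new diagonal entry has no real square root (loss of positive definiteness due to
         rounding) and the iteration is restarted. */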
      for (j = PetscMax(it-2*l+2,0); j <= it-l; ++j) {
        sum_dummy = 0.0;
        for (k = PetscMax(it-3*l+1,0); k <= j-1; ++k) {
          sum_dummy = sum_dummy + G(k,j) * G(k,it-l+1);
        }
        G(j,it-l+1) = (G(j,it-l+1) - sum_dummy) / G(j,j);
      }

      sum_dummy = 0.0;
      for (k = PetscMax(it-3*l+1,0); k <= it-l; ++k) {
        sum_dummy = sum_dummy + G(k,it-l+1) * G(k,it-l+1);
      }

      tmp = PetscRealPart(G(it-l+1,it-l+1) - sum_dummy);
      /* Breakdown check */
      if (tmp < 0) {
        if (plcg->show_rstrt) {
          PetscPrintf(comm,"Sqrt breakdown in iteration %D: sqrt argument is %e. Iteration was restarted.\n",ksp->its+1,(double)tmp);
        }
        /* End hanging dot-products in the pipeline before exiting for-loop */
        start = it-l+2;
        end = PetscMin(it+1,max_it+1); /* !warning! 'it' can actually be greater than 'max_it' */
        for (i = start; i < end; ++i) {
          MPI_Wait(&req(i),MPI_STATUS_IGNORE);
        }
        break;
      }
      G(it-l+1,it-l+1) = PetscSqrtReal(tmp);
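
      /* Recover the scalar recurrence coefficients gamma and delta from the newly completed
         column of G; these play the role of the diagonal and off-diagonal entries of the
         underlying tridiagonal (Lanczos-type) matrix. */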
      if (it < 2*l) {
        if (it == l) {
          gamma(it-l) = (G(it-l,it-l+1) + sigma(it-l) * G(it-l,it-l)) / G(it-l,it-l);
        } else {
          gamma(it-l) = (G(it-l,it-l+1) + sigma(it-l) * G(it-l,it-l)
                         - delta(it-l-1) * G(it-l-1,it-l)) / G(it-l,it-l);
        }
        delta(it-l) = G(it-l+1,it-l+1) / G(it-l,it-l);
      } else {
        gamma(it-l) = (G(it-l,it-l) * gamma(it-2*l)
                       + G(it-l,it-l+1) * delta(it-2*l)
                       - G(it-l-1,it-l) * delta(it-l-1)) / G(it-l,it-l);
        delta(it-l) = (G(it-l+1,it-l+1) * delta(it-2*l)) / G(it-l,it-l);
      }

      /* -------------------------------------------------- */
      /* Recursively compute the next V, Q, Z and U vectors  */
      /* -------------------------------------------------- */
      /* Shift the V vector pointers */
      temp = V[2];
      for (i = 2; i>0; i--) {
        V[i] = V[i-1];
      }
      V[0] = temp;

      /* Recurrence V vectors */
      if (l == 1) {
        VecCopy(Z[1],V[0]);
      } else {
        VecCopy(Q[0],V[0]);
      }
      if (it == l) {
        VecAXPY(V[0],sigma(0)-gamma(it-l),V[1]);
      } else {
        alpha(0) = sigma(0)-gamma(it-l);
        alpha(1) = -delta(it-l-1);
        VecMAXPY(V[0],2,&alpha(0),&V[1]);
      }
      VecScale(V[0],1.0/delta(it-l));

      /* Recurrence Q vectors */
      for (j = 0; j < l-1; ++j) {
        /* Shift the Q vector pointers */
        temp = Q[3*j+2];
        for (i = 2; i>0; i--) {
          Q[3*j+i] = Q[3*j+i-1];
        }
        Q[3*j] = temp;

        if (j < l-2) {
          VecCopy(Q[3*(j+1)],Q[3*j]);
        } else {
          VecCopy(Z[1],Q[3*j]);
        }
        if (it == l) {
          VecAXPY(Q[3*j],sigma(j+1)-gamma(it-l),Q[3*j+1]);
        } else {
          alpha(0) = sigma(j+1)-gamma(it-l);
          alpha(1) = -delta(it-l-1);
          VecMAXPY(Q[3*j],2,&alpha(0),&Q[3*j+1]);
        }
        VecScale(Q[3*j],1.0/delta(it-l));
      }

      /* Recurrence Z and U vectors */
      if (it == l) {
        VecAXPY(Z[0],-gamma(it-l),Z[1]);
        VecAXPY(U[0],-gamma(it-l),U[1]);
      } else {
        alpha(0) = -gamma(it-l);
        alpha(1) = -delta(it-l-1);
        VecMAXPY(Z[0],2,&alpha(0),&Z[1]);
        VecMAXPY(U[0],2,&alpha(0),&U[1]);
      }
      VecScale(Z[0],1.0/delta(it-l));
      VecScale(U[0],1.0/delta(it-l));
    }

    /* ---------------------------------------- */
    /* Compute and communicate the dot products */
    /* ---------------------------------------- */
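    /* The local dot products for column it+1 of G are computed here and a single non-blocking
       global reduction (req(it+1)) is started; it is only waited on l iterations later, so the
       communication overlaps with the next l matrix-vector products and preconditioner
       applications. */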
    if (it < l) {
      for (j = 0; j < it+2; ++j) {
        (*U[0]->ops->dot_local)(U[0],Z[l-j],&G(j,it+1)); /* dot-products (U[0],Z[j]) */
      }
      MPIPetsc_Iallreduce(MPI_IN_PLACE,&G(0,it+1),it+2,MPIU_SCALAR,MPIU_SUM,comm,&req(it+1));
    } else if ((it >= l) && (it < max_it)) {
      middle = it-l+2;
      end = it+2;
      (*U[0]->ops->dot_local)(U[0],V[0],&G(it-l+1,it+1)); /* dot-product (U[0],V[0]) */
      for (j = middle; j < end; ++j) {
        (*U[0]->ops->dot_local)(U[0],plcg->Z[it+1-j],&G(j,it+1)); /* dot-products (U[0],Z[j]) */
      }
      MPIPetsc_Iallreduce(MPI_IN_PLACE,&G(it-l+1,it+1),l+1,MPIU_SCALAR,MPIU_SUM,comm,&req(it+1));
    }

    /* ----------------------------------------- */
    /* Compute solution vector and residual norm */
    /* ----------------------------------------- */
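    /* Update the approximate solution x from the search direction p and the recursively
       propagated scalars eta and zeta; the residual norm dp handed to the convergence test is
       the recursively updated value |zeta| (dp = beta at it == l) rather than a recomputed
       residual norm. */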
    if (it >= l) {
      if (it == l) {
        if (ksp->its != 0) {
          ++ ksp->its;
        }
        eta  = gamma(0);
        zeta = beta;
        VecCopy(V[1],p);
        VecScale(p,1.0/eta);
        VecAXPY(x,zeta,p);
        dp = beta;
      } else if (it > l) {
        k = it-l;
        ++ ksp->its;
        lambda = delta(k-1)/eta;
        eta  = gamma(k) - lambda * delta(k-1);
        zeta = -lambda * zeta;
        VecScale(p,-delta(k-1)/eta);
        VecAXPY(p,1.0/eta,V[1]);
        VecAXPY(x,zeta,p);
        dp = PetscAbsScalar(zeta);
      }
      ksp->rnorm = dp;
      KSPLogResidualHistory(ksp,dp);
      KSPMonitor(ksp,ksp->its,dp);
      (*ksp->converged)(ksp,ksp->its,dp,&ksp->reason,ksp->cnvP);

      if (ksp->its >= max_it && !ksp->reason) ksp->reason = KSP_DIVERGED_ITS;
      if (ksp->reason) {
        /* End hanging dot-products in the pipeline before exiting for-loop */
        start = it-l+2;
        end = PetscMin(it+2,max_it+1); /* !warning! 'it' can actually be greater than 'max_it' */
        for (i = start; i < end; ++i) {
          MPI_Wait(&req(i),MPI_STATUS_IGNORE);
        }
        break;
      }
    }
  } /* End inner for loop */
  return 0;
}

static PetscErrorCode KSPSolve_ReInitData_PIPELCG(KSP ksp)
{
  KSP_CG_PIPE_L *plcg = (KSP_CG_PIPE_L*)ksp->data;
  PetscInt      i=0,j=0,l=plcg->l,max_it=ksp->max_it;

  for (i = 0; i < PetscMax(3,l+1); ++i) {
    VecSet(plcg->Z[i],0.0);
  }
  for (i = 1; i < 3; ++i) {
    VecSet(plcg->U[i],0.0);
  }
  for (i = 0; i < 3; ++i) {
    VecSet(plcg->V[i],0.0);
  }
  for (i = 0; i < 3*(l-1)+1; ++i) {
    VecSet(plcg->Q[i],0.0);
  }
  for (j = 0; j < (max_it+1); ++j) {
    gamma(j) = 0.0;
    delta(j) = 0.0;
    for (i = 0; i < (2*l+1); ++i) {
      G_noshift(i,j) = 0.0;
    }
  }
  return 0;
}

/*
  KSPSolve_PIPELCG - This routine applies the pipelined (length l) conjugate gradient method
*/
static PetscErrorCode KSPSolve_PIPELCG(KSP ksp)
{
  KSP_CG_PIPE_L *plcg = (KSP_CG_PIPE_L*)ksp->data;
  Mat           A=NULL,Pmat=NULL;
  Vec           b=NULL,x=NULL,p=NULL;
  PetscInt      max_it=ksp->max_it,l=plcg->l;
  PetscInt      i=0,outer_it=0,curr_guess_zero=0;
  PetscReal     lmin=plcg->lmin,lmax=plcg->lmax;
  PetscBool     diagonalscale=PETSC_FALSE;
  MPI_Comm      comm;

  comm = PetscObjectComm((PetscObject)ksp);
  PCGetDiagonalScale(ksp->pc,&diagonalscale);
  if (diagonalscale) {
    SETERRQ(comm,PETSC_ERR_SUP,"Krylov method %s does not support diagonal scaling",((PetscObject)ksp)->type_name);
  }

  x = ksp->vec_sol;
  b = ksp->vec_rhs;
  p = plcg->p;

  PetscCalloc1((max_it+1)*(2*l+1),&plcg->G);
  PetscCalloc1(max_it+1,&plcg->gamma);
  PetscCalloc1(max_it+1,&plcg->delta);
  PetscCalloc1(max_it+1,&plcg->req);

  PCGetOperators(ksp->pc,&A,&Pmat);
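
  /* The l base shifts sigma_i are the Chebyshev points of the interval [lmin,lmax], i.e. the
     roots of the degree-l Chebyshev polynomial mapped from [-1,1] onto the user-supplied
     spectral interval. */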
  for (i = 0; i < l; ++i) {
    sigma(i) = (0.5*(lmin+lmax) + (0.5*(lmax-lmin) * PetscCosReal(PETSC_PI*(2.0*i+1.0)/(2.0*l))));
  }

  ksp->its = 0;
  outer_it = 0;
  curr_guess_zero = !! ksp->guess_zero;

  while (ksp->its < max_it) { /* OUTER LOOP (gmres-like restart to handle breakdowns) */
    /* RESTART LOOP */
    if (!curr_guess_zero) {
      KSP_MatMult(ksp,A,x,plcg->U[0]); /* u <- b - Ax */
      VecAYPX(plcg->U[0],-1.0,b);
    } else {
      VecCopy(b,plcg->U[0]); /* u <- b (x is 0) */
    }
    KSP_PCApply(ksp,plcg->U[0],p); /* p <- Bu */

    if (outer_it > 0) {
      /* Re-initialize Z, U, V, Q, gamma, delta and G after a restart occurred */
      KSPSolve_ReInitData_PIPELCG(ksp);
    }

    (*plcg->U[0]->ops->dot_local)(plcg->U[0],p,&G(0,0));
    MPIPetsc_Iallreduce(MPI_IN_PLACE,&G(0,0),1,MPIU_SCALAR,MPIU_SUM,comm,&req(0));
    VecCopy(p,plcg->Z[l]);

    KSPSolve_InnerLoop_PIPELCG(ksp);

    if (ksp->reason) break; /* convergence or divergence */
    ++ outer_it;
    curr_guess_zero = 0;
  }

  if (!ksp->reason) ksp->reason = KSP_DIVERGED_ITS;
  PetscFree(plcg->G);
  PetscFree(plcg->gamma);
  PetscFree(plcg->delta);
  PetscFree(plcg->req);
  return 0;
}

/*MC
   KSPPIPELCG - Deep pipelined (length l) Conjugate Gradient method. This method has only a single non-blocking global
   reduction per iteration, compared to 2 blocking reductions for standard CG. The reduction is overlapped by the
   matrix-vector product and preconditioner application of the next l iterations. The pipeline length l is a parameter
   of the method.

   Options Database Keys:
+  -ksp_pipelcg_pipel - pipeline length (default: 1)
.  -ksp_pipelcg_lmin - approximation to the smallest eigenvalue of the preconditioned operator (default: 0.0)
.  -ksp_pipelcg_lmax - approximation to the largest eigenvalue of the preconditioned operator (default: 0.0)
-  -ksp_pipelcg_monitor - output where/why the method restarts when a sqrt breakdown occurs

   See KSPSolve() for additional options.

   Level: advanced

   Notes:
   MPI configuration may be necessary for reductions to make asynchronous progress, which is important for
   performance of pipelined methods. See the FAQ on the PETSc website for details.

   Contributed by:
   Siegfried Cools, University of Antwerp, Dept. Mathematics and Computer Science,
   funded by Flemish Research Foundation (FWO) grant number 12H4617N.

   Example usage:
   [*] KSP ex2, no preconditioner, pipel = 2, lmin = 0.0, lmax = 8.0 :
       $mpiexec -n 14 ./ex2 -m 1000 -n 1000 -ksp_type pipelcg -pc_type none -ksp_norm_type natural
        -ksp_rtol 1e-10 -ksp_max_it 1000 -ksp_pipelcg_pipel 2 -ksp_pipelcg_lmin 0.0 -ksp_pipelcg_lmax 8.0 -log_view
   [*] SNES ex48, bjacobi preconditioner, pipel = 3, lmin = 0.0, lmax = 2.0, show restart information :
       $mpiexec -n 14 ./ex48 -M 150 -P 100 -ksp_type pipelcg -pc_type bjacobi -ksp_rtol 1e-10 -ksp_pipelcg_pipel 3
        -ksp_pipelcg_lmin 0.0 -ksp_pipelcg_lmax 2.0 -ksp_pipelcg_monitor -log_view

   References:
+  * - J. Cornelis, S. Cools and W. Vanroose,
       "The Communication-Hiding Conjugate Gradient Method with Deep Pipelines",
       Submitted to SIAM Journal on Scientific Computing (SISC), 2018.
-  * - S. Cools, J. Cornelis and W. Vanroose,
       "Numerically Stable Recurrence Relations for the Communication Hiding Pipelined Conjugate Gradient Method",
       Submitted to IEEE Transactions on Parallel and Distributed Systems, 2019.

.seealso: KSPCreate(), KSPSetType(), KSPType (for list of available types), KSPCG, KSPPIPECG, KSPPIPECGRR, KSPPGMRES,
          KSPPIPEBCGS, KSPSetPCSide()
M*/
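
/*
  Minimal programmatic usage sketch (hypothetical variable names; assumes a Mat A and Vecs b, x
  already exist on the communicator comm). The PIPELCG-specific options listed above are picked
  up by KSPSetFromOptions():

    KSP ksp;
    KSPCreate(comm,&ksp);
    KSPSetOperators(ksp,A,A);
    KSPSetType(ksp,KSPPIPELCG);
    KSPSetFromOptions(ksp);
    KSPSolve(ksp,b,x);
    KSPDestroy(&ksp);
*/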
PETSC_EXTERN
PetscErrorCode KSPCreate_PIPELCG(KSP ksp)
{
  KSP_CG_PIPE_L *plcg = NULL;

  PetscNewLog(ksp,&plcg);
  ksp->data = (void*)plcg;

  KSPSetSupportedNorm(ksp,KSP_NORM_NONE,PC_LEFT,1);
  KSPSetSupportedNorm(ksp,KSP_NORM_NATURAL,PC_LEFT,2);

  ksp->ops->setup          = KSPSetUp_PIPELCG;
  ksp->ops->solve          = KSPSolve_PIPELCG;
  ksp->ops->reset          = KSPReset_PIPELCG;
  ksp->ops->destroy        = KSPDestroy_PIPELCG;
  ksp->ops->view           = KSPView_PIPELCG;
  ksp->ops->setfromoptions = KSPSetFromOptions_PIPELCG;
  ksp->ops->buildsolution  = KSPBuildSolutionDefault;
  ksp->ops->buildresidual  = KSPBuildResidualDefault;
  return 0;
}