Actual source code: theta.c
1: /*
2: Code for timestepping with implicit Theta method
3: */
4: #include <petsc/private/tsimpl.h>
5: #include <petscsnes.h>
6: #include <petscdm.h>
7: #include <petscmat.h>
9: typedef struct {
10: /* context for time stepping */
11: PetscReal stage_time;
12: Vec Stages[2]; /* Storage for stage solutions */
13: Vec X0, X, Xdot; /* Storage for u^n, u^n + dt a_{11} k_1, and time derivative u^{n+1}_t */
14: Vec affine; /* Affine vector needed for residual at beginning of step in endpoint formulation */
15: PetscReal Theta;
16: PetscReal shift; /* Shift parameter for SNES Jacobian, used by forward, TLM and adjoint */
17: PetscInt order;
18: PetscBool endpoint;
19: PetscBool extrapolate;
20: TSStepStatus status;
21: Vec VecCostIntegral0; /* Backup for roll-backs due to events, used by cost integral */
22: PetscReal ptime0; /* Backup for ts->ptime, the start time of the current time step, used by TLM and cost integral */
23: PetscReal time_step0; /* Backup for ts->time_step, the step size of the current time step, used by TLM and cost integral */
25: /* context for sensitivity analysis */
26: PetscInt num_tlm; /* Total number of tangent linear equations */
27: Vec *VecsDeltaLam; /* Increment of the adjoint sensitivity w.r.t. IC at stage */
28: Vec *VecsDeltaMu; /* Increment of the adjoint sensitivity w.r.t. P at stage */
29: Vec *VecsSensiTemp; /* Vector to be multiplied with Jacobian transpose */
30: Mat MatFwdStages[2]; /* TLM Stages */
31: Mat MatDeltaFwdSensip; /* Increment of the forward sensitivity at stage */
32: Vec VecDeltaFwdSensipCol; /* Working vector for holding one column of the sensitivity matrix */
33: Mat MatFwdSensip0; /* backup for roll-backs due to events */
34: Mat MatIntegralSensipTemp; /* Working vector for forward integral sensitivity */
35: Mat MatIntegralSensip0; /* backup for roll-backs due to events */
36: Vec *VecsDeltaLam2; /* Increment of the 2nd-order adjoint sensitivity w.r.t. IC at stage */
37: Vec *VecsDeltaMu2; /* Increment of the 2nd-order adjoint sensitivity w.r.t. P at stage */
38: Vec *VecsSensi2Temp; /* Working vectors that hold the residual for the second-order adjoint */
39: Vec *VecsAffine; /* Working vectors to store residuals */
40: /* context for error estimation */
41: Vec vec_sol_prev;
42: Vec vec_lte_work;
43: } TS_Theta;
45: static PetscErrorCode TSThetaGetX0AndXdot(TS ts, DM dm, Vec *X0, Vec *Xdot)
46: {
47: TS_Theta *th = (TS_Theta *)ts->data;
49: if (X0) {
50: if (dm && dm != ts->dm) {
51: DMGetNamedGlobalVector(dm, "TSTheta_X0", X0);
52: } else *X0 = ts->vec_sol;
53: }
54: if (Xdot) {
55: if (dm && dm != ts->dm) {
56: DMGetNamedGlobalVector(dm, "TSTheta_Xdot", Xdot);
57: } else *Xdot = th->Xdot;
58: }
59: return 0;
60: }
62: static PetscErrorCode TSThetaRestoreX0AndXdot(TS ts, DM dm, Vec *X0, Vec *Xdot)
63: {
64: if (X0) {
65: if (dm && dm != ts->dm) DMRestoreNamedGlobalVector(dm, "TSTheta_X0", X0);
66: }
67: if (Xdot) {
68: if (dm && dm != ts->dm) DMRestoreNamedGlobalVector(dm, "TSTheta_Xdot", Xdot);
69: }
70: return 0;
71: }
73: static PetscErrorCode DMCoarsenHook_TSTheta(DM fine, DM coarse, void *ctx)
74: {
75: return 0;
76: }
78: static PetscErrorCode DMRestrictHook_TSTheta(DM fine, Mat restrct, Vec rscale, Mat inject, DM coarse, void *ctx)
79: {
80: TS ts = (TS)ctx;
81: Vec X0, Xdot, X0_c, Xdot_c;
83: TSThetaGetX0AndXdot(ts, fine, &X0, &Xdot);
84: TSThetaGetX0AndXdot(ts, coarse, &X0_c, &Xdot_c);
85: MatRestrict(restrct, X0, X0_c);
86: MatRestrict(restrct, Xdot, Xdot_c);
87: VecPointwiseMult(X0_c, rscale, X0_c);
88: VecPointwiseMult(Xdot_c, rscale, Xdot_c);
89: TSThetaRestoreX0AndXdot(ts, fine, &X0, &Xdot);
90: TSThetaRestoreX0AndXdot(ts, coarse, &X0_c, &Xdot_c);
91: return 0;
92: }
94: static PetscErrorCode DMSubDomainHook_TSTheta(DM dm, DM subdm, void *ctx)
95: {
96: return 0;
97: }
99: static PetscErrorCode DMSubDomainRestrictHook_TSTheta(DM dm, VecScatter gscat, VecScatter lscat, DM subdm, void *ctx)
100: {
101: TS ts = (TS)ctx;
102: Vec X0, Xdot, X0_sub, Xdot_sub;
104: TSThetaGetX0AndXdot(ts, dm, &X0, &Xdot);
105: TSThetaGetX0AndXdot(ts, subdm, &X0_sub, &Xdot_sub);
107: VecScatterBegin(gscat, X0, X0_sub, INSERT_VALUES, SCATTER_FORWARD);
108: VecScatterEnd(gscat, X0, X0_sub, INSERT_VALUES, SCATTER_FORWARD);
110: VecScatterBegin(gscat, Xdot, Xdot_sub, INSERT_VALUES, SCATTER_FORWARD);
111: VecScatterEnd(gscat, Xdot, Xdot_sub, INSERT_VALUES, SCATTER_FORWARD);
113: TSThetaRestoreX0AndXdot(ts, dm, &X0, &Xdot);
114: TSThetaRestoreX0AndXdot(ts, subdm, &X0_sub, &Xdot_sub);
115: return 0;
116: }
118: static PetscErrorCode TSThetaEvaluateCostIntegral(TS ts)
119: {
120: TS_Theta *th = (TS_Theta *)ts->data;
121: TS quadts = ts->quadraturets;
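/*
   Quadrature for a cost integral with integrand r(t,u), accumulated in quadts->vec_sol:
     endpoint form: q_{n+1} = q_n + h*((1-Theta)*r(t_n,U_n) + Theta*r(t_{n+1},U_{n+1}))
     midpoint form: q_{n+1} = q_n + h*r(t_s,U_s) with stage time t_s = t_n + Theta*h
   where h = time_step0.
*/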
123: if (th->endpoint) {
124: /* Evolve quadts->vec_sol to accumulate the cost integrals */
125: if (th->Theta != 1.0) {
126: TSComputeRHSFunction(quadts, th->ptime0, th->X0, ts->vec_costintegrand);
127: VecAXPY(quadts->vec_sol, th->time_step0 * (1.0 - th->Theta), ts->vec_costintegrand);
128: }
129: TSComputeRHSFunction(quadts, ts->ptime, ts->vec_sol, ts->vec_costintegrand);
130: VecAXPY(quadts->vec_sol, th->time_step0 * th->Theta, ts->vec_costintegrand);
131: } else {
132: TSComputeRHSFunction(quadts, th->stage_time, th->X, ts->vec_costintegrand);
133: VecAXPY(quadts->vec_sol, th->time_step0, ts->vec_costintegrand);
134: }
135: return 0;
136: }
138: static PetscErrorCode TSForwardCostIntegral_Theta(TS ts)
139: {
140: TS_Theta *th = (TS_Theta *)ts->data;
141: TS quadts = ts->quadraturets;
143: /* backup cost integral */
144: VecCopy(quadts->vec_sol, th->VecCostIntegral0);
145: TSThetaEvaluateCostIntegral(ts);
146: return 0;
147: }
149: static PetscErrorCode TSAdjointCostIntegral_Theta(TS ts)
150: {
151: TS_Theta *th = (TS_Theta *)ts->data;
153: /* Like TSForwardCostIntegral(), the adjoint cost integral evaluation relies on ptime0 and time_step0. */
154: th->ptime0 = ts->ptime + ts->time_step;
155: th->time_step0 = -ts->time_step;
156: TSThetaEvaluateCostIntegral(ts);
157: return 0;
158: }
160: static PetscErrorCode TSTheta_SNESSolve(TS ts, Vec b, Vec x)
161: {
162: PetscInt nits, lits;
164: SNESSolve(ts->snes, b, x);
165: SNESGetIterationNumber(ts->snes, &nits);
166: SNESGetLinearSolveIterations(ts->snes, &lits);
167: ts->snes_its += nits;
168: ts->ksp_its += lits;
169: return 0;
170: }
172: static PetscErrorCode TSStep_Theta(TS ts)
173: {
174: TS_Theta *th = (TS_Theta *)ts->data;
175: PetscInt rejections = 0;
176: PetscBool stageok, accept = PETSC_TRUE;
177: PetscReal next_time_step = ts->time_step;
179: if (!ts->steprollback) {
180: if (th->vec_sol_prev) VecCopy(th->X0, th->vec_sol_prev);
181: VecCopy(ts->vec_sol, th->X0);
182: }
184: th->status = TS_STEP_INCOMPLETE;
185: while (!ts->reason && th->status != TS_STEP_COMPLETE) {
186: th->shift = 1 / (th->Theta * ts->time_step);
187: th->stage_time = ts->ptime + (th->endpoint ? (PetscReal)1 : th->Theta) * ts->time_step;
188: VecCopy(th->X0, th->X);
189: if (th->extrapolate && !ts->steprestart) VecAXPY(th->X, 1 / th->shift, th->Xdot);
190: if (th->endpoint) { /* This formulation assumes linear time-independent mass matrix */
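/*
   For an IFunction of the form F(t,u,udot) = M*udot - f(t,u), the SNES below solves
   G(U) = F(t_{n+1}, U, (U - U0)*shift) = affine with affine = ((Theta-1)/Theta)*F(t_n, U0, 0),
   which is algebraically equivalent to the endpoint Theta step
   M*(U_{n+1} - U_n)/dt = Theta*f(t_{n+1},U_{n+1}) + (1-Theta)*f(t_n,U_n).
*/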
191: if (!th->affine) VecDuplicate(ts->vec_sol, &th->affine);
192: VecZeroEntries(th->Xdot);
193: TSComputeIFunction(ts, ts->ptime, th->X0, th->Xdot, th->affine, PETSC_FALSE);
194: VecScale(th->affine, (th->Theta - 1) / th->Theta);
195: } else if (th->affine) { /* Just in case th->endpoint is changed between calls to TSStep_Theta() */
196: VecZeroEntries(th->affine);
197: }
198: TSPreStage(ts, th->stage_time);
199: TSTheta_SNESSolve(ts, th->affine, th->X);
200: TSPostStage(ts, th->stage_time, 0, &th->X);
201: TSAdaptCheckStage(ts->adapt, ts, th->stage_time, th->X, &stageok);
202: if (!stageok) goto reject_step;
204: th->status = TS_STEP_PENDING;
205: if (th->endpoint) {
206: VecCopy(th->X, ts->vec_sol);
207: } else {
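/* midpoint form: recover the stage derivative Xdot = (X - X0)/(Theta*dt) and advance U_{n+1} = U_n + dt*Xdot */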
208: VecAXPBYPCZ(th->Xdot, -th->shift, th->shift, 0, th->X0, th->X);
209: VecAXPY(ts->vec_sol, ts->time_step, th->Xdot);
210: }
211: TSAdaptChoose(ts->adapt, ts, ts->time_step, NULL, &next_time_step, &accept);
212: th->status = accept ? TS_STEP_COMPLETE : TS_STEP_INCOMPLETE;
213: if (!accept) {
214: VecCopy(th->X0, ts->vec_sol);
215: ts->time_step = next_time_step;
216: goto reject_step;
217: }
219: if (ts->forward_solve || ts->costintegralfwd) { /* Save the info for the later use in cost integral evaluation */
220: th->ptime0 = ts->ptime;
221: th->time_step0 = ts->time_step;
222: }
223: ts->ptime += ts->time_step;
224: ts->time_step = next_time_step;
225: break;
227: reject_step:
228: ts->reject++;
229: accept = PETSC_FALSE;
230: if (!ts->reason && ++rejections > ts->max_reject && ts->max_reject >= 0) {
231: ts->reason = TS_DIVERGED_STEP_REJECTED;
232: PetscInfo(ts, "Step=%" PetscInt_FMT ", step rejections %" PetscInt_FMT " greater than current TS allowed, stopping solve\n", ts->steps, rejections);
233: }
234: }
235: return 0;
236: }
238: static PetscErrorCode TSAdjointStepBEuler_Private(TS ts)
239: {
240: TS_Theta *th = (TS_Theta *)ts->data;
241: TS quadts = ts->quadraturets;
242: Vec *VecsDeltaLam = th->VecsDeltaLam, *VecsDeltaMu = th->VecsDeltaMu, *VecsSensiTemp = th->VecsSensiTemp;
243: Vec *VecsDeltaLam2 = th->VecsDeltaLam2, *VecsDeltaMu2 = th->VecsDeltaMu2, *VecsSensi2Temp = th->VecsSensi2Temp;
244: PetscInt nadj;
245: Mat J, Jpre, quadJ = NULL, quadJp = NULL;
246: KSP ksp;
247: PetscScalar *xarr;
248: TSEquationType eqtype;
249: PetscBool isexplicitode = PETSC_FALSE;
250: PetscReal adjoint_time_step;
252: TSGetEquationType(ts, &eqtype);
253: if (eqtype == TS_EQ_ODE_EXPLICIT) {
254: isexplicitode = PETSC_TRUE;
255: VecsDeltaLam = ts->vecs_sensi;
256: VecsDeltaLam2 = ts->vecs_sensi2;
257: }
258: th->status = TS_STEP_INCOMPLETE;
259: SNESGetKSP(ts->snes, &ksp);
260: TSGetIJacobian(ts, &J, &Jpre, NULL, NULL);
261: if (quadts) {
262: TSGetRHSJacobian(quadts, &quadJ, NULL, NULL, NULL);
263: TSGetRHSJacobianP(quadts, &quadJp, NULL, NULL);
264: }
266: th->stage_time = ts->ptime;
267: adjoint_time_step = -ts->time_step; /* always positive since time_step is negative */
269: /* Build RHS for first-order adjoint lambda_{n+1}/h + r_u^T(n+1) */
270: if (quadts) TSComputeRHSJacobian(quadts, th->stage_time, ts->vec_sol, quadJ, NULL);
272: for (nadj = 0; nadj < ts->numcost; nadj++) {
273: VecCopy(ts->vecs_sensi[nadj], VecsSensiTemp[nadj]);
274: VecScale(VecsSensiTemp[nadj], 1. / adjoint_time_step); /* lambda_{n+1}/h */
275: if (quadJ) {
276: MatDenseGetColumn(quadJ, nadj, &xarr);
277: VecPlaceArray(ts->vec_drdu_col, xarr);
278: VecAXPY(VecsSensiTemp[nadj], 1., ts->vec_drdu_col);
279: VecResetArray(ts->vec_drdu_col);
280: MatDenseRestoreColumn(quadJ, &xarr);
281: }
282: }
284: /* Build LHS for first-order adjoint */
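/* With shift = 1/adjoint_time_step (set below), TSComputeSNESJacobian() assembles J = shift*F_Udot + F_U
   (e.g. M/h - f_U for F = M*Udot - f); its transpose is the adjoint system matrix */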
285: th->shift = 1. / adjoint_time_step;
286: TSComputeSNESJacobian(ts, ts->vec_sol, J, Jpre);
287: KSPSetOperators(ksp, J, Jpre);
289: /* Solve stage equation LHS*lambda_s = RHS for first-order adjoint */
290: for (nadj = 0; nadj < ts->numcost; nadj++) {
291: KSPConvergedReason kspreason;
292: KSPSolveTranspose(ksp, VecsSensiTemp[nadj], VecsDeltaLam[nadj]);
293: KSPGetConvergedReason(ksp, &kspreason);
294: if (kspreason < 0) {
295: ts->reason = TSADJOINT_DIVERGED_LINEAR_SOLVE;
296: PetscInfo(ts, "Step=%" PetscInt_FMT ", %" PetscInt_FMT "th cost function, transposed linear solve fails, stopping 1st-order adjoint solve\n", ts->steps, nadj);
297: }
298: }
300: if (ts->vecs_sensi2) { /* U_{n+1} */
301: /* Get w1 at t_{n+1} from TLM matrix */
302: MatDenseGetColumn(ts->mat_sensip, 0, &xarr);
303: VecPlaceArray(ts->vec_sensip_col, xarr);
304: /* lambda_s^T F_UU w_1 */
305: TSComputeIHessianProductFunctionUU(ts, th->stage_time, ts->vec_sol, VecsDeltaLam, ts->vec_sensip_col, ts->vecs_fuu);
306: /* lambda_s^T F_UP w_2 */
307: TSComputeIHessianProductFunctionUP(ts, th->stage_time, ts->vec_sol, VecsDeltaLam, ts->vec_dir, ts->vecs_fup);
308: for (nadj = 0; nadj < ts->numcost; nadj++) { /* compute the residual */
309: VecCopy(ts->vecs_sensi2[nadj], VecsSensi2Temp[nadj]);
310: VecScale(VecsSensi2Temp[nadj], 1. / adjoint_time_step);
311: VecAXPY(VecsSensi2Temp[nadj], -1., ts->vecs_fuu[nadj]);
312: if (ts->vecs_fup) VecAXPY(VecsSensi2Temp[nadj], -1., ts->vecs_fup[nadj]);
313: }
314: /* Solve stage equation LHS X = RHS for second-order adjoint */
315: for (nadj = 0; nadj < ts->numcost; nadj++) {
316: KSPConvergedReason kspreason;
317: KSPSolveTranspose(ksp, VecsSensi2Temp[nadj], VecsDeltaLam2[nadj]);
318: KSPGetConvergedReason(ksp, &kspreason);
319: if (kspreason < 0) {
320: ts->reason = TSADJOINT_DIVERGED_LINEAR_SOLVE;
321: PetscInfo(ts, "Step=%" PetscInt_FMT ", %" PetscInt_FMT "th cost function, transposed linear solve fails, stopping 2nd-order adjoint solve\n", ts->steps, nadj);
322: }
323: }
324: }
326: /* Update sensitivities, and evaluate integrals if there is any */
327: if (!isexplicitode) {
328: th->shift = 0.0;
329: TSComputeSNESJacobian(ts, ts->vec_sol, J, Jpre);
330: KSPSetOperators(ksp, J, Jpre);
331: for (nadj = 0; nadj < ts->numcost; nadj++) {
332: /* Add f_U \lambda_s to the original RHS */
333: VecScale(VecsSensiTemp[nadj], -1.);
334: MatMultTransposeAdd(J, VecsDeltaLam[nadj], VecsSensiTemp[nadj], VecsSensiTemp[nadj]);
335: VecScale(VecsSensiTemp[nadj], -adjoint_time_step);
336: VecCopy(VecsSensiTemp[nadj], ts->vecs_sensi[nadj]);
337: if (ts->vecs_sensi2) {
338: MatMultTransposeAdd(J, VecsDeltaLam2[nadj], VecsSensi2Temp[nadj], VecsSensi2Temp[nadj]);
339: VecScale(VecsSensi2Temp[nadj], -adjoint_time_step);
340: VecCopy(VecsSensi2Temp[nadj], ts->vecs_sensi2[nadj]);
341: }
342: }
343: }
344: if (ts->vecs_sensip) {
345: VecAXPBYPCZ(th->Xdot, -1. / adjoint_time_step, 1.0 / adjoint_time_step, 0, th->X0, ts->vec_sol);
346: TSComputeIJacobianP(ts, th->stage_time, ts->vec_sol, th->Xdot, 1. / adjoint_time_step, ts->Jacp, PETSC_FALSE); /* get -f_p */
347: if (quadts) TSComputeRHSJacobianP(quadts, th->stage_time, ts->vec_sol, quadJp);
348: if (ts->vecs_sensi2p) {
349: /* lambda_s^T F_PU w_1 */
350: TSComputeIHessianProductFunctionPU(ts, th->stage_time, ts->vec_sol, VecsDeltaLam, ts->vec_sensip_col, ts->vecs_fpu);
351: /* lambda_s^T F_PP w_2 */
352: TSComputeIHessianProductFunctionPP(ts, th->stage_time, ts->vec_sol, VecsDeltaLam, ts->vec_dir, ts->vecs_fpp);
353: }
355: for (nadj = 0; nadj < ts->numcost; nadj++) {
356: MatMultTranspose(ts->Jacp, VecsDeltaLam[nadj], VecsDeltaMu[nadj]);
357: VecAXPY(ts->vecs_sensip[nadj], -adjoint_time_step, VecsDeltaMu[nadj]);
358: if (quadJp) {
359: MatDenseGetColumn(quadJp, nadj, &xarr);
360: VecPlaceArray(ts->vec_drdp_col, xarr);
361: VecAXPY(ts->vecs_sensip[nadj], adjoint_time_step, ts->vec_drdp_col);
362: VecResetArray(ts->vec_drdp_col);
363: MatDenseRestoreColumn(quadJp, &xarr);
364: }
365: if (ts->vecs_sensi2p) {
366: MatMultTranspose(ts->Jacp, VecsDeltaLam2[nadj], VecsDeltaMu2[nadj]);
367: VecAXPY(ts->vecs_sensi2p[nadj], -adjoint_time_step, VecsDeltaMu2[nadj]);
368: if (ts->vecs_fpu) VecAXPY(ts->vecs_sensi2p[nadj], -adjoint_time_step, ts->vecs_fpu[nadj]);
369: if (ts->vecs_fpp) VecAXPY(ts->vecs_sensi2p[nadj], -adjoint_time_step, ts->vecs_fpp[nadj]);
370: }
371: }
372: }
374: if (ts->vecs_sensi2) {
375: VecResetArray(ts->vec_sensip_col);
376: MatDenseRestoreColumn(ts->mat_sensip, &xarr);
377: }
378: th->status = TS_STEP_COMPLETE;
379: return 0;
380: }
382: static PetscErrorCode TSAdjointStep_Theta(TS ts)
383: {
384: TS_Theta *th = (TS_Theta *)ts->data;
385: TS quadts = ts->quadraturets;
386: Vec *VecsDeltaLam = th->VecsDeltaLam, *VecsDeltaMu = th->VecsDeltaMu, *VecsSensiTemp = th->VecsSensiTemp;
387: Vec *VecsDeltaLam2 = th->VecsDeltaLam2, *VecsDeltaMu2 = th->VecsDeltaMu2, *VecsSensi2Temp = th->VecsSensi2Temp;
388: PetscInt nadj;
389: Mat J, Jpre, quadJ = NULL, quadJp = NULL;
390: KSP ksp;
391: PetscScalar *xarr;
392: PetscReal adjoint_time_step;
393: PetscReal adjoint_ptime; /* end time of the adjoint time step (ts->ptime is the start time and is usually larger than adjoint_ptime) */
395: if (th->Theta == 1.) {
396: TSAdjointStepBEuler_Private(ts);
397: return 0;
398: }
399: th->status = TS_STEP_INCOMPLETE;
400: SNESGetKSP(ts->snes, &ksp);
401: TSGetIJacobian(ts, &J, &Jpre, NULL, NULL);
402: if (quadts) {
403: TSGetRHSJacobian(quadts, &quadJ, NULL, NULL, NULL);
404: TSGetRHSJacobianP(quadts, &quadJp, NULL, NULL);
405: }
406: /* If endpoint=1, ts->ptime and th->X0 will be used; if endpoint=0, th->stage_time and th->X will be used. */
407: th->stage_time = th->endpoint ? ts->ptime : (ts->ptime + (1. - th->Theta) * ts->time_step);
408: adjoint_ptime = ts->ptime + ts->time_step;
409: adjoint_time_step = -ts->time_step; /* always positive since time_step is negative */
411: if (!th->endpoint) {
412: /* recover th->X0 using vec_sol and the stage value th->X */
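/* X = Theta*U_{n+1} + (1-Theta)*U_n, hence U_n = (X - Theta*U_{n+1})/(1 - Theta) */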
413: VecAXPBYPCZ(th->X0, 1.0 / (1.0 - th->Theta), th->Theta / (th->Theta - 1.0), 0, th->X, ts->vec_sol);
414: }
416: /* Build RHS for first-order adjoint */
417: /* Cost function has an integral term */
418: if (quadts) {
419: if (th->endpoint) {
420: TSComputeRHSJacobian(quadts, th->stage_time, ts->vec_sol, quadJ, NULL);
421: } else {
422: TSComputeRHSJacobian(quadts, th->stage_time, th->X, quadJ, NULL);
423: }
424: }
426: for (nadj = 0; nadj < ts->numcost; nadj++) {
427: VecCopy(ts->vecs_sensi[nadj], VecsSensiTemp[nadj]);
428: VecScale(VecsSensiTemp[nadj], 1. / (th->Theta * adjoint_time_step));
429: if (quadJ) {
430: MatDenseGetColumn(quadJ, nadj, &xarr);
431: VecPlaceArray(ts->vec_drdu_col, xarr);
432: VecAXPY(VecsSensiTemp[nadj], 1., ts->vec_drdu_col);
433: VecResetArray(ts->vec_drdu_col);
434: MatDenseRestoreColumn(quadJ, &xarr);
435: }
436: }
438: /* Build LHS for first-order adjoint */
439: th->shift = 1. / (th->Theta * adjoint_time_step);
440: if (th->endpoint) {
441: TSComputeSNESJacobian(ts, ts->vec_sol, J, Jpre);
442: } else {
443: TSComputeSNESJacobian(ts, th->X, J, Jpre);
444: }
445: KSPSetOperators(ksp, J, Jpre);
447: /* Solve stage equation LHS*lambda_s = RHS for first-order adjoint */
448: for (nadj = 0; nadj < ts->numcost; nadj++) {
449: KSPConvergedReason kspreason;
450: KSPSolveTranspose(ksp, VecsSensiTemp[nadj], VecsDeltaLam[nadj]);
451: KSPGetConvergedReason(ksp, &kspreason);
452: if (kspreason < 0) {
453: ts->reason = TSADJOINT_DIVERGED_LINEAR_SOLVE;
454: PetscInfo(ts, "Step=%" PetscInt_FMT ", %" PetscInt_FMT "th cost function, transposed linear solve fails, stopping 1st-order adjoint solve\n", ts->steps, nadj);
455: }
456: }
458: /* Second-order adjoint */
459: if (ts->vecs_sensi2) { /* U_{n+1} */
461: /* Get w1 at t_{n+1} from TLM matrix */
462: MatDenseGetColumn(ts->mat_sensip, 0, &xarr);
463: VecPlaceArray(ts->vec_sensip_col, xarr);
464: /* lambda_s^T F_UU w_1 */
465: TSComputeIHessianProductFunctionUU(ts, th->stage_time, ts->vec_sol, VecsDeltaLam, ts->vec_sensip_col, ts->vecs_fuu);
466: VecResetArray(ts->vec_sensip_col);
467: MatDenseRestoreColumn(ts->mat_sensip, &xarr);
468: /* lambda_s^T F_UP w_2 */
469: TSComputeIHessianProductFunctionUP(ts, th->stage_time, ts->vec_sol, VecsDeltaLam, ts->vec_dir, ts->vecs_fup);
470: for (nadj = 0; nadj < ts->numcost; nadj++) { /* compute the residual */
471: VecCopy(ts->vecs_sensi2[nadj], VecsSensi2Temp[nadj]);
472: VecScale(VecsSensi2Temp[nadj], th->shift);
473: VecAXPY(VecsSensi2Temp[nadj], -1., ts->vecs_fuu[nadj]);
474: if (ts->vecs_fup) VecAXPY(VecsSensi2Temp[nadj], -1., ts->vecs_fup[nadj]);
475: }
476: /* Solve stage equation LHS X = RHS for second-order adjoint */
477: for (nadj = 0; nadj < ts->numcost; nadj++) {
478: KSPConvergedReason kspreason;
479: KSPSolveTranspose(ksp, VecsSensi2Temp[nadj], VecsDeltaLam2[nadj]);
480: KSPGetConvergedReason(ksp, &kspreason);
481: if (kspreason < 0) {
482: ts->reason = TSADJOINT_DIVERGED_LINEAR_SOLVE;
483: PetscInfo(ts, "Step=%" PetscInt_FMT ", %" PetscInt_FMT "th cost function, transposed linear solve fails, stopping 2nd-order adjoint solve\n", ts->steps, nadj);
484: }
485: }
486: }
488: /* Update sensitivities, and evaluate integrals if there is any */
489: if (th->endpoint) { /* two-stage Theta methods with th->Theta!=1, th->Theta==1 leads to BEuler */
490: th->shift = 1. / ((th->Theta - 1.) * adjoint_time_step);
491: th->stage_time = adjoint_ptime;
492: TSComputeSNESJacobian(ts, th->X0, J, Jpre);
493: KSPSetOperators(ksp, J, Jpre);
494: /* R_U at t_n */
495: if (quadts) TSComputeRHSJacobian(quadts, adjoint_ptime, th->X0, quadJ, NULL);
496: for (nadj = 0; nadj < ts->numcost; nadj++) {
497: MatMultTranspose(J, VecsDeltaLam[nadj], ts->vecs_sensi[nadj]);
498: if (quadJ) {
499: MatDenseGetColumn(quadJ, nadj, &xarr);
500: VecPlaceArray(ts->vec_drdu_col, xarr);
501: VecAXPY(ts->vecs_sensi[nadj], -1., ts->vec_drdu_col);
502: VecResetArray(ts->vec_drdu_col);
503: MatDenseRestoreColumn(quadJ, &xarr);
504: }
505: VecScale(ts->vecs_sensi[nadj], 1. / th->shift);
506: }
508: /* Second-order adjoint */
509: if (ts->vecs_sensi2) { /* U_n */
510: /* Get w1 at t_n from TLM matrix */
511: MatDenseGetColumn(th->MatFwdSensip0, 0, &xarr);
512: VecPlaceArray(ts->vec_sensip_col, xarr);
513: /* lambda_s^T F_UU w_1 */
514: TSComputeIHessianProductFunctionUU(ts, adjoint_ptime, th->X0, VecsDeltaLam, ts->vec_sensip_col, ts->vecs_fuu);
515: VecResetArray(ts->vec_sensip_col);
516: MatDenseRestoreColumn(th->MatFwdSensip0, &xarr);
517: /* lambda_s^T F_UP w_2 */
518: TSComputeIHessianProductFunctionUP(ts, adjoint_ptime, th->X0, VecsDeltaLam, ts->vec_dir, ts->vecs_fup);
519: for (nadj = 0; nadj < ts->numcost; nadj++) {
520: /* M^T Lambda_s + h(1-theta) F_U^T Lambda_s + h(1-theta) lambda_s^T F_UU w_1 + lambda_s^T F_UP w_2 */
521: MatMultTranspose(J, VecsDeltaLam2[nadj], ts->vecs_sensi2[nadj]);
522: VecAXPY(ts->vecs_sensi2[nadj], 1., ts->vecs_fuu[nadj]);
523: if (ts->vecs_fup) VecAXPY(ts->vecs_sensi2[nadj], 1., ts->vecs_fup[nadj]);
524: VecScale(ts->vecs_sensi2[nadj], 1. / th->shift);
525: }
526: }
528: th->stage_time = ts->ptime; /* recover the old value */
530: if (ts->vecs_sensip) { /* sensitivities wrt parameters */
531: /* U_{n+1} */
532: th->shift = 1.0 / (adjoint_time_step * th->Theta);
533: VecAXPBYPCZ(th->Xdot, -th->shift, th->shift, 0, th->X0, ts->vec_sol);
534: TSComputeIJacobianP(ts, th->stage_time, ts->vec_sol, th->Xdot, -1. / (th->Theta * adjoint_time_step), ts->Jacp, PETSC_FALSE);
535: if (quadts) TSComputeRHSJacobianP(quadts, th->stage_time, ts->vec_sol, quadJp);
536: for (nadj = 0; nadj < ts->numcost; nadj++) {
537: MatMultTranspose(ts->Jacp, VecsDeltaLam[nadj], VecsDeltaMu[nadj]);
538: VecAXPY(ts->vecs_sensip[nadj], -adjoint_time_step * th->Theta, VecsDeltaMu[nadj]);
539: if (quadJp) {
540: MatDenseGetColumn(quadJp, nadj, &xarr);
541: VecPlaceArray(ts->vec_drdp_col, xarr);
542: VecAXPY(ts->vecs_sensip[nadj], adjoint_time_step * th->Theta, ts->vec_drdp_col);
543: VecResetArray(ts->vec_drdp_col);
544: MatDenseRestoreColumn(quadJp, &xarr);
545: }
546: }
547: if (ts->vecs_sensi2p) { /* second-order */
548: /* Get w1 at t_{n+1} from TLM matrix */
549: MatDenseGetColumn(ts->mat_sensip, 0, &xarr);
550: VecPlaceArray(ts->vec_sensip_col, xarr);
551: /* lambda_s^T F_PU w_1 */
552: TSComputeIHessianProductFunctionPU(ts, th->stage_time, ts->vec_sol, VecsDeltaLam, ts->vec_sensip_col, ts->vecs_fpu);
553: VecResetArray(ts->vec_sensip_col);
554: MatDenseRestoreColumn(ts->mat_sensip, &xarr);
556: /* lambda_s^T F_PP w_2 */
557: TSComputeIHessianProductFunctionPP(ts, th->stage_time, ts->vec_sol, VecsDeltaLam, ts->vec_dir, ts->vecs_fpp);
558: for (nadj = 0; nadj < ts->numcost; nadj++) {
559: /* Mu2 <- Mu2 + h theta F_P^T Lambda_s + h theta (lambda_s^T F_PU w_1 + lambda_s^T F_PP w_2) */
560: MatMultTranspose(ts->Jacp, VecsDeltaLam2[nadj], VecsDeltaMu2[nadj]);
561: VecAXPY(ts->vecs_sensi2p[nadj], -adjoint_time_step * th->Theta, VecsDeltaMu2[nadj]);
562: if (ts->vecs_fpu) VecAXPY(ts->vecs_sensi2p[nadj], -adjoint_time_step * th->Theta, ts->vecs_fpu[nadj]);
563: if (ts->vecs_fpp) VecAXPY(ts->vecs_sensi2p[nadj], -adjoint_time_step * th->Theta, ts->vecs_fpp[nadj]);
564: }
565: }
567: /* U_s */
568: VecZeroEntries(th->Xdot);
569: TSComputeIJacobianP(ts, adjoint_ptime, th->X0, th->Xdot, 1. / ((th->Theta - 1.0) * adjoint_time_step), ts->Jacp, PETSC_FALSE);
570: if (quadts) TSComputeRHSJacobianP(quadts, adjoint_ptime, th->X0, quadJp);
571: for (nadj = 0; nadj < ts->numcost; nadj++) {
572: MatMultTranspose(ts->Jacp, VecsDeltaLam[nadj], VecsDeltaMu[nadj]);
573: VecAXPY(ts->vecs_sensip[nadj], -adjoint_time_step * (1.0 - th->Theta), VecsDeltaMu[nadj]);
574: if (quadJp) {
575: MatDenseGetColumn(quadJp, nadj, &xarr);
576: VecPlaceArray(ts->vec_drdp_col, xarr);
577: VecAXPY(ts->vecs_sensip[nadj], adjoint_time_step * (1.0 - th->Theta), ts->vec_drdp_col);
578: VecResetArray(ts->vec_drdp_col);
579: MatDenseRestoreColumn(quadJp, &xarr);
580: }
581: if (ts->vecs_sensi2p) { /* second-order */
582: /* Get w1 at t_n from TLM matrix */
583: MatDenseGetColumn(th->MatFwdSensip0, 0, &xarr);
584: VecPlaceArray(ts->vec_sensip_col, xarr);
585: /* lambda_s^T F_PU w_1 */
586: TSComputeIHessianProductFunctionPU(ts, adjoint_ptime, th->X0, VecsDeltaLam, ts->vec_sensip_col, ts->vecs_fpu);
587: VecResetArray(ts->vec_sensip_col);
588: MatDenseRestoreColumn(th->MatFwdSensip0, &xarr);
589: /* lambda_s^T F_PP w_2 */
590: TSComputeIHessianProductFunctionPP(ts, adjoint_ptime, th->X0, VecsDeltaLam, ts->vec_dir, ts->vecs_fpp);
591: for (nadj = 0; nadj < ts->numcost; nadj++) {
592: /* Mu2 <- Mu2 + h(1-theta) F_P^T Lambda_s + h(1-theta) (lambda_s^T F_PU w_1 + lambda_s^T F_PP w_2) */
593: MatMultTranspose(ts->Jacp, VecsDeltaLam2[nadj], VecsDeltaMu2[nadj]);
594: VecAXPY(ts->vecs_sensi2p[nadj], -adjoint_time_step * (1.0 - th->Theta), VecsDeltaMu2[nadj]);
595: if (ts->vecs_fpu) VecAXPY(ts->vecs_sensi2p[nadj], -adjoint_time_step * (1.0 - th->Theta), ts->vecs_fpu[nadj]);
596: if (ts->vecs_fpp) VecAXPY(ts->vecs_sensi2p[nadj], -adjoint_time_step * (1.0 - th->Theta), ts->vecs_fpp[nadj]);
597: }
598: }
599: }
600: }
601: } else { /* one-stage case */
602: th->shift = 0.0;
603: TSComputeSNESJacobian(ts, th->X, J, Jpre); /* get -f_y */
604: KSPSetOperators(ksp, J, Jpre);
605: if (quadts) TSComputeRHSJacobian(quadts, th->stage_time, th->X, quadJ, NULL);
606: for (nadj = 0; nadj < ts->numcost; nadj++) {
607: MatMultTranspose(J, VecsDeltaLam[nadj], VecsSensiTemp[nadj]);
608: VecAXPY(ts->vecs_sensi[nadj], -adjoint_time_step, VecsSensiTemp[nadj]);
609: if (quadJ) {
610: MatDenseGetColumn(quadJ, nadj, &xarr);
611: VecPlaceArray(ts->vec_drdu_col, xarr);
612: VecAXPY(ts->vecs_sensi[nadj], adjoint_time_step, ts->vec_drdu_col);
613: VecResetArray(ts->vec_drdu_col);
614: MatDenseRestoreColumn(quadJ, &xarr);
615: }
616: }
617: if (ts->vecs_sensip) {
618: th->shift = 1.0 / (adjoint_time_step * th->Theta);
619: VecAXPBYPCZ(th->Xdot, -th->shift, th->shift, 0, th->X0, th->X);
620: TSComputeIJacobianP(ts, th->stage_time, th->X, th->Xdot, th->shift, ts->Jacp, PETSC_FALSE);
621: if (quadts) TSComputeRHSJacobianP(quadts, th->stage_time, th->X, quadJp);
622: for (nadj = 0; nadj < ts->numcost; nadj++) {
623: MatMultTranspose(ts->Jacp, VecsDeltaLam[nadj], VecsDeltaMu[nadj]);
624: VecAXPY(ts->vecs_sensip[nadj], -adjoint_time_step, VecsDeltaMu[nadj]);
625: if (quadJp) {
626: MatDenseGetColumn(quadJp, nadj, &xarr);
627: VecPlaceArray(ts->vec_drdp_col, xarr);
628: VecAXPY(ts->vecs_sensip[nadj], adjoint_time_step, ts->vec_drdp_col);
629: VecResetArray(ts->vec_drdp_col);
630: MatDenseRestoreColumn(quadJp, &xarr);
631: }
632: }
633: }
634: }
636: th->status = TS_STEP_COMPLETE;
637: return 0;
638: }
640: static PetscErrorCode TSInterpolate_Theta(TS ts, PetscReal t, Vec X)
641: {
642: TS_Theta *th = (TS_Theta *)ts->data;
643: PetscReal dt = t - ts->ptime;
645: VecCopy(ts->vec_sol, th->X);
646: if (th->endpoint) dt *= th->Theta;
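/* linear interpolation about t_{n+1} = ts->ptime; in the endpoint form Xdot holds (U_{n+1}-U_n)/(Theta*h), hence the extra factor of Theta */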
647: VecWAXPY(X, dt, th->Xdot, th->X);
648: return 0;
649: }
651: static PetscErrorCode TSEvaluateWLTE_Theta(TS ts, NormType wnormtype, PetscInt *order, PetscReal *wlte)
652: {
653: TS_Theta *th = (TS_Theta *)ts->data;
654: Vec X = ts->vec_sol; /* X = solution */
655: Vec Y = th->vec_lte_work; /* Y = X + LTE */
656: PetscReal wltea, wlter;
658: if (!th->vec_sol_prev) {
659: *wlte = -1;
660: return 0;
661: }
662: /* Cannot compute LTE in first step or in restart after event */
663: if (ts->steprestart) {
664: *wlte = -1;
665: return 0;
666: }
667: /* Compute LTE using backward differences with non-constant time step */
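/* With a = 1 + h_prev/h, the combination (1/a)*X - (1/(a-1))*X0 + (1/(a*(a-1)))*X_prev below
   approximates (h^2/2)*u''(t_{n+1}), which serves as the local truncation error surrogate */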
668: {
669: PetscReal h = ts->time_step, h_prev = ts->ptime - ts->ptime_prev;
670: PetscReal a = 1 + h_prev / h;
671: PetscScalar scal[3];
672: Vec vecs[3];
673: scal[0] = +1 / a;
674: scal[1] = -1 / (a - 1);
675: scal[2] = +1 / (a * (a - 1));
676: vecs[0] = X;
677: vecs[1] = th->X0;
678: vecs[2] = th->vec_sol_prev;
679: VecCopy(X, Y);
680: VecMAXPY(Y, 3, scal, vecs);
681: TSErrorWeightedNorm(ts, X, Y, wnormtype, wlte, &wltea, &wlter);
682: }
683: if (order) *order = 2;
684: return 0;
685: }
687: static PetscErrorCode TSRollBack_Theta(TS ts)
688: {
689: TS_Theta *th = (TS_Theta *)ts->data;
690: TS quadts = ts->quadraturets;
692: VecCopy(th->X0, ts->vec_sol);
693: if (quadts && ts->costintegralfwd) VecCopy(th->VecCostIntegral0, quadts->vec_sol);
694: th->status = TS_STEP_INCOMPLETE;
695: if (ts->mat_sensip) MatCopy(th->MatFwdSensip0, ts->mat_sensip, SAME_NONZERO_PATTERN);
696: if (quadts && quadts->mat_sensip) MatCopy(th->MatIntegralSensip0, quadts->mat_sensip, SAME_NONZERO_PATTERN);
697: return 0;
698: }
700: static PetscErrorCode TSForwardStep_Theta(TS ts)
701: {
702: TS_Theta *th = (TS_Theta *)ts->data;
703: TS quadts = ts->quadraturets;
704: Mat MatDeltaFwdSensip = th->MatDeltaFwdSensip;
705: Vec VecDeltaFwdSensipCol = th->VecDeltaFwdSensipCol;
706: PetscInt ntlm;
707: KSP ksp;
708: Mat J, Jpre, quadJ = NULL, quadJp = NULL;
709: PetscScalar *barr, *xarr;
710: PetscReal previous_shift;
712: previous_shift = th->shift;
713: MatCopy(ts->mat_sensip, th->MatFwdSensip0, SAME_NONZERO_PATTERN);
715: if (quadts && quadts->mat_sensip) MatCopy(quadts->mat_sensip, th->MatIntegralSensip0, SAME_NONZERO_PATTERN);
716: SNESGetKSP(ts->snes, &ksp);
717: TSGetIJacobian(ts, &J, &Jpre, NULL, NULL);
718: if (quadts) {
719: TSGetRHSJacobian(quadts, &quadJ, NULL, NULL, NULL);
720: TSGetRHSJacobianP(quadts, &quadJp, NULL, NULL);
721: }
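/*
   Tangent linear model: differentiate the Theta step with respect to the parameters.
   The RHS assembled below collects Jacobian terms evaluated at the old time level plus
   the f_p forcing; the LHS reuses the shifted Jacobian of the nonlinear solve, so each
   column of ts->mat_sensip is propagated by one linear solve.
*/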
723: /* Build RHS */
724: if (th->endpoint) { /* 2-stage method*/
725: th->shift = 1. / ((th->Theta - 1.) * th->time_step0);
726: TSComputeIJacobian(ts, th->ptime0, th->X0, th->Xdot, th->shift, J, Jpre, PETSC_FALSE);
727: MatMatMult(J, ts->mat_sensip, MAT_REUSE_MATRIX, PETSC_DEFAULT, &MatDeltaFwdSensip);
728: MatScale(MatDeltaFwdSensip, (th->Theta - 1.) / th->Theta);
730: /* Add the f_p forcing terms */
731: if (ts->Jacp) {
732: VecZeroEntries(th->Xdot);
733: TSComputeIJacobianP(ts, th->ptime0, th->X0, th->Xdot, th->shift, ts->Jacp, PETSC_FALSE);
734: MatAXPY(MatDeltaFwdSensip, (th->Theta - 1.) / th->Theta, ts->Jacp, SUBSET_NONZERO_PATTERN);
735: th->shift = previous_shift;
736: VecAXPBYPCZ(th->Xdot, -th->shift, th->shift, 0, th->X0, ts->vec_sol);
737: TSComputeIJacobianP(ts, th->stage_time, ts->vec_sol, th->Xdot, th->shift, ts->Jacp, PETSC_FALSE);
738: MatAXPY(MatDeltaFwdSensip, -1., ts->Jacp, SUBSET_NONZERO_PATTERN);
739: }
740: } else { /* 1-stage method */
741: th->shift = 0.0;
742: TSComputeIJacobian(ts, th->stage_time, th->X, th->Xdot, th->shift, J, Jpre, PETSC_FALSE);
743: MatMatMult(J, ts->mat_sensip, MAT_REUSE_MATRIX, PETSC_DEFAULT, &MatDeltaFwdSensip);
744: MatScale(MatDeltaFwdSensip, -1.);
746: /* Add the f_p forcing terms */
747: if (ts->Jacp) {
748: th->shift = previous_shift;
749: VecAXPBYPCZ(th->Xdot, -th->shift, th->shift, 0, th->X0, th->X);
750: TSComputeIJacobianP(ts, th->stage_time, th->X, th->Xdot, th->shift, ts->Jacp, PETSC_FALSE);
751: MatAXPY(MatDeltaFwdSensip, -1., ts->Jacp, SUBSET_NONZERO_PATTERN);
752: }
753: }
755: /* Build LHS */
756: th->shift = previous_shift; /* recover the previous shift used in TSStep_Theta() */
757: if (th->endpoint) {
758: TSComputeIJacobian(ts, th->stage_time, ts->vec_sol, th->Xdot, th->shift, J, Jpre, PETSC_FALSE);
759: } else {
760: TSComputeIJacobian(ts, th->stage_time, th->X, th->Xdot, th->shift, J, Jpre, PETSC_FALSE);
761: }
762: KSPSetOperators(ksp, J, Jpre);
764: /*
765: Evaluate the first stage of integral gradients with the 2-stage method:
766: drdu|t_n*S(t_n) + drdp|t_n
767: This is done before the linear solve because the sensitivity variable S(t_n) will be propagated to S(t_{n+1})
768: */
769: if (th->endpoint) { /* 2-stage method only */
770: if (quadts && quadts->mat_sensip) {
771: TSComputeRHSJacobian(quadts, th->ptime0, th->X0, quadJ, NULL);
772: TSComputeRHSJacobianP(quadts, th->ptime0, th->X0, quadJp);
773: MatTransposeMatMult(ts->mat_sensip, quadJ, MAT_REUSE_MATRIX, PETSC_DEFAULT, &th->MatIntegralSensipTemp);
774: MatAXPY(th->MatIntegralSensipTemp, 1, quadJp, SAME_NONZERO_PATTERN);
775: MatAXPY(quadts->mat_sensip, th->time_step0 * (1. - th->Theta), th->MatIntegralSensipTemp, SAME_NONZERO_PATTERN);
776: }
777: }
779: /* Solve the tangent linear equation for forward sensitivities to parameters */
780: for (ntlm = 0; ntlm < th->num_tlm; ntlm++) {
781: KSPConvergedReason kspreason;
782: MatDenseGetColumn(MatDeltaFwdSensip, ntlm, &barr);
783: VecPlaceArray(VecDeltaFwdSensipCol, barr);
784: if (th->endpoint) {
785: MatDenseGetColumn(ts->mat_sensip, ntlm, &xarr);
786: VecPlaceArray(ts->vec_sensip_col, xarr);
787: KSPSolve(ksp, VecDeltaFwdSensipCol, ts->vec_sensip_col);
788: VecResetArray(ts->vec_sensip_col);
789: MatDenseRestoreColumn(ts->mat_sensip, &xarr);
790: } else {
791: KSPSolve(ksp, VecDeltaFwdSensipCol, VecDeltaFwdSensipCol);
792: }
793: KSPGetConvergedReason(ksp, &kspreason);
794: if (kspreason < 0) {
795: ts->reason = TSFORWARD_DIVERGED_LINEAR_SOLVE;
796: PetscInfo(ts, "Step=%" PetscInt_FMT ", %" PetscInt_FMT "th tangent linear solve, linear solve fails, stopping tangent linear solve\n", ts->steps, ntlm);
797: }
798: VecResetArray(VecDeltaFwdSensipCol);
799: MatDenseRestoreColumn(MatDeltaFwdSensip, &barr);
800: }
802: /*
803: Evaluate the second stage of integral gradients with the 2-stage method:
804: drdu|t_{n+1}*S(t_{n+1}) + drdp|t_{n+1}
805: */
806: if (quadts && quadts->mat_sensip) {
807: if (!th->endpoint) {
808: MatAXPY(ts->mat_sensip, 1, MatDeltaFwdSensip, SAME_NONZERO_PATTERN); /* stage sensitivity */
809: TSComputeRHSJacobian(quadts, th->stage_time, th->X, quadJ, NULL);
810: TSComputeRHSJacobianP(quadts, th->stage_time, th->X, quadJp);
811: MatTransposeMatMult(ts->mat_sensip, quadJ, MAT_REUSE_MATRIX, PETSC_DEFAULT, &th->MatIntegralSensipTemp);
812: MatAXPY(th->MatIntegralSensipTemp, 1, quadJp, SAME_NONZERO_PATTERN);
813: MatAXPY(quadts->mat_sensip, th->time_step0, th->MatIntegralSensipTemp, SAME_NONZERO_PATTERN);
814: MatAXPY(ts->mat_sensip, (1. - th->Theta) / th->Theta, MatDeltaFwdSensip, SAME_NONZERO_PATTERN);
815: } else {
816: TSComputeRHSJacobian(quadts, th->stage_time, ts->vec_sol, quadJ, NULL);
817: TSComputeRHSJacobianP(quadts, th->stage_time, ts->vec_sol, quadJp);
818: MatTransposeMatMult(ts->mat_sensip, quadJ, MAT_REUSE_MATRIX, PETSC_DEFAULT, &th->MatIntegralSensipTemp);
819: MatAXPY(th->MatIntegralSensipTemp, 1, quadJp, SAME_NONZERO_PATTERN);
820: MatAXPY(quadts->mat_sensip, th->time_step0 * th->Theta, th->MatIntegralSensipTemp, SAME_NONZERO_PATTERN);
821: }
822: } else {
823: if (!th->endpoint) MatAXPY(ts->mat_sensip, 1. / th->Theta, MatDeltaFwdSensip, SAME_NONZERO_PATTERN);
824: }
825: return 0;
826: }
828: static PetscErrorCode TSForwardGetStages_Theta(TS ts, PetscInt *ns, Mat *stagesensip[])
829: {
830: TS_Theta *th = (TS_Theta *)ts->data;
832: if (ns) {
833: if (!th->endpoint && th->Theta != 1.0) *ns = 1; /* midpoint form */
834: else *ns = 2; /* endpoint form */
835: }
836: if (stagesensip) {
837: if (!th->endpoint && th->Theta != 1.0) {
838: th->MatFwdStages[0] = th->MatDeltaFwdSensip;
839: } else {
840: th->MatFwdStages[0] = th->MatFwdSensip0;
841: th->MatFwdStages[1] = ts->mat_sensip; /* stiffly accurate */
842: }
843: *stagesensip = th->MatFwdStages;
844: }
845: return 0;
846: }
848: /*------------------------------------------------------------*/
849: static PetscErrorCode TSReset_Theta(TS ts)
850: {
851: TS_Theta *th = (TS_Theta *)ts->data;
853: VecDestroy(&th->X);
854: VecDestroy(&th->Xdot);
855: VecDestroy(&th->X0);
856: VecDestroy(&th->affine);
858: VecDestroy(&th->vec_sol_prev);
859: VecDestroy(&th->vec_lte_work);
861: VecDestroy(&th->VecCostIntegral0);
862: return 0;
863: }
865: static PetscErrorCode TSAdjointReset_Theta(TS ts)
866: {
867: TS_Theta *th = (TS_Theta *)ts->data;
869: VecDestroyVecs(ts->numcost, &th->VecsDeltaLam);
870: VecDestroyVecs(ts->numcost, &th->VecsDeltaMu);
871: VecDestroyVecs(ts->numcost, &th->VecsDeltaLam2);
872: VecDestroyVecs(ts->numcost, &th->VecsDeltaMu2);
873: VecDestroyVecs(ts->numcost, &th->VecsSensiTemp);
874: VecDestroyVecs(ts->numcost, &th->VecsSensi2Temp);
875: return 0;
876: }
878: static PetscErrorCode TSDestroy_Theta(TS ts)
879: {
880: TSReset_Theta(ts);
881: if (ts->dm) {
882: DMCoarsenHookRemove(ts->dm, DMCoarsenHook_TSTheta, DMRestrictHook_TSTheta, ts);
883: DMSubDomainHookRemove(ts->dm, DMSubDomainHook_TSTheta, DMSubDomainRestrictHook_TSTheta, ts);
884: }
885: PetscFree(ts->data);
886: PetscObjectComposeFunction((PetscObject)ts, "TSThetaGetTheta_C", NULL);
887: PetscObjectComposeFunction((PetscObject)ts, "TSThetaSetTheta_C", NULL);
888: PetscObjectComposeFunction((PetscObject)ts, "TSThetaGetEndpoint_C", NULL);
889: PetscObjectComposeFunction((PetscObject)ts, "TSThetaSetEndpoint_C", NULL);
890: return 0;
891: }
893: /*
894: This defines the nonlinear equation that is to be solved with SNES
895: G(U) = F[t0+Theta*dt, U, (U-U0)*shift] = 0
897: Note that U here is the stage argument. This means that U = U_{n+1} only if endpoint = true,
898: otherwise U = theta U_{n+1} + (1 - theta) U0, which for the case of implicit midpoint is
899: U = (U_{n+1} + U0)/2
900: */
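/*
   In the midpoint form the step is completed in TSStep_Theta() from the converged stage as
   U_{n+1} = U0 + (U - U0)/Theta, i.e. U_{n+1} = U0 + dt*Xdot with Xdot = (U - U0)*shift.
*/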
901: static PetscErrorCode SNESTSFormFunction_Theta(SNES snes, Vec x, Vec y, TS ts)
902: {
903: TS_Theta *th = (TS_Theta *)ts->data;
904: Vec X0, Xdot;
905: DM dm, dmsave;
906: PetscReal shift = th->shift;
908: SNESGetDM(snes, &dm);
909: /* When using the endpoint variant, this is actually 1/Theta * Xdot */
910: TSThetaGetX0AndXdot(ts, dm, &X0, &Xdot);
911: if (x != X0) {
912: VecAXPBYPCZ(Xdot, -shift, shift, 0, X0, x);
913: } else {
914: VecZeroEntries(Xdot);
915: }
916: /* DM monkey-business allows user code to call TSGetDM() inside of functions evaluated on levels of FAS */
917: dmsave = ts->dm;
918: ts->dm = dm;
919: TSComputeIFunction(ts, th->stage_time, x, Xdot, y, PETSC_FALSE);
920: ts->dm = dmsave;
921: TSThetaRestoreX0AndXdot(ts, dm, &X0, &Xdot);
922: return 0;
923: }
925: static PetscErrorCode SNESTSFormJacobian_Theta(SNES snes, Vec x, Mat A, Mat B, TS ts)
926: {
927: TS_Theta *th = (TS_Theta *)ts->data;
928: Vec Xdot;
929: DM dm, dmsave;
930: PetscReal shift = th->shift;
932: SNESGetDM(snes, &dm);
933: /* Xdot has already been computed in SNESTSFormFunction_Theta (SNES guarantees this) */
934: TSThetaGetX0AndXdot(ts, dm, NULL, &Xdot);
936: dmsave = ts->dm;
937: ts->dm = dm;
938: TSComputeIJacobian(ts, th->stage_time, x, Xdot, shift, A, B, PETSC_FALSE);
939: ts->dm = dmsave;
940: TSThetaRestoreX0AndXdot(ts, dm, NULL, &Xdot);
941: return 0;
942: }
944: static PetscErrorCode TSForwardSetUp_Theta(TS ts)
945: {
946: TS_Theta *th = (TS_Theta *)ts->data;
947: TS quadts = ts->quadraturets;
949: /* combine sensitivities to parameters and sensitivities to initial values into one array */
950: th->num_tlm = ts->num_parameters;
951: MatDuplicate(ts->mat_sensip, MAT_DO_NOT_COPY_VALUES, &th->MatDeltaFwdSensip);
952: if (quadts && quadts->mat_sensip) {
953: MatDuplicate(quadts->mat_sensip, MAT_DO_NOT_COPY_VALUES, &th->MatIntegralSensipTemp);
954: MatDuplicate(quadts->mat_sensip, MAT_DO_NOT_COPY_VALUES, &th->MatIntegralSensip0);
955: }
956: /* backup sensitivity results for roll-backs */
957: MatDuplicate(ts->mat_sensip, MAT_DO_NOT_COPY_VALUES, &th->MatFwdSensip0);
959: VecDuplicate(ts->vec_sol, &th->VecDeltaFwdSensipCol);
960: return 0;
961: }
963: static PetscErrorCode TSForwardReset_Theta(TS ts)
964: {
965: TS_Theta *th = (TS_Theta *)ts->data;
966: TS quadts = ts->quadraturets;
968: if (quadts && quadts->mat_sensip) {
969: MatDestroy(&th->MatIntegralSensipTemp);
970: MatDestroy(&th->MatIntegralSensip0);
971: }
972: VecDestroy(&th->VecDeltaFwdSensipCol);
973: MatDestroy(&th->MatDeltaFwdSensip);
974: MatDestroy(&th->MatFwdSensip0);
975: return 0;
976: }
978: static PetscErrorCode TSSetUp_Theta(TS ts)
979: {
980: TS_Theta *th = (TS_Theta *)ts->data;
981: TS quadts = ts->quadraturets;
982: PetscBool match;
984: if (!th->VecCostIntegral0 && quadts && ts->costintegralfwd) { /* back up cost integral */
985: VecDuplicate(quadts->vec_sol, &th->VecCostIntegral0);
986: }
987: if (!th->X) VecDuplicate(ts->vec_sol, &th->X);
988: if (!th->Xdot) VecDuplicate(ts->vec_sol, &th->Xdot);
989: if (!th->X0) VecDuplicate(ts->vec_sol, &th->X0);
990: if (th->endpoint) VecDuplicate(ts->vec_sol, &th->affine);
992: th->order = (th->Theta == 0.5) ? 2 : 1;
993: th->shift = 1 / (th->Theta * ts->time_step);
995: TSGetDM(ts, &ts->dm);
996: DMCoarsenHookAdd(ts->dm, DMCoarsenHook_TSTheta, DMRestrictHook_TSTheta, ts);
997: DMSubDomainHookAdd(ts->dm, DMSubDomainHook_TSTheta, DMSubDomainRestrictHook_TSTheta, ts);
999: TSGetAdapt(ts, &ts->adapt);
1000: TSAdaptCandidatesClear(ts->adapt);
1001: PetscObjectTypeCompare((PetscObject)ts->adapt, TSADAPTNONE, &match);
1002: if (!match) {
1003: VecDuplicate(ts->vec_sol, &th->vec_sol_prev);
1004: VecDuplicate(ts->vec_sol, &th->vec_lte_work);
1005: }
1006: TSGetSNES(ts, &ts->snes);
1008: ts->stifflyaccurate = (!th->endpoint && th->Theta != 1.0) ? PETSC_FALSE : PETSC_TRUE;
1009: return 0;
1010: }
1012: /*------------------------------------------------------------*/
1014: static PetscErrorCode TSAdjointSetUp_Theta(TS ts)
1015: {
1016: TS_Theta *th = (TS_Theta *)ts->data;
1018: VecDuplicateVecs(ts->vecs_sensi[0], ts->numcost, &th->VecsDeltaLam);
1019: VecDuplicateVecs(ts->vecs_sensi[0], ts->numcost, &th->VecsSensiTemp);
1020: if (ts->vecs_sensip) VecDuplicateVecs(ts->vecs_sensip[0], ts->numcost, &th->VecsDeltaMu);
1021: if (ts->vecs_sensi2) {
1022: VecDuplicateVecs(ts->vecs_sensi[0], ts->numcost, &th->VecsDeltaLam2);
1023: VecDuplicateVecs(ts->vecs_sensi2[0], ts->numcost, &th->VecsSensi2Temp);
1024: /* hack ts to make implicit TS solver work when users provide only explicit versions of callbacks (RHSFunction,RHSJacobian,RHSHessian etc.) */
1025: if (!ts->ihessianproduct_fuu) ts->vecs_fuu = ts->vecs_guu;
1026: if (!ts->ihessianproduct_fup) ts->vecs_fup = ts->vecs_gup;
1027: }
1028: if (ts->vecs_sensi2p) {
1029: VecDuplicateVecs(ts->vecs_sensi2p[0], ts->numcost, &th->VecsDeltaMu2);
1030: /* hack ts to make implicit TS solver work when users provide only explicit versions of callbacks (RHSFunction,RHSJacobian,RHSHessian etc.) */
1031: if (!ts->ihessianproduct_fpu) ts->vecs_fpu = ts->vecs_gpu;
1032: if (!ts->ihessianproduct_fpp) ts->vecs_fpp = ts->vecs_gpp;
1033: }
1034: return 0;
1035: }
1037: static PetscErrorCode TSSetFromOptions_Theta(TS ts, PetscOptionItems *PetscOptionsObject)
1038: {
1039: TS_Theta *th = (TS_Theta *)ts->data;
1041: PetscOptionsHeadBegin(PetscOptionsObject, "Theta ODE solver options");
1042: {
1043: PetscOptionsReal("-ts_theta_theta", "Location of stage (0<Theta<=1)", "TSThetaSetTheta", th->Theta, &th->Theta, NULL);
1044: PetscOptionsBool("-ts_theta_endpoint", "Use the endpoint instead of midpoint form of the Theta method", "TSThetaSetEndpoint", th->endpoint, &th->endpoint, NULL);
1045: PetscOptionsBool("-ts_theta_initial_guess_extrapolate", "Extrapolate stage initial guess from previous solution (sometimes unstable)", "TSThetaSetExtrapolate", th->extrapolate, &th->extrapolate, NULL);
1046: }
1047: PetscOptionsHeadEnd();
1048: return 0;
1049: }
1051: static PetscErrorCode TSView_Theta(TS ts, PetscViewer viewer)
1052: {
1053: TS_Theta *th = (TS_Theta *)ts->data;
1054: PetscBool iascii;
1056: PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &iascii);
1057: if (iascii) {
1058: PetscViewerASCIIPrintf(viewer, " Theta=%g\n", (double)th->Theta);
1059: PetscViewerASCIIPrintf(viewer, " Extrapolation=%s\n", th->extrapolate ? "yes" : "no");
1060: }
1061: return 0;
1062: }
1064: static PetscErrorCode TSThetaGetTheta_Theta(TS ts, PetscReal *theta)
1065: {
1066: TS_Theta *th = (TS_Theta *)ts->data;
1068: *theta = th->Theta;
1069: return 0;
1070: }
1072: static PetscErrorCode TSThetaSetTheta_Theta(TS ts, PetscReal theta)
1073: {
1074: TS_Theta *th = (TS_Theta *)ts->data;
1077: th->Theta = theta;
1078: th->order = (th->Theta == 0.5) ? 2 : 1;
1079: return 0;
1080: }
1082: static PetscErrorCode TSThetaGetEndpoint_Theta(TS ts, PetscBool *endpoint)
1083: {
1084: TS_Theta *th = (TS_Theta *)ts->data;
1086: *endpoint = th->endpoint;
1087: return 0;
1088: }
1090: static PetscErrorCode TSThetaSetEndpoint_Theta(TS ts, PetscBool flg)
1091: {
1092: TS_Theta *th = (TS_Theta *)ts->data;
1094: th->endpoint = flg;
1095: return 0;
1096: }
1098: #if defined(PETSC_HAVE_COMPLEX)
1099: static PetscErrorCode TSComputeLinearStability_Theta(TS ts, PetscReal xr, PetscReal xi, PetscReal *yr, PetscReal *yi)
1100: {
1101: PetscComplex z = xr + xi * PETSC_i, f;
1102: TS_Theta *th = (TS_Theta *)ts->data;
1103: const PetscReal one = 1.0;
1105: f = (one + (one - th->Theta) * z) / (one - th->Theta * z);
1106: *yr = PetscRealPartComplex(f);
1107: *yi = PetscImaginaryPartComplex(f);
1108: return 0;
1109: }
1110: #endif
1112: static PetscErrorCode TSGetStages_Theta(TS ts, PetscInt *ns, Vec *Y[])
1113: {
1114: TS_Theta *th = (TS_Theta *)ts->data;
1116: if (ns) {
1117: if (!th->endpoint && th->Theta != 1.0) *ns = 1; /* midpoint form */
1118: else *ns = 2; /* endpoint form */
1119: }
1120: if (Y) {
1121: if (!th->endpoint && th->Theta != 1.0) {
1122: th->Stages[0] = th->X;
1123: } else {
1124: th->Stages[0] = th->X0;
1125: th->Stages[1] = ts->vec_sol; /* stiffly accurate */
1126: }
1127: *Y = th->Stages;
1128: }
1129: return 0;
1130: }
1132: /* ------------------------------------------------------------ */
1133: /*MC
1134: TSTHETA - DAE solver using the implicit Theta method
1136: Level: beginner
1138: Options Database Keys:
1139: + -ts_theta_theta <Theta> - Location of stage (0<Theta<=1)
1140: . -ts_theta_endpoint <flag> - Use the endpoint (like Crank-Nicolson) instead of midpoint form of the Theta method
1141: - -ts_theta_initial_guess_extrapolate <flg> - Extrapolate stage initial guess from previous solution (sometimes unstable)
1143: Notes:
1144: $ -ts_type theta -ts_theta_theta 1.0 corresponds to backward Euler (`TSBEULER`)
1145: $ -ts_type theta -ts_theta_theta 0.5 corresponds to the implicit midpoint rule
1146: $ -ts_type theta -ts_theta_theta 0.5 -ts_theta_endpoint corresponds to Crank-Nicolson (`TSCN`)
1148: The endpoint variant of the Theta method and backward Euler can be applied to DAEs. The midpoint variant is not suitable for DAEs because it is not stiffly accurate.
1150: The midpoint variant is cast as a 1-stage implicit Runge-Kutta method.
1152: .vb
1153: Theta | Theta
1154: -------------
1155: | 1
1156: .ve
1158: For the default Theta=0.5, this is also known as the implicit midpoint rule.
1160: When the endpoint variant is chosen, the method becomes a 2-stage method with first stage explicit:
1162: .vb
1163: 0 | 0 0
1164: 1 | 1-Theta Theta
1165: -------------------
1166: | 1-Theta Theta
1167: .ve
1169: For the default Theta=0.5, this is the trapezoid rule (also known as Crank-Nicolson, see `TSCN`).
1171: To apply a diagonally implicit RK method to a DAE, the stage formula
1173: $ Y_i = X + h sum_j a_ij Y'_j
1175: is interpreted as a formula for Y'_i in terms of Y_i and the known values Y'_j, j<i.
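   A minimal usage sketch follows (`FormIFunction()`, `FormIJacobian()`, `user`, `J`, and `u` are placeholder names, not part of this solver):
.vb
   TS ts;
   TSCreate(PETSC_COMM_WORLD, &ts);
   TSSetType(ts, TSTHETA);
   TSThetaSetTheta(ts, 0.5);               /* implicit midpoint */
   TSThetaSetEndpoint(ts, PETSC_TRUE);     /* switch to the endpoint form (Crank-Nicolson) */
   TSSetIFunction(ts, NULL, FormIFunction, &user);
   TSSetIJacobian(ts, J, J, FormIJacobian, &user);
   TSSetTimeStep(ts, 0.01);
   TSSetMaxTime(ts, 1.0);
   TSSetFromOptions(ts);
   TSSolve(ts, u);
   TSDestroy(&ts);
.ve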
1177: .seealso: [](chapter_ts), `TSCreate()`, `TS`, `TSSetType()`, `TSCN`, `TSBEULER`, `TSThetaSetTheta()`, `TSThetaSetEndpoint()`
1178: M*/
1179: PETSC_EXTERN PetscErrorCode TSCreate_Theta(TS ts)
1180: {
1181: TS_Theta *th;
1183: ts->ops->reset = TSReset_Theta;
1184: ts->ops->adjointreset = TSAdjointReset_Theta;
1185: ts->ops->destroy = TSDestroy_Theta;
1186: ts->ops->view = TSView_Theta;
1187: ts->ops->setup = TSSetUp_Theta;
1188: ts->ops->adjointsetup = TSAdjointSetUp_Theta;
1189: ts->ops->adjointreset = TSAdjointReset_Theta;
1190: ts->ops->step = TSStep_Theta;
1191: ts->ops->interpolate = TSInterpolate_Theta;
1192: ts->ops->evaluatewlte = TSEvaluateWLTE_Theta;
1193: ts->ops->rollback = TSRollBack_Theta;
1194: ts->ops->setfromoptions = TSSetFromOptions_Theta;
1195: ts->ops->snesfunction = SNESTSFormFunction_Theta;
1196: ts->ops->snesjacobian = SNESTSFormJacobian_Theta;
1197: #if defined(PETSC_HAVE_COMPLEX)
1198: ts->ops->linearstability = TSComputeLinearStability_Theta;
1199: #endif
1200: ts->ops->getstages = TSGetStages_Theta;
1201: ts->ops->adjointstep = TSAdjointStep_Theta;
1202: ts->ops->adjointintegral = TSAdjointCostIntegral_Theta;
1203: ts->ops->forwardintegral = TSForwardCostIntegral_Theta;
1204: ts->default_adapt_type = TSADAPTNONE;
1206: ts->ops->forwardsetup = TSForwardSetUp_Theta;
1207: ts->ops->forwardreset = TSForwardReset_Theta;
1208: ts->ops->forwardstep = TSForwardStep_Theta;
1209: ts->ops->forwardgetstages = TSForwardGetStages_Theta;
1211: ts->usessnes = PETSC_TRUE;
1213: PetscNew(&th);
1214: ts->data = (void *)th;
1216: th->VecsDeltaLam = NULL;
1217: th->VecsDeltaMu = NULL;
1218: th->VecsSensiTemp = NULL;
1219: th->VecsSensi2Temp = NULL;
1221: th->extrapolate = PETSC_FALSE;
1222: th->Theta = 0.5;
1223: th->order = 2;
1224: PetscObjectComposeFunction((PetscObject)ts, "TSThetaGetTheta_C", TSThetaGetTheta_Theta);
1225: PetscObjectComposeFunction((PetscObject)ts, "TSThetaSetTheta_C", TSThetaSetTheta_Theta);
1226: PetscObjectComposeFunction((PetscObject)ts, "TSThetaGetEndpoint_C", TSThetaGetEndpoint_Theta);
1227: PetscObjectComposeFunction((PetscObject)ts, "TSThetaSetEndpoint_C", TSThetaSetEndpoint_Theta);
1228: return 0;
1229: }
1231: /*@
1232: TSThetaGetTheta - Get the abscissa of the stage in (0,1] for `TSTHETA`
1234: Not Collective
1236: Input Parameter:
1237: . ts - timestepping context
1239: Output Parameter:
1240: . theta - stage abscissa
1242: Level: advanced
1244: Note:
1245: Use of this function is normally only required to hack `TSTHETA` to use a modified integration scheme.
1247: .seealso: [](chapter_ts), `TSThetaSetTheta()`, `TSTHETA`
1248: @*/
1249: PetscErrorCode TSThetaGetTheta(TS ts, PetscReal *theta)
1250: {
1253: PetscUseMethod(ts, "TSThetaGetTheta_C", (TS, PetscReal *), (ts, theta));
1254: return 0;
1255: }
1257: /*@
1258: TSThetaSetTheta - Set the abscissa of the stage in (0,1] for `TSTHETA`
1260: Not Collective
1262: Input Parameters:
1263: + ts - timestepping context
1264: - theta - stage abscissa
1266: Options Database Key:
1267: . -ts_theta_theta <theta> - set theta
1269: Level: intermediate
1271: .seealso: [](chapter_ts), `TSThetaGetTheta()`, `TSTHETA`, `TSCN`
1272: @*/
1273: PetscErrorCode TSThetaSetTheta(TS ts, PetscReal theta)
1274: {
1276: PetscTryMethod(ts, "TSThetaSetTheta_C", (TS, PetscReal), (ts, theta));
1277: return 0;
1278: }
1280: /*@
1281: TSThetaGetEndpoint - Gets whether to use the endpoint variant of the method (e.g. trapezoid/Crank-Nicolson instead of midpoint rule) for `TSTHETA`
1283: Not Collective
1285: Input Parameter:
1286: . ts - timestepping context
1288: Output Parameter:
1289: . endpoint - `PETSC_TRUE` when using the endpoint variant
1291: Level: advanced
1293: .seealso: [](chapter_ts), `TSThetaSetEndpoint()`, `TSTHETA`, `TSCN`
1294: @*/
1295: PetscErrorCode TSThetaGetEndpoint(TS ts, PetscBool *endpoint)
1296: {
1299: PetscUseMethod(ts, "TSThetaGetEndpoint_C", (TS, PetscBool *), (ts, endpoint));
1300: return 0;
1301: }
1303: /*@
1304: TSThetaSetEndpoint - Sets whether to use the endpoint variant of the method (e.g. trapezoid/Crank-Nicolson instead of midpoint rule) for `TSTHETA`
1306: Not Collective
1308: Input Parameters:
1309: + ts - timestepping context
1310: - flg - `PETSC_TRUE` to use the endpoint variant
1312: Options Database Key:
1313: . -ts_theta_endpoint <flg> - use the endpoint variant
1315: Level: intermediate
1317: .seealso: [](chapter_ts), `TSTHETA`, `TSCN`
1318: @*/
1319: PetscErrorCode TSThetaSetEndpoint(TS ts, PetscBool flg)
1320: {
1322: PetscTryMethod(ts, "TSThetaSetEndpoint_C", (TS, PetscBool), (ts, flg));
1323: return 0;
1324: }
1326: /*
1327: * TSBEULER and TSCN are straightforward specializations of TSTHETA.
1328: * The creation functions for these specializations are below.
1329: */
1331: static PetscErrorCode TSSetUp_BEuler(TS ts)
1332: {
1333: TS_Theta *th = (TS_Theta *)ts->data;
1337: TSSetUp_Theta(ts);
1338: return 0;
1339: }
1341: static PetscErrorCode TSView_BEuler(TS ts, PetscViewer viewer)
1342: {
1343: return 0;
1344: }
1346: /*MC
1347: TSBEULER - ODE solver using the implicit backward Euler method
1349: Level: beginner
1351: Note:
1352: `TSBEULER` is equivalent to `TSTHETA` with Theta=1.0
1353: $ -ts_type theta -ts_theta_theta 1.0
1355: .seealso: [](chapter_ts), `TSCreate()`, `TS`, `TSSetType()`, `TSEULER`, `TSCN`, `TSTHETA`
1356: M*/
1357: PETSC_EXTERN PetscErrorCode TSCreate_BEuler(TS ts)
1358: {
1359: TSCreate_Theta(ts);
1360: TSThetaSetTheta(ts, 1.0);
1361: TSThetaSetEndpoint(ts, PETSC_FALSE);
1362: ts->ops->setup = TSSetUp_BEuler;
1363: ts->ops->view = TSView_BEuler;
1364: return 0;
1365: }
1367: static PetscErrorCode TSSetUp_CN(TS ts)
1368: {
1369: TS_Theta *th = (TS_Theta *)ts->data;
1373: TSSetUp_Theta(ts);
1374: return 0;
1375: }
1377: static PetscErrorCode TSView_CN(TS ts, PetscViewer viewer)
1378: {
1379: return 0;
1380: }
1382: /*MC
1383: TSCN - ODE solver using the implicit Crank-Nicolson method.
1385: Level: beginner
1387: Notes:
1388: `TSCN` is equivalent to `TSTHETA` with Theta=0.5 and the "endpoint" option set. I.e.
1389: .vb
1390: -ts_type theta
1391: -ts_theta_theta 0.5
1392: -ts_theta_endpoint
1393: .ve
1395: .seealso: [](chapter_ts), `TSCreate()`, `TS`, `TSSetType()`, `TSBEULER`, `TSTHETA`, `TSType`
1396: M*/
1397: PETSC_EXTERN PetscErrorCode TSCreate_CN(TS ts)
1398: {
1399: TSCreate_Theta(ts);
1400: TSThetaSetTheta(ts, 0.5);
1401: TSThetaSetEndpoint(ts, PETSC_TRUE);
1402: ts->ops->setup = TSSetUp_CN;
1403: ts->ops->view = TSView_CN;
1404: return 0;
1405: }