Actual source code: spectraladjointassimilation.c
static char help[] = "Solves a simple data assimilation problem with the one-dimensional advection-diffusion equation using TSAdjoint\n\n";

/*
    Not yet tested in parallel
*/
/* ------------------------------------------------------------------------

   This program uses the one-dimensional advection-diffusion equation
       u_t = mu*u_xx - a*u_x,
   on the domain 0 <= x <= 1, with periodic boundary conditions,

   to demonstrate solving a data assimilation problem: find the initial conditions
   that produce a given solution at a fixed time.

   The operators are discretized with the spectral element method.

  ------------------------------------------------------------------------- */
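/*
   The target (reference) profile used below is the analytic solution

       u(x,t) = sum_j c_j sin(2*pi*(j+1)*(x + a*t)) exp(-mu*(2*pi*(j+1))^2*t),   j = 0,...,ncoeff-1,

   with coefficients c_j chosen randomly in [.9,1.0); see
   ComputeSolutionCoefficients() and ComputeReference(). Tao minimizes the
   mass-weighted misfit f(ic) = (u(T) - u_d)^T M (u(T) - u_d) over the
   initial conditions ic, where u_d is this profile at time T.
*/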
/*
   Include "petscts.h" so that we can use TS solvers.  Note that this file
   automatically includes:
     petscsys.h    - base PETSc routines
     petscvec.h    - vectors
     petscmat.h    - matrices
     petscis.h     - index sets
     petscksp.h    - Krylov subspace methods (linear solvers)
     petscviewer.h - viewers
     petscpc.h     - preconditioners
     petscsnes.h   - nonlinear solvers
*/
#include <petsctao.h>
#include <petscts.h>
#include <petscdt.h>
#include <petscdraw.h>
#include <petscdmda.h>
/*
   User-defined application context - contains data needed by the
   application-provided call-back routines.
*/

typedef struct {
  PetscInt   n;       /* number of nodes */
  PetscReal *nodes;   /* GLL nodes */
  PetscReal *weights; /* GLL weights */
} PetscGLL;
typedef struct {
  PetscInt  N;               /* grid points per element */
  PetscInt  E;               /* number of elements */
  PetscReal tol_L2, tol_max; /* error norms */
  PetscInt  steps;           /* number of timesteps */
  PetscReal Tend;            /* endtime */
  PetscReal mu;              /* viscosity */
  PetscReal a;               /* advection speed */
  PetscReal L;               /* total length of domain */
  PetscReal Le;              /* length of one element: L/E */
  PetscReal Tadj;
} PetscParam;
typedef struct {
  Vec reference;     /* desired end state */
  Vec grid;          /* total grid */
  Vec grad;
  Vec ic;            /* initial conditions; the optimization variable */
  Vec curr_sol;      /* solution of the most recent forward solve */
  Vec joe;           /* copy of the final forward solution */
  Vec true_solution; /* actual initial conditions for the final solution */
} PetscData;
typedef struct {
  Vec      grid;      /* total grid */
  Vec      mass;      /* mass matrix for total integration */
  Mat      stiff;     /* stiffness matrix */
  Mat      advec;     /* advection matrix */
  Mat      keptstiff; /* copy of the combined (stiffness + advection) operator */
  PetscGLL gll;
} PetscSEMOperators;
typedef struct {
  DM                da; /* distributed array data structure */
  PetscSEMOperators SEMop;
  PetscParam        param;
  PetscData         dat;
  TS                ts;
  PetscReal         initial_dt;
  PetscReal        *solutioncoefficients; /* coefficients of the analytic (target) solution */
  PetscInt          ncoeff;               /* number of those coefficients */
} AppCtx;
/*
   User-defined routines
*/
extern PetscErrorCode FormFunctionGradient(Tao, Vec, PetscReal *, Vec, void *);
extern PetscErrorCode RHSLaplacian(TS, PetscReal, Vec, Mat, Mat, void *);
extern PetscErrorCode RHSAdvection(TS, PetscReal, Vec, Mat, Mat, void *);
extern PetscErrorCode InitialConditions(Vec, AppCtx *);
extern PetscErrorCode ComputeReference(TS, PetscReal, Vec, AppCtx *);
extern PetscErrorCode MonitorError(Tao, void *);
extern PetscErrorCode MonitorDestroy(void **);
extern PetscErrorCode ComputeSolutionCoefficients(AppCtx *);
extern PetscErrorCode RHSFunction(TS, PetscReal, Vec, Vec, void *);
extern PetscErrorCode RHSJacobian(TS, PetscReal, Vec, Mat, Mat, void *);
int main(int argc, char **argv)
{
  AppCtx       appctx; /* user-defined application context */
  Tao          tao;
  Vec          u; /* approximate solution vector */
  PetscInt     i, xs, xm, ind, j, lenglob;
  PetscReal    x, *wrk_ptr1, *wrk_ptr2;
  MatNullSpace nsp;

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Initialize program and set problem parameters
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  PetscInitialize(&argc, &argv, (char *)0, help);

  /* initialize parameters */
  appctx.param.N     = 10;      /* order of the spectral element */
  appctx.param.E     = 8;       /* number of elements */
  appctx.param.L     = 1.0;     /* length of the domain */
  appctx.param.mu    = 0.00001; /* diffusion coefficient */
  appctx.param.a     = 0.0;     /* advection speed */
  appctx.initial_dt  = 1e-4;
  appctx.param.steps = PETSC_MAX_INT;
  appctx.param.Tend  = 0.01;
  appctx.ncoeff      = 2;
  PetscOptionsGetInt(NULL, NULL, "-N", &appctx.param.N, NULL);
  PetscOptionsGetInt(NULL, NULL, "-E", &appctx.param.E, NULL);
  PetscOptionsGetInt(NULL, NULL, "-ncoeff", &appctx.ncoeff, NULL);
  PetscOptionsGetReal(NULL, NULL, "-Tend", &appctx.param.Tend, NULL);
  PetscOptionsGetReal(NULL, NULL, "-mu", &appctx.param.mu, NULL);
  PetscOptionsGetReal(NULL, NULL, "-a", &appctx.param.a, NULL);
  appctx.param.Le = appctx.param.L / appctx.param.E;

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create GLL data structures
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  PetscMalloc2(appctx.param.N, &appctx.SEMop.gll.nodes, appctx.param.N, &appctx.SEMop.gll.weights);
  PetscDTGaussLobattoLegendreQuadrature(appctx.param.N, PETSCGAUSSLOBATTOLEGENDRE_VIA_LINEAR_ALGEBRA, appctx.SEMop.gll.nodes, appctx.SEMop.gll.weights);
  appctx.SEMop.gll.n = appctx.param.N;
  lenglob            = appctx.param.E * (appctx.param.N - 1);
  /*
     Create distributed array (DMDA) to manage parallel grid and vectors
     and to set up the ghost point communication pattern. With periodic
     boundary conditions there are E*(N-1) total grid values, spread as
     equally as possible among the processors.
  */
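  /*
     For example, with the defaults N = 10 and E = 8 the global grid has
     lenglob = 8*(10-1) = 72 points: each element's last node coincides
     with its neighbor's first node, and the final node wraps around to
     the start of the periodic domain.
  */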
  DMDACreate1d(PETSC_COMM_WORLD, DM_BOUNDARY_PERIODIC, lenglob, 1, 1, NULL, &appctx.da);
  DMSetFromOptions(appctx.da);
  DMSetUp(appctx.da);

  /*
     Extract global and local vectors from DMDA; we use these to store the
     approximate solution. Then duplicate these for remaining vectors that
     have the same types.
  */
  DMCreateGlobalVector(appctx.da, &u);
  VecDuplicate(u, &appctx.dat.ic);
  VecDuplicate(u, &appctx.dat.true_solution);
  VecDuplicate(u, &appctx.dat.reference);
  VecDuplicate(u, &appctx.SEMop.grid);
  VecDuplicate(u, &appctx.SEMop.mass);
  VecDuplicate(u, &appctx.dat.curr_sol);
  VecDuplicate(u, &appctx.dat.joe);

  DMDAGetCorners(appctx.da, &xs, NULL, NULL, &xm, NULL, NULL);
  DMDAVecGetArray(appctx.da, appctx.SEMop.grid, &wrk_ptr1);
  DMDAVecGetArray(appctx.da, appctx.SEMop.mass, &wrk_ptr2);
  /* Compute function over the locally owned part of the grid; convert
     the corner values xs, xm from node counts to element counts */
  xs = xs / (appctx.param.N - 1);
  xm = xm / (appctx.param.N - 1);
  /*
     Build total grid and mass over entire mesh (multi-elemental)
  */
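  /*
     Node j of element i is the affine image of the GLL node xi_j on the
     reference element [-1,1]: x = (Le/2)*(xi_j + 1) + Le*i, and the GLL
     weight picks up the Jacobian factor Le/2. A node shared by two
     elements receives weight contributions from both; since the GLL
     endpoint weights are equal, doubling the j == 0 weight below accounts
     for the neighboring element's contribution.
  */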
  for (i = xs; i < xs + xm; i++) {
    for (j = 0; j < appctx.param.N - 1; j++) {
      x             = (appctx.param.Le / 2.0) * (appctx.SEMop.gll.nodes[j] + 1.0) + appctx.param.Le * i;
      ind           = i * (appctx.param.N - 1) + j;
      wrk_ptr1[ind] = x;
      wrk_ptr2[ind] = .5 * appctx.param.Le * appctx.SEMop.gll.weights[j];
      if (j == 0) wrk_ptr2[ind] += .5 * appctx.param.Le * appctx.SEMop.gll.weights[j];
    }
  }
  DMDAVecRestoreArray(appctx.da, appctx.SEMop.grid, &wrk_ptr1);
  DMDAVecRestoreArray(appctx.da, appctx.SEMop.mass, &wrk_ptr2);
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create matrix data structure; set matrix evaluation routine.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  DMSetMatrixPreallocateOnly(appctx.da, PETSC_TRUE);
  DMCreateMatrix(appctx.da, &appctx.SEMop.stiff);
  DMCreateMatrix(appctx.da, &appctx.SEMop.advec);
  /*
     For linear problems with a time-dependent f(u,t) in the equation
     u_t = f(u,t), the user provides the discretized right-hand side
     as a time-dependent matrix. (The TS argument is unused by these
     routines, and the TS has not been created yet, so pass NULL.)
  */
  RHSLaplacian(NULL, 0.0, u, appctx.SEMop.stiff, appctx.SEMop.stiff, &appctx);
  RHSAdvection(NULL, 0.0, u, appctx.SEMop.advec, appctx.SEMop.advec, &appctx);
  MatAXPY(appctx.SEMop.stiff, -1.0, appctx.SEMop.advec, DIFFERENT_NONZERO_PATTERN);
  MatDuplicate(appctx.SEMop.stiff, MAT_COPY_VALUES, &appctx.SEMop.keptstiff);
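  /*
     keptstiff preserves the combined operator exactly as assembled here;
     the TSSolve() path below hands appctx.SEMop.stiff to TS directly,
     while the commented-out RHSFunction()/RHSJacobian() alternative
     applies keptstiff at each step instead.
  */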
  /* attach the null space to the matrix; this probably is not needed but does no harm */
  MatNullSpaceCreate(PETSC_COMM_WORLD, PETSC_TRUE, 0, NULL, &nsp);
  MatSetNullSpace(appctx.SEMop.stiff, nsp);
  MatNullSpaceTest(nsp, appctx.SEMop.stiff, NULL);
  MatNullSpaceDestroy(&nsp);
  /* Create the TS solver that solves the ODE and its adjoint; set its options */
  TSCreate(PETSC_COMM_WORLD, &appctx.ts);
  TSSetSolutionFunction(appctx.ts, (PetscErrorCode(*)(TS, PetscReal, Vec, void *))ComputeReference, &appctx);
  TSSetProblemType(appctx.ts, TS_LINEAR);
  TSSetType(appctx.ts, TSRK);
  TSSetDM(appctx.ts, appctx.da);
  TSSetTime(appctx.ts, 0.0);
  TSSetTimeStep(appctx.ts, appctx.initial_dt);
  TSSetMaxSteps(appctx.ts, appctx.param.steps);
  TSSetMaxTime(appctx.ts, appctx.param.Tend);
  TSSetExactFinalTime(appctx.ts, TS_EXACTFINALTIME_MATCHSTEP);
  TSSetTolerances(appctx.ts, 1e-7, NULL, 1e-7, NULL);
  TSSetFromOptions(appctx.ts);
  /* Need to save initial timestep user may have set with -ts_dt so it can be reset for each new TSSolve() */
  TSGetTimeStep(appctx.ts, &appctx.initial_dt);
  TSSetRHSFunction(appctx.ts, NULL, TSComputeRHSFunctionLinear, &appctx);
  TSSetRHSJacobian(appctx.ts, appctx.SEMop.stiff, appctx.SEMop.stiff, TSComputeRHSJacobianConstant, &appctx);
  /* TSSetRHSFunction(appctx.ts, NULL, RHSFunction, &appctx);
     TSSetRHSJacobian(appctx.ts, appctx.SEMop.stiff, appctx.SEMop.stiff, RHSJacobian, &appctx); */
  /* Set random initial conditions as initial guess, compute analytic reference solution and analytic (true) initial conditions */
  ComputeSolutionCoefficients(&appctx);
  InitialConditions(appctx.dat.ic, &appctx);
  ComputeReference(appctx.ts, appctx.param.Tend, appctx.dat.reference, &appctx);
  ComputeReference(appctx.ts, 0.0, appctx.dat.true_solution, &appctx);

  /* Set up to save trajectory before TSSetFromOptions() so that TSTrajectory options can be captured */
  TSSetSaveTrajectory(appctx.ts);
  TSSetFromOptions(appctx.ts);
  /* Create TAO solver and set desired solution method */
  TaoCreate(PETSC_COMM_WORLD, &tao);
  TaoSetMonitor(tao, MonitorError, &appctx, MonitorDestroy);
  TaoSetType(tao, TAOBQNLS);
  TaoSetSolution(tao, appctx.dat.ic);
  /* Set routine for function and gradient evaluation */
  TaoSetObjectiveAndGradient(tao, NULL, FormFunctionGradient, (void *)&appctx);
  TaoSetTolerances(tao, 1e-8, PETSC_DEFAULT, PETSC_DEFAULT);
  /* Check for any TAO command line options */
  TaoSetFromOptions(tao);
  TaoSolve(tao);
  TaoDestroy(&tao);
  PetscFree(appctx.solutioncoefficients);
  MatDestroy(&appctx.SEMop.advec);
  MatDestroy(&appctx.SEMop.stiff);
  MatDestroy(&appctx.SEMop.keptstiff);
  VecDestroy(&u);
  VecDestroy(&appctx.dat.ic);
  VecDestroy(&appctx.dat.joe);
  VecDestroy(&appctx.dat.true_solution);
  VecDestroy(&appctx.dat.reference);
  VecDestroy(&appctx.SEMop.grid);
  VecDestroy(&appctx.SEMop.mass);
  VecDestroy(&appctx.dat.curr_sol);
  PetscFree2(appctx.SEMop.gll.nodes, appctx.SEMop.gll.weights);
  DMDestroy(&appctx.da);
  TSDestroy(&appctx.ts);
  /*
     Always call PetscFinalize() before exiting a program.  This routine
       - finalizes the PETSc libraries as well as MPI
       - provides summary and diagnostic information if certain runtime
         options are chosen (e.g., -log_view).
  */
  PetscFinalize();
  return 0;
}
/*
   Computes the coefficients for the analytic solution to the PDE
*/
PetscErrorCode ComputeSolutionCoefficients(AppCtx *appctx)
{
  PetscRandom rand;
  PetscInt    i;

  PetscMalloc1(appctx->ncoeff, &appctx->solutioncoefficients);
  PetscRandomCreate(PETSC_COMM_WORLD, &rand);
  PetscRandomSetInterval(rand, .9, 1.0);
  for (i = 0; i < appctx->ncoeff; i++) PetscRandomGetValueReal(rand, &appctx->solutioncoefficients[i]);
  PetscRandomDestroy(&rand);
  return 0;
}
/* --------------------------------------------------------------------- */
/*
   InitialConditions - Computes the (random) initial conditions for the Tao
   optimization solve (these are also the initial conditions for the first TSSolve()).

   Input Parameters:
   u - uninitialized solution vector (global)
   appctx - user-defined application context

   Output Parameter:
   u - vector with solution at initial time (global)
*/
PetscErrorCode InitialConditions(Vec u, AppCtx *appctx)
{
  PetscScalar       *s;
  const PetscScalar *xg;
  PetscInt           i, j, lenglob;
  PetscReal          sum, val;
  PetscRandom        rand;

  PetscRandomCreate(PETSC_COMM_WORLD, &rand);
  PetscRandomSetInterval(rand, .9, 1.0);
  DMDAVecGetArray(appctx->da, u, &s);
  DMDAVecGetArrayRead(appctx->da, appctx->SEMop.grid, (void *)&xg);
  lenglob = appctx->param.E * (appctx->param.N - 1);
  for (i = 0; i < lenglob; i++) {
    s[i] = 0;
    for (j = 0; j < appctx->ncoeff; j++) {
      PetscRandomGetValueReal(rand, &val);
      s[i] += val * PetscSinScalar(2 * (j + 1) * PETSC_PI * xg[i]);
    }
  }
  PetscRandomDestroy(&rand);
  DMDAVecRestoreArray(appctx->da, u, &s);
  DMDAVecRestoreArrayRead(appctx->da, appctx->SEMop.grid, (void *)&xg);
  /* make sure initial conditions do not contain the constant functions, since with periodic boundary conditions the constant functions introduce a null space */
  VecSum(u, &sum);
  VecShift(u, -sum / lenglob);
  return 0;
}
/*
   TrueSolution() computes the true initial conditions for the Tao optimization
   solve, i.e. the initial conditions that evolve into the reference solution
   used in the objective function.

   InitialConditions() computes the initial conditions for the beginning of the Tao iterations.

   Input Parameters:
   u - uninitialized solution vector (global)
   appctx - user-defined application context

   Output Parameter:
   u - vector with solution at initial time (global)
*/
PetscErrorCode TrueSolution(Vec u, AppCtx *appctx)
{
  PetscScalar       *s;
  const PetscScalar *xg;
  PetscInt           i, j, lenglob;
  PetscReal          sum;

  DMDAVecGetArray(appctx->da, u, &s);
  DMDAVecGetArrayRead(appctx->da, appctx->SEMop.grid, (void *)&xg);
  lenglob = appctx->param.E * (appctx->param.N - 1);
  for (i = 0; i < lenglob; i++) {
    s[i] = 0;
    for (j = 0; j < appctx->ncoeff; j++) s[i] += appctx->solutioncoefficients[j] * PetscSinScalar(2 * (j + 1) * PETSC_PI * xg[i]);
  }
  DMDAVecRestoreArray(appctx->da, u, &s);
  DMDAVecRestoreArrayRead(appctx->da, appctx->SEMop.grid, (void *)&xg);
  /* make sure initial conditions do not contain the constant functions, since with periodic boundary conditions the constant functions introduce a null space */
  VecSum(u, &sum);
  VecShift(u, -sum / lenglob);
  return 0;
}
/* --------------------------------------------------------------------- */
/*
   ComputeReference - Computes the analytic solution at time t; with t = Tend
   this sets the desired profile for the final time.

   Input Parameters:
   ts  - the TS context (unused)
   t   - the time at which to evaluate the solution
   obj - vector storing the desired profile
   appctx - user-defined application context
*/
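/*
   At t = 0 the exponential decay factors are all 1, so this routine also
   supplies the true initial conditions; main() calls it with t = Tend for
   dat.reference and with t = 0 for dat.true_solution.
*/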
PetscErrorCode ComputeReference(TS ts, PetscReal t, Vec obj, AppCtx *appctx)
{
  PetscScalar       *s, tc;
  const PetscScalar *xg;
  PetscInt           i, j, lenglob;

  DMDAVecGetArray(appctx->da, obj, &s);
  DMDAVecGetArrayRead(appctx->da, appctx->SEMop.grid, (void *)&xg);
  lenglob = appctx->param.E * (appctx->param.N - 1);
  for (i = 0; i < lenglob; i++) {
    s[i] = 0;
    for (j = 0; j < appctx->ncoeff; j++) {
      tc = -appctx->param.mu * (j + 1) * (j + 1) * 4.0 * PETSC_PI * PETSC_PI * t;
      s[i] += appctx->solutioncoefficients[j] * PetscSinScalar(2 * (j + 1) * PETSC_PI * (xg[i] + appctx->param.a * t)) * PetscExpReal(tc);
    }
  }
  DMDAVecRestoreArray(appctx->da, obj, &s);
  DMDAVecRestoreArrayRead(appctx->da, appctx->SEMop.grid, (void *)&xg);
  return 0;
}
PetscErrorCode RHSFunction(TS ts, PetscReal t, Vec globalin, Vec globalout, void *ctx)
{
  AppCtx *appctx = (AppCtx *)ctx;

  /* apply the precomputed operator: globalout = keptstiff * globalin */
  MatMult(appctx->SEMop.keptstiff, globalin, globalout);
  return 0;
}

PetscErrorCode RHSJacobian(TS ts, PetscReal t, Vec globalin, Mat A, Mat B, void *ctx)
{
  AppCtx *appctx = (AppCtx *)ctx;

  /* the problem is linear, so the Jacobian is the precomputed operator itself */
  MatCopy(appctx->SEMop.keptstiff, A, DIFFERENT_NONZERO_PATTERN);
  return 0;
}
/* --------------------------------------------------------------------- */

/*
   RHSLaplacian - matrix for diffusion

   Input Parameters:
   ts - the TS context
   t - current time (ignored)
   X - current solution (ignored)
   ctx - optional user-defined context, as set by TSSetRHSJacobian()

   Output Parameters:
   AA - Jacobian matrix
   BB - optionally different matrix from which the preconditioner is built
   str - flag indicating matrix structure

   Scales by the inverse of the mass matrix (perhaps that should be pulled out)
*/
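/*
   In outline: the spectral element weak form of the diffusion term gives
       M u_t = -mu K u,
   where M is the (diagonal) mass vector assembled in main() and K is the
   GLL element stiffness matrix. Since TS expects u_t = f(u), the assembled
   matrix is scaled on the left by M^{-1}; the VecReciprocal() /
   MatDiagonalScale() / VecReciprocal() sequence below applies this scaling
   without forming a separate inverse mass vector.
*/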
PetscErrorCode RHSLaplacian(TS ts, PetscReal t, Vec X, Mat A, Mat BB, void *ctx)
{
  PetscReal **temp;
  PetscReal   vv;
  AppCtx     *appctx = (AppCtx *)ctx; /* user-defined application context */
  PetscInt    i, xs, xn, l, j;
  PetscInt   *rowsDM;

  /*
     Creates the element stiffness matrix for the given gll
  */
  PetscGaussLobattoLegendreElementLaplacianCreate(appctx->SEMop.gll.n, appctx->SEMop.gll.nodes, appctx->SEMop.gll.weights, &temp);

  /* scale by the size of the element */
  for (i = 0; i < appctx->param.N; i++) {
    vv = -appctx->param.mu * 2.0 / appctx->param.Le;
    for (j = 0; j < appctx->param.N; j++) temp[i][j] = temp[i][j] * vv;
  }

  MatSetOption(A, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
  DMDAGetCorners(appctx->da, &xs, NULL, NULL, &xn, NULL, NULL);

  xs = xs / (appctx->param.N - 1);
  xn = xn / (appctx->param.N - 1);

  PetscMalloc1(appctx->param.N, &rowsDM);
  /*
     loop over local elements
  */
  for (j = xs; j < xs + xn; j++) {
    for (l = 0; l < appctx->param.N; l++) rowsDM[l] = 1 + (j - xs) * (appctx->param.N - 1) + l;
    MatSetValuesLocal(A, appctx->param.N, rowsDM, appctx->param.N, rowsDM, &temp[0][0], ADD_VALUES);
  }
  PetscFree(rowsDM);
  MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
  VecReciprocal(appctx->SEMop.mass);
  MatDiagonalScale(A, appctx->SEMop.mass, NULL);
  VecReciprocal(appctx->SEMop.mass);

  PetscGaussLobattoLegendreElementLaplacianDestroy(appctx->SEMop.gll.n, appctx->SEMop.gll.nodes, appctx->SEMop.gll.weights, &temp);
  return 0;
}
/*
   Almost identical to the Laplacian.

   Note that the element matrix is NOT scaled by the size of the element
   the way the Laplacian term is: for a first-derivative term the Jacobian
   factor Le/2 from the element integral cancels the factor 2/Le that the
   derivative picks up from the map to the reference element.
*/
PetscErrorCode RHSAdvection(TS ts, PetscReal t, Vec X, Mat A, Mat BB, void *ctx)
{
  PetscReal **temp;
  PetscReal   vv;
  AppCtx     *appctx = (AppCtx *)ctx; /* user-defined application context */
  PetscInt    i, xs, xn, l, j;
  PetscInt   *rowsDM;

  /*
     Creates the element advection matrix for the given gll
  */
  PetscGaussLobattoLegendreElementAdvectionCreate(appctx->SEMop.gll.n, appctx->SEMop.gll.nodes, appctx->SEMop.gll.weights, &temp);

  /* scale by the advection speed */
  for (i = 0; i < appctx->param.N; i++) {
    vv = -appctx->param.a;
    for (j = 0; j < appctx->param.N; j++) temp[i][j] = temp[i][j] * vv;
  }

  MatSetOption(A, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
  DMDAGetCorners(appctx->da, &xs, NULL, NULL, &xn, NULL, NULL);

  xs = xs / (appctx->param.N - 1);
  xn = xn / (appctx->param.N - 1);

  PetscMalloc1(appctx->param.N, &rowsDM);
  /*
     loop over local elements
  */
  for (j = xs; j < xs + xn; j++) {
    for (l = 0; l < appctx->param.N; l++) rowsDM[l] = 1 + (j - xs) * (appctx->param.N - 1) + l;
    MatSetValuesLocal(A, appctx->param.N, rowsDM, appctx->param.N, rowsDM, &temp[0][0], ADD_VALUES);
  }
  PetscFree(rowsDM);
  MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
  VecReciprocal(appctx->SEMop.mass);
  MatDiagonalScale(A, appctx->SEMop.mass, NULL);
  VecReciprocal(appctx->SEMop.mass);

  PetscGaussLobattoLegendreElementAdvectionDestroy(appctx->SEMop.gll.n, appctx->SEMop.gll.nodes, appctx->SEMop.gll.weights, &temp);
  return 0;
}
/* ------------------------------------------------------------------ */
/*
   FormFunctionGradient - Evaluates the function and corresponding gradient.

   Input Parameters:
   tao - the Tao context
   ic  - the input vector
   ctx - optional user-defined context, as set when calling TaoSetObjectiveAndGradient()

   Output Parameters:
   f - the newly evaluated function
   G - the newly evaluated gradient

   Notes:

   The forward equation is
       M u_t = F(U)
   which is converted to
       u_t = M^{-1} F(u)
   in the user code since TS has no direct way of providing a mass matrix. The Jacobian of this is
       M^{-1} J
   where J is the Jacobian of F. Now the adjoint equation is
       M v_t = J^T v
   but TSAdjoint does not solve this since it can only solve the transposed system for the
   Jacobian the user provided. Hence TSAdjoint solves
       w_t = J^T M^{-1} w   (where w = M v)
   since there is no way to indicate the mass matrix as a separate entity to TS. Thus one
   must be careful in initializing the "adjoint equation" and using the result. This is
   why
       G = 2 M (u(T) - u_d)
   below (instead of 2 (u(T) - u_d)).
*/
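/*
   Concretely, the objective computed below is the mass-weighted misfit
       f(ic) = (u(T) - u_d)^T M (u(T) - u_d),
   evaluated with VecPointwiseMult() and a VecDot() against the mass vector.
   The seed G = 2 M (u(T) - u_d) is exactly df/du(T) for this f, and
   TSAdjointSolve() propagates it backward through the saved trajectory,
   leaving df/d(ic) in G.
*/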
PetscErrorCode FormFunctionGradient(Tao tao, Vec ic, PetscReal *f, Vec G, void *ctx)
{
  AppCtx *appctx = (AppCtx *)ctx; /* user-defined application context */
  Vec     temp;

  TSSetTime(appctx->ts, 0.0);
  TSSetStepNumber(appctx->ts, 0);
  TSSetTimeStep(appctx->ts, appctx->initial_dt);
  VecCopy(ic, appctx->dat.curr_sol);

  TSSolve(appctx->ts, appctx->dat.curr_sol);
  VecCopy(appctx->dat.curr_sol, appctx->dat.joe);

  /* Compute the difference between the current ODE solution and target ODE solution */
  VecWAXPY(G, -1.0, appctx->dat.curr_sol, appctx->dat.reference);

  /* Compute the objective/cost function */
  VecDuplicate(G, &temp);
  VecPointwiseMult(temp, G, G);
  VecDot(temp, appctx->SEMop.mass, f);
  VecDestroy(&temp);

  /* Compute initial conditions for the adjoint integration. See Notes above */
  VecScale(G, -2.0);
  VecPointwiseMult(G, G, appctx->SEMop.mass);
  TSSetCostGradients(appctx->ts, 1, &G, NULL);

  TSAdjointSolve(appctx->ts);
  /* VecPointwiseDivide(G,G,appctx->SEMop.mass);*/
  return 0;
}
PetscErrorCode MonitorError(Tao tao, void *ctx)
{
  AppCtx   *appctx = (AppCtx *)ctx;
  Vec       temp, grad;
  PetscReal nrm;
  PetscInt  its;
  PetscReal fct, gnorm;

  /* mass-weighted L2 error of the current initial conditions against the true ones */
  VecDuplicate(appctx->dat.ic, &temp);
  VecWAXPY(temp, -1.0, appctx->dat.ic, appctx->dat.true_solution);
  VecPointwiseMult(temp, temp, temp);
  VecDot(temp, appctx->SEMop.mass, &nrm);
  nrm = PetscSqrtReal(nrm);
  /* mass-weighted norm of the current gradient */
  TaoGetGradient(tao, &grad, NULL, NULL);
  VecPointwiseMult(temp, grad, grad);
  VecDot(temp, appctx->SEMop.mass, &gnorm);
  gnorm = PetscSqrtReal(gnorm);
  VecDestroy(&temp);
  TaoGetIterationNumber(tao, &its);
  TaoGetSolutionStatus(tao, NULL, &fct, NULL, NULL, NULL, NULL);
  if (!its) {
    PetscPrintf(PETSC_COMM_WORLD, "%% Iteration Error Objective Gradient-norm\n");
    PetscPrintf(PETSC_COMM_WORLD, "history = [\n");
  }
  PetscPrintf(PETSC_COMM_WORLD, "%3" PetscInt_FMT " %g %g %g\n", its, (double)nrm, (double)fct, (double)gnorm);
  return 0;
}
PetscErrorCode MonitorDestroy(void **ctx)
{
  PetscPrintf(PETSC_COMM_WORLD, "];\n");
  return 0;
}
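/*
   The monitor output is MATLAB-readable on purpose: the first call opens a
   matrix literal with "history = [" and MonitorDestroy() closes it with
   "];", so a run's convergence history can be loaded directly for plotting.
*/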
/*TEST

   build:
     requires: !complex

   test:
     requires: !single
     args: -ts_adapt_dt_max 3.e-3 -E 10 -N 8 -ncoeff 5 -tao_bqnls_mat_lmvm_scale_type none

   test:
     suffix: cn
     requires: !single
     args: -ts_type cn -ts_dt .003 -pc_type lu -E 10 -N 8 -ncoeff 5 -tao_bqnls_mat_lmvm_scale_type none

   test:
     suffix: 2
     requires: !single
     args: -ts_adapt_dt_max 3.e-3 -E 10 -N 8 -ncoeff 5 -a .1 -tao_bqnls_mat_lmvm_scale_type none

TEST*/