Actual source code: burgers_spectral.c

static char help[] = "Solves a simple data assimilation problem with the one-dimensional Burgers equation using TSAdjoint\n\n";

/*

    Not yet tested in parallel

*/
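
/*
   Example usage (mirroring the test specifications at the bottom of this file):

       ./burgers_spectral -tao_max_it 5 -tao_gatol 1.e-4

   The options -N, -E, -Tend, and -mu (read below) control the number of GLL points
   per element, the number of elements, the final time, and the viscosity.
*/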

/* ------------------------------------------------------------------------

   This program uses the one-dimensional Burgers equation
       u_t = mu*u_xx - u*u_x,
   on the domain 0 <= x <= L (L = 4 by default), with periodic boundary conditions,

   to demonstrate solving a data assimilation problem: find the initial conditions
   that produce a given solution at a fixed time.

   The operators are discretized with the spectral element method.

   See the paper "PDE-constrained optimization with spectral elements using PETSc and TAO"
   by Oana Marin, Emil Constantinescu, and Barry Smith for details on the exact solution
   used.
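
   For reference, the closed-form solution evaluated in ComputeObjective() below is
       u(x,t) = 2 mu pi sin(pi x) exp(-pi^2 mu t) / (2 + exp(-pi^2 mu t) cos(pi x))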

  ------------------------------------------------------------------------- */

#include <petsctao.h>
#include <petscts.h>
#include <petscdt.h>
#include <petscdraw.h>
#include <petscdmda.h>

/*
   User-defined application context - contains data needed by the
   application-provided call-back routines.
*/

typedef struct {
  PetscInt   n;       /* number of nodes */
  PetscReal *nodes;   /* GLL nodes */
  PetscReal *weights; /* GLL weights */
} PetscGLL;

typedef struct {
  PetscInt  N;               /* number of GLL points per element */
  PetscInt  E;               /* number of elements */
  PetscReal tol_L2, tol_max; /* error norms */
  PetscInt  steps;           /* number of timesteps */
  PetscReal Tend;            /* endtime */
  PetscReal mu;              /* viscosity */
  PetscReal L;               /* total length of domain */
  PetscReal Le;              /* length of one element, L/E */
  PetscReal Tadj;
} PetscParam;

typedef struct {
  Vec obj;           /* desired end state */
  Vec grid;          /* total grid */
  Vec grad;
  Vec ic;            /* initial conditions, the optimization variables */
  Vec curr_sol;      /* solution of the most recent TSSolve() */
  Vec true_solution; /* actual initial conditions for the final solution */
} PetscData;

typedef struct {
  Vec      grid;  /* total grid */
  Vec      mass;  /* mass matrix for total integration */
  Mat      stiff; /* stiffness matrix */
  Mat      keptstiff;
  Mat      grad;
  PetscGLL gll;
} PetscSEMOperators;

typedef struct {
  DM                da; /* distributed array data structure */
  PetscSEMOperators SEMop;
  PetscParam        param;
  PetscData         dat;
  TS                ts;
  PetscReal         initial_dt;
} AppCtx;

/*
   User-defined routines
*/
extern PetscErrorCode FormFunctionGradient(Tao, Vec, PetscReal *, Vec, void *);
extern PetscErrorCode RHSMatrixLaplaciangllDM(TS, PetscReal, Vec, Mat, Mat, void *);
extern PetscErrorCode RHSMatrixAdvectiongllDM(TS, PetscReal, Vec, Mat, Mat, void *);
extern PetscErrorCode InitialConditions(Vec, AppCtx *);
extern PetscErrorCode TrueSolution(Vec, AppCtx *);
extern PetscErrorCode ComputeObjective(PetscReal, Vec, AppCtx *);
extern PetscErrorCode MonitorError(Tao, void *);
extern PetscErrorCode RHSFunction(TS, PetscReal, Vec, Vec, void *);
extern PetscErrorCode RHSJacobian(TS, PetscReal, Vec, Mat, Mat, void *);

int main(int argc, char **argv)
{
  AppCtx       appctx; /* user-defined application context */
  Tao          tao;
  Vec          u; /* approximate solution vector */
  PetscInt     i, xs, xm, ind, j, lenglob;
  PetscReal    x, *wrk_ptr1, *wrk_ptr2;
  MatNullSpace nsp;
  PetscMPIInt  size;

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Initialize program and set problem parameters
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  PetscFunctionBeginUser;
  PetscCall(PetscInitialize(&argc, &argv, NULL, help));

  /* initialize parameters */
  appctx.param.N     = 10;   /* number of GLL points per element */
  appctx.param.E     = 10;   /* number of elements */
  appctx.param.L     = 4.0;  /* length of the domain */
  appctx.param.mu    = 0.01; /* diffusion coefficient */
  appctx.initial_dt  = 5e-3;
  appctx.param.steps = PETSC_INT_MAX;
  appctx.param.Tend  = 4;

  PetscCall(PetscOptionsGetInt(NULL, NULL, "-N", &appctx.param.N, NULL));
  PetscCall(PetscOptionsGetInt(NULL, NULL, "-E", &appctx.param.E, NULL));
  PetscCall(PetscOptionsGetReal(NULL, NULL, "-Tend", &appctx.param.Tend, NULL));
  PetscCall(PetscOptionsGetReal(NULL, NULL, "-mu", &appctx.param.mu, NULL));
  appctx.param.Le = appctx.param.L / appctx.param.E;

  PetscCallMPI(MPI_Comm_size(PETSC_COMM_WORLD, &size));
  PetscCheck((appctx.param.E % size) == 0, PETSC_COMM_WORLD, PETSC_ERR_ARG_WRONG, "Number of elements must be divisible by number of processes");

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create GLL data structures
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  PetscCall(PetscMalloc2(appctx.param.N, &appctx.SEMop.gll.nodes, appctx.param.N, &appctx.SEMop.gll.weights));
  PetscCall(PetscDTGaussLobattoLegendreQuadrature(appctx.param.N, PETSCGAUSSLOBATTOLEGENDRE_VIA_LINEAR_ALGEBRA, appctx.SEMop.gll.nodes, appctx.SEMop.gll.weights));
  appctx.SEMop.gll.n = appctx.param.N;
  lenglob            = appctx.param.E * (appctx.param.N - 1);

  /*
     Create distributed array (DMDA) to manage parallel grid and vectors
     and to set up the ghost point communication pattern. With the periodic
     boundary conditions there are E*(N-1) total grid values, spread as
     evenly as possible among the processors.
  */

  PetscCall(DMDACreate1d(PETSC_COMM_WORLD, DM_BOUNDARY_PERIODIC, lenglob, 1, 1, NULL, &appctx.da));
  PetscCall(DMSetFromOptions(appctx.da));
  PetscCall(DMSetUp(appctx.da));

  /*
     Extract global and local vectors from DMDA; we use these to store the
     approximate solution.  Then duplicate these for remaining vectors that
     have the same types.
  */

  PetscCall(DMCreateGlobalVector(appctx.da, &u));
  PetscCall(VecDuplicate(u, &appctx.dat.ic));
  PetscCall(VecDuplicate(u, &appctx.dat.true_solution));
  PetscCall(VecDuplicate(u, &appctx.dat.obj));
  PetscCall(VecDuplicate(u, &appctx.SEMop.grid));
  PetscCall(VecDuplicate(u, &appctx.SEMop.mass));
  PetscCall(VecDuplicate(u, &appctx.dat.curr_sol));

  PetscCall(DMDAGetCorners(appctx.da, &xs, NULL, NULL, &xm, NULL, NULL));
  PetscCall(DMDAVecGetArray(appctx.da, appctx.SEMop.grid, &wrk_ptr1));
  PetscCall(DMDAVecGetArray(appctx.da, appctx.SEMop.mass, &wrk_ptr2));

  /* Compute function over the locally owned part of the grid */

  /* convert the local corner/width from grid-point counts to element counts */
  xs = xs / (appctx.param.N - 1);
  xm = xm / (appctx.param.N - 1);

  /*
     Build total grid and mass over entire mesh (multi-elemental)
  */
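  /*
     Interior element-boundary nodes are shared by two neighboring elements, and the
     loop below visits each shared node only once (j runs up to N-2). Because the GLL
     weights are symmetric (weights[0] == weights[N-1]), adding weights[0] a second
     time when j == 0 supplies the neighboring element's contribution to that node.
  */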

  for (i = xs; i < xs + xm; i++) {
    for (j = 0; j < appctx.param.N - 1; j++) {
      x             = (appctx.param.Le / 2.0) * (appctx.SEMop.gll.nodes[j] + 1.0) + appctx.param.Le * i;
      ind           = i * (appctx.param.N - 1) + j;
      wrk_ptr1[ind] = x;
      wrk_ptr2[ind] = .5 * appctx.param.Le * appctx.SEMop.gll.weights[j];
      if (j == 0) wrk_ptr2[ind] += .5 * appctx.param.Le * appctx.SEMop.gll.weights[j];
    }
  }
  PetscCall(DMDAVecRestoreArray(appctx.da, appctx.SEMop.grid, &wrk_ptr1));
  PetscCall(DMDAVecRestoreArray(appctx.da, appctx.SEMop.mass, &wrk_ptr2));

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
   Create matrix data structure; set matrix evaluation routine.
   - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  PetscCall(DMSetMatrixPreallocateOnly(appctx.da, PETSC_TRUE));
  PetscCall(DMCreateMatrix(appctx.da, &appctx.SEMop.stiff));
  PetscCall(DMCreateMatrix(appctx.da, &appctx.SEMop.grad));
  /*
   Assemble the constant-in-time operators: the Laplacian (stiff) and the
   gradient (grad). The TS argument is ignored by these routines, so they
   may be called before the TS is created.
   */
  PetscCall(RHSMatrixLaplaciangllDM(appctx.ts, 0.0, u, appctx.SEMop.stiff, appctx.SEMop.stiff, &appctx));
  PetscCall(RHSMatrixAdvectiongllDM(appctx.ts, 0.0, u, appctx.SEMop.grad, appctx.SEMop.grad, &appctx));

  /* keep an unmodified copy of the Laplacian for use in RHSFunction() and RHSJacobian() */
  PetscCall(MatDuplicate(appctx.SEMop.stiff, MAT_COPY_VALUES, &appctx.SEMop.keptstiff));

  /* attach the null space to the matrix, this probably is not needed but does no harm */
  PetscCall(MatNullSpaceCreate(PETSC_COMM_WORLD, PETSC_TRUE, 0, NULL, &nsp));
  PetscCall(MatSetNullSpace(appctx.SEMop.stiff, nsp));
  PetscCall(MatSetNullSpace(appctx.SEMop.keptstiff, nsp));
  PetscCall(MatNullSpaceTest(nsp, appctx.SEMop.stiff, NULL));
  PetscCall(MatNullSpaceDestroy(&nsp));
  /* likewise attach the null space to the gradient matrix */
  PetscCall(MatNullSpaceCreate(PETSC_COMM_WORLD, PETSC_TRUE, 0, NULL, &nsp));
  PetscCall(MatSetNullSpace(appctx.SEMop.grad, nsp));
  PetscCall(MatNullSpaceTest(nsp, appctx.SEMop.grad, NULL));
  PetscCall(MatNullSpaceDestroy(&nsp));
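
  /*
     With periodic boundary conditions both the Laplacian and the gradient
     annihilate constant functions, which is why the constant null space is
     attached to these matrices above.
  */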

  /* Create the TS solver that solves the ODE and its adjoint; set its options */
  PetscCall(TSCreate(PETSC_COMM_WORLD, &appctx.ts));
  PetscCall(TSSetProblemType(appctx.ts, TS_NONLINEAR));
  PetscCall(TSSetType(appctx.ts, TSRK));
  PetscCall(TSSetDM(appctx.ts, appctx.da));
  PetscCall(TSSetTime(appctx.ts, 0.0));
  PetscCall(TSSetTimeStep(appctx.ts, appctx.initial_dt));
  PetscCall(TSSetMaxSteps(appctx.ts, appctx.param.steps));
  PetscCall(TSSetMaxTime(appctx.ts, appctx.param.Tend));
  PetscCall(TSSetExactFinalTime(appctx.ts, TS_EXACTFINALTIME_MATCHSTEP));
  PetscCall(TSSetTolerances(appctx.ts, 1e-7, NULL, 1e-7, NULL));
  PetscCall(TSSetFromOptions(appctx.ts));
  /* Save the initial timestep the user may have set with -ts_dt so it can be reset for each new TSSolve() */
  PetscCall(TSGetTimeStep(appctx.ts, &appctx.initial_dt));
  PetscCall(TSSetRHSFunction(appctx.ts, NULL, RHSFunction, &appctx));
  PetscCall(TSSetRHSJacobian(appctx.ts, appctx.SEMop.stiff, appctx.SEMop.stiff, RHSJacobian, &appctx));
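  /*
     Note that SEMop.stiff is passed to TSSetRHSJacobian() as storage for the Jacobian;
     RHSJacobian() overwrites its entries on each call, while the copy kept above in
     SEMop.keptstiff preserves the Laplacian.
  */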

  /* Set the objective and initial conditions for the problem and compute the objective function (evolution of true_solution to the final time) */
  PetscCall(InitialConditions(appctx.dat.ic, &appctx));
  PetscCall(TrueSolution(appctx.dat.true_solution, &appctx));
  PetscCall(ComputeObjective(appctx.param.Tend, appctx.dat.obj, &appctx));

  PetscCall(TSSetSaveTrajectory(appctx.ts));
  PetscCall(TSSetFromOptions(appctx.ts));

  /* Create TAO solver and set desired solution method */
  PetscCall(TaoCreate(PETSC_COMM_WORLD, &tao));
  PetscCall(TaoMonitorSet(tao, MonitorError, &appctx, NULL));
  PetscCall(TaoSetType(tao, TAOBQNLS));
  PetscCall(TaoSetSolution(tao, appctx.dat.ic));
  /* Set routine for function and gradient evaluation */
  PetscCall(TaoSetObjectiveAndGradient(tao, NULL, FormFunctionGradient, (void *)&appctx));
  /* Check for any TAO command line options */
  PetscCall(TaoSetTolerances(tao, 1e-8, PETSC_CURRENT, PETSC_CURRENT));
  PetscCall(TaoSetFromOptions(tao));
  PetscCall(TaoSolve(tao));

  PetscCall(TaoDestroy(&tao));
  PetscCall(MatDestroy(&appctx.SEMop.stiff));
  PetscCall(MatDestroy(&appctx.SEMop.keptstiff));
  PetscCall(MatDestroy(&appctx.SEMop.grad));
  PetscCall(VecDestroy(&u));
  PetscCall(VecDestroy(&appctx.dat.ic));
  PetscCall(VecDestroy(&appctx.dat.true_solution));
  PetscCall(VecDestroy(&appctx.dat.obj));
  PetscCall(VecDestroy(&appctx.SEMop.grid));
  PetscCall(VecDestroy(&appctx.SEMop.mass));
  PetscCall(VecDestroy(&appctx.dat.curr_sol));
  PetscCall(PetscFree2(appctx.SEMop.gll.nodes, appctx.SEMop.gll.weights));
  PetscCall(DMDestroy(&appctx.da));
  PetscCall(TSDestroy(&appctx.ts));

  /*
     Always call PetscFinalize() before exiting a program.  This routine
       - finalizes the PETSc libraries as well as MPI
       - provides summary and diagnostic information if certain runtime
         options are chosen (e.g., -log_view).
  */
  PetscCall(PetscFinalize());
  return 0;
}

/* --------------------------------------------------------------------- */
/*
   InitialConditions - Computes the initial conditions for the Tao optimization solve
                       (these are also the initial conditions for the first TSSolve()).
                       They consist of the true solution plus a Gaussian bump centered at x = 2.

                       The routine TrueSolution() computes the initial conditions that evolve
                       into the desired solution used by the objective function.

   Input Parameter:
   u - uninitialized solution vector (global)
   appctx - user-defined application context

   Output Parameter:
   u - vector with solution at initial time (global)
*/
PetscErrorCode InitialConditions(Vec u, AppCtx *appctx)
{
  PetscScalar       *s;
  const PetscScalar *xg;
  PetscInt           i, xs, xn;

  PetscFunctionBegin;
  PetscCall(DMDAVecGetArray(appctx->da, u, &s));
  PetscCall(DMDAVecGetArrayRead(appctx->da, appctx->SEMop.grid, (void *)&xg));
  PetscCall(DMDAGetCorners(appctx->da, &xs, NULL, NULL, &xn, NULL, NULL));
  for (i = xs; i < xs + xn; i++) s[i] = 2.0 * appctx->param.mu * PETSC_PI * PetscSinScalar(PETSC_PI * xg[i]) / (2.0 + PetscCosScalar(PETSC_PI * xg[i])) + 0.25 * PetscExpReal(-4.0 * PetscPowReal(xg[i] - 2.0, 2.0));
  PetscCall(DMDAVecRestoreArray(appctx->da, u, &s));
  PetscCall(DMDAVecRestoreArrayRead(appctx->da, appctx->SEMop.grid, (void *)&xg));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
   TrueSolution() computes the true (optimal) initial conditions, that is, the initial
   conditions that evolve into the desired solution used by the objective function.

             InitialConditions() computes the initial conditions used to start the Tao iterations.

   Input Parameter:
   u - uninitialized solution vector (global)
   appctx - user-defined application context

   Output Parameter:
   u - vector with solution at initial time (global)
*/
PetscErrorCode TrueSolution(Vec u, AppCtx *appctx)
{
  PetscScalar       *s;
  const PetscScalar *xg;
  PetscInt           i, xs, xn;

  PetscFunctionBegin;
  PetscCall(DMDAVecGetArray(appctx->da, u, &s));
  PetscCall(DMDAVecGetArrayRead(appctx->da, appctx->SEMop.grid, (void *)&xg));
  PetscCall(DMDAGetCorners(appctx->da, &xs, NULL, NULL, &xn, NULL, NULL));
  for (i = xs; i < xs + xn; i++) s[i] = 2.0 * appctx->param.mu * PETSC_PI * PetscSinScalar(PETSC_PI * xg[i]) / (2.0 + PetscCosScalar(PETSC_PI * xg[i]));
  PetscCall(DMDAVecRestoreArray(appctx->da, u, &s));
  PetscCall(DMDAVecRestoreArrayRead(appctx->da, appctx->SEMop.grid, (void *)&xg));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* --------------------------------------------------------------------- */
/*
   Sets the desired profile at the final time

   Input Parameters:
   t - final time
   obj - vector storing the desired profile
   appctx - user-defined application context

*/
PetscErrorCode ComputeObjective(PetscReal t, Vec obj, AppCtx *appctx)
{
  PetscScalar       *s;
  const PetscScalar *xg;
  PetscInt           i, xs, xn;

  PetscFunctionBegin;
  PetscCall(DMDAVecGetArray(appctx->da, obj, &s));
  PetscCall(DMDAVecGetArrayRead(appctx->da, appctx->SEMop.grid, (void *)&xg));
  PetscCall(DMDAGetCorners(appctx->da, &xs, NULL, NULL, &xn, NULL, NULL));
  for (i = xs; i < xs + xn; i++) {
    s[i] = 2.0 * appctx->param.mu * PETSC_PI * PetscSinScalar(PETSC_PI * xg[i]) * PetscExpScalar(-PETSC_PI * PETSC_PI * t * appctx->param.mu) / (2.0 + PetscExpScalar(-PETSC_PI * PETSC_PI * t * appctx->param.mu) * PetscCosScalar(PETSC_PI * xg[i]));
  }
  PetscCall(DMDAVecRestoreArray(appctx->da, obj, &s));
  PetscCall(DMDAVecRestoreArrayRead(appctx->da, appctx->SEMop.grid, (void *)&xg));
  PetscFunctionReturn(PETSC_SUCCESS);
}
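
/*
   RHSFunction - evaluates the right-hand side mu*u_xx - u*u_x of the Burgers equation.
   Note that SEMop.grad and SEMop.keptstiff were scaled by the inverse of the mass
   vector when they were assembled, so no mass matrix appears here.
*/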

PetscErrorCode RHSFunction(TS ts, PetscReal t, Vec globalin, Vec globalout, void *ctx)
{
  AppCtx *appctx = (AppCtx *)ctx;

  PetscFunctionBegin;
  PetscCall(MatMult(appctx->SEMop.grad, globalin, globalout)); /* grad u */
  PetscCall(VecPointwiseMult(globalout, globalin, globalout)); /* u grad u */
  PetscCall(VecScale(globalout, -1.0));                        /* -u grad u */
  PetscCall(MatMultAdd(appctx->SEMop.keptstiff, globalin, globalout, globalout)); /* -u grad u + K u */
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*

      K is the discretization of the Laplacian
      G is the discretization of the gradient

      Computes the Jacobian of   K u - diag(u) G u,   which is given by
              K - diag(u) G - diag(G u)
*/
PetscErrorCode RHSJacobian(TS ts, PetscReal t, Vec globalin, Mat A, Mat B, void *ctx)
{
  AppCtx *appctx = (AppCtx *)ctx;
  Vec     Gglobalin;

  PetscFunctionBegin;
  /*    A = diag(u) G */

  PetscCall(MatCopy(appctx->SEMop.grad, A, SAME_NONZERO_PATTERN));
  PetscCall(MatDiagonalScale(A, globalin, NULL));

  /*    A = A + diag(Gu) */
  PetscCall(VecDuplicate(globalin, &Gglobalin));
  PetscCall(MatMult(appctx->SEMop.grad, globalin, Gglobalin));
  PetscCall(MatDiagonalSet(A, Gglobalin, ADD_VALUES));
  PetscCall(VecDestroy(&Gglobalin));

  /*    A = K - A */
  PetscCall(MatScale(A, -1.0));
  PetscCall(MatAXPY(A, 1.0, appctx->SEMop.keptstiff, SAME_NONZERO_PATTERN));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* --------------------------------------------------------------------- */

/*
   RHSMatrixLaplaciangllDM - User-provided routine to compute the matrix for
   the diffusion (Laplacian) term.

   Input Parameters:
   ts - the TS context (ignored)
   t - current time (ignored)
   X - current solution (ignored)
   ctx - optional user-defined context, as set by TSSetRHSJacobian()

   Output Parameters:
   A - Jacobian matrix
   BB - optionally different matrix from which the preconditioner is built

*/
PetscErrorCode RHSMatrixLaplaciangllDM(TS ts, PetscReal t, Vec X, Mat A, Mat BB, void *ctx)
{
  PetscReal **temp;
  PetscReal   vv;
  AppCtx     *appctx = (AppCtx *)ctx; /* user-defined application context */
  PetscInt    i, xs, xn, l, j;
  PetscInt   *rowsDM;

  PetscFunctionBegin;
  /*
   Creates the element stiffness matrix for the given GLL nodes
   */
  PetscCall(PetscGaussLobattoLegendreElementLaplacianCreate(appctx->SEMop.gll.n, appctx->SEMop.gll.nodes, appctx->SEMop.gll.weights, &temp));
  /* workaround for clang analyzer warning: Division by zero */
  PetscCheck(appctx->param.N > 1, PETSC_COMM_WORLD, PETSC_ERR_ARG_WRONG, "Spectral element order should be > 1");

  /* scale by the size of the element */
  for (i = 0; i < appctx->param.N; i++) {
    vv = -appctx->param.mu * 2.0 / appctx->param.Le;
    for (j = 0; j < appctx->param.N; j++) temp[i][j] = temp[i][j] * vv;
  }

  PetscCall(MatSetOption(A, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE));
  PetscCall(DMDAGetCorners(appctx->da, &xs, NULL, NULL, &xn, NULL, NULL));

  /* convert from grid-point counts to element counts */
  xs = xs / (appctx->param.N - 1);
  xn = xn / (appctx->param.N - 1);

  PetscCall(PetscMalloc1(appctx->param.N, &rowsDM));
  /*
   loop over local elements; the +1 in the local row indices skips the ghost
   node at the left end of the local (ghosted) ordering
   */
  for (j = xs; j < xs + xn; j++) {
    for (l = 0; l < appctx->param.N; l++) rowsDM[l] = 1 + (j - xs) * (appctx->param.N - 1) + l;
    PetscCall(MatSetValuesLocal(A, appctx->param.N, rowsDM, appctx->param.N, rowsDM, &temp[0][0], ADD_VALUES));
  }
  PetscCall(PetscFree(rowsDM));
  PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));
  /* temporarily invert the mass vector to apply the inverse mass matrix from the left, then restore it */
  PetscCall(VecReciprocal(appctx->SEMop.mass));
  PetscCall(MatDiagonalScale(A, appctx->SEMop.mass, NULL));
  PetscCall(VecReciprocal(appctx->SEMop.mass));

  PetscCall(PetscGaussLobattoLegendreElementLaplacianDestroy(appctx->SEMop.gll.n, appctx->SEMop.gll.nodes, appctx->SEMop.gll.weights, &temp));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
   RHSMatrixAdvectiongllDM - User-provided routine to compute the matrix for
   the advection (gradient) term.

   Input Parameters:
   ts - the TS context (ignored)
   t - current time (ignored)
   X - current solution (ignored)
   ctx - optional user-defined context, as set by TSSetRHSJacobian()

   Output Parameters:
   A - Jacobian matrix
   BB - optionally different preconditioning matrix

*/
PetscErrorCode RHSMatrixAdvectiongllDM(TS ts, PetscReal t, Vec X, Mat A, Mat BB, void *ctx)
{
  PetscReal **temp;
  AppCtx     *appctx = (AppCtx *)ctx; /* user-defined application context */
  PetscInt    xs, xn, l, j;
  PetscInt   *rowsDM;

  PetscFunctionBegin;
  /*
   Creates the advection matrix for the given GLL nodes
   */
  PetscCall(PetscGaussLobattoLegendreElementAdvectionCreate(appctx->SEMop.gll.n, appctx->SEMop.gll.nodes, appctx->SEMop.gll.weights, &temp));
  PetscCall(MatSetOption(A, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE));

  PetscCall(DMDAGetCorners(appctx->da, &xs, NULL, NULL, &xn, NULL, NULL));

  /* convert from grid-point counts to element counts */
  xs = xs / (appctx->param.N - 1);
  xn = xn / (appctx->param.N - 1);

  PetscCall(PetscMalloc1(appctx->param.N, &rowsDM));
  for (j = xs; j < xs + xn; j++) {
    for (l = 0; l < appctx->param.N; l++) rowsDM[l] = 1 + (j - xs) * (appctx->param.N - 1) + l;
    PetscCall(MatSetValuesLocal(A, appctx->param.N, rowsDM, appctx->param.N, rowsDM, &temp[0][0], ADD_VALUES));
  }
  PetscCall(PetscFree(rowsDM));
  PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));

  /* temporarily invert the mass vector to apply the inverse mass matrix from the left, then restore it */
  PetscCall(VecReciprocal(appctx->SEMop.mass));
  PetscCall(MatDiagonalScale(A, appctx->SEMop.mass, NULL));
  PetscCall(VecReciprocal(appctx->SEMop.mass));
  PetscCall(PetscGaussLobattoLegendreElementAdvectionDestroy(appctx->SEMop.gll.n, appctx->SEMop.gll.nodes, appctx->SEMop.gll.weights, &temp));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* ------------------------------------------------------------------ */
/*
   FormFunctionGradient - Evaluates the function and corresponding gradient.

   Input Parameters:
   tao - the Tao context
   IC  - the input vector
   ctx - optional user-defined context, as set when calling TaoSetObjectiveAndGradient()

   Output Parameters:
   f   - the newly evaluated function
   G   - the newly evaluated gradient

   Notes:

          The forward equation is
              M u_t = F(U)
          which is converted to
                u_t = M^{-1} F(u)
          in the user code since TS has no direct way of providing a mass matrix. The Jacobian of this is
                 M^{-1} J
          where J is the Jacobian of F. Now the adjoint equation is
                M v_t = J^T v
          but TSAdjoint does not solve this since it can only solve the transposed system for the
          Jacobian the user provided. Hence TSAdjoint solves
                 w_t = J^T M^{-1} w  (where w = M v)
          since there is no way to indicate the mass matrix as a separate entity to TS. Thus one
          must be careful in initializing the "adjoint equation" and using the result. This is
          why
              G = 2 M (u(T) - u_d)
          below (instead of 2 (u(T) - u_d)) and why the result is rescaled as
              G = G/appctx->SEMop.mass (that is, G = M^{-1} w)
          below (instead of just the result of the "adjoint solve").
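
          In terms of the code below, the steps are
              G = 2 M (u(T) - u_d)                        (VecWAXPY, VecScale, VecPointwiseMult)
              w = result of TSAdjointSolve() initialized with this G
              G = w / M                                   (VecPointwiseDivide)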

*/
PetscErrorCode FormFunctionGradient(Tao tao, Vec IC, PetscReal *f, Vec G, void *ctx)
{
  AppCtx            *appctx = (AppCtx *)ctx; /* user-defined application context */
  Vec                temp;
  PetscInt           its;
  PetscReal          ff, gnorm, cnorm, xdiff, errex;
  TaoConvergedReason reason;

  PetscFunctionBegin;
  PetscCall(TSSetTime(appctx->ts, 0.0));
  PetscCall(TSSetStepNumber(appctx->ts, 0));
  PetscCall(TSSetTimeStep(appctx->ts, appctx->initial_dt));
  PetscCall(VecCopy(IC, appctx->dat.curr_sol));

  PetscCall(TSSolve(appctx->ts, appctx->dat.curr_sol));

  PetscCall(VecWAXPY(G, -1.0, appctx->dat.curr_sol, appctx->dat.obj)); /* G = u_d - u(T) */

  /*
     Compute the cost function f = (u(T) - u_d)^T M (u(T) - u_d), the discrete L2 misfit
  */
  PetscCall(VecDuplicate(G, &temp));
  PetscCall(VecPointwiseMult(temp, G, G));
  PetscCall(VecDot(temp, appctx->SEMop.mass, f));

  /* local error evaluation */
  PetscCall(VecWAXPY(temp, -1.0, appctx->dat.ic, appctx->dat.true_solution));
  PetscCall(VecPointwiseMult(temp, temp, temp));
  /* for error evaluation */
  PetscCall(VecDot(temp, appctx->SEMop.mass, &errex));
  PetscCall(VecDestroy(&temp));
  errex = PetscSqrtReal(errex);

  /*
     Compute initial conditions for the adjoint integration. See Notes above
  */

  PetscCall(VecScale(G, -2.0));                          /* G = 2 (u(T) - u_d) */
  PetscCall(VecPointwiseMult(G, G, appctx->SEMop.mass)); /* G = 2 M (u(T) - u_d) */
  PetscCall(TSSetCostGradients(appctx->ts, 1, &G, NULL));
  PetscCall(TSAdjointSolve(appctx->ts));
  PetscCall(VecPointwiseDivide(G, G, appctx->SEMop.mass)); /* G = M^{-1} w */

  PetscCall(TaoGetSolutionStatus(tao, &its, &ff, &gnorm, &cnorm, &xdiff, &reason));
  PetscFunctionReturn(PETSC_SUCCESS);
}

PetscErrorCode MonitorError(Tao tao, void *ctx)
{
  AppCtx   *appctx = (AppCtx *)ctx;
  Vec       temp;
  PetscReal nrm;

  PetscFunctionBegin;
  PetscCall(VecDuplicate(appctx->dat.ic, &temp));
  PetscCall(VecWAXPY(temp, -1.0, appctx->dat.ic, appctx->dat.true_solution));
  PetscCall(VecPointwiseMult(temp, temp, temp));
  PetscCall(VecDot(temp, appctx->SEMop.mass, &nrm));
  PetscCall(VecDestroy(&temp));
  nrm = PetscSqrtReal(nrm);
  PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Error for initial conditions %g\n", (double)nrm));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*TEST

    build:
      requires: !complex

    test:
      args: -tao_max_it 5 -tao_gatol 1.e-4
      requires: !single

    test:
      suffix: 2
      nsize: 2
      args: -tao_max_it 5 -tao_gatol 1.e-4
      requires: !single

TEST*/