Actual source code: ex62.c
1: static char help[] = "Stokes Problem discretized with finite elements,\n\
2: using a parallel unstructured mesh (DMPLEX) to represent the domain.\n\n\n";
4: /*
5: For the isoviscous Stokes problem, which we discretize using the finite
6: element method on an unstructured mesh, the weak form equations are
8: < \nabla v, mu (\nabla u + {\nabla u}^T) > - < \nabla\cdot v, p > - < v, f > = 0
9: < q, -\nabla\cdot u > = 0
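These follow from the strong form (with constant viscosity mu, as verified term by term in the MMS comments below)

  -\nabla\cdot (mu (\nabla u + {\nabla u}^T)) + \nabla p = f
   \nabla\cdot u = 0

after testing against v and q and integrating by parts.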
11: Viewing:
13: To produce nice output, use
15: -dm_refine 3 -dm_view hdf5:sol1.h5 -error_vec_view hdf5:sol1.h5::append -snes_view_solution hdf5:sol1.h5::append -exact_vec_view hdf5:sol1.h5::append
17: You can get a LaTeX view of the mesh, with point numbering, using
19: -dm_view :mesh.tex:ascii_latex -dm_plex_view_scale 8.0
21: The data layout can be viewed using
23: -dm_petscsection_view
25: Lots of information about the FEM assembly can be printed using
27: -dm_plex_print_fem 3
28: */
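/*
  Example run (one possible invocation, mirroring the P2-P1 checks in the TEST block at the end of this file):

    ./ex62 -sol quadratic -dm_refine 2 \
      -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
      -dmsnes_check 0.0001

  which solves with Taylor-Hood elements and checks the discretization against the quadratic MMS solution.
*/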
30: #include <petscdmplex.h>
31: #include <petscsnes.h>
32: #include <petscds.h>
33: #include <petscbag.h>
35: // TODO: Plot residual by fields after each smoother iterate
37: typedef enum {
38: SOL_QUADRATIC,
39: SOL_TRIG,
40: SOL_UNKNOWN
41: } SolType;
42: const char *SolTypes[] = {"quadratic", "trig", "unknown", "SolType", "SOL_", 0};
44: typedef struct {
45: PetscScalar mu; /* dynamic shear viscosity */
46: } Parameter;
48: typedef struct {
49: PetscBag bag; /* Problem parameters */
50: SolType sol; /* MMS solution */
51: } AppCtx;
53: static void f1_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f1[])
54: {
55: const PetscReal mu = PetscRealPart(constants[0]);
56: const PetscInt Nc = uOff[1] - uOff[0];
57: PetscInt c, d;
59: for (c = 0; c < Nc; ++c) {
60: for (d = 0; d < dim; ++d) f1[c * dim + d] = mu * (u_x[c * dim + d] + u_x[d * dim + c]);
61: f1[c * dim + c] -= u[uOff[1]];
62: }
63: }
65: static void f0_p(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
66: {
67: PetscInt d;
68: for (d = 0, f0[0] = 0.0; d < dim; ++d) f0[0] -= u_x[d * dim + d];
69: }
71: static void g1_pu(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g1[])
72: {
73: PetscInt d;
74: for (d = 0; d < dim; ++d) g1[d * dim + d] = -1.0; /* < q, -\nabla\cdot u > */
75: }
77: static void g2_up(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g2[])
78: {
79: PetscInt d;
80: for (d = 0; d < dim; ++d) g2[d * dim + d] = -1.0; /* -< \nabla\cdot v, p > */
81: }
83: static void g3_uu(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g3[])
84: {
85: const PetscReal mu = PetscRealPart(constants[0]);
86: const PetscInt Nc = uOff[1] - uOff[0];
87: PetscInt c, d;
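  /* g3 is an Nc x Nc x dim x dim tensor: entry ((cv*Nc + cu)*dim + dv)*dim + du holds
     d f1[cv*dim + dv] / d u_x[cu*dim + du], so the two updates below are the derivatives of the
     symmetric gradient term mu (u_x[c*dim + d] + u_x[d*dim + c]) assembled in f1_u above */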
89: for (c = 0; c < Nc; ++c) {
90: for (d = 0; d < dim; ++d) {
91: g3[((c * Nc + c) * dim + d) * dim + d] += mu; /* < \nabla v, \nabla u > */
92: g3[((c * Nc + d) * dim + d) * dim + c] += mu; /* < \nabla v, {\nabla u}^T > */
93: }
94: }
95: }
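/* (pressure, pressure) block of the preconditioning matrix: a (1/mu)-weighted pressure mass matrix,
   the usual mass-matrix preconditioner for the Stokes Schur complement. It is registered in
   SetupEqn() below via PetscDSSetJacobianPreconditioner(). */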
97: static void g0_pp(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
98: {
99: const PetscReal mu = PetscRealPart(constants[0]);
101: g0[0] = 1.0 / mu;
102: }
104: /* Quadratic MMS Solution
105: 2D:
107: u = x^2 + y^2
108: v = 2 x^2 - 2xy
109: p = x + y - 1
110: f = <1 - 4 mu, 1 - 4 mu>
112: so that
114:   e(u) = (grad u + grad u^T) = /  4x    4x \
115:                                \  4x   -4x /
116: div mu e(u) - \nabla p + f = mu <4, 4> - <1, 1> + <1 - 4 mu, 1 - 4 mu> = 0
117: \nabla \cdot u = 2x - 2x = 0
119: 3D:
121: u = 2 x^2 + y^2 + z^2
122: v = 2 x^2 - 2xy
123: w = 2 x^2 - 2xz
124: p = x + y + z - 3/2
125: f = <1 - 8 mu, 1 - 4 mu, 1 - 4 mu>
127: so that
129:   e(u) = (grad u + grad u^T) = / 8x   4x   4x \
130:                                | 4x  -4x    0 |
131:                                \ 4x    0  -4x /
132: div mu e(u) - \nabla p + f = mu <8, 4, 4> - <1, 1, 1> + <1 - 8 mu, 1 - 4 mu, 1 - 4 mu> = 0
133: \nabla \cdot u = 4x - 2x - 2x = 0
134: */
135: static PetscErrorCode quadratic_u(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
136: {
137: PetscInt c;
139: u[0] = (dim - 1) * PetscSqr(x[0]);
140: for (c = 1; c < Nc; ++c) {
141: u[0] += PetscSqr(x[c]);
142: u[c] = 2.0 * PetscSqr(x[0]) - 2.0 * x[0] * x[c];
143: }
144: return PETSC_SUCCESS;
145: }
147: static PetscErrorCode quadratic_p(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
148: {
149: PetscInt d;
151: u[0] = -0.5 * dim;
152: for (d = 0; d < dim; ++d) u[0] += x[d];
153: return PETSC_SUCCESS;
154: }
156: static void f0_quadratic_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
157: {
158: const PetscReal mu = PetscRealPart(constants[0]);
159: PetscInt d;
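  /* The weak form contributes -<v, f>, so we assemble f0 = -f; for the quadratic forcing above this is
     <(dim - 1) 4 mu - 1, 4 mu - 1, ...> */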
161: f0[0] = (dim - 1) * 4.0 * mu - 1.0;
162: for (d = 1; d < dim; ++d) f0[d] = 4.0 * mu - 1.0;
163: }
165: /* Trigonometric MMS Solution
166: 2D:
168: u = sin(pi x) + sin(pi y)
169: v = -pi cos(pi x) y
170: p = sin(2 pi x) + sin(2 pi y)
171: f = <2pi cos(2 pi x) + mu pi^2 sin(pi x) + mu pi^2 sin(pi y), 2pi cos(2 pi y) - mu pi^3 cos(pi x) y>
173: so that
175:   e(u) = (grad u + grad u^T) = / 2pi cos(pi x)                    pi cos(pi y) + pi^2 sin(pi x) y \
176:                                \ pi cos(pi y) + pi^2 sin(pi x) y  -2pi cos(pi x)                   /
177: div mu e(u) - \nabla p + f = mu <-pi^2 sin(pi x) - pi^2 sin(pi y), pi^3 cos(pi x) y> - <2pi cos(2 pi x), 2pi cos(2 pi y)> + <f_x, f_y> = 0
178: \nabla \cdot u = pi cos(pi x) - pi cos(pi x) = 0
180: 3D:
182: u = 2 sin(pi x) + sin(pi y) + sin(pi z)
183: v = -pi cos(pi x) y
184: w = -pi cos(pi x) z
185: p = sin(2 pi x) + sin(2 pi y) + sin(2 pi z)
186: f = <2pi cos(2 pi x) + mu 2pi^2 sin(pi x) + mu pi^2 sin(pi y) + mu pi^2 sin(pi z), 2pi cos(2 pi y) - mu pi^3 cos(pi x) y, 2pi cos(2 pi z) - mu pi^3 cos(pi x) z>
188: so that
190:   e(u) = (grad u + grad u^T) = / 4pi cos(pi x)                    pi cos(pi y) + pi^2 sin(pi x) y  pi cos(pi z) + pi^2 sin(pi x) z \
191:                                | pi cos(pi y) + pi^2 sin(pi x) y  -2pi cos(pi x)                   0                                |
192:                                \ pi cos(pi z) + pi^2 sin(pi x) z  0                                -2pi cos(pi x)                   /
193: div mu e(u) - \nabla p + f = mu <-2pi^2 sin(pi x) - pi^2 sin(pi y) - pi^2 sin(pi z), pi^3 cos(pi x) y, pi^3 cos(pi x) z> - <2pi cos(2 pi x), 2pi cos(2 pi y), 2pi cos(2 pi z)> + <f_x, f_y, f_z> = 0
194: \nabla \cdot u = 2 pi cos(pi x) - pi cos(pi x) - pi cos(pi x) = 0
195: */
196: static PetscErrorCode trig_u(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
197: {
198: PetscInt c;
200: u[0] = (dim - 1) * PetscSinReal(PETSC_PI * x[0]);
201: for (c = 1; c < Nc; ++c) {
202: u[0] += PetscSinReal(PETSC_PI * x[c]);
203: u[c] = -PETSC_PI * PetscCosReal(PETSC_PI * x[0]) * x[c];
204: }
205: return PETSC_SUCCESS;
206: }
208: static PetscErrorCode trig_p(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
209: {
210: PetscInt d;
212: for (d = 0, u[0] = 0.0; d < dim; ++d) u[0] += PetscSinReal(2.0 * PETSC_PI * x[d]);
213: return PETSC_SUCCESS;
214: }
216: static void f0_trig_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
217: {
218: const PetscReal mu = PetscRealPart(constants[0]);
219: PetscInt d;
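  /* As in f0_quadratic_u, f0 = -f for the trigonometric forcing given above */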
221: f0[0] = -2.0 * PETSC_PI * PetscCosReal(2.0 * PETSC_PI * x[0]) - (dim - 1) * mu * PetscSqr(PETSC_PI) * PetscSinReal(PETSC_PI * x[0]);
222: for (d = 1; d < dim; ++d) {
223: f0[0] -= mu * PetscSqr(PETSC_PI) * PetscSinReal(PETSC_PI * x[d]);
224: f0[d] = -2.0 * PETSC_PI * PetscCosReal(2.0 * PETSC_PI * x[d]) + mu * PetscPowRealInt(PETSC_PI, 3) * PetscCosReal(PETSC_PI * x[0]) * x[d];
225: }
226: }
228: static PetscErrorCode ProcessOptions(MPI_Comm comm, AppCtx *options)
229: {
230: PetscInt sol;
232: PetscFunctionBeginUser;
233: options->sol = SOL_QUADRATIC;
234: PetscOptionsBegin(comm, "", "Stokes Problem Options", "DMPLEX");
235: sol = options->sol;
236: PetscCall(PetscOptionsEList("-sol", "The MMS solution", "ex62.c", SolTypes, PETSC_STATIC_ARRAY_LENGTH(SolTypes) - 3, SolTypes[options->sol], &sol, NULL));
237: options->sol = (SolType)sol;
238: PetscOptionsEnd();
239: PetscFunctionReturn(PETSC_SUCCESS);
240: }
242: static PetscErrorCode CreateMesh(MPI_Comm comm, AppCtx *user, DM *dm)
243: {
244: PetscFunctionBeginUser;
245: PetscCall(DMCreate(comm, dm));
246: PetscCall(DMSetType(*dm, DMPLEX));
247: PetscCall(DMSetFromOptions(*dm));
248: PetscCall(DMViewFromOptions(*dm, NULL, "-dm_view"));
249: PetscFunctionReturn(PETSC_SUCCESS);
250: }
252: static PetscErrorCode SetupParameters(MPI_Comm comm, AppCtx *ctx)
253: {
254: Parameter *p;
256: PetscFunctionBeginUser;
257: /* setup PETSc parameter bag */
258: PetscCall(PetscBagCreate(PETSC_COMM_SELF, sizeof(Parameter), &ctx->bag));
259: PetscCall(PetscBagGetData(ctx->bag, (void **)&p));
260: PetscCall(PetscBagSetName(ctx->bag, "par", "Stokes Parameters"));
261: PetscCall(PetscBagRegisterScalar(ctx->bag, &p->mu, 1.0, "mu", "Dynamic Shear Viscosity, Pa s"));
262: PetscCall(PetscBagSetFromOptions(ctx->bag));
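  /* Bag entries can be overridden from the command line (presumably -mu <value>, assuming the bag keeps
     the default empty options prefix) and printed with -param_view, handled just below */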
263: {
264: PetscViewer viewer;
265: PetscViewerFormat format;
266: PetscBool flg;
268: PetscCall(PetscOptionsCreateViewer(comm, NULL, NULL, "-param_view", &viewer, &format, &flg));
269: if (flg) {
270: PetscCall(PetscViewerPushFormat(viewer, format));
271: PetscCall(PetscBagView(ctx->bag, viewer));
272: PetscCall(PetscViewerFlush(viewer));
273: PetscCall(PetscViewerPopFormat(viewer));
274: PetscCall(PetscViewerDestroy(&viewer));
275: }
276: }
277: PetscFunctionReturn(PETSC_SUCCESS);
278: }
280: static PetscErrorCode SetupEqn(DM dm, AppCtx *user)
281: {
282: PetscErrorCode (*exactFuncs[2])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar *, void *);
283: PetscDS ds;
284: DMLabel label;
285: const PetscInt id = 1;
287: PetscFunctionBeginUser;
288: PetscCall(DMGetDS(dm, &ds));
289: switch (user->sol) {
290: case SOL_QUADRATIC:
291: PetscCall(PetscDSSetResidual(ds, 0, f0_quadratic_u, f1_u));
292: exactFuncs[0] = quadratic_u;
293: exactFuncs[1] = quadratic_p;
294: break;
295: case SOL_TRIG:
296: PetscCall(PetscDSSetResidual(ds, 0, f0_trig_u, f1_u));
297: exactFuncs[0] = trig_u;
298: exactFuncs[1] = trig_p;
299: break;
300: default:
301: SETERRQ(PetscObjectComm((PetscObject)dm), PETSC_ERR_ARG_WRONG, "Unsupported solution type: %s (%d)", SolTypes[PetscMin(user->sol, SOL_UNKNOWN)], user->sol);
302: }
303: PetscCall(PetscDSSetResidual(ds, 1, f0_p, NULL));
304: PetscCall(PetscDSSetJacobian(ds, 0, 0, NULL, NULL, NULL, g3_uu));
305: PetscCall(PetscDSSetJacobian(ds, 0, 1, NULL, NULL, g2_up, NULL));
306: PetscCall(PetscDSSetJacobian(ds, 1, 0, NULL, g1_pu, NULL, NULL));
307: PetscCall(PetscDSSetJacobianPreconditioner(ds, 0, 0, NULL, NULL, NULL, g3_uu));
308: PetscCall(PetscDSSetJacobianPreconditioner(ds, 1, 1, g0_pp, NULL, NULL, NULL));
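  /* Preconditioning matrix: the (velocity, velocity) block reuses g3_uu, and the (pressure, pressure)
     block is the weighted pressure mass matrix g0_pp; the tests below select it for the Schur complement
     with -pc_fieldsplit_schur_precondition a11 */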
310: PetscCall(PetscDSSetExactSolution(ds, 0, exactFuncs[0], user));
311: PetscCall(PetscDSSetExactSolution(ds, 1, exactFuncs[1], user));
313: PetscCall(DMGetLabel(dm, "marker", &label));
314: PetscCall(DMAddBoundary(dm, DM_BC_ESSENTIAL, "wall", label, 1, &id, 0, 0, NULL, (void (*)(void))exactFuncs[0], NULL, user, NULL));
316: /* Make constant values available to pointwise functions */
317: {
318: Parameter *param;
319: PetscScalar constants[1];
321: PetscCall(PetscBagGetData(user->bag, (void **)&param));
322: constants[0] = param->mu; /* dynamic shear viscosity, Pa s */
323: PetscCall(PetscDSSetConstants(ds, 1, constants));
324: }
325: PetscFunctionReturn(PETSC_SUCCESS);
326: }
328: static PetscErrorCode zero(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
329: {
330: PetscInt c;
331: for (c = 0; c < Nc; ++c) u[c] = 0.0;
332: return PETSC_SUCCESS;
333: }
334: static PetscErrorCode one(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
335: {
336: PetscInt c;
337: for (c = 0; c < Nc; ++c) u[c] = 1.0;
338: return PETSC_SUCCESS;
339: }
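/* With essential conditions on the velocity along the boundary, the pressure is determined only up to an
   additive constant, so the constant pressure vector built here spans the null space that is attached to
   the Jacobian in main(). */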
341: static PetscErrorCode CreatePressureNullSpace(DM dm, PetscInt origField, PetscInt field, MatNullSpace *nullspace)
342: {
343: Vec vec;
344: PetscErrorCode (*funcs[2])(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx) = {zero, one};
346: PetscFunctionBeginUser;
347: PetscCheck(origField == 1, PetscObjectComm((PetscObject)dm), PETSC_ERR_ARG_WRONG, "Field %" PetscInt_FMT " should be 1 for pressure", origField);
348: funcs[field] = one;
349: {
350: PetscDS ds;
351: PetscCall(DMGetDS(dm, &ds));
352: PetscCall(PetscObjectViewFromOptions((PetscObject)ds, NULL, "-ds_view"));
353: }
354: PetscCall(DMCreateGlobalVector(dm, &vec));
355: PetscCall(DMProjectFunction(dm, 0.0, funcs, NULL, INSERT_ALL_VALUES, vec));
356: PetscCall(VecNormalize(vec, NULL));
357: PetscCall(MatNullSpaceCreate(PetscObjectComm((PetscObject)dm), PETSC_FALSE, 1, &vec, nullspace));
358: PetscCall(VecDestroy(&vec));
359: /* New style for field null spaces */
360: {
361: PetscObject pressure;
362: MatNullSpace nullspacePres;
364: PetscCall(DMGetField(dm, field, NULL, &pressure));
365: PetscCall(MatNullSpaceCreate(PetscObjectComm(pressure), PETSC_TRUE, 0, NULL, &nullspacePres));
366: PetscCall(PetscObjectCompose(pressure, "nullspace", (PetscObject)nullspacePres));
367: PetscCall(MatNullSpaceDestroy(&nullspacePres));
368: }
369: PetscFunctionReturn(PETSC_SUCCESS);
370: }
372: static PetscErrorCode SetupProblem(DM dm, PetscErrorCode (*setupEqn)(DM, AppCtx *), AppCtx *user)
373: {
374: DM cdm = dm;
375: PetscQuadrature q = NULL;
376: PetscBool simplex;
377: PetscInt dim, Nf = 2, f, Nc[2];
378: const char *name[2] = {"velocity", "pressure"};
379: const char *prefix[2] = {"vel_", "pres_"};
381: PetscFunctionBegin;
382: PetscCall(DMGetDimension(dm, &dim));
383: PetscCall(DMPlexIsSimplex(dm, &simplex));
384: Nc[0] = dim;
385: Nc[1] = 1;
386: for (f = 0; f < Nf; ++f) {
387: PetscFE fe;
389: PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, Nc[f], simplex, prefix[f], -1, &fe));
390: PetscCall(PetscObjectSetName((PetscObject)fe, name[f]));
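    /* Reuse the quadrature of the first field for every field so that mixed velocity-pressure terms are
       integrated with a single consistent rule */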
391: if (!q) PetscCall(PetscFEGetQuadrature(fe, &q));
392: PetscCall(PetscFESetQuadrature(fe, q));
393: PetscCall(DMSetField(dm, f, NULL, (PetscObject)fe));
394: PetscCall(PetscFEDestroy(&fe));
395: }
396: PetscCall(DMCreateDS(dm));
397: PetscCall((*setupEqn)(dm, user));
398: while (cdm) {
399: PetscCall(DMCopyDisc(dm, cdm));
400: PetscCall(DMSetNullSpaceConstructor(cdm, 1, CreatePressureNullSpace));
401: PetscCall(DMGetCoarseDM(cdm, &cdm));
402: }
403: PetscFunctionReturn(PETSC_SUCCESS);
404: }
406: int main(int argc, char **argv)
407: {
408: SNES snes;
409: DM dm;
410: Vec u;
411: AppCtx user;
413: PetscFunctionBeginUser;
414: PetscCall(PetscInitialize(&argc, &argv, NULL, help));
415: PetscCall(ProcessOptions(PETSC_COMM_WORLD, &user));
416: PetscCall(CreateMesh(PETSC_COMM_WORLD, &user, &dm));
417: PetscCall(SNESCreate(PetscObjectComm((PetscObject)dm), &snes));
418: PetscCall(SNESSetDM(snes, dm));
419: PetscCall(DMSetApplicationContext(dm, &user));
421: PetscCall(SetupParameters(PETSC_COMM_WORLD, &user));
422: PetscCall(SetupProblem(dm, SetupEqn, &user));
423: PetscCall(DMPlexCreateClosureIndex(dm, NULL));
425: PetscCall(DMCreateGlobalVector(dm, &u));
426: PetscCall(DMPlexSetSNESLocalFEM(dm, PETSC_FALSE, &user));
427: PetscCall(SNESSetFromOptions(snes));
428: PetscCall(DMSNESCheckFromOptions(snes, u));
429: PetscCall(PetscObjectSetName((PetscObject)u, "Solution"));
430: {
431: Mat J;
432: MatNullSpace sp;
434: PetscCall(SNESSetUp(snes));
435: PetscCall(CreatePressureNullSpace(dm, 1, 1, &sp));
436: PetscCall(SNESGetJacobian(snes, &J, NULL, NULL, NULL));
437: PetscCall(MatSetNullSpace(J, sp));
438: PetscCall(MatNullSpaceDestroy(&sp));
439: PetscCall(PetscObjectSetName((PetscObject)J, "Jacobian"));
440: PetscCall(MatViewFromOptions(J, NULL, "-J_view"));
441: }
442: PetscCall(SNESSolve(snes, NULL, u));
444: PetscCall(VecDestroy(&u));
445: PetscCall(SNESDestroy(&snes));
446: PetscCall(DMDestroy(&dm));
447: PetscCall(PetscBagDestroy(&user.bag));
448: PetscCall(PetscFinalize());
449: return 0;
450: }
451: /*TEST
453: test:
454: suffix: 2d_p2_p1_check
455: requires: triangle
456: args: -sol quadratic -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
458: test:
459: suffix: 2d_p2_p1_check_parallel
460: nsize: {{2 3 5}}
461: requires: triangle
462: args: -sol quadratic -dm_refine 2 -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
464: test:
465: suffix: 3d_p2_p1_check
466: requires: ctetgen
467: args: -sol quadratic -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
469: test:
470: suffix: 3d_p2_p1_check_parallel
471: nsize: {{2 3 5}}
472: requires: ctetgen
473: args: -sol quadratic -dm_refine 0 -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
475: test:
476: suffix: 2d_p2_p1_conv
477: requires: triangle
478: # Using -dm_refine 3 gives L_2 convergence rate: [3.0, 2.1]
479: args: -sol trig -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 2 -ksp_error_if_not_converged \
480: -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
481: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
482: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
484: test:
485: suffix: 2d_p2_p1_conv_gamg
486: requires: triangle
487: args: -sol trig -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 2 \
488: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \
489: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_explicit_operator_mat_type aij -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_coarse_pc_type svd
491: test:
492: suffix: 3d_p2_p1_conv
493: requires: ctetgen !single
494: # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [2.8, 2.8]
495: args: -sol trig -dm_plex_dim 3 -dm_refine 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 \
496: -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
497: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
498: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
500: test:
501: suffix: 2d_q2_q1_check
502: args: -sol quadratic -dm_plex_simplex 0 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
504: test:
505: suffix: 3d_q2_q1_check
506: args: -sol quadratic -dm_plex_simplex 0 -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
508: test:
509: suffix: 2d_q2_q1_conv
510: # Using -dm_refine 3 -convest_num_refine 1 gives L_2 convergence rate: [3.0, 2.1]
511: args: -sol trig -dm_plex_simplex 0 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 -ksp_error_if_not_converged \
512: -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
513: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
514: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
516: test:
517: suffix: 3d_q2_q1_conv
518: requires: !single
519: # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [2.8, 2.4]
520: args: -sol trig -dm_plex_simplex 0 -dm_plex_dim 3 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 \
521: -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
522: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
523: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
525: test:
526: suffix: 2d_p3_p2_check
527: requires: triangle
528: args: -sol quadratic -vel_petscspace_degree 3 -pres_petscspace_degree 2 -dmsnes_check 0.0001
530: test:
531: suffix: 3d_p3_p2_check
532: requires: ctetgen !single
533: args: -sol quadratic -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 3 -pres_petscspace_degree 2 -dmsnes_check 0.0001
535: test:
536: suffix: 2d_p3_p2_conv
537: requires: triangle
538: # Using -dm_refine 2 gives L_2 convergence rate: [3.8, 3.0]
539: args: -sol trig -vel_petscspace_degree 3 -pres_petscspace_degree 2 -snes_convergence_estimate -convest_num_refine 2 -ksp_error_if_not_converged \
540: -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
541: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
542: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
544: test:
545: suffix: 3d_p3_p2_conv
546: requires: ctetgen long_runtime
547: # Using -dm_refine 1 -convest_num_refine 2 gives L_2 convergence rate: [3.6, 3.9]
548: args: -sol trig -dm_plex_dim 3 -dm_refine 1 -vel_petscspace_degree 3 -pres_petscspace_degree 2 -snes_convergence_estimate -convest_num_refine 2 \
549: -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
550: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
551: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
553: test:
554: suffix: 2d_q1_p0_conv
555: requires: !single
556: # Using -dm_refine 3 gives L_2 convergence rate: [1.9, 1.0]
557: args: -sol quadratic -dm_plex_simplex 0 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -snes_convergence_estimate -convest_num_refine 2 \
558: -ksp_atol 1e-10 -petscds_jac_pre 0 \
559: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \
560: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_explicit_operator_mat_type aij -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_levels_pc_type jacobi -fieldsplit_pressure_mg_coarse_pc_type svd -fieldsplit_pressure_pc_gamg_aggressive_coarsening 0
562: test:
563: suffix: 3d_q1_p0_conv
564: requires: !single
565: # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [1.7, 1.0]
566: args: -sol quadratic -dm_plex_simplex 0 -dm_plex_dim 3 -dm_refine 1 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -snes_convergence_estimate -convest_num_refine 1 \
567: -ksp_atol 1e-10 -petscds_jac_pre 0 \
568: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \
569: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_explicit_operator_mat_type aij -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_levels_pc_type jacobi -fieldsplit_pressure_mg_coarse_pc_type svd -fieldsplit_pressure_pc_gamg_aggressive_coarsening 0
571: # Stokes preconditioners
572: # Block diagonal \begin{pmatrix} A & 0 \\ 0 & I \end{pmatrix}
573: test:
574: suffix: 2d_p2_p1_block_diagonal
575: requires: triangle
576: args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
577: -snes_error_if_not_converged \
578: -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-4 -ksp_error_if_not_converged \
579: -pc_type fieldsplit -pc_fieldsplit_type additive -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi
580: # Block triangular \begin{pmatrix} A & B \\ 0 & I \end{pmatrix}
581: test:
582: suffix: 2d_p2_p1_block_triangular
583: requires: triangle
584: args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
585: -snes_error_if_not_converged \
586: -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \
587: -pc_type fieldsplit -pc_fieldsplit_type multiplicative -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi
588: # Diagonal Schur complement \begin{pmatrix} A & 0 \\ 0 & S \end{pmatrix}
589: test:
590: suffix: 2d_p2_p1_schur_diagonal
591: requires: triangle
592: args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
593: -snes_error_if_not_converged \
594: -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
595: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type diag -pc_fieldsplit_off_diag_use_amat \
596: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
597: # Upper triangular Schur complement \begin{pmatrix} A & B \\ 0 & S \end{pmatrix}
598: test:
599: suffix: 2d_p2_p1_schur_upper
600: requires: triangle
601: args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001 \
602: -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
603: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type upper -pc_fieldsplit_off_diag_use_amat \
604: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
605: # Lower triangular Schur complement \begin{pmatrix} A & 0 \\ B^T & S \end{pmatrix}
606: test:
607: suffix: 2d_p2_p1_schur_lower
608: requires: triangle
609: args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
610: -snes_error_if_not_converged \
611: -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
612: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type lower -pc_fieldsplit_off_diag_use_amat \
613: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
614: # Full Schur complement \begin{pmatrix} I & 0 \\ B^T A^{-1} & I \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & S \end{pmatrix} \begin{pmatrix} I & A^{-1} B \\ 0 & I \end{pmatrix}
615: test:
616: suffix: 2d_p2_p1_schur_full
617: requires: triangle
618: args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
619: -snes_error_if_not_converged \
620: -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
621: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full -pc_fieldsplit_off_diag_use_amat \
622: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
623: # Full Schur + Velocity GMG
624: test:
625: suffix: 2d_p2_p1_gmg_vcycle
626: TODO: broken (requires subDMs hooks)
627: requires: triangle
628: args: -sol quadratic -dm_refine_hierarchy 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
629: -ksp_type fgmres -ksp_atol 1e-9 -snes_error_if_not_converged -pc_use_amat \
630: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_off_diag_use_amat \
631: -fieldsplit_velocity_pc_type mg -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_pc_gamg_esteig_ksp_max_it 10 -fieldsplit_pressure_mg_levels_pc_type sor -fieldsplit_pressure_mg_coarse_pc_type svd
632: # SIMPLE \begin{pmatrix} I & 0 \\ B^T A^{-1} & I \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & B^T diag(A)^{-1} B \end{pmatrix} \begin{pmatrix} I & diag(A)^{-1} B \\ 0 & I \end{pmatrix}
633: test:
634: suffix: 2d_p2_p1_simple
635: requires: triangle
636: args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
637: -snes_error_if_not_converged \
638: -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \
639: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full \
640: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi \
641: -fieldsplit_pressure_inner_ksp_type preonly -fieldsplit_pressure_inner_pc_type jacobi -fieldsplit_pressure_upper_ksp_type preonly -fieldsplit_pressure_upper_pc_type jacobi
642: # FETI-DP solvers (these solvers are quite inefficient, they are here to exercise the code)
643: test:
644: suffix: 2d_p2_p1_fetidp
645: requires: triangle mumps
646: nsize: 5
647: args: -sol quadratic -dm_refine 2 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
648: -snes_error_if_not_converged \
649: -ksp_type fetidp -ksp_rtol 1.0e-8 \
650: -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
651: -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 200 -fetidp_fieldsplit_p_pc_type none \
652: -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type mumps -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type mumps -fetidp_fieldsplit_lag_ksp_type preonly
653: test:
654: suffix: 2d_q2_q1_fetidp
655: requires: mumps
656: nsize: 5
657: args: -sol quadratic -dm_plex_simplex 0 -dm_refine 2 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
658: -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_error_if_not_converged \
659: -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
660: -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 200 -fetidp_fieldsplit_p_pc_type none \
661: -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type mumps -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type mumps -fetidp_fieldsplit_lag_ksp_type preonly
662: test:
663: suffix: 3d_p2_p1_fetidp
664: requires: ctetgen mumps suitesparse
665: nsize: 5
666: args: -sol quadratic -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
667: -snes_error_if_not_converged \
668: -ksp_type fetidp -ksp_rtol 1.0e-9 \
669: -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
670: -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 1000 -fetidp_fieldsplit_p_pc_type none \
671: -fetidp_bddc_pc_bddc_use_deluxe_scaling -fetidp_bddc_pc_bddc_benign_trick -fetidp_bddc_pc_bddc_deluxe_singlemat \
672: -fetidp_pc_discrete_harmonic -fetidp_harmonic_pc_factor_mat_solver_type petsc -fetidp_harmonic_pc_type cholesky \
673: -fetidp_bddelta_pc_factor_mat_solver_type umfpack -fetidp_fieldsplit_lag_ksp_type preonly -fetidp_bddc_sub_schurs_mat_solver_type mumps -fetidp_bddc_sub_schurs_mat_mumps_icntl_14 100000 \
674: -fetidp_bddelta_pc_factor_mat_ordering_type external \
675: -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type umfpack -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type umfpack \
676: -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_ordering_type external -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_ordering_type external
677: test:
678: suffix: 3d_q2_q1_fetidp
679: requires: suitesparse
680: nsize: 5
681: args: -sol quadratic -dm_plex_simplex 0 -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
682: -snes_error_if_not_converged \
683: -ksp_type fetidp -ksp_rtol 1.0e-8 \
684: -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
685: -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 2000 -fetidp_fieldsplit_p_pc_type none \
686: -fetidp_pc_discrete_harmonic -fetidp_harmonic_pc_factor_mat_solver_type petsc -fetidp_harmonic_pc_type cholesky \
687: -fetidp_bddc_pc_bddc_symmetric -fetidp_fieldsplit_lag_ksp_type preonly \
688: -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type umfpack -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type umfpack \
689: -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_ordering_type external -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_ordering_type external
690: # BDDC solvers (these solvers are quite inefficient, they are here to exercise the code)
691: test:
692: suffix: 2d_p2_p1_bddc
693: nsize: 2
694: requires: triangle !single
695: args: -sol quadratic -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
696: -snes_error_if_not_converged \
697: -ksp_type gmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-8 -ksp_error_if_not_converged \
698: -pc_type bddc -pc_bddc_corner_selection -pc_bddc_dirichlet_pc_type svd -pc_bddc_neumann_pc_type svd -pc_bddc_coarse_redundant_pc_type svd
699: # Vanka
700: test:
701: suffix: 2d_q1_p0_vanka
702: requires: double !complex
703: args: -sol quadratic -dm_plex_simplex 0 -dm_refine 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
704: -snes_rtol 1.0e-4 \
705: -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
706: -pc_type patch -pc_patch_partition_of_unity 0 -pc_patch_construct_codim 0 -pc_patch_construct_type vanka \
707: -sub_ksp_type preonly -sub_pc_type lu
708: test:
709: suffix: 2d_q1_p0_vanka_denseinv
710: requires: double !complex
711: args: -sol quadratic -dm_plex_simplex 0 -dm_refine 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
712: -snes_rtol 1.0e-4 \
713: -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
714: -pc_type patch -pc_patch_partition_of_unity 0 -pc_patch_construct_codim 0 -pc_patch_construct_type vanka \
715: -pc_patch_dense_inverse -pc_patch_sub_mat_type seqdense
716: # Vanka smoother
717: test:
718: suffix: 2d_q1_p0_gmg_vanka
719: requires: double !complex
720: args: -sol quadratic -dm_plex_simplex 0 -dm_refine_hierarchy 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
721: -snes_rtol 1.0e-4 \
722: -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
723: -pc_type mg \
724: -mg_levels_ksp_type gmres -mg_levels_ksp_max_it 30 \
725: -mg_levels_pc_type patch -mg_levels_pc_patch_partition_of_unity 0 -mg_levels_pc_patch_construct_codim 0 -mg_levels_pc_patch_construct_type vanka \
726: -mg_levels_sub_ksp_type preonly -mg_levels_sub_pc_type lu \
727: -mg_coarse_pc_type svd
729: TEST*/