/* Actual source code: ex62.c */
/* Help text printed by the -help option: one-line description of this example */
static char help[] = "Stokes Problem discretized with finite elements,\n\
using a parallel unstructured mesh (DMPLEX) to represent the domain.\n\n\n";
4: /*
5: For the isoviscous Stokes problem, which we discretize using the finite
6: element method on an unstructured mesh, the weak form equations are
8: < \nabla v, \nabla u + {\nabla u}^T > - < \nabla\cdot v, p > - < v, f > = 0
9: < q, -\nabla\cdot u > = 0
11: Viewing:
13: To produce nice output, use
15: -dm_refine 3 -dm_view hdf5:sol1.h5 -error_vec_view hdf5:sol1.h5::append -snes_view_solution hdf5:sol1.h5::append -exact_vec_view hdf5:sol1.h5::append
17: You can get a LaTeX view of the mesh, with point numbering using
19: -dm_view :mesh.tex:ascii_latex -dm_plex_view_scale 8.0
21: The data layout can be viewed using
23: -dm_petscsection_view
25: Lots of information about the FEM assembly can be printed using
27: -dm_plex_print_fem 3
28: */
30: #include <petscdmplex.h>
31: #include <petscsnes.h>
32: #include <petscds.h>
33: #include <petscbag.h>
35: // TODO: Plot residual by fields after each smoother iterate
/* Available manufactured (MMS) solutions */
typedef enum {
  SOL_QUADRATIC,
  SOL_TRIG,
  SOL_UNKNOWN
} SolType;
/* The trailing "SolType", "SOL_", 0 entries follow the PetscEnum string-array
   convention; ProcessOptions subtracts 3 from the array length accordingly */
const char *SolTypes[] = {"quadratic", "trig", "unknown", "SolType", "SOL_", 0};
/* How the velocity boundary condition is enforced */
typedef enum {
  BC_ESSENTIAL, /* strong Dirichlet via constrained dofs */
  BC_NITSCHE,   /* weak Dirichlet via Nitsche boundary terms */
  BC_UNKNOWN
} BCType;
/* Trailing entries follow the PetscEnum convention (see SolTypes) */
const char *BCTypes[] = {"essential", "nitsche", "unknown", "BCType", "BC_", 0};
/* Physical parameters stored in a PetscBag so they can be set from options */
typedef struct {
  PetscScalar mu;  /* dynamic shear viscosity, Pa s */
  PetscScalar eta; /* Nitsche penalty parameter (dimensionless) */
} Parameter;

/* Application context passed to setup routines and callbacks */
typedef struct {
  PetscBag bag; /* Problem parameters */
  SolType  sol; /* MMS solution */
  BCType   bc;  /* Boundary condition type */
} AppCtx;
/* Momentum residual flux term:
     f1[c*dim+d] = mu (d_d u_c + d_c u_d) - p delta_{cd}
   i.e. the symmetric viscous stress < \nabla v, mu (\nabla u + {\nabla u}^T) >
   with the pressure term -< \nabla\cdot v, p > folded onto the diagonal. */
static void f1_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f1[])
{
  const PetscReal mu = PetscRealPart(constants[0]); /* constants[0] = dynamic viscosity */
  const PetscInt  Nc = uOff[1] - uOff[0];           /* number of velocity components */
  PetscInt        c, d;

  for (c = 0; c < Nc; ++c) {
    for (d = 0; d < dim; ++d) f1[c * dim + d] = mu * (u_x[c * dim + d] + u_x[d * dim + c]);
    f1[c * dim + c] -= u[uOff[1]]; /* pressure is field 1, starting at offset uOff[1] */
  }
}
74: static void f0_p(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
75: {
76: PetscInt d;
77: for (d = 0, f0[0] = 0.0; d < dim; ++d) f0[0] -= u_x[d * dim + d];
78: }
80: static void g1_pu(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g1[])
81: {
82: PetscInt d;
83: for (d = 0; d < dim; ++d) g1[d * dim + d] = -1.0; /* < q, -\nabla\cdot u > */
84: }
86: static void g2_up(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g2[])
87: {
88: PetscInt d;
89: for (d = 0; d < dim; ++d) g2[d * dim + d] = -1.0; /* -< \nabla\cdot v, p > */
90: }
/* Velocity-velocity Jacobian of the viscous term. Rank-4 layout:
     g3[((c*Nc+d)*dim+e)*dim+f] = dF1_{ce}/d(grad u)_{df}
   The first line accumulates < \nabla v, mu \nabla u >, the second the
   transposed < \nabla v, mu {\nabla u}^T >.
   NOTE(review): this kernel accumulates with += (the two terms can hit the
   same entry when c == d), which assumes g3 arrives zeroed — the pointwise
   kernels elsewhere in this file assign with =; confirm the caller zeroes. */
static void g3_uu(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g3[])
{
  const PetscReal mu = PetscRealPart(constants[0]); /* dynamic viscosity */
  const PetscInt  Nc = uOff[1] - uOff[0];           /* velocity components */
  PetscInt        c, d;

  for (c = 0; c < Nc; ++c) {
    for (d = 0; d < dim; ++d) {
      g3[((c * Nc + c) * dim + d) * dim + d] += mu; /* < \nabla v, \nabla u > */
      g3[((c * Nc + d) * dim + d) * dim + c] += mu; /* < \nabla v, {\nabla u}^T > */
    }
  }
}
106: static void g0_pp(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
107: {
108: const PetscReal mu = PetscRealPart(constants[0]);
110: g0[0] = 1.0 / mu;
111: }
113: /* Quadratic MMS Solution
114: 2D:
116: u = x^2 + y^2
117: v = 2 x^2 - 2xy
118: p = x + y - 1
119: f = <1 - 4 mu, 1 - 4 mu>
121: so that
123: e(u) = (grad u + grad u^T) = / 4x 4x \
124: \ 4x -4x /
125: div mu e(u) - \nabla p + f = mu <4, 4> - <1, 1> + <1 - 4 mu, 1 - 4 mu> = 0
126: \nabla \cdot u = 2x - 2x = 0
128: 3D:
130: u = 2 x^2 + y^2 + z^2
131: v = 2 x^2 - 2xy
132: w = 2 x^2 - 2xz
133: p = x + y + z - 3/2
134: f = <1 - 8 mu, 1 - 4 mu, 1 - 4 mu>
136: so that
138: e(u) = (grad u + grad u^T) = / 8x 4x 4x \
139: | 4x -4x 0 |
140: \ 4x 0 -4x /
141: div mu e(u) - \nabla p + f = mu <8, 4, 4> - <1, 1, 1> + <1 - 8 mu, 1 - 4 mu, 1 - 4 mu> = 0
142: \nabla \cdot u = 4x - 2x - 2x = 0
143: */
144: static PetscErrorCode quadratic_u(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, PetscCtx ctx)
145: {
146: PetscInt c;
148: u[0] = (dim - 1) * PetscSqr(x[0]);
149: for (c = 1; c < Nc; ++c) {
150: u[0] += PetscSqr(x[c]);
151: u[c] = 2.0 * PetscSqr(x[0]) - 2.0 * x[0] * x[c];
152: }
153: return PETSC_SUCCESS;
154: }
156: static PetscErrorCode quadratic_p(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, PetscCtx ctx)
157: {
158: PetscInt d;
160: u[0] = -0.5 * dim;
161: for (d = 0; d < dim; ++d) u[0] += x[d];
162: return PETSC_SUCCESS;
163: }
165: static void f0_quadratic_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
166: {
167: const PetscReal mu = PetscRealPart(constants[0]);
168: PetscInt d;
170: f0[0] = (dim - 1) * 4.0 * mu - 1.0;
171: for (d = 1; d < dim; ++d) f0[d] = 4.0 * mu - 1.0;
172: }
174: /* Trigonometric MMS Solution
175: 2D:
177: u = sin(pi x) + sin(pi y)
178: v = -pi cos(pi x) y
179: p = sin(2 pi x) + sin(2 pi y)
180: f = <2pi cos(2 pi x) + mu pi^2 sin(pi x) + mu pi^2 sin(pi y), 2pi cos(2 pi y) - mu pi^3 cos(pi x) y>
182: so that
184: e(u) = (grad u + grad u^T) = / 2pi cos(pi x) pi cos(pi y) + pi^2 sin(pi x) y \
185: \ pi cos(pi y) + pi^2 sin(pi x) y -2pi cos(pi x) /
186: div mu e(u) - \nabla p + f = mu <-pi^2 sin(pi x) - pi^2 sin(pi y), pi^3 cos(pi x) y> - <2pi cos(2 pi x), 2pi cos(2 pi y)> + <f_x, f_y> = 0
187: \nabla \cdot u = pi cos(pi x) - pi cos(pi x) = 0
189: 3D:
191: u = 2 sin(pi x) + sin(pi y) + sin(pi z)
192: v = -pi cos(pi x) y
193: w = -pi cos(pi x) z
194: p = sin(2 pi x) + sin(2 pi y) + sin(2 pi z)
195: f = <2pi cos(2 pi x) + mu 2pi^2 sin(pi x) + mu pi^2 sin(pi y) + mu pi^2 sin(pi z), 2pi cos(2 pi y) - mu pi^3 cos(pi x) y, 2pi cos(2 pi z) - mu pi^3 cos(pi x) z>
197: so that
199: e(u) = (grad u + grad u^T) = / 4pi cos(pi x) pi cos(pi y) + pi^2 sin(pi x) y pi cos(pi z) + pi^2 sin(pi x) z \
200: | pi cos(pi y) + pi^2 sin(pi x) y -2pi cos(pi x) 0 |
201: \ pi cos(pi z) + pi^2 sin(pi x) z 0 -2pi cos(pi x) /
202: div mu e(u) - \nabla p + f = mu <-2pi^2 sin(pi x) - pi^2 sin(pi y) - pi^2 sin(pi z), pi^3 cos(pi x) y, pi^3 cos(pi x) z> - <2pi cos(2 pi x), 2pi cos(2 pi y), 2pi cos(2 pi z)> + <f_x, f_y, f_z> = 0
203: \nabla \cdot u = 2 pi cos(pi x) - pi cos(pi x) - pi cos(pi x) = 0
204: */
205: static PetscErrorCode trig_u(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, PetscCtx ctx)
206: {
207: PetscInt c;
209: u[0] = (dim - 1) * PetscSinReal(PETSC_PI * x[0]);
210: for (c = 1; c < Nc; ++c) {
211: u[0] += PetscSinReal(PETSC_PI * x[c]);
212: u[c] = -PETSC_PI * PetscCosReal(PETSC_PI * x[0]) * x[c];
213: }
214: return PETSC_SUCCESS;
215: }
217: static PetscErrorCode trig_p(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, PetscCtx ctx)
218: {
219: PetscInt d;
221: for (d = 0, u[0] = 0.0; d < dim; ++d) u[0] += PetscSinReal(2.0 * PETSC_PI * x[d]);
222: return PETSC_SUCCESS;
223: }
/* Momentum f0 for the trigonometric MMS: the negated body force f from the
   header derivation, so that the exact solution satisfies the residual. */
static void f0_trig_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
{
  const PetscReal mu = PetscRealPart(constants[0]); /* dynamic viscosity */
  PetscInt        d;

  /* -f_x: pressure gradient term plus the Laplacian of u_0 */
  f0[0] = -2.0 * PETSC_PI * PetscCosReal(2.0 * PETSC_PI * x[0]) - (dim - 1) * mu * PetscSqr(PETSC_PI) * PetscSinReal(PETSC_PI * x[0]);
  for (d = 1; d < dim; ++d) {
    f0[0] -= mu * PetscSqr(PETSC_PI) * PetscSinReal(PETSC_PI * x[d]);
    f0[d] = -2.0 * PETSC_PI * PetscCosReal(2.0 * PETSC_PI * x[d]) + mu * PetscPowRealInt(PETSC_PI, 3) * PetscCosReal(PETSC_PI * x[0]) * x[d];
  }
}
/* Inline helpers computing the exact velocity inside the (void-returning) boundary kernels */
238: static inline void ExactVelocityQuadratic(PetscInt dim, const PetscReal x[], PetscScalar g[])
239: {
240: PetscInt c;
242: g[0] = (dim - 1) * x[0] * x[0];
243: for (c = 1; c < dim; ++c) {
244: g[0] += x[c] * x[c];
245: g[c] = 2.0 * x[0] * x[0] - 2.0 * x[0] * x[c];
246: }
247: }
249: static inline void ExactVelocityTrig(PetscInt dim, const PetscReal x[], PetscScalar g[])
250: {
251: PetscInt c;
253: g[0] = (dim - 1) * PetscSinReal(PETSC_PI * x[0]);
254: for (c = 1; c < dim; ++c) {
255: g[0] += PetscSinReal(PETSC_PI * x[c]);
256: g[c] = -PETSC_PI * PetscCosReal(PETSC_PI * x[0]) * x[c];
257: }
258: }
260: /* Nitsche boundary residual kernels for velocity (field 0)
261: f0_bd_u[c] = -mu * sum_d (u_x[c*dim+d] + u_x[d*dim+c]) * n[d] (consistency: stress flux from IBP)
262: + p * n[c] (pressure flux from IBP)
263: + penalty * (u[c] - g[c]) (penalty) */
/* Nitsche velocity boundary residual (quadratic MMS):
     f0[c] = penalty (u_c - g_c) + p n_c - mu sum_d (d_d u_c + d_c u_d) n_d
   combining the penalty, pressure-flux, and stress-consistency terms. */
static void f0_bd_nitsche_quadratic_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], const PetscReal n[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
{
  const PetscReal mu      = PetscRealPart(constants[0]); /* dynamic viscosity */
  const PetscReal penalty = PetscRealPart(constants[1]); /* eta*mu/h, computed in SetupEqn */
  PetscScalar     g[3];                                  /* exact boundary velocity */
  PetscInt        c, d;

  ExactVelocityQuadratic(dim, x, g);
  for (c = 0; c < dim; ++c) {
    f0[c] = penalty * (u[c] - g[c]) + u[uOff[1]] * n[c]; /* penalty + pressure flux */
    for (d = 0; d < dim; ++d) f0[c] -= mu * (u_x[c * dim + d] + u_x[d * dim + c]) * n[d]; /* stress consistency */
  }
}
/* Nitsche velocity boundary residual (trigonometric MMS); identical structure
   to f0_bd_nitsche_quadratic_u but with the trigonometric exact velocity. */
static void f0_bd_nitsche_trig_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], const PetscReal n[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
{
  const PetscReal mu      = PetscRealPart(constants[0]); /* dynamic viscosity */
  const PetscReal penalty = PetscRealPart(constants[1]); /* eta*mu/h, computed in SetupEqn */
  PetscScalar     g[3];                                  /* exact boundary velocity */
  PetscInt        c, d;

  ExactVelocityTrig(dim, x, g);
  for (c = 0; c < dim; ++c) {
    f0[c] = penalty * (u[c] - g[c]) + u[uOff[1]] * n[c]; /* penalty + pressure flux */
    for (d = 0; d < dim; ++d) f0[c] -= mu * (u_x[c * dim + d] + u_x[d * dim + c]) * n[d]; /* stress consistency */
  }
}
292: /* f1_bd_u[c*dim+d] = -mu * (n[d]*(u[c]-g[c]) + n[c]*(u[d]-g[d])) (symmetry / adjoint consistency) */
/* Nitsche symmetry (adjoint-consistency) term, quadratic MMS:
     f1[c*dim+d] = -mu (n_d (u_c - g_c) + n_c (u_d - g_d)) */
static void f1_bd_nitsche_quadratic_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], const PetscReal n[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f1[])
{
  const PetscReal mu = PetscRealPart(constants[0]); /* dynamic viscosity */
  PetscScalar     g[3];                             /* exact boundary velocity */
  PetscInt        c, d;

  ExactVelocityQuadratic(dim, x, g);
  for (c = 0; c < dim; ++c)
    for (d = 0; d < dim; ++d) f1[c * dim + d] = -mu * (n[d] * (u[c] - g[c]) + n[c] * (u[d] - g[d]));
}
/* Nitsche symmetry (adjoint-consistency) term, trigonometric MMS;
   same formula as the quadratic variant with the trig exact velocity. */
static void f1_bd_nitsche_trig_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], const PetscReal n[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f1[])
{
  const PetscReal mu = PetscRealPart(constants[0]); /* dynamic viscosity */
  PetscScalar     g[3];                             /* exact boundary velocity */
  PetscInt        c, d;

  ExactVelocityTrig(dim, x, g);
  for (c = 0; c < dim; ++c)
    for (d = 0; d < dim; ++d) f1[c * dim + d] = -mu * (n[d] * (u[c] - g[c]) + n[c] * (u[d] - g[d]));
}
315: /* Nitsche boundary residual kernels for pressure (field 1)
316: f0_bd_p = sum_d n[d] * (u[d] - g[d]) (continuity equation boundary correction) */
317: static void f0_bd_nitsche_quadratic_p(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], const PetscReal n[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
318: {
319: PetscScalar g[3];
320: PetscInt d;
322: ExactVelocityQuadratic(dim, x, g);
323: f0[0] = 0.0;
324: for (d = 0; d < dim; ++d) f0[0] += n[d] * (u[d] - g[d]);
325: }
327: static void f0_bd_nitsche_trig_p(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], const PetscReal n[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
328: {
329: PetscScalar g[3];
330: PetscInt d;
332: ExactVelocityTrig(dim, x, g);
333: f0[0] = 0.0;
334: for (d = 0; d < dim; ++d) f0[0] += n[d] * (u[d] - g[d]);
335: }
337: /* Nitsche boundary Jacobian kernels (solution-independent)
338: g0_bd_uu[c*Nc+d] = delta(c,d) * penalty (penalty Jacobian) */
339: static void g0_bd_uu(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], const PetscReal n[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
340: {
341: const PetscReal penalty = PetscRealPart(constants[1]);
342: const PetscInt Nc = uOff[1] - uOff[0];
343: PetscInt c;
345: for (c = 0; c < Nc; ++c) g0[c * Nc + c] = penalty;
346: }
348: /* g1_bd_uu[(c*Nc+d)*dim+e] = -mu * (delta(c,d)*n[e] + delta(c,e)*n[d]) (consistency Jacobian: df0/du_x) */
/* Consistency Jacobian df0_bd_u/d(grad u):
     g1[(c*Nc+d)*dim+e] = -mu (delta_{cd} n_e + delta_{ce} n_d)
   i.e. the derivative of the -mu (grad u + grad u^T) n term in f0_bd. */
static void g1_bd_uu(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], const PetscReal n[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g1[])
{
  const PetscReal mu = PetscRealPart(constants[0]); /* dynamic viscosity */
  const PetscInt  Nc = uOff[1] - uOff[0];           /* velocity components */
  PetscInt        c, d, e;

  for (c = 0; c < Nc; ++c)
    for (d = 0; d < Nc; ++d)
      for (e = 0; e < dim; ++e) g1[(c * Nc + d) * dim + e] = -mu * ((c == d ? 1.0 : 0.0) * n[e] + (c == e ? 1.0 : 0.0) * n[d]);
}
360: /* g2_bd_uu[(c*Nc+d)*dim+e] = -mu * (n[e]*delta(c,d) + n[c]*delta(e,d)) (symmetry Jacobian: df1/du) */
/* Symmetry Jacobian df1_bd_u/du:
     g2[(c*Nc+d)*dim+e] = -mu (n_e delta_{cd} + n_c delta_{ed})
   i.e. the derivative of the -mu (n (u-g) + (u-g) n) term in f1_bd. */
static void g2_bd_uu(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], const PetscReal n[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g2[])
{
  const PetscReal mu = PetscRealPart(constants[0]); /* dynamic viscosity */
  const PetscInt  Nc = uOff[1] - uOff[0];           /* velocity components */
  PetscInt        c, d, e;

  for (c = 0; c < Nc; ++c)
    for (d = 0; d < Nc; ++d)
      for (e = 0; e < dim; ++e) g2[(c * Nc + d) * dim + e] = -mu * (n[e] * (c == d ? 1.0 : 0.0) + n[c] * (e == d ? 1.0 : 0.0));
}
372: /* g0_bd_up[c*1+0] = n[c] (velocity-pressure coupling: df0_u/dp) */
373: static void g0_bd_up(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], const PetscReal n[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
374: {
375: PetscInt c;
377: for (c = 0; c < dim; ++c) g0[c] = n[c];
378: }
380: /* g0_bd_pu[0*Nc+d] = n[d] (pressure-velocity coupling: df0_p/du) */
381: static void g0_bd_pu(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], const PetscReal n[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
382: {
383: PetscInt d;
385: for (d = 0; d < dim; ++d) g0[d] = n[d];
386: }
/* Read -sol and -bc from the options database into the application context.
   The "- 3" trims the trailing type-name/prefix/NULL entries that SolTypes
   and BCTypes carry per the PetscEnum string-array convention. */
static PetscErrorCode ProcessOptions(MPI_Comm comm, AppCtx *options)
{
  PetscInt sol, bc;

  PetscFunctionBeginUser;
  options->sol = SOL_QUADRATIC;
  options->bc  = BC_ESSENTIAL;
  PetscOptionsBegin(comm, "", "Stokes Problem Options", "DMPLEX");
  sol = options->sol;
  PetscCall(PetscOptionsEList("-sol", "The MMS solution", "ex62.c", SolTypes, PETSC_STATIC_ARRAY_LENGTH(SolTypes) - 3, SolTypes[options->sol], &sol, NULL));
  options->sol = (SolType)sol;
  bc = options->bc;
  PetscCall(PetscOptionsEList("-bc", "The boundary condition type", "ex62.c", BCTypes, PETSC_STATIC_ARRAY_LENGTH(BCTypes) - 3, BCTypes[options->bc], &bc, NULL));
  options->bc = (BCType)bc;
  PetscOptionsEnd();
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Create the DMPLEX mesh, configured entirely from command-line options
   (-dm_plex_dim, -dm_plex_box_faces, -dm_refine, ...) */
static PetscErrorCode CreateMesh(MPI_Comm comm, AppCtx *user, DM *dm)
{
  PetscFunctionBeginUser;
  PetscCall(DMCreate(comm, dm));
  PetscCall(DMSetType(*dm, DMPLEX));
  PetscCall(DMSetFromOptions(*dm));
  PetscCall(DMViewFromOptions(*dm, NULL, "-dm_view"));
  PetscFunctionReturn(PETSC_SUCCESS);
}
416: static PetscErrorCode SetupParameters(MPI_Comm comm, AppCtx *ctx)
417: {
418: Parameter *p;
420: PetscFunctionBeginUser;
421: /* setup PETSc parameter bag */
422: PetscCall(PetscBagCreate(PETSC_COMM_SELF, sizeof(Parameter), &ctx->bag));
423: PetscCall(PetscBagGetData(ctx->bag, &p));
424: PetscCall(PetscBagSetName(ctx->bag, "par", "Stokes Parameters"));
425: PetscCall(PetscBagRegisterScalar(ctx->bag, &p->mu, 1.0, "mu", "Dynamic Shear Viscosity, Pa s"));
426: PetscCall(PetscBagRegisterScalar(ctx->bag, &p->eta, 100.0, "eta", "Nitsche penalty parameter (dimensionless)"));
427: PetscCall(PetscBagSetFromOptions(ctx->bag));
428: {
429: PetscViewer viewer;
430: PetscViewerFormat format;
431: PetscBool flg;
433: PetscCall(PetscOptionsCreateViewer(comm, NULL, NULL, "-param_view", &viewer, &format, &flg));
434: if (flg) {
435: PetscCall(PetscViewerPushFormat(viewer, format));
436: PetscCall(PetscBagView(ctx->bag, viewer));
437: PetscCall(PetscViewerFlush(viewer));
438: PetscCall(PetscViewerPopFormat(viewer));
439: PetscCall(PetscViewerDestroy(&viewer));
440: }
441: }
442: PetscFunctionReturn(PETSC_SUCCESS);
443: }
445: static PetscErrorCode SetupEqn(DM dm, AppCtx *user)
446: {
447: PetscErrorCode (*exactFuncs[2])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar *, void *);
448: void (*f0_bd_u)(PetscInt, PetscInt, PetscInt, const PetscInt[], const PetscInt[], const PetscScalar[], const PetscScalar[], const PetscScalar[], const PetscInt[], const PetscInt[], const PetscScalar[], const PetscScalar[], const PetscScalar[], PetscReal, const PetscReal[], const PetscReal[], PetscInt, const PetscScalar[], PetscScalar[]);
449: void (*f1_bd_u)(PetscInt, PetscInt, PetscInt, const PetscInt[], const PetscInt[], const PetscScalar[], const PetscScalar[], const PetscScalar[], const PetscInt[], const PetscInt[], const PetscScalar[], const PetscScalar[], const PetscScalar[], PetscReal, const PetscReal[], const PetscReal[], PetscInt, const PetscScalar[], PetscScalar[]);
450: void (*f0_bd_p)(PetscInt, PetscInt, PetscInt, const PetscInt[], const PetscInt[], const PetscScalar[], const PetscScalar[], const PetscScalar[], const PetscInt[], const PetscInt[], const PetscScalar[], const PetscScalar[], const PetscScalar[], PetscReal, const PetscReal[], const PetscReal[], PetscInt, const PetscScalar[], PetscScalar[]);
451: PetscDS ds;
452: DMLabel label;
453: const PetscInt id = 1;
455: PetscFunctionBeginUser;
456: PetscCall(DMGetDS(dm, &ds));
457: switch (user->sol) {
458: case SOL_QUADRATIC:
459: PetscCall(PetscDSSetResidual(ds, 0, f0_quadratic_u, f1_u));
460: exactFuncs[0] = quadratic_u;
461: exactFuncs[1] = quadratic_p;
462: f0_bd_u = f0_bd_nitsche_quadratic_u;
463: f1_bd_u = f1_bd_nitsche_quadratic_u;
464: f0_bd_p = f0_bd_nitsche_quadratic_p;
465: break;
466: case SOL_TRIG:
467: PetscCall(PetscDSSetResidual(ds, 0, f0_trig_u, f1_u));
468: exactFuncs[0] = trig_u;
469: exactFuncs[1] = trig_p;
470: f0_bd_u = f0_bd_nitsche_trig_u;
471: f1_bd_u = f1_bd_nitsche_trig_u;
472: f0_bd_p = f0_bd_nitsche_trig_p;
473: break;
474: default:
475: SETERRQ(PetscObjectComm((PetscObject)dm), PETSC_ERR_ARG_WRONG, "Unsupported solution type: %s (%d)", SolTypes[PetscMin(user->sol, SOL_UNKNOWN)], user->sol);
476: }
477: PetscCall(PetscDSSetResidual(ds, 1, f0_p, NULL));
478: PetscCall(PetscDSSetJacobian(ds, 0, 0, NULL, NULL, NULL, g3_uu));
479: PetscCall(PetscDSSetJacobian(ds, 0, 1, NULL, NULL, g2_up, NULL));
480: PetscCall(PetscDSSetJacobian(ds, 1, 0, NULL, g1_pu, NULL, NULL));
481: PetscCall(PetscDSSetJacobianPreconditioner(ds, 0, 0, NULL, NULL, NULL, g3_uu));
482: PetscCall(PetscDSSetJacobianPreconditioner(ds, 1, 1, g0_pp, NULL, NULL, NULL));
484: PetscCall(PetscDSSetExactSolution(ds, 0, exactFuncs[0], user));
485: PetscCall(PetscDSSetExactSolution(ds, 1, exactFuncs[1], user));
487: PetscCall(DMGetLabel(dm, "marker", &label));
488: switch (user->bc) {
489: case BC_ESSENTIAL:
490: PetscCall(DMAddBoundary(dm, DM_BC_ESSENTIAL, "wall", label, 1, &id, 0, 0, NULL, (PetscVoidFn *)exactFuncs[0], NULL, user, NULL));
491: break;
492: case BC_NITSCHE: {
493: PetscWeakForm wf;
494: DMLabel faceSetsLabel;
495: IS valueIS;
496: const PetscInt *faceSetValues;
497: PetscInt numValues, bd, i;
499: PetscCall(DMGetLabel(dm, "Face Sets", &faceSetsLabel));
500: PetscCall(DMLabelGetNumValues(faceSetsLabel, &numValues));
501: PetscCall(DMLabelGetValueIS(faceSetsLabel, &valueIS));
502: PetscCall(ISGetIndices(valueIS, &faceSetValues));
504: /* Velocity boundary: natural BC with Nitsche terms on all boundary faces */
505: PetscCall(DMAddBoundary(dm, DM_BC_NATURAL, "wall", faceSetsLabel, numValues, faceSetValues, 0, 0, NULL, NULL, NULL, user, &bd));
506: PetscCall(PetscDSGetBoundary(ds, bd, &wf, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL));
507: for (i = 0; i < numValues; ++i) {
508: /* Velocity residual (field 0): f0 and f1 */
509: PetscCall(PetscWeakFormSetIndexBdResidual(wf, faceSetsLabel, faceSetValues[i], 0, 0, 0, f0_bd_u, 0, f1_bd_u));
510: /* Velocity-velocity Jacobian (field 0, field 0): g0 (penalty), g1 (consistency), g2 (symmetry) */
511: PetscCall(PetscWeakFormSetIndexBdJacobian(wf, faceSetsLabel, faceSetValues[i], 0, 0, 0, 0, g0_bd_uu, 0, g1_bd_uu, 0, g2_bd_uu, 0, NULL));
512: /* Velocity-pressure Jacobian (field 0, field 1): g0 (pressure coupling) */
513: PetscCall(PetscWeakFormSetIndexBdJacobian(wf, faceSetsLabel, faceSetValues[i], 0, 1, 0, 0, g0_bd_up, 0, NULL, 0, NULL, 0, NULL));
514: }
516: /* Pressure boundary: natural BC for continuity equation correction */
517: PetscCall(DMAddBoundary(dm, DM_BC_NATURAL, "wall_pres", faceSetsLabel, numValues, faceSetValues, 1, 0, NULL, NULL, NULL, user, &bd));
518: PetscCall(PetscDSGetBoundary(ds, bd, &wf, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL));
519: for (i = 0; i < numValues; ++i) {
520: /* Pressure residual (field 1): f0 */
521: PetscCall(PetscWeakFormSetIndexBdResidual(wf, faceSetsLabel, faceSetValues[i], 1, 0, 0, f0_bd_p, 0, NULL));
522: /* Pressure-velocity Jacobian (field 1, field 0): g0 */
523: PetscCall(PetscWeakFormSetIndexBdJacobian(wf, faceSetsLabel, faceSetValues[i], 1, 0, 0, 0, g0_bd_pu, 0, NULL, 0, NULL, 0, NULL));
524: }
525: PetscCall(ISRestoreIndices(valueIS, &faceSetValues));
526: PetscCall(ISDestroy(&valueIS));
527: } break;
528: default:
529: SETERRQ(PetscObjectComm((PetscObject)dm), PETSC_ERR_ARG_WRONG, "Unsupported BC type: %s (%d)", BCTypes[PetscMin(user->bc, BC_UNKNOWN)], user->bc);
530: }
532: /* Make constant values available to pointwise functions */
533: {
534: Parameter *param;
535: PetscScalar constants[2];
537: PetscCall(PetscBagGetData(user->bag, ¶m));
538: constants[0] = param->mu; /* dynamic shear viscosity, Pa s */
539: constants[1] = 0.0; /* Nitsche penalty (set below if needed) */
540: if (user->bc == BC_NITSCHE) {
541: /* Compute cell size h from mesh */
542: PetscInt dim, cStart;
543: PetscReal vol, h;
545: PetscCall(DMGetDimension(dm, &dim));
546: PetscCall(DMPlexGetHeightStratum(dm, 0, &cStart, NULL));
547: PetscCall(DMPlexComputeCellGeometryFVM(dm, cStart, &vol, NULL, NULL));
548: h = PetscPowReal(vol, 1.0 / dim);
549: constants[1] = PetscRealPart(param->eta) * PetscRealPart(param->mu) / h;
550: }
551: PetscCall(PetscDSSetConstants(ds, 2, constants));
552: }
553: PetscFunctionReturn(PETSC_SUCCESS);
554: }
556: static PetscErrorCode zero(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, PetscCtx ctx)
557: {
558: PetscInt c;
559: for (c = 0; c < Nc; ++c) u[c] = 0.0;
560: return PETSC_SUCCESS;
561: }
562: static PetscErrorCode one(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, PetscCtx ctx)
563: {
564: PetscInt c;
565: for (c = 0; c < Nc; ++c) u[c] = 1.0;
566: return PETSC_SUCCESS;
567: }
569: static PetscErrorCode CreatePressureNullSpace(DM dm, PetscInt origField, PetscInt field, MatNullSpace *nullspace)
570: {
571: Vec vec;
572: PetscErrorCode (*funcs[2])(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, PetscCtx ctx) = {zero, one};
574: PetscFunctionBeginUser;
575: PetscCheck(origField == 1, PetscObjectComm((PetscObject)dm), PETSC_ERR_ARG_WRONG, "Field %" PetscInt_FMT " should be 1 for pressure", origField);
576: funcs[field] = one;
577: {
578: PetscDS ds;
579: PetscCall(DMGetDS(dm, &ds));
580: PetscCall(PetscObjectViewFromOptions((PetscObject)ds, NULL, "-ds_view"));
581: }
582: PetscCall(DMCreateGlobalVector(dm, &vec));
583: PetscCall(DMProjectFunction(dm, 0.0, funcs, NULL, INSERT_ALL_VALUES, vec));
584: PetscCall(VecNormalize(vec, NULL));
585: PetscCall(MatNullSpaceCreate(PetscObjectComm((PetscObject)dm), PETSC_FALSE, 1, &vec, nullspace));
586: PetscCall(VecDestroy(&vec));
587: /* New style for field null spaces */
588: {
589: PetscObject pressure;
590: MatNullSpace nullspacePres;
592: PetscCall(DMGetField(dm, field, NULL, &pressure));
593: PetscCall(MatNullSpaceCreate(PetscObjectComm(pressure), PETSC_TRUE, 0, NULL, &nullspacePres));
594: PetscCall(PetscObjectCompose(pressure, "nullspace", (PetscObject)nullspacePres));
595: PetscCall(MatNullSpaceDestroy(&nullspacePres));
596: }
597: PetscFunctionReturn(PETSC_SUCCESS);
598: }
/* Create the velocity/pressure finite-element discretization, invoke the
   equation setup callback, and propagate the discretization and the pressure
   null-space constructor down the coarse-DM hierarchy. */
static PetscErrorCode SetupProblem(DM dm, PetscErrorCode (*setupEqn)(DM, AppCtx *), AppCtx *user)
{
  DM              cdm = dm;
  PetscQuadrature q   = NULL;
  PetscBool       simplex;
  PetscInt        dim, Nf = 2, f, Nc[2];
  const char     *name[2]   = {"velocity", "pressure"};
  const char     *prefix[2] = {"vel_", "pres_"};

  PetscFunctionBegin;
  PetscCall(DMGetDimension(dm, &dim));
  PetscCall(DMPlexIsSimplex(dm, &simplex));
  Nc[0] = dim; /* vector-valued velocity */
  Nc[1] = 1;   /* scalar pressure */
  for (f = 0; f < Nf; ++f) {
    PetscFE fe;

    PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, Nc[f], simplex, prefix[f], -1, &fe));
    PetscCall(PetscObjectSetName((PetscObject)fe, name[f]));
    if (!q) PetscCall(PetscFEGetQuadrature(fe, &q));
    PetscCall(PetscFESetQuadrature(fe, q)); /* all fields share the first field's quadrature */
    PetscCall(DMSetField(dm, f, NULL, (PetscObject)fe));
    PetscCall(PetscFEDestroy(&fe));
  }
  PetscCall(DMCreateDS(dm));
  PetscCall((*setupEqn)(dm, user));
  while (cdm) {
    PetscCall(DMCopyDisc(dm, cdm));
    PetscCall(DMSetNullSpaceConstructor(cdm, 1, CreatePressureNullSpace));
    PetscCall(DMGetCoarseDM(cdm, &cdm));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Solve the MMS Stokes problem: build mesh and discretization, attach the
   constant-pressure null space to the Jacobian, and run the SNES solve. */
int main(int argc, char **argv)
{
  SNES   snes;
  DM     dm;
  Vec    u;
  AppCtx user;

  PetscFunctionBeginUser;
  PetscCall(PetscInitialize(&argc, &argv, NULL, help));
  PetscCall(ProcessOptions(PETSC_COMM_WORLD, &user));
  PetscCall(CreateMesh(PETSC_COMM_WORLD, &user, &dm));
  PetscCall(SNESCreate(PetscObjectComm((PetscObject)dm), &snes));
  PetscCall(SNESSetDM(snes, dm));
  PetscCall(DMSetApplicationContext(dm, &user));

  PetscCall(SetupParameters(PETSC_COMM_WORLD, &user));
  PetscCall(SetupProblem(dm, SetupEqn, &user));
  PetscCall(DMPlexCreateClosureIndex(dm, NULL)); /* precompute closure indices used during assembly */

  PetscCall(DMCreateGlobalVector(dm, &u));
  PetscCall(DMPlexSetSNESLocalFEM(dm, PETSC_FALSE, &user));
  PetscCall(SNESSetFromOptions(snes));
  PetscCall(DMSNESCheckFromOptions(snes, u)); /* -dmsnes_check runs the MMS consistency test */
  PetscCall(PetscObjectSetName((PetscObject)u, "Solution"));
  {
    Mat          J;
    MatNullSpace sp;

    /* Attach the constant-pressure null space to the assembled Jacobian */
    PetscCall(SNESSetUp(snes));
    PetscCall(CreatePressureNullSpace(dm, 1, 1, &sp));
    PetscCall(SNESGetJacobian(snes, &J, NULL, NULL, NULL));
    PetscCall(MatSetNullSpace(J, sp));
    PetscCall(MatNullSpaceDestroy(&sp));
    PetscCall(PetscObjectSetName((PetscObject)J, "Jacobian"));
    PetscCall(MatViewFromOptions(J, NULL, "-J_view"));
  }
  PetscCall(SNESSolve(snes, NULL, u));

  PetscCall(VecDestroy(&u));
  PetscCall(SNESDestroy(&snes));
  PetscCall(DMDestroy(&dm));
  PetscCall(PetscBagDestroy(&user.bag));
  PetscCall(PetscFinalize());
  return 0;
}
679: /*TEST
681: test:
682: suffix: 2d_p2_p1_check
683: requires: triangle
684: args: -sol quadratic -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
686: test:
687: suffix: 2d_p2_p1_check_parallel
688: nsize: {{2 3 5}}
689: requires: triangle
690: args: -sol quadratic -dm_refine 2 -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
692: test:
693: suffix: 3d_p2_p1_check
694: requires: ctetgen
695: args: -sol quadratic -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
697: test:
698: suffix: 3d_p2_p1_check_parallel
699: nsize: {{2 3 5}}
700: requires: ctetgen
701: args: -sol quadratic -dm_refine 0 -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
703: test:
704: suffix: 2d_p2_p1_conv
705: requires: triangle
706: # Using -dm_refine 3 gives L_2 convergence rate: [3.0, 2.1]
707: args: -sol trig -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 2 -ksp_error_if_not_converged \
708: -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
709: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
710: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
712: test:
713: suffix: 2d_p2_p1_conv_gamg
714: requires: triangle
715: args: -sol trig -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 2 \
716: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \
717: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_explicit_operator_mat_type aij -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_coarse_pc_type svd
719: test:
720: suffix: 3d_p2_p1_conv
721: requires: ctetgen !single
722: # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [2.8, 2.8]
723: args: -sol trig -dm_plex_dim 3 -dm_refine 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 \
724: -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
725: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
726: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
728: test:
729: suffix: 2d_q2_q1_check
730: args: -sol quadratic -dm_plex_simplex 0 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
732: test:
733: suffix: 3d_q2_q1_check
734: args: -sol quadratic -dm_plex_simplex 0 -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
736: test:
737: suffix: 2d_q2_q1_conv
738: # Using -dm_refine 3 -convest_num_refine 1 gives L_2 convergence rate: [3.0, 2.1]
739: args: -sol trig -dm_plex_simplex 0 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 -ksp_error_if_not_converged \
740: -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
741: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
742: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
744: test:
745: suffix: 3d_q2_q1_conv
746: requires: !single
747: # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [2.8, 2.4]
748: args: -sol trig -dm_plex_simplex 0 -dm_plex_dim 3 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 \
749: -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
750: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
751: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
753: test:
754: suffix: 2d_p3_p2_check
755: requires: triangle
756: args: -sol quadratic -vel_petscspace_degree 3 -pres_petscspace_degree 2 -dmsnes_check 0.0001
758: test:
759: suffix: 3d_p3_p2_check
760: requires: ctetgen !single
761: args: -sol quadratic -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 3 -pres_petscspace_degree 2 -dmsnes_check 0.0001
763: test:
764: suffix: 2d_p3_p2_conv
765: requires: triangle
766: # Using -dm_refine 2 gives L_2 convergence rate: [3.8, 3.0]
767: args: -sol trig -vel_petscspace_degree 3 -pres_petscspace_degree 2 -snes_convergence_estimate -convest_num_refine 2 -ksp_error_if_not_converged \
768: -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
769: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
770: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
772: test:
773: suffix: 3d_p3_p2_conv
774: requires: ctetgen long_runtime
775: # Using -dm_refine 1 -convest_num_refine 2 gives L_2 convergence rate: [3.6, 3.9]
776: args: -sol trig -dm_plex_dim 3 -dm_refine 1 -vel_petscspace_degree 3 -pres_petscspace_degree 2 -snes_convergence_estimate -convest_num_refine 2 \
777: -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
778: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
779: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
781: test:
782: suffix: 2d_q1_p0_conv
783: requires: !single
784: # Using -dm_refine 3 gives L_2 convergence rate: [1.9, 1.0]
785: args: -sol quadratic -dm_plex_simplex 0 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -snes_convergence_estimate -convest_num_refine 2 \
786: -ksp_atol 1e-10 -petscds_jac_pre 0 \
787: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \
788: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_explicit_operator_mat_type aij -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_levels_pc_type jacobi -fieldsplit_pressure_mg_coarse_pc_type svd -fieldsplit_pressure_pc_gamg_aggressive_coarsening 0
790: test:
791: suffix: 3d_q1_p0_conv
792: requires: !single
793: # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [1.7, 1.0]
794: args: -sol quadratic -dm_plex_simplex 0 -dm_plex_dim 3 -dm_refine 1 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -snes_convergence_estimate -convest_num_refine 1 \
795: -ksp_atol 1e-10 -petscds_jac_pre 0 \
796: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \
797: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_explicit_operator_mat_type aij -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_levels_pc_type jacobi -fieldsplit_pressure_mg_coarse_pc_type svd -fieldsplit_pressure_pc_gamg_aggressive_coarsening 0
799: # Stokes preconditioners
800: # Block diagonal \begin{pmatrix} A & 0 \\ 0 & I \end{pmatrix}
801: test:
802: suffix: 2d_p2_p1_block_diagonal
803: requires: triangle
804: args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
805: -snes_error_if_not_converged \
806: -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-4 -ksp_error_if_not_converged \
807: -pc_type fieldsplit -pc_fieldsplit_type additive -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi
808: output_file: output/empty.out
809: # Block triangular \begin{pmatrix} A & B \\ 0 & I \end{pmatrix}
810: test:
811: suffix: 2d_p2_p1_block_triangular
812: requires: triangle
813: args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
814: -snes_error_if_not_converged \
815: -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \
816: -pc_type fieldsplit -pc_fieldsplit_type multiplicative -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi
817: output_file: output/empty.out
818: # Diagonal Schur complement \begin{pmatrix} A & 0 \\ 0 & S \end{pmatrix}
819: test:
820: suffix: 2d_p2_p1_schur_diagonal
821: requires: triangle
822: args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
823: -snes_error_if_not_converged \
824: -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
825: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type diag -pc_fieldsplit_off_diag_use_amat \
826: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
827: output_file: output/empty.out
828: # Upper triangular Schur complement \begin{pmatrix} A & B \\ 0 & S \end{pmatrix}
829: test:
830: suffix: 2d_p2_p1_schur_upper
831: requires: triangle
832: args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001 \
833: -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
834: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type upper -pc_fieldsplit_off_diag_use_amat \
835: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
  # Lower triangular Schur complement \begin{pmatrix} A & 0 \\ B^T & S \end{pmatrix}
837: test:
838: suffix: 2d_p2_p1_schur_lower
839: requires: triangle
840: args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
841: -snes_error_if_not_converged \
842: -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
843: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type lower -pc_fieldsplit_off_diag_use_amat \
844: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
845: output_file: output/empty.out
846: # Full Schur complement \begin{pmatrix} I & 0 \\ B^T A^{-1} & I \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & S \end{pmatrix} \begin{pmatrix} I & A^{-1} B \\ 0 & I \end{pmatrix}
847: test:
848: suffix: 2d_p2_p1_schur_full
849: requires: triangle
850: args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
851: -snes_error_if_not_converged \
852: -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
853: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full -pc_fieldsplit_off_diag_use_amat \
854: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
855: output_file: output/empty.out
856: # Full Schur + Velocity GMG
857: test:
858: suffix: 2d_p2_p1_gmg_vcycle
859: TODO: broken (requires subDMs hooks)
860: requires: triangle
861: args: -sol quadratic -dm_refine_hierarchy 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
862: -ksp_type fgmres -ksp_atol 1e-9 -snes_error_if_not_converged -pc_use_amat \
863: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_off_diag_use_amat \
864: -fieldsplit_velocity_pc_type mg -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_pc_gamg_esteig_ksp_max_it 10 -fieldsplit_pressure_mg_levels_pc_type sor -fieldsplit_pressure_mg_coarse_pc_type svd
865: # SIMPLE \begin{pmatrix} I & 0 \\ B^T A^{-1} & I \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & B^T diag(A)^{-1} B \end{pmatrix} \begin{pmatrix} I & diag(A)^{-1} B \\ 0 & I \end{pmatrix}
866: test:
867: suffix: 2d_p2_p1_simple
868: requires: triangle
869: args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
870: -snes_error_if_not_converged \
871: -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \
872: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full \
873: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi \
874: -fieldsplit_pressure_inner_ksp_type preonly -fieldsplit_pressure_inner_pc_type jacobi -fieldsplit_pressure_upper_ksp_type preonly -fieldsplit_pressure_upper_pc_type jacobi
875: output_file: output/empty.out
876: # FETI-DP solvers (these solvers are quite inefficient, they are here to exercise the code)
877: test:
878: suffix: 2d_p2_p1_fetidp
879: requires: triangle mumps
880: nsize: 5
881: args: -sol quadratic -dm_refine 2 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
882: -snes_error_if_not_converged \
883: -ksp_type fetidp -ksp_rtol 1.0e-8 \
884: -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
885: -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 200 -fetidp_fieldsplit_p_pc_type none \
886: -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type mumps -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type mumps -fetidp_fieldsplit_lag_ksp_type preonly
887: output_file: output/empty.out
888: test:
889: suffix: 2d_q2_q1_fetidp
890: requires: mumps
891: nsize: 5
892: args: -sol quadratic -dm_plex_simplex 0 -dm_refine 2 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
893: -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_error_if_not_converged \
894: -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
895: -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 200 -fetidp_fieldsplit_p_pc_type none \
896: -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type mumps -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type mumps -fetidp_fieldsplit_lag_ksp_type preonly
897: output_file: output/empty.out
898: test:
899: suffix: 3d_p2_p1_fetidp
900: requires: ctetgen mumps suitesparse
901: nsize: 5
902: args: -sol quadratic -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
903: -snes_error_if_not_converged \
904: -ksp_type fetidp -ksp_rtol 1.0e-9 \
905: -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
906: -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 1000 -fetidp_fieldsplit_p_pc_type none \
907: -fetidp_bddc_pc_bddc_use_deluxe_scaling -fetidp_bddc_pc_bddc_benign_trick -fetidp_bddc_pc_bddc_deluxe_singlemat \
908: -fetidp_pc_discrete_harmonic -fetidp_harmonic_pc_factor_mat_solver_type petsc -fetidp_harmonic_pc_type cholesky \
909: -fetidp_bddelta_pc_factor_mat_solver_type umfpack -fetidp_fieldsplit_lag_ksp_type preonly -fetidp_bddc_sub_schurs_mat_solver_type mumps -fetidp_bddc_sub_schurs_mat_mumps_icntl_14 100000 \
910: -fetidp_bddelta_pc_factor_mat_ordering_type external \
911: -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type umfpack -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type umfpack \
912: -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_ordering_type external -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_ordering_type external
913: output_file: output/empty.out
914: test:
915: suffix: 3d_q2_q1_fetidp
916: requires: suitesparse
917: nsize: 5
918: args: -sol quadratic -dm_plex_simplex 0 -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
919: -snes_error_if_not_converged \
920: -ksp_type fetidp -ksp_rtol 1.0e-8 \
921: -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
922: -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 2000 -fetidp_fieldsplit_p_pc_type none \
923: -fetidp_pc_discrete_harmonic -fetidp_harmonic_pc_factor_mat_solver_type petsc -fetidp_harmonic_pc_type cholesky \
924: -fetidp_bddc_pc_bddc_symmetric -fetidp_fieldsplit_lag_ksp_type preonly \
925: -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type umfpack -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type umfpack \
926: -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_ordering_type external -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_ordering_type external
927: output_file: output/empty.out
928: # BDDC solvers (these solvers are quite inefficient, they are here to exercise the code)
929: test:
930: suffix: 2d_p2_p1_bddc
931: nsize: 2
932: requires: triangle !single
933: args: -sol quadratic -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
934: -snes_error_if_not_converged \
935: -ksp_type gmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-8 -ksp_error_if_not_converged \
936: -pc_type bddc -pc_bddc_corner_selection -pc_bddc_dirichlet_pc_type svd -pc_bddc_neumann_pc_type svd -pc_bddc_coarse_redundant_pc_type svd
937: output_file: output/empty.out
938: # Vanka
939: test:
940: suffix: 2d_q1_p0_vanka
941: output_file: output/empty.out
942: requires: double !complex
943: args: -sol quadratic -dm_plex_simplex 0 -dm_refine 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
944: -snes_rtol 1.0e-4 \
945: -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
946: -pc_type patch -pc_patch_partition_of_unity 0 -pc_patch_construct_codim 0 -pc_patch_construct_type vanka \
947: -sub_ksp_type preonly -sub_pc_type lu
948: test:
949: suffix: 2d_q1_p0_vanka_denseinv
950: output_file: output/empty.out
951: requires: double !complex
952: args: -sol quadratic -dm_plex_simplex 0 -dm_refine 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
953: -snes_rtol 1.0e-4 \
954: -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
955: -pc_type patch -pc_patch_partition_of_unity 0 -pc_patch_construct_codim 0 -pc_patch_construct_type vanka \
956: -pc_patch_dense_inverse -pc_patch_sub_mat_type seqdense
957: # Vanka smoother
958: test:
959: suffix: 2d_q1_p0_gmg_vanka
960: output_file: output/empty.out
961: requires: double !complex
962: args: -sol quadratic -dm_plex_simplex 0 -dm_refine_hierarchy 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
963: -snes_rtol 1.0e-4 \
964: -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
965: -pc_type mg \
966: -mg_levels_ksp_type gmres -mg_levels_ksp_max_it 30 \
967: -mg_levels_pc_type patch -mg_levels_pc_patch_partition_of_unity 0 -mg_levels_pc_patch_construct_codim 0 -mg_levels_pc_patch_construct_type vanka \
968: -mg_levels_sub_ksp_type preonly -mg_levels_sub_pc_type lu \
969: -mg_coarse_pc_type svd
970: # Nitsche BC consistency check
971: test:
972: suffix: 2d_q2_q1_nitsche_check
973: requires: double !complex
974: args: -sol quadratic -bc nitsche -dm_plex_simplex 0 -dm_refine 1 \
975: -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
976: # Nitsche BC + Vanka
977: test:
978: suffix: 2d_q1_p0_nitsche_vanka
979: output_file: output/empty.out
980: requires: double !complex
981: args: -sol quadratic -bc nitsche -dm_plex_simplex 0 -dm_refine 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
982: -snes_rtol 1.0e-4 \
983: -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
984: -pc_type patch -pc_patch_partition_of_unity 0 -pc_patch_construct_codim 0 -pc_patch_construct_type vanka \
985: -sub_ksp_type preonly -sub_pc_type lu
986: # Nitsche BC + GMRES (sanity check that Nitsche formulation solves correctly)
987: test:
988: suffix: 2d_q2_q1_nitsche_gmres
989: output_file: output/empty.out
990: requires: double !complex
991: args: -sol quadratic -bc nitsche -dm_plex_simplex 0 -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
992: -snes_error_if_not_converged \
993: -ksp_type gmres -ksp_rtol 1e-12 -pc_type jacobi
995: TEST*/