Actual source code: ex62.c
1: static char help[] = "Stokes Problem discretized with finite elements,\n\
2: using a parallel unstructured mesh (DMPLEX) to represent the domain.\n\n\n";
4: /*
5: For the isoviscous Stokes problem, which we discretize using the finite
6: element method on an unstructured mesh, the weak form equations are
8: < \nabla v, \mu (\nabla u + {\nabla u}^T) > - < \nabla\cdot v, p > - < v, f > = 0
9: < q, -\nabla\cdot u > = 0
11: Viewing:
13: To produce nice output, use
15: -dm_refine 3 -dm_view hdf5:sol1.h5 -error_vec_view hdf5:sol1.h5::append -snes_view_solution hdf5:sol1.h5::append -exact_vec_view hdf5:sol1.h5::append
17: You can get a LaTeX view of the mesh, with point numbering, using
19: -dm_view :mesh.tex:ascii_latex -dm_plex_view_scale 8.0
21: The data layout can be viewed using
23: -dm_petscsection_view
25: Lots of information about the FEM assembly can be printed using
27: -dm_plex_print_fem 3
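  For example, a complete run might look like the following (a sketch, assuming the example is
  built as ./ex62 and PETSc was configured with HDF5 for the viewing options):

    ./ex62 -sol quadratic -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
      -dm_refine 2 -dmsnes_check 0.0001 \
      -dm_view hdf5:sol1.h5 -snes_view_solution hdf5:sol1.h5::append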
28: */
30: #include <petscdmplex.h>
31: #include <petscsnes.h>
32: #include <petscds.h>
33: #include <petscbag.h>
35: // TODO: Plot residual by fields after each smoother iterate
37: typedef enum {
38: SOL_QUADRATIC,
39: SOL_TRIG,
40: SOL_UNKNOWN
41: } SolType;
42: const char *SolTypes[] = {"quadratic", "trig", "unknown", "SolType", "SOL_", 0};
44: typedef enum {
45: BC_ESSENTIAL,
46: BC_NITSCHE,
47: BC_UNKNOWN
48: } BCType;
49: const char *BCTypes[] = {"essential", "nitsche", "unknown", "BCType", "BC_", 0};
51: typedef struct {
52: PetscScalar mu; /* dynamic shear viscosity */
53: PetscScalar eta; /* Nitsche penalty parameter (dimensionless) */
54: } Parameter;
56: typedef struct {
57: PetscBag bag; /* Problem parameters */
58: SolType sol; /* MMS solution */
59: BCType bc; /* Boundary condition type */
60: } AppCtx;
62: static void f1_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f1[])
63: {
64: const PetscReal mu = PetscRealPart(constants[0]);
65: const PetscInt Nc = uOff[1] - uOff[0];
66: PetscInt c, d;
68: for (c = 0; c < Nc; ++c) {
69: for (d = 0; d < dim; ++d) f1[c * dim + d] = mu * (u_x[c * dim + d] + u_x[d * dim + c]);
70: f1[c * dim + c] -= u[uOff[1]];
71: }
72: }
74: static void f0_p(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
75: {
76: PetscInt d;
77: for (d = 0, f0[0] = 0.0; d < dim; ++d) f0[0] -= u_x[d * dim + d];
78: }
80: static void g1_pu(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g1[])
81: {
82: PetscInt d;
83: for (d = 0; d < dim; ++d) g1[d * dim + d] = -1.0; /* < q, -\nabla\cdot u > */
84: }
86: static void g2_up(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g2[])
87: {
88: PetscInt d;
89: for (d = 0; d < dim; ++d) g2[d * dim + d] = -1.0; /* -< \nabla\cdot v, p > */
90: }
92: static void g3_uu(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g3[])
93: {
94: const PetscReal mu = PetscRealPart(constants[0]);
95: const PetscInt Nc = uOff[1] - uOff[0];
96: PetscInt c, d;
98: for (c = 0; c < Nc; ++c) {
99: for (d = 0; d < dim; ++d) {
100: g3[((c * Nc + c) * dim + d) * dim + d] += mu; /* < \nabla v, \nabla u > */
101: g3[((c * Nc + d) * dim + d) * dim + c] += mu; /* < \nabla v, {\nabla u}^T > */
102: }
103: }
104: }
106: static void g0_pp(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
107: {
108: const PetscReal mu = PetscRealPart(constants[0]);
110: g0[0] = 1.0 / mu;
111: }
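/* A reading aid (sketch): the volume kernels above assemble the saddle-point Jacobian

     \begin{pmatrix} A & B \\ C & 0 \end{pmatrix}

   with A = < \nabla v, mu (\nabla u + {\nabla u}^T) > from g3_uu, B = -< \nabla\cdot v, p > from g2_up,
   and C = < q, -\nabla\cdot u > from g1_pu. The kernel g0_pp enters only the preconditioner, supplying
   the pressure mass matrix scaled by 1/mu, a commonly used spectrally equivalent approximation to the
   Schur complement of the isoviscous Stokes operator. */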
113: /* Quadratic MMS Solution
114: 2D:
116: u = x^2 + y^2
117: v = 2 x^2 - 2xy
118: p = x + y - 1
119: f = <1 - 4 mu, 1 - 4 mu>
121: so that
123: e(u) = (grad u + grad u^T) = / 4x 4x \
124: \ 4x -4x /
125: div mu e(u) - \nabla p + f = mu <4, 4> - <1, 1> + <1 - 4 mu, 1 - 4 mu> = 0
126: \nabla \cdot u = 2x - 2x = 0
128: 3D:
130: u = 2 x^2 + y^2 + z^2
131: v = 2 x^2 - 2xy
132: w = 2 x^2 - 2xz
133: p = x + y + z - 3/2
134: f = <1 - 8 mu, 1 - 4 mu, 1 - 4 mu>
136: so that
138: e(u) = (grad u + grad u^T) = / 8x 4x 4x \
139: | 4x -4x 0 |
140: \ 4x 0 -4x /
141: div mu e(u) - \nabla p + f = mu <8, 4, 4> - <1, 1, 1> + <1 - 8 mu, 1 - 4 mu, 1 - 4 mu> = 0
142: \nabla \cdot u = 4x - 2x - 2x = 0
143: */
144: static PetscErrorCode quadratic_u(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
145: {
146: PetscInt c;
148: u[0] = (dim - 1) * PetscSqr(x[0]);
149: for (c = 1; c < Nc; ++c) {
150: u[0] += PetscSqr(x[c]);
151: u[c] = 2.0 * PetscSqr(x[0]) - 2.0 * x[0] * x[c];
152: }
153: return PETSC_SUCCESS;
154: }
156: static PetscErrorCode quadratic_p(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
157: {
158: PetscInt d;
160: u[0] = -0.5 * dim;
161: for (d = 0; d < dim; ++d) u[0] += x[d];
162: return PETSC_SUCCESS;
163: }
165: static void f0_quadratic_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
166: {
167: const PetscReal mu = PetscRealPart(constants[0]);
168: PetscInt d;
170: f0[0] = (dim - 1) * 4.0 * mu - 1.0;
171: for (d = 1; d < dim; ++d) f0[d] = 4.0 * mu - 1.0;
172: }
174: /* Trigonometric MMS Solution
175: 2D:
177: u = sin(pi x) + sin(pi y)
178: v = -pi cos(pi x) y
179: p = sin(2 pi x) + sin(2 pi y)
180: f = <2pi cos(2 pi x) + mu pi^2 sin(pi x) + mu pi^2 sin(pi y), 2pi cos(2 pi y) - mu pi^3 cos(pi x) y>
182: so that
184: e(u) = (grad u + grad u^T) = / 2pi cos(pi x) pi cos(pi y) + pi^2 sin(pi x) y \
185: \ pi cos(pi y) + pi^2 sin(pi x) y -2pi cos(pi x) /
186: div mu e(u) - \nabla p + f = mu <-pi^2 sin(pi x) - pi^2 sin(pi y), pi^3 cos(pi x) y> - <2pi cos(2 pi x), 2pi cos(2 pi y)> + <f_x, f_y> = 0
187: \nabla \cdot u = pi cos(pi x) - pi cos(pi x) = 0
189: 3D:
191: u = 2 sin(pi x) + sin(pi y) + sin(pi z)
192: v = -pi cos(pi x) y
193: w = -pi cos(pi x) z
194: p = sin(2 pi x) + sin(2 pi y) + sin(2 pi z)
195: f = <2pi cos(2 pi x) + mu 2pi^2 sin(pi x) + mu pi^2 sin(pi y) + mu pi^2 sin(pi z), 2pi cos(2 pi y) - mu pi^3 cos(pi x) y, 2pi cos(2 pi z) - mu pi^3 cos(pi x) z>
197: so that
199: e(u) = (grad u + grad u^T) = / 4pi cos(pi x) pi cos(pi y) + pi^2 sin(pi x) y pi cos(pi z) + pi^2 sin(pi x) z \
200: | pi cos(pi y) + pi^2 sin(pi x) y -2pi cos(pi x) 0 |
201: \ pi cos(pi z) + pi^2 sin(pi x) z 0 -2pi cos(pi x) /
202: div mu e(u) - \nabla p + f = mu <-2pi^2 sin(pi x) - pi^2 sin(pi y) - pi^2 sin(pi z), pi^3 cos(pi x) y, pi^3 cos(pi x) z> - <2pi cos(2 pi x), 2pi cos(2 pi y), 2pi cos(2 pi z)> + <f_x, f_y, f_z> = 0
203: \nabla \cdot u = 2 pi cos(pi x) - pi cos(pi x) - pi cos(pi x) = 0
204: */
205: static PetscErrorCode trig_u(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
206: {
207: PetscInt c;
209: u[0] = (dim - 1) * PetscSinReal(PETSC_PI * x[0]);
210: for (c = 1; c < Nc; ++c) {
211: u[0] += PetscSinReal(PETSC_PI * x[c]);
212: u[c] = -PETSC_PI * PetscCosReal(PETSC_PI * x[0]) * x[c];
213: }
214: return PETSC_SUCCESS;
215: }
217: static PetscErrorCode trig_p(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
218: {
219: PetscInt d;
221: for (d = 0, u[0] = 0.0; d < dim; ++d) u[0] += PetscSinReal(2.0 * PETSC_PI * x[d]);
222: return PETSC_SUCCESS;
223: }
225: static void f0_trig_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
226: {
227: const PetscReal mu = PetscRealPart(constants[0]);
228: PetscInt d;
230: f0[0] = -2.0 * PETSC_PI * PetscCosReal(2.0 * PETSC_PI * x[0]) - (dim - 1) * mu * PetscSqr(PETSC_PI) * PetscSinReal(PETSC_PI * x[0]);
231: for (d = 1; d < dim; ++d) {
232: f0[0] -= mu * PetscSqr(PETSC_PI) * PetscSinReal(PETSC_PI * x[d]);
233: f0[d] = -2.0 * PETSC_PI * PetscCosReal(2.0 * PETSC_PI * x[d]) + mu * PetscPowRealInt(PETSC_PI, 3) * PetscCosReal(PETSC_PI * x[0]) * x[d];
234: }
235: }
237: /* Inline helpers for computing the exact velocity inside the boundary kernels, which return void and therefore cannot call the PetscErrorCode-returning exact solution functions */
238: static inline void ExactVelocityQuadratic(PetscInt dim, const PetscReal x[], PetscScalar g[])
239: {
240: PetscInt c;
242: g[0] = (dim - 1) * x[0] * x[0];
243: for (c = 1; c < dim; ++c) {
244: g[0] += x[c] * x[c];
245: g[c] = 2.0 * x[0] * x[0] - 2.0 * x[0] * x[c];
246: }
247: }
249: static inline void ExactVelocityTrig(PetscInt dim, const PetscReal x[], PetscScalar g[])
250: {
251: PetscInt c;
253: g[0] = (dim - 1) * PetscSinReal(PETSC_PI * x[0]);
254: for (c = 1; c < dim; ++c) {
255: g[0] += PetscSinReal(PETSC_PI * x[c]);
256: g[c] = -PETSC_PI * PetscCosReal(PETSC_PI * x[0]) * x[c];
257: }
258: }
260: /* Nitsche boundary residual kernels for velocity (field 0)
261: f0_bd_u[c] = -mu * sum_d (u_x[c*dim+d] + u_x[d*dim+c]) * n[d] (consistency: stress flux from IBP)
262: + p * n[c] (pressure flux from IBP)
263: + penalty * (u[c] - g[c]) (penalty) */
264: static void f0_bd_nitsche_quadratic_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], const PetscReal n[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
265: {
266: const PetscReal mu = PetscRealPart(constants[0]);
267: const PetscReal penalty = PetscRealPart(constants[1]);
268: PetscScalar g[3];
269: PetscInt c, d;
271: ExactVelocityQuadratic(dim, x, g);
272: for (c = 0; c < dim; ++c) {
273: f0[c] = penalty * (u[c] - g[c]) + u[uOff[1]] * n[c];
274: for (d = 0; d < dim; ++d) f0[c] -= mu * (u_x[c * dim + d] + u_x[d * dim + c]) * n[d];
275: }
276: }
278: static void f0_bd_nitsche_trig_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], const PetscReal n[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
279: {
280: const PetscReal mu = PetscRealPart(constants[0]);
281: const PetscReal penalty = PetscRealPart(constants[1]);
282: PetscScalar g[3];
283: PetscInt c, d;
285: ExactVelocityTrig(dim, x, g);
286: for (c = 0; c < dim; ++c) {
287: f0[c] = penalty * (u[c] - g[c]) + u[uOff[1]] * n[c];
288: for (d = 0; d < dim; ++d) f0[c] -= mu * (u_x[c * dim + d] + u_x[d * dim + c]) * n[d];
289: }
290: }
292: /* f1_bd_u[c*dim+d] = -mu * (n[d]*(u[c]-g[c]) + n[c]*(u[d]-g[d])) (symmetry / adjoint consistency) */
293: static void f1_bd_nitsche_quadratic_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], const PetscReal n[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f1[])
294: {
295: const PetscReal mu = PetscRealPart(constants[0]);
296: PetscScalar g[3];
297: PetscInt c, d;
299: ExactVelocityQuadratic(dim, x, g);
300: for (c = 0; c < dim; ++c)
301: for (d = 0; d < dim; ++d) f1[c * dim + d] = -mu * (n[d] * (u[c] - g[c]) + n[c] * (u[d] - g[d]));
302: }
304: static void f1_bd_nitsche_trig_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], const PetscReal n[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f1[])
305: {
306: const PetscReal mu = PetscRealPart(constants[0]);
307: PetscScalar g[3];
308: PetscInt c, d;
310: ExactVelocityTrig(dim, x, g);
311: for (c = 0; c < dim; ++c)
312: for (d = 0; d < dim; ++d) f1[c * dim + d] = -mu * (n[d] * (u[c] - g[c]) + n[c] * (u[d] - g[d]));
313: }
315: /* Nitsche boundary residual kernels for pressure (field 1)
316: f0_bd_p = sum_d n[d] * (u[d] - g[d]) (continuity equation boundary correction) */
317: static void f0_bd_nitsche_quadratic_p(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], const PetscReal n[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
318: {
319: PetscScalar g[3];
320: PetscInt d;
322: ExactVelocityQuadratic(dim, x, g);
323: f0[0] = 0.0;
324: for (d = 0; d < dim; ++d) f0[0] += n[d] * (u[d] - g[d]);
325: }
327: static void f0_bd_nitsche_trig_p(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], const PetscReal n[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
328: {
329: PetscScalar g[3];
330: PetscInt d;
332: ExactVelocityTrig(dim, x, g);
333: f0[0] = 0.0;
334: for (d = 0; d < dim; ++d) f0[0] += n[d] * (u[d] - g[d]);
335: }
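/* Taken together, the residual kernels above add the following boundary form (a sketch, written with
   Dirichlet data g, outward normal n, and the penalty constant gamma = eta mu / h stored in constants[1]):

     \int_\Gamma [ gamma (u - g) - (mu (\nabla u + {\nabla u}^T) - p I) n ] \cdot v
   - \int_\Gamma mu ((\nabla v + {\nabla v}^T) n) \cdot (u - g)
   + \int_\Gamma q n \cdot (u - g)

   The first line is f0_bd_*_u integrated against v, the second is f1_bd_*_u contracted with \nabla v,
   and the third is f0_bd_*_p integrated against q. */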
337: /* Nitsche boundary Jacobian kernels (solution-independent)
338: g0_bd_uu[c*Nc+d] = delta(c,d) * penalty (penalty Jacobian) */
339: static void g0_bd_uu(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], const PetscReal n[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
340: {
341: const PetscReal penalty = PetscRealPart(constants[1]);
342: const PetscInt Nc = uOff[1] - uOff[0];
343: PetscInt c;
345: for (c = 0; c < Nc; ++c) g0[c * Nc + c] = penalty;
346: }
348: /* g1_bd_uu[(c*Nc+d)*dim+e] = -mu * (delta(c,d)*n[e] + delta(c,e)*n[d]) (consistency Jacobian: df0/du_x) */
349: static void g1_bd_uu(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], const PetscReal n[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g1[])
350: {
351: const PetscReal mu = PetscRealPart(constants[0]);
352: const PetscInt Nc = uOff[1] - uOff[0];
353: PetscInt c, d, e;
355: for (c = 0; c < Nc; ++c)
356: for (d = 0; d < Nc; ++d)
357: for (e = 0; e < dim; ++e) g1[(c * Nc + d) * dim + e] = -mu * ((c == d ? 1.0 : 0.0) * n[e] + (c == e ? 1.0 : 0.0) * n[d]);
358: }
360: /* g2_bd_uu[(c*Nc+d)*dim+e] = -mu * (n[e]*delta(c,d) + n[c]*delta(e,d)) (symmetry Jacobian: df1/du) */
361: static void g2_bd_uu(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], const PetscReal n[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g2[])
362: {
363: const PetscReal mu = PetscRealPart(constants[0]);
364: const PetscInt Nc = uOff[1] - uOff[0];
365: PetscInt c, d, e;
367: for (c = 0; c < Nc; ++c)
368: for (d = 0; d < Nc; ++d)
369: for (e = 0; e < dim; ++e) g2[(c * Nc + d) * dim + e] = -mu * (n[e] * (c == d ? 1.0 : 0.0) + n[c] * (e == d ? 1.0 : 0.0));
370: }
372: /* g0_bd_up[c*1+0] = n[c] (velocity-pressure coupling: df0_u/dp) */
373: static void g0_bd_up(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], const PetscReal n[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
374: {
375: for (PetscInt c = 0; c < dim; ++c) g0[c] = n[c];
376: }
378: /* g0_bd_pu[0*Nc+d] = n[d] (pressure-velocity coupling: df0_p/du) */
379: static void g0_bd_pu(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], const PetscReal n[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
380: {
381: for (PetscInt d = 0; d < dim; ++d) g0[d] = n[d];
382: }
384: static PetscErrorCode ProcessOptions(MPI_Comm comm, AppCtx *options)
385: {
386: PetscInt sol, bc;
388: PetscFunctionBeginUser;
389: options->sol = SOL_QUADRATIC;
390: options->bc = BC_ESSENTIAL;
391: PetscOptionsBegin(comm, "", "Stokes Problem Options", "DMPLEX");
392: sol = options->sol;
393: PetscCall(PetscOptionsEList("-sol", "The MMS solution", "ex62.c", SolTypes, PETSC_STATIC_ARRAY_LENGTH(SolTypes) - 3, SolTypes[options->sol], &sol, NULL));
394: options->sol = (SolType)sol;
395: bc = options->bc;
396: PetscCall(PetscOptionsEList("-bc", "The boundary condition type", "ex62.c", BCTypes, PETSC_STATIC_ARRAY_LENGTH(BCTypes) - 3, BCTypes[options->bc], &bc, NULL));
397: options->bc = (BCType)bc;
398: PetscOptionsEnd();
399: PetscFunctionReturn(PETSC_SUCCESS);
400: }
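/* For example, "-sol trig -bc nitsche" selects the trigonometric MMS solution with the Nitsche (weak
   Dirichlet) boundary treatment; the defaults above correspond to "-sol quadratic -bc essential". */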
402: static PetscErrorCode CreateMesh(MPI_Comm comm, AppCtx *user, DM *dm)
403: {
404: PetscFunctionBeginUser;
405: PetscCall(DMCreate(comm, dm));
406: PetscCall(DMSetType(*dm, DMPLEX));
407: PetscCall(DMSetFromOptions(*dm));
408: PetscCall(DMViewFromOptions(*dm, NULL, "-dm_view"));
409: PetscFunctionReturn(PETSC_SUCCESS);
410: }
412: static PetscErrorCode SetupParameters(MPI_Comm comm, AppCtx *ctx)
413: {
414: Parameter *p;
416: PetscFunctionBeginUser;
417: /* setup PETSc parameter bag */
418: PetscCall(PetscBagCreate(PETSC_COMM_SELF, sizeof(Parameter), &ctx->bag));
419: PetscCall(PetscBagGetData(ctx->bag, (void **)&p));
420: PetscCall(PetscBagSetName(ctx->bag, "par", "Stokes Parameters"));
421: PetscCall(PetscBagRegisterScalar(ctx->bag, &p->mu, 1.0, "mu", "Dynamic Shear Viscosity, Pa s"));
422: PetscCall(PetscBagRegisterScalar(ctx->bag, &p->eta, 100.0, "eta", "Nitsche penalty parameter (dimensionless)"));
423: PetscCall(PetscBagSetFromOptions(ctx->bag));
424: {
425: PetscViewer viewer;
426: PetscViewerFormat format;
427: PetscBool flg;
429: PetscCall(PetscOptionsCreateViewer(comm, NULL, NULL, "-param_view", &viewer, &format, &flg));
430: if (flg) {
431: PetscCall(PetscViewerPushFormat(viewer, format));
432: PetscCall(PetscBagView(ctx->bag, viewer));
433: PetscCall(PetscViewerFlush(viewer));
434: PetscCall(PetscViewerPopFormat(viewer));
435: PetscCall(PetscViewerDestroy(&viewer));
436: }
437: }
438: PetscFunctionReturn(PETSC_SUCCESS);
439: }
441: static PetscErrorCode SetupEqn(DM dm, AppCtx *user)
442: {
443: PetscErrorCode (*exactFuncs[2])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar *, void *);
444: void (*f0_bd_u)(PetscInt, PetscInt, PetscInt, const PetscInt[], const PetscInt[], const PetscScalar[], const PetscScalar[], const PetscScalar[], const PetscInt[], const PetscInt[], const PetscScalar[], const PetscScalar[], const PetscScalar[], PetscReal, const PetscReal[], const PetscReal[], PetscInt, const PetscScalar[], PetscScalar[]);
445: void (*f1_bd_u)(PetscInt, PetscInt, PetscInt, const PetscInt[], const PetscInt[], const PetscScalar[], const PetscScalar[], const PetscScalar[], const PetscInt[], const PetscInt[], const PetscScalar[], const PetscScalar[], const PetscScalar[], PetscReal, const PetscReal[], const PetscReal[], PetscInt, const PetscScalar[], PetscScalar[]);
446: void (*f0_bd_p)(PetscInt, PetscInt, PetscInt, const PetscInt[], const PetscInt[], const PetscScalar[], const PetscScalar[], const PetscScalar[], const PetscInt[], const PetscInt[], const PetscScalar[], const PetscScalar[], const PetscScalar[], PetscReal, const PetscReal[], const PetscReal[], PetscInt, const PetscScalar[], PetscScalar[]);
447: PetscDS ds;
448: DMLabel label;
449: const PetscInt id = 1;
451: PetscFunctionBeginUser;
452: PetscCall(DMGetDS(dm, &ds));
453: switch (user->sol) {
454: case SOL_QUADRATIC:
455: PetscCall(PetscDSSetResidual(ds, 0, f0_quadratic_u, f1_u));
456: exactFuncs[0] = quadratic_u;
457: exactFuncs[1] = quadratic_p;
458: f0_bd_u = f0_bd_nitsche_quadratic_u;
459: f1_bd_u = f1_bd_nitsche_quadratic_u;
460: f0_bd_p = f0_bd_nitsche_quadratic_p;
461: break;
462: case SOL_TRIG:
463: PetscCall(PetscDSSetResidual(ds, 0, f0_trig_u, f1_u));
464: exactFuncs[0] = trig_u;
465: exactFuncs[1] = trig_p;
466: f0_bd_u = f0_bd_nitsche_trig_u;
467: f1_bd_u = f1_bd_nitsche_trig_u;
468: f0_bd_p = f0_bd_nitsche_trig_p;
469: break;
470: default:
471: SETERRQ(PetscObjectComm((PetscObject)dm), PETSC_ERR_ARG_WRONG, "Unsupported solution type: %s (%d)", SolTypes[PetscMin(user->sol, SOL_UNKNOWN)], user->sol);
472: }
473: PetscCall(PetscDSSetResidual(ds, 1, f0_p, NULL));
474: PetscCall(PetscDSSetJacobian(ds, 0, 0, NULL, NULL, NULL, g3_uu));
475: PetscCall(PetscDSSetJacobian(ds, 0, 1, NULL, NULL, g2_up, NULL));
476: PetscCall(PetscDSSetJacobian(ds, 1, 0, NULL, g1_pu, NULL, NULL));
477: PetscCall(PetscDSSetJacobianPreconditioner(ds, 0, 0, NULL, NULL, NULL, g3_uu));
478: PetscCall(PetscDSSetJacobianPreconditioner(ds, 1, 1, g0_pp, NULL, NULL, NULL));
480: PetscCall(PetscDSSetExactSolution(ds, 0, exactFuncs[0], user));
481: PetscCall(PetscDSSetExactSolution(ds, 1, exactFuncs[1], user));
483: PetscCall(DMGetLabel(dm, "marker", &label));
484: switch (user->bc) {
485: case BC_ESSENTIAL:
486: PetscCall(DMAddBoundary(dm, DM_BC_ESSENTIAL, "wall", label, 1, &id, 0, 0, NULL, (PetscVoidFn *)exactFuncs[0], NULL, user, NULL));
487: break;
488: case BC_NITSCHE: {
489: PetscWeakForm wf;
490: DMLabel faceSetsLabel;
491: IS valueIS;
492: const PetscInt *faceSetValues;
493: PetscInt numValues, bd, i;
495: PetscCall(DMGetLabel(dm, "Face Sets", &faceSetsLabel));
496: PetscCall(DMLabelGetNumValues(faceSetsLabel, &numValues));
497: PetscCall(DMLabelGetValueIS(faceSetsLabel, &valueIS));
498: PetscCall(ISGetIndices(valueIS, &faceSetValues));
500: /* Velocity boundary: natural BC with Nitsche terms on all boundary faces */
501: PetscCall(DMAddBoundary(dm, DM_BC_NATURAL, "wall", faceSetsLabel, numValues, faceSetValues, 0, 0, NULL, NULL, NULL, user, &bd));
502: PetscCall(PetscDSGetBoundary(ds, bd, &wf, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL));
503: for (i = 0; i < numValues; ++i) {
504: /* Velocity residual (field 0): f0 and f1 */
505: PetscCall(PetscWeakFormSetIndexBdResidual(wf, faceSetsLabel, faceSetValues[i], 0, 0, 0, f0_bd_u, 0, f1_bd_u));
506: /* Velocity-velocity Jacobian (field 0, field 0): g0 (penalty), g1 (consistency), g2 (symmetry) */
507: PetscCall(PetscWeakFormSetIndexBdJacobian(wf, faceSetsLabel, faceSetValues[i], 0, 0, 0, 0, g0_bd_uu, 0, g1_bd_uu, 0, g2_bd_uu, 0, NULL));
508: /* Velocity-pressure Jacobian (field 0, field 1): g0 (pressure coupling) */
509: PetscCall(PetscWeakFormSetIndexBdJacobian(wf, faceSetsLabel, faceSetValues[i], 0, 1, 0, 0, g0_bd_up, 0, NULL, 0, NULL, 0, NULL));
510: }
512: /* Pressure boundary: natural BC for continuity equation correction */
513: PetscCall(DMAddBoundary(dm, DM_BC_NATURAL, "wall_pres", faceSetsLabel, numValues, faceSetValues, 1, 0, NULL, NULL, NULL, user, &bd));
514: PetscCall(PetscDSGetBoundary(ds, bd, &wf, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL));
515: for (i = 0; i < numValues; ++i) {
516: /* Pressure residual (field 1): f0 */
517: PetscCall(PetscWeakFormSetIndexBdResidual(wf, faceSetsLabel, faceSetValues[i], 1, 0, 0, f0_bd_p, 0, NULL));
518: /* Pressure-velocity Jacobian (field 1, field 0): g0 */
519: PetscCall(PetscWeakFormSetIndexBdJacobian(wf, faceSetsLabel, faceSetValues[i], 1, 0, 0, 0, g0_bd_pu, 0, NULL, 0, NULL, 0, NULL));
520: }
521: PetscCall(ISRestoreIndices(valueIS, &faceSetValues));
522: PetscCall(ISDestroy(&valueIS));
523: } break;
524: default:
525: SETERRQ(PetscObjectComm((PetscObject)dm), PETSC_ERR_ARG_WRONG, "Unsupported BC type: %s (%d)", BCTypes[PetscMin(user->bc, BC_UNKNOWN)], user->bc);
526: }
528: /* Make constant values available to pointwise functions */
529: {
530: Parameter *param;
531: PetscScalar constants[2];
533: PetscCall(PetscBagGetData(user->bag, (void **)&param));
534: constants[0] = param->mu; /* dynamic shear viscosity, Pa s */
535: constants[1] = 0.0; /* Nitsche penalty (set below if needed) */
536: if (user->bc == BC_NITSCHE) {
537: /* Compute cell size h from mesh */
538: PetscInt dim, cStart;
539: PetscReal vol, h;
541: PetscCall(DMGetDimension(dm, &dim));
542: PetscCall(DMPlexGetHeightStratum(dm, 0, &cStart, NULL));
543: PetscCall(DMPlexComputeCellGeometryFVM(dm, cStart, &vol, NULL, NULL));
544: h = PetscPowReal(vol, 1.0 / dim);
545: constants[1] = PetscRealPart(param->eta) * PetscRealPart(param->mu) / h;
546: }
547: PetscCall(PetscDSSetConstants(ds, 2, constants));
548: }
549: PetscFunctionReturn(PETSC_SUCCESS);
550: }
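/* Note that the penalty scale above takes h from the first local cell, which assumes a reasonably
   uniform mesh. A minimal sketch of a global alternative (not used here; ComputeMinCellSize is a
   hypothetical helper) might look like:

     static PetscErrorCode ComputeMinCellSize(DM dm, PetscReal *h)
     {
       PetscReal minVol = PETSC_MAX_REAL, vol;
       PetscInt  dim, cStart, cEnd, c;

       PetscFunctionBeginUser;
       PetscCall(DMGetDimension(dm, &dim));
       PetscCall(DMPlexGetHeightStratum(dm, 0, &cStart, &cEnd));
       // Minimum cell volume over the local cells
       for (c = cStart; c < cEnd; ++c) {
         PetscCall(DMPlexComputeCellGeometryFVM(dm, c, &vol, NULL, NULL));
         minVol = PetscMin(minVol, vol);
       }
       // Reduce over all processes and convert the volume to a length scale
       PetscCallMPI(MPIU_Allreduce(MPI_IN_PLACE, &minVol, 1, MPIU_REAL, MPI_MIN, PetscObjectComm((PetscObject)dm)));
       *h = PetscPowReal(minVol, 1.0 / dim);
       PetscFunctionReturn(PETSC_SUCCESS);
     }
*/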
552: static PetscErrorCode zero(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
553: {
554: for (PetscInt c = 0; c < Nc; ++c) u[c] = 0.0;
555: return PETSC_SUCCESS;
556: }
557: static PetscErrorCode one(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
558: {
559: for (PetscInt c = 0; c < Nc; ++c) u[c] = 1.0;
560: return PETSC_SUCCESS;
561: }
563: static PetscErrorCode CreatePressureNullSpace(DM dm, PetscInt origField, PetscInt field, MatNullSpace *nullspace)
564: {
565: Vec vec;
566: PetscErrorCode (*funcs[2])(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx) = {zero, zero};
568: PetscFunctionBeginUser;
569: PetscCheck(origField == 1, PetscObjectComm((PetscObject)dm), PETSC_ERR_ARG_WRONG, "Field %" PetscInt_FMT " should be 1 for pressure", origField);
570: funcs[field] = one;
571: {
572: PetscDS ds;
573: PetscCall(DMGetDS(dm, &ds));
574: PetscCall(PetscObjectViewFromOptions((PetscObject)ds, NULL, "-ds_view"));
575: }
576: PetscCall(DMCreateGlobalVector(dm, &vec));
577: PetscCall(DMProjectFunction(dm, 0.0, funcs, NULL, INSERT_ALL_VALUES, vec));
578: PetscCall(VecNormalize(vec, NULL));
579: PetscCall(MatNullSpaceCreate(PetscObjectComm((PetscObject)dm), PETSC_FALSE, 1, &vec, nullspace));
580: PetscCall(VecDestroy(&vec));
581: /* New style for field null spaces */
582: {
583: PetscObject pressure;
584: MatNullSpace nullspacePres;
586: PetscCall(DMGetField(dm, field, NULL, &pressure));
587: PetscCall(MatNullSpaceCreate(PetscObjectComm(pressure), PETSC_TRUE, 0, NULL, &nullspacePres));
588: PetscCall(PetscObjectCompose(pressure, "nullspace", (PetscObject)nullspacePres));
589: PetscCall(MatNullSpaceDestroy(&nullspacePres));
590: }
591: PetscFunctionReturn(PETSC_SUCCESS);
592: }
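/* Why the constant-pressure null space exists (a sketch): the pressure only enters the weak form
   through -< \nabla\cdot v, p >, and for any constant c

     -< \nabla\cdot v, p + c > = -< \nabla\cdot v, p > - c \int_\Omega \nabla\cdot v
                               = -< \nabla\cdot v, p > - c \oint_{\partial\Omega} v \cdot n

   where the last term cancels: for essential BCs v vanishes on the boundary, and for the Nitsche BCs
   the p n \cdot v term in f0_bd_*_u adds back exactly c \oint v \cdot n. The vector projected from
   (zero, one) above spans this constant mode. */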
594: static PetscErrorCode SetupProblem(DM dm, PetscErrorCode (*setupEqn)(DM, AppCtx *), AppCtx *user)
595: {
596: DM cdm = dm;
597: PetscQuadrature q = NULL;
598: PetscBool simplex;
599: PetscInt dim, Nf = 2, f, Nc[2];
600: const char *name[2] = {"velocity", "pressure"};
601: const char *prefix[2] = {"vel_", "pres_"};
603: PetscFunctionBegin;
604: PetscCall(DMGetDimension(dm, &dim));
605: PetscCall(DMPlexIsSimplex(dm, &simplex));
606: Nc[0] = dim;
607: Nc[1] = 1;
608: for (f = 0; f < Nf; ++f) {
609: PetscFE fe;
611: PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, Nc[f], simplex, prefix[f], -1, &fe));
612: PetscCall(PetscObjectSetName((PetscObject)fe, name[f]));
613: if (!q) PetscCall(PetscFEGetQuadrature(fe, &q));
614: PetscCall(PetscFESetQuadrature(fe, q));
615: PetscCall(DMSetField(dm, f, NULL, (PetscObject)fe));
616: PetscCall(PetscFEDestroy(&fe));
617: }
618: PetscCall(DMCreateDS(dm));
619: PetscCall((*setupEqn)(dm, user));
620: while (cdm) {
621: PetscCall(DMCopyDisc(dm, cdm));
622: PetscCall(DMSetNullSpaceConstructor(cdm, 1, CreatePressureNullSpace));
623: PetscCall(DMGetCoarseDM(cdm, &cdm));
624: }
625: PetscFunctionReturn(PETSC_SUCCESS);
626: }
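/* The two PetscFE fields above take their approximation spaces from the command line through the
   prefixes "vel_" and "pres_"; for example "-vel_petscspace_degree 2 -pres_petscspace_degree 1"
   (used throughout the tests below) gives the Taylor-Hood P2-P1 (or Q2-Q1) pair, and both fields are
   forced to share a single quadrature rule. */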
628: int main(int argc, char **argv)
629: {
630: SNES snes;
631: DM dm;
632: Vec u;
633: AppCtx user;
635: PetscFunctionBeginUser;
636: PetscCall(PetscInitialize(&argc, &argv, NULL, help));
637: PetscCall(ProcessOptions(PETSC_COMM_WORLD, &user));
638: PetscCall(CreateMesh(PETSC_COMM_WORLD, &user, &dm));
639: PetscCall(SNESCreate(PetscObjectComm((PetscObject)dm), &snes));
640: PetscCall(SNESSetDM(snes, dm));
641: PetscCall(DMSetApplicationContext(dm, &user));
643: PetscCall(SetupParameters(PETSC_COMM_WORLD, &user));
644: PetscCall(SetupProblem(dm, SetupEqn, &user));
645: PetscCall(DMPlexCreateClosureIndex(dm, NULL));
647: PetscCall(DMCreateGlobalVector(dm, &u));
648: PetscCall(DMPlexSetSNESLocalFEM(dm, PETSC_FALSE, &user));
649: PetscCall(SNESSetFromOptions(snes));
650: PetscCall(DMSNESCheckFromOptions(snes, u));
651: PetscCall(PetscObjectSetName((PetscObject)u, "Solution"));
652: {
653: Mat J;
654: MatNullSpace sp;
656: PetscCall(SNESSetUp(snes));
657: PetscCall(CreatePressureNullSpace(dm, 1, 1, &sp));
658: PetscCall(SNESGetJacobian(snes, &J, NULL, NULL, NULL));
659: PetscCall(MatSetNullSpace(J, sp));
660: PetscCall(MatNullSpaceDestroy(&sp));
661: PetscCall(PetscObjectSetName((PetscObject)J, "Jacobian"));
662: PetscCall(MatViewFromOptions(J, NULL, "-J_view"));
663: }
664: PetscCall(SNESSolve(snes, NULL, u));
666: PetscCall(VecDestroy(&u));
667: PetscCall(SNESDestroy(&snes));
668: PetscCall(DMDestroy(&dm));
669: PetscCall(PetscBagDestroy(&user.bag));
670: PetscCall(PetscFinalize());
671: return 0;
672: }
673: /*TEST
675: test:
676: suffix: 2d_p2_p1_check
677: requires: triangle
678: args: -sol quadratic -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
680: test:
681: suffix: 2d_p2_p1_check_parallel
682: nsize: {{2 3 5}}
683: requires: triangle
684: args: -sol quadratic -dm_refine 2 -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
686: test:
687: suffix: 3d_p2_p1_check
688: requires: ctetgen
689: args: -sol quadratic -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
691: test:
692: suffix: 3d_p2_p1_check_parallel
693: nsize: {{2 3 5}}
694: requires: ctetgen
695: args: -sol quadratic -dm_refine 0 -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
697: test:
698: suffix: 2d_p2_p1_conv
699: requires: triangle
700: # Using -dm_refine 3 gives L_2 convergence rate: [3.0, 2.1]
701: args: -sol trig -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 2 -ksp_error_if_not_converged \
702: -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
703: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
704: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
706: test:
707: suffix: 2d_p2_p1_conv_gamg
708: requires: triangle
709: args: -sol trig -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 2 \
710: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \
711: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_explicit_operator_mat_type aij -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_coarse_pc_type svd
713: test:
714: suffix: 3d_p2_p1_conv
715: requires: ctetgen !single
716: # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [2.8, 2.8]
717: args: -sol trig -dm_plex_dim 3 -dm_refine 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 \
718: -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
719: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
720: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
722: test:
723: suffix: 2d_q2_q1_check
724: args: -sol quadratic -dm_plex_simplex 0 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
726: test:
727: suffix: 3d_q2_q1_check
728: args: -sol quadratic -dm_plex_simplex 0 -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
730: test:
731: suffix: 2d_q2_q1_conv
732: # Using -dm_refine 3 -convest_num_refine 1 gives L_2 convergence rate: [3.0, 2.1]
733: args: -sol trig -dm_plex_simplex 0 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 -ksp_error_if_not_converged \
734: -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
735: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
736: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
738: test:
739: suffix: 3d_q2_q1_conv
740: requires: !single
741: # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [2.8, 2.4]
742: args: -sol trig -dm_plex_simplex 0 -dm_plex_dim 3 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 \
743: -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
744: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
745: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
747: test:
748: suffix: 2d_p3_p2_check
749: requires: triangle
750: args: -sol quadratic -vel_petscspace_degree 3 -pres_petscspace_degree 2 -dmsnes_check 0.0001
752: test:
753: suffix: 3d_p3_p2_check
754: requires: ctetgen !single
755: args: -sol quadratic -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 3 -pres_petscspace_degree 2 -dmsnes_check 0.0001
757: test:
758: suffix: 2d_p3_p2_conv
759: requires: triangle
760: # Using -dm_refine 2 gives L_2 convergence rate: [3.8, 3.0]
761: args: -sol trig -vel_petscspace_degree 3 -pres_petscspace_degree 2 -snes_convergence_estimate -convest_num_refine 2 -ksp_error_if_not_converged \
762: -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
763: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
764: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
766: test:
767: suffix: 3d_p3_p2_conv
768: requires: ctetgen long_runtime
769: # Using -dm_refine 1 -convest_num_refine 2 gives L_2 convergence rate: [3.6, 3.9]
770: args: -sol trig -dm_plex_dim 3 -dm_refine 1 -vel_petscspace_degree 3 -pres_petscspace_degree 2 -snes_convergence_estimate -convest_num_refine 2 \
771: -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
772: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
773: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
775: test:
776: suffix: 2d_q1_p0_conv
777: requires: !single
778: # Using -dm_refine 3 gives L_2 convergence rate: [1.9, 1.0]
779: args: -sol quadratic -dm_plex_simplex 0 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -snes_convergence_estimate -convest_num_refine 2 \
780: -ksp_atol 1e-10 -petscds_jac_pre 0 \
781: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \
782: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_explicit_operator_mat_type aij -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_levels_pc_type jacobi -fieldsplit_pressure_mg_coarse_pc_type svd -fieldsplit_pressure_pc_gamg_aggressive_coarsening 0
784: test:
785: suffix: 3d_q1_p0_conv
786: requires: !single
787: # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [1.7, 1.0]
788: args: -sol quadratic -dm_plex_simplex 0 -dm_plex_dim 3 -dm_refine 1 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -snes_convergence_estimate -convest_num_refine 1 \
789: -ksp_atol 1e-10 -petscds_jac_pre 0 \
790: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \
791: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_explicit_operator_mat_type aij -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_levels_pc_type jacobi -fieldsplit_pressure_mg_coarse_pc_type svd -fieldsplit_pressure_pc_gamg_aggressive_coarsening 0
793: # Stokes preconditioners
794: # Block diagonal \begin{pmatrix} A & 0 \\ 0 & I \end{pmatrix}
795: test:
796: suffix: 2d_p2_p1_block_diagonal
797: requires: triangle
798: args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
799: -snes_error_if_not_converged \
800: -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-4 -ksp_error_if_not_converged \
801: -pc_type fieldsplit -pc_fieldsplit_type additive -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi
802: output_file: output/empty.out
803: # Block triangular \begin{pmatrix} A & B \\ 0 & I \end{pmatrix}
804: test:
805: suffix: 2d_p2_p1_block_triangular
806: requires: triangle
807: args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
808: -snes_error_if_not_converged \
809: -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \
810: -pc_type fieldsplit -pc_fieldsplit_type multiplicative -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi
811: output_file: output/empty.out
812: # Diagonal Schur complement \begin{pmatrix} A & 0 \\ 0 & S \end{pmatrix}
813: test:
814: suffix: 2d_p2_p1_schur_diagonal
815: requires: triangle
816: args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
817: -snes_error_if_not_converged \
818: -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
819: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type diag -pc_fieldsplit_off_diag_use_amat \
820: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
821: output_file: output/empty.out
822: # Upper triangular Schur complement \begin{pmatrix} A & B \\ 0 & S \end{pmatrix}
823: test:
824: suffix: 2d_p2_p1_schur_upper
825: requires: triangle
826: args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001 \
827: -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
828: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type upper -pc_fieldsplit_off_diag_use_amat \
829: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
830: # Lower triangular Schur complement \begin{pmatrix} A & 0 \\ B^T & S \end{pmatrix}
831: test:
832: suffix: 2d_p2_p1_schur_lower
833: requires: triangle
834: args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
835: -snes_error_if_not_converged \
836: -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
837: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type lower -pc_fieldsplit_off_diag_use_amat \
838: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
839: output_file: output/empty.out
840: # Full Schur complement \begin{pmatrix} I & 0 \\ B^T A^{-1} & I \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & S \end{pmatrix} \begin{pmatrix} I & A^{-1} B \\ 0 & I \end{pmatrix}
841: test:
842: suffix: 2d_p2_p1_schur_full
843: requires: triangle
844: args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
845: -snes_error_if_not_converged \
846: -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
847: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full -pc_fieldsplit_off_diag_use_amat \
848: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
849: output_file: output/empty.out
850: # Full Schur + Velocity GMG
851: test:
852: suffix: 2d_p2_p1_gmg_vcycle
853: TODO: broken (requires subDM hooks)
854: requires: triangle
855: args: -sol quadratic -dm_refine_hierarchy 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
856: -ksp_type fgmres -ksp_atol 1e-9 -snes_error_if_not_converged -pc_use_amat \
857: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_off_diag_use_amat \
858: -fieldsplit_velocity_pc_type mg -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_pc_gamg_esteig_ksp_max_it 10 -fieldsplit_pressure_mg_levels_pc_type sor -fieldsplit_pressure_mg_coarse_pc_type svd
859: # SIMPLE \begin{pmatrix} I & 0 \\ B^T A^{-1} & I \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & B^T diag(A)^{-1} B \end{pmatrix} \begin{pmatrix} I & diag(A)^{-1} B \\ 0 & I \end{pmatrix}
860: test:
861: suffix: 2d_p2_p1_simple
862: requires: triangle
863: args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
864: -snes_error_if_not_converged \
865: -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \
866: -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full \
867: -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi \
868: -fieldsplit_pressure_inner_ksp_type preonly -fieldsplit_pressure_inner_pc_type jacobi -fieldsplit_pressure_upper_ksp_type preonly -fieldsplit_pressure_upper_pc_type jacobi
869: output_file: output/empty.out
870: # FETI-DP solvers (these solvers are quite inefficient, they are here to exercise the code)
871: test:
872: suffix: 2d_p2_p1_fetidp
873: requires: triangle mumps
874: nsize: 5
875: args: -sol quadratic -dm_refine 2 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
876: -snes_error_if_not_converged \
877: -ksp_type fetidp -ksp_rtol 1.0e-8 \
878: -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
879: -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 200 -fetidp_fieldsplit_p_pc_type none \
880: -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type mumps -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type mumps -fetidp_fieldsplit_lag_ksp_type preonly
881: output_file: output/empty.out
882: test:
883: suffix: 2d_q2_q1_fetidp
884: requires: mumps
885: nsize: 5
886: args: -sol quadratic -dm_plex_simplex 0 -dm_refine 2 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
887: -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_error_if_not_converged \
888: -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
889: -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 200 -fetidp_fieldsplit_p_pc_type none \
890: -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type mumps -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type mumps -fetidp_fieldsplit_lag_ksp_type preonly
891: output_file: output/empty.out
892: test:
893: suffix: 3d_p2_p1_fetidp
894: requires: ctetgen mumps suitesparse
895: nsize: 5
896: args: -sol quadratic -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
897: -snes_error_if_not_converged \
898: -ksp_type fetidp -ksp_rtol 1.0e-9 \
899: -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
900: -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 1000 -fetidp_fieldsplit_p_pc_type none \
901: -fetidp_bddc_pc_bddc_use_deluxe_scaling -fetidp_bddc_pc_bddc_benign_trick -fetidp_bddc_pc_bddc_deluxe_singlemat \
902: -fetidp_pc_discrete_harmonic -fetidp_harmonic_pc_factor_mat_solver_type petsc -fetidp_harmonic_pc_type cholesky \
903: -fetidp_bddelta_pc_factor_mat_solver_type umfpack -fetidp_fieldsplit_lag_ksp_type preonly -fetidp_bddc_sub_schurs_mat_solver_type mumps -fetidp_bddc_sub_schurs_mat_mumps_icntl_14 100000 \
904: -fetidp_bddelta_pc_factor_mat_ordering_type external \
905: -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type umfpack -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type umfpack \
906: -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_ordering_type external -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_ordering_type external
907: output_file: output/empty.out
908: test:
909: suffix: 3d_q2_q1_fetidp
910: requires: suitesparse
911: nsize: 5
912: args: -sol quadratic -dm_plex_simplex 0 -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
913: -snes_error_if_not_converged \
914: -ksp_type fetidp -ksp_rtol 1.0e-8 \
915: -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
916: -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 2000 -fetidp_fieldsplit_p_pc_type none \
917: -fetidp_pc_discrete_harmonic -fetidp_harmonic_pc_factor_mat_solver_type petsc -fetidp_harmonic_pc_type cholesky \
918: -fetidp_bddc_pc_bddc_symmetric -fetidp_fieldsplit_lag_ksp_type preonly \
919: -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type umfpack -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type umfpack \
920: -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_ordering_type external -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_ordering_type external
921: output_file: output/empty.out
922: # BDDC solvers (these solvers are quite inefficient, they are here to exercise the code)
923: test:
924: suffix: 2d_p2_p1_bddc
925: nsize: 2
926: requires: triangle !single
927: args: -sol quadratic -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
928: -snes_error_if_not_converged \
929: -ksp_type gmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-8 -ksp_error_if_not_converged \
930: -pc_type bddc -pc_bddc_corner_selection -pc_bddc_dirichlet_pc_type svd -pc_bddc_neumann_pc_type svd -pc_bddc_coarse_redundant_pc_type svd
931: output_file: output/empty.out
932: # Vanka
933: test:
934: suffix: 2d_q1_p0_vanka
935: output_file: output/empty.out
936: requires: double !complex
937: args: -sol quadratic -dm_plex_simplex 0 -dm_refine 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
938: -snes_rtol 1.0e-4 \
939: -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
940: -pc_type patch -pc_patch_partition_of_unity 0 -pc_patch_construct_codim 0 -pc_patch_construct_type vanka \
941: -sub_ksp_type preonly -sub_pc_type lu
942: test:
943: suffix: 2d_q1_p0_vanka_denseinv
944: output_file: output/empty.out
945: requires: double !complex
946: args: -sol quadratic -dm_plex_simplex 0 -dm_refine 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
947: -snes_rtol 1.0e-4 \
948: -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
949: -pc_type patch -pc_patch_partition_of_unity 0 -pc_patch_construct_codim 0 -pc_patch_construct_type vanka \
950: -pc_patch_dense_inverse -pc_patch_sub_mat_type seqdense
951: # Vanka smoother
952: test:
953: suffix: 2d_q1_p0_gmg_vanka
954: output_file: output/empty.out
955: requires: double !complex
956: args: -sol quadratic -dm_plex_simplex 0 -dm_refine_hierarchy 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
957: -snes_rtol 1.0e-4 \
958: -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
959: -pc_type mg \
960: -mg_levels_ksp_type gmres -mg_levels_ksp_max_it 30 \
961: -mg_levels_pc_type patch -mg_levels_pc_patch_partition_of_unity 0 -mg_levels_pc_patch_construct_codim 0 -mg_levels_pc_patch_construct_type vanka \
962: -mg_levels_sub_ksp_type preonly -mg_levels_sub_pc_type lu \
963: -mg_coarse_pc_type svd
964: # Nitsche BC consistency check
965: test:
966: suffix: 2d_q2_q1_nitsche_check
967: requires: double !complex
968: args: -sol quadratic -bc nitsche -dm_plex_simplex 0 -dm_refine 1 \
969: -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
970: # Nitsche BC + Vanka
971: test:
972: suffix: 2d_q1_p0_nitsche_vanka
973: output_file: output/empty.out
974: requires: double !complex
975: args: -sol quadratic -bc nitsche -dm_plex_simplex 0 -dm_refine 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
976: -snes_rtol 1.0e-4 \
977: -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
978: -pc_type patch -pc_patch_partition_of_unity 0 -pc_patch_construct_codim 0 -pc_patch_construct_type vanka \
979: -sub_ksp_type preonly -sub_pc_type lu
980: # Nitsche BC + GMRES (sanity check that Nitsche formulation solves correctly)
981: test:
982: suffix: 2d_q2_q1_nitsche_gmres
983: output_file: output/empty.out
984: requires: double !complex
985: args: -sol quadratic -bc nitsche -dm_plex_simplex 0 -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
986: -snes_error_if_not_converged \
987: -ksp_type gmres -ksp_rtol 1e-12 -pc_type jacobi
989: TEST*/