Actual source code: ex76.c
1: #include <petscksp.h>
2: #include <petsc/private/petscimpl.h>
4: static char help[] = "Solves a linear system using PCHPDDM.\n\n";
6: int main(int argc, char **args)
7: {
8: Vec b; /* computed solution and RHS */
9: Mat A, aux, X, B; /* linear system matrix */
10: KSP ksp; /* linear solver context */
11: PC pc;
12: IS is, sizes;
13: const PetscInt *idx;
14: PetscMPIInt rank, size;
15: PetscInt m, N = 1;
16: PetscLayout map;
17: PetscViewer viewer;
18: char dir[PETSC_MAX_PATH_LEN], name[PETSC_MAX_PATH_LEN], type[256];
19: PetscBool3 share = PETSC_BOOL3_UNKNOWN;
20: PetscBool flg, set, transpose = PETSC_FALSE;
22: PetscFunctionBeginUser;
23: PetscCall(PetscInitialize(&argc, &args, NULL, help));
24: PetscCall(PetscLogDefaultBegin());
25: PetscCallMPI(MPI_Comm_size(PETSC_COMM_WORLD, &size));
26: PetscCheck(size == 4, PETSC_COMM_WORLD, PETSC_ERR_WRONG_MPI_SIZE, "This example requires 4 processes");
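/* one configuration exercised by the TEST block at the bottom of this file (the data files are assumed to live in ${DATAFILESPATH}/matrices/hpddm/GENEO):
     mpiexec -n 4 ./ex76 -ksp_rtol 1e-3 -ksp_converged_reason -pc_type hpddm -pc_hpddm_coarse_sub_pc_type lu -sub_pc_type lu -load_dir ${DATAFILESPATH}/matrices/hpddm/GENEO */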
27: PetscCall(PetscOptionsGetInt(NULL, NULL, "-rhs", &N, NULL));
28: PetscCallMPI(MPI_Comm_rank(PETSC_COMM_WORLD, &rank));
29: PetscCall(MatCreate(PETSC_COMM_WORLD, &A));
30: PetscCall(PetscStrncpy(dir, ".", sizeof(dir)));
31: PetscCall(PetscOptionsGetString(NULL, NULL, "-load_dir", dir, sizeof(dir), NULL));
32: /* loading matrices */
33: PetscCall(PetscSNPrintf(name, sizeof(name), "%s/sizes_%d.dat", dir, size));
34: PetscCall(PetscViewerBinaryOpen(PETSC_COMM_WORLD, name, FILE_MODE_READ, &viewer));
35: PetscCall(ISCreate(PETSC_COMM_WORLD, &sizes));
36: PetscCall(ISLoad(sizes, viewer));
37: PetscCall(ISGetIndices(sizes, &idx));
38: PetscCall(MatSetSizes(A, idx[0], idx[1], idx[2], idx[3]));
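/* idx[0], idx[1]: local row/column sizes of A; idx[2], idx[3]: global sizes of A; idx[4]: local size of the unassembled Neumann operator loaded further below */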
39: PetscCall(MatCreate(PETSC_COMM_WORLD, &X));
40: PetscCall(MatSetSizes(X, idx[4], idx[4], PETSC_DETERMINE, PETSC_DETERMINE));
41: PetscCall(MatSetUp(X));
42: PetscCall(ISRestoreIndices(sizes, &idx));
43: PetscCall(ISDestroy(&sizes));
44: PetscCall(PetscViewerDestroy(&viewer));
45: PetscCall(PetscSNPrintf(name, sizeof(name), "%s/A.dat", dir));
46: PetscCall(PetscViewerBinaryOpen(PETSC_COMM_WORLD, name, FILE_MODE_READ, &viewer));
47: PetscCall(MatLoad(A, viewer));
48: PetscCall(PetscViewerDestroy(&viewer));
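/* load the index set mapping the local overlapping subdomain unknowns to the global numbering of A */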
49: PetscCall(PetscSNPrintf(name, sizeof(name), "%s/is_%d.dat", dir, size));
50: PetscCall(PetscViewerBinaryOpen(PETSC_COMM_WORLD, name, FILE_MODE_READ, &viewer));
51: PetscCall(ISCreate(PETSC_COMM_WORLD, &sizes));
52: PetscCall(MatGetLayouts(X, &map, NULL));
53: PetscCall(ISSetLayout(sizes, map));
54: PetscCall(ISLoad(sizes, viewer));
55: PetscCall(ISGetLocalSize(sizes, &m));
56: PetscCall(ISGetIndices(sizes, &idx));
57: PetscCall(ISCreateGeneral(PETSC_COMM_SELF, m, idx, PETSC_COPY_VALUES, &is));
58: PetscCall(ISRestoreIndices(sizes, &idx));
59: PetscCall(ISDestroy(&sizes));
60: PetscCall(MatGetBlockSize(A, &m));
61: PetscCall(ISSetBlockSize(is, m));
62: PetscCall(PetscViewerDestroy(&viewer));
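/* load the unassembled (Neumann) operator and keep its local diagonal block as the auxiliary Mat handed to PCHPDDM */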
63: PetscCall(PetscSNPrintf(name, sizeof(name), "%s/Neumann_%d.dat", dir, size));
64: PetscCall(PetscViewerBinaryOpen(PETSC_COMM_WORLD, name, FILE_MODE_READ, &viewer));
65: PetscCall(MatLoad(X, viewer));
66: PetscCall(PetscViewerDestroy(&viewer));
67: PetscCall(MatGetDiagonalBlock(X, &B));
68: PetscCall(MatDuplicate(B, MAT_COPY_VALUES, &aux));
69: PetscCall(MatDestroy(&X));
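/* optional -sort: sort the IS and permute the auxiliary Mat accordingly; -reset_is_block_size then resets the block size of the IS to 1 */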
70: flg = PETSC_FALSE;
71: PetscCall(PetscOptionsGetBool(NULL, NULL, "-sort", &flg, NULL));
72: if (flg) {
73: Mat B;
74: IS perm;
76: PetscCall(ISSortPermutation(is, PETSC_FALSE, &perm));
77: PetscCall(ISSort(is));
78: PetscCall(MatPermute(aux, perm, perm, &B));
79: PetscCall(ISDestroy(&perm));
80: PetscCall(MatDestroy(&aux));
81: aux = B;
82: PetscCall(PetscOptionsGetBool(NULL, NULL, "-reset_is_block_size", &flg, NULL));
83: if (flg) {
84: PetscCall(ISSetBlockSize(is, 1));
85: flg = PETSC_FALSE;
86: }
87: }
88: PetscCall(PetscOptionsGetBool(NULL, NULL, "-pc_hpddm_levels_1_st_share_sub_ksp", &flg, &set));
89: if (flg) { /* PETSc LU/Cholesky struggles numerically for bs > 1 */
90: /* only set the proper bs for the geneo_share_* tests, 1 otherwise */
91: PetscCall(MatSetBlockSizesFromMats(aux, A, A));
92: share = PETSC_BOOL3_TRUE;
93: } else if (set) share = PETSC_BOOL3_FALSE;
94: PetscCall(MatSetOption(A, MAT_SYMMETRIC, PETSC_TRUE));
95: PetscCall(MatSetOption(aux, MAT_SYMMETRIC, PETSC_TRUE));
96: /* ready for testing */
97: PetscOptionsBegin(PETSC_COMM_WORLD, "", "", "");
98: PetscCall(PetscStrncpy(type, MATAIJ, sizeof(type)));
99: PetscCall(PetscOptionsFList("-mat_type", "Matrix type", "MatSetType", MatList, type, type, 256, &flg));
100: PetscOptionsEnd();
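/* optionally convert A and the auxiliary Mat to the -mat_type requested on the command line (the tests below use aij, baij, sbaij, and aijcusparse) */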
101: PetscCall(MatConvert(A, type, MAT_INPLACE_MATRIX, &A));
102: PetscCall(MatConvert(aux, type, MAT_INPLACE_MATRIX, &aux));
103: PetscCall(KSPCreate(PETSC_COMM_WORLD, &ksp));
104: PetscCall(KSPSetOperators(ksp, A, A));
105: PetscCall(KSPGetPC(ksp, &pc));
106: PetscCall(PCSetType(pc, PCHPDDM));
107: #if defined(PETSC_HAVE_HPDDM) && defined(PETSC_HAVE_DYNAMIC_LIBRARIES) && defined(PETSC_USE_SHARED_LIBRARIES)
108: flg = PETSC_FALSE;
109: PetscCall(PetscOptionsGetBool(NULL, NULL, "-reset", &flg, NULL));
110: if (flg) {
111: PetscCall(PetscOptionsSetValue(NULL, "-pc_hpddm_block_splitting", "true"));
112: PetscCall(PCSetFromOptions(pc));
113: PetscCall(PCSetUp(pc));
114: PetscCall(PetscOptionsClearValue(NULL, "-pc_hpddm_block_splitting"));
115: }
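/* supply the overlapping IS and the local auxiliary (Neumann) Mat that PCHPDDM uses to set up the generalized eigenvalue problems of its coarse space */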
116: PetscCall(PCHPDDMSetAuxiliaryMat(pc, is, aux, NULL, NULL));
117: PetscCall(PCHPDDMHasNeumannMat(pc, PETSC_FALSE)); /* PETSC_TRUE is fine as well, just testing */
118: if (share != PETSC_BOOL3_UNKNOWN) PetscCall(PCHPDDMSetSTShareSubKSP(pc, PetscBool3ToBool(share)));
119: flg = PETSC_FALSE;
120: PetscCall(PetscOptionsGetBool(NULL, NULL, "-set_rhs", &flg, NULL));
121: if (flg) { /* user-provided RHS for concurrent generalized eigenvalue problems */
122: Mat a, c, P; /* usually assembled automatically in PCHPDDM, this is solely for testing PCHPDDMSetRHSMat() */
123: PetscInt rstart, rend, location;
125: PetscCall(MatDuplicate(aux, MAT_DO_NOT_COPY_VALUES, &B)); /* duplicate so that MatStructure is SAME_NONZERO_PATTERN */
126: PetscCall(MatGetDiagonalBlock(A, &a));
127: PetscCall(MatGetOwnershipRange(A, &rstart, &rend));
128: PetscCall(ISGetLocalSize(is, &m));
129: PetscCall(MatCreateSeqAIJ(PETSC_COMM_SELF, rend - rstart, m, 1, NULL, &P));
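/* P is a local selection matrix with a single 1 per row: local row i of A maps to the position of that row in the overlapping IS, so that the PtAP below pads the diagonal block of A with zeros on the overlap */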
130: for (m = rstart; m < rend; ++m) {
131: PetscCall(ISLocate(is, m, &location));
132: PetscCheck(location >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "IS of the auxiliary Mat does not include all local rows of A");
133: PetscCall(MatSetValue(P, m - rstart, location, 1.0, INSERT_VALUES));
134: }
135: PetscCall(MatAssemblyBegin(P, MAT_FINAL_ASSEMBLY));
136: PetscCall(MatAssemblyEnd(P, MAT_FINAL_ASSEMBLY));
137: PetscCall(PetscObjectTypeCompare((PetscObject)a, MATSEQAIJ, &flg));
138: if (flg) PetscCall(MatPtAP(a, P, MAT_INITIAL_MATRIX, 1.0, &X)); // MatPtAP() is used to extend diagonal blocks with zeros on the overlap
139: else { // workaround for MatPtAP() limitations with some types
140: PetscCall(MatConvert(a, MATSEQAIJ, MAT_INITIAL_MATRIX, &c));
141: PetscCall(MatPtAP(c, P, MAT_INITIAL_MATRIX, 1.0, &X));
142: PetscCall(MatDestroy(&c));
143: }
144: PetscCall(MatDestroy(&P));
145: PetscCall(MatAXPY(B, 1.0, X, SUBSET_NONZERO_PATTERN));
146: PetscCall(MatDestroy(&X));
147: PetscCall(MatSetOption(B, MAT_SYMMETRIC, PETSC_TRUE));
148: PetscCall(PCHPDDMSetRHSMat(pc, B));
149: PetscCall(MatDestroy(&B));
150: }
151: #else
152: (void)share;
153: #endif
154: PetscCall(MatDestroy(&aux));
155: PetscCall(KSPSetFromOptions(ksp));
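/* if -pc_type asm was selected at runtime, reuse the overlapping IS (together with the locally owned rows) to define the local subdomains */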
156: PetscCall(PetscObjectTypeCompare((PetscObject)pc, PCASM, &flg));
157: if (flg) {
158: flg = PETSC_FALSE;
159: PetscCall(PetscOptionsGetBool(NULL, NULL, "-pc_hpddm_define_subdomains", &flg, NULL));
160: if (flg) {
161: IS rows;
163: PetscCall(MatGetOwnershipIS(A, &rows, NULL));
164: PetscCall(PCASMSetLocalSubdomains(pc, 1, &is, &rows));
165: PetscCall(ISDestroy(&rows));
166: }
167: }
168: PetscCall(ISDestroy(&is));
169: PetscCall(MatCreateVecs(A, NULL, &b));
170: PetscCall(VecSet(b, 1.0));
171: PetscCall(PetscOptionsGetBool(NULL, NULL, "-transpose", &transpose, NULL));
172: if (!transpose) PetscCall(KSPSolve(ksp, b, b));
173: else {
174: PetscCall(KSPSolveTranspose(ksp, b, b));
175: set = PETSC_FALSE;
176: PetscCall(PetscOptionsGetBool(NULL, NULL, "-ksp_use_explicittranspose", &set, NULL));
177: if (set) PetscCall(KSPSetOperators(ksp, A, A)); /* -ksp_use_explicittranspose does not cache the initial Mat and will transpose the explicit transpose again if not set back to the original Mat */
178: }
179: PetscCall(VecGetLocalSize(b, &m));
180: PetscCall(VecDestroy(&b));
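/* with -rhs N and N > 1, solve the same system for N random right-hand sides at once through KSPMatSolve() */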
181: if (N > 1) {
182: KSPType type;
183: VecType vt;
185: PetscCall(PetscOptionsClearValue(NULL, "-ksp_converged_reason"));
186: PetscCall(KSPSetFromOptions(ksp));
187: PetscCall(MatGetVecType(A, &vt));
188: PetscCall(MatCreateDenseFromVecType(PETSC_COMM_WORLD, vt, m, PETSC_DECIDE, PETSC_DECIDE, N, PETSC_DECIDE, NULL, &B));
189: PetscCall(MatCreateDenseFromVecType(PETSC_COMM_WORLD, vt, m, PETSC_DECIDE, PETSC_DECIDE, N, PETSC_DECIDE, NULL, &X));
190: PetscCall(MatSetRandom(B, NULL));
191: /* this is algorithmically optimal in the sense that blocks of vectors are coarsened or interpolated using matrix--matrix operations */
192: /* PCHPDDM however heavily relies on MPI[S]BAIJ format for which there is no efficient MatProduct implementation */
193: if (!transpose) PetscCall(KSPMatSolve(ksp, B, X));
194: else {
195: PetscCall(KSPMatSolveTranspose(ksp, B, X));
196: if (set) PetscCall(KSPSetOperators(ksp, A, A)); /* same as in the prior KSPSolveTranspose() */
197: }
198: PetscCall(KSPGetType(ksp, &type));
199: PetscCall(PetscStrcmp(type, KSPHPDDM, &flg));
200: #if defined(PETSC_HAVE_HPDDM)
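/* for pseudo-block KSPHPDDM types, solving column by column (batch size 1) must match the blocked solve up to a small multiple of machine precision */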
201: if (flg) {
202: PetscReal norm;
203: KSPHPDDMType type;
205: PetscCall(KSPHPDDMGetType(ksp, &type));
206: if (type == KSP_HPDDM_TYPE_PREONLY || type == KSP_HPDDM_TYPE_CG || type == KSP_HPDDM_TYPE_GMRES || type == KSP_HPDDM_TYPE_GCRODR) {
207: Mat C;
209: PetscCall(MatDuplicate(X, MAT_DO_NOT_COPY_VALUES, &C));
210: PetscCall(KSPSetMatSolveBatchSize(ksp, 1));
211: if (!transpose) PetscCall(KSPMatSolve(ksp, B, C));
212: else {
213: PetscCall(KSPMatSolveTranspose(ksp, B, C));
214: if (set) PetscCall(KSPSetOperators(ksp, A, A)); /* same as in the prior KSPMatSolveTranspose() */
215: }
216: PetscCall(MatAYPX(C, -1.0, X, SAME_NONZERO_PATTERN));
217: PetscCall(MatNorm(C, NORM_INFINITY, &norm));
218: PetscCall(MatDestroy(&C));
219: PetscCheck(norm <= 100 * PETSC_MACHINE_EPSILON, PetscObjectComm((PetscObject)pc), PETSC_ERR_PLIB, "KSPMatSolve%s() and KSPSolve%s() difference has nonzero norm %g with pseudo-block KSPHPDDMType %s", (transpose ? "Transpose" : ""), (transpose ? "Transpose" : ""), (double)norm, KSPHPDDMTypes[type]);
220: }
221: }
222: #endif
223: PetscCall(MatDestroy(&X));
224: PetscCall(MatDestroy(&B));
225: }
226: PetscCall(PetscObjectTypeCompare((PetscObject)pc, PCHPDDM, &flg));
227: #if defined(PETSC_HAVE_HPDDM) && defined(PETSC_HAVE_DYNAMIC_LIBRARIES) && defined(PETSC_USE_SHARED_LIBRARIES)
228: if (flg) PetscCall(PCHPDDMGetSTShareSubKSP(pc, &flg));
229: #endif
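/* when -pc_hpddm_levels_1_st_share_sub_ksp is effective, the symbolic factorization is reused, so the logged numerical factorization count must exceed the symbolic one */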
230: if (flg && PetscDefined(USE_LOG)) {
231: PetscCall(PetscOptionsHasName(NULL, NULL, "-pc_hpddm_harmonic_overlap", &flg));
232: if (!flg) {
233: PetscLogEvent event;
234: PetscEventPerfInfo info1, info2;
236: PetscCall(PetscLogEventRegister("MatLUFactorSym", PC_CLASSID, &event));
237: PetscCall(PetscLogEventGetPerfInfo(PETSC_DETERMINE, event, &info1));
238: PetscCall(PetscLogEventRegister("MatLUFactorNum", PC_CLASSID, &event));
239: PetscCall(PetscLogEventGetPerfInfo(PETSC_DETERMINE, event, &info2));
240: if (!info1.count && !info2.count) {
241: PetscCall(PetscLogEventRegister("MatCholFctrSym", PC_CLASSID, &event));
242: PetscCall(PetscLogEventGetPerfInfo(PETSC_DETERMINE, event, &info1));
243: PetscCall(PetscLogEventRegister("MatCholFctrNum", PC_CLASSID, &event));
244: PetscCall(PetscLogEventGetPerfInfo(PETSC_DETERMINE, event, &info2));
245: PetscCheck(info2.count > info1.count, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Cholesky numerical factorization (%d) not called more times than Cholesky symbolic factorization (%d), broken -pc_hpddm_levels_1_st_share_sub_ksp", info2.count, info1.count);
246: } else PetscCheck(info2.count > info1.count, PETSC_COMM_SELF, PETSC_ERR_PLIB, "LU numerical factorization (%d) not called more times than LU symbolic factorization (%d), broken -pc_hpddm_levels_1_st_share_sub_ksp", info2.count, info1.count);
247: }
248: }
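/* with -successive_solves, reload the auxiliary data, mark the operators as changed, and solve twice more: the convergence reason and iteration counts must essentially match the first solve */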
249: #if defined(PETSC_HAVE_HPDDM) && defined(PETSC_HAVE_DYNAMIC_LIBRARIES) && defined(PETSC_USE_SHARED_LIBRARIES)
250: if (N == 1) {
251: flg = PETSC_FALSE;
252: PetscCall(PetscOptionsGetBool(NULL, NULL, "-successive_solves", &flg, NULL));
253: if (flg) {
254: KSPConvergedReason reason[2];
255: PetscInt iterations[3];
257: PetscCall(KSPGetConvergedReason(ksp, reason));
258: PetscCall(KSPGetTotalIterations(ksp, iterations));
259: PetscCall(PetscOptionsClearValue(NULL, "-ksp_converged_reason"));
260: PetscCall(KSPSetFromOptions(ksp));
261: flg = PETSC_FALSE;
262: PetscCall(PetscOptionsGetBool(NULL, NULL, "-pc_hpddm_block_splitting", &flg, NULL));
263: if (!flg) {
264: PetscCall(PetscSNPrintf(name, sizeof(name), "%s/sizes_%d.dat", dir, size));
265: PetscCall(PetscViewerBinaryOpen(PETSC_COMM_WORLD, name, FILE_MODE_READ, &viewer));
266: PetscCall(ISCreate(PETSC_COMM_WORLD, &sizes));
267: PetscCall(ISLoad(sizes, viewer));
268: PetscCall(ISGetIndices(sizes, &idx));
269: PetscCall(MatCreate(PETSC_COMM_WORLD, &X));
270: PetscCall(MatSetSizes(X, idx[4], idx[4], PETSC_DETERMINE, PETSC_DETERMINE));
271: PetscCall(MatSetUp(X));
272: PetscCall(ISRestoreIndices(sizes, &idx));
273: PetscCall(ISDestroy(&sizes));
274: PetscCall(PetscViewerDestroy(&viewer));
275: PetscCall(PetscSNPrintf(name, sizeof(name), "%s/is_%d.dat", dir, size));
276: PetscCall(PetscViewerBinaryOpen(PETSC_COMM_WORLD, name, FILE_MODE_READ, &viewer));
277: PetscCall(ISCreate(PETSC_COMM_WORLD, &sizes));
278: PetscCall(MatGetLayouts(X, &map, NULL));
279: PetscCall(ISSetLayout(sizes, map));
280: PetscCall(ISLoad(sizes, viewer));
281: PetscCall(ISGetLocalSize(sizes, &m));
282: PetscCall(ISGetIndices(sizes, &idx));
283: PetscCall(ISCreateGeneral(PETSC_COMM_SELF, m, idx, PETSC_COPY_VALUES, &is));
284: PetscCall(ISRestoreIndices(sizes, &idx));
285: PetscCall(ISDestroy(&sizes));
286: PetscCall(MatGetBlockSize(A, &m));
287: PetscCall(ISSetBlockSize(is, m));
288: PetscCall(PetscViewerDestroy(&viewer));
289: PetscCall(PetscSNPrintf(name, sizeof(name), "%s/Neumann_%d.dat", dir, size));
290: PetscCall(PetscViewerBinaryOpen(PETSC_COMM_WORLD, name, FILE_MODE_READ, &viewer));
291: PetscCall(MatLoad(X, viewer));
292: PetscCall(PetscViewerDestroy(&viewer));
293: PetscCall(MatGetDiagonalBlock(X, &B));
294: PetscCall(MatDuplicate(B, MAT_COPY_VALUES, &aux));
295: PetscCall(MatDestroy(&X));
296: PetscCall(MatSetBlockSizesFromMats(aux, A, A));
297: PetscCall(MatSetOption(aux, MAT_SYMMETRIC, PETSC_TRUE));
298: PetscCall(MatConvert(aux, type, MAT_INPLACE_MATRIX, &aux));
299: }
300: PetscCall(MatCreateVecs(A, NULL, &b));
301: PetscCall(PetscObjectStateIncrease((PetscObject)A));
302: if (!flg) PetscCall(PCHPDDMSetAuxiliaryMat(pc, NULL, aux, NULL, NULL));
303: PetscCall(VecSet(b, 1.0));
304: if (!transpose) PetscCall(KSPSolve(ksp, b, b));
305: else PetscCall(KSPSolveTranspose(ksp, b, b));
306: PetscCall(KSPGetConvergedReason(ksp, reason + 1));
307: PetscCall(KSPGetTotalIterations(ksp, iterations + 1));
308: iterations[1] -= iterations[0];
309: PetscCheck(reason[0] == reason[1] && PetscAbs(iterations[0] - iterations[1]) <= 3, PetscObjectComm((PetscObject)ksp), PETSC_ERR_PLIB, "Successive calls to KSPSolve%s() did not converge for the same reason (%s v. %s) or with the same number of iterations (+/- 3, %" PetscInt_FMT " v. %" PetscInt_FMT ")", (transpose ? "Transpose" : ""), KSPConvergedReasons[reason[0]], KSPConvergedReasons[reason[1]], iterations[0], iterations[1]);
310: PetscCall(PetscObjectStateIncrease((PetscObject)A));
311: if (!flg) PetscCall(PCHPDDMSetAuxiliaryMat(pc, is, aux, NULL, NULL));
312: PetscCall(PCSetFromOptions(pc));
313: PetscCall(VecSet(b, 1.0));
314: if (!transpose) PetscCall(KSPSolve(ksp, b, b));
315: else PetscCall(KSPSolveTranspose(ksp, b, b));
316: PetscCall(KSPGetConvergedReason(ksp, reason + 1));
317: PetscCall(KSPGetTotalIterations(ksp, iterations + 2));
318: iterations[2] -= iterations[0] + iterations[1];
319: PetscCheck(reason[0] == reason[1] && PetscAbs(iterations[0] - iterations[2]) <= 3, PetscObjectComm((PetscObject)ksp), PETSC_ERR_PLIB, "Successive calls to KSPSolve%s() did not converge for the same reason (%s v. %s) or with the same number of iterations (+/- 3, %" PetscInt_FMT " v. %" PetscInt_FMT ")", (transpose ? "Transpose" : ""), KSPConvergedReasons[reason[0]], KSPConvergedReasons[reason[1]], iterations[0], iterations[2]);
320: PetscCall(VecDestroy(&b));
321: PetscCall(ISDestroy(&is));
322: PetscCall(MatDestroy(&aux));
323: }
324: }
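/* with -viewer, write the PCHPDDM configuration to ASCII files (named A and A.dat) in a temporary directory, then remove it */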
325: PetscCall(PetscOptionsGetBool(NULL, NULL, "-viewer", &flg, NULL));
326: if (flg) {
327: PetscCall(PetscObjectTypeCompare((PetscObject)pc, PCHPDDM, &flg));
328: if (flg) {
329: PetscCall(PetscStrncpy(dir, "XXXXXX", sizeof(dir)));
330: if (rank == 0) PetscCall(PetscMkdtemp(dir));
331: PetscCallMPI(MPI_Bcast(dir, 6, MPI_CHAR, 0, PETSC_COMM_WORLD));
332: for (PetscInt i = 0; i < 2; ++i) {
333: PetscCall(PetscSNPrintf(name, sizeof(name), "%s/%s", dir, i == 0 ? "A" : "A.dat"));
334: PetscCall(PetscViewerASCIIOpen(PETSC_COMM_WORLD, name, &viewer));
335: PetscCall(PetscViewerPushFormat(viewer, PETSC_VIEWER_ASCII_INFO_DETAIL));
336: PetscCall(PCView(pc, viewer));
337: PetscCall(PetscViewerPopFormat(viewer));
338: PetscCall(PetscViewerDestroy(&viewer));
339: }
340: PetscCallMPI(MPI_Barrier(PETSC_COMM_WORLD));
341: if (rank == 0) PetscCall(PetscRMTree(dir));
342: }
343: }
344: #endif
345: PetscCall(KSPDestroy(&ksp));
346: PetscCall(MatDestroy(&A));
347: PetscCall(PetscFinalize());
348: return 0;
349: }
351: /*TEST
353: test:
354: requires: hpddm slepc datafilespath double !complex !defined(PETSC_USE_64BIT_INDICES) defined(PETSC_HAVE_DYNAMIC_LIBRARIES) defined(PETSC_USE_SHARED_LIBRARIES)
355: nsize: 4
356: args: -ksp_rtol 1e-3 -ksp_converged_reason -pc_type {{bjacobi hpddm}shared output} -pc_hpddm_coarse_sub_pc_type lu -sub_pc_type lu -options_left no -load_dir ${DATAFILESPATH}/matrices/hpddm/GENEO
358: testset:
359: requires: hpddm slepc datafilespath double !complex !defined(PETSC_USE_64BIT_INDICES) defined(PETSC_HAVE_DYNAMIC_LIBRARIES) defined(PETSC_USE_SHARED_LIBRARIES)
360: suffix: define_subdomains
361: nsize: 4
362: args: -ksp_rtol 1e-3 -ksp_converged_reason -pc_hpddm_define_subdomains -options_left no -load_dir ${DATAFILESPATH}/matrices/hpddm/GENEO
363: test:
364: args: -pc_type {{asm hpddm}shared output} -pc_hpddm_coarse_sub_pc_type lu -sub_pc_type lu -viewer
365: test:
366: args: -pc_type hpddm -pc_hpddm_levels_1_st_pc_type cholesky -pc_hpddm_levels_1_eps_nev 5 -pc_hpddm_coarse_sub_pc_type lu -pc_hpddm_levels_1_sub_pc_type lu -pc_hpddm_coarse_correction none
368: testset:
369: requires: hpddm slepc datafilespath double !complex !defined(PETSC_USE_64BIT_INDICES) defined(PETSC_HAVE_DYNAMIC_LIBRARIES) defined(PETSC_USE_SHARED_LIBRARIES)
370: nsize: 4
371: args: -ksp_converged_reason -pc_type hpddm -pc_hpddm_levels_1_sub_pc_type cholesky -pc_hpddm_coarse_pc_type redundant -load_dir ${DATAFILESPATH}/matrices/hpddm/GENEO
372: test:
373: suffix: geneo
374: args: -pc_hpddm_coarse_p {{1 2}shared output} -pc_hpddm_levels_1_st_pc_type cholesky -pc_hpddm_levels_1_eps_nev {{5 15}separate output} -mat_type {{aij baij sbaij}shared output}
375: test:
376: suffix: geneo_block_splitting
377: output_file: output/ex76_geneo_pc_hpddm_levels_1_eps_nev-15.out
378: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 1[5-9]/Linear solve converged due to CONVERGED_RTOL iterations 11/g"
379: args: -pc_hpddm_coarse_p 2 -pc_hpddm_levels_1_eps_nev 15 -pc_hpddm_block_splitting -pc_hpddm_levels_1_st_pc_type lu -pc_hpddm_levels_1_eps_gen_non_hermitian -mat_type {{aij baij}shared output} -successive_solves
380: test:
381: suffix: geneo_share
382: output_file: output/ex76_geneo_pc_hpddm_levels_1_eps_nev-5.out
383: args: -pc_hpddm_levels_1_st_pc_type cholesky -pc_hpddm_levels_1_eps_nev 5 -pc_hpddm_levels_1_st_share_sub_ksp -reset {{false true}shared output}
384: test:
385: suffix: harmonic_overlap_1_define_false
386: output_file: output/ex76_geneo_share.out
387: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 1[0-3]/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
388: args: -pc_hpddm_harmonic_overlap 1 -pc_hpddm_levels_1_eps_nev 30 -pc_hpddm_levels_1_eps_threshold_relative 1e+1 -pc_hpddm_levels_1_st_pc_type lu -pc_hpddm_levels_1_eps_pc_type lu -pc_hpddm_define_subdomains false -pc_hpddm_levels_1_pc_type asm -pc_hpddm_levels_1_pc_asm_overlap 2 -mat_type baij
389: test:
390: suffix: harmonic_overlap_1
391: output_file: output/ex76_geneo_share.out
392: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 1[0-3]/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
393: args: -pc_hpddm_harmonic_overlap 1 -pc_hpddm_levels_1_eps_nev 30 -pc_hpddm_levels_1_eps_threshold_relative 1e+1 -pc_hpddm_levels_1_st_pc_type lu -pc_hpddm_levels_1_eps_pc_type lu -mat_type baij
394: test:
395: requires: cuda
396: suffix: harmonic_overlap_1_cuda
397: output_file: output/ex76_geneo_share.out
398: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 1[0-3]/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
399: args: -pc_hpddm_harmonic_overlap 1 -pc_hpddm_levels_1_eps_nev 30 -pc_hpddm_levels_1_eps_threshold_relative 1e+1 -pc_hpddm_levels_1_st_pc_type lu -pc_hpddm_levels_1_eps_pc_type lu -mat_type aijcusparse
400: test:
401: suffix: harmonic_overlap_1_share_petsc
402: output_file: output/ex76_geneo_share.out
403: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 1[0-3]/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
404: args: -pc_hpddm_harmonic_overlap 1 -pc_hpddm_levels_1_eps_nev 30 -pc_hpddm_levels_1_eps_threshold_relative 1e+1 -pc_hpddm_levels_1_st_pc_type lu -pc_hpddm_levels_1_st_share_sub_ksp -pc_hpddm_levels_1_sub_pc_factor_mat_solver_type petsc -pc_hpddm_levels_1_eps_pc_type lu -mat_type baij
405: test:
406: requires: mumps
407: suffix: harmonic_overlap_1_share_mumps
408: output_file: output/ex76_geneo_share.out
409: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 1[0-3]/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
410: args: -pc_hpddm_harmonic_overlap 1 -pc_hpddm_levels_1_eps_nev 30 -pc_hpddm_levels_1_eps_threshold_relative 1e+1 -pc_hpddm_levels_1_st_share_sub_ksp -pc_hpddm_levels_1_sub_pc_factor_mat_solver_type mumps -pc_hpddm_coarse_pc_type cholesky -pc_hpddm_coarse_pc_factor_mat_solver_type mumps -pc_hpddm_coarse_mat_mumps_icntl_15 1
411: test:
412: requires: mumps
413: suffix: harmonic_overlap_1_share_mumps_not_set_explicitly
414: output_file: output/ex76_geneo_share.out
415: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 1[0-3]/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
416: args: -pc_hpddm_harmonic_overlap 1 -pc_hpddm_levels_1_eps_nev 30 -pc_hpddm_levels_1_eps_threshold_relative 1e+1 -pc_hpddm_levels_1_st_share_sub_ksp -pc_hpddm_levels_1_eps_mat_type baij
417: test:
418: requires: mkl_pardiso
419: suffix: harmonic_overlap_1_share_mkl_pardiso
420: output_file: output/ex76_geneo_share.out
421: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations [12][0-3]/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
422: args: -pc_hpddm_harmonic_overlap 1 -pc_hpddm_levels_1_eps_nev 30 -pc_hpddm_levels_1_eps_threshold_relative 1e+1 -pc_hpddm_levels_1_st_share_sub_ksp -pc_hpddm_levels_1_eps_mat_type shell -pc_hpddm_levels_1_sub_pc_factor_mat_solver_type mkl_pardiso
423: test:
424: requires: mkl_pardiso !mumps
425: suffix: harmonic_overlap_1_share_mkl_pardiso_no_set_explicitly
426: output_file: output/ex76_geneo_share.out
427: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations [12][0-3]/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
428: args: -pc_hpddm_harmonic_overlap 1 -pc_hpddm_levels_1_eps_nev 30 -pc_hpddm_levels_1_eps_threshold_relative 1e+1 -pc_hpddm_levels_1_st_share_sub_ksp -pc_hpddm_levels_1_eps_mat_type shell
429: test:
430: suffix: harmonic_overlap_2_threshold_relative
431: output_file: output/ex76_geneo_share.out
432: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 9/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
433: args: -pc_hpddm_harmonic_overlap 2 -pc_hpddm_levels_1_svd_nsv 15 -pc_hpddm_levels_1_svd_threshold_relative 1e-1 -pc_hpddm_levels_1_st_share_sub_ksp -mat_type sbaij
434: test:
435: suffix: harmonic_overlap_2
436: output_file: output/ex76_geneo_share.out
437: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 9/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
438: args: -pc_hpddm_harmonic_overlap 2 -pc_hpddm_levels_1_svd_nsv 12 -pc_hpddm_levels_1_svd_type {{trlanczos randomized}shared output} -pc_hpddm_levels_1_st_share_sub_ksp -mat_type sbaij
439: test:
440: requires: cuda
441: suffix: harmonic_overlap_2_cuda
442: output_file: output/ex76_geneo_share.out
443: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 9/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
444: args: -pc_hpddm_harmonic_overlap 2 -pc_hpddm_levels_1_svd_nsv 12 -pc_hpddm_levels_1_svd_type trlanczos -pc_hpddm_levels_1_st_share_sub_ksp -mat_type aijcusparse
446: testset:
447: requires: hpddm slepc datafilespath double !complex !defined(PETSC_USE_64BIT_INDICES) defined(PETSC_HAVE_DYNAMIC_LIBRARIES) defined(PETSC_USE_SHARED_LIBRARIES)
448: nsize: 4
449: args: -ksp_converged_reason -ksp_max_it 150 -pc_type hpddm -pc_hpddm_levels_1_eps_nev 5 -pc_hpddm_coarse_p 1 -load_dir ${DATAFILESPATH}/matrices/hpddm/GENEO -pc_hpddm_define_subdomains
450: test:
451: suffix: geneo_share_cholesky
452: output_file: output/ex76_geneo_share.out
453: # extra -pc_hpddm_levels_1_eps_gen_non_hermitian needed to avoid failures with PETSc Cholesky
454: args: -pc_hpddm_levels_1_sub_pc_type cholesky -pc_hpddm_levels_1_st_pc_type cholesky -mat_type {{aij sbaij}shared output} -pc_hpddm_levels_1_eps_gen_non_hermitian -pc_hpddm_has_neumann -pc_hpddm_levels_1_st_share_sub_ksp {{false true}shared output} -successive_solves
455: test:
456: suffix: geneo_share_cholesky_matstructure
457: output_file: output/ex76_geneo_share.out
458: # extra -pc_hpddm_levels_1_eps_gen_non_hermitian needed to avoid failures with PETSc Cholesky
459: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 14/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
460: args: -pc_hpddm_levels_1_sub_pc_type cholesky -mat_type {{baij sbaij}shared output} -pc_hpddm_levels_1_eps_gen_non_hermitian -pc_hpddm_levels_1_st_share_sub_ksp -pc_hpddm_levels_1_st_matstructure same -set_rhs {{false true} shared output}
461: test:
462: suffix: geneo_transpose
463: output_file: output/ex76_geneo_share.out
464: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 1[234]/Linear solve converged due to CONVERGED_RTOL iterations 15/g" -e "s/Linear solve converged due to CONVERGED_RTOL iterations 26/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
465: args: -pc_hpddm_levels_1_sub_pc_type cholesky -pc_hpddm_levels_1_st_pc_type cholesky -pc_hpddm_levels_1_eps_gen_non_hermitian -pc_hpddm_has_neumann -pc_hpddm_levels_1_st_share_sub_ksp -successive_solves -transpose -pc_hpddm_coarse_correction {{additive deflated balanced}shared output}
466: test:
467: TODO: broken # slightly different convergence rate, which may be a sign of something wrong somewhere
468: requires: cuda
469: suffix: geneo_transpose_cuda
470: output_file: output/ex76_geneo_share.out
471: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 1[2-4]/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
472: args: -pc_hpddm_levels_1_sub_pc_type cholesky -pc_hpddm_levels_1_st_pc_type cholesky -pc_hpddm_levels_1_eps_gen_non_hermitian -pc_hpddm_has_neumann -pc_hpddm_levels_1_st_share_sub_ksp -transpose -pc_hpddm_coarse_correction balanced -mat_type aijcusparse
473: test:
474: suffix: geneo_explicittranspose
475: output_file: output/ex76_geneo_share.out
476: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 1[234]/Linear solve converged due to CONVERGED_RTOL iterations 15/g" -e "s/Linear solve converged due to CONVERGED_RTOL iterations 26/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
477: args: -pc_hpddm_levels_1_sub_pc_type cholesky -pc_hpddm_levels_1_st_pc_type cholesky -pc_hpddm_levels_1_eps_gen_non_hermitian -pc_hpddm_has_neumann -pc_hpddm_levels_1_st_share_sub_ksp -transpose -ksp_use_explicittranspose -rhs 2 -sort -reset_is_block_size {{false true}shared output}
478: test:
479: requires: mumps
480: suffix: geneo_share_lu
481: output_file: output/ex76_geneo_share.out
482: # extra -pc_factor_mat_solver_type mumps needed to avoid failures with PETSc LU
483: args: -pc_hpddm_levels_1_sub_pc_type lu -pc_hpddm_levels_1_st_pc_type lu -mat_type baij -pc_hpddm_levels_1_st_pc_factor_mat_solver_type mumps -pc_hpddm_levels_1_sub_pc_factor_mat_solver_type mumps -pc_hpddm_has_neumann -pc_hpddm_levels_1_st_share_sub_ksp {{false true}shared output} -pc_hpddm_coarse_pc_factor_mat_solver_type mumps -pc_hpddm_coarse_mat_mumps_icntl_15 1
484: test:
485: requires: mumps
486: suffix: geneo_share_lu_matstructure
487: output_file: output/ex76_geneo_share.out
488: # extra -pc_factor_mat_solver_type mumps needed to avoid failures with PETSc LU
489: args: -pc_hpddm_levels_1_sub_pc_type lu -mat_type aij -pc_hpddm_levels_1_sub_pc_factor_mat_solver_type mumps -pc_hpddm_levels_1_st_share_sub_ksp -pc_hpddm_levels_1_st_matstructure {{same different}shared output} -pc_hpddm_levels_1_st_pc_type lu -pc_hpddm_levels_1_st_pc_factor_mat_solver_type mumps -successive_solves -pc_hpddm_levels_1_eps_target 1e-5
490: test:
491: suffix: geneo_share_not_asm
492: output_file: output/ex76_geneo_pc_hpddm_levels_1_eps_nev-5.out
493: # extra -pc_hpddm_levels_1_eps_gen_non_hermitian needed to avoid failures with PETSc Cholesky
494: args: -pc_hpddm_levels_1_sub_pc_type cholesky -pc_hpddm_levels_1_st_pc_type cholesky -pc_hpddm_levels_1_eps_gen_non_hermitian -pc_hpddm_has_neumann -pc_hpddm_levels_1_st_share_sub_ksp true -pc_hpddm_levels_1_pc_type gasm -successive_solves
495: test:
496: TODO: broken # PCGASM does not handle MATAIJCUSPARSE, see GitLab issue #1873
497: requires: cuda
498: suffix: geneo_share_not_asm_cuda
499: output_file: output/ex76_geneo_pc_hpddm_levels_1_eps_nev-5.out
500: args: -pc_hpddm_levels_1_sub_pc_type cholesky -pc_hpddm_levels_1_st_pc_type cholesky -pc_hpddm_levels_1_eps_gen_non_hermitian -pc_hpddm_has_neumann -pc_hpddm_levels_1_st_share_sub_ksp true -pc_hpddm_levels_1_pc_type gasm -successive_solves -mat_type aijcusparse
502: test:
503: requires: hpddm slepc datafilespath double !complex !defined(PETSC_USE_64BIT_INDICES) defined(PETSC_HAVE_DYNAMIC_LIBRARIES) defined(PETSC_USE_SHARED_LIBRARIES)
504: suffix: fgmres_geneo_20_p_2
505: nsize: 4
506: args: -ksp_converged_reason -pc_type hpddm -pc_hpddm_levels_1_sub_pc_type lu -pc_hpddm_levels_1_eps_nev 20 -pc_hpddm_coarse_p 2 -pc_hpddm_coarse_pc_type redundant -ksp_type fgmres -pc_hpddm_coarse_mat_type {{baij sbaij}shared output} -pc_hpddm_log_separate {{false true}shared output} -load_dir ${DATAFILESPATH}/matrices/hpddm/GENEO
508: testset:
509: requires: hpddm slepc datafilespath double !complex !defined(PETSC_USE_64BIT_INDICES) defined(PETSC_HAVE_DYNAMIC_LIBRARIES) defined(PETSC_USE_SHARED_LIBRARIES)
510: output_file: output/ex76_fgmres_geneo_20_p_2.out
511: nsize: 4
512: args: -ksp_converged_reason -pc_type hpddm -pc_hpddm_levels_1_sub_pc_type cholesky -pc_hpddm_levels_1_eps_nev 20 -pc_hpddm_levels_2_p 2 -pc_hpddm_levels_2_mat_type {{baij sbaij}shared output} -pc_hpddm_levels_2_eps_nev {{5 20}shared output} -pc_hpddm_levels_2_sub_pc_type cholesky -pc_hpddm_levels_2_ksp_type gmres -ksp_type fgmres -pc_hpddm_coarse_mat_type {{baij sbaij}shared output} -load_dir ${DATAFILESPATH}/matrices/hpddm/GENEO
513: test:
514: suffix: fgmres_geneo_20_p_2_geneo
515: args: -mat_type {{aij sbaij}shared output}
516: test:
517: suffix: fgmres_geneo_20_p_2_geneo_algebraic
518: args: -pc_hpddm_levels_2_st_pc_type mat
519: # PCHPDDM + KSPHPDDM test to exercise multilevel + multiple RHS in one go
520: testset:
521: requires: hpddm slepc datafilespath double !complex !defined(PETSC_USE_64BIT_INDICES) defined(PETSC_HAVE_DYNAMIC_LIBRARIES) defined(PETSC_USE_SHARED_LIBRARIES)
522: output_file: output/ex76_fgmres_geneo_20_p_2.out
523: # for -pc_hpddm_coarse_correction additive
524: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 37/Linear solve converged due to CONVERGED_RTOL iterations 25/g"
525: nsize: 4
526: args: -ksp_converged_reason -pc_type hpddm -pc_hpddm_levels_1_sub_pc_type cholesky -pc_hpddm_levels_1_eps_nev 20 -ksp_type hpddm -ksp_hpddm_variant flexible -pc_hpddm_coarse_mat_type baij -load_dir ${DATAFILESPATH}/matrices/hpddm/GENEO -rhs 4
527: test:
528: suffix: fgmres_geneo_20_p_2_geneo_rhs
529: args: -pc_hpddm_levels_2_p 2 -pc_hpddm_levels_2_mat_type baij -pc_hpddm_levels_2_eps_nev 5 -pc_hpddm_levels_2_sub_pc_type cholesky -pc_hpddm_levels_2_ksp_max_it 10 -pc_hpddm_levels_2_ksp_type hpddm -pc_hpddm_levels_2_ksp_hpddm_type gmres -mat_type aij -pc_hpddm_coarse_correction {{additive deflated balanced}shared output}
530: test:
531: requires: cuda
532: suffix: fgmres_geneo_20_rhs_cuda
533: args: -mat_type aijcusparse -pc_hpddm_coarse_correction {{deflated balanced}shared output}
535: testset:
536: requires: hpddm slepc datafilespath double !complex !defined(PETSC_USE_64BIT_INDICES) defined(PETSC_HAVE_DYNAMIC_LIBRARIES) defined(PETSC_USE_SHARED_LIBRARIES) mumps defined(PETSC_HAVE_OPENMP_SUPPORT)
537: filter: grep -E -e "Linear solve" -e " executing" | sed -e "s/MPI = 1/MPI = 2/g" -e "s/OMP = 1/OMP = 2/g"
538: nsize: 4
539: args: -ksp_converged_reason -pc_type hpddm -pc_hpddm_levels_1_sub_pc_type cholesky -pc_hpddm_levels_1_eps_nev 15 -pc_hpddm_levels_1_st_pc_type cholesky -pc_hpddm_coarse_p {{1 2}shared output} -load_dir ${DATAFILESPATH}/matrices/hpddm/GENEO -pc_hpddm_coarse_pc_factor_mat_solver_type mumps -pc_hpddm_coarse_mat_mumps_icntl_4 2 -pc_hpddm_coarse_mat_mumps_use_omp_threads {{1 2}shared output}
540: test:
541: suffix: geneo_mumps_use_omp_threads_1
542: output_file: output/ex76_geneo_mumps_use_omp_threads.out
543: args: -pc_hpddm_coarse_mat_type {{baij sbaij}shared output}
544: test:
545: suffix: geneo_mumps_use_omp_threads_2
546: output_file: output/ex76_geneo_mumps_use_omp_threads.out
547: args: -pc_hpddm_coarse_mat_type aij -pc_hpddm_levels_1_eps_threshold_absolute 0.4 -pc_hpddm_coarse_pc_type cholesky -pc_hpddm_coarse_mat_filter 1e-12
549: testset: # converge really poorly because of a tiny -pc_hpddm_levels_1_eps_threshold_absolute, but needed for proper code coverage where some subdomains don't call EPSSolve()
550: requires: hpddm slepc datafilespath double !complex !defined(PETSC_USE_64BIT_INDICES) defined(PETSC_HAVE_DYNAMIC_LIBRARIES) defined(PETSC_USE_SHARED_LIBRARIES)
551: nsize: 4
552: args: -ksp_converged_reason -pc_type hpddm -pc_hpddm_levels_1_sub_pc_type cholesky -pc_hpddm_levels_1_eps_threshold_absolute 0.005 -pc_hpddm_levels_1_eps_use_inertia -load_dir ${DATAFILESPATH}/matrices/hpddm/GENEO -pc_hpddm_levels_1_st_share_sub_ksp -pc_hpddm_define_subdomains -pc_hpddm_has_neumann -ksp_rtol 0.9
553: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 1/Linear solve converged due to CONVERGED_RTOL iterations 141/g"
554: test:
555: suffix: inertia_petsc
556: output_file: output/ex76_1.out
557: args: -pc_hpddm_levels_1_sub_pc_factor_mat_solver_type petsc
558: test:
559: suffix: inertia_mumps
560: output_file: output/ex76_1.out
561: requires: mumps
563: testset:
564: requires: hpddm slepc datafilespath double !complex !defined(PETSC_USE_64BIT_INDICES) defined(PETSC_HAVE_DYNAMIC_LIBRARIES) defined(PETSC_USE_SHARED_LIBRARIES)
565: output_file: output/empty.out
566: nsize: 4
567: args: -pc_type hpddm -pc_hpddm_levels_1_sub_pc_type cholesky -pc_hpddm_levels_1_eps_nev 20 -rhs 4 -ksp_max_it 20 -ksp_type hpddm -load_dir ${DATAFILESPATH}/matrices/hpddm/GENEO -pc_hpddm_define_subdomains -ksp_error_if_not_converged
568: test:
569: suffix: reuse_symbolic
570: args: -pc_hpddm_coarse_correction {{additive deflated balanced}shared output} -ksp_pc_side {{left right}shared output} -transpose {{true false} shared output}
571: test:
572: requires: cuda
573: suffix: reuse_symbolic_cuda
574: args: -pc_hpddm_coarse_correction deflated -ksp_pc_side right -transpose -mat_type aijcusparse
576: TEST*/