Actual source code: mkl_cpardiso.c

  1: #include <petscsys.h>
  2: #include <../src/mat/impls/aij/mpi/mpiaij.h>
  3: #include <../src/mat/impls/sbaij/mpi/mpisbaij.h>

  5: #if defined(PETSC_HAVE_MKL_INTEL_ILP64)
  6:   #define MKL_ILP64
  7: #endif
  8: #include <mkl.h>
  9: #include <mkl_cluster_sparse_solver.h>

 11: /*
 12:  *  Possible mkl_cpardiso phases that control the execution of the solver.
 13:  *  For more information, see the MKL Cluster PARDISO manual.
 14:  */
 15: #define JOB_ANALYSIS                                                    11
 16: #define JOB_ANALYSIS_NUMERICAL_FACTORIZATION                            12
 17: #define JOB_ANALYSIS_NUMERICAL_FACTORIZATION_SOLVE_ITERATIVE_REFINEMENT 13
 18: #define JOB_NUMERICAL_FACTORIZATION                                     22
 19: #define JOB_NUMERICAL_FACTORIZATION_SOLVE_ITERATIVE_REFINEMENT          23
 20: #define JOB_SOLVE_ITERATIVE_REFINEMENT                                  33
 21: #define JOB_SOLVE_FORWARD_SUBSTITUTION                                  331
 22: #define JOB_SOLVE_DIAGONAL_SUBSTITUTION                                 332
 23: #define JOB_SOLVE_BACKWARD_SUBSTITUTION                                 333
 24: #define JOB_RELEASE_OF_LU_MEMORY                                        0
 25: #define JOB_RELEASE_OF_ALL_MEMORY                                       -1
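
/*
 * In the two-digit phase values the first digit is the starting phase and the second the ending
 * phase (1 = analysis, 2 = numerical factorization, 3 = solve), e.g. 13 runs analysis,
 * factorization, and solve in a single call. The 33x values select an individual substitution
 * step of the solve phase.
 */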

 27: #define IPARM_SIZE 64
 28: #define INT_TYPE   MKL_INT

 30: static const char *Err_MSG_CPardiso(int errNo)
 31: {
 32:   switch (errNo) {
 33:   case -1:
 34:     return "input inconsistent";
 35:     break;
 36:   case -2:
 37:     return "not enough memory";
 38:     break;
 39:   case -3:
 40:     return "reordering problem";
 41:     break;
 42:   case -4:
 43:     return "zero pivot, numerical factorization or iterative refinement problem";
 44:     break;
 45:   case -5:
 46:     return "unclassified (internal) error";
 47:     break;
 48:   case -6:
 49:     return "preordering failed (matrix types 11, 13 only)";
 50:     break;
 51:   case -7:
 52:     return "diagonal matrix problem";
 53:     break;
 54:   case -8:
 55:     return "32-bit integer overflow problem";
 56:     break;
 57:   case -9:
 58:     return "not enough memory for OOC";
 59:     break;
 60:   case -10:
 61:     return "problems with opening OOC temporary files";
 62:     break;
 63:   case -11:
 64:     return "read/write problems with the OOC data file";
 65:     break;
 66:   default:
 67:     return "unknown error";
 68:   }
 69: }

 71: /*
 72:  *  Internal data structure.
 73:  *  For more information, see the MKL Cluster PARDISO manual.
 74:  */

 76: typedef struct {
 77:   /* Configuration vector */
 78:   INT_TYPE iparm[IPARM_SIZE];

 80:   /*
 81:    * Internal mkl_cpardiso memory location.
 82:    * After the first call to mkl_cpardiso do not modify pt, as that could cause a serious memory leak.
 83:    */
 84:   void *pt[IPARM_SIZE];

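  /* Fortran handle of the duplicated MPI communicator; cluster_sparse_solver() expects a Fortran communicator */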
 86:   MPI_Fint comm_mkl_cpardiso;

 88:   /* Basic mkl_cpardiso info */
 89:   INT_TYPE phase, maxfct, mnum, mtype, n, nrhs, msglvl, err;

 91:   /* Matrix structure */
 92:   PetscScalar *a;

 94:   INT_TYPE *ia, *ja;

 96:   /* Number of non-zero elements */
 97:   INT_TYPE nz;

 99:   /* Row permutation vector */
100:   INT_TYPE *perm;

102:   /* Records whether the nonzero pattern of the matrix is unchanged between factorizations. */
103:   MatStructure matstruc;

105:   PetscErrorCode (*ConvertToTriples)(Mat, MatReuse, PetscInt *, PetscInt **, PetscInt **, PetscScalar **);

107:   /* True if mkl_cpardiso has been called; its internal memory must then be released on destroy. */
108:   PetscBool CleanUp;
109: } Mat_MKL_CPARDISO;

111: /*
112:  * Copy the elements of matrix A.
113:  * Input:
114:  *   - Mat A: MATSEQAIJ matrix
115:  *   - MatReuse reuse:
116:  *     - MAT_INITIAL_MATRIX: create a new CSR representation
117:  *     - MAT_REUSE_MATRIX: reuse the existing CSR representation and only update the values
118:  * Output:
119:  *   - PetscInt *nnz: number of nonzero elements
120:  *   - PetscInt **r: pointer to the row offsets (CSR i array)
121:  *   - PetscInt **c: pointer to the column indices (CSR j array)
122:  *   - PetscScalar **v: pointer to the nonzero values
123:  *
124:  * The index base (0 for C representation, 1 for Fortran representation) is not chosen here;
125:  * it is selected later through iparm[34] in PetscInitialize_MKL_CPARDISO().
126:  */
127: static PetscErrorCode MatCopy_seqaij_seqaij_MKL_CPARDISO(Mat A, MatReuse reuse, PetscInt *nnz, PetscInt **r, PetscInt **c, PetscScalar **v)
128: {
129:   Mat_SeqAIJ *aa = (Mat_SeqAIJ *)A->data;

131:   PetscFunctionBegin;
132:   *v = aa->a;
133:   if (reuse == MAT_INITIAL_MATRIX) {
134:     *r   = (INT_TYPE *)aa->i;
135:     *c   = (INT_TYPE *)aa->j;
136:     *nnz = aa->nz;
137:   }
138:   PetscFunctionReturn(PETSC_SUCCESS);
139: }
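
/*
 * Note that for MATSEQAIJ the CSR arrays of the matrix itself are handed to MKL directly, with no
 * copy made; this is why MatDestroy_MKL_CPARDISO() frees the triples only for the other converters.
 */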

141: static PetscErrorCode MatConvertToTriples_mpiaij_mpiaij_MKL_CPARDISO(Mat A, MatReuse reuse, PetscInt *nnz, PetscInt **r, PetscInt **c, PetscScalar **v)
142: {
143:   const PetscInt    *ai, *aj, *bi, *bj, *garray, m = A->rmap->n, *ajj, *bjj;
144:   PetscInt           rstart, nz, i, j, countA, countB;
145:   PetscInt          *row, *col;
146:   const PetscScalar *av, *bv;
147:   PetscScalar       *val;
148:   Mat_MPIAIJ        *mat = (Mat_MPIAIJ *)A->data;
149:   Mat_SeqAIJ        *aa  = (Mat_SeqAIJ *)mat->A->data;
150:   Mat_SeqAIJ        *bb  = (Mat_SeqAIJ *)mat->B->data;
151:   PetscInt           colA_start, jB, jcol;

153:   PetscFunctionBegin;
154:   ai     = aa->i;
155:   aj     = aa->j;
156:   bi     = bb->i;
157:   bj     = bb->j;
158:   rstart = A->rmap->rstart;
159:   av     = aa->a;
160:   bv     = bb->a;

162:   garray = mat->garray;

164:   if (reuse == MAT_INITIAL_MATRIX) {
165:     nz   = aa->nz + bb->nz;
166:     *nnz = nz;
167:     PetscCall(PetscMalloc3(m + 1, &row, nz, &col, nz, &val));
168:     *r = row;
169:     *c = col;
170:     *v = val;
171:   } else {
172:     row = *r;
173:     col = *c;
174:     val = *v;
175:   }

177:   nz = 0;
178:   for (i = 0; i < m; i++) {
179:     row[i] = nz;
180:     countA = ai[i + 1] - ai[i];
181:     countB = bi[i + 1] - bi[i];
182:     ajj    = aj + ai[i]; /* ptr to the beginning of this row */
183:     bjj    = bj + bi[i];

185:     /* B part, smaller col index */
186:     colA_start = rstart + (countA > 0 ? ajj[0] : 0); /* the smallest global col index of A; guard empty diagonal rows as in the BAIJ variant */
187:     jB         = 0;
188:     for (j = 0; j < countB; j++) {
189:       jcol = garray[bjj[j]];
190:       if (jcol > colA_start) break;
191:       col[nz]   = jcol;
192:       val[nz++] = *bv++;
193:     }
194:     jB = j;

196:     /* A part */
197:     for (j = 0; j < countA; j++) {
198:       col[nz]   = rstart + ajj[j];
199:       val[nz++] = *av++;
200:     }

202:     /* B part, larger col index */
203:     for (j = jB; j < countB; j++) {
204:       col[nz]   = garray[bjj[j]];
205:       val[nz++] = *bv++;
206:     }
207:   }
208:   row[m] = nz;
209:   PetscFunctionReturn(PETSC_SUCCESS);
210: }
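
/*
 * A sketch of the resulting layout: the off-diagonal ("B part") columns always lie outside this
 * rank's owned column block, so splitting them around the diagonal ("A part") columns yields each
 * row sorted by global column. E.g. B columns {1}, A columns {rstart, rstart+2}, and B columns
 * {rend+3} are emitted in the order 1, rstart, rstart+2, rend+3.
 */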

212: static PetscErrorCode MatConvertToTriples_mpibaij_mpibaij_MKL_CPARDISO(Mat A, MatReuse reuse, PetscInt *nnz, PetscInt **r, PetscInt **c, PetscScalar **v)
213: {
214:   const PetscInt    *ai, *aj, *bi, *bj, *garray, bs = A->rmap->bs, bs2 = bs * bs, m = A->rmap->n / bs, *ajj, *bjj;
215:   PetscInt           rstart, nz, i, j, countA, countB;
216:   PetscInt          *row, *col;
217:   const PetscScalar *av, *bv;
218:   PetscScalar       *val;
219:   Mat_MPIBAIJ       *mat = (Mat_MPIBAIJ *)A->data;
220:   Mat_SeqBAIJ       *aa  = (Mat_SeqBAIJ *)mat->A->data;
221:   Mat_SeqBAIJ       *bb  = (Mat_SeqBAIJ *)mat->B->data;
222:   PetscInt           colA_start, jB, jcol;

224:   PetscFunctionBegin;
225:   ai     = aa->i;
226:   aj     = aa->j;
227:   bi     = bb->i;
228:   bj     = bb->j;
229:   rstart = A->rmap->rstart / bs;
230:   av     = aa->a;
231:   bv     = bb->a;

233:   garray = mat->garray;

235:   if (reuse == MAT_INITIAL_MATRIX) {
236:     nz   = aa->nz + bb->nz;
237:     *nnz = nz;
238:     PetscCall(PetscMalloc3(m + 1, &row, nz, &col, nz * bs2, &val));
239:     *r = row;
240:     *c = col;
241:     *v = val;
242:   } else {
243:     row = *r;
244:     col = *c;
245:     val = *v;
246:   }

248:   nz = 0;
249:   for (i = 0; i < m; i++) {
250:     row[i] = nz + 1;
251:     countA = ai[i + 1] - ai[i];
252:     countB = bi[i + 1] - bi[i];
253:     ajj    = aj + ai[i]; /* ptr to the beginning of this row */
254:     bjj    = bj + bi[i];

256:     /* B part, smaller col index */
257:     colA_start = rstart + (countA > 0 ? ajj[0] : 0); /* the smallest global col index of A */
258:     jB         = 0;
259:     for (j = 0; j < countB; j++) {
260:       jcol = garray[bjj[j]];
261:       if (jcol > colA_start) break;
262:       col[nz++] = jcol + 1;
263:     }
264:     jB = j;
265:     PetscCall(PetscArraycpy(val, bv, jB * bs2));
266:     val += jB * bs2;
267:     bv += jB * bs2;

269:     /* A part */
270:     for (j = 0; j < countA; j++) col[nz++] = rstart + ajj[j] + 1;
271:     PetscCall(PetscArraycpy(val, av, countA * bs2));
272:     val += countA * bs2;
273:     av += countA * bs2;

275:     /* B part, larger col index */
276:     for (j = jB; j < countB; j++) col[nz++] = garray[bjj[j]] + 1;
277:     PetscCall(PetscArraycpy(val, bv, (countB - jB) * bs2));
278:     val += (countB - jB) * bs2;
279:     bv += (countB - jB) * bs2;
280:   }
281:   row[m] = nz + 1;
282:   PetscFunctionReturn(PETSC_SUCCESS);
283: }

285: static PetscErrorCode MatConvertToTriples_mpisbaij_mpisbaij_MKL_CPARDISO(Mat A, MatReuse reuse, PetscInt *nnz, PetscInt **r, PetscInt **c, PetscScalar **v)
286: {
287:   const PetscInt    *ai, *aj, *bi, *bj, *garray, bs = A->rmap->bs, bs2 = bs * bs, m = A->rmap->n / bs, *ajj, *bjj;
288:   PetscInt           rstart, nz, i, j, countA, countB;
289:   PetscInt          *row, *col;
290:   const PetscScalar *av, *bv;
291:   PetscScalar       *val;
292:   Mat_MPISBAIJ      *mat = (Mat_MPISBAIJ *)A->data;
293:   Mat_SeqSBAIJ      *aa  = (Mat_SeqSBAIJ *)mat->A->data;
294:   Mat_SeqBAIJ       *bb  = (Mat_SeqBAIJ *)mat->B->data;

296:   PetscFunctionBegin;
297:   ai     = aa->i;
298:   aj     = aa->j;
299:   bi     = bb->i;
300:   bj     = bb->j;
301:   rstart = A->rmap->rstart / bs;
302:   av     = aa->a;
303:   bv     = bb->a;

305:   garray = mat->garray;

307:   if (reuse == MAT_INITIAL_MATRIX) {
308:     nz   = aa->nz + bb->nz;
309:     *nnz = nz;
310:     PetscCall(PetscMalloc3(m + 1, &row, nz, &col, nz * bs2, &val));
311:     *r = row;
312:     *c = col;
313:     *v = val;
314:   } else {
315:     row = *r;
316:     col = *c;
317:     val = *v;
318:   }

320:   nz = 0;
321:   for (i = 0; i < m; i++) {
322:     row[i] = nz + 1;
323:     countA = ai[i + 1] - ai[i];
324:     countB = bi[i + 1] - bi[i];
325:     ajj    = aj + ai[i]; /* ptr to the beginning of this row */
326:     bjj    = bj + bi[i];

328:     /* A part */
329:     for (j = 0; j < countA; j++) col[nz++] = rstart + ajj[j] + 1;
330:     PetscCall(PetscArraycpy(val, av, countA * bs2));
331:     val += countA * bs2;
332:     av += countA * bs2;

334:     /* B part, larger col index */
335:     for (j = 0; j < countB; j++) col[nz++] = garray[bjj[j]] + 1;
336:     PetscCall(PetscArraycpy(val, bv, countB * bs2));
337:     val += countB * bs2;
338:     bv += countB * bs2;
339:   }
340:   row[m] = nz + 1;
341:   PetscFunctionReturn(PETSC_SUCCESS);
342: }

344: /*
345:  * Free memory for Mat_MKL_CPARDISO structure and pointers to objects.
346:  */
347: static PetscErrorCode MatDestroy_MKL_CPARDISO(Mat A)
348: {
349:   Mat_MKL_CPARDISO *mat_mkl_cpardiso = (Mat_MKL_CPARDISO *)A->data;
350:   MPI_Comm          comm;

352:   PetscFunctionBegin;
353:   /* Terminate instance, deallocate memories */
354:   if (mat_mkl_cpardiso->CleanUp) {
355:     mat_mkl_cpardiso->phase = JOB_RELEASE_OF_ALL_MEMORY;

357:     cluster_sparse_solver(mat_mkl_cpardiso->pt, &mat_mkl_cpardiso->maxfct, &mat_mkl_cpardiso->mnum, &mat_mkl_cpardiso->mtype, &mat_mkl_cpardiso->phase, &mat_mkl_cpardiso->n, NULL, NULL, NULL, mat_mkl_cpardiso->perm, &mat_mkl_cpardiso->nrhs,
358:                           mat_mkl_cpardiso->iparm, &mat_mkl_cpardiso->msglvl, NULL, NULL, &mat_mkl_cpardiso->comm_mkl_cpardiso, (PetscInt *)&mat_mkl_cpardiso->err);
359:   }

361:   if (mat_mkl_cpardiso->ConvertToTriples != MatCopy_seqaij_seqaij_MKL_CPARDISO) PetscCall(PetscFree3(mat_mkl_cpardiso->ia, mat_mkl_cpardiso->ja, mat_mkl_cpardiso->a));
362:   comm = MPI_Comm_f2c(mat_mkl_cpardiso->comm_mkl_cpardiso);
363:   PetscCallMPI(MPI_Comm_free(&comm));
364:   PetscCall(PetscFree(A->data));

366:   /* clear composed functions */
367:   PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatFactorGetSolverType_C", NULL));
368:   PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatMkl_CPardisoSetCntl_C", NULL));
369:   PetscFunctionReturn(PETSC_SUCCESS);
370: }

372: /*
373:  * Solves Ax = b
374:  */
375: static PetscErrorCode MatSolve_MKL_CPARDISO(Mat A, Vec b, Vec x)
376: {
377:   Mat_MKL_CPARDISO  *mat_mkl_cpardiso = (Mat_MKL_CPARDISO *)A->data;
378:   PetscScalar       *xarray;
379:   const PetscScalar *barray;

381:   PetscFunctionBegin;
382:   mat_mkl_cpardiso->nrhs = 1;
383:   PetscCall(VecGetArray(x, &xarray));
384:   PetscCall(VecGetArrayRead(b, &barray));

386:   /* solve phase */
387:   mat_mkl_cpardiso->phase = JOB_SOLVE_ITERATIVE_REFINEMENT;
388:   cluster_sparse_solver(mat_mkl_cpardiso->pt, &mat_mkl_cpardiso->maxfct, &mat_mkl_cpardiso->mnum, &mat_mkl_cpardiso->mtype, &mat_mkl_cpardiso->phase, &mat_mkl_cpardiso->n, mat_mkl_cpardiso->a, mat_mkl_cpardiso->ia, mat_mkl_cpardiso->ja,
389:                         mat_mkl_cpardiso->perm, &mat_mkl_cpardiso->nrhs, mat_mkl_cpardiso->iparm, &mat_mkl_cpardiso->msglvl, (void *)barray, (void *)xarray, &mat_mkl_cpardiso->comm_mkl_cpardiso, (PetscInt *)&mat_mkl_cpardiso->err);

391:   PetscCheck(mat_mkl_cpardiso->err >= 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "Error reported by MKL Cluster PARDISO: err=%d, msg = \"%s\". Please check manual", mat_mkl_cpardiso->err, Err_MSG_CPardiso(mat_mkl_cpardiso->err));

393:   PetscCall(VecRestoreArray(x, &xarray));
394:   PetscCall(VecRestoreArrayRead(b, &barray));
395:   mat_mkl_cpardiso->CleanUp = PETSC_TRUE;
396:   PetscFunctionReturn(PETSC_SUCCESS);
397: }

399: static PetscErrorCode MatForwardSolve_MKL_CPARDISO(Mat A, Vec b, Vec x)
400: {
401:   Mat_MKL_CPARDISO  *mat_mkl_cpardiso = (Mat_MKL_CPARDISO *)A->data;
402:   PetscScalar       *xarray;
403:   const PetscScalar *barray;

405:   PetscFunctionBegin;
406:   mat_mkl_cpardiso->nrhs = 1;
407:   PetscCall(VecGetArray(x, &xarray));
408:   PetscCall(VecGetArrayRead(b, &barray));

410:   /* solve phase */
411:   mat_mkl_cpardiso->phase = JOB_SOLVE_FORWARD_SUBSTITUTION;
412:   cluster_sparse_solver(mat_mkl_cpardiso->pt, &mat_mkl_cpardiso->maxfct, &mat_mkl_cpardiso->mnum, &mat_mkl_cpardiso->mtype, &mat_mkl_cpardiso->phase, &mat_mkl_cpardiso->n, mat_mkl_cpardiso->a, mat_mkl_cpardiso->ia, mat_mkl_cpardiso->ja,
413:                         mat_mkl_cpardiso->perm, &mat_mkl_cpardiso->nrhs, mat_mkl_cpardiso->iparm, &mat_mkl_cpardiso->msglvl, (void *)barray, (void *)xarray, &mat_mkl_cpardiso->comm_mkl_cpardiso, (PetscInt *)&mat_mkl_cpardiso->err);

415:   PetscCheck(mat_mkl_cpardiso->err >= 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "Error reported by MKL Cluster PARDISO: err=%d, msg = \"%s\". Please check manual", mat_mkl_cpardiso->err, Err_MSG_CPardiso(mat_mkl_cpardiso->err));

417:   PetscCall(VecRestoreArray(x, &xarray));
418:   PetscCall(VecRestoreArrayRead(b, &barray));
419:   mat_mkl_cpardiso->CleanUp = PETSC_TRUE;
420:   PetscFunctionReturn(PETSC_SUCCESS);
421: }

423: static PetscErrorCode MatBackwardSolve_MKL_CPARDISO(Mat A, Vec b, Vec x)
424: {
425:   Mat_MKL_CPARDISO  *mat_mkl_cpardiso = (Mat_MKL_CPARDISO *)A->data;
426:   PetscScalar       *xarray;
427:   const PetscScalar *barray;

429:   PetscFunctionBegin;
430:   mat_mkl_cpardiso->nrhs = 1;
431:   PetscCall(VecGetArray(x, &xarray));
432:   PetscCall(VecGetArrayRead(b, &barray));

434:   /* solve phase */
435:   mat_mkl_cpardiso->phase = JOB_SOLVE_BACKWARD_SUBSTITUTION;
436:   cluster_sparse_solver(mat_mkl_cpardiso->pt, &mat_mkl_cpardiso->maxfct, &mat_mkl_cpardiso->mnum, &mat_mkl_cpardiso->mtype, &mat_mkl_cpardiso->phase, &mat_mkl_cpardiso->n, mat_mkl_cpardiso->a, mat_mkl_cpardiso->ia, mat_mkl_cpardiso->ja,
437:                         mat_mkl_cpardiso->perm, &mat_mkl_cpardiso->nrhs, mat_mkl_cpardiso->iparm, &mat_mkl_cpardiso->msglvl, (void *)barray, (void *)xarray, &mat_mkl_cpardiso->comm_mkl_cpardiso, (PetscInt *)&mat_mkl_cpardiso->err);

439:   PetscCheck(mat_mkl_cpardiso->err >= 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "Error reported by MKL Cluster PARDISO: err=%d, msg = \"%s\". Please check manual", mat_mkl_cpardiso->err, Err_MSG_CPardiso(mat_mkl_cpardiso->err));

441:   PetscCall(VecRestoreArray(x, &xarray));
442:   PetscCall(VecRestoreArrayRead(b, &barray));
443:   mat_mkl_cpardiso->CleanUp = PETSC_TRUE;
444:   PetscFunctionReturn(PETSC_SUCCESS);
445: }

447: static PetscErrorCode MatSolveTranspose_MKL_CPARDISO(Mat A, Vec b, Vec x)
448: {
449:   Mat_MKL_CPARDISO *mat_mkl_cpardiso = (Mat_MKL_CPARDISO *)A->data;

451:   PetscFunctionBegin;
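  /* iparm[11] selects the system to solve (per the Intel documentation): 0 = A x = b,
     1 = conjugate transposed A^H x = b, 2 = transposed A^T x = b; it is reset to 0 below */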
452: #if defined(PETSC_USE_COMPLEX)
453:   mat_mkl_cpardiso->iparm[12 - 1] = 1;
454: #else
455:   mat_mkl_cpardiso->iparm[12 - 1] = 2;
456: #endif
457:   PetscCall(MatSolve_MKL_CPARDISO(A, b, x));
458:   mat_mkl_cpardiso->iparm[12 - 1] = 0;
459:   PetscFunctionReturn(PETSC_SUCCESS);
460: }

462: static PetscErrorCode MatMatSolve_MKL_CPARDISO(Mat A, Mat B, Mat X)
463: {
464:   Mat_MKL_CPARDISO  *mat_mkl_cpardiso = (Mat_MKL_CPARDISO *)A->data;
465:   PetscScalar       *xarray;
466:   const PetscScalar *barray;

468:   PetscFunctionBegin;
469:   PetscCall(MatGetSize(B, NULL, (PetscInt *)&mat_mkl_cpardiso->nrhs));

471:   if (mat_mkl_cpardiso->nrhs > 0) {
472:     PetscCall(MatDenseGetArrayRead(B, &barray));
473:     PetscCall(MatDenseGetArray(X, &xarray));

475:     PetscCheck(barray != xarray, PETSC_COMM_SELF, PETSC_ERR_SUP, "B and X cannot share the same memory location");

477:     /* solve phase */
478:     mat_mkl_cpardiso->phase = JOB_SOLVE_ITERATIVE_REFINEMENT;
479:     cluster_sparse_solver(mat_mkl_cpardiso->pt, &mat_mkl_cpardiso->maxfct, &mat_mkl_cpardiso->mnum, &mat_mkl_cpardiso->mtype, &mat_mkl_cpardiso->phase, &mat_mkl_cpardiso->n, mat_mkl_cpardiso->a, mat_mkl_cpardiso->ia, mat_mkl_cpardiso->ja,
480:                           mat_mkl_cpardiso->perm, &mat_mkl_cpardiso->nrhs, mat_mkl_cpardiso->iparm, &mat_mkl_cpardiso->msglvl, (void *)barray, (void *)xarray, &mat_mkl_cpardiso->comm_mkl_cpardiso, (PetscInt *)&mat_mkl_cpardiso->err);
481:     PetscCheck(mat_mkl_cpardiso->err >= 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "Error reported by MKL Cluster PARDISO: err=%d, msg = \"%s\". Please check manual", mat_mkl_cpardiso->err, Err_MSG_CPardiso(mat_mkl_cpardiso->err));
482:     PetscCall(MatDenseRestoreArrayRead(B, &barray));
483:     PetscCall(MatDenseRestoreArray(X, &xarray));
484:   }
485:   mat_mkl_cpardiso->CleanUp = PETSC_TRUE;
486:   PetscFunctionReturn(PETSC_SUCCESS);
487: }

489: /*
490:  * Numerical factorization; used by both the LU and the Cholesky paths
491:  */
492: static PetscErrorCode MatFactorNumeric_MKL_CPARDISO(Mat F, Mat A, const MatFactorInfo *info)
493: {
494:   Mat_MKL_CPARDISO *mat_mkl_cpardiso = (Mat_MKL_CPARDISO *)F->data;

496:   PetscFunctionBegin;
497:   mat_mkl_cpardiso->matstruc = SAME_NONZERO_PATTERN;
498:   PetscCall((*mat_mkl_cpardiso->ConvertToTriples)(A, MAT_REUSE_MATRIX, &mat_mkl_cpardiso->nz, &mat_mkl_cpardiso->ia, &mat_mkl_cpardiso->ja, &mat_mkl_cpardiso->a));

500:   mat_mkl_cpardiso->phase = JOB_NUMERICAL_FACTORIZATION;
501:   cluster_sparse_solver(mat_mkl_cpardiso->pt, &mat_mkl_cpardiso->maxfct, &mat_mkl_cpardiso->mnum, &mat_mkl_cpardiso->mtype, &mat_mkl_cpardiso->phase, &mat_mkl_cpardiso->n, mat_mkl_cpardiso->a, mat_mkl_cpardiso->ia, mat_mkl_cpardiso->ja,
502:                         mat_mkl_cpardiso->perm, &mat_mkl_cpardiso->nrhs, mat_mkl_cpardiso->iparm, &mat_mkl_cpardiso->msglvl, NULL, NULL, &mat_mkl_cpardiso->comm_mkl_cpardiso, &mat_mkl_cpardiso->err);
503:   PetscCheck(mat_mkl_cpardiso->err >= 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "Error reported by MKL Cluster PARDISO: err=%d, msg = \"%s\". Please check manual", mat_mkl_cpardiso->err, Err_MSG_CPardiso(mat_mkl_cpardiso->err));

505:   mat_mkl_cpardiso->matstruc = SAME_NONZERO_PATTERN;
506:   mat_mkl_cpardiso->CleanUp  = PETSC_TRUE;
507:   PetscFunctionReturn(PETSC_SUCCESS);
508: }

510: /* Sets mkl_cpardiso options from the options database */
511: static PetscErrorCode MatSetFromOptions_MKL_CPARDISO(Mat F, Mat A)
512: {
513:   Mat_MKL_CPARDISO *mat_mkl_cpardiso = (Mat_MKL_CPARDISO *)F->data;
514:   PetscInt          icntl, threads = 1; /* initialized so the default shown for -mat_mkl_cpardiso_65 is well defined */
515:   PetscBool         flg;

517:   PetscFunctionBegin;
518:   PetscOptionsBegin(PetscObjectComm((PetscObject)F), ((PetscObject)F)->prefix, "MKL Cluster PARDISO Options", "Mat");
519:   PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_65", "Suggested number of threads to use within MKL Cluster PARDISO", "None", threads, &threads, &flg));
520:   if (flg) mkl_set_num_threads((int)threads);

522:   PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_66", "Maximum number of factors with identical sparsity structure that must be kept in memory at the same time", "None", mat_mkl_cpardiso->maxfct, &icntl, &flg));
523:   if (flg) mat_mkl_cpardiso->maxfct = icntl;

525:   PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_67", "Indicates the actual matrix for the solution phase", "None", mat_mkl_cpardiso->mnum, &icntl, &flg));
526:   if (flg) mat_mkl_cpardiso->mnum = icntl;

528:   PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_68", "Message level information", "None", mat_mkl_cpardiso->msglvl, &icntl, &flg));
529:   if (flg) mat_mkl_cpardiso->msglvl = icntl;

531:   PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_69", "Defines the matrix type", "None", mat_mkl_cpardiso->mtype, &icntl, &flg));
532:   if (flg) mat_mkl_cpardiso->mtype = icntl;
533:   PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_1", "Use default values", "None", mat_mkl_cpardiso->iparm[0], &icntl, &flg));

535:   if (flg && icntl != 0) {
536:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_2", "Fill-in reducing ordering for the input matrix", "None", mat_mkl_cpardiso->iparm[1], &icntl, &flg));
537:     if (flg) mat_mkl_cpardiso->iparm[1] = icntl;

539:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_4", "Preconditioned CGS/CG", "None", mat_mkl_cpardiso->iparm[3], &icntl, &flg));
540:     if (flg) mat_mkl_cpardiso->iparm[3] = icntl;

542:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_5", "User permutation", "None", mat_mkl_cpardiso->iparm[4], &icntl, &flg));
543:     if (flg) mat_mkl_cpardiso->iparm[4] = icntl;

545:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_6", "Write solution on x", "None", mat_mkl_cpardiso->iparm[5], &icntl, &flg));
546:     if (flg) mat_mkl_cpardiso->iparm[5] = icntl;

548:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_8", "Iterative refinement step", "None", mat_mkl_cpardiso->iparm[7], &icntl, &flg));
549:     if (flg) mat_mkl_cpardiso->iparm[7] = icntl;

551:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_10", "Pivoting perturbation", "None", mat_mkl_cpardiso->iparm[9], &icntl, &flg));
552:     if (flg) mat_mkl_cpardiso->iparm[9] = icntl;

554:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_11", "Scaling vectors", "None", mat_mkl_cpardiso->iparm[10], &icntl, &flg));
555:     if (flg) mat_mkl_cpardiso->iparm[10] = icntl;

557:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_12", "Solve with transposed or conjugate transposed matrix A", "None", mat_mkl_cpardiso->iparm[11], &icntl, &flg));
558:     if (flg) mat_mkl_cpardiso->iparm[11] = icntl;

560:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_13", "Improved accuracy using (non-) symmetric weighted matching", "None", mat_mkl_cpardiso->iparm[12], &icntl, &flg));
561:     if (flg) mat_mkl_cpardiso->iparm[12] = icntl;

563:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_18", "Numbers of non-zero elements", "None", mat_mkl_cpardiso->iparm[17], &icntl, &flg));
564:     if (flg) mat_mkl_cpardiso->iparm[17] = icntl;

566:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_19", "Report number of floating point operations", "None", mat_mkl_cpardiso->iparm[18], &icntl, &flg));
567:     if (flg) mat_mkl_cpardiso->iparm[18] = icntl;

569:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_21", "Pivoting for symmetric indefinite matrices", "None", mat_mkl_cpardiso->iparm[20], &icntl, &flg));
570:     if (flg) mat_mkl_cpardiso->iparm[20] = icntl;

572:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_24", "Parallel factorization control", "None", mat_mkl_cpardiso->iparm[23], &icntl, &flg));
573:     if (flg) mat_mkl_cpardiso->iparm[23] = icntl;

575:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_25", "Parallel forward/backward solve control", "None", mat_mkl_cpardiso->iparm[24], &icntl, &flg));
576:     if (flg) mat_mkl_cpardiso->iparm[24] = icntl;

578:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_27", "Matrix checker", "None", mat_mkl_cpardiso->iparm[26], &icntl, &flg));
579:     if (flg) mat_mkl_cpardiso->iparm[26] = icntl;

581:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_31", "Partial solve and computing selected components of the solution vectors", "None", mat_mkl_cpardiso->iparm[30], &icntl, &flg));
582:     if (flg) mat_mkl_cpardiso->iparm[30] = icntl;

584:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_34", "Optimal number of threads for conditional numerical reproducibility (CNR) mode", "None", mat_mkl_cpardiso->iparm[33], &icntl, &flg));
585:     if (flg) mat_mkl_cpardiso->iparm[33] = icntl;

587:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_60", "Intel MKL Cluster PARDISO mode", "None", mat_mkl_cpardiso->iparm[59], &icntl, &flg));
588:     if (flg) mat_mkl_cpardiso->iparm[59] = icntl;
589:   }

591:   PetscOptionsEnd();
592:   PetscFunctionReturn(PETSC_SUCCESS);
593: }

595: static PetscErrorCode PetscInitialize_MKL_CPARDISO(Mat A, Mat_MKL_CPARDISO *mat_mkl_cpardiso)
596: {
597:   PetscInt    bs;
598:   PetscBool   match;
599:   PetscMPIInt size;
600:   MPI_Comm    comm;

602:   PetscFunctionBegin;
603:   PetscCallMPI(MPI_Comm_dup(PetscObjectComm((PetscObject)A), &comm));
604:   PetscCallMPI(MPI_Comm_size(comm, &size));
605:   mat_mkl_cpardiso->comm_mkl_cpardiso = MPI_Comm_c2f(comm);

607:   mat_mkl_cpardiso->CleanUp = PETSC_FALSE;
608:   mat_mkl_cpardiso->maxfct  = 1;
609:   mat_mkl_cpardiso->mnum    = 1;
610:   mat_mkl_cpardiso->n       = A->rmap->N;
611:   if (mat_mkl_cpardiso->iparm[36]) mat_mkl_cpardiso->n /= mat_mkl_cpardiso->iparm[36];
612:   mat_mkl_cpardiso->msglvl = 0;
613:   mat_mkl_cpardiso->nrhs   = 1;
614:   mat_mkl_cpardiso->err    = 0;
615:   mat_mkl_cpardiso->phase  = -1;
616: #if defined(PETSC_USE_COMPLEX)
617:   mat_mkl_cpardiso->mtype = 13;
618: #else
619:   mat_mkl_cpardiso->mtype = 11;
620: #endif

622: #if defined(PETSC_USE_REAL_SINGLE)
623:   mat_mkl_cpardiso->iparm[27] = 1;
624: #else
625:   mat_mkl_cpardiso->iparm[27] = 0;
626: #endif

628:   mat_mkl_cpardiso->iparm[0]  = 1;  /* Solver default parameters overridden with provided by iparm */
629:   mat_mkl_cpardiso->iparm[1]  = 2;  /* Use METIS for fill-in reordering */
630:   mat_mkl_cpardiso->iparm[5]  = 0;  /* Write solution into x */
631:   mat_mkl_cpardiso->iparm[7]  = 2;  /* Max number of iterative refinement steps */
632:   mat_mkl_cpardiso->iparm[9]  = 13; /* Perturb the pivot elements with 1E-13 */
633:   mat_mkl_cpardiso->iparm[10] = 1;  /* Use nonsymmetric permutation and scaling MPS */
634:   mat_mkl_cpardiso->iparm[12] = 1;  /* Switch on Maximum Weighted Matching algorithm (default for non-symmetric) */
635:   mat_mkl_cpardiso->iparm[17] = -1; /* Output: Number of nonzeros in the factor LU */
636:   mat_mkl_cpardiso->iparm[18] = -1; /* Output: Mflops for LU factorization */
637:   mat_mkl_cpardiso->iparm[26] = 1;  /* Check input data for correctness */

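  /* iparm[39] selects the matrix input format (per the Intel documentation): 0 = the whole matrix
     is supplied on the master rank; 2 = distributed CSR input, with iparm[40] and iparm[41]
     holding this rank's first and last row in the indexing selected by iparm[34] */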
639:   mat_mkl_cpardiso->iparm[39] = 0;
640:   if (size > 1) {
641:     mat_mkl_cpardiso->iparm[39] = 2;
642:     mat_mkl_cpardiso->iparm[40] = A->rmap->rstart;
643:     mat_mkl_cpardiso->iparm[41] = A->rmap->rend - 1;
644:   }
645:   PetscCall(PetscObjectTypeCompareAny((PetscObject)A, &match, MATMPIBAIJ, MATMPISBAIJ, ""));
646:   if (match) {
647:     PetscCall(MatGetBlockSize(A, &bs));
648:     mat_mkl_cpardiso->iparm[36] = bs;
649:     mat_mkl_cpardiso->iparm[40] /= bs;
650:     mat_mkl_cpardiso->iparm[41] /= bs;
651:     mat_mkl_cpardiso->iparm[40]++;
652:     mat_mkl_cpardiso->iparm[41]++;
653:     mat_mkl_cpardiso->iparm[34] = 0; /* Fortran style */
654:   } else {
655:     mat_mkl_cpardiso->iparm[34] = 1; /* C style */
656:   }

658:   mat_mkl_cpardiso->perm = NULL;
659:   PetscFunctionReturn(PETSC_SUCCESS);
660: }

662: /*
663:  * Symbolic factorization. MKL Cluster PARDISO analysis phase.
664:  */
665: static PetscErrorCode MatLUFactorSymbolic_AIJMKL_CPARDISO(Mat F, Mat A, IS r, IS c, const MatFactorInfo *info)
666: {
667:   Mat_MKL_CPARDISO *mat_mkl_cpardiso = (Mat_MKL_CPARDISO *)F->data;

669:   PetscFunctionBegin;
670:   mat_mkl_cpardiso->matstruc = DIFFERENT_NONZERO_PATTERN;

672:   /* Set MKL_CPARDISO options from the options database */
673:   PetscCall(MatSetFromOptions_MKL_CPARDISO(F, A));
674:   PetscCall((*mat_mkl_cpardiso->ConvertToTriples)(A, MAT_INITIAL_MATRIX, &mat_mkl_cpardiso->nz, &mat_mkl_cpardiso->ia, &mat_mkl_cpardiso->ja, &mat_mkl_cpardiso->a));

676:   mat_mkl_cpardiso->n = A->rmap->N;
677:   if (mat_mkl_cpardiso->iparm[36]) mat_mkl_cpardiso->n /= mat_mkl_cpardiso->iparm[36];

679:   /* analysis phase */
680:   mat_mkl_cpardiso->phase = JOB_ANALYSIS;

682:   cluster_sparse_solver(mat_mkl_cpardiso->pt, &mat_mkl_cpardiso->maxfct, &mat_mkl_cpardiso->mnum, &mat_mkl_cpardiso->mtype, &mat_mkl_cpardiso->phase, &mat_mkl_cpardiso->n, mat_mkl_cpardiso->a, mat_mkl_cpardiso->ia, mat_mkl_cpardiso->ja,
683:                         mat_mkl_cpardiso->perm, &mat_mkl_cpardiso->nrhs, mat_mkl_cpardiso->iparm, &mat_mkl_cpardiso->msglvl, NULL, NULL, &mat_mkl_cpardiso->comm_mkl_cpardiso, (PetscInt *)&mat_mkl_cpardiso->err);

685:   PetscCheck(mat_mkl_cpardiso->err >= 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "Error reported by MKL Cluster PARDISO: err=%d, msg = \"%s\". Please check manual", mat_mkl_cpardiso->err, Err_MSG_CPardiso(mat_mkl_cpardiso->err));

687:   mat_mkl_cpardiso->CleanUp = PETSC_TRUE;
688:   F->ops->lufactornumeric   = MatFactorNumeric_MKL_CPARDISO;
689:   F->ops->solve             = MatSolve_MKL_CPARDISO;
690:   F->ops->forwardsolve      = MatForwardSolve_MKL_CPARDISO;
691:   F->ops->backwardsolve     = MatBackwardSolve_MKL_CPARDISO;
692:   F->ops->solvetranspose    = MatSolveTranspose_MKL_CPARDISO;
693:   F->ops->matsolve          = MatMatSolve_MKL_CPARDISO;
694:   PetscFunctionReturn(PETSC_SUCCESS);
695: }

697: static PetscErrorCode MatCholeskyFactorSymbolic_AIJMKL_CPARDISO(Mat F, Mat A, IS perm, const MatFactorInfo *info)
698: {
699:   Mat_MKL_CPARDISO *mat_mkl_cpardiso = (Mat_MKL_CPARDISO *)F->data;

701:   PetscFunctionBegin;
702:   mat_mkl_cpardiso->matstruc = DIFFERENT_NONZERO_PATTERN;

704:   /* Set MKL_CPARDISO options from the options database */
705:   PetscCall(MatSetFromOptions_MKL_CPARDISO(F, A));
706:   PetscCall((*mat_mkl_cpardiso->ConvertToTriples)(A, MAT_INITIAL_MATRIX, &mat_mkl_cpardiso->nz, &mat_mkl_cpardiso->ia, &mat_mkl_cpardiso->ja, &mat_mkl_cpardiso->a));

708:   mat_mkl_cpardiso->n = A->rmap->N;
709:   if (mat_mkl_cpardiso->iparm[36]) mat_mkl_cpardiso->n /= mat_mkl_cpardiso->iparm[36];
710:   PetscCheck(!PetscDefined(USE_COMPLEX), PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "No support for PARDISO CHOLESKY with complex scalars! Use MAT_FACTOR_LU instead");
711:   if (A->spd == PETSC_BOOL3_TRUE) mat_mkl_cpardiso->mtype = 2;
712:   else mat_mkl_cpardiso->mtype = -2;

714:   /* analysis phase */
715:   mat_mkl_cpardiso->phase = JOB_ANALYSIS;

717:   cluster_sparse_solver(mat_mkl_cpardiso->pt, &mat_mkl_cpardiso->maxfct, &mat_mkl_cpardiso->mnum, &mat_mkl_cpardiso->mtype, &mat_mkl_cpardiso->phase, &mat_mkl_cpardiso->n, mat_mkl_cpardiso->a, mat_mkl_cpardiso->ia, mat_mkl_cpardiso->ja,
718:                         mat_mkl_cpardiso->perm, &mat_mkl_cpardiso->nrhs, mat_mkl_cpardiso->iparm, &mat_mkl_cpardiso->msglvl, NULL, NULL, &mat_mkl_cpardiso->comm_mkl_cpardiso, (PetscInt *)&mat_mkl_cpardiso->err);

720:   PetscCheck(mat_mkl_cpardiso->err >= 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "Error reported by MKL Cluster PARDISO: err=%d, msg = \"%s\". Please check manual", mat_mkl_cpardiso->err, Err_MSG_CPardiso(mat_mkl_cpardiso->err));

722:   mat_mkl_cpardiso->CleanUp     = PETSC_TRUE;
723:   F->ops->choleskyfactornumeric = MatFactorNumeric_MKL_CPARDISO;
724:   F->ops->solve                 = MatSolve_MKL_CPARDISO;
725:   F->ops->solvetranspose        = MatSolveTranspose_MKL_CPARDISO;
726:   F->ops->matsolve              = MatMatSolve_MKL_CPARDISO;
727:   if (A->spd == PETSC_BOOL3_TRUE) {
728:     F->ops->forwardsolve  = MatForwardSolve_MKL_CPARDISO;
729:     F->ops->backwardsolve = MatBackwardSolve_MKL_CPARDISO;
730:   }
731:   PetscFunctionReturn(PETSC_SUCCESS);
732: }

734: static PetscErrorCode MatView_MKL_CPARDISO(Mat A, PetscViewer viewer)
735: {
736:   PetscBool         iascii;
737:   PetscViewerFormat format;
738:   Mat_MKL_CPARDISO *mat_mkl_cpardiso = (Mat_MKL_CPARDISO *)A->data;
739:   PetscInt          i;

741:   PetscFunctionBegin;
742:   /* check if matrix is mkl_cpardiso type */
743:   if (A->ops->solve != MatSolve_MKL_CPARDISO) PetscFunctionReturn(PETSC_SUCCESS);

745:   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &iascii));
746:   if (iascii) {
747:     PetscCall(PetscViewerGetFormat(viewer, &format));
748:     if (format == PETSC_VIEWER_ASCII_INFO) {
749:       PetscCall(PetscViewerASCIIPrintf(viewer, "MKL Cluster PARDISO run parameters:\n"));
750:       PetscCall(PetscViewerASCIIPrintf(viewer, "MKL Cluster PARDISO phase:      %d\n", (int)mat_mkl_cpardiso->phase));
751:       for (i = 1; i <= 64; i++) PetscCall(PetscViewerASCIIPrintf(viewer, "MKL Cluster PARDISO iparm[%d]:  %d\n", (int)i, (int)mat_mkl_cpardiso->iparm[i - 1]));
752:       PetscCall(PetscViewerASCIIPrintf(viewer, "MKL Cluster PARDISO maxfct:     %d\n", (int)mat_mkl_cpardiso->maxfct));
753:       PetscCall(PetscViewerASCIIPrintf(viewer, "MKL Cluster PARDISO mnum:       %d\n", (int)mat_mkl_cpardiso->mnum));
754:       PetscCall(PetscViewerASCIIPrintf(viewer, "MKL Cluster PARDISO mtype:      %d\n", (int)mat_mkl_cpardiso->mtype));
755:       PetscCall(PetscViewerASCIIPrintf(viewer, "MKL Cluster PARDISO n:          %d\n", (int)mat_mkl_cpardiso->n));
756:       PetscCall(PetscViewerASCIIPrintf(viewer, "MKL Cluster PARDISO nrhs:       %d\n", (int)mat_mkl_cpardiso->nrhs));
757:       PetscCall(PetscViewerASCIIPrintf(viewer, "MKL Cluster PARDISO msglvl:     %d\n", (int)mat_mkl_cpardiso->msglvl));
758:     }
759:   }
760:   PetscFunctionReturn(PETSC_SUCCESS);
761: }

763: static PetscErrorCode MatGetInfo_MKL_CPARDISO(Mat A, MatInfoType flag, MatInfo *info)
764: {
765:   Mat_MKL_CPARDISO *mat_mkl_cpardiso = (Mat_MKL_CPARDISO *)A->data;

767:   PetscFunctionBegin;
768:   info->block_size        = 1.0;
769:   info->nz_allocated      = mat_mkl_cpardiso->nz + 0.0;
770:   info->nz_unneeded       = 0.0;
771:   info->assemblies        = 0.0;
772:   info->mallocs           = 0.0;
773:   info->memory            = 0.0;
774:   info->fill_ratio_given  = 0;
775:   info->fill_ratio_needed = 0;
776:   info->factor_mallocs    = 0;
777:   PetscFunctionReturn(PETSC_SUCCESS);
778: }

780: static PetscErrorCode MatMkl_CPardisoSetCntl_MKL_CPARDISO(Mat F, PetscInt icntl, PetscInt ival)
781: {
782:   Mat_MKL_CPARDISO *mat_mkl_cpardiso = (Mat_MKL_CPARDISO *)F->data;

784:   PetscFunctionBegin;
785:   if (icntl >= 1 && icntl <= 64) {
786:     mat_mkl_cpardiso->iparm[icntl - 1] = ival;
787:   } else {
788:     if (icntl == 65) mkl_set_num_threads((int)ival);
789:     else if (icntl == 66) mat_mkl_cpardiso->maxfct = ival;
790:     else if (icntl == 67) mat_mkl_cpardiso->mnum = ival;
791:     else if (icntl == 68) mat_mkl_cpardiso->msglvl = ival;
792:     else if (icntl == 69) mat_mkl_cpardiso->mtype = ival;
793:   }
794:   PetscFunctionReturn(PETSC_SUCCESS);
795: }

797: /*@
798:   MatMkl_CPardisoSetCntl - Set MKL Cluster PARDISO parameters
799:   <https://www.intel.com/content/www/us/en/docs/onemkl/developer-reference-c/2023-2/onemkl-pardiso-parallel-direct-sparse-solver-iface.html>

801:   Logically Collective

803:   Input Parameters:
804: + F     - the factored matrix obtained by calling `MatGetFactor()`
805: . icntl - index of MKL Cluster PARDISO parameter
806: - ival  - value of MKL Cluster PARDISO parameter

808:   Options Database Key:
809: . -mat_mkl_cpardiso_<icntl> <ival> - set the option numbered icntl to ival

811:   Level: intermediate

813:   Note:
814:   This routine can only be used if you obtained the factored matrix with a direct call to `MatGetFactor()`; when solving through `TS`, `SNES`, or `KSP`,
815:   set these parameters via the options database instead. See `MATSOLVERMKL_CPARDISO` for the available options.
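
  Example Usage:
  A minimal sketch of direct use, assuming `A` is an assembled parallel matrix and `b`, `x` are conforming vectors; the ordering `IS`
  arguments of `MatLUFactorSymbolic()` are unused by this solver and are assumed here to accept `NULL`:
.vb
  Mat           F;
  MatFactorInfo info;

  PetscCall(MatFactorInfoInitialize(&info));
  PetscCall(MatGetFactor(A, MATSOLVERMKL_CPARDISO, MAT_FACTOR_LU, &F));
  PetscCall(MatMkl_CPardisoSetCntl(F, 68, 1)); /* icntl 68 sets msglvl so the solver prints statistics */
  PetscCall(MatLUFactorSymbolic(F, A, NULL, NULL, &info));
  PetscCall(MatLUFactorNumeric(F, A, &info));
  PetscCall(MatSolve(F, b, x));
  PetscCall(MatDestroy(&F));
.ve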

817: .seealso: [](ch_matrices), `Mat`, `MatGetFactor()`, `MATMPIAIJ`, `MATSOLVERMKL_CPARDISO`
818: @*/
819: PetscErrorCode MatMkl_CPardisoSetCntl(Mat F, PetscInt icntl, PetscInt ival)
820: {
821:   PetscFunctionBegin;
822:   PetscTryMethod(F, "MatMkl_CPardisoSetCntl_C", (Mat, PetscInt, PetscInt), (F, icntl, ival));
823:   PetscFunctionReturn(PETSC_SUCCESS);
824: }

826: /*MC
827:   MATSOLVERMKL_CPARDISO - A matrix solver type providing direct solvers (LU and Cholesky) for parallel matrices via the external package MKL Cluster PARDISO
828:   <https://www.intel.com/content/www/us/en/docs/onemkl/developer-reference-c/2023-2/onemkl-pardiso-parallel-direct-sparse-solver-iface.html>

830:   Works with `MATAIJ` and `MATMPIBAIJ` matrices for LU, and with `MATMPISBAIJ` matrices for Cholesky

832:   Use `-pc_type lu` `-pc_factor_mat_solver_type mkl_cpardiso` to use this direct solver
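
  For example, run a generic PETSc application (here called `./app`) with
.vb
  mpiexec -n 4 ./app -pc_type lu -pc_factor_mat_solver_type mkl_cpardiso -mat_mkl_cpardiso_68 1
.ve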

834:   Options Database Keys:
835: + -mat_mkl_cpardiso_65 - Suggested number of threads to use within MKL Cluster PARDISO
836: . -mat_mkl_cpardiso_66 - Maximum number of factors with identical sparsity structure that must be kept in memory at the same time
837: . -mat_mkl_cpardiso_67 - Indicates the actual matrix for the solution phase
838: . -mat_mkl_cpardiso_68 - Message level information, use 1 to get detailed information on the solver options
839: . -mat_mkl_cpardiso_69 - Defines the matrix type. IMPORTANT: setting this flag resets the iparm parameters to the defaults for the given matrix type
840: . -mat_mkl_cpardiso_1  - Use default values
841: . -mat_mkl_cpardiso_2  - Fill-in reducing ordering for the input matrix
842: . -mat_mkl_cpardiso_4  - Preconditioned CGS/CG
843: . -mat_mkl_cpardiso_5  - User permutation
844: . -mat_mkl_cpardiso_6  - Write solution on x
845: . -mat_mkl_cpardiso_8  - Iterative refinement step
846: . -mat_mkl_cpardiso_10 - Pivoting perturbation
847: . -mat_mkl_cpardiso_11 - Scaling vectors
848: . -mat_mkl_cpardiso_12 - Solve with transposed or conjugate transposed matrix A
849: . -mat_mkl_cpardiso_13 - Improved accuracy using (non-) symmetric weighted matching
850: . -mat_mkl_cpardiso_18 - Numbers of non-zero elements
851: . -mat_mkl_cpardiso_19 - Report number of floating point operations
852: . -mat_mkl_cpardiso_21 - Pivoting for symmetric indefinite matrices
853: . -mat_mkl_cpardiso_24 - Parallel factorization control
854: . -mat_mkl_cpardiso_25 - Parallel forward/backward solve control
855: . -mat_mkl_cpardiso_27 - Matrix checker
856: . -mat_mkl_cpardiso_31 - Partial solve and computing selected components of the solution vectors
857: . -mat_mkl_cpardiso_34 - Optimal number of threads for conditional numerical reproducibility (CNR) mode
858: - -mat_mkl_cpardiso_60 - Intel MKL Cluster PARDISO mode

860:   Level: beginner

862:   Notes:
863:   Use `-mat_mkl_cpardiso_68 1` to display the number of threads the solver is using. MKL does not provide a way to directly access this
864:   information.

866:   For more information on the options check
867:   <https://www.intel.com/content/www/us/en/docs/onemkl/developer-reference-c/2023-2/onemkl-pardiso-parallel-direct-sparse-solver-iface.html>

869: .seealso: [](ch_matrices), `Mat`, `PCFactorSetMatSolverType()`, `MatSolverType`, `MatMkl_CPardisoSetCntl()`, `MatGetFactor()`, `MATSOLVERMKL_PARDISO`
870: M*/

872: static PetscErrorCode MatFactorGetSolverType_mkl_cpardiso(Mat A, MatSolverType *type)
873: {
874:   PetscFunctionBegin;
875:   *type = MATSOLVERMKL_CPARDISO;
876:   PetscFunctionReturn(PETSC_SUCCESS);
877: }

879: /* MatGetFactor for AIJ, BAIJ, and SBAIJ matrices */
880: static PetscErrorCode MatGetFactor_mpiaij_mkl_cpardiso(Mat A, MatFactorType ftype, Mat *F)
881: {
882:   Mat               B;
883:   Mat_MKL_CPARDISO *mat_mkl_cpardiso;
884:   PetscBool         isSeqAIJ, isMPIBAIJ, isMPISBAIJ;

886:   PetscFunctionBegin;
887:   /* Create the factorization matrix */

889:   PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJ, &isSeqAIJ));
890:   PetscCall(PetscObjectTypeCompare((PetscObject)A, MATMPIBAIJ, &isMPIBAIJ));
891:   PetscCall(PetscObjectTypeCompare((PetscObject)A, MATMPISBAIJ, &isMPISBAIJ));
892:   PetscCall(MatCreate(PetscObjectComm((PetscObject)A), &B));
893:   PetscCall(MatSetSizes(B, A->rmap->n, A->cmap->n, A->rmap->N, A->cmap->N));
894:   PetscCall(PetscStrallocpy("mkl_cpardiso", &((PetscObject)B)->type_name));
895:   PetscCall(MatSetUp(B));

897:   PetscCall(PetscNew(&mat_mkl_cpardiso));

899:   if (isSeqAIJ) mat_mkl_cpardiso->ConvertToTriples = MatCopy_seqaij_seqaij_MKL_CPARDISO;
900:   else if (isMPIBAIJ) mat_mkl_cpardiso->ConvertToTriples = MatConvertToTriples_mpibaij_mpibaij_MKL_CPARDISO;
901:   else if (isMPISBAIJ) mat_mkl_cpardiso->ConvertToTriples = MatConvertToTriples_mpisbaij_mpisbaij_MKL_CPARDISO;
902:   else mat_mkl_cpardiso->ConvertToTriples = MatConvertToTriples_mpiaij_mpiaij_MKL_CPARDISO;

904:   if (ftype == MAT_FACTOR_LU) B->ops->lufactorsymbolic = MatLUFactorSymbolic_AIJMKL_CPARDISO;
905:   else B->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_AIJMKL_CPARDISO;
906:   B->ops->destroy = MatDestroy_MKL_CPARDISO;

908:   B->ops->view    = MatView_MKL_CPARDISO;
909:   B->ops->getinfo = MatGetInfo_MKL_CPARDISO;

911:   B->factortype = ftype;
912:   B->assembled  = PETSC_TRUE; /* required by -ksp_view */

914:   B->data = mat_mkl_cpardiso;

916:   /* set solvertype */
917:   PetscCall(PetscFree(B->solvertype));
918:   PetscCall(PetscStrallocpy(MATSOLVERMKL_CPARDISO, &B->solvertype));

920:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatFactorGetSolverType_C", MatFactorGetSolverType_mkl_cpardiso));
921:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMkl_CPardisoSetCntl_C", MatMkl_CPardisoSetCntl_MKL_CPARDISO));
922:   PetscCall(PetscInitialize_MKL_CPARDISO(A, mat_mkl_cpardiso));

924:   *F = B;
925:   PetscFunctionReturn(PETSC_SUCCESS);
926: }

928: PETSC_EXTERN PetscErrorCode MatSolverTypeRegister_MKL_CPardiso(void)
929: {
930:   PetscFunctionBegin;
931:   PetscCall(MatSolverTypeRegister(MATSOLVERMKL_CPARDISO, MATMPIAIJ, MAT_FACTOR_LU, MatGetFactor_mpiaij_mkl_cpardiso));
932:   PetscCall(MatSolverTypeRegister(MATSOLVERMKL_CPARDISO, MATSEQAIJ, MAT_FACTOR_LU, MatGetFactor_mpiaij_mkl_cpardiso));
933:   PetscCall(MatSolverTypeRegister(MATSOLVERMKL_CPARDISO, MATMPIBAIJ, MAT_FACTOR_LU, MatGetFactor_mpiaij_mkl_cpardiso));
934:   PetscCall(MatSolverTypeRegister(MATSOLVERMKL_CPARDISO, MATMPISBAIJ, MAT_FACTOR_CHOLESKY, MatGetFactor_mpiaij_mkl_cpardiso));
935:   PetscFunctionReturn(PETSC_SUCCESS);
936: }