Actual source code: mkl_cpardiso.c

  1: #include <petscsys.h>
  2: #include <../src/mat/impls/aij/mpi/mpiaij.h>
  3: #include <../src/mat/impls/sbaij/mpi/mpisbaij.h>

  5: #if defined(PETSC_HAVE_MKL_INTEL_ILP64)
  6:   #define MKL_ILP64
  7: #endif
  8: #include <mkl.h>
  9: #include <mkl_cluster_sparse_solver.h>

 11: /*
 12:  *  Possible mkl_cpardiso phases that control the execution of the solver.
 13:  *  For more information check the mkl_cpardiso manual.
 14:  */
 15: #define JOB_ANALYSIS                                                    11
 16: #define JOB_ANALYSIS_NUMERICAL_FACTORIZATION                            12
 17: #define JOB_ANALYSIS_NUMERICAL_FACTORIZATION_SOLVE_ITERATIVE_REFINEMENT 13
 18: #define JOB_NUMERICAL_FACTORIZATION                                     22
 19: #define JOB_NUMERICAL_FACTORIZATION_SOLVE_ITERATIVE_REFINEMENT          23
 20: #define JOB_SOLVE_ITERATIVE_REFINEMENT                                  33
 21: #define JOB_SOLVE_FORWARD_SUBSTITUTION                                  331
 22: #define JOB_SOLVE_DIAGONAL_SUBSTITUTION                                 332
 23: #define JOB_SOLVE_BACKWARD_SUBSTITUTION                                 333
 24: #define JOB_RELEASE_OF_LU_MEMORY                                        0
 25: #define JOB_RELEASE_OF_ALL_MEMORY                                       -1

 27: #define IPARM_SIZE 64
 28: #define INT_TYPE   MKL_INT

 30: static const char *Err_MSG_CPardiso(int errNo)
 31: {
 32:   switch (errNo) {
 33:   case -1:
 34:     return "input inconsistent";
 35:     break;
 36:   case -2:
 37:     return "not enough memory";
 38:     break;
 39:   case -3:
 40:     return "reordering problem";
 41:     break;
 42:   case -4:
 43:     return "zero pivot, numerical factorization or iterative refinement problem";
 44:     break;
 45:   case -5:
 46:     return "unclassified (internal) error";
 47:     break;
 48:   case -6:
 49:     return "preordering failed (matrix types 11, 13 only)";
 50:     break;
 51:   case -7:
 52:     return "diagonal matrix problem";
 53:     break;
 54:   case -8:
 55:     return "32-bit integer overflow problem";
 56:     break;
 57:   case -9:
 58:     return "not enough memory for OOC";
 59:     break;
 60:   case -10:
 61:     return "problems with opening OOC temporary files";
 62:     break;
 63:   case -11:
 64:     return "read/write problems with the OOC data file";
 65:     break;
 66:   default:
 67:     return "unknown error";
 68:   }
 69: }

 71: #define PetscCallCluster(f) PetscStackCallExternalVoid("cluster_sparse_solver", f);

 73: /*
 74:  *  Internal data structure.
 75:  *  For more information check the mkl_cpardiso manual.
 76:  */

 78: typedef struct {
 79:   /* Configuration vector */
 80:   INT_TYPE iparm[IPARM_SIZE];

 82:   /*
 83:    * Internal mkl_cpardiso memory location.
 84:    * After the first call to mkl_cpardiso do not modify pt, as that could cause a serious memory leak.
 85:    */
 86:   void *pt[IPARM_SIZE];

 88:   MPI_Fint comm_mkl_cpardiso;

 90:   /* Basic mkl_cpardiso info */
 91:   INT_TYPE phase, maxfct, mnum, mtype, n, nrhs, msglvl, err;

 93:   /* Matrix structure */
 94:   PetscScalar *a;

 96:   INT_TYPE *ia, *ja;

 98:   /* Number of non-zero elements */
 99:   INT_TYPE nz;

101:   /* Row permutation vector */
102:   INT_TYPE *perm;

104:   /* Indicates whether the matrix preserves its sparse structure. */
105:   MatStructure matstruc;

107:   PetscErrorCode (*ConvertToTriples)(Mat, MatReuse, PetscInt *, PetscInt **, PetscInt **, PetscScalar **);

109:   /* True if the mkl_cpardiso solver has been used. */
110:   PetscBool CleanUp;
111: } Mat_MKL_CPARDISO;

113: /*
114:  * Copy the elements of matrix A into the aij (CSR) arrays passed to MKL Cluster PARDISO.
115:  * Input:
116:  *   - Mat A: MATSEQAIJ matrix
117:  *   - MatReuse reuse:
118:  *     - MAT_INITIAL_MATRIX: create a new aij representation
119:  *     - MAT_REUSE_MATRIX: reuse the existing aij representation and only update the values
120:  * Output:
121:  *   - PetscInt *nnz: number of nonzero elements
122:  *   - PetscInt **r: pointer to the row index array
123:  *   - PetscInt **c: pointer to the column index array
124:  *   - PetscScalar **v: nonzero values
125:  *
126:  * The index base (C or Fortran style) handed to the solver is not an argument here; it is
127:  * controlled through iparm[34], set in PetscInitialize_MKL_CPARDISO().
128:  */
129: static PetscErrorCode MatCopy_seqaij_seqaij_MKL_CPARDISO(Mat A, MatReuse reuse, PetscInt *nnz, PetscInt **r, PetscInt **c, PetscScalar **v)
130: {
131:   Mat_SeqAIJ *aa = (Mat_SeqAIJ *)A->data;

133:   PetscFunctionBegin;
134:   *v = aa->a;
135:   if (reuse == MAT_INITIAL_MATRIX) {
136:     *r   = (INT_TYPE *)aa->i;
137:     *c   = (INT_TYPE *)aa->j;
138:     *nnz = aa->nz;
139:   }
140:   PetscFunctionReturn(PETSC_SUCCESS);
141: }
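The aij (CSR) arrays that the routine above hands to MKL Cluster PARDISO form the usual compressed-row triple (row pointers, column indices, values). A minimal standalone sketch using a hypothetical 3x3 matrix with zero-based indexing (matching the iparm[34] = 1 C-style setting used for the AIJ path):

#include <stdio.h>

int main(void)
{
  /* Hypothetical 3x3 matrix
   *   [ 4 0 1 ]
   *   [ 0 3 0 ]
   *   [ 2 0 5 ]
   * in zero-based CSR form: ia are row pointers, ja column indices, a nonzero values */
  int    ia[] = {0, 2, 3, 5};
  int    ja[] = {0, 2, 1, 0, 2};
  double a[]  = {4.0, 1.0, 3.0, 2.0, 5.0};

  for (int row = 0; row < 3; row++)
    for (int k = ia[row]; k < ia[row + 1]; k++) printf("A(%d,%d) = %g\n", row, ja[k], a[k]);
  return 0;
}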

143: static PetscErrorCode MatConvertToTriples_mpiaij_mpiaij_MKL_CPARDISO(Mat A, MatReuse reuse, PetscInt *nnz, PetscInt **r, PetscInt **c, PetscScalar **v)
144: {
145:   const PetscInt    *ai, *aj, *bi, *bj, *garray, m = A->rmap->n, *ajj, *bjj;
146:   PetscInt           rstart, nz, i, j, countA, countB;
147:   PetscInt          *row, *col;
148:   const PetscScalar *av, *bv;
149:   PetscScalar       *val;
150:   Mat_MPIAIJ        *mat = (Mat_MPIAIJ *)A->data;
151:   Mat_SeqAIJ        *aa  = (Mat_SeqAIJ *)mat->A->data;
152:   Mat_SeqAIJ        *bb  = (Mat_SeqAIJ *)mat->B->data;
153:   PetscInt           colA_start, jB, jcol;

155:   PetscFunctionBegin;
156:   ai     = aa->i;
157:   aj     = aa->j;
158:   bi     = bb->i;
159:   bj     = bb->j;
160:   rstart = A->rmap->rstart;
161:   av     = aa->a;
162:   bv     = bb->a;

164:   garray = mat->garray;

166:   if (reuse == MAT_INITIAL_MATRIX) {
167:     nz   = aa->nz + bb->nz;
168:     *nnz = nz;
169:     PetscCall(PetscMalloc3(m + 1, &row, nz, &col, nz, &val));
170:     *r = row;
171:     *c = col;
172:     *v = val;
173:   } else {
174:     row = *r;
175:     col = *c;
176:     val = *v;
177:   }

179:   nz = 0;
180:   for (i = 0; i < m; i++) {
181:     row[i] = nz;
182:     countA = ai[i + 1] - ai[i];
183:     countB = bi[i + 1] - bi[i];
184:     ajj    = aj + ai[i]; /* ptr to the beginning of this row */
185:     bjj    = bj + bi[i];

187:     /* B part, smaller col index */
188:     colA_start = rstart + (countA > 0 ? ajj[0] : 0); /* the smallest global col index of A */
189:     jB         = 0;
190:     for (j = 0; j < countB; j++) {
191:       jcol = garray[bjj[j]];
192:       if (jcol > colA_start) break;
193:       col[nz]   = jcol;
194:       val[nz++] = *bv++;
195:     }
196:     jB = j;

198:     /* A part */
199:     for (j = 0; j < countA; j++) {
200:       col[nz]   = rstart + ajj[j];
201:       val[nz++] = *av++;
202:     }

204:     /* B part, larger col index */
205:     for (j = jB; j < countB; j++) {
206:       col[nz]   = garray[bjj[j]];
207:       val[nz++] = *bv++;
208:     }
209:   }
210:   row[m] = nz;
211:   PetscFunctionReturn(PETSC_SUCCESS);
212: }

214: static PetscErrorCode MatConvertToTriples_mpibaij_mpibaij_MKL_CPARDISO(Mat A, MatReuse reuse, PetscInt *nnz, PetscInt **r, PetscInt **c, PetscScalar **v)
215: {
216:   const PetscInt    *ai, *aj, *bi, *bj, *garray, bs = A->rmap->bs, bs2 = bs * bs, m = A->rmap->n / bs, *ajj, *bjj;
217:   PetscInt           rstart, nz, i, j, countA, countB;
218:   PetscInt          *row, *col;
219:   const PetscScalar *av, *bv;
220:   PetscScalar       *val;
221:   Mat_MPIBAIJ       *mat = (Mat_MPIBAIJ *)A->data;
222:   Mat_SeqBAIJ       *aa  = (Mat_SeqBAIJ *)mat->A->data;
223:   Mat_SeqBAIJ       *bb  = (Mat_SeqBAIJ *)mat->B->data;
224:   PetscInt           colA_start, jB, jcol;

226:   PetscFunctionBegin;
227:   ai     = aa->i;
228:   aj     = aa->j;
229:   bi     = bb->i;
230:   bj     = bb->j;
231:   rstart = A->rmap->rstart / bs;
232:   av     = aa->a;
233:   bv     = bb->a;

235:   garray = mat->garray;

237:   if (reuse == MAT_INITIAL_MATRIX) {
238:     nz   = aa->nz + bb->nz;
239:     *nnz = nz;
240:     PetscCall(PetscMalloc3(m + 1, &row, nz, &col, nz * bs2, &val));
241:     *r = row;
242:     *c = col;
243:     *v = val;
244:   } else {
245:     row = *r;
246:     col = *c;
247:     val = *v;
248:   }

250:   nz = 0;
251:   for (i = 0; i < m; i++) {
252:     row[i] = nz + 1;
253:     countA = ai[i + 1] - ai[i];
254:     countB = bi[i + 1] - bi[i];
255:     ajj    = aj + ai[i]; /* ptr to the beginning of this row */
256:     bjj    = bj + bi[i];

258:     /* B part, smaller col index */
259:     colA_start = rstart + (countA > 0 ? ajj[0] : 0); /* the smallest global col index of A */
260:     jB         = 0;
261:     for (j = 0; j < countB; j++) {
262:       jcol = garray[bjj[j]];
263:       if (jcol > colA_start) break;
264:       col[nz++] = jcol + 1;
265:     }
266:     jB = j;
267:     PetscCall(PetscArraycpy(val, bv, jB * bs2));
268:     val += jB * bs2;
269:     bv += jB * bs2;

271:     /* A part */
272:     for (j = 0; j < countA; j++) col[nz++] = rstart + ajj[j] + 1;
273:     PetscCall(PetscArraycpy(val, av, countA * bs2));
274:     val += countA * bs2;
275:     av += countA * bs2;

277:     /* B part, larger col index */
278:     for (j = jB; j < countB; j++) col[nz++] = garray[bjj[j]] + 1;
279:     PetscCall(PetscArraycpy(val, bv, (countB - jB) * bs2));
280:     val += (countB - jB) * bs2;
281:     bv += (countB - jB) * bs2;
282:   }
283:   row[m] = nz + 1;
284:   PetscFunctionReturn(PETSC_SUCCESS);
285: }

287: static PetscErrorCode MatConvertToTriples_mpisbaij_mpisbaij_MKL_CPARDISO(Mat A, MatReuse reuse, PetscInt *nnz, PetscInt **r, PetscInt **c, PetscScalar **v)
288: {
289:   const PetscInt    *ai, *aj, *bi, *bj, *garray, bs = A->rmap->bs, bs2 = bs * bs, m = A->rmap->n / bs, *ajj, *bjj;
290:   PetscInt           rstart, nz, i, j, countA, countB;
291:   PetscInt          *row, *col;
292:   const PetscScalar *av, *bv;
293:   PetscScalar       *val;
294:   Mat_MPISBAIJ      *mat = (Mat_MPISBAIJ *)A->data;
295:   Mat_SeqSBAIJ      *aa  = (Mat_SeqSBAIJ *)mat->A->data;
296:   Mat_SeqBAIJ       *bb  = (Mat_SeqBAIJ *)mat->B->data;

298:   PetscFunctionBegin;
299:   ai     = aa->i;
300:   aj     = aa->j;
301:   bi     = bb->i;
302:   bj     = bb->j;
303:   rstart = A->rmap->rstart / bs;
304:   av     = aa->a;
305:   bv     = bb->a;

307:   garray = mat->garray;

309:   if (reuse == MAT_INITIAL_MATRIX) {
310:     nz   = aa->nz + bb->nz;
311:     *nnz = nz;
312:     PetscCall(PetscMalloc3(m + 1, &row, nz, &col, nz * bs2, &val));
313:     *r = row;
314:     *c = col;
315:     *v = val;
316:   } else {
317:     row = *r;
318:     col = *c;
319:     val = *v;
320:   }

322:   nz = 0;
323:   for (i = 0; i < m; i++) {
324:     row[i] = nz + 1;
325:     countA = ai[i + 1] - ai[i];
326:     countB = bi[i + 1] - bi[i];
327:     ajj    = aj + ai[i]; /* ptr to the beginning of this row */
328:     bjj    = bj + bi[i];

330:     /* A part */
331:     for (j = 0; j < countA; j++) col[nz++] = rstart + ajj[j] + 1;
332:     PetscCall(PetscArraycpy(val, av, countA * bs2));
333:     val += countA * bs2;
334:     av += countA * bs2;

336:     /* B part, larger col index */
337:     for (j = 0; j < countB; j++) col[nz++] = garray[bjj[j]] + 1;
338:     PetscCall(PetscArraycpy(val, bv, countB * bs2));
339:     val += countB * bs2;
340:     bv += countB * bs2;
341:   }
342:   row[m] = nz + 1;
343:   PetscFunctionReturn(PETSC_SUCCESS);
344: }

346: /*
347:  * Free memory for Mat_MKL_CPARDISO structure and pointers to objects.
348:  */
349: static PetscErrorCode MatDestroy_MKL_CPARDISO(Mat A)
350: {
351:   Mat_MKL_CPARDISO *mat_mkl_cpardiso = (Mat_MKL_CPARDISO *)A->data;
352:   MPI_Comm          comm;

354:   PetscFunctionBegin;
355:   /* Terminate the instance and deallocate its memory */
356:   if (mat_mkl_cpardiso->CleanUp) {
357:     mat_mkl_cpardiso->phase = JOB_RELEASE_OF_ALL_MEMORY;

359:     PetscCallCluster(cluster_sparse_solver(mat_mkl_cpardiso->pt, &mat_mkl_cpardiso->maxfct, &mat_mkl_cpardiso->mnum, &mat_mkl_cpardiso->mtype, &mat_mkl_cpardiso->phase, &mat_mkl_cpardiso->n, NULL, NULL, NULL, mat_mkl_cpardiso->perm, &mat_mkl_cpardiso->nrhs,
360:                                            mat_mkl_cpardiso->iparm, &mat_mkl_cpardiso->msglvl, NULL, NULL, &mat_mkl_cpardiso->comm_mkl_cpardiso, (PetscInt *)&mat_mkl_cpardiso->err));
361:   }
362:   if (mat_mkl_cpardiso->ConvertToTriples != MatCopy_seqaij_seqaij_MKL_CPARDISO) PetscCall(PetscFree3(mat_mkl_cpardiso->ia, mat_mkl_cpardiso->ja, mat_mkl_cpardiso->a));
363:   comm = MPI_Comm_f2c(mat_mkl_cpardiso->comm_mkl_cpardiso);
364:   PetscCallMPI(MPI_Comm_free(&comm));
365:   PetscCall(PetscFree(A->data));

367:   /* clear composed functions */
368:   PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatFactorGetSolverType_C", NULL));
369:   PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatMkl_CPardisoSetCntl_C", NULL));
370:   PetscFunctionReturn(PETSC_SUCCESS);
371: }

373: /*
374:  * Solves Ax = b for x
375:  */
376: static PetscErrorCode MatSolve_MKL_CPARDISO(Mat A, Vec b, Vec x)
377: {
378:   Mat_MKL_CPARDISO  *mat_mkl_cpardiso = (Mat_MKL_CPARDISO *)A->data;
379:   PetscScalar       *xarray;
380:   const PetscScalar *barray;

382:   PetscFunctionBegin;
383:   mat_mkl_cpardiso->nrhs = 1;
384:   PetscCall(VecGetArray(x, &xarray));
385:   PetscCall(VecGetArrayRead(b, &barray));

387:   /* solve phase */
388:   mat_mkl_cpardiso->phase = JOB_SOLVE_ITERATIVE_REFINEMENT;
389:   PetscCallCluster(cluster_sparse_solver(mat_mkl_cpardiso->pt, &mat_mkl_cpardiso->maxfct, &mat_mkl_cpardiso->mnum, &mat_mkl_cpardiso->mtype, &mat_mkl_cpardiso->phase, &mat_mkl_cpardiso->n, mat_mkl_cpardiso->a, mat_mkl_cpardiso->ia, mat_mkl_cpardiso->ja,
390:                                          mat_mkl_cpardiso->perm, &mat_mkl_cpardiso->nrhs, mat_mkl_cpardiso->iparm, &mat_mkl_cpardiso->msglvl, (void *)barray, (void *)xarray, &mat_mkl_cpardiso->comm_mkl_cpardiso, (PetscInt *)&mat_mkl_cpardiso->err));
391:   PetscCheck(mat_mkl_cpardiso->err >= 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "Error reported by MKL Cluster PARDISO: err=%d, msg = \"%s\". Please check manual", mat_mkl_cpardiso->err, Err_MSG_CPardiso(mat_mkl_cpardiso->err));

393:   PetscCall(VecRestoreArray(x, &xarray));
394:   PetscCall(VecRestoreArrayRead(b, &barray));
395:   mat_mkl_cpardiso->CleanUp = PETSC_TRUE;
396:   PetscFunctionReturn(PETSC_SUCCESS);
397: }

399: static PetscErrorCode MatForwardSolve_MKL_CPARDISO(Mat A, Vec b, Vec x)
400: {
401:   Mat_MKL_CPARDISO  *mat_mkl_cpardiso = (Mat_MKL_CPARDISO *)A->data;
402:   PetscScalar       *xarray;
403:   const PetscScalar *barray;

405:   PetscFunctionBegin;
406:   mat_mkl_cpardiso->nrhs = 1;
407:   PetscCall(VecGetArray(x, &xarray));
408:   PetscCall(VecGetArrayRead(b, &barray));

410:   /* solve phase */
411:   mat_mkl_cpardiso->phase = JOB_SOLVE_FORWARD_SUBSTITUTION;
412:   PetscCallCluster(cluster_sparse_solver(mat_mkl_cpardiso->pt, &mat_mkl_cpardiso->maxfct, &mat_mkl_cpardiso->mnum, &mat_mkl_cpardiso->mtype, &mat_mkl_cpardiso->phase, &mat_mkl_cpardiso->n, mat_mkl_cpardiso->a, mat_mkl_cpardiso->ia, mat_mkl_cpardiso->ja,
413:                                          mat_mkl_cpardiso->perm, &mat_mkl_cpardiso->nrhs, mat_mkl_cpardiso->iparm, &mat_mkl_cpardiso->msglvl, (void *)barray, (void *)xarray, &mat_mkl_cpardiso->comm_mkl_cpardiso, (PetscInt *)&mat_mkl_cpardiso->err));
414:   PetscCheck(mat_mkl_cpardiso->err >= 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "Error reported by MKL Cluster PARDISO: err=%d, msg = \"%s\". Please check manual", mat_mkl_cpardiso->err, Err_MSG_CPardiso(mat_mkl_cpardiso->err));

416:   PetscCall(VecRestoreArray(x, &xarray));
417:   PetscCall(VecRestoreArrayRead(b, &barray));
418:   mat_mkl_cpardiso->CleanUp = PETSC_TRUE;
419:   PetscFunctionReturn(PETSC_SUCCESS);
420: }

422: static PetscErrorCode MatBackwardSolve_MKL_CPARDISO(Mat A, Vec b, Vec x)
423: {
424:   Mat_MKL_CPARDISO  *mat_mkl_cpardiso = (Mat_MKL_CPARDISO *)A->data;
425:   PetscScalar       *xarray;
426:   const PetscScalar *barray;

428:   PetscFunctionBegin;
429:   mat_mkl_cpardiso->nrhs = 1;
430:   PetscCall(VecGetArray(x, &xarray));
431:   PetscCall(VecGetArrayRead(b, &barray));

433:   /* solve phase */
434:   mat_mkl_cpardiso->phase = JOB_SOLVE_BACKWARD_SUBSTITUTION;
435:   PetscCallCluster(cluster_sparse_solver(mat_mkl_cpardiso->pt, &mat_mkl_cpardiso->maxfct, &mat_mkl_cpardiso->mnum, &mat_mkl_cpardiso->mtype, &mat_mkl_cpardiso->phase, &mat_mkl_cpardiso->n, mat_mkl_cpardiso->a, mat_mkl_cpardiso->ia, mat_mkl_cpardiso->ja,
436:                                          mat_mkl_cpardiso->perm, &mat_mkl_cpardiso->nrhs, mat_mkl_cpardiso->iparm, &mat_mkl_cpardiso->msglvl, (void *)barray, (void *)xarray, &mat_mkl_cpardiso->comm_mkl_cpardiso, (PetscInt *)&mat_mkl_cpardiso->err));
437:   PetscCheck(mat_mkl_cpardiso->err >= 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "Error reported by MKL Cluster PARDISO: err=%d, msg = \"%s\". Please check manual", mat_mkl_cpardiso->err, Err_MSG_CPardiso(mat_mkl_cpardiso->err));

439:   PetscCall(VecRestoreArray(x, &xarray));
440:   PetscCall(VecRestoreArrayRead(b, &barray));
441:   mat_mkl_cpardiso->CleanUp = PETSC_TRUE;
442:   PetscFunctionReturn(PETSC_SUCCESS);
443: }

445: static PetscErrorCode MatSolveTranspose_MKL_CPARDISO(Mat A, Vec b, Vec x)
446: {
447:   Mat_MKL_CPARDISO *mat_mkl_cpardiso = (Mat_MKL_CPARDISO *)A->data;

449:   PetscFunctionBegin;
450: #if defined(PETSC_USE_COMPLEX)
451:   mat_mkl_cpardiso->iparm[12 - 1] = 1;
452: #else
453:   mat_mkl_cpardiso->iparm[12 - 1] = 2;
454: #endif
455:   PetscCall(MatSolve_MKL_CPARDISO(A, b, x));
456:   mat_mkl_cpardiso->iparm[12 - 1] = 0;
457:   PetscFunctionReturn(PETSC_SUCCESS);
458: }

460: static PetscErrorCode MatMatSolve_MKL_CPARDISO(Mat A, Mat B, Mat X)
461: {
462:   Mat_MKL_CPARDISO  *mat_mkl_cpardiso = (Mat_MKL_CPARDISO *)A->data;
463:   PetscScalar       *xarray;
464:   const PetscScalar *barray;
465:   PetscInt           n;

466:   PetscFunctionBegin;
467:   PetscCall(MatGetSize(B, NULL, &n));
468:   mat_mkl_cpardiso->nrhs = (INT_TYPE)n;

469:   if (mat_mkl_cpardiso->nrhs > 0) {
470:     PetscCall(MatDenseGetArrayRead(B, &barray));
471:     PetscCall(MatDenseGetArray(X, &xarray));

473:     PetscCheck(barray != xarray, PETSC_COMM_SELF, PETSC_ERR_SUP, "B and X cannot share the same memory location");

475:     /* solve phase */
476:     mat_mkl_cpardiso->phase = JOB_SOLVE_ITERATIVE_REFINEMENT;
477:     PetscCallCluster(cluster_sparse_solver(mat_mkl_cpardiso->pt, &mat_mkl_cpardiso->maxfct, &mat_mkl_cpardiso->mnum, &mat_mkl_cpardiso->mtype, &mat_mkl_cpardiso->phase, &mat_mkl_cpardiso->n, mat_mkl_cpardiso->a, mat_mkl_cpardiso->ia, mat_mkl_cpardiso->ja,
478:                                            mat_mkl_cpardiso->perm, &mat_mkl_cpardiso->nrhs, mat_mkl_cpardiso->iparm, &mat_mkl_cpardiso->msglvl, (void *)barray, (void *)xarray, &mat_mkl_cpardiso->comm_mkl_cpardiso, (PetscInt *)&mat_mkl_cpardiso->err));
479:     PetscCheck(mat_mkl_cpardiso->err >= 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "Error reported by MKL Cluster PARDISO: err=%d, msg = \"%s\". Please check manual", mat_mkl_cpardiso->err, Err_MSG_CPardiso(mat_mkl_cpardiso->err));
480:     PetscCall(MatDenseRestoreArrayRead(B, &barray));
481:     PetscCall(MatDenseRestoreArray(X, &xarray));
482:   }
483:   mat_mkl_cpardiso->CleanUp = PETSC_TRUE;
484:   PetscFunctionReturn(PETSC_SUCCESS);
485: }

487: /*
488:  * Numerical factorization (used for both LU and Cholesky)
489:  */
490: static PetscErrorCode MatFactorNumeric_MKL_CPARDISO(Mat F, Mat A, const MatFactorInfo *info)
491: {
492:   Mat_MKL_CPARDISO *mat_mkl_cpardiso = (Mat_MKL_CPARDISO *)F->data;

494:   PetscFunctionBegin;
495:   mat_mkl_cpardiso->matstruc = SAME_NONZERO_PATTERN;
496:   PetscCall((*mat_mkl_cpardiso->ConvertToTriples)(A, MAT_REUSE_MATRIX, &mat_mkl_cpardiso->nz, &mat_mkl_cpardiso->ia, &mat_mkl_cpardiso->ja, &mat_mkl_cpardiso->a));

498:   mat_mkl_cpardiso->phase = JOB_NUMERICAL_FACTORIZATION;
499:   PetscCallCluster(cluster_sparse_solver(mat_mkl_cpardiso->pt, &mat_mkl_cpardiso->maxfct, &mat_mkl_cpardiso->mnum, &mat_mkl_cpardiso->mtype, &mat_mkl_cpardiso->phase, &mat_mkl_cpardiso->n, mat_mkl_cpardiso->a, mat_mkl_cpardiso->ia, mat_mkl_cpardiso->ja,
500:                                          mat_mkl_cpardiso->perm, &mat_mkl_cpardiso->nrhs, mat_mkl_cpardiso->iparm, &mat_mkl_cpardiso->msglvl, NULL, NULL, &mat_mkl_cpardiso->comm_mkl_cpardiso, &mat_mkl_cpardiso->err));
501:   PetscCheck(mat_mkl_cpardiso->err >= 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "Error reported by MKL Cluster PARDISO: err=%d, msg = \"%s\". Please check manual", mat_mkl_cpardiso->err, Err_MSG_CPardiso(mat_mkl_cpardiso->err));

503:   mat_mkl_cpardiso->matstruc = SAME_NONZERO_PATTERN;
504:   mat_mkl_cpardiso->CleanUp  = PETSC_TRUE;
505:   PetscFunctionReturn(PETSC_SUCCESS);
506: }

508: /* Sets mkl_cpardiso options from the options database */
509: static PetscErrorCode MatSetFromOptions_MKL_CPARDISO(Mat F, Mat A)
510: {
511:   Mat_MKL_CPARDISO *mat_mkl_cpardiso = (Mat_MKL_CPARDISO *)F->data;
512:   PetscInt          icntl, threads = 1;
513:   PetscBool         flg;

515:   PetscFunctionBegin;
516:   PetscOptionsBegin(PetscObjectComm((PetscObject)F), ((PetscObject)F)->prefix, "MKL Cluster PARDISO Options", "Mat");
517:   PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_65", "Suggested number of threads to use within MKL Cluster PARDISO", "None", threads, &threads, &flg));
518:   if (flg) mkl_set_num_threads((int)threads);

520:   PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_66", "Maximum number of factors with identical sparsity structure that must be kept in memory at the same time", "None", mat_mkl_cpardiso->maxfct, &icntl, &flg));
521:   if (flg) mat_mkl_cpardiso->maxfct = icntl;

523:   PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_67", "Indicates the actual matrix for the solution phase", "None", mat_mkl_cpardiso->mnum, &icntl, &flg));
524:   if (flg) mat_mkl_cpardiso->mnum = icntl;

526:   PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_68", "Message level information", "None", mat_mkl_cpardiso->msglvl, &icntl, &flg));
527:   if (flg) mat_mkl_cpardiso->msglvl = icntl;

529:   PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_69", "Defines the matrix type", "None", mat_mkl_cpardiso->mtype, &icntl, &flg));
530:   if (flg) mat_mkl_cpardiso->mtype = icntl;
531:   PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_1", "Use default values", "None", mat_mkl_cpardiso->iparm[0], &icntl, &flg));

533:   if (flg && icntl != 0) {
534:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_2", "Fill-in reducing ordering for the input matrix", "None", mat_mkl_cpardiso->iparm[1], &icntl, &flg));
535:     if (flg) mat_mkl_cpardiso->iparm[1] = icntl;

537:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_4", "Preconditioned CGS/CG", "None", mat_mkl_cpardiso->iparm[3], &icntl, &flg));
538:     if (flg) mat_mkl_cpardiso->iparm[3] = icntl;

540:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_5", "User permutation", "None", mat_mkl_cpardiso->iparm[4], &icntl, &flg));
541:     if (flg) mat_mkl_cpardiso->iparm[4] = icntl;

543:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_6", "Write solution on x", "None", mat_mkl_cpardiso->iparm[5], &icntl, &flg));
544:     if (flg) mat_mkl_cpardiso->iparm[5] = icntl;

546:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_8", "Iterative refinement step", "None", mat_mkl_cpardiso->iparm[7], &icntl, &flg));
547:     if (flg) mat_mkl_cpardiso->iparm[7] = icntl;

549:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_10", "Pivoting perturbation", "None", mat_mkl_cpardiso->iparm[9], &icntl, &flg));
550:     if (flg) mat_mkl_cpardiso->iparm[9] = icntl;

552:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_11", "Scaling vectors", "None", mat_mkl_cpardiso->iparm[10], &icntl, &flg));
553:     if (flg) mat_mkl_cpardiso->iparm[10] = icntl;

555:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_12", "Solve with transposed or conjugate transposed matrix A", "None", mat_mkl_cpardiso->iparm[11], &icntl, &flg));
556:     if (flg) mat_mkl_cpardiso->iparm[11] = icntl;

558:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_13", "Improved accuracy using (non-) symmetric weighted matching", "None", mat_mkl_cpardiso->iparm[12], &icntl, &flg));
559:     if (flg) mat_mkl_cpardiso->iparm[12] = icntl;

561:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_18", "Numbers of non-zero elements", "None", mat_mkl_cpardiso->iparm[17], &icntl, &flg));
562:     if (flg) mat_mkl_cpardiso->iparm[17] = icntl;

564:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_19", "Report number of floating point operations", "None", mat_mkl_cpardiso->iparm[18], &icntl, &flg));
565:     if (flg) mat_mkl_cpardiso->iparm[18] = icntl;

567:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_21", "Pivoting for symmetric indefinite matrices", "None", mat_mkl_cpardiso->iparm[20], &icntl, &flg));
568:     if (flg) mat_mkl_cpardiso->iparm[20] = icntl;

570:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_24", "Parallel factorization control", "None", mat_mkl_cpardiso->iparm[23], &icntl, &flg));
571:     if (flg) mat_mkl_cpardiso->iparm[23] = icntl;

573:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_25", "Parallel forward/backward solve control", "None", mat_mkl_cpardiso->iparm[24], &icntl, &flg));
574:     if (flg) mat_mkl_cpardiso->iparm[24] = icntl;

576:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_27", "Matrix checker", "None", mat_mkl_cpardiso->iparm[26], &icntl, &flg));
577:     if (flg) mat_mkl_cpardiso->iparm[26] = icntl;

579:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_31", "Partial solve and computing selected components of the solution vectors", "None", mat_mkl_cpardiso->iparm[30], &icntl, &flg));
580:     if (flg) mat_mkl_cpardiso->iparm[30] = icntl;

582:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_34", "Optimal number of threads for conditional numerical reproducibility (CNR) mode", "None", mat_mkl_cpardiso->iparm[33], &icntl, &flg));
583:     if (flg) mat_mkl_cpardiso->iparm[33] = icntl;

585:     PetscCall(PetscOptionsInt("-mat_mkl_cpardiso_60", "Intel MKL Cluster PARDISO mode", "None", mat_mkl_cpardiso->iparm[59], &icntl, &flg));
586:     if (flg) mat_mkl_cpardiso->iparm[59] = icntl;
587:   }

589:   PetscOptionsEnd();
590:   PetscFunctionReturn(PETSC_SUCCESS);
591: }

593: static PetscErrorCode PetscInitialize_MKL_CPARDISO(Mat A, Mat_MKL_CPARDISO *mat_mkl_cpardiso)
594: {
595:   PetscInt    bs;
596:   PetscBool   match;
597:   PetscMPIInt size;
598:   MPI_Comm    comm;

600:   PetscFunctionBegin;
601:   PetscCallMPI(MPI_Comm_dup(PetscObjectComm((PetscObject)A), &comm));
602:   PetscCallMPI(MPI_Comm_size(comm, &size));
603:   mat_mkl_cpardiso->comm_mkl_cpardiso = MPI_Comm_c2f(comm);

605:   mat_mkl_cpardiso->CleanUp = PETSC_FALSE;
606:   mat_mkl_cpardiso->maxfct  = 1;
607:   mat_mkl_cpardiso->mnum    = 1;
608:   mat_mkl_cpardiso->n       = A->rmap->N;
609:   if (mat_mkl_cpardiso->iparm[36]) mat_mkl_cpardiso->n /= mat_mkl_cpardiso->iparm[36];
610:   mat_mkl_cpardiso->msglvl = 0;
611:   mat_mkl_cpardiso->nrhs   = 1;
612:   mat_mkl_cpardiso->err    = 0;
613:   mat_mkl_cpardiso->phase  = -1;
614: #if defined(PETSC_USE_COMPLEX)
615:   mat_mkl_cpardiso->mtype = 13;
616: #else
617:   mat_mkl_cpardiso->mtype = 11;
618: #endif

620: #if defined(PETSC_USE_REAL_SINGLE)
621:   mat_mkl_cpardiso->iparm[27] = 1;
622: #else
623:   mat_mkl_cpardiso->iparm[27] = 0;
624: #endif

626:   mat_mkl_cpardiso->iparm[0]  = 1;  /* Solver default parameters are overridden by the values provided in iparm */
627:   mat_mkl_cpardiso->iparm[1]  = 2;  /* Use METIS for fill-in reordering */
628:   mat_mkl_cpardiso->iparm[5]  = 0;  /* Write solution into x */
629:   mat_mkl_cpardiso->iparm[7]  = 2;  /* Max number of iterative refinement steps */
630:   mat_mkl_cpardiso->iparm[9]  = 13; /* Perturb the pivot elements with 1E-13 */
631:   mat_mkl_cpardiso->iparm[10] = 1;  /* Use nonsymmetric permutation and scaling MPS */
632:   mat_mkl_cpardiso->iparm[12] = 1;  /* Switch on Maximum Weighted Matching algorithm (default for non-symmetric) */
633:   mat_mkl_cpardiso->iparm[17] = -1; /* Output: Number of nonzeros in the factor LU */
634:   mat_mkl_cpardiso->iparm[18] = -1; /* Output: Mflops for LU factorization */
635:   mat_mkl_cpardiso->iparm[26] = 1;  /* Check input data for correctness */

637:   mat_mkl_cpardiso->iparm[39] = 0;
638:   if (size > 1) {
639:     mat_mkl_cpardiso->iparm[39] = 2;
640:     mat_mkl_cpardiso->iparm[40] = A->rmap->rstart;
641:     mat_mkl_cpardiso->iparm[41] = A->rmap->rend - 1;
642:   }
643:   PetscCall(PetscObjectTypeCompareAny((PetscObject)A, &match, MATMPIBAIJ, MATMPISBAIJ, ""));
644:   if (match) {
645:     PetscCall(MatGetBlockSize(A, &bs));
646:     mat_mkl_cpardiso->iparm[36] = bs;
647:     mat_mkl_cpardiso->iparm[40] /= bs;
648:     mat_mkl_cpardiso->iparm[41] /= bs;
649:     mat_mkl_cpardiso->iparm[40]++;
650:     mat_mkl_cpardiso->iparm[41]++;
651:     mat_mkl_cpardiso->iparm[34] = 0; /* Fortran style */
652:   } else {
653:     mat_mkl_cpardiso->iparm[34] = 1; /* C style */
654:   }

656:   mat_mkl_cpardiso->perm = 0;
657:   PetscFunctionReturn(PETSC_SUCCESS);
658: }

660: /*
661:  * Symbolic decomposition: the MKL Cluster PARDISO analysis phase.
662:  */
663: static PetscErrorCode MatLUFactorSymbolic_AIJMKL_CPARDISO(Mat F, Mat A, IS r, IS c, const MatFactorInfo *info)
664: {
665:   Mat_MKL_CPARDISO *mat_mkl_cpardiso = (Mat_MKL_CPARDISO *)F->data;

667:   PetscFunctionBegin;
668:   mat_mkl_cpardiso->matstruc = DIFFERENT_NONZERO_PATTERN;

670:   /* Set MKL_CPARDISO options from the options database */
671:   PetscCall(MatSetFromOptions_MKL_CPARDISO(F, A));
672:   PetscCall((*mat_mkl_cpardiso->ConvertToTriples)(A, MAT_INITIAL_MATRIX, &mat_mkl_cpardiso->nz, &mat_mkl_cpardiso->ia, &mat_mkl_cpardiso->ja, &mat_mkl_cpardiso->a));

674:   mat_mkl_cpardiso->n = A->rmap->N;
675:   if (mat_mkl_cpardiso->iparm[36]) mat_mkl_cpardiso->n /= mat_mkl_cpardiso->iparm[36];

677:   /* analysis phase */
678:   mat_mkl_cpardiso->phase = JOB_ANALYSIS;

680:   PetscCallCluster(cluster_sparse_solver(mat_mkl_cpardiso->pt, &mat_mkl_cpardiso->maxfct, &mat_mkl_cpardiso->mnum, &mat_mkl_cpardiso->mtype, &mat_mkl_cpardiso->phase, &mat_mkl_cpardiso->n, mat_mkl_cpardiso->a, mat_mkl_cpardiso->ia, mat_mkl_cpardiso->ja,
681:                                          mat_mkl_cpardiso->perm, &mat_mkl_cpardiso->nrhs, mat_mkl_cpardiso->iparm, &mat_mkl_cpardiso->msglvl, NULL, NULL, &mat_mkl_cpardiso->comm_mkl_cpardiso, (PetscInt *)&mat_mkl_cpardiso->err));
682:   PetscCheck(mat_mkl_cpardiso->err >= 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "Error reported by MKL Cluster PARDISO: err=%d, msg = \"%s\". Please check manual", mat_mkl_cpardiso->err, Err_MSG_CPardiso(mat_mkl_cpardiso->err));

684:   mat_mkl_cpardiso->CleanUp = PETSC_TRUE;
685:   F->ops->lufactornumeric   = MatFactorNumeric_MKL_CPARDISO;
686:   F->ops->solve             = MatSolve_MKL_CPARDISO;
687:   F->ops->forwardsolve      = MatForwardSolve_MKL_CPARDISO;
688:   F->ops->backwardsolve     = MatBackwardSolve_MKL_CPARDISO;
689:   F->ops->solvetranspose    = MatSolveTranspose_MKL_CPARDISO;
690:   F->ops->matsolve          = MatMatSolve_MKL_CPARDISO;
691:   PetscFunctionReturn(PETSC_SUCCESS);
692: }

694: static PetscErrorCode MatCholeskyFactorSymbolic_AIJMKL_CPARDISO(Mat F, Mat A, IS perm, const MatFactorInfo *info)
695: {
696:   Mat_MKL_CPARDISO *mat_mkl_cpardiso = (Mat_MKL_CPARDISO *)F->data;

698:   PetscFunctionBegin;
699:   mat_mkl_cpardiso->matstruc = DIFFERENT_NONZERO_PATTERN;

701:   /* Set MKL_CPARDISO options from the options database */
702:   PetscCall(MatSetFromOptions_MKL_CPARDISO(F, A));
703:   PetscCall((*mat_mkl_cpardiso->ConvertToTriples)(A, MAT_INITIAL_MATRIX, &mat_mkl_cpardiso->nz, &mat_mkl_cpardiso->ia, &mat_mkl_cpardiso->ja, &mat_mkl_cpardiso->a));

705:   mat_mkl_cpardiso->n = A->rmap->N;
706:   if (mat_mkl_cpardiso->iparm[36]) mat_mkl_cpardiso->n /= mat_mkl_cpardiso->iparm[36];
707:   PetscCheck(!PetscDefined(USE_COMPLEX), PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "No support for PARDISO CHOLESKY with complex scalars! Use MAT_FACTOR_LU instead");
708:   if (A->spd == PETSC_BOOL3_TRUE) mat_mkl_cpardiso->mtype = 2;
709:   else mat_mkl_cpardiso->mtype = -2;

711:   /* analysis phase */
712:   mat_mkl_cpardiso->phase = JOB_ANALYSIS;

714:   PetscCallCluster(cluster_sparse_solver(mat_mkl_cpardiso->pt, &mat_mkl_cpardiso->maxfct, &mat_mkl_cpardiso->mnum, &mat_mkl_cpardiso->mtype, &mat_mkl_cpardiso->phase, &mat_mkl_cpardiso->n, mat_mkl_cpardiso->a, mat_mkl_cpardiso->ia, mat_mkl_cpardiso->ja,
715:                                          mat_mkl_cpardiso->perm, &mat_mkl_cpardiso->nrhs, mat_mkl_cpardiso->iparm, &mat_mkl_cpardiso->msglvl, NULL, NULL, &mat_mkl_cpardiso->comm_mkl_cpardiso, (PetscInt *)&mat_mkl_cpardiso->err));
716:   PetscCheck(mat_mkl_cpardiso->err >= 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "Error reported by MKL Cluster PARDISO: err=%d, msg = \"%s\". Please check manual", mat_mkl_cpardiso->err, Err_MSG_CPardiso(mat_mkl_cpardiso->err));

718:   mat_mkl_cpardiso->CleanUp     = PETSC_TRUE;
719:   F->ops->choleskyfactornumeric = MatFactorNumeric_MKL_CPARDISO;
720:   F->ops->solve                 = MatSolve_MKL_CPARDISO;
721:   F->ops->solvetranspose        = MatSolveTranspose_MKL_CPARDISO;
722:   F->ops->matsolve              = MatMatSolve_MKL_CPARDISO;
723:   if (A->spd == PETSC_BOOL3_TRUE) {
724:     F->ops->forwardsolve  = MatForwardSolve_MKL_CPARDISO;
725:     F->ops->backwardsolve = MatBackwardSolve_MKL_CPARDISO;
726:   }
727:   PetscFunctionReturn(PETSC_SUCCESS);
728: }

730: static PetscErrorCode MatView_MKL_CPARDISO(Mat A, PetscViewer viewer)
731: {
732:   PetscBool         iascii;
733:   PetscViewerFormat format;
734:   Mat_MKL_CPARDISO *mat_mkl_cpardiso = (Mat_MKL_CPARDISO *)A->data;
735:   PetscInt          i;

737:   PetscFunctionBegin;
738:   /* check if matrix is mkl_cpardiso type */
739:   if (A->ops->solve != MatSolve_MKL_CPARDISO) PetscFunctionReturn(PETSC_SUCCESS);

741:   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &iascii));
742:   if (iascii) {
743:     PetscCall(PetscViewerGetFormat(viewer, &format));
744:     if (format == PETSC_VIEWER_ASCII_INFO) {
745:       PetscCall(PetscViewerASCIIPrintf(viewer, "MKL Cluster PARDISO run parameters:\n"));
746:       PetscCall(PetscViewerASCIIPrintf(viewer, "MKL Cluster PARDISO phase:             %d \n", mat_mkl_cpardiso->phase));
747:       for (i = 1; i <= 64; i++) PetscCall(PetscViewerASCIIPrintf(viewer, "MKL Cluster PARDISO iparm[%d]:     %d \n", i, mat_mkl_cpardiso->iparm[i - 1]));
748:       PetscCall(PetscViewerASCIIPrintf(viewer, "MKL Cluster PARDISO maxfct:     %d \n", mat_mkl_cpardiso->maxfct));
749:       PetscCall(PetscViewerASCIIPrintf(viewer, "MKL Cluster PARDISO mnum:     %d \n", mat_mkl_cpardiso->mnum));
750:       PetscCall(PetscViewerASCIIPrintf(viewer, "MKL Cluster PARDISO mtype:     %d \n", mat_mkl_cpardiso->mtype));
751:       PetscCall(PetscViewerASCIIPrintf(viewer, "MKL Cluster PARDISO n:     %d \n", mat_mkl_cpardiso->n));
752:       PetscCall(PetscViewerASCIIPrintf(viewer, "MKL Cluster PARDISO nrhs:     %d \n", mat_mkl_cpardiso->nrhs));
753:       PetscCall(PetscViewerASCIIPrintf(viewer, "MKL Cluster PARDISO msglvl:     %d \n", mat_mkl_cpardiso->msglvl));
754:     }
755:   }
756:   PetscFunctionReturn(PETSC_SUCCESS);
757: }

759: static PetscErrorCode MatGetInfo_MKL_CPARDISO(Mat A, MatInfoType flag, MatInfo *info)
760: {
761:   Mat_MKL_CPARDISO *mat_mkl_cpardiso = (Mat_MKL_CPARDISO *)A->data;

763:   PetscFunctionBegin;
764:   info->block_size        = 1.0;
765:   info->nz_allocated      = mat_mkl_cpardiso->nz + 0.0;
766:   info->nz_unneeded       = 0.0;
767:   info->assemblies        = 0.0;
768:   info->mallocs           = 0.0;
769:   info->memory            = 0.0;
770:   info->fill_ratio_given  = 0;
771:   info->fill_ratio_needed = 0;
772:   info->factor_mallocs    = 0;
773:   PetscFunctionReturn(PETSC_SUCCESS);
774: }

776: static PetscErrorCode MatMkl_CPardisoSetCntl_MKL_CPARDISO(Mat F, PetscInt icntl, PetscInt ival)
777: {
778:   Mat_MKL_CPARDISO *mat_mkl_cpardiso = (Mat_MKL_CPARDISO *)F->data;

780:   PetscFunctionBegin;
781:   if (icntl <= 64) {
782:     mat_mkl_cpardiso->iparm[icntl - 1] = ival;
783:   } else {
784:     if (icntl == 65) mkl_set_num_threads((int)ival);
785:     else if (icntl == 66) mat_mkl_cpardiso->maxfct = ival;
786:     else if (icntl == 67) mat_mkl_cpardiso->mnum = ival;
787:     else if (icntl == 68) mat_mkl_cpardiso->msglvl = ival;
788:     else if (icntl == 69) mat_mkl_cpardiso->mtype = ival;
789:   }
790:   PetscFunctionReturn(PETSC_SUCCESS);
791: }

793: /*@
794:   MatMkl_CPardisoSetCntl - Set MKL Cluster PARDISO parameters
795:   <https://www.intel.com/content/www/us/en/docs/onemkl/developer-reference-c/2023-2/onemkl-pardiso-parallel-direct-sparse-solver-iface.html>

797:   Logically Collective

799:   Input Parameters:
800: + F     - the factored matrix obtained by calling `MatGetFactor()`
801: . icntl - index of MKL Cluster PARDISO parameter
802: - ival  - value of MKL Cluster PARDISO parameter

804:   Options Database Key:
805: . -mat_mkl_cpardiso_<icntl> <ival> - set the option numbered icntl to ival

807:   Level: intermediate

809:   Note:
810:   This routine can only be used if you obtain the factored matrix directly with `MatGetFactor()`; it cannot be used when the linear system is solved through `TS`, `SNES`, or `KSP`,
811:   in which case use the options database approach instead. See `MATSOLVERMKL_CPARDISO` for the available options.

813: .seealso: [](ch_matrices), `Mat`, `MatGetFactor()`, `MATMPIAIJ`, `MATSOLVERMKL_CPARDISO`
814: @*/
815: PetscErrorCode MatMkl_CPardisoSetCntl(Mat F, PetscInt icntl, PetscInt ival)
816: {
817:   PetscFunctionBegin;
818:   PetscTryMethod(F, "MatMkl_CPardisoSetCntl_C", (Mat, PetscInt, PetscInt), (F, icntl, ival));
819:   PetscFunctionReturn(PETSC_SUCCESS);
820: }
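The manual page above notes that this routine is only usable when the factored matrix is obtained directly with `MatGetFactor()`. A minimal sketch of that route follows; the helper name is invented for illustration, and an already assembled `MATMPIAIJ` matrix A with compatible vectors b and x are assumed to be supplied by the caller.

#include <petscmat.h>

/* hypothetical helper: LU-factor A with MKL Cluster PARDISO and solve A x = b once */
static PetscErrorCode FactorAndSolveWithCPardiso(Mat A, Vec b, Vec x)
{
  Mat           F;
  MatFactorInfo info;

  PetscFunctionBegin;
  PetscCall(MatGetFactor(A, MATSOLVERMKL_CPARDISO, MAT_FACTOR_LU, &F));
  PetscCall(MatMkl_CPardisoSetCntl(F, 68, 1)); /* icntl 68: message level, print solver statistics */
  PetscCall(MatFactorInfoInitialize(&info));
  PetscCall(MatLUFactorSymbolic(F, A, NULL, NULL, &info));
  PetscCall(MatLUFactorNumeric(F, A, &info));
  PetscCall(MatSolve(F, b, x));
  PetscCall(MatDestroy(&F));
  PetscFunctionReturn(PETSC_SUCCESS);
}

When the system is instead solved through `KSP`, `SNES`, or `TS`, pass the equivalent settings with the -mat_mkl_cpardiso_<icntl> options described in `MATSOLVERMKL_CPARDISO` below.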

822: /*MC
823:   MATSOLVERMKL_CPARDISO - A matrix type providing direct solvers (LU and Cholesky) for parallel matrices via the external package MKL Cluster PARDISO
824:   <https://www.intel.com/content/www/us/en/docs/onemkl/developer-reference-c/2023-2/onemkl-pardiso-parallel-direct-sparse-solver-iface.html>

826:   Works with `MATAIJ` and `MATMPIBAIJ` matrices for LU, and with `MATMPISBAIJ` matrices for Cholesky

828:   Use `-pc_type lu` `-pc_factor_mat_solver_type mkl_cpardiso` to use this direct solver

830:   Options Database Keys:
831: + -mat_mkl_cpardiso_65 - Suggested number of threads to use within MKL Cluster PARDISO
832: . -mat_mkl_cpardiso_66 - Maximum number of factors with identical sparsity structure that must be kept in memory at the same time
833: . -mat_mkl_cpardiso_67 - Indicates the actual matrix for the solution phase
834: . -mat_mkl_cpardiso_68 - Message level information, use 1 to get detailed information on the solver options
835: . -mat_mkl_cpardiso_69 - Defines the matrix type. IMPORTANT: when this option is set, the iparm parameters are reset to the defaults for that matrix type
836: . -mat_mkl_cpardiso_1  - Use default values
837: . -mat_mkl_cpardiso_2  - Fill-in reducing ordering for the input matrix
838: . -mat_mkl_cpardiso_4  - Preconditioned CGS/CG
839: . -mat_mkl_cpardiso_5  - User permutation
840: . -mat_mkl_cpardiso_6  - Write solution on x
841: . -mat_mkl_cpardiso_8  - Iterative refinement step
842: . -mat_mkl_cpardiso_10 - Pivoting perturbation
843: . -mat_mkl_cpardiso_11 - Scaling vectors
844: . -mat_mkl_cpardiso_12 - Solve with transposed or conjugate transposed matrix A
845: . -mat_mkl_cpardiso_13 - Improved accuracy using (non-) symmetric weighted matching
846: . -mat_mkl_cpardiso_18 - Numbers of non-zero elements
847: . -mat_mkl_cpardiso_19 - Report number of floating point operations
848: . -mat_mkl_cpardiso_21 - Pivoting for symmetric indefinite matrices
849: . -mat_mkl_cpardiso_24 - Parallel factorization control
850: . -mat_mkl_cpardiso_25 - Parallel forward/backward solve control
851: . -mat_mkl_cpardiso_27 - Matrix checker
852: . -mat_mkl_cpardiso_31 - Partial solve and computing selected components of the solution vectors
853: . -mat_mkl_cpardiso_34 - Optimal number of threads for conditional numerical reproducibility (CNR) mode
854: - -mat_mkl_cpardiso_60 - Intel MKL Cluster PARDISO mode

856:   Level: beginner

858:   Notes:
859:   Use `-mat_mkl_cpardiso_68 1` to display the number of threads the solver is using. MKL does not provide a way to directly access this
860:   information.

862:   For more information on the options check
863:   <https://www.intel.com/content/www/us/en/docs/onemkl/developer-reference-c/2023-2/onemkl-pardiso-parallel-direct-sparse-solver-iface.html>

865: .seealso: [](ch_matrices), `Mat`, `PCFactorSetMatSolverType()`, `MatSolverType`, `MatMkl_CPardisoSetCntl()`, `MatGetFactor()`, `MATSOLVERMKL_PARDISO`
866: M*/
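A minimal sketch of driving this solver through `KSP`/`PC`, equivalent to the -pc_type lu -pc_factor_mat_solver_type mkl_cpardiso command-line route described above; the helper name is invented for illustration, and an assembled matrix A and vectors b, x are assumed to be provided by the caller.

#include <petscksp.h>

/* hypothetical helper: direct solve of A x = b using MKL Cluster PARDISO through PCLU */
static PetscErrorCode SolveWithCPardiso(Mat A, Vec b, Vec x)
{
  KSP ksp;
  PC  pc;

  PetscFunctionBegin;
  PetscCall(KSPCreate(PetscObjectComm((PetscObject)A), &ksp));
  PetscCall(KSPSetOperators(ksp, A, A));
  PetscCall(KSPSetType(ksp, KSPPREONLY)); /* apply the factorization exactly once, no Krylov iterations */
  PetscCall(KSPGetPC(ksp, &pc));
  PetscCall(PCSetType(pc, PCLU));
  PetscCall(PCFactorSetMatSolverType(pc, MATSOLVERMKL_CPARDISO));
  PetscCall(KSPSetFromOptions(ksp)); /* allow runtime customization of the KSP/PC */
  PetscCall(KSPSolve(ksp, b, x));
  PetscCall(KSPDestroy(&ksp));
  PetscFunctionReturn(PETSC_SUCCESS);
}

Any -mat_mkl_cpardiso_* keys present in the options database are picked up when the factorization is set up during KSPSolve().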

868: static PetscErrorCode MatFactorGetSolverType_mkl_cpardiso(Mat A, MatSolverType *type)
869: {
870:   PetscFunctionBegin;
871:   *type = MATSOLVERMKL_CPARDISO;
872:   PetscFunctionReturn(PETSC_SUCCESS);
873: }

875: /* MatGetFactor for AIJ, MPIBAIJ, and MPISBAIJ matrices */
876: static PetscErrorCode MatGetFactor_mpiaij_mkl_cpardiso(Mat A, MatFactorType ftype, Mat *F)
877: {
878:   Mat               B;
879:   Mat_MKL_CPARDISO *mat_mkl_cpardiso;
880:   PetscBool         isSeqAIJ, isMPIBAIJ, isMPISBAIJ;

882:   PetscFunctionBegin;
883:   /* Create the factorization matrix */

885:   PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJ, &isSeqAIJ));
886:   PetscCall(PetscObjectTypeCompare((PetscObject)A, MATMPIBAIJ, &isMPIBAIJ));
887:   PetscCall(PetscObjectTypeCompare((PetscObject)A, MATMPISBAIJ, &isMPISBAIJ));
888:   PetscCall(MatCreate(PetscObjectComm((PetscObject)A), &B));
889:   PetscCall(MatSetSizes(B, A->rmap->n, A->cmap->n, A->rmap->N, A->cmap->N));
890:   PetscCall(PetscStrallocpy("mkl_cpardiso", &((PetscObject)B)->type_name));
891:   PetscCall(MatSetUp(B));

893:   PetscCall(PetscNew(&mat_mkl_cpardiso));

895:   if (isSeqAIJ) mat_mkl_cpardiso->ConvertToTriples = MatCopy_seqaij_seqaij_MKL_CPARDISO;
896:   else if (isMPIBAIJ) mat_mkl_cpardiso->ConvertToTriples = MatConvertToTriples_mpibaij_mpibaij_MKL_CPARDISO;
897:   else if (isMPISBAIJ) mat_mkl_cpardiso->ConvertToTriples = MatConvertToTriples_mpisbaij_mpisbaij_MKL_CPARDISO;
898:   else mat_mkl_cpardiso->ConvertToTriples = MatConvertToTriples_mpiaij_mpiaij_MKL_CPARDISO;

900:   if (ftype == MAT_FACTOR_LU) B->ops->lufactorsymbolic = MatLUFactorSymbolic_AIJMKL_CPARDISO;
901:   else B->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_AIJMKL_CPARDISO;
902:   B->ops->destroy = MatDestroy_MKL_CPARDISO;

904:   B->ops->view    = MatView_MKL_CPARDISO;
905:   B->ops->getinfo = MatGetInfo_MKL_CPARDISO;

907:   B->factortype = ftype;
908:   B->assembled  = PETSC_TRUE; /* required by -ksp_view */

910:   B->data = mat_mkl_cpardiso;

912:   /* set solvertype */
913:   PetscCall(PetscFree(B->solvertype));
914:   PetscCall(PetscStrallocpy(MATSOLVERMKL_CPARDISO, &B->solvertype));

916:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatFactorGetSolverType_C", MatFactorGetSolverType_mkl_cpardiso));
917:   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMkl_CPardisoSetCntl_C", MatMkl_CPardisoSetCntl_MKL_CPARDISO));
918:   PetscCall(PetscInitialize_MKL_CPARDISO(A, mat_mkl_cpardiso));

920:   *F = B;
921:   PetscFunctionReturn(PETSC_SUCCESS);
922: }

924: PETSC_INTERN PetscErrorCode MatSolverTypeRegister_MKL_CPardiso(void)
925: {
926:   PetscFunctionBegin;
927:   PetscCall(MatSolverTypeRegister(MATSOLVERMKL_CPARDISO, MATMPIAIJ, MAT_FACTOR_LU, MatGetFactor_mpiaij_mkl_cpardiso));
928:   PetscCall(MatSolverTypeRegister(MATSOLVERMKL_CPARDISO, MATSEQAIJ, MAT_FACTOR_LU, MatGetFactor_mpiaij_mkl_cpardiso));
929:   PetscCall(MatSolverTypeRegister(MATSOLVERMKL_CPARDISO, MATMPIBAIJ, MAT_FACTOR_LU, MatGetFactor_mpiaij_mkl_cpardiso));
930:   PetscCall(MatSolverTypeRegister(MATSOLVERMKL_CPARDISO, MATMPISBAIJ, MAT_FACTOR_CHOLESKY, MatGetFactor_mpiaij_mkl_cpardiso));
931:   PetscFunctionReturn(PETSC_SUCCESS);
932: }