Actual source code: ex237.c
static char help[] = "Mini-app to benchmark matrix-matrix multiplication\n\n";

/*
   See the paper below for more information

   "KSPHPDDM and PCHPDDM: Extending PETSc with Robust Overlapping Schwarz Preconditioners and Advanced Krylov Methods",
   P. Jolivet, J. E. Roman, and S. Zampini (2021).
*/

#include <petsc.h>
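/* PetscCallMKLSparse() wraps a call into MKL's inspector-executor sparse BLAS: it pushes the
   function name onto the PETSc stack, checks the returned sparse_status_t, and raises
   PETSC_ERR_LIB on failure; in builds without MKL support it errors out at run time */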
#if defined(PETSC_HAVE_MKL_SPARSE_OPTIMIZE)
#include <mkl.h>
#define PetscCallMKLSparse(func, args) \
  do { \
    sparse_status_t __ierr; \
    PetscStackPushExternal(#func); \
    __ierr = func args; \
    PetscStackPop; \
    PetscCheck(__ierr == SPARSE_STATUS_SUCCESS, PETSC_COMM_SELF, PETSC_ERR_LIB, "Error in %s(): error code %d", #func, (int)__ierr); \
  } while (0)
#else
#define PetscCallMKLSparse(func, args) \
  do { \
    SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "No MKL support"); \
  } while (0)
#endif
int main(int argc, char **argv)
{
  Mat         A, C, D, E;
  PetscInt    nbs = 10, ntype = 10, nN = 8, m, M, trial = 5;
  PetscViewer viewer;
  PetscInt    bs[10], N[8];
  char       *type[10];
  PetscMPIInt size;
  PetscBool   flg, cuda, maij = PETSC_FALSE, check = PETSC_FALSE, trans = PETSC_FALSE, convert = PETSC_FALSE, mkl;
  char        file[PETSC_MAX_PATH_LEN];

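  /* Recognized options (defaults in parentheses):
       -f <file>      binary file containing the input MatAIJ (mandatory)
       -trial <n>     number of products timed per configuration (5)
       -bs <list>     block sizes to benchmark (1)
       -N <list>      numbers of right-hand sides (1,2,4,8,16,32,64,128)
       -type <list>   matrix types to benchmark (aij)
       -check         verify the products with MatMatMultEqual()
       -trans         also benchmark C = A*B^T
       -convert_aij   convert with MAT_INITIAL_MATRIX instead of MAT_INPLACE_MATRIX
       -A_view        view the converted matrix (standard PETSc viewer option) */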
  PetscFunctionBeginUser;
  PetscCall(PetscInitialize(&argc, &argv, NULL, help));
  PetscCallMPI(MPI_Comm_size(PETSC_COMM_WORLD, &size));
  PetscCheck(size == 1, PETSC_COMM_WORLD, PETSC_ERR_WRONG_MPI_SIZE, "This is a uniprocessor example only");
  PetscCall(PetscOptionsGetString(NULL, NULL, "-f", file, PETSC_MAX_PATH_LEN, &flg));
  PetscCheck(flg, PETSC_COMM_WORLD, PETSC_ERR_USER_INPUT, "Must indicate binary file with the -f option");
  PetscCall(PetscOptionsGetInt(NULL, NULL, "-trial", &trial, NULL));
  PetscCall(PetscOptionsGetIntArray(NULL, NULL, "-bs", bs, &nbs, &flg));
  if (!flg) {
    nbs   = 1;
    bs[0] = 1;
  }
  PetscCall(PetscOptionsGetIntArray(NULL, NULL, "-N", N, &nN, &flg));
  if (!flg) {
    nN   = 8;
    N[0] = 1;
    N[1] = 2;
    N[2] = 4;
    N[3] = 8;
    N[4] = 16;
    N[5] = 32;
    N[6] = 64;
    N[7] = 128;
  }
  PetscCall(PetscOptionsGetStringArray(NULL, NULL, "-type", type, &ntype, &flg));
  if (!flg) {
    ntype = 1;
    PetscCall(PetscStrallocpy(MATSEQAIJ, &type[0]));
  }
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-check", &check, NULL));
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-trans", &trans, NULL));
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-convert_aij", &convert, NULL));
  for (PetscInt j = 0; j < nbs; ++j) {
    PetscCall(PetscViewerBinaryOpen(PETSC_COMM_WORLD, file, FILE_MODE_READ, &viewer));
    PetscCall(MatCreate(PETSC_COMM_WORLD, &A));
    PetscCall(MatSetFromOptions(A));
    PetscCall(MatLoad(A, viewer));
    PetscCall(PetscViewerDestroy(&viewer));
    PetscCall(PetscObjectTypeCompareAny((PetscObject)A, &flg, MATSEQAIJ, MATMPIAIJ, ""));
    PetscCheck(flg, PETSC_COMM_WORLD, PETSC_ERR_USER_INPUT, "Must indicate a MatAIJ input matrix");
    PetscCall(MatGetSize(A, &m, &M));
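    /* if the loaded matrix is square, symmetrize it (A += A^T) so that the symmetric formats
       benchmarked below are applicable */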
    if (m == M) {
      Mat oA;
      PetscCall(MatTranspose(A, MAT_INITIAL_MATRIX, &oA));
      PetscCall(MatAXPY(A, 1.0, oA, DIFFERENT_NONZERO_PATTERN));
      PetscCall(MatDestroy(&oA));
    }
    PetscCall(MatGetLocalSize(A, &m, NULL));
    PetscCall(MatGetSize(A, &M, NULL));
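    /* when bs[j] > 1, expand the scalar AIJ matrix into a SeqBAIJ matrix with the same sparsity
       pattern: each nonzero a_ij is replaced by the dense block a_ij * T, where T is a random
       symmetric bs[j] x bs[j] block, a Kronecker-like product that preserves symmetry */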
    if (bs[j] > 1) {
      Mat                T, Tt, B;
      const PetscScalar *ptr;
      PetscScalar       *val, *Aa;
      const PetscInt    *Ai, *Aj;
      PetscInt           An, i, k;
      PetscBool          done;

      PetscCall(MatCreateDense(PETSC_COMM_SELF, bs[j], bs[j], bs[j], bs[j], NULL, &T));
      PetscCall(MatSetRandom(T, NULL));
      PetscCall(MatTranspose(T, MAT_INITIAL_MATRIX, &Tt));
      PetscCall(MatAXPY(T, 1.0, Tt, SAME_NONZERO_PATTERN));
      PetscCall(MatDestroy(&Tt));
      PetscCall(MatDenseGetArrayRead(T, &ptr));
      PetscCall(MatGetRowIJ(A, 0, PETSC_FALSE, PETSC_FALSE, &An, &Ai, &Aj, &done));
      PetscCheck(done && An == m, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Inconsistent sizes");
      PetscCall(MatSeqAIJGetArray(A, &Aa));
      PetscCall(MatCreate(PETSC_COMM_WORLD, &B));
      PetscCall(MatSetType(B, MATSEQBAIJ));
      PetscCall(MatSetSizes(B, bs[j] * An, bs[j] * An, PETSC_DECIDE, PETSC_DECIDE));
      PetscCall(PetscMalloc1(Ai[An] * bs[j] * bs[j], &val));
      for (i = 0; i < Ai[An]; ++i)
        for (k = 0; k < bs[j] * bs[j]; ++k) val[i * bs[j] * bs[j] + k] = Aa[i] * ptr[k];
      PetscCall(MatSetOption(B, MAT_ROW_ORIENTED, PETSC_FALSE));
      PetscCall(MatSeqBAIJSetPreallocationCSR(B, bs[j], Ai, Aj, val));
      PetscCall(PetscFree(val));
      PetscCall(MatSeqAIJRestoreArray(A, &Aa));
      PetscCall(MatRestoreRowIJ(A, 0, PETSC_FALSE, PETSC_FALSE, &An, &Ai, &Aj, &done));
      PetscCall(MatDenseRestoreArrayRead(T, &ptr));
      PetscCall(MatDestroy(&T));
      PetscCall(MatDestroy(&A));
      A = B;
    }
    /* convert back to SeqAIJ before converting to the desired type later */
    if (!convert) E = A;
    PetscCall(MatConvert(A, MATSEQAIJ, convert ? MAT_INITIAL_MATRIX : MAT_INPLACE_MATRIX, &E));
    PetscCall(MatSetOption(E, MAT_SYMMETRIC, PETSC_TRUE));
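    /* benchmark each requested -type, converting from the SeqAIJ (or SeqBAIJ) matrix built above */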
    for (PetscInt i = 0; i < ntype; ++i) {
      char        *tmp = NULL;
      PetscInt    *ia_ptr, *ja_ptr, k;
      PetscScalar *a_ptr;
#if defined(PETSC_HAVE_MKL_SPARSE_OPTIMIZE)
      struct matrix_descr descr;
      sparse_matrix_t     spr;
      descr.type = SPARSE_MATRIX_TYPE_GENERAL;
      descr.diag = SPARSE_DIAG_NON_UNIT;
#endif
      if (convert) PetscCall(MatDestroy(&A));
      PetscCall(PetscStrstr(type[i], "mkl", &tmp));
      if (tmp) {
        size_t mlen, tlen;
        char   base[256];

        mkl = PETSC_TRUE;
        PetscCall(PetscStrlen(tmp, &mlen));
        PetscCall(PetscStrlen(type[i], &tlen));
        PetscCall(PetscStrncpy(base, type[i], tlen - mlen + 1));
        PetscCall(MatConvert(E, base, convert ? MAT_INITIAL_MATRIX : MAT_INPLACE_MATRIX, &A));
      } else {
        mkl = PETSC_FALSE;
        PetscCall(PetscStrstr(type[i], "maij", &tmp));
        if (!tmp) {
          PetscCall(MatConvert(E, type[i], convert ? MAT_INITIAL_MATRIX : MAT_INPLACE_MATRIX, &A));
        } else {
          PetscCall(MatConvert(E, MATAIJ, convert ? MAT_INITIAL_MATRIX : MAT_INPLACE_MATRIX, &A));
          maij = PETSC_TRUE;
        }
      }
      PetscCall(PetscObjectTypeCompareAny((PetscObject)A, &cuda, MATSEQAIJCUSPARSE, MATMPIAIJCUSPARSE, ""));
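      /* for the *mkl types, mirror the PETSc matrix into an MKL inspector-executor handle:
         CSR (0-based) for SeqAIJ, BSR (1-based, column-major blocks) for SeqBAIJ and SeqSBAIJ */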
      if (mkl) {
        const PetscInt *Ai, *Aj;
        PetscInt        An;
        PetscBool       done;

        PetscCall(PetscObjectTypeCompareAny((PetscObject)A, &flg, MATSEQAIJ, MATSEQBAIJ, MATSEQSBAIJ, ""));
        PetscCheck(flg, PETSC_COMM_WORLD, PETSC_ERR_USER_INPUT, "Not implemented");
        PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJ, &flg));
        PetscCall(MatGetRowIJ(A, 0, PETSC_FALSE, flg ? PETSC_FALSE : PETSC_TRUE, &An, &Ai, &Aj, &done));
        PetscCheck(done, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Inconsistent sizes");
        PetscCall(PetscMalloc1(An + 1, &ia_ptr));
        PetscCall(PetscMalloc1(Ai[An], &ja_ptr));
        if (flg) { /* SeqAIJ */
          for (k = 0; k < An + 1; ++k) ia_ptr[k] = Ai[k];
          for (k = 0; k < Ai[An]; ++k) ja_ptr[k] = Aj[k];
          PetscCall(MatSeqAIJGetArray(A, &a_ptr));
          PetscCallMKLSparse(mkl_sparse_d_create_csr, (&spr, SPARSE_INDEX_BASE_ZERO, An, An, ia_ptr, ia_ptr + 1, ja_ptr, a_ptr));
        } else {
          PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQBAIJ, &flg));
          if (flg) {
            for (k = 0; k < An + 1; ++k) ia_ptr[k] = Ai[k] + 1; /* Fortran indexing to maximize cases covered by _mm routines */
            for (k = 0; k < Ai[An]; ++k) ja_ptr[k] = Aj[k] + 1; /* Fortran indexing to maximize cases covered by _mm routines */
            PetscCall(MatSeqBAIJGetArray(A, &a_ptr));
            PetscCallMKLSparse(mkl_sparse_d_create_bsr, (&spr, SPARSE_INDEX_BASE_ONE, SPARSE_LAYOUT_COLUMN_MAJOR, An, An, bs[j], ia_ptr, ia_ptr + 1, ja_ptr, a_ptr));
          } else {
            PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQSBAIJ, &flg));
            if (flg) {
              for (k = 0; k < An + 1; ++k) ia_ptr[k] = Ai[k] + 1; /* Fortran indexing to maximize cases covered by _mm routines */
              for (k = 0; k < Ai[An]; ++k) ja_ptr[k] = Aj[k] + 1; /* Fortran indexing to maximize cases covered by _mm routines */
              PetscCall(MatSeqSBAIJGetArray(A, &a_ptr));
              PetscCallMKLSparse(mkl_sparse_d_create_bsr, (&spr, SPARSE_INDEX_BASE_ONE, SPARSE_LAYOUT_COLUMN_MAJOR, An, An, bs[j], ia_ptr, ia_ptr + 1, ja_ptr, a_ptr));
#if defined(PETSC_HAVE_MKL_SPARSE_OPTIMIZE)
              descr.type = SPARSE_MATRIX_TYPE_SYMMETRIC;
              descr.mode = SPARSE_FILL_MODE_UPPER;
              descr.diag = SPARSE_DIAG_NON_UNIT;
#endif
            }
          }
        }
        PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJ, &flg));
        PetscCall(MatRestoreRowIJ(A, 0, PETSC_FALSE, flg ? PETSC_FALSE : PETSC_TRUE, &An, &Ai, &Aj, &done));
      }

      PetscCall(MatViewFromOptions(A, NULL, "-A_view"));

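      /* for each number of right-hand sides N[k], benchmark D = A*C with C and D dense
         N[k]-column matrices; each configuration is timed inside its own PetscLogStage */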
      for (k = 0; k < nN; ++k) {
        MatType       Atype, Ctype;
        PetscInt      AM, AN, CM, CN, t;
        PetscLogStage stage, tstage;
        char          stage_s[256];

        PetscCall(MatCreateDense(PETSC_COMM_WORLD, bs[j] * m, PETSC_DECIDE, bs[j] * M, N[k], NULL, &C));
        PetscCall(MatCreateDense(PETSC_COMM_WORLD, bs[j] * m, PETSC_DECIDE, bs[j] * M, N[k], NULL, &D));
        PetscCall(MatSetRandom(C, NULL));
        if (cuda) { /* convert to GPU if needed */
          PetscCall(MatConvert(C, MATDENSECUDA, MAT_INPLACE_MATRIX, &C));
          PetscCall(MatConvert(D, MATDENSECUDA, MAT_INPLACE_MATRIX, &D));
        }
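        /* tell MKL's inspector-executor which operation, layout, and call count to expect,
           so that mkl_sparse_optimize() can select a tuned kernel */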
        if (mkl) {
          if (N[k] > 1) PetscCallMKLSparse(mkl_sparse_set_mm_hint, (spr, SPARSE_OPERATION_NON_TRANSPOSE, descr, SPARSE_LAYOUT_COLUMN_MAJOR, N[k], 1 + trial));
          else PetscCallMKLSparse(mkl_sparse_set_mv_hint, (spr, SPARSE_OPERATION_NON_TRANSPOSE, descr, 1 + trial));
          PetscCallMKLSparse(mkl_sparse_set_memory_hint, (spr, SPARSE_MEMORY_AGGRESSIVE));
          PetscCallMKLSparse(mkl_sparse_optimize, (spr));
        }
        PetscCall(MatGetType(A, &Atype));
        PetscCall(MatGetType(C, &Ctype));
        PetscCall(MatGetSize(A, &AM, &AN));
        PetscCall(MatGetSize(C, &CM, &CN));

        if (!maij || N[k] > 1) {
          PetscCall(PetscSNPrintf(stage_s, sizeof(stage_s), "type_%s-bs_%" PetscInt_FMT "-N_%02d", type[i], bs[j], (int)N[k]));
          PetscCall(PetscLogStageRegister(stage_s, &stage));
        }
        if (trans && N[k] > 1) {
          PetscCall(PetscSNPrintf(stage_s, sizeof(stage_s), "trans_type_%s-bs_%" PetscInt_FMT "-N_%02d", type[i], bs[j], (int)N[k]));
          PetscCall(PetscLogStageRegister(stage_s, &tstage));
        }
        /* A*B */
        if (N[k] > 1) {
          if (!maij) {
            PetscCall(MatProductCreateWithMat(A, C, NULL, D));
            PetscCall(MatProductSetType(D, MATPRODUCT_AB));
            PetscCall(MatProductSetFromOptions(D));
            PetscCall(MatProductSymbolic(D));
          }

          if (!mkl) {
            if (!maij) {
              PetscCall(MatProductNumeric(D));
              PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Benchmarking MatProduct %s: with A %s %" PetscInt_FMT "x%" PetscInt_FMT " and B %s %" PetscInt_FMT "x%" PetscInt_FMT "\n", MatProductTypes[MATPRODUCT_AB], Atype, AM, AN, Ctype, CM, CN));
              PetscCall(PetscLogStagePush(stage));
              for (t = 0; t < trial; ++t) PetscCall(MatProductNumeric(D));
              PetscCall(PetscLogStagePop());
            } else {
              Mat                E, Ct, Dt;
              Vec                cC, cD;
              const PetscScalar *c_ptr;
              PetscScalar       *d_ptr;
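
              /* MAIJ path: D = A*C is computed as a single MatMult with the N[k]-way MAIJ
                 operator acting on vectors that hold the rows of C and D interleaved, hence
                 the in-place transposes and the VecCreateMPIWithArray() wrappers below */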
              PetscCall(MatCreateMAIJ(A, N[k], &E));
              PetscCall(MatDenseGetLocalMatrix(C, &Ct));
              PetscCall(MatDenseGetLocalMatrix(D, &Dt));
              PetscCall(MatTranspose(Ct, MAT_INPLACE_MATRIX, &Ct));
              PetscCall(MatTranspose(Dt, MAT_INPLACE_MATRIX, &Dt));
              PetscCall(MatDenseGetArrayRead(Ct, &c_ptr));
              PetscCall(MatDenseGetArrayWrite(Dt, &d_ptr));
              PetscCall(VecCreateMPIWithArray(PETSC_COMM_WORLD, 1, AM * N[k], PETSC_DECIDE, c_ptr, &cC));
              PetscCall(VecCreateMPIWithArray(PETSC_COMM_WORLD, 1, AM * N[k], PETSC_DECIDE, d_ptr, &cD));
              PetscCall(MatMult(E, cC, cD));
              PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Benchmarking MatMult: with A %s %" PetscInt_FMT "x%" PetscInt_FMT " and B %s %" PetscInt_FMT "x%" PetscInt_FMT "\n", MATMAIJ, AM, AN, VECMPI, AM * N[k], 1));
              PetscCall(PetscLogStagePush(stage));
              for (t = 0; t < trial; ++t) PetscCall(MatMult(E, cC, cD));
              PetscCall(PetscLogStagePop());
              PetscCall(VecDestroy(&cD));
              PetscCall(VecDestroy(&cC));
              PetscCall(MatDestroy(&E));
              PetscCall(MatDenseRestoreArrayWrite(Dt, &d_ptr));
              PetscCall(MatDenseRestoreArrayRead(Ct, &c_ptr));
              PetscCall(MatTranspose(Ct, MAT_INPLACE_MATRIX, &Ct));
              PetscCall(MatTranspose(Dt, MAT_INPLACE_MATRIX, &Dt));
            }
          } else {
            const PetscScalar *c_ptr;
            PetscScalar       *d_ptr;

            PetscCall(MatDenseGetArrayRead(C, &c_ptr));
            PetscCall(MatDenseGetArrayWrite(D, &d_ptr));
            PetscCallMKLSparse(mkl_sparse_d_mm, (SPARSE_OPERATION_NON_TRANSPOSE, 1.0, spr, descr, SPARSE_LAYOUT_COLUMN_MAJOR, c_ptr, CN, CM, 0.0, d_ptr, CM));
            PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Benchmarking mkl_sparse_d_mm (COLUMN_MAJOR): with A %s %" PetscInt_FMT "x%" PetscInt_FMT " and B %s %" PetscInt_FMT "x%" PetscInt_FMT "\n", Atype, AM, AN, Ctype, CM, CN));
            PetscCall(PetscLogStagePush(stage));
            for (t = 0; t < trial; ++t) PetscCallMKLSparse(mkl_sparse_d_mm, (SPARSE_OPERATION_NON_TRANSPOSE, 1.0, spr, descr, SPARSE_LAYOUT_COLUMN_MAJOR, c_ptr, CN, CM, 0.0, d_ptr, CM));
            PetscCall(PetscLogStagePop());
            PetscCall(MatDenseRestoreArrayWrite(D, &d_ptr));
            PetscCall(MatDenseRestoreArrayRead(C, &c_ptr));
          }
        } else if (maij) {
          PetscCall(MatDestroy(&C));
          PetscCall(MatDestroy(&D));
          continue;
        } else if (!mkl) {
          Vec cC, cD;

          PetscCall(MatDenseGetColumnVecRead(C, 0, &cC));
          PetscCall(MatDenseGetColumnVecWrite(D, 0, &cD));
          PetscCall(MatMult(A, cC, cD));
          PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Benchmarking MatMult: with A %s %" PetscInt_FMT "x%" PetscInt_FMT "\n", Atype, AM, AN));
          PetscCall(PetscLogStagePush(stage));
          for (t = 0; t < trial; ++t) PetscCall(MatMult(A, cC, cD));
          PetscCall(PetscLogStagePop());
          PetscCall(MatDenseRestoreColumnVecRead(C, 0, &cC));
          PetscCall(MatDenseRestoreColumnVecWrite(D, 0, &cD));
        } else {
          const PetscScalar *c_ptr;
          PetscScalar       *d_ptr;

          PetscCall(MatDenseGetArrayRead(C, &c_ptr));
          PetscCall(MatDenseGetArrayWrite(D, &d_ptr));
          PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Benchmarking mkl_sparse_d_mv: with A %s %" PetscInt_FMT "x%" PetscInt_FMT "\n", Atype, AM, AN));
          PetscCallMKLSparse(mkl_sparse_d_mv, (SPARSE_OPERATION_NON_TRANSPOSE, 1.0, spr, descr, c_ptr, 0.0, d_ptr));
          PetscCall(PetscLogStagePush(stage));
          for (t = 0; t < trial; ++t) PetscCallMKLSparse(mkl_sparse_d_mv, (SPARSE_OPERATION_NON_TRANSPOSE, 1.0, spr, descr, c_ptr, 0.0, d_ptr));
          PetscCall(PetscLogStagePop());
          PetscCall(MatDenseRestoreArrayWrite(D, &d_ptr));
          PetscCall(MatDenseRestoreArrayRead(C, &c_ptr));
        }

        if (check) {
          PetscCall(MatMatMultEqual(A, C, D, 10, &flg));
          if (!flg) {
            MatType Dtype;

            PetscCall(MatGetType(D, &Dtype));
            PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Error with A %s%s, C %s, D %s, Nk %" PetscInt_FMT "\n", Atype, mkl ? "mkl" : "", Ctype, Dtype, N[k]));
          }
        }

        /* the MKL implementation seems buggy for ABt */
        /* A*Bt */
        if (!mkl && trans && N[k] > 1) {
          PetscCall(PetscObjectTypeCompareAny((PetscObject)A, &flg, MATSEQAIJ, MATMPIAIJ, ""));
          if (flg) {
            PetscCall(MatTranspose(C, MAT_INPLACE_MATRIX, &C));
            PetscCall(MatGetType(C, &Ctype));
            if (!mkl) {
              PetscCall(MatProductCreateWithMat(A, C, NULL, D));
              PetscCall(MatProductSetType(D, MATPRODUCT_ABt));
              PetscCall(MatProductSetFromOptions(D));
              PetscCall(MatProductSymbolic(D));
              PetscCall(MatProductNumeric(D));
              PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Benchmarking MatProduct %s: with A %s %" PetscInt_FMT "x%" PetscInt_FMT " and Bt %s %" PetscInt_FMT "x%" PetscInt_FMT "\n", MatProductTypes[MATPRODUCT_ABt], Atype, AM, AN, Ctype, CM, CN));
              PetscCall(PetscLogStagePush(tstage));
              for (t = 0; t < trial; ++t) PetscCall(MatProductNumeric(D));
              PetscCall(PetscLogStagePop());
            } else {
              const PetscScalar *c_ptr;
              PetscScalar       *d_ptr;

              PetscCallMKLSparse(mkl_sparse_set_mm_hint, (spr, SPARSE_OPERATION_NON_TRANSPOSE, descr, SPARSE_LAYOUT_ROW_MAJOR, N[k], 1 + trial));
              PetscCallMKLSparse(mkl_sparse_optimize, (spr));
              PetscCall(MatDenseGetArrayRead(C, &c_ptr));
              PetscCall(MatDenseGetArrayWrite(D, &d_ptr));
              PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Benchmarking mkl_sparse_d_mm (ROW_MAJOR): with A %s %" PetscInt_FMT "x%" PetscInt_FMT " and B %s %" PetscInt_FMT "x%" PetscInt_FMT "\n", Atype, AM, AN, Ctype, CM, CN));
              PetscCallMKLSparse(mkl_sparse_d_mm, (SPARSE_OPERATION_NON_TRANSPOSE, 1.0, spr, descr, SPARSE_LAYOUT_ROW_MAJOR, c_ptr, CN, CM, 0.0, d_ptr, CM));
              PetscCall(PetscLogStagePush(stage));
              for (t = 0; t < trial; ++t) PetscCallMKLSparse(mkl_sparse_d_mm, (SPARSE_OPERATION_NON_TRANSPOSE, 1.0, spr, descr, SPARSE_LAYOUT_ROW_MAJOR, c_ptr, CN, CM, 0.0, d_ptr, CM));
              PetscCall(PetscLogStagePop());
              PetscCall(MatDenseRestoreArrayWrite(D, &d_ptr));
              PetscCall(MatDenseRestoreArrayRead(C, &c_ptr));
            }
          }
        }

        if (!mkl && trans && N[k] > 1 && flg && check) {
          PetscCall(MatMatTransposeMultEqual(A, C, D, 10, &flg));
          if (!flg) {
            MatType Dtype;
            PetscCall(MatGetType(D, &Dtype));
            PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Error with A %s%s, C %s, D %s, Nk %" PetscInt_FMT "\n", Atype, mkl ? "mkl" : "", Ctype, Dtype, N[k]));
          }
        }
        PetscCall(MatDestroy(&C));
        PetscCall(MatDestroy(&D));
      }
      if (mkl) {
        PetscCallMKLSparse(mkl_sparse_destroy, (spr));
        PetscCall(PetscFree(ia_ptr));
        PetscCall(PetscFree(ja_ptr));
      }
      if (cuda && i != ntype - 1) {
        PetscCall(PetscPrintf(PETSC_COMM_WORLD, "AIJCUSPARSE must be last, otherwise MatConvert() to another MatType is too slow\n"));
        break;
      }
    }
    if (E != A) PetscCall(MatDestroy(&E));
    PetscCall(MatDestroy(&A));
  }
  for (m = 0; m < ntype; ++m) PetscCall(PetscFree(type[m]));
  PetscCall(PetscFinalize());
  return 0;
}
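
/*
   Example invocation (a sketch; any square PETSc binary MatAIJ works as input, e.g., the
   datafile used by the test harness below):

     ./ex237 -f ${PETSC_DIR}/share/petsc/datafiles/matrices/spd-real-int32-float64 -bs 1,2 -N 1,16 -type aij,baij -check

   With -log_view, the timings for each (type, bs, N) combination are reported in the
   corresponding PetscLogStage.
*/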

/*TEST
   build:
      requires: double !complex !defined(PETSC_USE_64BIT_INDICES)

   testset:
      nsize: 1
      filter: sed "/Benchmarking/d"
      args: -f ${wPETSC_DIR}/share/petsc/datafiles/matrices/spd-real-int32-float64 -bs 1,2,3 -N 1,2,18 -check -trans -convert_aij {{false true}shared output}
      test:
         suffix: basic
         args: -type aij,sbaij,baij
         output_file: output/ex237.out
      test:
         suffix: maij
         args: -type aij,maij
         output_file: output/ex237.out
      test:
         suffix: cuda
         requires: cuda
         args: -type aij,aijcusparse
         output_file: output/ex237.out
      test:
         suffix: mkl
         requires: mkl_sparse_optimize
         args: -type aij,aijmkl,baijmkl,sbaijmkl
         output_file: output/ex237.out

TEST*/