Actual source code: cusparsematimpl.h

#pragma once

#include <petscpkg_version.h>
#include <../src/vec/vec/impls/seq/cupm/vecseqcupm.hpp>
#include <../src/sys/objects/device/impls/cupm/cupmthrustutility.hpp>
#include <petsc/private/petsclegacycupmblas.h>

#include <algorithm>
#include <vector>

#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/device_malloc_allocator.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>
#include <thrust/system/system_error.h>

#if defined(PETSC_USE_COMPLEX)
  #if defined(PETSC_USE_REAL_SINGLE)
const cuComplex PETSC_CUSPARSE_ONE  = {1.0f, 0.0f};
const cuComplex PETSC_CUSPARSE_ZERO = {0.0f, 0.0f};
    #define cusparseXcsrilu02_bufferSize(a, b, c, d, e, f, g, h, i)  cusparseCcsrilu02_bufferSize(a, b, c, d, (cuComplex *)e, f, g, h, i)
    #define cusparseXcsrilu02_analysis(a, b, c, d, e, f, g, h, i, j) cusparseCcsrilu02_analysis(a, b, c, d, (cuComplex *)e, f, g, h, i, j)
    #define cusparseXcsrilu02(a, b, c, d, e, f, g, h, i, j)          cusparseCcsrilu02(a, b, c, d, (cuComplex *)e, f, g, h, i, j)
    #define cusparseXcsric02_bufferSize(a, b, c, d, e, f, g, h, i)   cusparseCcsric02_bufferSize(a, b, c, d, (cuComplex *)e, f, g, h, i)
    #define cusparseXcsric02_analysis(a, b, c, d, e, f, g, h, i, j)  cusparseCcsric02_analysis(a, b, c, d, (cuComplex *)e, f, g, h, i, j)
    #define cusparseXcsric02(a, b, c, d, e, f, g, h, i, j)           cusparseCcsric02(a, b, c, d, (cuComplex *)e, f, g, h, i, j)
  #elif defined(PETSC_USE_REAL_DOUBLE)
const cuDoubleComplex PETSC_CUSPARSE_ONE  = {1.0, 0.0};
const cuDoubleComplex PETSC_CUSPARSE_ZERO = {0.0, 0.0};
    #define cusparseXcsrilu02_bufferSize(a, b, c, d, e, f, g, h, i)  cusparseZcsrilu02_bufferSize(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i)
    #define cusparseXcsrilu02_analysis(a, b, c, d, e, f, g, h, i, j) cusparseZcsrilu02_analysis(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i, j)
    #define cusparseXcsrilu02(a, b, c, d, e, f, g, h, i, j)          cusparseZcsrilu02(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i, j)
    #define cusparseXcsric02_bufferSize(a, b, c, d, e, f, g, h, i)   cusparseZcsric02_bufferSize(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i)
    #define cusparseXcsric02_analysis(a, b, c, d, e, f, g, h, i, j)  cusparseZcsric02_analysis(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i, j)
    #define cusparseXcsric02(a, b, c, d, e, f, g, h, i, j)           cusparseZcsric02(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i, j)
  #endif
#else
const PetscScalar PETSC_CUSPARSE_ONE  = 1.0;
const PetscScalar PETSC_CUSPARSE_ZERO = 0.0;
  #if defined(PETSC_USE_REAL_SINGLE)
    #define cusparseXcsrilu02_bufferSize cusparseScsrilu02_bufferSize
    #define cusparseXcsrilu02_analysis   cusparseScsrilu02_analysis
    #define cusparseXcsrilu02            cusparseScsrilu02
    #define cusparseXcsric02_bufferSize  cusparseScsric02_bufferSize
    #define cusparseXcsric02_analysis    cusparseScsric02_analysis
    #define cusparseXcsric02             cusparseScsric02
  #elif defined(PETSC_USE_REAL_DOUBLE)
    #define cusparseXcsrilu02_bufferSize cusparseDcsrilu02_bufferSize
    #define cusparseXcsrilu02_analysis   cusparseDcsrilu02_analysis
    #define cusparseXcsrilu02            cusparseDcsrilu02
    #define cusparseXcsric02_bufferSize  cusparseDcsric02_bufferSize
    #define cusparseXcsric02_analysis    cusparseDcsric02_analysis
    #define cusparseXcsric02             cusparseDcsric02
  #endif
#endif
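
/* Illustrative sketch (not part of the original header): the precision-neutral names above
   let callers write a single call that works for all four PetscScalar configurations, e.g.
     PetscCallCUSPARSE(cusparseXcsrilu02_bufferSize(handle, n, nnz, descr, a, ia, ja, info, &bufSize));
   resolves to cusparseDcsrilu02_bufferSize() in a double-precision real build and to
   cusparseZcsrilu02_bufferSize() (with the casts above) in a double-precision complex build.
   The variable names here are assumed for illustration only. */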

#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
  #define csrsvInfo_t              csrsv2Info_t
  #define cusparseCreateCsrsvInfo  cusparseCreateCsrsv2Info
  #define cusparseDestroyCsrsvInfo cusparseDestroyCsrsv2Info
  #if defined(PETSC_USE_COMPLEX)
    #if defined(PETSC_USE_REAL_SINGLE)
      #define cusparseXcsrsv_buffsize(a, b, c, d, e, f, g, h, i, j)          cusparseCcsrsv2_bufferSize(a, b, c, d, e, (cuComplex *)(f), g, h, i, j)
      #define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j, k)       cusparseCcsrsv2_analysis(a, b, c, d, e, (const cuComplex *)(f), g, h, i, j, k)
      #define cusparseXcsrsv_solve(a, b, c, d, e, f, g, h, i, j, k, l, m, n) cusparseCcsrsv2_solve(a, b, c, d, (const cuComplex *)(e), f, (const cuComplex *)(g), h, i, j, (const cuComplex *)(k), (cuComplex *)(l), m, n)
    #elif defined(PETSC_USE_REAL_DOUBLE)
      #define cusparseXcsrsv_buffsize(a, b, c, d, e, f, g, h, i, j)          cusparseZcsrsv2_bufferSize(a, b, c, d, e, (cuDoubleComplex *)(f), g, h, i, j)
      #define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j, k)       cusparseZcsrsv2_analysis(a, b, c, d, e, (const cuDoubleComplex *)(f), g, h, i, j, k)
      #define cusparseXcsrsv_solve(a, b, c, d, e, f, g, h, i, j, k, l, m, n) cusparseZcsrsv2_solve(a, b, c, d, (const cuDoubleComplex *)(e), f, (const cuDoubleComplex *)(g), h, i, j, (const cuDoubleComplex *)(k), (cuDoubleComplex *)(l), m, n)
    #endif
  #else /* not complex */
    #if defined(PETSC_USE_REAL_SINGLE)
      #define cusparseXcsrsv_buffsize cusparseScsrsv2_bufferSize
      #define cusparseXcsrsv_analysis cusparseScsrsv2_analysis
      #define cusparseXcsrsv_solve    cusparseScsrsv2_solve
    #elif defined(PETSC_USE_REAL_DOUBLE)
      #define cusparseXcsrsv_buffsize cusparseDcsrsv2_bufferSize
      #define cusparseXcsrsv_analysis cusparseDcsrsv2_analysis
      #define cusparseXcsrsv_solve    cusparseDcsrsv2_solve
    #endif
  #endif
#else /* PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) */
  #define csrsvInfo_t              cusparseSolveAnalysisInfo_t
  #define cusparseCreateCsrsvInfo  cusparseCreateSolveAnalysisInfo
  #define cusparseDestroyCsrsvInfo cusparseDestroySolveAnalysisInfo
  #if defined(PETSC_USE_COMPLEX)
    #if defined(PETSC_USE_REAL_SINGLE)
      #define cusparseXcsrsv_solve(a, b, c, d_IGNORED, e, f, g, h, i, j, k, l, m_IGNORED, n_IGNORED) cusparseCcsrsv_solve((a), (b), (c), (cuComplex *)(e), (f), (cuComplex *)(g), (h), (i), (j), (cuComplex *)(k), (cuComplex *)(l))
      #define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j_IGNORED, k_IGNORED)               cusparseCcsrsv_analysis((a), (b), (c), (d), (e), (cuComplex *)(f), (g), (h), (i))
    #elif defined(PETSC_USE_REAL_DOUBLE)
      #define cusparseXcsrsv_solve(a, b, c, d_IGNORED, e, f, g, h, i, j, k, l, m_IGNORED, n_IGNORED) \
        cusparseZcsrsv_solve((a), (b), (c), (cuDoubleComplex *)(e), (f), (cuDoubleComplex *)(g), (h), (i), (j), (cuDoubleComplex *)(k), (cuDoubleComplex *)(l))
      #define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j_IGNORED, k_IGNORED) cusparseZcsrsv_analysis((a), (b), (c), (d), (e), (cuDoubleComplex *)(f), (g), (h), (i))
    #endif
  #else /* not complex */
    #if defined(PETSC_USE_REAL_SINGLE)
      #define cusparseXcsrsv_solve                                     cusparseScsrsv_solve
      #define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j, k) cusparseScsrsv_analysis(a, b, c, d, e, f, g, h, i)
    #elif defined(PETSC_USE_REAL_DOUBLE)
      #define cusparseXcsrsv_solve                                     cusparseDcsrsv_solve
      #define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j, k) cusparseDcsrsv_analysis(a, b, c, d, e, f, g, h, i)
    #endif
  #endif
#endif /* PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) */
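
/* Illustrative sketch (assumed variable names): with CUDA >= 9 these macros select the
   csrsv2 interface, so a triangular solve follows the usual bufferSize/analysis/solve sequence
     cusparseXcsrsv_buffsize(handle, op, n, nnz, descr, val, ia, ja, info, &bufSize);
     cusparseXcsrsv_analysis(handle, op, n, nnz, descr, val, ia, ja, info, policy, buf);
     cusparseXcsrsv_solve(handle, op, n, nnz, &PETSC_CUSPARSE_ONE, descr, val, ia, ja, info, x, y, policy, buf);
   On older CUDA the same names map to the legacy csrsv interface and the extra arguments
   (marked _IGNORED above) are dropped. */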

#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
  #define cusparse_csr2csc cusparseCsr2cscEx2
  #if defined(PETSC_USE_COMPLEX)
    #if defined(PETSC_USE_REAL_SINGLE)
      #define cusparse_scalartype                                                             CUDA_C_32F
      #define cusparse_csr_spgeam(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) cusparseCcsrgeam2(a, b, c, (cuComplex *)d, e, f, (cuComplex *)g, h, i, (cuComplex *)j, k, l, (cuComplex *)m, n, o, p, (cuComplex *)q, r, s, t)
      #define cusparse_csr_spgeam_bufferSize(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) \
        cusparseCcsrgeam2_bufferSizeExt(a, b, c, (cuComplex *)d, e, f, (cuComplex *)g, h, i, (cuComplex *)j, k, l, (cuComplex *)m, n, o, p, (cuComplex *)q, r, s, t)
    #elif defined(PETSC_USE_REAL_DOUBLE)
      #define cusparse_scalartype CUDA_C_64F
      #define cusparse_csr_spgeam(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) \
        cusparseZcsrgeam2(a, b, c, (cuDoubleComplex *)d, e, f, (cuDoubleComplex *)g, h, i, (cuDoubleComplex *)j, k, l, (cuDoubleComplex *)m, n, o, p, (cuDoubleComplex *)q, r, s, t)
      #define cusparse_csr_spgeam_bufferSize(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) \
        cusparseZcsrgeam2_bufferSizeExt(a, b, c, (cuDoubleComplex *)d, e, f, (cuDoubleComplex *)g, h, i, (cuDoubleComplex *)j, k, l, (cuDoubleComplex *)m, n, o, p, (cuDoubleComplex *)q, r, s, t)
    #endif
  #else /* not complex */
    #if defined(PETSC_USE_REAL_SINGLE)
      #define cusparse_scalartype            CUDA_R_32F
      #define cusparse_csr_spgeam            cusparseScsrgeam2
      #define cusparse_csr_spgeam_bufferSize cusparseScsrgeam2_bufferSizeExt
    #elif defined(PETSC_USE_REAL_DOUBLE)
      #define cusparse_scalartype            CUDA_R_64F
      #define cusparse_csr_spgeam            cusparseDcsrgeam2
      #define cusparse_csr_spgeam_bufferSize cusparseDcsrgeam2_bufferSizeExt
    #endif
  #endif
#else /* PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) */
  #if defined(PETSC_USE_COMPLEX)
    #if defined(PETSC_USE_REAL_SINGLE)
      #define cusparse_csr_spmv(a, b, c, d, e, f, g, h, i, j, k, l, m)                        cusparseCcsrmv((a), (b), (c), (d), (e), (cuComplex *)(f), (g), (cuComplex *)(h), (i), (j), (cuComplex *)(k), (cuComplex *)(l), (cuComplex *)(m))
      #define cusparse_csr_spmm(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p)               cusparseCcsrmm((a), (b), (c), (d), (e), (f), (cuComplex *)(g), (h), (cuComplex *)(i), (j), (k), (cuComplex *)(l), (m), (cuComplex *)(n), (cuComplex *)(o), (p))
      #define cusparse_csr2csc(a, b, c, d, e, f, g, h, i, j, k, l)                            cusparseCcsr2csc((a), (b), (c), (d), (cuComplex *)(e), (f), (g), (cuComplex *)(h), (i), (j), (k), (l))
      #define cusparse_hyb_spmv(a, b, c, d, e, f, g, h)                                       cusparseChybmv((a), (b), (cuComplex *)(c), (d), (e), (cuComplex *)(f), (cuComplex *)(g), (cuComplex *)(h))
      #define cusparse_csr2hyb(a, b, c, d, e, f, g, h, i, j)                                  cusparseCcsr2hyb((a), (b), (c), (d), (cuComplex *)(e), (f), (g), (h), (i), (j))
      #define cusparse_hyb2csr(a, b, c, d, e, f)                                              cusparseChyb2csr((a), (b), (c), (cuComplex *)(d), (e), (f))
      #define cusparse_csr_spgemm(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) cusparseCcsrgemm(a, b, c, d, e, f, g, h, (cuComplex *)i, j, k, l, m, (cuComplex *)n, o, p, q, (cuComplex *)r, s, t)
      #define cusparse_csr_spgeam(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s)    cusparseCcsrgeam(a, b, c, (cuComplex *)d, e, f, (cuComplex *)g, h, i, (cuComplex *)j, k, l, (cuComplex *)m, n, o, p, (cuComplex *)q, r, s)
    #elif defined(PETSC_USE_REAL_DOUBLE)
      #define cusparse_csr_spmv(a, b, c, d, e, f, g, h, i, j, k, l, m) cusparseZcsrmv((a), (b), (c), (d), (e), (cuDoubleComplex *)(f), (g), (cuDoubleComplex *)(h), (i), (j), (cuDoubleComplex *)(k), (cuDoubleComplex *)(l), (cuDoubleComplex *)(m))
      #define cusparse_csr_spmm(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) \
        cusparseZcsrmm((a), (b), (c), (d), (e), (f), (cuDoubleComplex *)(g), (h), (cuDoubleComplex *)(i), (j), (k), (cuDoubleComplex *)(l), (m), (cuDoubleComplex *)(n), (cuDoubleComplex *)(o), (p))
      #define cusparse_csr2csc(a, b, c, d, e, f, g, h, i, j, k, l)                            cusparseZcsr2csc((a), (b), (c), (d), (cuDoubleComplex *)(e), (f), (g), (cuDoubleComplex *)(h), (i), (j), (k), (l))
      #define cusparse_hyb_spmv(a, b, c, d, e, f, g, h)                                       cusparseZhybmv((a), (b), (cuDoubleComplex *)(c), (d), (e), (cuDoubleComplex *)(f), (cuDoubleComplex *)(g), (cuDoubleComplex *)(h))
      #define cusparse_csr2hyb(a, b, c, d, e, f, g, h, i, j)                                  cusparseZcsr2hyb((a), (b), (c), (d), (cuDoubleComplex *)(e), (f), (g), (h), (i), (j))
      #define cusparse_hyb2csr(a, b, c, d, e, f)                                              cusparseZhyb2csr((a), (b), (c), (cuDoubleComplex *)(d), (e), (f))
      #define cusparse_csr_spgemm(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) cusparseZcsrgemm(a, b, c, d, e, f, g, h, (cuDoubleComplex *)i, j, k, l, m, (cuDoubleComplex *)n, o, p, q, (cuDoubleComplex *)r, s, t)
      #define cusparse_csr_spgeam(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s) \
        cusparseZcsrgeam(a, b, c, (cuDoubleComplex *)d, e, f, (cuDoubleComplex *)g, h, i, (cuDoubleComplex *)j, k, l, (cuDoubleComplex *)m, n, o, p, (cuDoubleComplex *)q, r, s)
    #endif
  #else
    #if defined(PETSC_USE_REAL_SINGLE)
      #define cusparse_csr_spmv   cusparseScsrmv
      #define cusparse_csr_spmm   cusparseScsrmm
      #define cusparse_csr2csc    cusparseScsr2csc
      #define cusparse_hyb_spmv   cusparseShybmv
      #define cusparse_csr2hyb    cusparseScsr2hyb
      #define cusparse_hyb2csr    cusparseShyb2csr
      #define cusparse_csr_spgemm cusparseScsrgemm
      #define cusparse_csr_spgeam cusparseScsrgeam
    #elif defined(PETSC_USE_REAL_DOUBLE)
      #define cusparse_csr_spmv   cusparseDcsrmv
      #define cusparse_csr_spmm   cusparseDcsrmm
      #define cusparse_csr2csc    cusparseDcsr2csc
      #define cusparse_hyb_spmv   cusparseDhybmv
      #define cusparse_csr2hyb    cusparseDcsr2hyb
      #define cusparse_hyb2csr    cusparseDhyb2csr
      #define cusparse_csr_spgemm cusparseDcsrgemm
      #define cusparse_csr_spgeam cusparseDcsrgeam
    #endif
  #endif
#endif /* PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) */
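
/* Illustrative sketch: cusparse_scalartype is the cudaDataType handed to the generic
   cuSPARSE API so that the compute type matches PetscScalar, e.g. (call assumed for
   illustration, variable names hypothetical)
     cusparseSpMV(handle, op, &alpha, matDescr, vecX, &beta, vecY, cusparse_scalartype, alg, buf);
*/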

#define THRUSTINTARRAY32 thrust::device_vector<int>
#define THRUSTINTARRAY   thrust::device_vector<PetscInt>
#define THRUSTARRAY      thrust::device_vector<PetscScalar>
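
/* Illustrative sketch (not from the original header): these aliases are used with
   heap-allocated thrust vectors, e.g.
     THRUSTARRAY *w   = new THRUSTARRAY(n);                  // n PetscScalars on the device
     PetscScalar *raw = thrust::raw_pointer_cast(w->data()); // raw pointer for cuSPARSE calls
*/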

/* A CSR matrix on the GPU: dimensions, row offsets, column indices, and values */
struct CsrMatrix {
  PetscInt          num_rows;
  PetscInt          num_cols;
  PetscInt          num_entries;
  THRUSTINTARRAY32 *row_offsets;
  THRUSTINTARRAY32 *column_indices;
  THRUSTARRAY      *values;
};
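
/* Illustrative sketch (assumed sizes, not from the original header): populating a
   CsrMatrix for an nrows x ncols matrix with nnz nonzeros
     CsrMatrix *m      = new CsrMatrix;
     m->num_rows       = nrows;
     m->num_cols       = ncols;
     m->num_entries    = nnz;
     m->row_offsets    = new THRUSTINTARRAY32(nrows + 1);
     m->column_indices = new THRUSTINTARRAY32(nnz);
     m->values         = new THRUSTARRAY(nnz);
*/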

/* A struct holding the relevant data needed for a MatSolve */
struct Mat_SeqAIJCUSPARSETriFactorStruct {
  /* Data needed for the triangular solve */
  cusparseMatDescr_t  descr;
  cusparseOperation_t solveOp;
  CsrMatrix          *csrMat;
#if PETSC_PKG_CUDA_VERSION_LT(11, 4, 0)
  csrsvInfo_t           solveInfo;
  cusparseSolvePolicy_t solvePolicy; /* whether level information is generated and used */
#endif
  int          solveBufferSize;
  void        *solveBuffer;
  size_t       csr2cscBufferSize; /* to transpose the triangular factor (only used for CUDA >= 11.0) */
  void        *csr2cscBuffer;
  PetscScalar *AA_h; /* managed host buffer for moving values to the GPU */
};

/* A larger struct holding all the triangular factors for a solve, a transpose solve, and any indices used in a reordering */
struct Mat_SeqAIJCUSPARSETriFactors {
#if PETSC_PKG_CUDA_VERSION_LT(11, 4, 0)
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtr;          /* pointer for lower triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtr;          /* pointer for upper triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtrTranspose; /* pointer for lower triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtrTranspose; /* pointer for upper triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
#endif

  THRUSTINTARRAY  *rpermIndices; /* indices used for any reordering */
  THRUSTINTARRAY  *cpermIndices; /* indices used for any reordering */
  THRUSTARRAY     *workVector;
  cusparseHandle_t handle; /* a handle to the cusparse library */
  PetscInt         nnz;    /* number of nonzeros ... need this for accurate logging between ICC and ILU */
  cudaDeviceProp   dev_prop;
  PetscBool        init_dev_prop;

#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
  /* csrilu0/csric0 appeared in cuSPARSE 8.0, but we use them along with cusparseSpSV,
     which first appeared in cuSPARSE 11.5 (CUDA 11.3).
  */
  PetscScalar *csrVal, *diag;             // the diagonal D in the UtDU factorization from Cholesky
  int         *csrRowPtr32, *csrColIdx32; // i,j of M; cusparseScsrilu02/ic02() etc. require 32-bit indices

  PetscInt    *csrRowPtr, *csrColIdx; // i,j of M on device, for CUDA APIs that support 64-bit indices
  PetscScalar *csrVal_h, *diag_h;     // since LU is done on the host, we prepare a factored matrix in regular CSR format on the host and then copy it to the device
  PetscInt    *csrRowPtr_h;           // csrColIdx_h is temporary, so it is not stored here

  /* Mixed mat descriptor types? Yes, different cuSPARSE APIs use different types */
  cusparseMatDescr_t   matDescr_M;
  cusparseSpMatDescr_t spMatDescr_L, spMatDescr_U;
  cusparseSpSVDescr_t  spsvDescr_L, spsvDescr_Lt, spsvDescr_U, spsvDescr_Ut;

  cusparseDnVecDescr_t dnVecDescr_X, dnVecDescr_Y;
  PetscScalar         *X, *Y; /* data arrays of the dense vectors X and Y */

  /* Mixed size types? Yes, CUDA 11.7.0 declared cusparseDcsrilu02_bufferSizeExt(), which returns size_t, but did not implement it! */
  int    factBufferSize_M; /* M ~= LU or LLt */
  size_t spsvBufferSize_L, spsvBufferSize_Lt, spsvBufferSize_U, spsvBufferSize_Ut;
  /* cuSPARSE needs various buffers for the factorization and the solves with L, U, Lt, or Ut.
     To save memory, we share the factorization buffer with one of spsvBuffer_L/U.
  */
  void *factBuffer_M, *spsvBuffer_L, *spsvBuffer_U, *spsvBuffer_Lt, *spsvBuffer_Ut;

  csrilu02Info_t        ilu0Info_M;
  csric02Info_t         ic0Info_M;
  int                   structural_zero, numerical_zero;
  cusparseSolvePolicy_t policy_M;

  /* In MatSolveTranspose() for ILU0, we use these two flags to do the solve on demand */
  PetscBool createdTransposeSpSVDescr;    /* Have we created the SpSV descriptors for Lt, Ut? */
  PetscBool updatedTransposeSpSVAnalysis; /* Have we ever updated (done) the SpSV analysis for Lt, Ut? */
  PetscBool updatedSpSVAnalysis;          /* Have we ever updated (done) the SpSV analysis for L, U? */

  PetscLogDouble numericFactFlops; /* Estimated FLOPs in the ILU0/ICC0 numeric factorization */
#endif
};
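
/* Illustrative sketch of how the CUDA >= 11.4 members above are typically exercised for
   ILU(0) (assumed typical cuSPARSE sequence, not a verbatim excerpt of PETSc code):
     cusparseXcsrilu02_bufferSize(handle, n, nnz, matDescr_M, csrVal, csrRowPtr32, csrColIdx32, ilu0Info_M, &factBufferSize_M);
     cusparseXcsrilu02_analysis(...);  // may report a structural zero
     cusparseXcsrilu02(...);           // numeric factorization; may report a numerical zero
     cusparseSpSV_analysis(...);       // once per L (and U), using spsvDescr_L/spsvBuffer_L
     cusparseSpSV_solve(...);          // per MatSolve, on dnVecDescr_X/dnVecDescr_Y
*/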

struct Mat_CusparseSpMV {
  PetscBool initialized;    /* Don't rely on spmvBuffer != NULL to test whether the struct is initialized, */
  size_t    spmvBufferSize; /* since it is not clear whether spmvBuffer can be NULL even after cusparseSpMV_bufferSize() */
  void     *spmvBuffer;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)      /* these are present from CUDA 10.1, but PETSc code makes use of them from CUDA 11 on */
  cusparseDnVecDescr_t vecXDescr, vecYDescr; /* descriptors for the dense vectors in y=op(A)x */
#endif
};

/* A struct holding the relevant data needed for a MatMult */
struct Mat_SeqAIJCUSPARSEMultStruct {
  void              *mat;          /* opaque pointer to a matrix; this could be either a cusparseHybMat_t or a CsrMatrix */
  cusparseMatDescr_t descr;        /* data needed to describe the matrix for a multiply */
  THRUSTINTARRAY    *cprowIndices; /* compressed row indices used in the parallel SpMV */
  PetscScalar       *alpha_one;    /* pointer to a device "scalar" storing the alpha parameter in the SpMV */
  PetscScalar       *beta_zero;    /* pointer to a device "scalar" storing the beta parameter in the SpMV as zero */
  PetscScalar       *beta_one;     /* pointer to a device "scalar" storing the beta parameter in the SpMV as one */
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
  cusparseSpMatDescr_t matDescr;          /* descriptor for the matrix, used by SpMV and SpMM */
  #if PETSC_PKG_CUDA_VERSION_GE(12, 4, 0) // tested up to 12.6.0
  cusparseSpMatDescr_t matDescr_SpMV[3];  // use separate MatDescrs for the different opA's, to work around cusparse bugs after 12.4, see https://github.com/NVIDIA/CUDALibrarySamples/issues/212,
  cusparseSpMatDescr_t matDescr_SpMM[3];  // and known issues https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#cusparse-release-12-6
  #endif
  Mat_CusparseSpMV cuSpMV[3]; /* different Mat_CusparseSpMV structs for non-transpose, transpose, conj-transpose */
  Mat_SeqAIJCUSPARSEMultStruct() : matDescr(NULL)
  {
    for (int i = 0; i < 3; i++) {
      cuSpMV[i].initialized = PETSC_FALSE;
  #if PETSC_PKG_CUDA_VERSION_GE(12, 4, 0)
      matDescr_SpMV[i] = NULL;
      matDescr_SpMM[i] = NULL;
  #endif
    }
  }
#endif
};
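
/* Illustrative note: the three slots of cuSpMV (and matDescr_SpMV/matDescr_SpMM) are indexed
   by the operation applied to A; the assumed mapping is 0 = non-transpose, 1 = transpose,
   2 = conjugate transpose, matching the cusparseOperation_t enumerator values. */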

/* A larger struct holding all the matrices for an SpMV and a transpose SpMV */
struct Mat_SeqAIJCUSPARSE {
  Mat_SeqAIJCUSPARSEMultStruct *mat;            /* pointer to the matrix on the GPU */
  Mat_SeqAIJCUSPARSEMultStruct *matTranspose;   /* pointer to the matrix on the GPU for the transpose (useful for BiCG) */
  THRUSTARRAY                  *workVector;     /* pointer to a work vector to which we can copy the relevant indices of a vector we want to multiply */
  THRUSTINTARRAY32             *rowoffsets_gpu; /* row offsets on the GPU in non-compressed-row format; used to convert CSR to CSC */
  PetscInt                      nrows;          /* number of rows of the matrix seen by the GPU */
  MatCUSPARSEStorageFormat      format;         /* the storage format for the matrix on the device */
  PetscBool                     use_cpu_solve;  /* use the AIJ_Seq (I)LU solve */
  cudaStream_t                  stream;         /* a stream for the parallel SpMV ... this is not owned and should not be deleted */
  cusparseHandle_t              handle;         /* a handle to the cusparse library ... this may not be owned (if we're working in parallel, i.e., multiple GPUs) */
  PetscObjectState              nonzerostate;   /* track the nonzero state to possibly recreate the GPU matrix */
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
  size_t               csr2cscBufferSize; /* scratch data used to compute matTranspose above */
  void                *csr2cscBuffer;     /* this is used as a C struct and is calloc'ed by PetscNew() */
  cusparseCsr2CscAlg_t csr2cscAlg;        /* algorithms can be selected from command line options */
  cusparseSpMVAlg_t    spmvAlg;
  cusparseSpMMAlg_t    spmmAlg;
#endif
  THRUSTINTARRAY *csr2csc_i;
  THRUSTINTARRAY *coords; /* permutation array used in MatSeqAIJCUSPARSEMergeMats */
};
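
/* Illustrative note: format is settable at runtime, e.g. (assumed option name for
   illustration) via -mat_cusparse_storage_format csr, with ell and hyb being the other
   MatCUSPARSEStorageFormat values supported on older CUDA versions. */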

typedef struct Mat_SeqAIJCUSPARSETriFactors *Mat_SeqAIJCUSPARSETriFactors_p;

PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat);
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSEMergeMats(Mat, Mat, MatReuse, Mat *);
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSETriFactors_Reset(Mat_SeqAIJCUSPARSETriFactors_p *);

using VecSeq_CUDA = Petsc::vec::cupm::impl::VecSeq_CUPM<Petsc::device::cupm::DeviceType::CUDA>;

/* Return true if the raw pointer refers to device (CUDA) memory */
static inline bool isCudaMem(const void *data)
{
  using namespace Petsc::device::cupm;
  auto mtype = PETSC_MEMTYPE_HOST;

  PetscFunctionBegin;
  PetscCallAbort(PETSC_COMM_SELF, impl::Interface<DeviceType::CUDA>::PetscCUPMGetMemType(data, &mtype));
  PetscFunctionReturn(PetscMemTypeDevice(mtype));
}
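
/* Illustrative sketch (hypothetical caller): isCudaMem() lets callers branch on where a raw
   pointer lives, e.g.
     if (isCudaMem(x)) { ... use x directly in kernels/cuSPARSE calls ... }
     else              { ... copy x from the host first ... }
*/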