Actual source code: aijviennacl.cxx


  2: /*
  3:     Defines the basic matrix operations for the AIJ (compressed row)
  4:   matrix storage format.
  5: */

  7: #include <petscconf.h>
  8: #define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1
  9: #include <../src/mat/impls/aij/seq/aij.h>
 10: #include <petscbt.h>
 11: #include <../src/vec/vec/impls/dvecimpl.h>
 12: #include <petsc/private/vecimpl.h>

 14: #include <../src/mat/impls/aij/seq/seqviennacl/viennaclmatimpl.h>

 16: #include <algorithm>
 17: #include <vector>
 18: #include <string>

 20: #include "viennacl/linalg/prod.hpp"

 22: PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJViennaCL(Mat A, MatType type, MatReuse reuse, Mat *newmat);
 23: PETSC_INTERN PetscErrorCode MatGetFactor_seqaij_petsc(Mat, MatFactorType, Mat *);
 24: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_SeqAIJ_SeqDense(Mat);

/*
  MatViennaCLCopyToGPU - Pushes the host-side AIJ (CSR) data of A to the
  GPU-resident ViennaCL matrix, converting PetscInt index arrays into the
  unsigned int arrays ViennaCL expects.

  The copy happens only when the host holds the freshest (or only) data,
  i.e. when offloadmask is PETSC_OFFLOAD_UNALLOCATED or PETSC_OFFLOAD_CPU;
  on success the mask is set to PETSC_OFFLOAD_BOTH.  Depending on
  a->compressedrow.use the data lands in either the compressed-row ViennaCL
  matrix (only nonzero rows, plus a row-index map) or the plain CSR matrix.
  A temporary GPU vector of length rmap->n is also (re)allocated for later
  use in MatMultAdd.
*/
PetscErrorCode MatViennaCLCopyToGPU(Mat A)
{
  Mat_SeqAIJViennaCL *viennaclstruct = (Mat_SeqAIJViennaCL *)A->spptr;
  Mat_SeqAIJ         *a              = (Mat_SeqAIJ *)A->data;

  if (A->rmap->n > 0 && A->cmap->n > 0 && a->nz) { //some OpenCL SDKs have issues with buffers of size 0
    if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
      PetscLogEventBegin(MAT_ViennaCLCopyToGPU, A, 0, 0, 0);

      try {
        if (a->compressedrow.use) {
          if (!viennaclstruct->compressed_mat) viennaclstruct->compressed_mat = new ViennaCLCompressedAIJMatrix();

          // Since PetscInt is different from cl_uint, we have to convert:
          // 'dummy' only tells typesafe_host_array which backend/element size to
          // use for sizing; no device memory is attached to it.
          viennacl::backend::mem_handle dummy;

          // Row pointer array of the compressed (nonzero-rows-only) CSR: nrows+1 entries.
          viennacl::backend::typesafe_host_array<unsigned int> row_buffer;
          row_buffer.raw_resize(dummy, a->compressedrow.nrows + 1);
          for (PetscInt i = 0; i <= a->compressedrow.nrows; ++i) row_buffer.set(i, (a->compressedrow.i)[i]);

          // Map from compressed row number back to the original global row index.
          viennacl::backend::typesafe_host_array<unsigned int> row_indices;
          row_indices.raw_resize(dummy, a->compressedrow.nrows);
          for (PetscInt i = 0; i < a->compressedrow.nrows; ++i) row_indices.set(i, (a->compressedrow.rindex)[i]);

          // Column indices for all nz nonzeros.
          viennacl::backend::typesafe_host_array<unsigned int> col_buffer;
          col_buffer.raw_resize(dummy, a->nz);
          for (PetscInt i = 0; i < a->nz; ++i) col_buffer.set(i, (a->j)[i]);

          viennaclstruct->compressed_mat->set(row_buffer.get(), row_indices.get(), col_buffer.get(), a->a, A->rmap->n, A->cmap->n, a->compressedrow.nrows, a->nz);
          // (nrows+1) row pointers + nrows row indices + nz column indices, plus nz scalars.
          PetscLogCpuToGpu(((2 * a->compressedrow.nrows) + 1 + a->nz) * sizeof(PetscInt) + (a->nz) * sizeof(PetscScalar));
        } else {
          if (!viennaclstruct->mat) viennaclstruct->mat = new ViennaCLAIJMatrix();

          // Since PetscInt is in general different from cl_uint, we have to convert:
          viennacl::backend::mem_handle dummy;

          // Full CSR row pointer array: rmap->n + 1 entries.
          viennacl::backend::typesafe_host_array<unsigned int> row_buffer;
          row_buffer.raw_resize(dummy, A->rmap->n + 1);
          for (PetscInt i = 0; i <= A->rmap->n; ++i) row_buffer.set(i, (a->i)[i]);

          viennacl::backend::typesafe_host_array<unsigned int> col_buffer;
          col_buffer.raw_resize(dummy, a->nz);
          for (PetscInt i = 0; i < a->nz; ++i) col_buffer.set(i, (a->j)[i]);

          viennaclstruct->mat->set(row_buffer.get(), col_buffer.get(), a->a, A->rmap->n, A->cmap->n, a->nz);
          PetscLogCpuToGpu(((A->rmap->n + 1) + a->nz) * sizeof(PetscInt) + (a->nz) * sizeof(PetscScalar));
        }
        ViennaCLWaitForGPU();
      } catch (std::exception const &ex) {
        SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "ViennaCL error: %s", ex.what());
      }

      // Create temporary vector for v += A*x:
      // reuse it when the size still matches, otherwise reallocate.
      if (viennaclstruct->tempvec) {
        if (viennaclstruct->tempvec->size() != static_cast<std::size_t>(A->rmap->n)) {
          delete (ViennaCLVector *)viennaclstruct->tempvec;
          viennaclstruct->tempvec = new ViennaCLVector(A->rmap->n);
        } else {
          viennaclstruct->tempvec->clear();
        }
      } else {
        viennaclstruct->tempvec = new ViennaCLVector(A->rmap->n);
      }

      A->offloadmask = PETSC_OFFLOAD_BOTH;

      PetscLogEventEnd(MAT_ViennaCLCopyToGPU, A, 0, 0, 0);
    }
  }
  return 0;
}

/*
  MatViennaCLCopyFromGPU - Pulls matrix data from the GPU back into the host
  AIJ storage of A.

  Three cases:
  1. offloadmask == PETSC_OFFLOAD_UNALLOCATED and Agpu given: the host side
     has no storage yet, so allocate i/j/a (and imax/ilen) to match Agpu and
     copy the full CSR structure and values down.
  2. offloadmask == PETSC_OFFLOAD_UNALLOCATED and no Agpu: nothing to do.
  3. otherwise: host structure already exists; only refresh the numerical
     values (from Agpu if given, else from the internally stored GPU matrix),
     and only when the GPU copy is the fresh one (PETSC_OFFLOAD_GPU).

  Ends with offloadmask = PETSC_OFFLOAD_BOTH and a final-assembly cycle so
  the assembly does not reset the flag to PETSC_OFFLOAD_CPU and recopy.
*/
PetscErrorCode MatViennaCLCopyFromGPU(Mat A, const ViennaCLAIJMatrix *Agpu)
{
  Mat_SeqAIJViennaCL *viennaclstruct = (Mat_SeqAIJViennaCL *)A->spptr;
  Mat_SeqAIJ         *a              = (Mat_SeqAIJ *)A->data;
  PetscInt            m              = A->rmap->n;

  if (A->offloadmask == PETSC_OFFLOAD_BOTH) return 0;
  if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED && Agpu) {
    try {
      a->nz           = Agpu->nnz();
      a->maxnz        = a->nz; /* Since we allocate exactly the right amount */
      A->preallocated = PETSC_TRUE;
      // Free any previous host storage, respecting how it was allocated.
      if (a->singlemalloc) {
        if (a->a) PetscFree3(a->a, a->j, a->i);
      } else {
        if (a->i) PetscFree(a->i);
        if (a->j) PetscFree(a->j);
        if (a->a) PetscFree(a->a);
      }
      PetscMalloc3(a->nz, &a->a, a->nz, &a->j, m + 1, &a->i);

      a->singlemalloc = PETSC_TRUE;

      /* Setup row lengths */
      PetscFree(a->imax);
      PetscFree(a->ilen);
      PetscMalloc1(m, &a->imax);
      PetscMalloc1(m, &a->ilen);

      /* Copy data back from GPU */
      // handle1 = row pointers, handle2 = column indices, handle = values.
      viennacl::backend::typesafe_host_array<unsigned int> row_buffer;
      row_buffer.raw_resize(Agpu->handle1(), Agpu->size1() + 1);

      // copy row array
      viennacl::backend::memory_read(Agpu->handle1(), 0, row_buffer.raw_size(), row_buffer.get());
      (a->i)[0] = row_buffer[0];
      for (PetscInt i = 0; i < (PetscInt)Agpu->size1(); ++i) {
        (a->i)[i + 1] = row_buffer[i + 1];
        a->imax[i] = a->ilen[i] = a->i[i + 1] - a->i[i]; //Set imax[] and ilen[] arrays at the same time as i[] for better cache reuse
      }

      // copy column indices
      viennacl::backend::typesafe_host_array<unsigned int> col_buffer;
      col_buffer.raw_resize(Agpu->handle2(), Agpu->nnz());
      viennacl::backend::memory_read(Agpu->handle2(), 0, col_buffer.raw_size(), col_buffer.get());
      for (PetscInt i = 0; i < (PetscInt)Agpu->nnz(); ++i) (a->j)[i] = col_buffer[i];

      // copy nonzero entries directly to destination (no conversion required)
      viennacl::backend::memory_read(Agpu->handle(), 0, sizeof(PetscScalar) * Agpu->nnz(), a->a);

      PetscLogGpuToCpu(row_buffer.raw_size() + col_buffer.raw_size() + (Agpu->nnz() * sizeof(PetscScalar)));
      ViennaCLWaitForGPU();
      /* TODO: Once a->diag is moved out of MatAssemblyEnd(), invalidate it here. */
    } catch (std::exception const &ex) {
      SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "ViennaCL error: %s", ex.what());
    }
  } else if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED) {
    return 0;
  } else {
    // Host structure exists; refresh values only when the GPU copy is newer
    // or an explicit source matrix was supplied.
    if (!Agpu && A->offloadmask != PETSC_OFFLOAD_GPU) return 0;

    if (!Agpu) {
      viennacl::backend::memory_read(viennaclstruct->mat->handle(), 0, sizeof(PetscScalar) * viennaclstruct->mat->nnz(), a->a);
    } else {
      viennacl::backend::memory_read(Agpu->handle(), 0, sizeof(PetscScalar) * Agpu->nnz(), a->a);
    }
  }
  A->offloadmask = PETSC_OFFLOAD_BOTH;
  /* This assembly prevents resetting the flag to PETSC_OFFLOAD_CPU and recopying */
  MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
  return 0;
}

175: PetscErrorCode MatMult_SeqAIJViennaCL(Mat A, Vec xx, Vec yy)
176: {
177:   Mat_SeqAIJ           *a              = (Mat_SeqAIJ *)A->data;
178:   Mat_SeqAIJViennaCL   *viennaclstruct = (Mat_SeqAIJViennaCL *)A->spptr;
179:   const ViennaCLVector *xgpu           = NULL;
180:   ViennaCLVector       *ygpu           = NULL;

182:   /* The line below is necessary due to the operations that modify the matrix on the CPU (axpy, scale, etc) */
183:   MatViennaCLCopyToGPU(A);
184:   if (A->rmap->n > 0 && A->cmap->n > 0 && a->nz) {
185:     VecViennaCLGetArrayRead(xx, &xgpu);
186:     VecViennaCLGetArrayWrite(yy, &ygpu);
187:     PetscLogGpuTimeBegin();
188:     try {
189:       if (a->compressedrow.use) {
190:         *ygpu = viennacl::linalg::prod(*viennaclstruct->compressed_mat, *xgpu);
191:       } else {
192:         *ygpu = viennacl::linalg::prod(*viennaclstruct->mat, *xgpu);
193:       }
194:       ViennaCLWaitForGPU();
195:     } catch (std::exception const &ex) {
196:       SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "ViennaCL error: %s", ex.what());
197:     }
198:     PetscLogGpuTimeEnd();
199:     VecViennaCLRestoreArrayRead(xx, &xgpu);
200:     VecViennaCLRestoreArrayWrite(yy, &ygpu);
201:     PetscLogGpuFlops(2.0 * a->nz - a->nonzerorowcnt);
202:   } else {
203:     VecSet_SeqViennaCL(yy, 0);
204:   }
205:   return 0;
206: }

208: PetscErrorCode MatMultAdd_SeqAIJViennaCL(Mat A, Vec xx, Vec yy, Vec zz)
209: {
210:   Mat_SeqAIJ           *a              = (Mat_SeqAIJ *)A->data;
211:   Mat_SeqAIJViennaCL   *viennaclstruct = (Mat_SeqAIJViennaCL *)A->spptr;
212:   const ViennaCLVector *xgpu = NULL, *ygpu = NULL;
213:   ViennaCLVector       *zgpu = NULL;

215:   /* The line below is necessary due to the operations that modify the matrix on the CPU (axpy, scale, etc) */
216:   MatViennaCLCopyToGPU(A);
217:   if (A->rmap->n > 0 && A->cmap->n > 0 && a->nz) {
218:     try {
219:       VecViennaCLGetArrayRead(xx, &xgpu);
220:       VecViennaCLGetArrayRead(yy, &ygpu);
221:       VecViennaCLGetArrayWrite(zz, &zgpu);
222:       PetscLogGpuTimeBegin();
223:       if (a->compressedrow.use) *viennaclstruct->tempvec = viennacl::linalg::prod(*viennaclstruct->compressed_mat, *xgpu);
224:       else *viennaclstruct->tempvec = viennacl::linalg::prod(*viennaclstruct->mat, *xgpu);
225:       if (zz != yy) *zgpu = *ygpu + *viennaclstruct->tempvec;
226:       else *zgpu += *viennaclstruct->tempvec;
227:       ViennaCLWaitForGPU();
228:       PetscLogGpuTimeEnd();
229:       VecViennaCLRestoreArrayRead(xx, &xgpu);
230:       VecViennaCLRestoreArrayRead(yy, &ygpu);
231:       VecViennaCLRestoreArrayWrite(zz, &zgpu);

233:     } catch (std::exception const &ex) {
234:       SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "ViennaCL error: %s", ex.what());
235:     }
236:     PetscLogGpuFlops(2.0 * a->nz);
237:   } else {
238:     VecCopy_SeqViennaCL(yy, zz);
239:   }
240:   return 0;
241: }

243: PetscErrorCode MatAssemblyEnd_SeqAIJViennaCL(Mat A, MatAssemblyType mode)
244: {
245:   MatAssemblyEnd_SeqAIJ(A, mode);
246:   if (mode == MAT_FLUSH_ASSEMBLY) return 0;
247:   if (!A->boundtocpu) MatViennaCLCopyToGPU(A);
248:   return 0;
249: }

251: /* --------------------------------------------------------------------------------*/
252: /*@C
253:    MatCreateSeqAIJViennaCL - Creates a sparse matrix in `MATSEQAIJVIENNACL` (compressed row) format
 254:    (the default sequential PETSc format).  This matrix will ultimately be pushed down
255:    to GPUs and use the ViennaCL library for calculations. For good matrix
256:    assembly performance the user should preallocate the matrix storage by setting
257:    the parameter nz (or the array nnz).  By setting these parameters accurately,
258:    performance during matrix assembly can be increased substantially.

260:    Collective

262:    Input Parameters:
263: +  comm - MPI communicator, set to `PETSC_COMM_SELF`
264: .  m - number of rows
265: .  n - number of columns
266: .  nz - number of nonzeros per row (same for all rows)
267: -  nnz - array containing the number of nonzeros in the various rows
268:          (possibly different for each row) or NULL

270:    Output Parameter:
271: .  A - the matrix

273:    It is recommended that one use the `MatCreate()`, `MatSetType()` and/or `MatSetFromOptions()`,
274:    MatXXXXSetPreallocation() paradigm instead of this routine directly.
275:    [MatXXXXSetPreallocation() is, for example, `MatSeqAIJSetPreallocation()`]

277:    Notes:
278:    If nnz is given then nz is ignored

280:    The AIJ format, also called
281:    compressed row storage, is fully compatible with standard Fortran 77
282:    storage.  That is, the stored row and column indices can begin at
283:    either one (as in Fortran) or zero.  See the users' manual for details.

285:    Specify the preallocated storage with either nz or nnz (not both).
286:    Set nz = `PETSC_DEFAULT` and nnz = NULL for PETSc to control dynamic memory
287:    allocation.  For large problems you MUST preallocate memory or you
288:    will get TERRIBLE performance, see the users' manual chapter on matrices.

290:    Level: intermediate

292: .seealso: `MATSEQAIJVIENNACL`, `MatCreate()`, `MatCreateAIJ()`, `MatCreateAIJCUSPARSE()`, `MatSetValues()`, `MatSeqAIJSetColumnIndices()`, `MatCreateSeqAIJWithArrays()`, `MatCreateAIJ()`
293: @*/
PetscErrorCode MatCreateSeqAIJViennaCL(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt nz, const PetscInt nnz[], Mat *A)
{
  // Standard create/set-sizes/set-type sequence; local and global sizes
  // coincide because this is a sequential matrix type.
  MatCreate(comm, A);
  MatSetSizes(*A, m, n, m, n);
  MatSetType(*A, MATSEQAIJVIENNACL);
  // Preallocate host-side AIJ storage; nnz (per-row counts) overrides nz when non-NULL.
  MatSeqAIJSetPreallocation_SeqAIJ(*A, nz, (PetscInt *)nnz);
  return 0;
}

303: PetscErrorCode MatDestroy_SeqAIJViennaCL(Mat A)
304: {
305:   Mat_SeqAIJViennaCL *viennaclcontainer = (Mat_SeqAIJViennaCL *)A->spptr;

307:   try {
308:     if (viennaclcontainer) {
309:       delete viennaclcontainer->tempvec;
310:       delete viennaclcontainer->mat;
311:       delete viennaclcontainer->compressed_mat;
312:       delete viennaclcontainer;
313:     }
314:     A->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
315:   } catch (std::exception const &ex) {
316:     SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "ViennaCL error: %s", ex.what());
317:   }

319:   PetscObjectComposeFunction((PetscObject)A, "MatConvert_seqaij_seqaijviennacl_C", NULL);
320:   PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijviennacl_seqdense_C", NULL);
321:   PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijviennacl_seqaij_C", NULL);

323:   /* this next line is because MatDestroy tries to PetscFree spptr if it is not zero, and PetscFree only works if the memory was allocated with PetscNew or PetscMalloc, which don't call the constructor */
324:   A->spptr = 0;
325:   MatDestroy_SeqAIJ(A);
326:   return 0;
327: }

// Type constructor for MATSEQAIJVIENNACL: build a plain SeqAIJ matrix, then
// convert it in place to the ViennaCL type (which installs the GPU ops and spptr).
PETSC_EXTERN PetscErrorCode MatCreate_SeqAIJViennaCL(Mat B)
{
  MatCreate_SeqAIJ(B);
  MatConvert_SeqAIJ_SeqAIJViennaCL(B, MATSEQAIJVIENNACL, MAT_INPLACE_MATRIX, &B);
  return 0;
}

336: static PetscErrorCode MatBindToCPU_SeqAIJViennaCL(Mat, PetscBool);
337: static PetscErrorCode MatDuplicate_SeqAIJViennaCL(Mat A, MatDuplicateOption cpvalues, Mat *B)
338: {
339:   Mat C;

341:   MatDuplicate_SeqAIJ(A, cpvalues, B);
342:   C = *B;

344:   MatBindToCPU_SeqAIJViennaCL(A, PETSC_FALSE);
345:   C->ops->bindtocpu = MatBindToCPU_SeqAIJViennaCL;

347:   C->spptr                                         = new Mat_SeqAIJViennaCL();
348:   ((Mat_SeqAIJViennaCL *)C->spptr)->tempvec        = NULL;
349:   ((Mat_SeqAIJViennaCL *)C->spptr)->mat            = NULL;
350:   ((Mat_SeqAIJViennaCL *)C->spptr)->compressed_mat = NULL;

352:   PetscObjectChangeTypeName((PetscObject)C, MATSEQAIJVIENNACL);

354:   C->offloadmask = PETSC_OFFLOAD_UNALLOCATED;

356:   /* If the source matrix is already assembled, copy the destination matrix to the GPU */
357:   if (C->assembled) MatViennaCLCopyToGPU(C);
358:   return 0;
359: }

// Read-write access to the host value array: first make sure the host copy is
// current (pulling values from the GPU if needed), then hand out a->a.
static PetscErrorCode MatSeqAIJGetArray_SeqAIJViennaCL(Mat A, PetscScalar *array[])
{
  MatViennaCLCopyFromGPU(A, (const ViennaCLAIJMatrix *)NULL);
  *array = ((Mat_SeqAIJ *)A->data)->a;
  return 0;
}

// The caller may have written through the pointer, so the CPU copy is now the
// authoritative one; the GPU copy is refreshed on the next CopyToGPU.
static PetscErrorCode MatSeqAIJRestoreArray_SeqAIJViennaCL(Mat A, PetscScalar *array[])
{
  A->offloadmask = PETSC_OFFLOAD_CPU;
  *array         = NULL;
  return 0;
}

// Read-only access to the host value array; ensures the host copy is current first.
static PetscErrorCode MatSeqAIJGetArrayRead_SeqAIJViennaCL(Mat A, const PetscScalar *array[])
{
  MatViennaCLCopyFromGPU(A, (const ViennaCLAIJMatrix *)NULL);
  *array = ((Mat_SeqAIJ *)A->data)->a;
  return 0;
}

// Read-only restore: nothing was modified, so the offload mask is left untouched.
static PetscErrorCode MatSeqAIJRestoreArrayRead_SeqAIJViennaCL(Mat A, const PetscScalar *array[])
{
  *array = NULL;
  /* No A->offloadmask = PETSC_OFFLOAD_CPU since if A->offloadmask was PETSC_OFFLOAD_BOTH, it is still BOTH */
  return 0;
}

// Write-only access: the caller will overwrite all values, so no GPU-to-host
// copy is needed before handing out the host array.
static PetscErrorCode MatSeqAIJGetArrayWrite_SeqAIJViennaCL(Mat A, PetscScalar *array[])
{
  *array = ((Mat_SeqAIJ *)A->data)->a;
  return 0;
}

// After a write, the CPU holds the only valid copy of the values.
static PetscErrorCode MatSeqAIJRestoreArrayWrite_SeqAIJViennaCL(Mat A, PetscScalar *array[])
{
  A->offloadmask = PETSC_OFFLOAD_CPU;
  *array         = NULL;
  return 0;
}

402: static PetscErrorCode MatBindToCPU_SeqAIJViennaCL(Mat A, PetscBool flg)
403: {
404:   Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;

406:   A->boundtocpu = flg;
407:   if (flg && a->inode.size) {
408:     a->inode.use = PETSC_TRUE;
409:   } else {
410:     a->inode.use = PETSC_FALSE;
411:   }
412:   if (flg) {
413:     /* make sure we have an up-to-date copy on the CPU */
414:     MatViennaCLCopyFromGPU(A, (const ViennaCLAIJMatrix *)NULL);
415:     A->ops->mult        = MatMult_SeqAIJ;
416:     A->ops->multadd     = MatMultAdd_SeqAIJ;
417:     A->ops->assemblyend = MatAssemblyEnd_SeqAIJ;
418:     A->ops->duplicate   = MatDuplicate_SeqAIJ;
419:     PetscMemzero(a->ops, sizeof(Mat_SeqAIJOps));
420:   } else {
421:     A->ops->mult        = MatMult_SeqAIJViennaCL;
422:     A->ops->multadd     = MatMultAdd_SeqAIJViennaCL;
423:     A->ops->assemblyend = MatAssemblyEnd_SeqAIJViennaCL;
424:     A->ops->destroy     = MatDestroy_SeqAIJViennaCL;
425:     A->ops->duplicate   = MatDuplicate_SeqAIJViennaCL;

427:     a->ops->getarray          = MatSeqAIJGetArray_SeqAIJViennaCL;
428:     a->ops->restorearray      = MatSeqAIJRestoreArray_SeqAIJViennaCL;
429:     a->ops->getarrayread      = MatSeqAIJGetArrayRead_SeqAIJViennaCL;
430:     a->ops->restorearrayread  = MatSeqAIJRestoreArrayRead_SeqAIJViennaCL;
431:     a->ops->getarraywrite     = MatSeqAIJGetArrayWrite_SeqAIJViennaCL;
432:     a->ops->restorearraywrite = MatSeqAIJRestoreArrayWrite_SeqAIJViennaCL;
433:   }
434:   return 0;
435: }

/*
  MatConvert_SeqAIJ_SeqAIJViennaCL - Converts a SeqAIJ matrix to the ViennaCL
  type: attaches an (empty) GPU container, installs the ViennaCL operations
  via MatBindToCPU, and switches the default vector type to VECVIENNACL.
  For MAT_INITIAL_MATRIX a copy is converted; otherwise A itself
  (MAT_INPLACE_MATRIX, as used by MatCreate_SeqAIJViennaCL).
*/
PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJViennaCL(Mat A, MatType type, MatReuse reuse, Mat *newmat)
{
  Mat B;


  if (reuse == MAT_INITIAL_MATRIX) MatDuplicate(A, MAT_COPY_VALUES, newmat);

  B = *newmat;

  // NOTE(review): B->spptr is overwritten unconditionally; if a reused B
  // already carried a container this would leak it -- confirm callers only
  // pass INITIAL/INPLACE here.
  B->spptr = new Mat_SeqAIJViennaCL();

  ((Mat_SeqAIJViennaCL *)B->spptr)->tempvec        = NULL;
  ((Mat_SeqAIJViennaCL *)B->spptr)->mat            = NULL;
  ((Mat_SeqAIJViennaCL *)B->spptr)->compressed_mat = NULL;

  // NOTE(review): ops are installed on A (not B); for the in-place path A == B,
  // which appears to be the intended use -- verify for MAT_INITIAL_MATRIX.
  MatBindToCPU_SeqAIJViennaCL(A, PETSC_FALSE);
  A->ops->bindtocpu = MatBindToCPU_SeqAIJViennaCL;

  PetscObjectChangeTypeName((PetscObject)B, MATSEQAIJVIENNACL);
  PetscFree(B->defaultvectype);
  PetscStrallocpy(VECVIENNACL, &B->defaultvectype);

  PetscObjectComposeFunction((PetscObject)A, "MatConvert_seqaij_seqaijviennacl_C", MatConvert_SeqAIJ_SeqAIJViennaCL);
  PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijviennacl_seqdense_C", MatProductSetFromOptions_SeqAIJ_SeqDense);
  PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijviennacl_seqaij_C", MatProductSetFromOptions_SeqAIJ);

  B->offloadmask = PETSC_OFFLOAD_UNALLOCATED;

  /* If the source matrix is already assembled, copy the destination matrix to the GPU */
  if (B->assembled) MatViennaCLCopyToGPU(B);
  return 0;
}

471: /*MC
472:    MATSEQAIJVIENNACL - MATAIJVIENNACL = "aijviennacl" = "seqaijviennacl" - A matrix type to be used for sparse matrices.

 474:    A matrix type whose data resides on GPUs. These matrices are in CSR format by
475:    default. All matrix calculations are performed using the ViennaCL library.

477:    Options Database Keys:
 478: +  -mat_type aijviennacl - sets the matrix type to `MATSEQAIJVIENNACL` during a call to `MatSetFromOptions()`
479: .  -mat_viennacl_storage_format csr - sets the storage format of matrices for `MatMult()` during a call to `MatSetFromOptions()`.
480: -  -mat_viennacl_mult_storage_format csr - sets the storage format of matrices for `MatMult()` during a call to `MatSetFromOptions()`.

482:   Level: beginner

484: .seealso: `MatCreateSeqAIJViennaCL()`, `MATAIJVIENNACL`, `MatCreateAIJViennaCL()`
485: M*/

487: PETSC_EXTERN PetscErrorCode MatSolverTypeRegister_ViennaCL(void)
488: {
489:   MatSolverTypeRegister(MATSOLVERPETSC, MATSEQAIJVIENNACL, MAT_FACTOR_LU, MatGetFactor_seqaij_petsc);
490:   MatSolverTypeRegister(MATSOLVERPETSC, MATSEQAIJVIENNACL, MAT_FACTOR_CHOLESKY, MatGetFactor_seqaij_petsc);
491:   MatSolverTypeRegister(MATSOLVERPETSC, MATSEQAIJVIENNACL, MAT_FACTOR_ILU, MatGetFactor_seqaij_petsc);
492:   MatSolverTypeRegister(MATSOLVERPETSC, MATSEQAIJVIENNACL, MAT_FACTOR_ICC, MatGetFactor_seqaij_petsc);
493:   return 0;
494: }