Actual source code: mmdense.c
/*
  Support for the parallel dense matrix vector multiply
*/
#include <../src/mat/impls/dense/mpi/mpidense.h>
#include <petscblaslapack.h>
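
/* Build the communication needed to apply the parallel matrix: the column layout is
   gathered onto every rank with an allgather-patterned PetscSF, so each process ends up
   with a full copy of the input vector entries needed by its local dense multiply. */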
PetscErrorCode MatSetUpMultiply_MPIDense(Mat mat)
{
  Mat_MPIDense *mdn = (Mat_MPIDense *)mat->data;

  PetscFunctionBegin;
  if (!mdn->Mvctx) {
    /* Create local vector that is used to scatter into */
    PetscCall(VecDestroy(&mdn->lvec));
    if (mdn->A) { PetscCall(MatCreateVecs(mdn->A, &mdn->lvec, NULL)); }
    PetscCall(PetscLayoutSetUp(mat->cmap));
    PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)mat), &mdn->Mvctx));
    PetscCall(PetscSFSetGraphWithPattern(mdn->Mvctx, mat->cmap, PETSCSF_PATTERN_ALLGATHER));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatCreateSubMatrices_MPIDense_Local(Mat, PetscInt, const IS[], const IS[], MatReuse, Mat *);
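
/* Extract the requested sequential dense submatrices from the parallel dense matrix C.
   The index sets are processed in stages so that the amount of message buffer space in use
   at any one time stays bounded (roughly 20 MB of row-index data per stage). */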
PetscErrorCode MatCreateSubMatrices_MPIDense(Mat C, PetscInt ismax, const IS isrow[], const IS iscol[], MatReuse scall, Mat *submat[])
{
  PetscInt nmax, nstages_local, nstages, i, pos, max_no;

  PetscFunctionBegin;
  /* Allocate memory to hold all the submatrices */
  if (scall != MAT_REUSE_MATRIX) PetscCall(PetscCalloc1(ismax + 1, submat));
  /* Determine the number of stages through which submatrices are done */
  nmax = 20 * 1000000 / (C->cmap->N * sizeof(PetscInt));
  if (!nmax) nmax = 1;
  nstages_local = ismax / nmax + ((ismax % nmax) ? 1 : 0);

  /* Make sure every processor loops through the nstages */
  PetscCallMPI(MPIU_Allreduce(&nstages_local, &nstages, 1, MPIU_INT, MPI_MAX, PetscObjectComm((PetscObject)C)));

  for (i = 0, pos = 0; i < nstages; i++) {
    if (pos + nmax <= ismax) max_no = nmax;
    else if (pos == ismax) max_no = 0;
    else max_no = ismax - pos;
    PetscCall(MatCreateSubMatrices_MPIDense_Local(C, max_no, isrow + pos, iscol + pos, scall, *submat + pos));
    pos += max_no;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
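
/* Does the communication and assembly for one stage of MatCreateSubMatrices_MPIDense.
   Each process scans its index sets, requests the off-process rows it needs from their
   owners, and copies the rows it owns directly; the owners reply with the full rows
   (all N columns), from which the requested columns are then extracted. */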
static PetscErrorCode MatCreateSubMatrices_MPIDense_Local(Mat C, PetscInt ismax, const IS isrow[], const IS iscol[], MatReuse scall, Mat *submats)
{
  Mat_MPIDense *c = (Mat_MPIDense *)C->data;
  Mat A = c->A;
  Mat_SeqDense *a = (Mat_SeqDense *)A->data, *mat;
  PetscMPIInt rank, size, tag0, tag1, idex, end, i, proc, nrqs, *rtable, *pa, nrqr;
  PetscInt N = C->cmap->N, rstart = C->rmap->rstart, count;
  const PetscInt **irow, **icol, *irow_i;
  PetscInt *nrow, *ncol, *w1, *w3, *w4, start, inrqr;
  PetscInt **sbuf1, m, ct1, **rbuf1, row;
  PetscInt msz, **ptr, *ctr, *tmp, bsz;
  PetscInt is_no, jmax, **rmap, *rmap_i;
  PetscInt ctr_j, *sbuf1_j, *rbuf1_i;
  MPI_Request *s_waits1, *r_waits1, *s_waits2, *r_waits2;
  MPI_Status *r_status1, *r_status2, *s_status1, *s_status2;
  MPI_Comm comm;
  PetscScalar **rbuf2, **sbuf2;
  PetscBool sorted;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
  tag0 = ((PetscObject)C)->tag;
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  m = C->rmap->N;

  /* Get some new tags to keep the communication clean */
  PetscCall(PetscObjectGetNewTag((PetscObject)C, &tag1));

  /* Check that the row and column indices are sorted */
  for (PetscInt i = 0; i < ismax; i++) {
    PetscCall(ISSorted(isrow[i], &sorted));
    PetscCheck(sorted, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "ISrow is not sorted");
    PetscCall(ISSorted(iscol[i], &sorted));
    PetscCheck(sorted, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "IScol is not sorted");
  }

  PetscCall(PetscMalloc5(ismax, (PetscInt ***)&irow, ismax, (PetscInt ***)&icol, ismax, &nrow, ismax, &ncol, m, &rtable));
  for (PetscInt i = 0; i < ismax; i++) {
    PetscCall(ISGetIndices(isrow[i], &irow[i]));
    PetscCall(ISGetIndices(iscol[i], &icol[i]));
    PetscCall(ISGetLocalSize(isrow[i], &nrow[i]));
    PetscCall(ISGetLocalSize(iscol[i], &ncol[i]));
  }

  /* Create the lookup table for the mapping: global row -> owning proc */
  for (PetscMPIInt i = 0, j = 0; i < size; i++) {
    jmax = C->rmap->range[i + 1];
    for (; j < jmax; j++) rtable[j] = i;
  }

  /* Evaluate communication - mesg to who, length of mesg, and buffer space
     required. Based on this, buffers are allocated, and data copied into them. */
  PetscCall(PetscMalloc3(2 * size, &w1, size, &w3, size, &w4));
  PetscCall(PetscArrayzero(w1, size * 2)); /* initialize work vector */
  PetscCall(PetscArrayzero(w3, size));     /* initialize work vector */
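  /* For each index set, w4[p] counts the rows owned by process p; w3[p] counts how many
     index sets need at least one row from p, and w1[2*p] accumulates the total number of
     row indices that must be sent to p. */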
  for (PetscInt i = 0; i < ismax; i++) {
    PetscCall(PetscArrayzero(w4, size)); /* initialize work vector */
    jmax = nrow[i];
    irow_i = irow[i];
    for (PetscInt j = 0; j < jmax; j++) {
      row = irow_i[j];
      proc = rtable[row];
      w4[proc]++;
    }
    for (PetscMPIInt j = 0; j < size; j++) {
      if (w4[j]) {
        w1[2 * j] += w4[j];
        w3[j]++;
      }
    }
  }

  nrqs = 0;         /* no of outgoing messages */
  msz = 0;          /* total mesg length (for all procs) */
  w1[2 * rank] = 0; /* no mesg sent to self */
  w3[rank] = 0;
  for (PetscMPIInt i = 0; i < size; i++) {
    if (w1[2 * i]) {
      w1[2 * i + 1] = 1; /* there exists a message to proc i */
      nrqs++;
    }
  }
  PetscCall(PetscMalloc1(nrqs + 1, &pa)); /* (proc array) */
  for (PetscMPIInt i = 0, j = 0; i < size; i++) {
    if (w1[2 * i]) {
      pa[j] = i;
      j++;
    }
  }

  /* Each message would have a header = 1 + 2*(no of IS) + data */
  for (PetscMPIInt i = 0; i < nrqs; i++) {
    PetscMPIInt j = pa[i];
    w1[2 * j] += w1[2 * j + 1] + 2 * w3[j];
    msz += w1[2 * j];
  }
  /* Do a global reduction to determine how many messages to expect */
  PetscCall(PetscMaxSum(comm, w1, &bsz, &inrqr));
  PetscCall(PetscMPIIntCast(inrqr, &nrqr));

  /* Allocate memory for recv buffers. Make sure rbuf1[0] exists by adding 1 to the buffer length */
  PetscCall(PetscMalloc1(nrqr + 1, &rbuf1));
  PetscCall(PetscMalloc1(nrqr * bsz, &rbuf1[0]));
  for (PetscInt i = 1; i < nrqr; ++i) rbuf1[i] = rbuf1[i - 1] + bsz;

  /* Post the receives */
  PetscCall(PetscMalloc1(nrqr + 1, &r_waits1));
  for (PetscInt i = 0; i < nrqr; ++i) PetscCallMPI(MPIU_Irecv(rbuf1[i], bsz, MPIU_INT, MPI_ANY_SOURCE, tag0, comm, r_waits1 + i));

  /* Allocate memory for outgoing messages */
  PetscCall(PetscMalloc4(size, &sbuf1, size, &ptr, 2 * msz, &tmp, size, &ctr));
  PetscCall(PetscArrayzero(sbuf1, size));
  PetscCall(PetscArrayzero(ptr, size));
  {
    PetscInt *iptr = tmp, ict = 0;
    for (PetscMPIInt i = 0; i < nrqs; i++) {
      PetscMPIInt j = pa[i];
      iptr += ict;
      sbuf1[j] = iptr;
      ict = w1[2 * j];
    }
  }

  /* Form the outgoing messages */
  /* Initialize the header space */
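  /* Layout of each request message sent to process p: sbuf1[p][0] holds the number of index
     sets that need rows from p, followed by that many (IS number, row count) pairs, followed
     by the requested global row indices themselves. */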
  for (PetscMPIInt i = 0; i < nrqs; i++) {
    PetscInt j = pa[i];
    sbuf1[j][0] = 0;
    PetscCall(PetscArrayzero(sbuf1[j] + 1, 2 * w3[j]));
    ptr[j] = sbuf1[j] + 2 * w3[j] + 1;
  }

  /* Parse the isrow and copy data into outbuf */
  for (PetscInt i = 0; i < ismax; i++) {
    PetscCall(PetscArrayzero(ctr, size));
    irow_i = irow[i];
    jmax = nrow[i];
    for (PetscInt j = 0; j < jmax; j++) { /* parse the indices of each IS */
      row = irow_i[j];
      proc = rtable[row];
      if (proc != rank) { /* copy to the outgoing buf */
        ctr[proc]++;
        *ptr[proc] = row;
        ptr[proc]++;
      }
    }
    /* Update the headers for the current IS */
    for (PetscMPIInt j = 0; j < size; j++) { /* Can Optimise this loop too */
      if ((ctr_j = ctr[j])) {
        PetscInt k;
        sbuf1_j = sbuf1[j];
        k = ++sbuf1_j[0];
        sbuf1_j[2 * k] = ctr_j;
        sbuf1_j[2 * k - 1] = i;
      }
    }
  }

  /* Now post the sends */
  PetscCall(PetscMalloc1(nrqs + 1, &s_waits1));
  for (PetscMPIInt i = 0; i < nrqs; ++i) {
    PetscMPIInt j = pa[i];
    PetscCallMPI(MPIU_Isend(sbuf1[j], w1[2 * j], MPIU_INT, j, tag0, comm, s_waits1 + i));
  }

  /* Post receives to capture the row data from other procs */
  PetscCall(PetscMalloc1(nrqs + 1, &r_waits2));
  PetscCall(PetscMalloc1(nrqs + 1, &rbuf2));
  for (PetscMPIInt i = 0; i < nrqs; i++) {
    PetscMPIInt j = pa[i];
    count = (w1[2 * j] - (2 * sbuf1[j][0] + 1)) * N;
    PetscCall(PetscMalloc1(count + 1, &rbuf2[i]));
    PetscCallMPI(MPIU_Irecv(rbuf2[i], count, MPIU_SCALAR, j, tag1, comm, r_waits2 + i));
  }

  /* Receive the messages (requested row numbers), then pack and send off the row values
     to the requesting processes */

  PetscCall(PetscMalloc1(nrqr + 1, &s_waits2));
  PetscCall(PetscMalloc1(nrqr + 1, &r_status1));
  PetscCall(PetscMalloc1(nrqr + 1, &sbuf2));
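
  /* The local dense block is stored column-major with leading dimension lda, so a requested
     row is gathered by starting at v + row and striding by lda; every reply carries the full
     N columns of each requested row. */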
  {
    PetscScalar *sbuf2_i;
    const PetscScalar *v_start, *v;
    PetscMPIInt s_proc;

    PetscCall(MatDenseGetArrayRead(A, &v));
    for (PetscMPIInt i = 0; i < nrqr; ++i) {
      PetscCallMPI(MPI_Waitany(nrqr, r_waits1, &idex, r_status1 + i));
      s_proc = r_status1[i].MPI_SOURCE; /* rank that requested these rows and will receive the values */
      rbuf1_i = rbuf1[idex];            /* actual message from s_proc */
      /* no of rows requested = end - start: start skips past the header of the received
         buffer, while end is the total length of the buffer */
      start = 2 * rbuf1_i[0] + 1;
      PetscCallMPI(MPI_Get_count(r_status1 + i, MPIU_INT, &end));
      /* allocate memory sufficient to hold all the row values */
      PetscCall(PetscMalloc1((end - start) * N, &sbuf2[idex]));
      sbuf2_i = sbuf2[idex];
      /* Now pack the data */
      for (PetscInt j = start; j < end; j++) {
        row = rbuf1_i[j] - rstart;
        v_start = v + row;
        for (PetscInt k = 0; k < N; k++) {
          sbuf2_i[0] = v_start[0];
          sbuf2_i++;
          v_start += a->lda;
        }
      }
      /* Now send off the data */
      PetscCallMPI(MPIU_Isend(sbuf2[idex], (end - start) * N, MPIU_SCALAR, s_proc, tag1, comm, s_waits2 + i));
    }
    PetscCall(MatDenseRestoreArrayRead(A, &v));
  }
  /* End Send-Recv of IS + row_numbers */
  PetscCall(PetscFree(r_status1));
  PetscCall(PetscFree(r_waits1));
  PetscCall(PetscMalloc1(nrqs + 1, &s_status1));
  if (nrqs) PetscCallMPI(MPI_Waitall(nrqs, s_waits1, s_status1));
  PetscCall(PetscFree(s_status1));
  PetscCall(PetscFree(s_waits1));

  /* Create the submatrices */
  if (scall == MAT_REUSE_MATRIX) {
    for (PetscInt i = 0; i < ismax; i++) {
      mat = (Mat_SeqDense *)submats[i]->data;
      PetscCheck(!(submats[i]->rmap->n != nrow[i]) && !(submats[i]->cmap->n != ncol[i]), PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Cannot reuse matrix. wrong size");
      PetscCall(PetscArrayzero(mat->v, submats[i]->rmap->n * submats[i]->cmap->n));

      submats[i]->factortype = C->factortype;
    }
  } else {
    for (PetscInt i = 0; i < ismax; i++) {
      PetscCall(MatCreate(PETSC_COMM_SELF, submats + i));
      PetscCall(MatSetSizes(submats[i], nrow[i], ncol[i], nrow[i], ncol[i]));
      PetscCall(MatSetType(submats[i], ((PetscObject)A)->type_name));
      PetscCall(MatSeqDenseSetPreallocation(submats[i], NULL));
    }
  }

  /* Assemble the locally owned rows of the submatrices */
  {
    PetscInt col;
    PetscScalar *imat_v, *mat_v, *imat_vi, *mat_vi;

    for (PetscInt i = 0; i < ismax; i++) {
      mat = (Mat_SeqDense *)submats[i]->data;
      mat_v = a->v;
      imat_v = mat->v;
      irow_i = irow[i];
      m = nrow[i];
      for (PetscInt j = 0; j < m; j++) {
        row = irow_i[j];
        proc = rtable[row];
        if (proc == rank) {
          row = row - rstart;
          mat_vi = mat_v + row;
          imat_vi = imat_v + j;
          for (PetscInt k = 0; k < ncol[i]; k++) {
            col = icol[i][k];
            imat_vi[k * m] = mat_vi[col * a->lda];
          }
        }
      }
    }
  }

  /* Create row map: this maps C's row numbers to submatrix row numbers, for each submatrix */
  /* this is a very expensive operation wrt memory usage */
  PetscCall(PetscMalloc1(ismax, &rmap));
  PetscCall(PetscCalloc1(ismax * C->rmap->N, &rmap[0]));
  for (PetscInt i = 1; i < ismax; i++) rmap[i] = rmap[i - 1] + C->rmap->N;
  for (PetscInt i = 0; i < ismax; i++) {
    rmap_i = rmap[i];
    irow_i = irow[i];
    jmax = nrow[i];
    for (PetscInt j = 0; j < jmax; j++) rmap_i[irow_i[j]] = j;
  }

  /* Now receive the row values and assemble the rest of the matrix */
  PetscCall(PetscMalloc1(nrqs + 1, &r_status2));
  {
    PetscInt is_max, tmp1, col, *sbuf1_i, is_sz;
    PetscScalar *rbuf2_i, *imat_v, *imat_vi;

    for (tmp1 = 0; tmp1 < nrqs; tmp1++) { /* For each message */
      PetscCallMPI(MPI_Waitany(nrqs, r_waits2, &i, r_status2 + tmp1));
      /* Now dig out the corresponding sbuf1, which contains the IS data structure */
      sbuf1_i = sbuf1[pa[i]];
      is_max = sbuf1_i[0];
      ct1 = 2 * is_max + 1;
      rbuf2_i = rbuf2[i];
      for (PetscInt j = 1; j <= is_max; j++) { /* For each IS belonging to the message */
        is_no = sbuf1_i[2 * j - 1];
        is_sz = sbuf1_i[2 * j];
        mat = (Mat_SeqDense *)submats[is_no]->data;
        imat_v = mat->v;
        rmap_i = rmap[is_no];
        m = nrow[is_no];
        for (PetscInt k = 0; k < is_sz; k++, rbuf2_i += N) { /* For each row */
          row = sbuf1_i[ct1];
          ct1++;
          row = rmap_i[row];
          imat_vi = imat_v + row;
          for (PetscInt l = 0; l < ncol[is_no]; l++) { /* For each col */
            col = icol[is_no][l];
            imat_vi[l * m] = rbuf2_i[col];
          }
        }
      }
    }
  }
  /* End Send-Recv of row_values */
  PetscCall(PetscFree(r_status2));
  PetscCall(PetscFree(r_waits2));
  PetscCall(PetscMalloc1(nrqr + 1, &s_status2));
  if (nrqr) PetscCallMPI(MPI_Waitall(nrqr, s_waits2, s_status2));
  PetscCall(PetscFree(s_status2));
  PetscCall(PetscFree(s_waits2));

  /* Restore the indices */
  for (PetscInt i = 0; i < ismax; i++) {
    PetscCall(ISRestoreIndices(isrow[i], irow + i));
    PetscCall(ISRestoreIndices(iscol[i], icol + i));
  }

  PetscCall(PetscFree5(*(PetscInt ***)&irow, *(PetscInt ***)&icol, nrow, ncol, rtable));
  PetscCall(PetscFree3(w1, w3, w4));
  PetscCall(PetscFree(pa));

  for (PetscMPIInt i = 0; i < nrqs; ++i) PetscCall(PetscFree(rbuf2[i]));
  PetscCall(PetscFree(rbuf2));
  PetscCall(PetscFree4(sbuf1, ptr, tmp, ctr));
  PetscCall(PetscFree(rbuf1[0]));
  PetscCall(PetscFree(rbuf1));

  for (PetscMPIInt i = 0; i < nrqr; ++i) PetscCall(PetscFree(sbuf2[i]));
  PetscCall(PetscFree(sbuf2));
  PetscCall(PetscFree(rmap[0]));
  PetscCall(PetscFree(rmap));

  for (PetscInt i = 0; i < ismax; i++) {
    PetscCall(MatAssemblyBegin(submats[i], MAT_FINAL_ASSEMBLY));
    PetscCall(MatAssemblyEnd(submats[i], MAT_FINAL_ASSEMBLY));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
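
/* Scale the parallel dense matrix: each process stores its rows of the global matrix in the
   local dense block, so scaling that block scales the entire matrix without communication. */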
PETSC_INTERN PetscErrorCode MatScale_MPIDense(Mat inA, PetscScalar alpha)
{
  Mat_MPIDense *A = (Mat_MPIDense *)inA->data;

  PetscFunctionBegin;
  PetscCall(MatScale(A->A, alpha));
  PetscFunctionReturn(PETSC_SUCCESS);
}
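
/*
  Illustrative usage (not part of this file): a minimal sketch of how the submatrix machinery
  above is reached through the public API, assuming C is an existing MATMPIDENSE matrix. The
  index-set sizes below are arbitrary example values; note that the MPIDense implementation
  requires the row and column index sets to be sorted, as checked in
  MatCreateSubMatrices_MPIDense_Local().

    IS  isrow, iscol;
    Mat *submat;

    PetscCall(ISCreateStride(PETSC_COMM_SELF, 5, 0, 1, &isrow));  // rows 0..4, sorted
    PetscCall(ISCreateStride(PETSC_COMM_SELF, 3, 0, 1, &iscol));  // cols 0..2, sorted
    PetscCall(MatCreateSubMatrices(C, 1, &isrow, &iscol, MAT_INITIAL_MATRIX, &submat));
    // ... use the sequential dense submatrix submat[0] ...
    PetscCall(MatDestroySubMatrices(1, &submat));
    PetscCall(ISDestroy(&isrow));
    PetscCall(ISDestroy(&iscol));
*/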