Actual source code: dainterp.c
1: /*
2: Code for interpolating between grids represented by DMDAs
3: */
5: /*
6: For linear elements there are two branches of code that compute the interpolation; they are intended to produce identical results, and current tests show that they do.
7: The "new version" does not handle periodic domains; the old version does. Change NEWVERSION to 1 to compile in the new version.
8: Once we are certain the two branches always produce identical results, the new version will be removed or merged in. It is kept for now because some
9: consider it cleaner, but the old version remains enabled since it handles the periodic case.
10: */
11: #define NEWVERSION 0
13: #include <petsc/private/dmdaimpl.h>
15: /*
16: Since the interpolation uses MATMAIJ for dof > 1, we convert requests for non-MATAIJ-based matrices to MATAIJ.
17: This is a bit of a hack; the reason for it is partly that -dm_mat_type defines the
18: matrix type for both the operator matrices and the interpolation matrices, so that users
19: can select matrix types based on MATAIJ for accelerators
20: */
21: static PetscErrorCode ConvertToAIJ(MatType intype, MatType *outtype)
22: {
23: PetscInt i;
24: char const *types[3] = {MATAIJ, MATSEQAIJ, MATMPIAIJ};
25: PetscBool flg;
27: PetscFunctionBegin;
28: *outtype = MATAIJ;
29: for (i = 0; i < 3; i++) {
30: PetscCall(PetscStrbeginswith(intype, types[i], &flg));
31: if (flg) {
32: *outtype = intype;
33: PetscFunctionReturn(PETSC_SUCCESS);
34: }
35: }
36: PetscFunctionReturn(PETSC_SUCCESS);
37: }
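/*
   Editorial note: because the test above uses PetscStrbeginswith(), any requested type whose
   name begins with "aij", "seqaij", or "mpiaij" is passed through unchanged, while anything
   else falls back to MATAIJ; for example (assuming the standard PETSc type-name strings)

      ConvertToAIJ(MATAIJCUSPARSE, &outtype);   then outtype == MATAIJCUSPARSE ("aijcusparse")
      ConvertToAIJ(MATDENSE, &outtype);         then outtype == MATAIJ
*/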
39: static PetscErrorCode DMCreateInterpolation_DA_1D_Q1(DM dac, DM daf, Mat *A)
40: {
41: PetscInt i, i_start, m_f, Mx;
42: const PetscInt *idx_f, *idx_c;
43: PetscInt m_ghost, m_ghost_c;
44: PetscInt row, col, i_start_ghost, mx, m_c, nc, ratio;
45: PetscInt i_c, i_start_c, i_start_ghost_c, cols[2], dof;
46: PetscScalar v[2], x;
47: Mat mat;
48: DMBoundaryType bx;
49: ISLocalToGlobalMapping ltog_f, ltog_c;
50: MatType mattype;
52: PetscFunctionBegin;
53: PetscCall(DMDAGetInfo(dac, NULL, &Mx, NULL, NULL, NULL, NULL, NULL, NULL, NULL, &bx, NULL, NULL, NULL));
54: PetscCall(DMDAGetInfo(daf, NULL, &mx, NULL, NULL, NULL, NULL, NULL, &dof, NULL, NULL, NULL, NULL, NULL));
55: if (bx == DM_BOUNDARY_PERIODIC) {
56: ratio = mx / Mx;
57: PetscCheck(ratio * Mx == mx, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: mx/Mx must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT, mx, Mx);
58: } else {
59: ratio = (mx - 1) / (Mx - 1);
60: PetscCheck(ratio * (Mx - 1) == mx - 1, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT, mx, Mx);
61: }
63: PetscCall(DMDAGetCorners(daf, &i_start, NULL, NULL, &m_f, NULL, NULL));
64: PetscCall(DMDAGetGhostCorners(daf, &i_start_ghost, NULL, NULL, &m_ghost, NULL, NULL));
65: PetscCall(DMGetLocalToGlobalMapping(daf, &ltog_f));
66: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f, &idx_f));
68: PetscCall(DMDAGetCorners(dac, &i_start_c, NULL, NULL, &m_c, NULL, NULL));
69: PetscCall(DMDAGetGhostCorners(dac, &i_start_ghost_c, NULL, NULL, &m_ghost_c, NULL, NULL));
70: PetscCall(DMGetLocalToGlobalMapping(dac, &ltog_c));
71: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c, &idx_c));
73: /* create interpolation matrix */
74: PetscCall(MatCreate(PetscObjectComm((PetscObject)dac), &mat));
75: #if defined(PETSC_HAVE_CUDA)
76: /*
77: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
78: we don't want the original unconverted matrix copied to the GPU
79: */
80: if (dof > 1) PetscCall(MatBindToCPU(mat, PETSC_TRUE));
81: #endif
82: PetscCall(MatSetSizes(mat, m_f, m_c, mx, Mx));
83: PetscCall(ConvertToAIJ(dac->mattype, &mattype));
84: PetscCall(MatSetType(mat, mattype));
85: PetscCall(MatSeqAIJSetPreallocation(mat, 2, NULL));
86: PetscCall(MatMPIAIJSetPreallocation(mat, 2, NULL, 1, NULL));
88: /* loop over local fine grid nodes, setting the interpolation for each */
89: if (!NEWVERSION) {
90: for (i = i_start; i < i_start + m_f; i++) {
91: /* convert to local "natural" numbering and then to PETSc global numbering */
92: row = idx_f[i - i_start_ghost];
94: i_c = (i / ratio); /* coarse grid node to left of fine grid node */
95: PetscCheck(i_c >= i_start_ghost_c, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA, i_start %" PetscInt_FMT " i_c %" PetscInt_FMT " i_start_ghost_c %" PetscInt_FMT, i_start, i_c, i_start_ghost_c);
97: /*
98: Only include those interpolation points that are truly
99: nonzero. Note this is very important for the final grid point
100: in the x direction, since it has no right neighbor
101: */
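      /*
         Worked example (editorial, illustrative): with ratio = 2, fine node i = 3 gives
         i_c = 1 and x = (3 - 2)/2 = 0.5, so the row receives weight 1 - x = 0.5 for coarse
         node 1 and x = 0.5 for coarse node 2; a fine node sitting on a coarse node
         (say i = 2, i_c = 1) has x = 0 and receives only the single weight 1.0.
      */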
102: x = ((PetscReal)(i - i_c * ratio)) / ((PetscReal)ratio);
103: nc = 0;
104: /* the coarse node to the left; or we are right on it */
105: col = (i_c - i_start_ghost_c);
106: cols[nc] = idx_c[col];
107: v[nc++] = -x + 1.0;
108: /* one right? */
109: if (i_c * ratio != i) {
110: cols[nc] = idx_c[col + 1];
111: v[nc++] = x;
112: }
113: PetscCall(MatSetValues(mat, 1, &row, nc, cols, v, INSERT_VALUES));
114: }
116: } else {
117: PetscScalar *xi;
118: PetscInt li, nxi, n;
119: PetscScalar Ni[2];
121: /* compute local coordinate arrays */
122: nxi = ratio + 1;
123: PetscCall(PetscMalloc1(nxi, &xi));
124: for (li = 0; li < nxi; li++) xi[li] = -1.0 + (PetscScalar)li * (2.0 / (PetscScalar)(nxi - 1));
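      /*
         Editorial note: for ratio = 2 this gives nxi = 3 and xi = {-1, 0, 1}, the reference
         coordinates of the fine nodes inside one coarse element; the linear shape functions
         evaluated below are then Ni[0] = 0.5*(1 - xi[li]) and Ni[1] = 0.5*(1 + xi[li]).
      */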
126: for (i = i_start; i < i_start + m_f; i++) {
127: /* convert to local "natural" numbering and then to PETSc global numbering */
128: row = idx_f[i - i_start_ghost];
130: i_c = (i / ratio); /* coarse grid node to left of fine grid node */
131: PetscCheck(i_c >= i_start_ghost_c, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA, i_start %" PetscInt_FMT " i_c %" PetscInt_FMT " i_start_ghost_c %" PetscInt_FMT, i_start, i_c, i_start_ghost_c); /* remainders */
132: li = i - ratio * (i / ratio);
133: if (i == mx - 1) li = nxi - 1;
135: /* corners */
136: col = (i_c - i_start_ghost_c);
137: cols[0] = idx_c[col];
138: Ni[0] = 1.0;
139: if ((li == 0) || (li == nxi - 1)) {
140: PetscCall(MatSetValue(mat, row, cols[0], Ni[0], INSERT_VALUES));
141: continue;
142: }
144: /* edges + interior */
145: /* remainders */
146: if (i == mx - 1) i_c--;
148: col = (i_c - i_start_ghost_c);
149: cols[0] = idx_c[col]; /* the coarse node to the left; or we are right on it */
150: cols[1] = idx_c[col + 1];
152: Ni[0] = 0.5 * (1.0 - xi[li]);
153: Ni[1] = 0.5 * (1.0 + xi[li]);
154: for (n = 0; n < 2; n++) {
155: if (PetscAbsScalar(Ni[n]) < 1.0e-32) cols[n] = -1;
156: }
157: PetscCall(MatSetValues(mat, 1, &row, 2, cols, Ni, INSERT_VALUES));
158: }
159: PetscCall(PetscFree(xi));
160: }
161: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f, &idx_f));
162: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c, &idx_c));
163: PetscCall(MatAssemblyBegin(mat, MAT_FINAL_ASSEMBLY));
164: PetscCall(MatAssemblyEnd(mat, MAT_FINAL_ASSEMBLY));
165: PetscCall(MatCreateMAIJ(mat, dof, A));
166: PetscCall(MatDestroy(&mat));
167: PetscFunctionReturn(PETSC_SUCCESS);
168: }
170: static PetscErrorCode DMCreateInterpolation_DA_1D_Q0(DM dac, DM daf, Mat *A)
171: {
172: PetscInt i, i_start, m_f, Mx;
173: const PetscInt *idx_f, *idx_c;
174: ISLocalToGlobalMapping ltog_f, ltog_c;
175: PetscInt m_ghost, m_ghost_c;
176: PetscInt row, col, i_start_ghost, mx, m_c, nc, ratio;
177: PetscInt i_c, i_start_c, i_start_ghost_c, cols[2], dof;
178: PetscScalar v[2], x;
179: Mat mat;
180: DMBoundaryType bx;
181: MatType mattype;
183: PetscFunctionBegin;
184: PetscCall(DMDAGetInfo(dac, NULL, &Mx, NULL, NULL, NULL, NULL, NULL, NULL, NULL, &bx, NULL, NULL, NULL));
185: PetscCall(DMDAGetInfo(daf, NULL, &mx, NULL, NULL, NULL, NULL, NULL, &dof, NULL, NULL, NULL, NULL, NULL));
186: if (bx == DM_BOUNDARY_PERIODIC) {
187: PetscCheck(Mx, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of x coarse grid points %" PetscInt_FMT " must be positive", Mx);
188: ratio = mx / Mx;
189: PetscCheck(ratio * Mx == mx, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: mx/Mx must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT, mx, Mx);
190: } else {
191: PetscCheck(Mx >= 2, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of x coarse grid points %" PetscInt_FMT " must be greater than 1", Mx);
192: ratio = (mx - 1) / (Mx - 1);
193: PetscCheck(ratio * (Mx - 1) == mx - 1, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT, mx, Mx);
194: }
196: PetscCall(DMDAGetCorners(daf, &i_start, NULL, NULL, &m_f, NULL, NULL));
197: PetscCall(DMDAGetGhostCorners(daf, &i_start_ghost, NULL, NULL, &m_ghost, NULL, NULL));
198: PetscCall(DMGetLocalToGlobalMapping(daf, &ltog_f));
199: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f, &idx_f));
201: PetscCall(DMDAGetCorners(dac, &i_start_c, NULL, NULL, &m_c, NULL, NULL));
202: PetscCall(DMDAGetGhostCorners(dac, &i_start_ghost_c, NULL, NULL, &m_ghost_c, NULL, NULL));
203: PetscCall(DMGetLocalToGlobalMapping(dac, &ltog_c));
204: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c, &idx_c));
206: /* create interpolation matrix */
207: PetscCall(MatCreate(PetscObjectComm((PetscObject)dac), &mat));
208: #if defined(PETSC_HAVE_CUDA)
209: /*
210: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
211: we don't want the original unconverted matrix copied to the GPU
212: */
213: if (dof > 1) PetscCall(MatBindToCPU(mat, PETSC_TRUE));
214: #endif
215: PetscCall(MatSetSizes(mat, m_f, m_c, mx, Mx));
216: PetscCall(ConvertToAIJ(dac->mattype, &mattype));
217: PetscCall(MatSetType(mat, mattype));
218: PetscCall(MatSeqAIJSetPreallocation(mat, 2, NULL));
219: PetscCall(MatMPIAIJSetPreallocation(mat, 2, NULL, 0, NULL));
221: /* loop over local fine grid nodes, setting the interpolation for each */
222: for (i = i_start; i < i_start + m_f; i++) {
223: /* convert to local "natural" numbering and then to PETSc global numbering */
224: row = idx_f[i - i_start_ghost];
226: i_c = (i / ratio); /* coarse grid node to left of fine grid node */
228: /*
229: Only include those interpolation points that are truly
230: nonzero. Note this is very important for the final grid point
231: in the x direction, since it has no right neighbor
232: */
233: x = ((PetscReal)(i - i_c * ratio)) / ((PetscReal)ratio);
234: nc = 0;
235: /* the coarse node to the left; or we are right on it */
236: col = (i_c - i_start_ghost_c);
237: cols[nc] = idx_c[col];
238: v[nc++] = -x + 1.0;
239: /* one right? */
240: if (i_c * ratio != i) {
241: cols[nc] = idx_c[col + 1];
242: v[nc++] = x;
243: }
244: PetscCall(MatSetValues(mat, 1, &row, nc, cols, v, INSERT_VALUES));
245: }
246: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f, &idx_f));
247: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c, &idx_c));
248: PetscCall(MatAssemblyBegin(mat, MAT_FINAL_ASSEMBLY));
249: PetscCall(MatAssemblyEnd(mat, MAT_FINAL_ASSEMBLY));
250: PetscCall(MatCreateMAIJ(mat, dof, A));
251: PetscCall(MatDestroy(&mat));
252: PetscCall(PetscLogFlops(5.0 * m_f));
253: PetscFunctionReturn(PETSC_SUCCESS);
254: }
256: static PetscErrorCode DMCreateInterpolation_DA_2D_Q1(DM dac, DM daf, Mat *A)
257: {
258: PetscInt i, j, i_start, j_start, m_f, n_f, Mx, My, dof;
259: const PetscInt *idx_c, *idx_f;
260: ISLocalToGlobalMapping ltog_f, ltog_c;
261: PetscInt m_ghost, n_ghost, m_ghost_c, n_ghost_c, *dnz, *onz;
262: PetscInt row, col, i_start_ghost, j_start_ghost, cols[4], mx, m_c, my, nc, ratioi, ratioj;
263: PetscInt i_c, j_c, i_start_c, j_start_c, n_c, i_start_ghost_c, j_start_ghost_c, col_shift, col_scale;
264: PetscMPIInt size_c, size_f, rank_f;
265: PetscScalar v[4], x, y;
266: Mat mat;
267: DMBoundaryType bx, by;
268: MatType mattype;
270: PetscFunctionBegin;
271: PetscCall(DMDAGetInfo(dac, NULL, &Mx, &My, NULL, NULL, NULL, NULL, NULL, NULL, &bx, &by, NULL, NULL));
272: PetscCall(DMDAGetInfo(daf, NULL, &mx, &my, NULL, NULL, NULL, NULL, &dof, NULL, NULL, NULL, NULL, NULL));
273: if (bx == DM_BOUNDARY_PERIODIC) {
274: PetscCheck(Mx, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of x coarse grid points %" PetscInt_FMT " must be positive", Mx);
275: ratioi = mx / Mx;
276: PetscCheck(ratioi * Mx == mx, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: mx/Mx must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT, mx, Mx);
277: } else {
278: PetscCheck(Mx >= 2, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of x coarse grid points %" PetscInt_FMT " must be greater than 1", Mx);
279: ratioi = (mx - 1) / (Mx - 1);
280: PetscCheck(ratioi * (Mx - 1) == mx - 1, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT, mx, Mx);
281: }
282: if (by == DM_BOUNDARY_PERIODIC) {
283: PetscCheck(My, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of y coarse grid points %" PetscInt_FMT " must be positive", My);
284: ratioj = my / My;
285: PetscCheck(ratioj * My == my, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: my/My must be integer: my %" PetscInt_FMT " My %" PetscInt_FMT, my, My);
286: } else {
287: PetscCheck(My >= 2, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of y coarse grid points %" PetscInt_FMT " must be greater than 1", My);
288: ratioj = (my - 1) / (My - 1);
289: PetscCheck(ratioj * (My - 1) == my - 1, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: (my - 1)/(My - 1) must be integer: my %" PetscInt_FMT " My %" PetscInt_FMT, my, My);
290: }
292: PetscCall(DMDAGetCorners(daf, &i_start, &j_start, NULL, &m_f, &n_f, NULL));
293: PetscCall(DMDAGetGhostCorners(daf, &i_start_ghost, &j_start_ghost, NULL, &m_ghost, &n_ghost, NULL));
294: PetscCall(DMGetLocalToGlobalMapping(daf, &ltog_f));
295: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f, &idx_f));
297: PetscCall(DMDAGetCorners(dac, &i_start_c, &j_start_c, NULL, &m_c, &n_c, NULL));
298: PetscCall(DMDAGetGhostCorners(dac, &i_start_ghost_c, &j_start_ghost_c, NULL, &m_ghost_c, &n_ghost_c, NULL));
299: PetscCall(DMGetLocalToGlobalMapping(dac, &ltog_c));
300: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c, &idx_c));
302: /*
303: Used for handling a coarse DMDA that lives on 1/4 the processors of the fine DMDA.
304: The coarse vector is then duplicated 4 times (each time it lives on 1/4 of the
305: processors). Its effective length is hence 4 times its normal length; this is
306: why col_scale multiplies the interpolation matrix column sizes.
307: col_shift allows each set of 1/4 of the processors to do its own interpolation using ITS
308: copy of the coarse vector. A bit of a hack, but it is hard to do better.
310: In the standard case, when size_f == size_c, col_scale == 1 and col_shift == 0
311: */
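   /*
      Editorial example (hypothetical configuration): with size_f = 4 fine ranks and
      size_c = 1 coarse rank, col_scale = 4 and fine rank r gets col_shift = Mx*My*r,
      so each rank addresses its own copy of the duplicated coarse vector.
   */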
312: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)dac), &size_c));
313: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)daf), &size_f));
314: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)daf), &rank_f));
315: col_scale = size_f / size_c;
316: col_shift = Mx * My * (rank_f / size_c);
318: MatPreallocateBegin(PetscObjectComm((PetscObject)daf), m_f * n_f, col_scale * m_c * n_c, dnz, onz);
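   /*
      Editorial note on the preallocation macros used here: MatPreallocateBegin() allocates
      and zeroes the dnz/onz arrays for the local rows, each MatPreallocateSet() call records
      for one row how many of the given columns fall in the diagonal (dnz) versus off-diagonal
      (onz) block, and MatPreallocateEnd() frees the arrays once preallocation is done.
   */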
319: for (j = j_start; j < j_start + n_f; j++) {
320: for (i = i_start; i < i_start + m_f; i++) {
321: /* convert to local "natural" numbering and then to PETSc global numbering */
322: row = idx_f[(m_ghost * (j - j_start_ghost) + (i - i_start_ghost))];
324: i_c = (i / ratioi); /* coarse grid node to left of fine grid node */
325: j_c = (j / ratioj); /* coarse grid node below fine grid node */
327: PetscCheck(j_c >= j_start_ghost_c, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA, j_start %" PetscInt_FMT " j_c %" PetscInt_FMT " j_start_ghost_c %" PetscInt_FMT, j_start, j_c, j_start_ghost_c);
328: PetscCheck(i_c >= i_start_ghost_c, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA, i_start %" PetscInt_FMT " i_c %" PetscInt_FMT " i_start_ghost_c %" PetscInt_FMT, i_start, i_c, i_start_ghost_c);
330: /*
331: Only include those interpolation points that are truly
332: nonzero. Note this is very important for the final grid lines
333: in the x and y directions, since they have no right/top neighbors
334: */
335: nc = 0;
336: /* one left and below; or we are right on it */
337: col = (m_ghost_c * (j_c - j_start_ghost_c) + (i_c - i_start_ghost_c));
338: cols[nc++] = col_shift + idx_c[col];
339: /* one right and below */
340: if (i_c * ratioi != i) cols[nc++] = col_shift + idx_c[col + 1];
341: /* one left and above */
342: if (j_c * ratioj != j) cols[nc++] = col_shift + idx_c[col + m_ghost_c];
343: /* one right and above */
344: if (i_c * ratioi != i && j_c * ratioj != j) cols[nc++] = col_shift + idx_c[col + (m_ghost_c + 1)];
345: PetscCall(MatPreallocateSet(row, nc, cols, dnz, onz));
346: }
347: }
348: PetscCall(MatCreate(PetscObjectComm((PetscObject)daf), &mat));
349: #if defined(PETSC_HAVE_CUDA)
350: /*
351: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
352: we don't want the original unconverted matrix copied to the GPU
353: */
354: if (dof > 1) PetscCall(MatBindToCPU(mat, PETSC_TRUE));
355: #endif
356: PetscCall(MatSetSizes(mat, m_f * n_f, col_scale * m_c * n_c, mx * my, col_scale * Mx * My));
357: PetscCall(ConvertToAIJ(dac->mattype, &mattype));
358: PetscCall(MatSetType(mat, mattype));
359: PetscCall(MatSeqAIJSetPreallocation(mat, 0, dnz));
360: PetscCall(MatMPIAIJSetPreallocation(mat, 0, dnz, 0, onz));
361: MatPreallocateEnd(dnz, onz);
363: /* loop over local fine grid nodes, setting the interpolation for each */
364: if (!NEWVERSION) {
365: for (j = j_start; j < j_start + n_f; j++) {
366: for (i = i_start; i < i_start + m_f; i++) {
367: /* convert to local "natural" numbering and then to PETSc global numbering */
368: row = idx_f[(m_ghost * (j - j_start_ghost) + (i - i_start_ghost))];
370: i_c = (i / ratioi); /* coarse grid node to left of fine grid node */
371: j_c = (j / ratioj); /* coarse grid node below fine grid node */
373: /*
374: Only include those interpolation points that are truly
375: nonzero. Note this is very important for the final grid lines
376: in the x and y directions, since they have no right/top neighbors
377: */
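        /*
           Worked example (editorial, illustrative): a fine node at the center of a coarse
           cell with ratioi = ratioj = 2 has x = y = 0.5, so the bilinear weights below are
           (1-x)(1-y) = x(1-y) = (1-x)y = xy = 0.25; a fine node coinciding with a coarse
           node has x = y = 0 and receives the single weight 1.0.
        */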
378: x = ((PetscReal)(i - i_c * ratioi)) / ((PetscReal)ratioi);
379: y = ((PetscReal)(j - j_c * ratioj)) / ((PetscReal)ratioj);
381: nc = 0;
382: /* one left and below; or we are right on it */
383: col = (m_ghost_c * (j_c - j_start_ghost_c) + (i_c - i_start_ghost_c));
384: cols[nc] = col_shift + idx_c[col];
385: v[nc++] = x * y - x - y + 1.0;
386: /* one right and below */
387: if (i_c * ratioi != i) {
388: cols[nc] = col_shift + idx_c[col + 1];
389: v[nc++] = -x * y + x;
390: }
391: /* one left and above */
392: if (j_c * ratioj != j) {
393: cols[nc] = col_shift + idx_c[col + m_ghost_c];
394: v[nc++] = -x * y + y;
395: }
396: /* one right and above */
397: if (j_c * ratioj != j && i_c * ratioi != i) {
398: cols[nc] = col_shift + idx_c[col + (m_ghost_c + 1)];
399: v[nc++] = x * y;
400: }
401: PetscCall(MatSetValues(mat, 1, &row, nc, cols, v, INSERT_VALUES));
402: }
403: }
405: } else {
406: PetscScalar Ni[4];
407: PetscScalar *xi, *eta;
408: PetscInt li, nxi, lj, neta;
410: /* compute local coordinate arrays */
411: nxi = ratioi + 1;
412: neta = ratioj + 1;
413: PetscCall(PetscMalloc1(nxi, &xi));
414: PetscCall(PetscMalloc1(neta, &eta));
415: for (li = 0; li < nxi; li++) xi[li] = -1.0 + (PetscScalar)li * (2.0 / (PetscScalar)(nxi - 1));
416: for (lj = 0; lj < neta; lj++) eta[lj] = -1.0 + (PetscScalar)lj * (2.0 / (PetscScalar)(neta - 1));
418: /* loop over local fine grid nodes, setting the interpolation for each */
419: for (j = j_start; j < j_start + n_f; j++) {
420: for (i = i_start; i < i_start + m_f; i++) {
421: /* convert to local "natural" numbering and then to PETSc global numbering */
422: row = idx_f[(m_ghost * (j - j_start_ghost) + (i - i_start_ghost))];
424: i_c = (i / ratioi); /* coarse grid node to left of fine grid node */
425: j_c = (j / ratioj); /* coarse grid node below fine grid node */
427: /* remainders */
428: li = i - ratioi * (i / ratioi);
429: if (i == mx - 1) li = nxi - 1;
430: lj = j - ratioj * (j / ratioj);
431: if (j == my - 1) lj = neta - 1;
433: /* corners */
434: col = (m_ghost_c * (j_c - j_start_ghost_c) + (i_c - i_start_ghost_c));
435: cols[0] = col_shift + idx_c[col]; /* left, below */
436: Ni[0] = 1.0;
437: if ((li == 0) || (li == nxi - 1)) {
438: if ((lj == 0) || (lj == neta - 1)) {
439: PetscCall(MatSetValue(mat, row, cols[0], Ni[0], INSERT_VALUES));
440: continue;
441: }
442: }
444: /* edges + interior */
445: /* remainders */
446: if (i == mx - 1) i_c--;
447: if (j == my - 1) j_c--;
449: col = (m_ghost_c * (j_c - j_start_ghost_c) + (i_c - i_start_ghost_c));
450: cols[0] = col_shift + idx_c[col]; /* left, below */
451: cols[1] = col_shift + idx_c[col + 1]; /* right, below */
452: cols[2] = col_shift + idx_c[col + m_ghost_c]; /* left, above */
453: cols[3] = col_shift + idx_c[col + (m_ghost_c + 1)]; /* right, above */
455: Ni[0] = 0.25 * (1.0 - xi[li]) * (1.0 - eta[lj]);
456: Ni[1] = 0.25 * (1.0 + xi[li]) * (1.0 - eta[lj]);
457: Ni[2] = 0.25 * (1.0 - xi[li]) * (1.0 + eta[lj]);
458: Ni[3] = 0.25 * (1.0 + xi[li]) * (1.0 + eta[lj]);
460: nc = 0;
461: if (PetscAbsScalar(Ni[0]) < 1.0e-32) cols[0] = -1;
462: if (PetscAbsScalar(Ni[1]) < 1.0e-32) cols[1] = -1;
463: if (PetscAbsScalar(Ni[2]) < 1.0e-32) cols[2] = -1;
464: if (PetscAbsScalar(Ni[3]) < 1.0e-32) cols[3] = -1;
466: PetscCall(MatSetValues(mat, 1, &row, 4, cols, Ni, INSERT_VALUES));
467: }
468: }
469: PetscCall(PetscFree(xi));
470: PetscCall(PetscFree(eta));
471: }
472: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f, &idx_f));
473: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c, &idx_c));
474: PetscCall(MatAssemblyBegin(mat, MAT_FINAL_ASSEMBLY));
475: PetscCall(MatAssemblyEnd(mat, MAT_FINAL_ASSEMBLY));
476: PetscCall(MatCreateMAIJ(mat, dof, A));
477: PetscCall(MatDestroy(&mat));
478: PetscFunctionReturn(PETSC_SUCCESS);
479: }
481: /*
482: Contributed by Andrei Draganescu <aidraga@sandia.gov>
483: */
484: static PetscErrorCode DMCreateInterpolation_DA_2D_Q0(DM dac, DM daf, Mat *A)
485: {
486: PetscInt i, j, i_start, j_start, m_f, n_f, Mx, My, dof;
487: const PetscInt *idx_c, *idx_f;
488: ISLocalToGlobalMapping ltog_f, ltog_c;
489: PetscInt m_ghost, n_ghost, m_ghost_c, n_ghost_c, *dnz, *onz;
490: PetscInt row, col, i_start_ghost, j_start_ghost, cols[4], mx, m_c, my, nc, ratioi, ratioj;
491: PetscInt i_c, j_c, i_start_c, j_start_c, n_c, i_start_ghost_c, j_start_ghost_c, col_shift, col_scale;
492: PetscMPIInt size_c, size_f, rank_f;
493: PetscScalar v[4];
494: Mat mat;
495: DMBoundaryType bx, by;
496: MatType mattype;
498: PetscFunctionBegin;
499: PetscCall(DMDAGetInfo(dac, NULL, &Mx, &My, NULL, NULL, NULL, NULL, NULL, NULL, &bx, &by, NULL, NULL));
500: PetscCall(DMDAGetInfo(daf, NULL, &mx, &my, NULL, NULL, NULL, NULL, &dof, NULL, NULL, NULL, NULL, NULL));
501: PetscCheck(Mx, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of x coarse grid points %" PetscInt_FMT " must be positive", Mx);
502: PetscCheck(My, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of y coarse grid points %" PetscInt_FMT " must be positive", My);
503: ratioi = mx / Mx;
504: ratioj = my / My;
505: PetscCheck(ratioi * Mx == mx, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_WRONG, "Fine grid points must be multiple of coarse grid points in x");
506: PetscCheck(ratioj * My == my, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_WRONG, "Fine grid points must be multiple of coarse grid points in y");
507: PetscCheck(ratioi == 2, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_WRONG, "Coarsening factor in x must be 2");
508: PetscCheck(ratioj == 2, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_WRONG, "Coarsening factor in y must be 2");
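  /*
     Editorial note: with the factor-of-2 refinement enforced above, this piecewise-constant
     (Q0) interpolation assigns to each fine grid point the value of the coarse grid point it
     maps to, i.e. every coarse value is copied with weight 1.0 to a 2x2 block of fine points.
  */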
510: PetscCall(DMDAGetCorners(daf, &i_start, &j_start, NULL, &m_f, &n_f, NULL));
511: PetscCall(DMDAGetGhostCorners(daf, &i_start_ghost, &j_start_ghost, NULL, &m_ghost, &n_ghost, NULL));
512: PetscCall(DMGetLocalToGlobalMapping(daf, &ltog_f));
513: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f, &idx_f));
515: PetscCall(DMDAGetCorners(dac, &i_start_c, &j_start_c, NULL, &m_c, &n_c, NULL));
516: PetscCall(DMDAGetGhostCorners(dac, &i_start_ghost_c, &j_start_ghost_c, NULL, &m_ghost_c, &n_ghost_c, NULL));
517: PetscCall(DMGetLocalToGlobalMapping(dac, &ltog_c));
518: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c, &idx_c));
520: /*
521: Used for handling a coarse DMDA that lives on 1/4 the processors of the fine DMDA.
522: The coarse vector is then duplicated 4 times (each time it lives on 1/4 of the
523: processors). Its effective length is hence 4 times its normal length; this is
524: why col_scale multiplies the interpolation matrix column sizes.
525: col_shift allows each set of 1/4 of the processors to do its own interpolation using ITS
526: copy of the coarse vector. A bit of a hack, but it is hard to do better.
528: In the standard case, when size_f == size_c, col_scale == 1 and col_shift == 0
529: */
530: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)dac), &size_c));
531: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)daf), &size_f));
532: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)daf), &rank_f));
533: col_scale = size_f / size_c;
534: col_shift = Mx * My * (rank_f / size_c);
536: MatPreallocateBegin(PetscObjectComm((PetscObject)daf), m_f * n_f, col_scale * m_c * n_c, dnz, onz);
537: for (j = j_start; j < j_start + n_f; j++) {
538: for (i = i_start; i < i_start + m_f; i++) {
539: /* convert to local "natural" numbering and then to PETSc global numbering */
540: row = idx_f[(m_ghost * (j - j_start_ghost) + (i - i_start_ghost))];
542: i_c = (i / ratioi); /* coarse grid node to left of fine grid node */
543: j_c = (j / ratioj); /* coarse grid node below fine grid node */
545: PetscCheck(j_c >= j_start_ghost_c, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA, j_start %" PetscInt_FMT " j_c %" PetscInt_FMT " j_start_ghost_c %" PetscInt_FMT, j_start, j_c, j_start_ghost_c);
546: PetscCheck(i_c >= i_start_ghost_c, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA, i_start %" PetscInt_FMT " i_c %" PetscInt_FMT " i_start_ghost_c %" PetscInt_FMT, i_start, i_c, i_start_ghost_c);
548: /*
549: Only include those interpolation points that are truly
550: nonzero. Note this is very important for the final grid lines
551: in the x and y directions, since they have no right/top neighbors
552: */
553: nc = 0;
554: /* one left and below; or we are right on it */
555: col = (m_ghost_c * (j_c - j_start_ghost_c) + (i_c - i_start_ghost_c));
556: cols[nc++] = col_shift + idx_c[col];
557: PetscCall(MatPreallocateSet(row, nc, cols, dnz, onz));
558: }
559: }
560: PetscCall(MatCreate(PetscObjectComm((PetscObject)daf), &mat));
561: #if defined(PETSC_HAVE_CUDA)
562: /*
563: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
564: we don't want the original unconverted matrix copied to the GPU
565: */
566: if (dof > 1) PetscCall(MatBindToCPU(mat, PETSC_TRUE));
567: #endif
568: PetscCall(MatSetSizes(mat, m_f * n_f, col_scale * m_c * n_c, mx * my, col_scale * Mx * My));
569: PetscCall(ConvertToAIJ(dac->mattype, &mattype));
570: PetscCall(MatSetType(mat, mattype));
571: PetscCall(MatSeqAIJSetPreallocation(mat, 0, dnz));
572: PetscCall(MatMPIAIJSetPreallocation(mat, 0, dnz, 0, onz));
573: MatPreallocateEnd(dnz, onz);
575: /* loop over local fine grid nodes, setting the interpolation for each */
576: for (j = j_start; j < j_start + n_f; j++) {
577: for (i = i_start; i < i_start + m_f; i++) {
578: /* convert to local "natural" numbering and then to PETSc global numbering */
579: row = idx_f[(m_ghost * (j - j_start_ghost) + (i - i_start_ghost))];
581: i_c = (i / ratioi); /* coarse grid node to left of fine grid node */
582: j_c = (j / ratioj); /* coarse grid node below fine grid node */
583: nc = 0;
584: /* one left and below; or we are right on it */
585: col = (m_ghost_c * (j_c - j_start_ghost_c) + (i_c - i_start_ghost_c));
586: cols[nc] = col_shift + idx_c[col];
587: v[nc++] = 1.0;
589: PetscCall(MatSetValues(mat, 1, &row, nc, cols, v, INSERT_VALUES));
590: }
591: }
592: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f, &idx_f));
593: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c, &idx_c));
594: PetscCall(MatAssemblyBegin(mat, MAT_FINAL_ASSEMBLY));
595: PetscCall(MatAssemblyEnd(mat, MAT_FINAL_ASSEMBLY));
596: PetscCall(MatCreateMAIJ(mat, dof, A));
597: PetscCall(MatDestroy(&mat));
598: PetscCall(PetscLogFlops(13.0 * m_f * n_f));
599: PetscFunctionReturn(PETSC_SUCCESS);
600: }
602: /*
603: Contributed by Jianming Yang <jianming-yang@uiowa.edu>
604: */
605: static PetscErrorCode DMCreateInterpolation_DA_3D_Q0(DM dac, DM daf, Mat *A)
606: {
607: PetscInt i, j, l, i_start, j_start, l_start, m_f, n_f, p_f, Mx, My, Mz, dof;
608: const PetscInt *idx_c, *idx_f;
609: ISLocalToGlobalMapping ltog_f, ltog_c;
610: PetscInt m_ghost, n_ghost, p_ghost, m_ghost_c, n_ghost_c, p_ghost_c, nc, *dnz, *onz;
611: PetscInt row, col, i_start_ghost, j_start_ghost, l_start_ghost, cols[8], mx, m_c, my, n_c, mz, p_c, ratioi, ratioj, ratiol;
612: PetscInt i_c, j_c, l_c, i_start_c, j_start_c, l_start_c, i_start_ghost_c, j_start_ghost_c, l_start_ghost_c, col_shift, col_scale;
613: PetscMPIInt size_c, size_f, rank_f;
614: PetscScalar v[8];
615: Mat mat;
616: DMBoundaryType bx, by, bz;
617: MatType mattype;
619: PetscFunctionBegin;
620: PetscCall(DMDAGetInfo(dac, NULL, &Mx, &My, &Mz, NULL, NULL, NULL, NULL, NULL, &bx, &by, &bz, NULL));
621: PetscCheck(Mx, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of x coarse grid points %" PetscInt_FMT " must be positive", Mx);
622: PetscCheck(My, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of y coarse grid points %" PetscInt_FMT " must be positive", My);
623: PetscCheck(Mz, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of z coarse grid points %" PetscInt_FMT " must be positive", Mz);
624: PetscCall(DMDAGetInfo(daf, NULL, &mx, &my, &mz, NULL, NULL, NULL, &dof, NULL, NULL, NULL, NULL, NULL));
625: ratioi = mx / Mx;
626: ratioj = my / My;
627: ratiol = mz / Mz;
628: PetscCheck(ratioi * Mx == mx, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_WRONG, "Fine grid points must be multiple of coarse grid points in x");
629: PetscCheck(ratioj * My == my, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_WRONG, "Fine grid points must be multiple of coarse grid points in y");
630: PetscCheck(ratiol * Mz == mz, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_WRONG, "Fine grid points must be multiple of coarse grid points in z");
631: PetscCheck(ratioi == 2 || ratioi == 1, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_WRONG, "Coarsening factor in x must be 1 or 2");
632: PetscCheck(ratioj == 2 || ratioj == 1, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_WRONG, "Coarsening factor in y must be 1 or 2");
633: PetscCheck(ratiol == 2 || ratiol == 1, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_WRONG, "Coarsening factor in z must be 1 or 2");
635: PetscCall(DMDAGetCorners(daf, &i_start, &j_start, &l_start, &m_f, &n_f, &p_f));
636: PetscCall(DMDAGetGhostCorners(daf, &i_start_ghost, &j_start_ghost, &l_start_ghost, &m_ghost, &n_ghost, &p_ghost));
637: PetscCall(DMGetLocalToGlobalMapping(daf, &ltog_f));
638: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f, &idx_f));
640: PetscCall(DMDAGetCorners(dac, &i_start_c, &j_start_c, &l_start_c, &m_c, &n_c, &p_c));
641: PetscCall(DMDAGetGhostCorners(dac, &i_start_ghost_c, &j_start_ghost_c, &l_start_ghost_c, &m_ghost_c, &n_ghost_c, &p_ghost_c));
642: PetscCall(DMGetLocalToGlobalMapping(dac, &ltog_c));
643: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c, &idx_c));
645: /*
646: Used for handling a coarse DMDA that lives on 1/4 the processors of the fine DMDA.
647: The coarse vector is then duplicated 4 times (each time it lives on 1/4 of the
648: processors). Its effective length is hence 4 times its normal length; this is
649: why col_scale multiplies the interpolation matrix column sizes.
650: col_shift allows each set of 1/4 of the processors to do its own interpolation using ITS
651: copy of the coarse vector. A bit of a hack, but it is hard to do better.
653: In the standard case, when size_f == size_c, col_scale == 1 and col_shift == 0
654: */
655: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)dac), &size_c));
656: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)daf), &size_f));
657: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)daf), &rank_f));
658: col_scale = size_f / size_c;
659: col_shift = Mx * My * Mz * (rank_f / size_c);
661: MatPreallocateBegin(PetscObjectComm((PetscObject)daf), m_f * n_f * p_f, col_scale * m_c * n_c * p_c, dnz, onz);
662: for (l = l_start; l < l_start + p_f; l++) {
663: for (j = j_start; j < j_start + n_f; j++) {
664: for (i = i_start; i < i_start + m_f; i++) {
665: /* convert to local "natural" numbering and then to PETSc global numbering */
666: row = idx_f[(m_ghost * n_ghost * (l - l_start_ghost) + m_ghost * (j - j_start_ghost) + (i - i_start_ghost))];
668: i_c = (i / ratioi); /* coarse grid node to left of fine grid node */
669: j_c = (j / ratioj); /* coarse grid node below fine grid node */
670: l_c = (l / ratiol);
672: PetscCheck(l_c >= l_start_ghost_c, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA, l_start %" PetscInt_FMT " l_c %" PetscInt_FMT " l_start_ghost_c %" PetscInt_FMT, l_start, l_c, l_start_ghost_c);
673: PetscCheck(j_c >= j_start_ghost_c, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA, j_start %" PetscInt_FMT " j_c %" PetscInt_FMT " j_start_ghost_c %" PetscInt_FMT, j_start, j_c, j_start_ghost_c);
674: PetscCheck(i_c >= i_start_ghost_c, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA, i_start %" PetscInt_FMT " i_c %" PetscInt_FMT " i_start_ghost_c %" PetscInt_FMT, i_start, i_c, i_start_ghost_c);
676: /*
677: Only include those interpolation points that are truly
678: nonzero. Note this is very important for the final grid lines
679: in the x, y, and z directions, since they have no right/top/back neighbors
680: */
681: nc = 0;
682: /* one left and below; or we are right on it */
683: col = (m_ghost_c * n_ghost_c * (l_c - l_start_ghost_c) + m_ghost_c * (j_c - j_start_ghost_c) + (i_c - i_start_ghost_c));
684: cols[nc++] = col_shift + idx_c[col];
685: PetscCall(MatPreallocateSet(row, nc, cols, dnz, onz));
686: }
687: }
688: }
689: PetscCall(MatCreate(PetscObjectComm((PetscObject)daf), &mat));
690: #if defined(PETSC_HAVE_CUDA)
691: /*
692: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
693: we don't want the original unconverted matrix copied to the GPU
694: */
695: if (dof > 1) PetscCall(MatBindToCPU(mat, PETSC_TRUE));
696: #endif
697: PetscCall(MatSetSizes(mat, m_f * n_f * p_f, col_scale * m_c * n_c * p_c, mx * my * mz, col_scale * Mx * My * Mz));
698: PetscCall(ConvertToAIJ(dac->mattype, &mattype));
699: PetscCall(MatSetType(mat, mattype));
700: PetscCall(MatSeqAIJSetPreallocation(mat, 0, dnz));
701: PetscCall(MatMPIAIJSetPreallocation(mat, 0, dnz, 0, onz));
702: MatPreallocateEnd(dnz, onz);
704: /* loop over local fine grid nodes, setting the interpolation for each */
705: for (l = l_start; l < l_start + p_f; l++) {
706: for (j = j_start; j < j_start + n_f; j++) {
707: for (i = i_start; i < i_start + m_f; i++) {
708: /* convert to local "natural" numbering and then to PETSc global numbering */
709: row = idx_f[(m_ghost * n_ghost * (l - l_start_ghost) + m_ghost * (j - j_start_ghost) + (i - i_start_ghost))];
711: i_c = (i / ratioi); /* coarse grid node to left of fine grid node */
712: j_c = (j / ratioj); /* coarse grid node below fine grid node */
713: l_c = (l / ratiol);
714: nc = 0;
715: /* one left and below; or we are right on it */
716: col = (m_ghost_c * n_ghost_c * (l_c - l_start_ghost_c) + m_ghost_c * (j_c - j_start_ghost_c) + (i_c - i_start_ghost_c));
717: cols[nc] = col_shift + idx_c[col];
718: v[nc++] = 1.0;
720: PetscCall(MatSetValues(mat, 1, &row, nc, cols, v, INSERT_VALUES));
721: }
722: }
723: }
724: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f, &idx_f));
725: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c, &idx_c));
726: PetscCall(MatAssemblyBegin(mat, MAT_FINAL_ASSEMBLY));
727: PetscCall(MatAssemblyEnd(mat, MAT_FINAL_ASSEMBLY));
728: PetscCall(MatCreateMAIJ(mat, dof, A));
729: PetscCall(MatDestroy(&mat));
730: PetscCall(PetscLogFlops(13.0 * m_f * n_f * p_f));
731: PetscFunctionReturn(PETSC_SUCCESS);
732: }
734: static PetscErrorCode DMCreateInterpolation_DA_3D_Q1(DM dac, DM daf, Mat *A)
735: {
736: PetscInt i, j, i_start, j_start, m_f, n_f, Mx, My, dof, l;
737: const PetscInt *idx_c, *idx_f;
738: ISLocalToGlobalMapping ltog_f, ltog_c;
739: PetscInt m_ghost, n_ghost, m_ghost_c, n_ghost_c, Mz, mz;
740: PetscInt row, col, i_start_ghost, j_start_ghost, cols[8], mx, m_c, my, nc, ratioi, ratioj, ratiok;
741: PetscInt i_c, j_c, i_start_c, j_start_c, n_c, i_start_ghost_c, j_start_ghost_c;
742: PetscInt l_start, p_f, l_start_ghost, p_ghost, l_start_c, p_c;
743: PetscInt l_start_ghost_c, p_ghost_c, l_c, *dnz, *onz;
744: PetscScalar v[8], x, y, z;
745: Mat mat;
746: DMBoundaryType bx, by, bz;
747: MatType mattype;
749: PetscFunctionBegin;
750: PetscCall(DMDAGetInfo(dac, NULL, &Mx, &My, &Mz, NULL, NULL, NULL, NULL, NULL, &bx, &by, &bz, NULL));
751: PetscCall(DMDAGetInfo(daf, NULL, &mx, &my, &mz, NULL, NULL, NULL, &dof, NULL, NULL, NULL, NULL, NULL));
752: if (mx == Mx) {
753: ratioi = 1;
754: } else if (bx == DM_BOUNDARY_PERIODIC) {
755: PetscCheck(Mx, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of x coarse grid points %" PetscInt_FMT " must be positive", Mx);
756: ratioi = mx / Mx;
757: PetscCheck(ratioi * Mx == mx, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: mx/Mx must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT, mx, Mx);
758: } else {
759: PetscCheck(Mx >= 2, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of x coarse grid points %" PetscInt_FMT " must be greater than 1", Mx);
760: ratioi = (mx - 1) / (Mx - 1);
761: PetscCheck(ratioi * (Mx - 1) == mx - 1, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT, mx, Mx);
762: }
763: if (my == My) {
764: ratioj = 1;
765: } else if (by == DM_BOUNDARY_PERIODIC) {
766: PetscCheck(My, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of y coarse grid points %" PetscInt_FMT " must be positive", My);
767: ratioj = my / My;
768: PetscCheck(ratioj * My == my, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: my/My must be integer: my %" PetscInt_FMT " My %" PetscInt_FMT, my, My);
769: } else {
770: PetscCheck(My >= 2, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of y coarse grid points %" PetscInt_FMT " must be greater than 1", My);
771: ratioj = (my - 1) / (My - 1);
772: PetscCheck(ratioj * (My - 1) == my - 1, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: (my - 1)/(My - 1) must be integer: my %" PetscInt_FMT " My %" PetscInt_FMT, my, My);
773: }
774: if (mz == Mz) {
775: ratiok = 1;
776: } else if (bz == DM_BOUNDARY_PERIODIC) {
777: PetscCheck(Mz, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of z coarse grid points %" PetscInt_FMT " must be positive", Mz);
778: ratiok = mz / Mz;
779: PetscCheck(ratiok * Mz == mz, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: mz/Mz must be integer: mz %" PetscInt_FMT " Mz %" PetscInt_FMT, mz, Mz);
780: } else {
781: PetscCheck(Mz >= 2, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of z coarse grid points %" PetscInt_FMT " must be greater than 1", Mz);
782: ratiok = (mz - 1) / (Mz - 1);
783: PetscCheck(ratiok * (Mz - 1) == mz - 1, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: (mz - 1)/(Mz - 1) must be integer: mz %" PetscInt_FMT " Mz %" PetscInt_FMT, mz, Mz);
784: }
786: PetscCall(DMDAGetCorners(daf, &i_start, &j_start, &l_start, &m_f, &n_f, &p_f));
787: PetscCall(DMDAGetGhostCorners(daf, &i_start_ghost, &j_start_ghost, &l_start_ghost, &m_ghost, &n_ghost, &p_ghost));
788: PetscCall(DMGetLocalToGlobalMapping(daf, &ltog_f));
789: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f, &idx_f));
791: PetscCall(DMDAGetCorners(dac, &i_start_c, &j_start_c, &l_start_c, &m_c, &n_c, &p_c));
792: PetscCall(DMDAGetGhostCorners(dac, &i_start_ghost_c, &j_start_ghost_c, &l_start_ghost_c, &m_ghost_c, &n_ghost_c, &p_ghost_c));
793: PetscCall(DMGetLocalToGlobalMapping(dac, &ltog_c));
794: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c, &idx_c));
796: /* create interpolation matrix, determining exact preallocation */
797: MatPreallocateBegin(PetscObjectComm((PetscObject)dac), m_f * n_f * p_f, m_c * n_c * p_c, dnz, onz);
798: /* loop over local fine grid nodes counting interpolating points */
799: for (l = l_start; l < l_start + p_f; l++) {
800: for (j = j_start; j < j_start + n_f; j++) {
801: for (i = i_start; i < i_start + m_f; i++) {
802: /* convert to local "natural" numbering and then to PETSc global numbering */
803: row = idx_f[(m_ghost * n_ghost * (l - l_start_ghost) + m_ghost * (j - j_start_ghost) + (i - i_start_ghost))];
804: i_c = (i / ratioi);
805: j_c = (j / ratioj);
806: l_c = (l / ratiok);
807: PetscCheck(l_c >= l_start_ghost_c, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA, l_start %" PetscInt_FMT " l_c %" PetscInt_FMT " l_start_ghost_c %" PetscInt_FMT, l_start, l_c, l_start_ghost_c);
808: PetscCheck(j_c >= j_start_ghost_c, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA, j_start %" PetscInt_FMT " j_c %" PetscInt_FMT " j_start_ghost_c %" PetscInt_FMT, j_start, j_c, j_start_ghost_c);
809: PetscCheck(i_c >= i_start_ghost_c, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA, i_start %" PetscInt_FMT " i_c %" PetscInt_FMT " i_start_ghost_c %" PetscInt_FMT, i_start, i_c, i_start_ghost_c);
811: /*
812: Only include those interpolation points that are truly
813: nonzero. Note this is very important for the final grid lines
814: in the x, y, and z directions, since they have no right/top/back neighbors
815: */
816: nc = 0;
817: col = (m_ghost_c * n_ghost_c * (l_c - l_start_ghost_c) + m_ghost_c * (j_c - j_start_ghost_c) + (i_c - i_start_ghost_c));
818: cols[nc++] = idx_c[col];
819: if (i_c * ratioi != i) cols[nc++] = idx_c[col + 1];
820: if (j_c * ratioj != j) cols[nc++] = idx_c[col + m_ghost_c];
821: if (l_c * ratiok != l) cols[nc++] = idx_c[col + m_ghost_c * n_ghost_c];
822: if (j_c * ratioj != j && i_c * ratioi != i) cols[nc++] = idx_c[col + (m_ghost_c + 1)];
823: if (j_c * ratioj != j && l_c * ratiok != l) cols[nc++] = idx_c[col + (m_ghost_c * n_ghost_c + m_ghost_c)];
824: if (i_c * ratioi != i && l_c * ratiok != l) cols[nc++] = idx_c[col + (m_ghost_c * n_ghost_c + 1)];
825: if (i_c * ratioi != i && l_c * ratiok != l && j_c * ratioj != j) cols[nc++] = idx_c[col + (m_ghost_c * n_ghost_c + m_ghost_c + 1)];
826: PetscCall(MatPreallocateSet(row, nc, cols, dnz, onz));
827: }
828: }
829: }
830: PetscCall(MatCreate(PetscObjectComm((PetscObject)dac), &mat));
831: #if defined(PETSC_HAVE_CUDA)
832: /*
833: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
834: we don't want the original unconverted matrix copied to the GPU
835: */
836: if (dof > 1) PetscCall(MatBindToCPU(mat, PETSC_TRUE));
837: #endif
838: PetscCall(MatSetSizes(mat, m_f * n_f * p_f, m_c * n_c * p_c, mx * my * mz, Mx * My * Mz));
839: PetscCall(ConvertToAIJ(dac->mattype, &mattype));
840: PetscCall(MatSetType(mat, mattype));
841: PetscCall(MatSeqAIJSetPreallocation(mat, 0, dnz));
842: PetscCall(MatMPIAIJSetPreallocation(mat, 0, dnz, 0, onz));
843: MatPreallocateEnd(dnz, onz);
845: /* loop over local fine grid nodes, setting the interpolation for each */
846: if (!NEWVERSION) {
847: for (l = l_start; l < l_start + p_f; l++) {
848: for (j = j_start; j < j_start + n_f; j++) {
849: for (i = i_start; i < i_start + m_f; i++) {
850: /* convert to local "natural" numbering and then to PETSc global numbering */
851: row = idx_f[(m_ghost * n_ghost * (l - l_start_ghost) + m_ghost * (j - j_start_ghost) + (i - i_start_ghost))];
853: i_c = (i / ratioi);
854: j_c = (j / ratioj);
855: l_c = (l / ratiok);
857: /*
858: Only include those interpolation points that are truly
859: nonzero. Note this is very important for the final grid lines
860: in the x, y, and z directions, since they have no right/top/back neighbors
861: */
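          /*
             Worked example (editorial, illustrative): at the center of a coarse cell
             x = y = z = 0.5, so 2x-1 = 2y-1 = 2z-1 = 0 and each of the eight trilinear
             weights below equals 0.125; on a coarse node x = y = z = 0 and the single
             weight is 1.0.
          */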
862: x = ((PetscReal)(i - i_c * ratioi)) / ((PetscReal)ratioi);
863: y = ((PetscReal)(j - j_c * ratioj)) / ((PetscReal)ratioj);
864: z = ((PetscReal)(l - l_c * ratiok)) / ((PetscReal)ratiok);
866: nc = 0;
867: /* one left and below; or we are right on it */
868: col = (m_ghost_c * n_ghost_c * (l_c - l_start_ghost_c) + m_ghost_c * (j_c - j_start_ghost_c) + (i_c - i_start_ghost_c));
870: cols[nc] = idx_c[col];
871: v[nc++] = .125 * (1. - (2.0 * x - 1.)) * (1. - (2.0 * y - 1.)) * (1. - (2.0 * z - 1.));
873: if (i_c * ratioi != i) {
874: cols[nc] = idx_c[col + 1];
875: v[nc++] = .125 * (1. + (2.0 * x - 1.)) * (1. - (2.0 * y - 1.)) * (1. - (2.0 * z - 1.));
876: }
878: if (j_c * ratioj != j) {
879: cols[nc] = idx_c[col + m_ghost_c];
880: v[nc++] = .125 * (1. - (2.0 * x - 1.)) * (1. + (2.0 * y - 1.)) * (1. - (2.0 * z - 1.));
881: }
883: if (l_c * ratiok != l) {
884: cols[nc] = idx_c[col + m_ghost_c * n_ghost_c];
885: v[nc++] = .125 * (1. - (2.0 * x - 1.)) * (1. - (2.0 * y - 1.)) * (1. + (2.0 * z - 1.));
886: }
888: if (j_c * ratioj != j && i_c * ratioi != i) {
889: cols[nc] = idx_c[col + (m_ghost_c + 1)];
890: v[nc++] = .125 * (1. + (2.0 * x - 1.)) * (1. + (2.0 * y - 1.)) * (1. - (2.0 * z - 1.));
891: }
893: if (j_c * ratioj != j && l_c * ratiok != l) {
894: cols[nc] = idx_c[col + (m_ghost_c * n_ghost_c + m_ghost_c)];
895: v[nc++] = .125 * (1. - (2.0 * x - 1.)) * (1. + (2.0 * y - 1.)) * (1. + (2.0 * z - 1.));
896: }
898: if (i_c * ratioi != i && l_c * ratiok != l) {
899: cols[nc] = idx_c[col + (m_ghost_c * n_ghost_c + 1)];
900: v[nc++] = .125 * (1. + (2.0 * x - 1.)) * (1. - (2.0 * y - 1.)) * (1. + (2.0 * z - 1.));
901: }
903: if (i_c * ratioi != i && l_c * ratiok != l && j_c * ratioj != j) {
904: cols[nc] = idx_c[col + (m_ghost_c * n_ghost_c + m_ghost_c + 1)];
905: v[nc++] = .125 * (1. + (2.0 * x - 1.)) * (1. + (2.0 * y - 1.)) * (1. + (2.0 * z - 1.));
906: }
907: PetscCall(MatSetValues(mat, 1, &row, nc, cols, v, INSERT_VALUES));
908: }
909: }
910: }
912: } else {
913: PetscScalar *xi, *eta, *zeta;
914: PetscInt li, nxi, lj, neta, lk, nzeta, n;
915: PetscScalar Ni[8];
917: /* compute local coordinate arrays */
918: nxi = ratioi + 1;
919: neta = ratioj + 1;
920: nzeta = ratiok + 1;
921: PetscCall(PetscMalloc1(nxi, &xi));
922: PetscCall(PetscMalloc1(neta, &eta));
923: PetscCall(PetscMalloc1(nzeta, &zeta));
924: for (li = 0; li < nxi; li++) xi[li] = -1.0 + (PetscScalar)li * (2.0 / (PetscScalar)(nxi - 1));
925: for (lj = 0; lj < neta; lj++) eta[lj] = -1.0 + (PetscScalar)lj * (2.0 / (PetscScalar)(neta - 1));
926: for (lk = 0; lk < nzeta; lk++) zeta[lk] = -1.0 + (PetscScalar)lk * (2.0 / (PetscScalar)(nzeta - 1));
928: for (l = l_start; l < l_start + p_f; l++) {
929: for (j = j_start; j < j_start + n_f; j++) {
930: for (i = i_start; i < i_start + m_f; i++) {
931: /* convert to local "natural" numbering and then to PETSc global numbering */
932: row = idx_f[(m_ghost * n_ghost * (l - l_start_ghost) + m_ghost * (j - j_start_ghost) + (i - i_start_ghost))];
934: i_c = (i / ratioi);
935: j_c = (j / ratioj);
936: l_c = (l / ratiok);
938: /* remainders */
939: li = i - ratioi * (i / ratioi);
940: if (i == mx - 1) li = nxi - 1;
941: lj = j - ratioj * (j / ratioj);
942: if (j == my - 1) lj = neta - 1;
943: lk = l - ratiok * (l / ratiok);
944: if (l == mz - 1) lk = nzeta - 1;
946: /* corners */
947: col = (m_ghost_c * n_ghost_c * (l_c - l_start_ghost_c) + m_ghost_c * (j_c - j_start_ghost_c) + (i_c - i_start_ghost_c));
948: cols[0] = idx_c[col];
949: Ni[0] = 1.0;
950: if ((li == 0) || (li == nxi - 1)) {
951: if ((lj == 0) || (lj == neta - 1)) {
952: if ((lk == 0) || (lk == nzeta - 1)) {
953: PetscCall(MatSetValue(mat, row, cols[0], Ni[0], INSERT_VALUES));
954: continue;
955: }
956: }
957: }
959: /* edges + interior */
960: /* remainders */
961: if (i == mx - 1) i_c--;
962: if (j == my - 1) j_c--;
963: if (l == mz - 1) l_c--;
965: col = (m_ghost_c * n_ghost_c * (l_c - l_start_ghost_c) + m_ghost_c * (j_c - j_start_ghost_c) + (i_c - i_start_ghost_c));
966: cols[0] = idx_c[col]; /* one left and below; or we are right on it */
967: cols[1] = idx_c[col + 1]; /* one right and below */
968: cols[2] = idx_c[col + m_ghost_c]; /* one left and above */
969: cols[3] = idx_c[col + (m_ghost_c + 1)]; /* one right and above */
971: cols[4] = idx_c[col + m_ghost_c * n_ghost_c]; /* one left and below and front; or we are right on it */
972: cols[5] = idx_c[col + (m_ghost_c * n_ghost_c + 1)]; /* one right and below, and front */
973: cols[6] = idx_c[col + (m_ghost_c * n_ghost_c + m_ghost_c)]; /* one left and above and front*/
974: cols[7] = idx_c[col + (m_ghost_c * n_ghost_c + m_ghost_c + 1)]; /* one right and above and front */
976: Ni[0] = 0.125 * (1.0 - xi[li]) * (1.0 - eta[lj]) * (1.0 - zeta[lk]);
977: Ni[1] = 0.125 * (1.0 + xi[li]) * (1.0 - eta[lj]) * (1.0 - zeta[lk]);
978: Ni[2] = 0.125 * (1.0 - xi[li]) * (1.0 + eta[lj]) * (1.0 - zeta[lk]);
979: Ni[3] = 0.125 * (1.0 + xi[li]) * (1.0 + eta[lj]) * (1.0 - zeta[lk]);
981: Ni[4] = 0.125 * (1.0 - xi[li]) * (1.0 - eta[lj]) * (1.0 + zeta[lk]);
982: Ni[5] = 0.125 * (1.0 + xi[li]) * (1.0 - eta[lj]) * (1.0 + zeta[lk]);
983: Ni[6] = 0.125 * (1.0 - xi[li]) * (1.0 + eta[lj]) * (1.0 + zeta[lk]);
984: Ni[7] = 0.125 * (1.0 + xi[li]) * (1.0 + eta[lj]) * (1.0 + zeta[lk]);
986: for (n = 0; n < 8; n++) {
987: if (PetscAbsScalar(Ni[n]) < 1.0e-32) cols[n] = -1;
988: }
989: PetscCall(MatSetValues(mat, 1, &row, 8, cols, Ni, INSERT_VALUES));
990: }
991: }
992: }
993: PetscCall(PetscFree(xi));
994: PetscCall(PetscFree(eta));
995: PetscCall(PetscFree(zeta));
996: }
997: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f, &idx_f));
998: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c, &idx_c));
999: PetscCall(MatAssemblyBegin(mat, MAT_FINAL_ASSEMBLY));
1000: PetscCall(MatAssemblyEnd(mat, MAT_FINAL_ASSEMBLY));
1002: PetscCall(MatCreateMAIJ(mat, dof, A));
1003: PetscCall(MatDestroy(&mat));
1004: PetscFunctionReturn(PETSC_SUCCESS);
1005: }
1007: PetscErrorCode DMCreateInterpolation_DA(DM dac, DM daf, Mat *A, Vec *scale)
1008: {
1009: PetscInt dimc, Mc, Nc, Pc, mc, nc, pc, dofc, sc, dimf, Mf, Nf, Pf, mf, nf, pf, doff, sf;
1010: DMBoundaryType bxc, byc, bzc, bxf, byf, bzf;
1011: DMDAStencilType stc, stf;
1012: DM_DA *ddc = (DM_DA *)dac->data;
1014: PetscFunctionBegin;
1017: PetscAssertPointer(A, 3);
1018: if (scale) PetscAssertPointer(scale, 4);
1020: PetscCall(DMDAGetInfo(dac, &dimc, &Mc, &Nc, &Pc, &mc, &nc, &pc, &dofc, &sc, &bxc, &byc, &bzc, &stc));
1021: PetscCall(DMDAGetInfo(daf, &dimf, &Mf, &Nf, &Pf, &mf, &nf, &pf, &doff, &sf, &bxf, &byf, &bzf, &stf));
1022: PetscCheck(dimc == dimf, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "Dimensions of DMDA do not match %" PetscInt_FMT " %" PetscInt_FMT, dimc, dimf);
1023: PetscCheck(dofc == doff, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "DOF of DMDA do not match %" PetscInt_FMT " %" PetscInt_FMT, dofc, doff);
1024: PetscCheck(sc == sf, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "Stencil width of DMDA do not match %" PetscInt_FMT " %" PetscInt_FMT, sc, sf);
1025: PetscCheck(bxc == bxf && byc == byf && bzc == bzf, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "Boundary type different in two DMDAs");
1026: PetscCheck(stc == stf, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "Stencil type different in two DMDAs");
1027: PetscCheck(Mc >= 2 || Mf <= 1, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Coarse grid requires at least 2 points in x direction");
1028: PetscCheck(dimc <= 1 || Nc >= 2 || Nf <= 1, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Coarse grid requires at least 2 points in y direction");
1029: PetscCheck(dimc <= 2 || Pc >= 2 || Pf <= 1, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Coarse grid requires at least 2 points in z direction");
1031: if (ddc->interptype == DMDA_Q1) {
1032: if (dimc == 1) {
1033: PetscCall(DMCreateInterpolation_DA_1D_Q1(dac, daf, A));
1034: } else if (dimc == 2) {
1035: PetscCall(DMCreateInterpolation_DA_2D_Q1(dac, daf, A));
1036: } else if (dimc == 3) {
1037: PetscCall(DMCreateInterpolation_DA_3D_Q1(dac, daf, A));
1038: } else SETERRQ(PetscObjectComm((PetscObject)daf), PETSC_ERR_SUP, "No support for this DMDA dimension %" PetscInt_FMT " for interpolation type %d", dimc, (int)ddc->interptype);
1039: } else if (ddc->interptype == DMDA_Q0) {
1040: if (dimc == 1) {
1041: PetscCall(DMCreateInterpolation_DA_1D_Q0(dac, daf, A));
1042: } else if (dimc == 2) {
1043: PetscCall(DMCreateInterpolation_DA_2D_Q0(dac, daf, A));
1044: } else if (dimc == 3) {
1045: PetscCall(DMCreateInterpolation_DA_3D_Q0(dac, daf, A));
1046: } else SETERRQ(PetscObjectComm((PetscObject)daf), PETSC_ERR_SUP, "No support for this DMDA dimension %" PetscInt_FMT " for interpolation type %d", dimc, (int)ddc->interptype);
1047: }
1048: if (scale) PetscCall(DMCreateInterpolationScale((DM)dac, (DM)daf, *A, scale));
1049: PetscFunctionReturn(PETSC_SUCCESS);
1050: }
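/*
   Editorial usage sketch (illustrative, not part of the library source): the routine above
   is normally reached through the generic DMCreateInterpolation() interface on a pair of
   DMDAs related by refinement; the variable names below are hypothetical.

      DM  dac, daf;
      Mat P;
      Vec scale;

      PetscCall(DMRefine(dac, PetscObjectComm((PetscObject)dac), &daf));
      PetscCall(DMCreateInterpolation(dac, daf, &P, &scale));
      ... use P, e.g. MatInterpolate(P, coarse_vec, fine_vec) ...
      PetscCall(MatDestroy(&P));
      PetscCall(VecDestroy(&scale));
      PetscCall(DMDestroy(&daf));
*/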
1052: static PetscErrorCode DMCreateInjection_DA_1D(DM dac, DM daf, VecScatter *inject)
1053: {
1054: PetscInt i, i_start, m_f, Mx, dof;
1055: const PetscInt *idx_f;
1056: ISLocalToGlobalMapping ltog_f;
1057: PetscInt m_ghost, m_ghost_c;
1058: PetscInt row, i_start_ghost, mx, m_c, nc, ratioi;
1059: PetscInt i_start_c, i_start_ghost_c;
1060: PetscInt *cols;
1061: DMBoundaryType bx;
1062: Vec vecf, vecc;
1063: IS isf;
1065: PetscFunctionBegin;
1066: PetscCall(DMDAGetInfo(dac, NULL, &Mx, NULL, NULL, NULL, NULL, NULL, NULL, NULL, &bx, NULL, NULL, NULL));
1067: PetscCall(DMDAGetInfo(daf, NULL, &mx, NULL, NULL, NULL, NULL, NULL, &dof, NULL, NULL, NULL, NULL, NULL));
1068: if (bx == DM_BOUNDARY_PERIODIC) {
1069: ratioi = mx / Mx;
1070: PetscCheck(ratioi * Mx == mx, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: mx/Mx must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT, mx, Mx);
1071: } else {
1072: ratioi = (mx - 1) / (Mx - 1);
1073: PetscCheck(ratioi * (Mx - 1) == mx - 1, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT, mx, Mx);
1074: }
1076: PetscCall(DMDAGetCorners(daf, &i_start, NULL, NULL, &m_f, NULL, NULL));
1077: PetscCall(DMDAGetGhostCorners(daf, &i_start_ghost, NULL, NULL, &m_ghost, NULL, NULL));
1078: PetscCall(DMGetLocalToGlobalMapping(daf, &ltog_f));
1079: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f, &idx_f));
1081: PetscCall(DMDAGetCorners(dac, &i_start_c, NULL, NULL, &m_c, NULL, NULL));
1082: PetscCall(DMDAGetGhostCorners(dac, &i_start_ghost_c, NULL, NULL, &m_ghost_c, NULL, NULL));
1084: /* loop over the local coarse grid nodes, selecting the fine grid point that lies on each */
1085: nc = 0;
1086: PetscCall(PetscMalloc1(m_f, &cols));
1088: for (i = i_start_c; i < i_start_c + m_c; i++) {
1089: PetscInt i_f = i * ratioi;
1091: PetscCheck(i_f >= i_start_ghost && i_f < i_start_ghost + m_ghost, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA, i_c %" PetscInt_FMT " i_f %" PetscInt_FMT " fine ghost range [%" PetscInt_FMT ",%" PetscInt_FMT "]", i, i_f, i_start_ghost, i_start_ghost + m_ghost);
1093: row = idx_f[i_f - i_start_ghost];
1094: cols[nc++] = row;
1095: }
1097: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f, &idx_f));
1098: PetscCall(ISCreateBlock(PetscObjectComm((PetscObject)daf), dof, nc, cols, PETSC_OWN_POINTER, &isf));
1099: PetscCall(DMGetGlobalVector(dac, &vecc));
1100: PetscCall(DMGetGlobalVector(daf, &vecf));
1101: PetscCall(VecScatterCreate(vecf, isf, vecc, NULL, inject));
1102: PetscCall(DMRestoreGlobalVector(dac, &vecc));
1103: PetscCall(DMRestoreGlobalVector(daf, &vecf));
1104: PetscCall(ISDestroy(&isf));
1105: PetscFunctionReturn(PETSC_SUCCESS);
1106: }
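/*
   Editorial note: the scatter built above selects, for each local coarse grid point, the
   fine grid value at the point lying directly on it, so applying it performs pointwise
   injection from fine to coarse; a hedged usage sketch (vecf/vecc stand for global vectors
   of daf/dac):

      PetscCall(VecScatterBegin(inject, vecf, vecc, INSERT_VALUES, SCATTER_FORWARD));
      PetscCall(VecScatterEnd(inject, vecf, vecc, INSERT_VALUES, SCATTER_FORWARD));
*/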
1108: static PetscErrorCode DMCreateInjection_DA_2D(DM dac, DM daf, VecScatter *inject)
1109: {
1110: PetscInt i, j, i_start, j_start, m_f, n_f, Mx, My, dof;
1111: const PetscInt *idx_c, *idx_f;
1112: ISLocalToGlobalMapping ltog_f, ltog_c;
1113: PetscInt m_ghost, n_ghost, m_ghost_c, n_ghost_c;
1114: PetscInt row, i_start_ghost, j_start_ghost, mx, m_c, my, nc, ratioi, ratioj;
1115: PetscInt i_start_c, j_start_c, n_c, i_start_ghost_c, j_start_ghost_c;
1116: PetscInt *cols;
1117: DMBoundaryType bx, by;
1118: Vec vecf, vecc;
1119: IS isf;
1121: PetscFunctionBegin;
1122: PetscCall(DMDAGetInfo(dac, NULL, &Mx, &My, NULL, NULL, NULL, NULL, NULL, NULL, &bx, &by, NULL, NULL));
1123: PetscCall(DMDAGetInfo(daf, NULL, &mx, &my, NULL, NULL, NULL, NULL, &dof, NULL, NULL, NULL, NULL, NULL));
1124: if (bx == DM_BOUNDARY_PERIODIC) {
1125: ratioi = mx / Mx;
1126: PetscCheck(ratioi * Mx == mx, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: mx/Mx must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT, mx, Mx);
1127: } else {
1128: ratioi = (mx - 1) / (Mx - 1);
1129: PetscCheck(ratioi * (Mx - 1) == mx - 1, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT, mx, Mx);
1130: }
1131: if (by == DM_BOUNDARY_PERIODIC) {
1132: ratioj = my / My;
1133: PetscCheck(ratioj * My == my, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: my/My must be integer: my %" PetscInt_FMT " My %" PetscInt_FMT, my, My);
1134: } else {
1135: ratioj = (my - 1) / (My - 1);
1136: PetscCheck(ratioj * (My - 1) == my - 1, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: (my - 1)/(My - 1) must be integer: my %" PetscInt_FMT " My %" PetscInt_FMT, my, My);
1137: }
1139: PetscCall(DMDAGetCorners(daf, &i_start, &j_start, NULL, &m_f, &n_f, NULL));
1140: PetscCall(DMDAGetGhostCorners(daf, &i_start_ghost, &j_start_ghost, NULL, &m_ghost, &n_ghost, NULL));
1141: PetscCall(DMGetLocalToGlobalMapping(daf, <og_f));
1142: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f, &idx_f));
1144: PetscCall(DMDAGetCorners(dac, &i_start_c, &j_start_c, NULL, &m_c, &n_c, NULL));
1145: PetscCall(DMDAGetGhostCorners(dac, &i_start_ghost_c, &j_start_ghost_c, NULL, &m_ghost_c, &n_ghost_c, NULL));
1146: PetscCall(DMGetLocalToGlobalMapping(dac, <og_c));
1147: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c, &idx_c));
1149: /* loop over local coarse grid nodes, recording the coincident fine grid node each one injects from */
1150: nc = 0;
1151: PetscCall(PetscMalloc1(n_f * m_f, &cols));
1152: for (j = j_start_c; j < j_start_c + n_c; j++) {
1153: for (i = i_start_c; i < i_start_c + m_c; i++) {
1154: PetscInt i_f = i * ratioi, j_f = j * ratioj;
1155: PetscCheck(j_f >= j_start_ghost && j_f < j_start_ghost + n_ghost, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA, j_c %" PetscInt_FMT " j_f %" PetscInt_FMT " fine ghost range [%" PetscInt_FMT ",%" PetscInt_FMT "]", j, j_f, j_start_ghost, j_start_ghost + n_ghost);
1156: PetscCheck(i_f >= i_start_ghost && i_f < i_start_ghost + m_ghost, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA, i_c %" PetscInt_FMT " i_f %" PetscInt_FMT " fine ghost range [%" PetscInt_FMT ",%" PetscInt_FMT "]", i, i_f, i_start_ghost, i_start_ghost + m_ghost);
1157: row = idx_f[(m_ghost * (j_f - j_start_ghost) + (i_f - i_start_ghost))];
1158: cols[nc++] = row;
1159: }
1160: }
1161: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f, &idx_f));
1162: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c, &idx_c));
1164: PetscCall(ISCreateBlock(PetscObjectComm((PetscObject)daf), dof, nc, cols, PETSC_OWN_POINTER, &isf));
1165: PetscCall(DMGetGlobalVector(dac, &vecc));
1166: PetscCall(DMGetGlobalVector(daf, &vecf));
1167: PetscCall(VecScatterCreate(vecf, isf, vecc, NULL, inject));
1168: PetscCall(DMRestoreGlobalVector(dac, &vecc));
1169: PetscCall(DMRestoreGlobalVector(daf, &vecf));
1170: PetscCall(ISDestroy(&isf));
1171: PetscFunctionReturn(PETSC_SUCCESS);
1172: }
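/*
  Minimal sketch (hypothetical values) of the ghosted local numbering used to index idx_f
  above: a rank's fine ghost region is an m_ghost x n_ghost patch stored row-major in i, so
  node (i_f, j_f) has local offset m_ghost*(j_f - j_start_ghost) + (i_f - i_start_ghost);
  the 3D routine below extends this with an additional m_ghost*n_ghost*(k_f - k_start_ghost) term.

    PetscInt m_ghost = 6, i_start_ghost = 3, j_start_ghost = 2; // assumed ghost box of one rank
    PetscInt i_f = 5, j_f = 4;                                  // a fine node visited by the coarse loop
    PetscInt loc = m_ghost * (j_f - j_start_ghost) + (i_f - i_start_ghost); // = 6*2 + 2 = 14
*/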
1174: static PetscErrorCode DMCreateInjection_DA_3D(DM dac, DM daf, VecScatter *inject)
1175: {
1176: PetscInt i, j, k, i_start, j_start, k_start, m_f, n_f, p_f, Mx, My, Mz;
1177: PetscInt m_ghost, n_ghost, p_ghost, m_ghost_c, n_ghost_c, p_ghost_c;
1178: PetscInt i_start_ghost, j_start_ghost, k_start_ghost;
1179: PetscInt mx, my, mz, ratioi, ratioj, ratiok;
1180: PetscInt i_start_c, j_start_c, k_start_c;
1181: PetscInt m_c, n_c, p_c;
1182: PetscInt i_start_ghost_c, j_start_ghost_c, k_start_ghost_c;
1183: PetscInt row, nc, dof;
1184: const PetscInt *idx_c, *idx_f;
1185: ISLocalToGlobalMapping ltog_f, ltog_c;
1186: PetscInt *cols;
1187: DMBoundaryType bx, by, bz;
1188: Vec vecf, vecc;
1189: IS isf;
1191: PetscFunctionBegin;
1192: PetscCall(DMDAGetInfo(dac, NULL, &Mx, &My, &Mz, NULL, NULL, NULL, NULL, NULL, &bx, &by, &bz, NULL));
1193: PetscCall(DMDAGetInfo(daf, NULL, &mx, &my, &mz, NULL, NULL, NULL, &dof, NULL, NULL, NULL, NULL, NULL));
1195: if (bx == DM_BOUNDARY_PERIODIC) {
1196: ratioi = mx / Mx;
1197: PetscCheck(ratioi * Mx == mx, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: mx/Mx must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT, mx, Mx);
1198: } else {
1199: ratioi = (mx - 1) / (Mx - 1);
1200: PetscCheck(ratioi * (Mx - 1) == mx - 1, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT, mx, Mx);
1201: }
1202: if (by == DM_BOUNDARY_PERIODIC) {
1203: ratioj = my / My;
1204: PetscCheck(ratioj * My == my, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: my/My must be integer: my %" PetscInt_FMT " My %" PetscInt_FMT, my, My);
1205: } else {
1206: ratioj = (my - 1) / (My - 1);
1207: PetscCheck(ratioj * (My - 1) == my - 1, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: (my - 1)/(My - 1) must be integer: my %" PetscInt_FMT " My %" PetscInt_FMT, my, My);
1208: }
1209: if (bz == DM_BOUNDARY_PERIODIC) {
1210: ratiok = mz / Mz;
1211: PetscCheck(ratiok * Mz == mz, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: mz/Mz must be integer: mz %" PetscInt_FMT " Mz %" PetscInt_FMT, mz, Mz);
1212: } else {
1213: ratiok = (mz - 1) / (Mz - 1);
1214: PetscCheck(ratiok * (Mz - 1) == mz - 1, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: (mz - 1)/(Mz - 1) must be integer: mz %" PetscInt_FMT " Mz %" PetscInt_FMT, mz, Mz);
1215: }
1217: PetscCall(DMDAGetCorners(daf, &i_start, &j_start, &k_start, &m_f, &n_f, &p_f));
1218: PetscCall(DMDAGetGhostCorners(daf, &i_start_ghost, &j_start_ghost, &k_start_ghost, &m_ghost, &n_ghost, &p_ghost));
1219: PetscCall(DMGetLocalToGlobalMapping(daf, <og_f));
1220: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f, &idx_f));
1222: PetscCall(DMDAGetCorners(dac, &i_start_c, &j_start_c, &k_start_c, &m_c, &n_c, &p_c));
1223: PetscCall(DMDAGetGhostCorners(dac, &i_start_ghost_c, &j_start_ghost_c, &k_start_ghost_c, &m_ghost_c, &n_ghost_c, &p_ghost_c));
1224: PetscCall(DMGetLocalToGlobalMapping(dac, <og_c));
1225: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c, &idx_c));
1227: /* loop over local coarse grid nodes, recording the coincident fine grid node each one injects from */
1228: nc = 0;
1229: PetscCall(PetscMalloc1(n_f * m_f * p_f, &cols));
1230: for (k = k_start_c; k < k_start_c + p_c; k++) {
1231: for (j = j_start_c; j < j_start_c + n_c; j++) {
1232: for (i = i_start_c; i < i_start_c + m_c; i++) {
1233: PetscInt i_f = i * ratioi, j_f = j * ratioj, k_f = k * ratiok;
1234: PetscCheck(k_f >= k_start_ghost && k_f < k_start_ghost + p_ghost, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP,
1235: "Processor's coarse DMDA must lie over fine DMDA "
1236: "k_c %" PetscInt_FMT " k_f %" PetscInt_FMT " fine ghost range [%" PetscInt_FMT ",%" PetscInt_FMT "]",
1237: k, k_f, k_start_ghost, k_start_ghost + p_ghost);
1238: PetscCheck(j_f >= j_start_ghost && j_f < j_start_ghost + n_ghost, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP,
1239: "Processor's coarse DMDA must lie over fine DMDA "
1240: "j_c %" PetscInt_FMT " j_f %" PetscInt_FMT " fine ghost range [%" PetscInt_FMT ",%" PetscInt_FMT "]",
1241: j, j_f, j_start_ghost, j_start_ghost + n_ghost);
1242: PetscCheck(i_f >= i_start_ghost && i_f < i_start_ghost + m_ghost, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP,
1243: "Processor's coarse DMDA must lie over fine DMDA "
1244: "i_c %" PetscInt_FMT " i_f %" PetscInt_FMT " fine ghost range [%" PetscInt_FMT ",%" PetscInt_FMT "]",
1245: i, i_f, i_start_ghost, i_start_ghost + m_ghost);
1246: row = idx_f[(m_ghost * n_ghost * (k_f - k_start_ghost) + m_ghost * (j_f - j_start_ghost) + (i_f - i_start_ghost))];
1247: cols[nc++] = row;
1248: }
1249: }
1250: }
1251: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f, &idx_f));
1252: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c, &idx_c));
1254: PetscCall(ISCreateBlock(PetscObjectComm((PetscObject)daf), dof, nc, cols, PETSC_OWN_POINTER, &isf));
1255: PetscCall(DMGetGlobalVector(dac, &vecc));
1256: PetscCall(DMGetGlobalVector(daf, &vecf));
1257: PetscCall(VecScatterCreate(vecf, isf, vecc, NULL, inject));
1258: PetscCall(DMRestoreGlobalVector(dac, &vecc));
1259: PetscCall(DMRestoreGlobalVector(daf, &vecf));
1260: PetscCall(ISDestroy(&isf));
1261: PetscFunctionReturn(PETSC_SUCCESS);
1262: }
1264: PetscErrorCode DMCreateInjection_DA(DM dac, DM daf, Mat *mat)
1265: {
1266: PetscInt dimc, Mc, Nc, Pc, mc, nc, pc, dofc, sc, dimf, Mf, Nf, Pf, mf, nf, pf, doff, sf;
1267: DMBoundaryType bxc, byc, bzc, bxf, byf, bzf;
1268: DMDAStencilType stc, stf;
1269: VecScatter inject = NULL;
1271: PetscFunctionBegin;
1274: PetscAssertPointer(mat, 3);
1276: PetscCall(DMDAGetInfo(dac, &dimc, &Mc, &Nc, &Pc, &mc, &nc, &pc, &dofc, &sc, &bxc, &byc, &bzc, &stc));
1277: PetscCall(DMDAGetInfo(daf, &dimf, &Mf, &Nf, &Pf, &mf, &nf, &pf, &doff, &sf, &bxf, &byf, &bzf, &stf));
1278: PetscCheck(dimc == dimf, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "Dimensions of DMDA do not match %" PetscInt_FMT " %" PetscInt_FMT, dimc, dimf);
1279: PetscCheck(dofc == doff, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "DOF of DMDA do not match %" PetscInt_FMT " %" PetscInt_FMT, dofc, doff);
1280: PetscCheck(sc == sf, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "Stencil widths of DMDA do not match %" PetscInt_FMT " %" PetscInt_FMT, sc, sf);
1281: PetscCheck(bxc == bxf && byc == byf && bzc == bzf, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "Boundary type different in two DMDAs");
1282: PetscCheck(stc == stf, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "Stencil type different in two DMDAs");
1283: PetscCheck(Mc >= 2, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Coarse grid requires at least 2 points in x direction");
1284: PetscCheck(dimc <= 1 || Nc >= 2, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Coarse grid requires at least 2 points in y direction");
1285: PetscCheck(dimc <= 2 || Pc >= 2, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Coarse grid requires at least 2 points in z direction");
1287: if (dimc == 1) {
1288: PetscCall(DMCreateInjection_DA_1D(dac, daf, &inject));
1289: } else if (dimc == 2) {
1290: PetscCall(DMCreateInjection_DA_2D(dac, daf, &inject));
1291: } else if (dimc == 3) {
1292: PetscCall(DMCreateInjection_DA_3D(dac, daf, &inject));
1293: }
1294: PetscCall(MatCreateScatter(PetscObjectComm((PetscObject)inject), inject, mat));
1295: PetscCall(VecScatterDestroy(&inject));
1296: PetscFunctionReturn(PETSC_SUCCESS);
1297: }
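/*
  Illustrative usage sketch (grid sizes are assumptions for the example): the public entry
  point DMCreateInjection() returns the scatter wrapped as a Mat, which can then be applied,
  e.g. with MatRestrict(), to move fine-grid values onto the coincident coarse-grid nodes.

    DM  dac, daf;
    Mat inject;
    Vec xf, xc;

    PetscCall(DMDACreate2d(PETSC_COMM_WORLD, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DMDA_STENCIL_STAR, 5, 5, PETSC_DECIDE, PETSC_DECIDE, 1, 1, NULL, NULL, &dac));
    PetscCall(DMSetUp(dac));
    PetscCall(DMRefine(dac, PETSC_COMM_WORLD, &daf));
    PetscCall(DMCreateInjection(dac, daf, &inject));
    PetscCall(DMCreateGlobalVector(daf, &xf));
    PetscCall(DMCreateGlobalVector(dac, &xc));
    PetscCall(MatRestrict(inject, xf, xc)); // xc receives the fine values at the coarse node locations
*/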
1299: // PetscClangLinter pragma ignore: -fdoc-*
1300: /*@
1301: DMCreateAggregates - Deprecated, use `DMDACreateAggregates()` instead.
1303: Level: intermediate
1304: @*/
1305: PetscErrorCode DMCreateAggregates(DM dac, DM daf, Mat *mat)
1306: {
1307: return DMDACreateAggregates(dac, daf, mat);
1308: }
1310: /*@
1311: DMDACreateAggregates - Gets the aggregates that map between
1312: grids associated with two `DMDA`s
1314: Collective
1316: Input Parameters:
1317: + dac - the coarse grid `DMDA`
1318: - daf - the fine grid `DMDA`
1320: Output Parameter:
1321: . rest - the restriction matrix (transpose of the projection matrix)
1323: Level: intermediate
1325: Note:
1326: This routine is not used by PETSc.
1327: It is not clear what its use case is and it may be removed in a future release.
1328: Users should contact petsc-maint@mcs.anl.gov if they plan to use it.
1330: .seealso: [](sec_struct), `DMRefine()`, `DMCreateInjection()`, `DMCreateInterpolation()`
1331: @*/
1332: PetscErrorCode DMDACreateAggregates(DM dac, DM daf, Mat *rest)
1333: {
1334: PetscInt dimc, Mc, Nc, Pc, mc, nc, pc, dofc, sc;
1335: PetscInt dimf, Mf, Nf, Pf, mf, nf, pf, doff, sf;
1336: DMBoundaryType bxc, byc, bzc, bxf, byf, bzf;
1337: DMDAStencilType stc, stf;
1338: PetscInt i, j, l;
1339: PetscInt i_start, j_start, l_start, m_f, n_f, p_f;
1340: PetscInt i_start_ghost, j_start_ghost, l_start_ghost, m_ghost, n_ghost, p_ghost;
1341: const PetscInt *idx_f;
1342: PetscInt i_c, j_c, l_c;
1343: PetscInt i_start_c, j_start_c, l_start_c, m_c, n_c, p_c;
1344: PetscInt i_start_ghost_c, j_start_ghost_c, l_start_ghost_c, m_ghost_c, n_ghost_c, p_ghost_c;
1345: const PetscInt *idx_c;
1346: PetscInt d;
1347: PetscInt a;
1348: PetscInt max_agg_size;
1349: PetscInt *fine_nodes;
1350: PetscScalar *one_vec;
1351: PetscInt fn_idx;
1352: ISLocalToGlobalMapping ltogmf, ltogmc;
1354: PetscFunctionBegin;
1357: PetscAssertPointer(rest, 3);
1359: PetscCall(DMDAGetInfo(dac, &dimc, &Mc, &Nc, &Pc, &mc, &nc, &pc, &dofc, &sc, &bxc, &byc, &bzc, &stc));
1360: PetscCall(DMDAGetInfo(daf, &dimf, &Mf, &Nf, &Pf, &mf, &nf, &pf, &doff, &sf, &bxf, &byf, &bzf, &stf));
1361: PetscCheck(dimc == dimf, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "Dimensions of DMDA do not match %" PetscInt_FMT " %" PetscInt_FMT, dimc, dimf);
1362: PetscCheck(dofc == doff, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "DOF of DMDA do not match %" PetscInt_FMT " %" PetscInt_FMT, dofc, doff);
1363: PetscCheck(sc == sf, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "Stencil widths of DMDA do not match %" PetscInt_FMT " %" PetscInt_FMT, sc, sf);
1364: PetscCheck(bxc == bxf && byc == byf && bzc == bzf, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "Boundary type different in two DMDAs");
1365: PetscCheck(stc == stf, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "Stencil type different in two DMDAs");
1367: PetscCheck(Mf >= Mc, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Coarse grid has more points than fine grid, Mc %" PetscInt_FMT ", Mf %" PetscInt_FMT, Mc, Mf);
1368: PetscCheck(Nf >= Nc, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Coarse grid has more points than fine grid, Nc %" PetscInt_FMT ", Nf %" PetscInt_FMT, Nc, Nf);
1369: PetscCheck(Pf >= Pc, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Coarse grid has more points than fine grid, Pc %" PetscInt_FMT ", Pf %" PetscInt_FMT, Pc, Pf);
1371: if (Pc < 0) Pc = 1;
1372: if (Pf < 0) Pf = 1;
1373: if (Nc < 0) Nc = 1;
1374: if (Nf < 0) Nf = 1;
1376: PetscCall(DMDAGetCorners(daf, &i_start, &j_start, &l_start, &m_f, &n_f, &p_f));
1377: PetscCall(DMDAGetGhostCorners(daf, &i_start_ghost, &j_start_ghost, &l_start_ghost, &m_ghost, &n_ghost, &p_ghost));
1379: PetscCall(DMGetLocalToGlobalMapping(daf, <ogmf));
1380: PetscCall(ISLocalToGlobalMappingGetIndices(ltogmf, &idx_f));
1382: PetscCall(DMDAGetCorners(dac, &i_start_c, &j_start_c, &l_start_c, &m_c, &n_c, &p_c));
1383: PetscCall(DMDAGetGhostCorners(dac, &i_start_ghost_c, &j_start_ghost_c, &l_start_ghost_c, &m_ghost_c, &n_ghost_c, &p_ghost_c));
1385: PetscCall(DMGetLocalToGlobalMapping(dac, <ogmc));
1386: PetscCall(ISLocalToGlobalMappingGetIndices(ltogmc, &idx_c));
1388: /*
1389: The basic idea is as follows. Here is a 2D example: suppose r_x and r_y are the ratios
1390: between the fine and coarse grid sizes in dimensions 1 and 2 respectively.
1391: Let (i,j) be a coarse grid node. All fine grid nodes (i_f,j_f) with r_x*i <= i_f < r_x*(i+1)
1392: and r_y*j <= j_f < r_y*(j+1) are grouped into the same coarse grid aggregate.
1393: Each dof on the fine grid is mapped to the corresponding dof on the coarse grid.
1394: */
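/*
  Minimal 1D sketch (assumed sizes) of the grouping described above: with Mc = 3 coarse and
  Mf = 7 fine nodes, coarse node i_c aggregates the fine nodes i_c*Mf/Mc <= i_f < (i_c+1)*Mf/Mc
  (integer division, capped at Mf), i.e. the aggregates are {0,1}, {2,3} and {4,5,6}.

    PetscInt Mc = 3, Mf = 7;
    for (PetscInt i_c = 0; i_c < Mc; i_c++) {
      for (PetscInt i_f = i_c * Mf / Mc; i_f < PetscMin((i_c + 1) * Mf / Mc, Mf); i_f++) {
        // fine node i_f belongs to the aggregate of coarse node i_c
      }
    }
*/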
1396: max_agg_size = (Mf / Mc + 1) * (Nf / Nc + 1) * (Pf / Pc + 1);
1398: /* create the matrix that will contain the restriction operator */
1399: PetscCall(MatCreateAIJ(PetscObjectComm((PetscObject)daf), m_c * n_c * p_c * dofc, m_f * n_f * p_f * doff, Mc * Nc * Pc * dofc, Mf * Nf * Pf * doff, max_agg_size, NULL, max_agg_size, NULL, rest));
1401: /* store nodes in the fine grid here */
1402: PetscCall(PetscMalloc2(max_agg_size, &one_vec, max_agg_size, &fine_nodes));
1403: for (i = 0; i < max_agg_size; i++) one_vec[i] = 1.0;
1405: /* loop over all coarse nodes */
1406: for (l_c = l_start_c; l_c < l_start_c + p_c; l_c++) {
1407: for (j_c = j_start_c; j_c < j_start_c + n_c; j_c++) {
1408: for (i_c = i_start_c; i_c < i_start_c + m_c; i_c++) {
1409: for (d = 0; d < dofc; d++) {
1410: /* convert to local "natural" numbering and then to PETSc global numbering */
1411: a = idx_c[dofc * (m_ghost_c * n_ghost_c * (l_c - l_start_ghost_c) + m_ghost_c * (j_c - j_start_ghost_c) + (i_c - i_start_ghost_c))] + d;
1413: fn_idx = 0;
1414: /* Corresponding fine points are all points (i_f, j_f, l_f) such that
1415: i_c*Mf/Mc <= i_f < (i_c+1)*Mf/Mc
1416: (same for other dimensions)
1417: */
1418: for (l = l_c * Pf / Pc; l < PetscMin((l_c + 1) * Pf / Pc, Pf); l++) {
1419: for (j = j_c * Nf / Nc; j < PetscMin((j_c + 1) * Nf / Nc, Nf); j++) {
1420: for (i = i_c * Mf / Mc; i < PetscMin((i_c + 1) * Mf / Mc, Mf); i++) {
1421: fine_nodes[fn_idx] = idx_f[doff * (m_ghost * n_ghost * (l - l_start_ghost) + m_ghost * (j - j_start_ghost) + (i - i_start_ghost))] + d;
1422: fn_idx++;
1423: }
1424: }
1425: }
1426: /* add all these points to one aggregate */
1427: PetscCall(MatSetValues(*rest, 1, &a, fn_idx, fine_nodes, one_vec, INSERT_VALUES));
1428: }
1429: }
1430: }
1431: }
1432: PetscCall(ISLocalToGlobalMappingRestoreIndices(ltogmf, &idx_f));
1433: PetscCall(ISLocalToGlobalMappingRestoreIndices(ltogmc, &idx_c));
1434: PetscCall(PetscFree2(one_vec, fine_nodes));
1435: PetscCall(MatAssemblyBegin(*rest, MAT_FINAL_ASSEMBLY));
1436: PetscCall(MatAssemblyEnd(*rest, MAT_FINAL_ASSEMBLY));
1437: PetscFunctionReturn(PETSC_SUCCESS);
1438: }
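/*
  Illustrative usage sketch (sizes are assumptions for the example): because every stored entry
  is 1.0, applying the aggregation matrix to a fine-grid vector sums, for each coarse node and
  dof, the values of all fine nodes in that aggregate.

    DM  dac, daf;
    Mat rest;
    Vec xf, xc;

    PetscCall(DMDACreate2d(PETSC_COMM_WORLD, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DMDA_STENCIL_STAR, 5, 5, PETSC_DECIDE, PETSC_DECIDE, 1, 1, NULL, NULL, &dac));
    PetscCall(DMSetUp(dac));
    PetscCall(DMRefine(dac, PETSC_COMM_WORLD, &daf));
    PetscCall(DMDACreateAggregates(dac, daf, &rest));
    PetscCall(DMCreateGlobalVector(daf, &xf));
    PetscCall(DMCreateGlobalVector(dac, &xc));
    PetscCall(VecSet(xf, 1.0));
    PetscCall(MatMult(rest, xf, xc)); // each coarse entry equals the number of fine nodes in its aggregate
*/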