Actual source code: comm.c

  1: /***********************************comm.c*************************************

  3: Author: Henry M. Tufo III

  5: e-mail: hmt@cs.brown.edu

  7: snail-mail:
  8: Division of Applied Mathematics
  9: Brown University
 10: Providence, RI 02912

 12: Last Modification:
 13: 11.21.97
 14: ***********************************comm.c*************************************/
 15: #include <../src/ksp/pc/impls/tfs/tfs.h>

 17: /* global program control variables - explicitly exported */
 18: PetscMPIInt PCTFS_my_id            = 0;
 19: PetscMPIInt PCTFS_num_nodes        = 1;
 20: PetscMPIInt PCTFS_floor_num_nodes  = 0;
 21: PetscMPIInt PCTFS_i_log2_num_nodes = 0;

 23: /* global program control variables */
 24: static PetscInt p_init = 0;
 25: static PetscInt modfl_num_nodes;
 26: static PetscInt edge_not_pow_2;

 28: static PetscInt edge_node[sizeof(PetscInt) * 32];

 30: /***********************************comm.c*************************************/
 31: PetscErrorCode PCTFS_comm_init(void)
 32: {
 33:   PetscFunctionBegin;
 34:   if (p_init++) PetscFunctionReturn(PETSC_SUCCESS);

 36:   PetscCallMPI(MPI_Comm_size(MPI_COMM_WORLD, &PCTFS_num_nodes));
 37:   PetscCallMPI(MPI_Comm_rank(MPI_COMM_WORLD, &PCTFS_my_id));

 39:   PetscCheck(PCTFS_num_nodes <= (INT_MAX >> 1), PETSC_COMM_SELF, PETSC_ERR_PLIB, "Can't have more than INT_MAX/2 nodes");

 41:   PetscCall(PCTFS_ivec_zero((PetscInt *)edge_node, sizeof(PetscInt) * 32));

 43:   PCTFS_floor_num_nodes  = 1;
 44:   PCTFS_i_log2_num_nodes = modfl_num_nodes = 0;
 45:   while (PCTFS_floor_num_nodes <= PCTFS_num_nodes) {
 46:     edge_node[PCTFS_i_log2_num_nodes] = PCTFS_my_id ^ PCTFS_floor_num_nodes;
 47:     PCTFS_floor_num_nodes <<= 1;
 48:     PCTFS_i_log2_num_nodes++;
 49:   }

 51:   PCTFS_i_log2_num_nodes--;
 52:   PCTFS_floor_num_nodes >>= 1;
 53:   modfl_num_nodes = (PCTFS_num_nodes - PCTFS_floor_num_nodes);

 55:   if ((PCTFS_my_id > 0) && (PCTFS_my_id <= modfl_num_nodes)) edge_not_pow_2 = ((PCTFS_my_id | PCTFS_floor_num_nodes) - 1);
 56:   else if (PCTFS_my_id >= PCTFS_floor_num_nodes) edge_not_pow_2 = ((PCTFS_my_id ^ PCTFS_floor_num_nodes) + 1);
 57:   else edge_not_pow_2 = 0;
 58:   PetscFunctionReturn(PETSC_SUCCESS);
 59: }
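
As a worked example of the bookkeeping above: with PCTFS_num_nodes = 6 the loop leaves PCTFS_floor_num_nodes = 4, PCTFS_i_log2_num_nodes = 2 and modfl_num_nodes = 2. Ranks 1 and 2 then get edge_not_pow_2 = 4 and 5, ranks 4 and 5 get edge_not_pow_2 = 1 and 2, and ranks 0 and 3 get 0, so the two surplus ranks are folded onto the 4-node hypercube through the pairs (1,4) and (2,5) before any hypercube exchange begins.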

 61: /***********************************comm.c*************************************/
 62: PetscErrorCode PCTFS_giop(PetscInt *vals, PetscInt *work, PetscInt n, PetscInt *oprs)
 63: {
 64:   PetscInt   mask, edge;
 65:   PetscInt   type, dest;
 66:   vfp        fp;
 67:   MPI_Status status;

 69:   PetscFunctionBegin;
 70:   /* ok ... should have some data, work, and operator(s) */
 71:   PetscCheck(vals && work && oprs, PETSC_COMM_SELF, PETSC_ERR_PLIB, "PCTFS_giop() :: vals=%p, work=%p, oprs=%p", (void *)vals, (void *)work, (void *)oprs);

 73:   /* non-uniform should have at least two entries */
 74:   PetscCheck(!(oprs[0] == NON_UNIFORM) || !(n < 2), PETSC_COMM_SELF, PETSC_ERR_PLIB, "PCTFS_giop() :: non_uniform and n=0,1?");

 76:   /* check to make sure comm package has been initialized */
 77:   if (!p_init) PetscCall(PCTFS_comm_init());

 79:   /* if there's nothing to do return */
 80:   if ((PCTFS_num_nodes < 2) || (!n)) PetscFunctionReturn(PETSC_SUCCESS);

 82:   /* a negative number of items to send ==> fatal */
 83:   PetscCheck(n >= 0, PETSC_COMM_SELF, PETSC_ERR_PLIB, "PCTFS_giop() :: n=%" PetscInt_FMT "<0?", n);

 85:   /* advance to list of n operations for custom */
 86:   if ((type = oprs[0]) == NON_UNIFORM) oprs++;

 88:   /* retrieve the reduction function pointer for this operation type (cast through vfp) */
 89:   PetscCheck(fp = (vfp)PCTFS_ivec_fct_addr(type), PETSC_COMM_SELF, PETSC_ERR_PLIB, "PCTFS_giop() :: Could not retrieve function pointer!");

 91:   /* all msgs will be of the same length */
 92:   /* if not a hypercube must collapse partial dim */
 93:   if (edge_not_pow_2) {
 94:     if (PCTFS_my_id >= PCTFS_floor_num_nodes) {
 95:       PetscCallMPI(MPI_Send(vals, n, MPIU_INT, edge_not_pow_2, MSGTAG0 + PCTFS_my_id, MPI_COMM_WORLD));
 96:     } else {
 97:       PetscCallMPI(MPI_Recv(work, n, MPIU_INT, MPI_ANY_SOURCE, MSGTAG0 + edge_not_pow_2, MPI_COMM_WORLD, &status));
 98:       PetscCall((*fp)(vals, work, n, oprs));
 99:     }
100:   }

102:   /* implement the mesh fan in/out exchange algorithm */
103:   if (PCTFS_my_id < PCTFS_floor_num_nodes) {
104:     for (mask = 1, edge = 0; edge < PCTFS_i_log2_num_nodes; edge++, mask <<= 1) {
105:       dest = PCTFS_my_id ^ mask;
106:       if (PCTFS_my_id > dest) PetscCallMPI(MPI_Send(vals, n, MPIU_INT, dest, MSGTAG2 + PCTFS_my_id, MPI_COMM_WORLD));
107:       else {
108:         PetscCallMPI(MPI_Recv(work, n, MPIU_INT, MPI_ANY_SOURCE, MSGTAG2 + dest, MPI_COMM_WORLD, &status));
109:         PetscCall((*fp)(vals, work, n, oprs));
110:       }
111:     }

113:     mask = PCTFS_floor_num_nodes >> 1;
114:     for (edge = 0; edge < PCTFS_i_log2_num_nodes; edge++, mask >>= 1) {
115:       if (PCTFS_my_id % mask) continue;

117:       dest = PCTFS_my_id ^ mask;
118:       if (PCTFS_my_id < dest) PetscCallMPI(MPI_Send(vals, n, MPIU_INT, dest, MSGTAG4 + PCTFS_my_id, MPI_COMM_WORLD));
119:       else PetscCallMPI(MPI_Recv(vals, n, MPIU_INT, MPI_ANY_SOURCE, MSGTAG4 + dest, MPI_COMM_WORLD, &status));
120:     }
121:   }

123:   /* if not a hypercube must expand to partial dim */
124:   if (edge_not_pow_2) {
125:     if (PCTFS_my_id >= PCTFS_floor_num_nodes) {
126:       PetscCallMPI(MPI_Recv(vals, n, MPIU_INT, MPI_ANY_SOURCE, MSGTAG5 + edge_not_pow_2, MPI_COMM_WORLD, &status));
127:     } else {
128:       PetscCallMPI(MPI_Send(vals, n, MPIU_INT, edge_not_pow_2, MSGTAG5 + PCTFS_my_id, MPI_COMM_WORLD));
129:     }
130:   }
131:   PetscFunctionReturn(PETSC_SUCCESS);
132: }
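
A minimal usage sketch for PCTFS_giop(): every rank contributes one integer and all ranks receive the global result. The helper name global_int_sum and the operation code GL_ADD (taken to be the additive entry of this package's operation table in tfs.h) are illustrative assumptions, not code from this file.

   #include <../src/ksp/pc/impls/tfs/tfs.h>

   /* Hypothetical helper: global sum of one PetscInt per rank via PCTFS_giop().
      GL_ADD is assumed to be the additive operation code declared in tfs.h. */
   static PetscErrorCode global_int_sum(PetscInt local, PetscInt *global)
   {
     PetscInt vals[1], work[1];
     PetscInt op[] = {GL_ADD, 0}; /* uniform operation applied to every entry */

     PetscFunctionBegin;
     vals[0] = local;
     PetscCall(PCTFS_giop(vals, work, 1, op)); /* reduce in place across MPI_COMM_WORLD */
     *global = vals[0];
     PetscFunctionReturn(PETSC_SUCCESS);
   }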

134: /***********************************comm.c*************************************/
135: PetscErrorCode PCTFS_grop(PetscScalar *vals, PetscScalar *work, PetscInt n, PetscInt *oprs)
136: {
137:   PetscInt   mask, edge;
138:   PetscInt   type, dest;
139:   vfp        fp;
140:   MPI_Status status;

142:   PetscFunctionBegin;
143:   /* ok ... should have some data, work, and operator(s) */
144:   PetscCheck(vals && work && oprs, PETSC_COMM_SELF, PETSC_ERR_PLIB, "PCTFS_grop() :: vals=%p, work=%p, oprs=%p", (void *)vals, (void *)work, (void *)oprs);

146:   /* non-uniform should have at least two entries */
147:   PetscCheck(!(oprs[0] == NON_UNIFORM) || !(n < 2), PETSC_COMM_SELF, PETSC_ERR_PLIB, "PCTFS_grop() :: non_uniform and n=0,1?");

149:   /* check to make sure comm package has been initialized */
150:   if (!p_init) PetscCall(PCTFS_comm_init());

152:   /* if there's nothing to do return */
153:   if ((PCTFS_num_nodes < 2) || (!n)) PetscFunctionReturn(PETSC_SUCCESS);

155:   /* a negative number of items to send ==> fatal */
156:   PetscCheck(n >= 0, PETSC_COMM_SELF, PETSC_ERR_PLIB, "PCTFS_grop() :: n=%" PetscInt_FMT "<0?", n);

158:   /* advance to list of n operations for custom */
159:   if ((type = oprs[0]) == NON_UNIFORM) oprs++;

161:   PetscCheck(fp = (vfp)PCTFS_rvec_fct_addr(type), PETSC_COMM_SELF, PETSC_ERR_PLIB, "PCTFS_grop() :: Could not retrieve function pointer!");

163:   /* all msgs will be of the same length */
164:   /* if not a hypercube must collapse partial dim */
165:   if (edge_not_pow_2) {
166:     if (PCTFS_my_id >= PCTFS_floor_num_nodes) {
167:       PetscCallMPI(MPI_Send(vals, n, MPIU_SCALAR, edge_not_pow_2, MSGTAG0 + PCTFS_my_id, MPI_COMM_WORLD));
168:     } else {
169:       PetscCallMPI(MPI_Recv(work, n, MPIU_SCALAR, MPI_ANY_SOURCE, MSGTAG0 + edge_not_pow_2, MPI_COMM_WORLD, &status));
170:       PetscCall((*fp)(vals, work, n, oprs));
171:     }
172:   }

174:   /* implement the mesh fan in/out exchange algorithm */
175:   if (PCTFS_my_id < PCTFS_floor_num_nodes) {
176:     for (mask = 1, edge = 0; edge < PCTFS_i_log2_num_nodes; edge++, mask <<= 1) {
177:       dest = PCTFS_my_id ^ mask;
178:       if (PCTFS_my_id > dest) PetscCallMPI(MPI_Send(vals, n, MPIU_SCALAR, dest, MSGTAG2 + PCTFS_my_id, MPI_COMM_WORLD));
179:       else {
180:         PetscCallMPI(MPI_Recv(work, n, MPIU_SCALAR, MPI_ANY_SOURCE, MSGTAG2 + dest, MPI_COMM_WORLD, &status));
181:         PetscCall((*fp)(vals, work, n, oprs));
182:       }
183:     }

185:     mask = PCTFS_floor_num_nodes >> 1;
186:     for (edge = 0; edge < PCTFS_i_log2_num_nodes; edge++, mask >>= 1) {
187:       if (PCTFS_my_id % mask) continue;

189:       dest = PCTFS_my_id ^ mask;
190:       if (PCTFS_my_id < dest) PetscCallMPI(MPI_Send(vals, n, MPIU_SCALAR, dest, MSGTAG4 + PCTFS_my_id, MPI_COMM_WORLD));
191:       else PetscCallMPI(MPI_Recv(vals, n, MPIU_SCALAR, MPI_ANY_SOURCE, MSGTAG4 + dest, MPI_COMM_WORLD, &status));
192:     }
193:   }

195:   /* if not a hypercube must expand to partial dim */
196:   if (edge_not_pow_2) {
197:     if (PCTFS_my_id >= PCTFS_floor_num_nodes) {
198:       PetscCallMPI(MPI_Recv(vals, n, MPIU_SCALAR, MPI_ANY_SOURCE, MSGTAG5 + edge_not_pow_2, MPI_COMM_WORLD, &status));
199:     } else {
200:       PetscCallMPI(MPI_Send(vals, n, MPIU_SCALAR, edge_not_pow_2, MSGTAG5 + PCTFS_my_id, MPI_COMM_WORLD));
201:     }
202:   }
203:   PetscFunctionReturn(PETSC_SUCCESS);
204: }
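
PCTFS_grop() is the PetscScalar counterpart of PCTFS_giop(). When oprs[0] == NON_UNIFORM the routine steps past that sentinel and hands the remaining entries of oprs to the per-entry reduction function, so one call can apply a different operation to each slot. A sketch under the assumption that NON_UNIFORM, GL_ADD and GL_MAX are the operation codes defined in tfs.h and that the per-entry dispatch behaves as the NON_UNIFORM branch suggests:

   /* Hypothetical helper: sum entry 0 and take the maximum of entry 1 across all ranks. */
   static PetscErrorCode sum_and_max(PetscScalar *a, PetscScalar *b)
   {
     PetscScalar vals[2], work[2];
     PetscInt    op[] = {NON_UNIFORM, GL_ADD, GL_MAX}; /* sentinel, then one (assumed) op per entry */

     PetscFunctionBegin;
     vals[0] = *a;
     vals[1] = *b;
     PetscCall(PCTFS_grop(vals, work, 2, op));
     *a = vals[0];
     *b = vals[1];
     PetscFunctionReturn(PETSC_SUCCESS);
   }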

206: /***********************************comm.c*************************************/
207: PetscErrorCode PCTFS_grop_hc(PetscScalar *vals, PetscScalar *work, PetscInt n, PetscInt *oprs, PetscInt dim)
208: {
209:   PetscInt   mask, edge;
210:   PetscInt   type, dest;
211:   vfp        fp;
212:   MPI_Status status;

214:   PetscFunctionBegin;
215:   /* ok ... should have some data, work, and operator(s) */
216:   PetscCheck(vals && work && oprs, PETSC_COMM_SELF, PETSC_ERR_PLIB, "PCTFS_grop_hc() :: vals=%p, work=%p, oprs=%p", (void *)vals, (void *)work, (void *)oprs);

218:   /* non-uniform should have at least two entries */
219:   PetscCheck(!(oprs[0] == NON_UNIFORM) || !(n < 2), PETSC_COMM_SELF, PETSC_ERR_PLIB, "PCTFS_grop_hc() :: non_uniform and n=0,1?");

221:   /* check to make sure comm package has been initialized */
222:   if (!p_init) PetscCall(PCTFS_comm_init());

224:   /* if there's nothing to do return */
225:   if ((PCTFS_num_nodes < 2) || (!n) || (dim <= 0)) PetscFunctionReturn(PETSC_SUCCESS);

227:   /* this routine requires PCTFS_num_nodes to be a power of two */
228:   PetscCheck(!modfl_num_nodes, PETSC_COMM_SELF, PETSC_ERR_PLIB, "PCTFS_grop_hc() :: PCTFS_num_nodes not a power of 2!?!");

230:   /* a negative number of items to send ==> fatal */
231:   PetscCheck(n >= 0, PETSC_COMM_SELF, PETSC_ERR_PLIB, "PCTFS_grop_hc() :: n=%" PetscInt_FMT "<0?", n);

233:   /* can't do more dimensions than exist */
234:   dim = PetscMin(dim, PCTFS_i_log2_num_nodes);

236:   /* advance to list of n operations for custom */
237:   if ((type = oprs[0]) == NON_UNIFORM) oprs++;

239:   PetscCheck(fp = (vfp)PCTFS_rvec_fct_addr(type), PETSC_COMM_SELF, PETSC_ERR_PLIB, "PCTFS_grop_hc() :: Could not retrieve function pointer!");

241:   for (mask = 1, edge = 0; edge < dim; edge++, mask <<= 1) {
242:     dest = PCTFS_my_id ^ mask;
243:     if (PCTFS_my_id > dest) PetscCallMPI(MPI_Send(vals, n, MPIU_SCALAR, dest, MSGTAG2 + PCTFS_my_id, MPI_COMM_WORLD));
244:     else {
245:       PetscCallMPI(MPI_Recv(work, n, MPIU_SCALAR, MPI_ANY_SOURCE, MSGTAG2 + dest, MPI_COMM_WORLD, &status));
246:       PetscCall((*fp)(vals, work, n, oprs));
247:     }
248:   }

250:   if (edge == dim) mask >>= 1;
251:   else {
252:     while (++edge < dim) mask <<= 1;
253:   }

255:   for (edge = 0; edge < dim; edge++, mask >>= 1) {
256:     if (PCTFS_my_id % mask) continue;

258:     dest = PCTFS_my_id ^ mask;
259:     if (PCTFS_my_id < dest) PetscCallMPI(MPI_Send(vals, n, MPIU_SCALAR, dest, MSGTAG4 + PCTFS_my_id, MPI_COMM_WORLD));
260:     else PetscCallMPI(MPI_Recv(vals, n, MPIU_SCALAR, MPI_ANY_SOURCE, MSGTAG4 + dest, MPI_COMM_WORLD, &status));
261:   }
262:   PetscFunctionReturn(PETSC_SUCCESS);
263: }
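
PCTFS_grop_hc() runs the same fan-in/fan-out exchange restricted to the low dim hypercube dimensions: with 8 ranks and dim = 2, ranks 0-3 and ranks 4-7 reduce independently. dim is clamped to PCTFS_i_log2_num_nodes, and the routine refuses to run unless the node count is a power of two. A short sketch, with the same assumed GL_ADD code as above:

   /* Hypothetical helper: sum x within each 4-rank sub-cube (dim = 2) only. */
   static PetscErrorCode subcube_sum(PetscScalar *x)
   {
     PetscScalar work[1];
     PetscInt    op[] = {GL_ADD, 0};

     PetscFunctionBegin;
     PetscCall(PCTFS_grop_hc(x, work, 1, op, 2));
     PetscFunctionReturn(PETSC_SUCCESS);
   }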

265: /******************************************************************************/
266: PetscErrorCode PCTFS_ssgl_radd(PetscScalar *vals, PetscScalar *work, PetscInt level, PetscInt *segs)
267: {
268:   PetscInt     edge, type, dest, mask;
269:   PetscInt     stage_n;
270:   MPI_Status   status;
271:   PetscMPIInt *maxval, iflg;

273:   PetscFunctionBegin;
274:   /* check to make sure comm package has been initialized */
275:   if (!p_init) PetscCall(PCTFS_comm_init());

277:   /* all msgs are *NOT* the same length */
278:   /* implement the mesh fan in/out exchange algorithm */
279:   for (mask = 0, edge = 0; edge < level; edge++, mask++) {
280:     stage_n = (segs[level] - segs[edge]);
281:     if (stage_n && !(PCTFS_my_id & mask)) {
282:       dest = edge_node[edge];
283:       type = MSGTAG3 + PCTFS_my_id + (PCTFS_num_nodes * edge);
284:       if (PCTFS_my_id > dest) PetscCallMPI(MPI_Send(vals + segs[edge], stage_n, MPIU_SCALAR, dest, type, MPI_COMM_WORLD));
285:       else {
286:         type = type - PCTFS_my_id + dest;
287:         PetscCallMPI(MPI_Recv(work, stage_n, MPIU_SCALAR, MPI_ANY_SOURCE, type, MPI_COMM_WORLD, &status));
288:         PetscCall(PCTFS_rvec_add(vals + segs[edge], work, stage_n));
289:       }
290:     }
291:     mask <<= 1;
292:   }
293:   mask >>= 1;
294:   for (edge = 0; edge < level; edge++) {
295:     stage_n = (segs[level] - segs[level - 1 - edge]);
296:     if (stage_n && !(PCTFS_my_id & mask)) {
297:       dest = edge_node[level - edge - 1];
298:       type = MSGTAG6 + PCTFS_my_id + (PCTFS_num_nodes * edge);
299:       PetscCallMPI(MPI_Comm_get_attr(MPI_COMM_WORLD, MPI_TAG_UB, &maxval, &iflg));
300:       PetscCheck(iflg, PETSC_COMM_SELF, PETSC_ERR_LIB, "MPI error: MPI_Comm_get_attr() is not returning a MPI_TAG_UB");
301:       PetscCheck(*maxval > type, PETSC_COMM_SELF, PETSC_ERR_PLIB, "MPI_TAG_UB for your current MPI implementation is not large enough to use PCTFS");
302:       if (PCTFS_my_id < dest) PetscCallMPI(MPI_Send(vals + segs[level - 1 - edge], stage_n, MPIU_SCALAR, dest, type, MPI_COMM_WORLD));
303:       else {
304:         type = type - PCTFS_my_id + dest;
305:         PetscCallMPI(MPI_Recv(vals + segs[level - 1 - edge], stage_n, MPIU_SCALAR, MPI_ANY_SOURCE, type, MPI_COMM_WORLD, &status));
306:       }
307:     }
308:     mask >>= 1;
309:   }
310:   PetscFunctionReturn(PETSC_SUCCESS);
311: }
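
As the two loops indicate, PCTFS_ssgl_radd() performs a staged fan-in that adds the trailing block of length segs[level] - segs[edge] starting at vals + segs[edge] at each stage, then a fan-out that returns the accumulated blocks starting at vals + segs[level - 1 - edge]; segs is taken here to be a non-decreasing table of level + 1 offsets. Because every message carries a distinct tag of the form MSGTAG3/MSGTAG6 + rank + PCTFS_num_nodes * edge, the fan-out checks MPI_TAG_UB: the MPI standard only guarantees tags up to 32767, and with, say, 4096 ranks and edge = 10 the computed tag is already above 40960, hence the runtime check rather than a fixed tag range.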

313: /***********************************comm.c*************************************/
314: PetscErrorCode PCTFS_giop_hc(PetscInt *vals, PetscInt *work, PetscInt n, PetscInt *oprs, PetscInt dim)
315: {
316:   PetscInt   mask, edge;
317:   PetscInt   type, dest;
318:   vfp        fp;
319:   MPI_Status status;

321:   PetscFunctionBegin;
322:   /* ok ... should have some data, work, and operator(s) */
323:   PetscCheck(vals && work && oprs, PETSC_COMM_SELF, PETSC_ERR_PLIB, "PCTFS_giop_hc() :: vals=%p, work=%p, oprs=%p", (void *)vals, (void *)work, (void *)oprs);

325:   /* non-uniform should have at least two entries */
326:   PetscCheck(!(oprs[0] == NON_UNIFORM) || !(n < 2), PETSC_COMM_SELF, PETSC_ERR_PLIB, "PCTFS_giop_hc() :: non_uniform and n=0,1?");

328:   /* check to make sure comm package has been initialized */
329:   if (!p_init) PetscCall(PCTFS_comm_init());

331:   /* if there's nothing to do return */
332:   if ((PCTFS_num_nodes < 2) || (!n) || (dim <= 0)) PetscFunctionReturn(PETSC_SUCCESS);

334:   /* this routine requires PCTFS_num_nodes to be a power of two */
335:   PetscCheck(!modfl_num_nodes, PETSC_COMM_SELF, PETSC_ERR_PLIB, "PCTFS_giop_hc() :: PCTFS_num_nodes not a power of 2!?!");

337:   /* a negative number of items to send ==> fatal */
338:   PetscCheck(n >= 0, PETSC_COMM_SELF, PETSC_ERR_PLIB, "PCTFS_giop_hc() :: n=%" PetscInt_FMT "<0?", n);

340:   /* can't do more dimensions than exist */
341:   dim = PetscMin(dim, PCTFS_i_log2_num_nodes);

343:   /* advance to list of n operations for custom */
344:   if ((type = oprs[0]) == NON_UNIFORM) oprs++;

346:   PetscCheck(fp = (vfp)PCTFS_ivec_fct_addr(type), PETSC_COMM_SELF, PETSC_ERR_PLIB, "PCTFS_giop_hc() :: Could not retrieve function pointer!");

348:   for (mask = 1, edge = 0; edge < dim; edge++, mask <<= 1) {
349:     dest = PCTFS_my_id ^ mask;
350:     if (PCTFS_my_id > dest) PetscCallMPI(MPI_Send(vals, n, MPIU_INT, dest, MSGTAG2 + PCTFS_my_id, MPI_COMM_WORLD));
351:     else {
352:       PetscCallMPI(MPI_Recv(work, n, MPIU_INT, MPI_ANY_SOURCE, MSGTAG2 + dest, MPI_COMM_WORLD, &status));
353:       PetscCall((*fp)(vals, work, n, oprs));
354:     }
355:   }

357:   if (edge == dim) mask >>= 1;
358:   else {
359:     while (++edge < dim) mask <<= 1;
360:   }

362:   for (edge = 0; edge < dim; edge++, mask >>= 1) {
363:     if (PCTFS_my_id % mask) continue;

365:     dest = PCTFS_my_id ^ mask;
366:     if (PCTFS_my_id < dest) PetscCallMPI(MPI_Send(vals, n, MPIU_INT, dest, MSGTAG4 + PCTFS_my_id, MPI_COMM_WORLD));
367:     else PetscCallMPI(MPI_Recv(vals, n, MPIU_INT, MPI_ANY_SOURCE, MSGTAG4 + dest, MPI_COMM_WORLD, &status));
368:   }
369:   PetscFunctionReturn(PETSC_SUCCESS);
370: }