Actual source code: ex30.c

  1: static char help[] = "Grid-based Landau collision operator with a PIC interface and OpenMP setup (one species per grid).\n";

  3: /*
  4:    Supports 2.5V with axisymmetric coordinates
  5:      - (r,z) coordinates
  6:      - Domain and species data are input by the Landau operator
  7:      - "radius" for each grid, normalized with the electron thermal velocity
  8:      - Domain: (0,radius) x (-radius,radius); the first coordinate x[0] is the perpendicular velocity, and a 2*pi*x[0] factor is added for the axisymmetric volume element
  9:    Supports full 3V

 11:  */
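     /*
        A note on the 2D moment weighting used throughout this example: in (r,z)
        coordinates the axisymmetric volume element is 2*pi*r dr dz, so a velocity
        moment of the distribution f is computed as, e.g. for the density,

          n = \int 2*pi*x[0] f(x) dx[0] dx[1]

        which is why every particle weight below carries the factor
        fact = 2*pi*x[0] in 2D (and fact = 1 in full 3V).
     */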

 13: #include <petscdmplex.h>
 14: #include <petscds.h>
 15: #include <petscdmswarm.h>
 16: #include <petscksp.h>
 17: #include <petsc/private/petscimpl.h>
 18: #if defined(PETSC_HAVE_OPENMP) && defined(PETSC_HAVE_THREADSAFETY)
 19:   #include <omp.h>
 20: #endif
 21: #include <petsclandau.h>
 22: #include <petscdmcomposite.h>

 24: typedef struct {
 25:   Mat MpTrans;
 26:   Mat Mp;
 27:   Vec ff;
 28:   Vec uu;
 29: } MatShellCtx;

 31: typedef struct {
 32:   PetscInt   v_target;
 33:   PetscInt   g_target;
 34:   PetscInt   global_vertex_id_0;
 35:   DM        *globSwarmArray;
 36:   LandauCtx *ctx;
 37:   DM        *grid_dm;
 38:   Mat       *g_Mass;
 39:   Mat       *globMpArray;
 40:   Vec       *globXArray;
 41:   PetscBool  print;
 42:   PetscBool  print_entropy;
 43: } PrintCtx;

 45: PetscErrorCode MatMultMtM_SeqAIJ(Mat MtM, Vec xx, Vec yy)
 46: {
 47:   MatShellCtx *matshellctx;

 49:   PetscFunctionBeginUser;
 50:   PetscCall(MatShellGetContext(MtM, &matshellctx));
 51:   PetscCheck(matshellctx, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "No context");
 52:   PetscCall(MatMult(matshellctx->Mp, xx, matshellctx->ff));
 53:   PetscCall(MatMult(matshellctx->MpTrans, matshellctx->ff, yy));
 54:   PetscFunctionReturn(PETSC_SUCCESS);
 55: }

 57: PetscErrorCode MatMultAddMtM_SeqAIJ(Mat MtM, Vec xx, Vec yy, Vec zz)
 58: {
 59:   MatShellCtx *matshellctx;

 61:   PetscFunctionBeginUser;
 62:   PetscCall(MatShellGetContext(MtM, &matshellctx));
 63:   PetscCheck(matshellctx, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "No context");
 64:   PetscCall(MatMult(matshellctx->Mp, xx, matshellctx->ff));
 65:   PetscCall(MatMultAdd(matshellctx->MpTrans, matshellctx->ff, yy, zz));
 66:   PetscFunctionReturn(PETSC_SUCCESS);
 67: }
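     /*
        These two callbacks let a MATSHELL act as the normal-equations operator
        Mp' Mp without ever forming the product. A minimal wiring sketch, following
        the same pattern used in gridToParticles() below (the names NtN and mctx
        are illustrative; N is the local number of grid unknowns):

          Mat          NtN;
          MatShellCtx *mctx;
          PetscCall(PetscNew(&mctx));
          mctx->Mp = Mp;                                      // particle mass matrix
          PetscCall(MatTranspose(Mp, MAT_INITIAL_MATRIX, &mctx->MpTrans));
          PetscCall(MatCreateVecs(Mp, &mctx->uu, &mctx->ff)); // work vectors
          PetscCall(MatCreateShell(PETSC_COMM_SELF, N, N, PETSC_DECIDE, PETSC_DECIDE, mctx, &NtN));
          PetscCall(MatShellSetOperation(NtN, MATOP_MULT, (PetscErrorCodeFn *)MatMultMtM_SeqAIJ));
     */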

 69: PetscErrorCode createSwarm(const DM dm, PetscInt dim, DM *sw)
 70: {
 71:   PetscInt Nc = 1;

 73:   PetscFunctionBeginUser;
 74:   PetscCall(DMCreate(PETSC_COMM_SELF, sw));
 75:   PetscCall(DMSetType(*sw, DMSWARM));
 76:   PetscCall(DMSetDimension(*sw, dim));
 77:   PetscCall(DMSwarmSetType(*sw, DMSWARM_PIC));
 78:   PetscCall(DMSwarmSetCellDM(*sw, dm));
 79:   PetscCall(DMSwarmRegisterPetscDatatypeField(*sw, "w_q", Nc, PETSC_REAL));
 80:   PetscCall(DMSwarmFinalizeFieldRegister(*sw));
 81:   PetscCall(DMSetFromOptions(*sw));
 82:   PetscCall(PetscObjectSetName((PetscObject)*sw, "Particle Grid"));
 83:   PetscFunctionReturn(PETSC_SUCCESS);
 84: }

 86: static PetscErrorCode makeSwarm(DM sw, const PetscInt dim, const PetscInt Np, const PetscReal xx[], const PetscReal yy[], const PetscReal zz[])
 87: {
 88:   PetscReal    *coords;
 89:   PetscDataType dtype;
 90:   PetscInt      bs, p, zero = 0;

 92:   PetscFunctionBeginUser;
 93:   PetscCall(DMSwarmSetLocalSizes(sw, Np, zero));
 94:   PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
 95:   for (p = 0; p < Np; p++) {
 96:     coords[p * dim + 0] = xx[p];
 97:     coords[p * dim + 1] = yy[p];
 98:     if (dim == 3) coords[p * dim + 2] = zz[p];
 99:   }
100:   PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
101:   PetscCall(DMSwarmVectorDefineField(sw, "w_q"));
102:   PetscFunctionReturn(PETSC_SUCCESS);
103: }

105: static PetscErrorCode createMp(const DM dm, DM sw, Mat *Mp_out)
106: {
107:   PetscBool removePoints = PETSC_TRUE;
108:   Mat       M_p;

110:   PetscFunctionBeginUser;
111:   // migrate after coords are set
112:   PetscCall(DMSwarmMigrate(sw, removePoints));
113:   //
114:   PetscCall(PetscObjectSetName((PetscObject)sw, "Particle Grid"));

116:   /* PetscInt  N,*count,nmin=10000,nmax=0,ntot=0; */
117:   /* // count */
118:   /* PetscCall(DMSwarmCreatePointPerCellCount(sw, &N, &count)); */
119:   /* for (int i=0, n; i< N ; i++) { */
120:   /*   if ((n=count[i]) > nmax) nmax = n; */
121:   /*   if (n < nmin) nmin = n; */
122:   /*   PetscCall(PetscInfo(dm, " %d) %d particles\n", i, n)); */
123:   /*   ntot += n; */
124:   /* } */
125:   /* PetscCall(PetscFree(count)); */
126:   /* PetscCall(PetscInfo(dm, " %" PetscInt_FMT " max particle / cell, and %" PetscInt_FMT " min, ratio = %g,  %" PetscInt_FMT " total\n", nmax, nmin, (double)nmax/(double)nmin,ntot)); */

128:   /* M_p' f, with (M_p)_{p,i} = phi_i(x_p), gives the weak-form rhs (M_p' f)_i = sum_p phi_i(x_p) f_p ~ \int_\Omega phi_i f */
129:   PetscCall(DMCreateMassMatrix(sw, dm, &M_p));
130:   PetscCall(DMViewFromOptions(sw, NULL, "-ex30_sw_view"));
131:   // output
132:   *Mp_out = M_p;
133:   PetscFunctionReturn(PETSC_SUCCESS);
134: }

136: static PetscErrorCode particlesToGrid(const DM dm, DM sw, const PetscInt a_tid, const PetscInt dim, const PetscReal a_wp[], Vec rho, Mat M_p)
137: {
138:   PetscReal    *wq;
139:   PetscDataType dtype;
140:   Vec           ff;
141:   PetscInt      bs, p, Np;

143:   PetscFunctionBeginUser;
144:   PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wq));
145:   PetscCall(DMSwarmGetLocalSize(sw, &Np));
146:   for (p = 0; p < Np; p++) wq[p] = a_wp[p];
147:   PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wq));
148:   PetscCall(PetscObjectSetName((PetscObject)rho, "rho"));
149:   PetscCall(DMSwarmCreateGlobalVectorFromField(sw, "w_q", &ff));
150:   PetscCall(PetscObjectSetName((PetscObject)ff, "weights"));
151:   PetscCall(MatMultTranspose(M_p, ff, rho));
152:   PetscCall(DMSwarmDestroyGlobalVectorFromField(sw, "w_q", &ff));
153:   PetscFunctionReturn(PETSC_SUCCESS);
154: }
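     /*
        Typical use of the helpers above to deposit particles onto a grid field
        (a sketch: 'dm' is a mesh, xx/yy/zz/wp are per-particle coordinate and
        weight arrays of length Np, and 'rho' is a global vector on dm):

          DM  sw;
          Mat M_p;
          PetscCall(createSwarm(dm, dim, &sw));
          PetscCall(makeSwarm(sw, dim, Np, xx, yy, zz));            // set coordinates
          PetscCall(createMp(dm, sw, &M_p));                        // migrate + particle mass matrix
          PetscCall(particlesToGrid(dm, sw, 0, dim, wp, rho, M_p)); // rho = Mp' w
     */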

156: //
157: // project the grid field 'rhs' back onto the particle weights in 'sw.w_q'
158: //
159: PetscErrorCode gridToParticles(const DM dm, DM sw, const Vec rhs, Vec work_ferhs, Mat M_p, Mat Mass)
160: {
161:   PetscBool    is_lsqr;
162:   KSP          ksp;
163:   Mat          PM_p = NULL, MtM, D = NULL;
164:   Vec          ff;
165:   PetscInt     N, M, nzl;
166:   MatShellCtx *matshellctx = NULL;
167:   PC           pc;

169:   PetscFunctionBeginUser;
170:   // 1) apply mass matrix M; the full Moore-Penrose map with mass is w = Mp (Mp' Mp)^-1 M f
171:   PetscCall(MatMult(Mass, rhs, work_ferhs));
172:   // 2) pseudo-inverse, first part: (Mp' Mp)^-1
173:   PetscCall(KSPCreate(PETSC_COMM_SELF, &ksp));
174:   PetscCall(KSPSetType(ksp, KSPCG));
175:   PetscCall(KSPGetPC(ksp, &pc));
176:   PetscCall(PCSetType(pc, PCJACOBI));
177:   PetscCall(KSPSetOptionsPrefix(ksp, "ftop_"));
178:   PetscCall(KSPSetFromOptions(ksp));
179:   PetscCall(PetscObjectTypeCompare((PetscObject)ksp, KSPLSQR, &is_lsqr));
180:   if (!is_lsqr) {
181:     PetscCall(MatGetLocalSize(M_p, &M, &N));
182:     if (N > M) {
183:       PetscCall(PetscInfo(ksp, " M (%" PetscInt_FMT ") < N (%" PetscInt_FMT "): more vertices than particles, revert to lsqr\n", M, N));
184:       is_lsqr = PETSC_TRUE;
185:       PetscCall(KSPSetType(ksp, KSPLSQR));
186:       PetscCall(PCSetType(pc, PCNONE)); // should not happen; one could instead solve the stable normal equations (Mp' Mp) and move the projection Mp before the solve
187:     } else {
188:       PetscCall(PetscNew(&matshellctx));
189:       PetscCall(MatCreateVecs(M_p, &matshellctx->uu, &matshellctx->ff));
190:       if (0) { // compile-time switch: form Mp'Mp explicitly (disabled) instead of using the matrix-free shell below
191:         PetscCall(MatTransposeMatMult(M_p, M_p, MAT_INITIAL_MATRIX, 4, &MtM));
192:         PetscCall(KSPSetOperators(ksp, MtM, MtM));
193:         PetscCall(PetscInfo(M_p, "createMtM KSP with explicit Mp'Mp\n"));
194:         PetscCall(MatViewFromOptions(MtM, NULL, "-ftop2_MtM_mat_view"));
195:       } else {
196:         PetscCall(MatCreateShell(PetscObjectComm((PetscObject)dm), N, N, PETSC_DECIDE, PETSC_DECIDE, matshellctx, &MtM));
197:         PetscCall(MatTranspose(M_p, MAT_INITIAL_MATRIX, &matshellctx->MpTrans));
198:         matshellctx->Mp = M_p;
199:         PetscCall(MatShellSetOperation(MtM, MATOP_MULT, (PetscErrorCodeFn *)MatMultMtM_SeqAIJ));
200:         PetscCall(MatShellSetOperation(MtM, MATOP_MULT_ADD, (PetscErrorCodeFn *)MatMultAddMtM_SeqAIJ));
201:         PetscCall(MatCreateSeqAIJ(PETSC_COMM_SELF, N, N, 1, NULL, &D));
202:         PetscCall(MatViewFromOptions(matshellctx->MpTrans, NULL, "-ftop2_MpT_mat_view"));
203:         for (PetscInt i = 0; i < N; i++) {
204:           const PetscScalar *vals;
205:           const PetscInt    *cols;
206:           PetscScalar        dot = 0;
207:           PetscCall(MatGetRow(matshellctx->MpTrans, i, &nzl, &cols, &vals));
208:           for (PetscInt ii = 0; ii < nzl; ii++) dot += PetscSqr(vals[ii]);
209:           if (dot < PETSC_MACHINE_EPSILON) {
210:             PetscCall(PetscInfo(ksp, "empty row in pseudo-inverse %d\n", (int)i));
211:             is_lsqr = PETSC_TRUE; // empty rows
212:             PetscCall(KSPSetType(ksp, KSPLSQR));
213:             PetscCall(PCSetType(pc, PCNONE)); // should not happen; one could instead solve the stable (Mp Mp') system and move the projection Mp before the solve
214:             // clean up
215:             PetscCall(MatDestroy(&matshellctx->MpTrans));
216:             PetscCall(VecDestroy(&matshellctx->ff));
217:             PetscCall(VecDestroy(&matshellctx->uu));
218:             PetscCall(MatDestroy(&D));
219:             PetscCall(MatDestroy(&MtM));
220:             PetscCall(PetscFree(matshellctx));
221:             D = NULL;
222:             break;
223:           }
224:           PetscCall(MatSetValue(D, i, i, dot, INSERT_VALUES));
225:         }
226:         if (D) {
227:           PetscCall(MatAssemblyBegin(D, MAT_FINAL_ASSEMBLY));
228:           PetscCall(MatAssemblyEnd(D, MAT_FINAL_ASSEMBLY));
229:           PetscCall(PetscInfo(M_p, "createMtMKSP Have %" PetscInt_FMT " eqs, nzl = %" PetscInt_FMT "\n", N, nzl));
230:           PetscCall(KSPSetOperators(ksp, MtM, D));
231:           PetscCall(MatViewFromOptions(D, NULL, "-ftop2_D_mat_view"));
232:           PetscCall(MatViewFromOptions(M_p, NULL, "-ftop2_Mp_mat_view"));
233:           PetscCall(MatViewFromOptions(matshellctx->MpTrans, NULL, "-ftop2_MpTranspose_mat_view"));
234:           PetscCall(MatViewFromOptions(MtM, NULL, "-ftop2_MtM_mat_view"));
235:         }
236:       }
237:     }
238:   }
239:   if (is_lsqr) {
240:     PC        pc2;
241:     PetscBool is_bjac;
242:     PetscCall(KSPGetPC(ksp, &pc2));
243:     PetscCall(PetscObjectTypeCompare((PetscObject)pc2, PCBJACOBI, &is_bjac));
244:     if (is_bjac) {
245:       PetscCall(DMSwarmCreateMassMatrixSquare(sw, dm, &PM_p));
246:       PetscCall(KSPSetOperators(ksp, M_p, PM_p));
247:     } else {
248:       PetscCall(KSPSetOperators(ksp, M_p, M_p));
249:     }
250:     PetscCall(MatViewFromOptions(M_p, NULL, "-ftop2_Mp_mat_view"));
251:   }
252:   PetscCall(DMSwarmCreateGlobalVectorFromField(sw, "w_q", &ff)); // this grabs access
253:   if (!is_lsqr) {
254:     PetscCall(KSPSolve(ksp, work_ferhs, matshellctx->uu));
255:     // 3) with Moore-Penrose apply Mp: M_p (Mp' Mp)^-1 M
256:     PetscCall(MatMult(M_p, matshellctx->uu, ff));
257:     PetscCall(MatDestroy(&D));
258:     PetscCall(MatDestroy(&MtM));
259:     PetscCall(MatDestroy(&matshellctx->MpTrans));
260:     PetscCall(VecDestroy(&matshellctx->ff));
261:     PetscCall(VecDestroy(&matshellctx->uu));
262:     PetscCall(PetscFree(matshellctx));
263:   } else {
264:     // finally with LSQR apply M_p^\dagger
265:     PetscCall(KSPSolveTranspose(ksp, work_ferhs, ff));
266:   }
267:   PetscCall(KSPDestroy(&ksp));
268:   PetscCall(MatDestroy(&PM_p));
269:   PetscCall(DMSwarmDestroyGlobalVectorFromField(sw, "w_q", &ff));
270:   PetscFunctionReturn(PETSC_SUCCESS);
271: }
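     /*
        Summary of the map applied above: given a grid field f, we seek particle
        weights w with Mp' w = M f. The conjugate-gradient path uses the
        Moore-Penrose solution

          w = Mp (Mp' Mp)^{-1} M f

        while the LSQR path applies the pseudo-inverse of Mp' via KSPSolveTranspose().
        The Jacobi preconditioner D is the diagonal of Mp' Mp, D_ii = sum_p (Mp)_pi^2,
        which is what the MatGetRow() loop over MpTrans accumulates; an (almost) zero
        diagonal entry signals an empty row and forces the fallback to LSQR.
     */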

273: #define EX30_MAX_NUM_THRDS 12
274: #define EX30_MAX_BATCH_SZ  1024
275: //
276: // map grid solutions back to the particle weights 'globSwarmArray[].w_q', accumulating moments for the target vertex
277: //
278: PetscErrorCode gridToParticles_private(DM grid_dm[], DM globSwarmArray[], const PetscInt dim, const PetscInt v_target, const PetscInt numthreads, const PetscInt num_vertices, const PetscInt global_vertex_id, Mat globMpArray[], Mat g_Mass[], Vec t_fhat[][EX30_MAX_NUM_THRDS], PetscReal moments[], Vec globXArray[], LandauCtx *ctx)
279: {
280:   PetscErrorCode ierr = (PetscErrorCode)0; // used inside thread loops

282:   PetscFunctionBeginUser;
283:   // map back to particles
284:   for (PetscInt v_id_0 = 0; v_id_0 < ctx->batch_sz; v_id_0 += numthreads) {
285:     PetscCall(PetscInfo(grid_dm[0], "g2p: global batch %" PetscInt_FMT " of %" PetscInt_FMT ", Landau batch %" PetscInt_FMT " of %" PetscInt_FMT ": map back to particles\n", global_vertex_id + 1, num_vertices, v_id_0 + 1, ctx->batch_sz));
286:     //PetscPragmaOMP(parallel for)
287:     for (PetscInt tid = 0; tid < numthreads; tid++) {
288:       const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id + v_id;
289:       if (glb_v_id < num_vertices) {
290:         for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add the same particles for all grids
291:           PetscErrorCode ierr_t;
292:           ierr_t = PetscInfo(grid_dm[0], "gridToParticles: global batch %" PetscInt_FMT ", local batch b=%" PetscInt_FMT ", grid g=%" PetscInt_FMT ", index(b,g) %" PetscInt_FMT "\n", global_vertex_id, v_id, grid, LAND_PACK_IDX(v_id, grid));
293:           ierr_t = gridToParticles(grid_dm[grid], globSwarmArray[LAND_PACK_IDX(v_id, grid)], globXArray[LAND_PACK_IDX(v_id, grid)], t_fhat[grid][tid], globMpArray[LAND_PACK_IDX(v_id, grid)], g_Mass[grid]);
294:           if (ierr_t) ierr = ierr_t;
295:         }
296:       }
297:     }
298:     PetscCheck(!ierr, PETSC_COMM_WORLD, PETSC_ERR_PLIB, "Error in OMP loop. ierr = %d", (int)ierr);
299:     /* Get moments */
300:     PetscCall(PetscInfo(grid_dm[0], "Cleanup batches %" PetscInt_FMT " to %" PetscInt_FMT "\n", v_id_0, v_id_0 + numthreads));
301:     for (PetscInt tid = 0; tid < numthreads; tid++) {
302:       const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id + v_id;
303:       if (glb_v_id == v_target) {
304:         for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
305:           PetscDataType dtype;
306:           PetscReal    *wp, *coords;
307:           DM            sw = globSwarmArray[LAND_PACK_IDX(v_id, grid)];
308:           PetscInt      npoints, bs = 1;
309:           PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wp)); // take data out here
310:           PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
311:           PetscCall(DMSwarmGetLocalSize(sw, &npoints));
312:           for (PetscInt p = 0; p < npoints; p++) {
313:             PetscReal v2 = 0, fact = (dim == 2) ? 2.0 * PETSC_PI * coords[p * dim + 0] : 1, w = fact * wp[p] * ctx->n_0 * ctx->masses[ctx->species_offset[grid]];
314:             for (PetscInt i = 0; i < dim; ++i) v2 += PetscSqr(coords[p * dim + i]);
315:             moments[0] += w;
316:             moments[1] += w * ctx->v_0 * coords[p * dim + 1]; // z-momentum
317:             moments[2] += w * 0.5 * ctx->v_0 * ctx->v_0 * v2;
318:           }
319:           PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wp));
320:           PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
321:         }
322:         const PetscReal N_inv = 1 / moments[0];
323:         PetscCall(PetscInfo(grid_dm[0], "gridToParticles_private [%" PetscInt_FMT "], n = %g\n", v_id, (double)moments[0]));
324:         for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
325:           PetscDataType dtype;
326:           PetscReal    *wp, *coords;
327:           DM            sw = globSwarmArray[LAND_PACK_IDX(v_id, grid)];
328:           PetscInt      npoints, bs = 1;
329:           PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wp)); // take data out here
330:           PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
331:           PetscCall(DMSwarmGetLocalSize(sw, &npoints));
332:           for (PetscInt p = 0; p < npoints; p++) {
333:             const PetscReal fact = dim == 2 ? 2.0 * PETSC_PI * coords[p * dim + 0] : 1, w = fact * wp[p] * ctx->n_0 * ctx->masses[ctx->species_offset[grid]], ww = w * N_inv;
334:             if (w > PETSC_REAL_MIN) {
335:               moments[3] -= ww * PetscLogReal(ww);
336:               PetscCheck(ww < 1 - PETSC_MACHINE_EPSILON, PETSC_COMM_WORLD, PETSC_ERR_PLIB, "ww (%g) > 1", (double)ww);
337:             } else moments[4] -= w; // keep track of density that is lost
338:           }
339:           PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wp));
340:           PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
341:         }
342:       }
343:     } // thread batch
344:   } // batch
345:   PetscFunctionReturn(PETSC_SUCCESS);
346: }
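     /*
        The moments accumulated above are, with fact = 2*pi*x[0] in 2D and w_p the
        scaled particle weights (weight * fact * n_0 * mass):

          moments[0] = sum_p w_p                        total density n
          moments[1] = sum_p w_p v_0 x_p[1]             z-momentum
          moments[2] = sum_p w_p v_0^2 |x_p|^2 / 2      kinetic energy
          moments[3] = -sum_p ww_p ln(ww_p)             entropy, with ww_p = w_p / n
          moments[4] = -sum of w_p over non-positive weights (density that is lost)
     */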

348: static void maxwellian(PetscInt dim, const PetscReal x[], PetscReal kt_m, PetscReal n, PetscReal shift, PetscScalar *u)
349: {
350:   PetscInt  i;
351:   PetscReal v2 = 0, theta = 2.0 * kt_m; /* theta = 2kT/mc^2 */

353:   if (shift != 0.) {
354:     v2 = 0;
355:     for (i = 0; i < dim - 1; ++i) v2 += x[i] * x[i];
356:     v2 += (x[dim - 1] - shift) * (x[dim - 1] - shift);
357:     /* evaluate the shifted Maxwellian */
358:     u[0] += n * PetscPowReal(PETSC_PI * theta, -1.5) * (PetscExpReal(-v2 / theta));
359:   } else {
360:     /* compute the exponents, v^2 */
361:     for (i = 0; i < dim; ++i) v2 += x[i] * x[i];
362:     /* evaluate the Maxwellian */
363:     u[0] += n * PetscPowReal(PETSC_PI * theta, -1.5) * (PetscExpReal(-v2 / theta));
364:   }
365: }
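     /*
        maxwellian() accumulates (note the +=, so repeated calls superpose, which is
        how the bi-Maxwellian is built in go() below)

          u += n (pi*theta)^{-3/2} exp(-|x - shift*e_z|^2 / theta),  theta = 2 kT/m,

        with the shift applied along the last coordinate; each term integrates to n
        over full 3V.
     */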

367: static PetscErrorCode PostStep(TS ts)
368: {
369:   PetscInt   n, dim, nDMs, v_id;
370:   PetscReal  t;
371:   LandauCtx *ctx;
372:   Vec        X;
373:   PrintCtx  *printCtx;
374:   DM         pack;
375:   PetscReal  moments[5], e_grid[LANDAU_MAX_GRIDS];

377:   PetscFunctionBeginUser;
378:   PetscCall(TSGetApplicationContext(ts, &printCtx));
379:   if (!printCtx->print && !printCtx->print_entropy) PetscFunctionReturn(PETSC_SUCCESS);
380:   ctx = printCtx->ctx;
381:   if (printCtx->v_target < printCtx->global_vertex_id_0 || printCtx->v_target >= printCtx->global_vertex_id_0 + ctx->batch_sz) PetscFunctionReturn(PETSC_SUCCESS);
382:   for (PetscInt i = 0; i < 5; i++) moments[i] = 0;
383:   for (PetscInt i = 0; i < LANDAU_MAX_GRIDS; i++) e_grid[i] = 0;
384:   v_id = printCtx->v_target % ctx->batch_sz;
385:   PetscCall(TSGetDM(ts, &pack));
386:   PetscCall(DMGetDimension(pack, &dim));
387:   PetscCall(DMCompositeGetNumberDM(pack, &nDMs)); // number of vertices * number of grids
388:   PetscCall(TSGetSolution(ts, &X));
389:   PetscCall(TSGetStepNumber(ts, &n));
390:   PetscCall(TSGetTime(ts, &t));
391:   PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, printCtx->globXArray));
392:   if (printCtx->print_entropy && printCtx->v_target >= 0 && 0) { // '&& 0' disables this diagnostic block
393:     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
394:       PetscDataType dtype;
395:       PetscReal    *wp, *coords;
396:       DM            sw = printCtx->globSwarmArray[LAND_PACK_IDX(v_id, grid)];
397:       Vec           work, subX = printCtx->globXArray[LAND_PACK_IDX(v_id, grid)];
398:       PetscInt      bs, NN;
399:       // C-G moments
400:       PetscCall(VecDuplicate(subX, &work));
401:       PetscCall(gridToParticles(printCtx->grid_dm[grid], sw, subX, work, printCtx->globMpArray[LAND_PACK_IDX(v_id, grid)], printCtx->g_Mass[grid]));
402:       PetscCall(VecDestroy(&work));
403:       // moments
404:       PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
405:       PetscCall(DMSwarmGetLocalSize(sw, &NN));
406:       PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wp));
407:       for (PetscInt pp = 0; pp < NN; pp++) {
408:         PetscReal v2 = 0, fact = (dim == 2) ? 2.0 * PETSC_PI * coords[pp * dim + 0] : 1, w = fact * wp[pp] * ctx->n_0 * ctx->masses[ctx->species_offset[grid]];
409:         for (PetscInt i = 0; i < dim; ++i) v2 += PetscSqr(coords[pp * dim + i]);
410:         moments[0] += w;
411:         moments[1] += w * ctx->v_0 * coords[pp * dim + 1]; // z-momentum
412:         moments[2] += w * 0.5 * ctx->v_0 * ctx->v_0 * v2;
413:         e_grid[grid] += w * 0.5 * ctx->v_0 * ctx->v_0 * v2;
414:       }
415:       PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
416:       PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wp));
417:     }
418:     // entropy
419:     const PetscReal N_inv = 1 / moments[0];
420:     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
421:       PetscDataType dtype;
422:       PetscReal    *wp, *coords;
423:       DM            sw = printCtx->globSwarmArray[LAND_PACK_IDX(v_id, grid)];
424:       PetscInt      bs, NN;
425:       PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
426:       PetscCall(DMSwarmGetLocalSize(sw, &NN));
427:       PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wp));
428:       for (PetscInt pp = 0; pp < NN; pp++) {
429:         PetscReal fact = (dim == 2) ? 2.0 * PETSC_PI * coords[pp * dim + 0] : 1, w = fact * wp[pp] * ctx->n_0 * ctx->masses[ctx->species_offset[grid]], ww = w * N_inv;
430:         if (w > PETSC_REAL_MIN) moments[3] -= ww * PetscLogReal(ww);
431:         else moments[4] -= w;
432:       }
433:       PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
434:       PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wp));
435:     }
436:     PetscCall(PetscInfo(X, "%4d) time %e, Landau particle moments: 0: %18.12e 1: %19.12e 2: %18.12e entropy: %e loss %e. energy = %e + %e + %e\n", (int)n, (double)t, (double)moments[0], (double)moments[1], (double)moments[2], (double)moments[3], (double)(moments[4] / moments[0]), (double)e_grid[0], (double)e_grid[1], (double)e_grid[2]));
437:   }
438:   if (printCtx->print && printCtx->g_target >= 0) {
439:     PetscInt         grid   = printCtx->g_target, id;
440:     static PetscReal last_t = -100000, period = .5;
441:     if (last_t == -100000) last_t = -period + t;
442:     if (t >= last_t + period) {
443:       last_t = t;
444:       PetscCall(DMGetOutputSequenceNumber(ctx->plex[grid], &id, NULL));
445:       PetscCall(DMSetOutputSequenceNumber(ctx->plex[grid], id + 1, t));
446:       PetscCall(VecViewFromOptions(printCtx->globXArray[LAND_PACK_IDX(v_id % ctx->batch_sz, grid)], NULL, "-ex30_vec_view"));
447:       if (ctx->num_grids > grid + 1) {
448:         PetscCall(DMSetOutputSequenceNumber(ctx->plex[grid + 1], id + 1, t));
449:         PetscCall(VecViewFromOptions(printCtx->globXArray[LAND_PACK_IDX(v_id % ctx->batch_sz, grid + 1)], NULL, "-ex30_vec_view2"));
450:       }
451:       PetscCall(PetscInfo(X, "%4d) time %e View\n", (int)n, (double)t));
452:     }
453:   }
454:   PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, printCtx->globXArray));
455:   PetscFunctionReturn(PETSC_SUCCESS);
456: }
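     /*
        PostStep() is hooked up in go() below with the standard TS pattern

          PetscCall(TSSetApplicationContext(ts, printCtx));
          PetscCall(TSSetPostStep(ts, PostStep));
          PetscCall(PostStep(ts)); // also report the initial state

        so it runs after every accepted time step of TSSolve().
     */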

458: PetscErrorCode go(TS ts, Vec X, const PetscInt num_vertices, const PetscInt a_Np, const PetscInt dim, const PetscInt v_target, const PetscInt g_target, PetscReal shift, PetscBool use_uniform_particle_grid)
459: {
460:   DM             pack, *globSwarmArray, grid_dm[LANDAU_MAX_GRIDS];
461:   Mat           *globMpArray, g_Mass[LANDAU_MAX_GRIDS];
462:   KSP            t_ksp[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS];
463:   Vec            t_fhat[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS];
464:   PetscInt       nDMs;
465:   PetscErrorCode ierr = (PetscErrorCode)0; // used inside thread loops
466: #if defined(PETSC_HAVE_OPENMP) && defined(PETSC_HAVE_THREADSAFETY)
467:   PetscInt numthreads = PetscNumOMPThreads;
468: #else
469:   PetscInt numthreads = 1;
470: #endif
471:   LandauCtx *ctx;
472:   Vec       *globXArray;
473:   PetscReal  moments_0[5], moments_1a[5], moments_1b[5], dt_init;
474:   PrintCtx  *printCtx;

476:   PetscFunctionBeginUser;
477:   PetscCheck(numthreads <= EX30_MAX_NUM_THRDS, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Too many threads %" PetscInt_FMT " > %d", numthreads, EX30_MAX_NUM_THRDS);
478:   PetscCheck(numthreads > 0, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Number of threads %" PetscInt_FMT " must be positive", numthreads);
479:   PetscCall(TSGetDM(ts, &pack));
480:   PetscCall(DMGetApplicationContext(pack, &ctx));
481:   PetscCheck(ctx->batch_sz % numthreads == 0, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "batch size (-dm_landau_batch_size) %" PetscInt_FMT " mod #threads %" PetscInt_FMT " must equal zero", ctx->batch_sz, numthreads);
482:   PetscCall(DMCompositeGetNumberDM(pack, &nDMs)); // number of vertices * number of grids
483:   PetscCall(PetscInfo(pack, "Have %" PetscInt_FMT " total grids, with %" PetscInt_FMT " Landau local batched and %" PetscInt_FMT " global items (vertices) %d DMs\n", ctx->num_grids, ctx->batch_sz, num_vertices, (int)nDMs));
484:   PetscCall(PetscMalloc(sizeof(*globXArray) * nDMs, &globXArray));
485:   PetscCall(PetscMalloc(sizeof(*globMpArray) * nDMs, &globMpArray));
486:   PetscCall(PetscMalloc(sizeof(*globSwarmArray) * nDMs, &globSwarmArray));
487:   // print ctx
488:   PetscCall(PetscNew(&printCtx));
489:   PetscCall(TSSetApplicationContext(ts, printCtx));
490:   printCtx->v_target       = v_target;
491:   printCtx->g_target       = g_target;
492:   printCtx->ctx            = ctx;
493:   printCtx->globSwarmArray = globSwarmArray;
494:   printCtx->grid_dm        = grid_dm;
495:   printCtx->globMpArray    = globMpArray;
496:   printCtx->g_Mass         = g_Mass;
497:   printCtx->globXArray     = globXArray;
498:   printCtx->print_entropy  = PETSC_FALSE;
499:   PetscOptionsBegin(PETSC_COMM_SELF, "", "Print Options", "DMPLEX");
500:   PetscCall(PetscOptionsBool("-print_entropy", "Print entropy and moments at each time step", "ex30.c", printCtx->print_entropy, &printCtx->print_entropy, NULL));
501:   PetscOptionsEnd();
502:   // view
503:   PetscCall(DMViewFromOptions(ctx->plex[g_target], NULL, "-ex30_dm_view"));
504:   if (ctx->num_grids > g_target + 1) PetscCall(DMViewFromOptions(ctx->plex[g_target + 1], NULL, "-ex30_dm_view2"));
505:   // create mesh mass matrices
506:   PetscCall(VecZeroEntries(X));
507:   PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray)); // just to duplicate
508:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {               // add the same particles for all grids
509:     Vec          subX = globXArray[LAND_PACK_IDX(0, grid)];
510:     DM           dm   = ctx->plex[grid];
511:     PetscSection s;
512:     grid_dm[grid] = dm;
513:     PetscCall(DMCreateMassMatrix(dm, dm, &g_Mass[grid]));
514:     //
515:     PetscCall(DMGetLocalSection(dm, &s));
516:     PetscCall(DMPlexCreateClosureIndex(dm, s));
517:     for (PetscInt tid = 0; tid < numthreads; tid++) {
518:       PC pc;
519:       PetscCall(VecDuplicate(subX, &t_fhat[grid][tid]));
520:       PetscCall(KSPCreate(PETSC_COMM_SELF, &t_ksp[grid][tid]));
521:       PetscCall(KSPSetType(t_ksp[grid][tid], KSPCG));
522:       PetscCall(KSPGetPC(t_ksp[grid][tid], &pc));
523:       PetscCall(PCSetType(pc, PCJACOBI));
524:       PetscCall(KSPSetOptionsPrefix(t_ksp[grid][tid], "ptof_"));
525:       PetscCall(KSPSetOperators(t_ksp[grid][tid], g_Mass[grid], g_Mass[grid]));
526:       PetscCall(KSPSetFromOptions(t_ksp[grid][tid]));
527:     }
528:   }
529:   PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
530:   PetscCall(TSGetTimeStep(ts, &dt_init)); // we could have an adaptive time stepper
531:   // loop over all vertices in chunks that are batched for TSSolve
532:   for (PetscInt i = 0; i < 5; i++) moments_0[i] = moments_1a[i] = moments_1b[i] = 0;
533:   for (PetscInt global_vertex_id_0 = 0; global_vertex_id_0 < num_vertices; global_vertex_id_0 += ctx->batch_sz, shift /= 2) { // outer vertex loop
534:     PetscCall(TSSetTime(ts, 0));
535:     PetscCall(TSSetStepNumber(ts, 0));
536:     PetscCall(TSSetTimeStep(ts, dt_init));
537:     PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray));
538:     printCtx->global_vertex_id_0 = global_vertex_id_0;
539:     if (v_target >= global_vertex_id_0 && v_target < global_vertex_id_0 + ctx->batch_sz) {
540:       PetscCall(PetscObjectSetName((PetscObject)globXArray[LAND_PACK_IDX(v_target % ctx->batch_sz, g_target)], "rho"));
541:       printCtx->print = PETSC_TRUE;
542:     } else printCtx->print = PETSC_FALSE;
543:     // create fake particles in batches with threads
544:     for (PetscInt v_id_0 = 0; v_id_0 < ctx->batch_sz; v_id_0 += numthreads) {
545:       PetscReal *xx_t[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS], *yy_t[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS], *zz_t[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS], *wp_t[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS] /* , radiuses[80000] */;
546:       PetscInt   Np_t[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS];
547:       // make particles
548:       for (PetscInt tid = 0; tid < numthreads; tid++) {
549:         const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
550:         if (glb_v_id < num_vertices) {                                                     // the ragged edge (in last batch)
551:           PetscInt Npp0 = a_Np + (glb_v_id % (a_Np / 10 + 1)), nTargetP[LANDAU_MAX_GRIDS]; // number of particles in each dim, with load imbalance
552:           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {                         // add the same particles for all grids
553:             // for (PetscInt sp = ctx->species_offset[grid], i0 = 0; sp < ctx->species_offset[grid + 1]; sp++, i0++) {
554:             const PetscReal kT_m  = ctx->k * ctx->thermal_temps[ctx->species_offset[grid]] / ctx->masses[ctx->species_offset[grid]] / (ctx->v_0 * ctx->v_0);                      /* theta = 2kT/mc^2 per species */
555:             PetscReal       lo[3] = {-ctx->radius[grid], -ctx->radius[grid], -ctx->radius[grid]}, hi[3] = {ctx->radius[grid], ctx->radius[grid], ctx->radius[grid]}, hp[3], vole; // would be nice to get box from DM
556:             PetscInt        Npi = Npp0, Npj = 2 * Npp0, Npk = 1;
557:             PetscRandom     rand;
558:             PetscReal       sigma = ctx->thermal_speed[grid] / ctx->thermal_speed[0], p2_shift = grid == 0 ? shift : -shift; // symmetric shift of e vs ions
559:             PetscCall(PetscRandomCreate(PETSC_COMM_SELF, &rand));
560:             PetscCall(PetscRandomSetInterval(rand, 0., 1.));
561:             PetscCall(PetscRandomSetFromOptions(rand));
562:             if (dim == 2) lo[0] = 0; // Landau coordinate (r,z)
563:             else Npi = Npj = Npk = Npp0;
564:             // User: use glb_v_id to index into your data
565:             const PetscInt NNreal = Npi * Npj * Npk, NN = NNreal + (dim == 2 ? 3 : 6); // make room for bounding box
566:             Np_t[grid][tid] = NN;
567:             if (glb_v_id == v_target) nTargetP[grid] = NN;
568:             PetscCall(PetscMalloc4(NN, &xx_t[grid][tid], NN, &yy_t[grid][tid], NN, &wp_t[grid][tid], dim == 2 ? 1 : NN, &zz_t[grid][tid]));
569:             hp[0] = (hi[0] - lo[0]) / Npi;
570:             hp[1] = (hi[1] - lo[1]) / Npj;
571:             hp[2] = (hi[2] - lo[2]) / Npk;
572:             if (dim == 2) hp[2] = 1;
573:             PetscCall(PetscInfo(pack, " lo = %14.7e, hi = %14.7e; hp = %14.7e, %14.7e; kT_m = %g; \n", (double)lo[1], (double)hi[1], (double)hp[0], (double)hp[1], (double)kT_m)); // temp
574:             vole = hp[0] * hp[1] * hp[2] * ctx->n[grid];                                                                                                                           // fix for multi-species
575:             PetscCall(PetscInfo(pack, "Vertex %" PetscInt_FMT ", grid %" PetscInt_FMT " with %" PetscInt_FMT " particles (diagnostic target = %" PetscInt_FMT ")\n", glb_v_id, grid, NN, v_target));
576:             for (PetscInt pj = 0, pp = 0; pj < Npj; pj++) {
577:               for (PetscInt pk = 0; pk < Npk; pk++) {
578:                 for (PetscInt pi = 0; pi < Npi; pi++, pp++) {
579:                   PetscReal p_shift   = p2_shift;
580:                   wp_t[grid][tid][pp] = 0;
581:                   if (use_uniform_particle_grid) {
582:                     xx_t[grid][tid][pp] = lo[0] + hp[0] / 2.0 + pi * hp[0];
583:                     yy_t[grid][tid][pp] = lo[1] + hp[1] / 2.0 + pj * hp[1];
584:                     if (dim == 3) zz_t[grid][tid][pp] = lo[2] + hp[2] / 2.0 + pk * hp[2];
585:                     PetscReal x[] = {xx_t[grid][tid][pp], yy_t[grid][tid][pp], dim == 2 ? 0 : zz_t[grid][tid][pp]};
586:                     p_shift *= ctx->thermal_speed[grid] / ctx->v_0;
587:                     if (ctx->sphere && PetscSqrtReal(PetscSqr(xx_t[grid][tid][pp]) + PetscSqr(yy_t[grid][tid][pp])) > 0.92 * hi[0]) {
588:                       wp_t[grid][tid][pp] = 0;
589:                     } else {
590:                       maxwellian(dim, x, kT_m, vole, p_shift, &wp_t[grid][tid][pp]);
591:                       if (ctx->num_grids == 1 && shift != 0) {                          // bi-maxwellian, electron plasma
592:                         maxwellian(dim, x, kT_m, vole, -p_shift, &wp_t[grid][tid][pp]); // symmetric shift of electron plasma
593:                       }
594:                     }
595:                   } else {
596:                     PetscReal u1, u2;
597:                     do {
598:                       do {
599:                         PetscCall(PetscRandomGetValueReal(rand, &u1));
600:                       } while (u1 == 0);
601:                       PetscCall(PetscRandomGetValueReal(rand, &u2));
602:                       // Box-Muller transform: map the uniform pair (u1,u2) to a normal sample with standard deviation sigma
603:                       PetscReal mag       = sigma * PetscSqrtReal(-2.0 * PetscLogReal(u1)); // is this the same scale grid Maxwellian? t_therm = sigma
604:                       xx_t[grid][tid][pp] = mag * PetscCosReal(2.0 * PETSC_PI * u2);
605:                       yy_t[grid][tid][pp] = mag * PetscSinReal(2.0 * PETSC_PI * u2);
606:                       if (dim == 2 && xx_t[grid][tid][pp] < lo[0]) xx_t[grid][tid][pp] = -xx_t[grid][tid][pp];
607:                       if (dim == 3) zz_t[grid][tid][pp] = lo[2] + hp[2] / 2.0 + pk * hp[2];
608:                       if (!ctx->sphere) {
609:                         if (dim == 2 && xx_t[grid][tid][pp] < 0) xx_t[grid][tid][pp] = -xx_t[grid][tid][pp]; // ???
610:                         else if (dim == 3) {
611:                           while (zz_t[grid][tid][pp] >= hi[2] || zz_t[grid][tid][pp] <= lo[2]) zz_t[grid][tid][pp] *= .9;
612:                         }
613:                         while (xx_t[grid][tid][pp] >= hi[0] || xx_t[grid][tid][pp] <= lo[0]) xx_t[grid][tid][pp] *= .9;
614:                         while (yy_t[grid][tid][pp] >= hi[1] || yy_t[grid][tid][pp] <= lo[1]) yy_t[grid][tid][pp] *= .9;
615:                       } else { // 2D
616:                         //if (glb_v_id == v_target && pp < 80000) radiuses[pp] = PetscSqrtReal(PetscSqr(xx_t[grid][tid][pp]) + PetscSqr(yy_t[grid][tid][pp]));
617:                         while (PetscSqrtReal(PetscSqr(xx_t[grid][tid][pp]) + PetscSqr(yy_t[grid][tid][pp])) > 0.92 * hi[0]) { // safety factor for facets of sphere
618:                           xx_t[grid][tid][pp] *= .9;
619:                           yy_t[grid][tid][pp] *= .9;
620:                         }
621:                       }
622:                       if (ctx->num_grids == 1 && pp % 2 == 0) p_shift = 0; // one species, split bi-max
623:                       p_shift *= ctx->thermal_speed[grid] / ctx->v_0;
624:                       if (dim == 3) zz_t[grid][tid][pp] += p_shift;
625:                       else yy_t[grid][tid][pp] += p_shift;
626:                       wp_t[grid][tid][pp] += ctx->n[grid] / NNreal * PetscSqrtReal(ctx->masses[ctx->species_offset[grid]] / ctx->masses[0]);
627:                       if (p_shift <= 0) break; // add bi-max for electron plasma only
628:                       p_shift = -p_shift;
629:                     } while (ctx->num_grids == 1); // add bi-max for electron plasma only
630:                   }
631:                   {
632:                     if (glb_v_id == v_target) {
633:                       PetscReal x[] = {xx_t[grid][tid][pp], yy_t[grid][tid][pp], dim == 2 ? 0 : zz_t[grid][tid][pp]};
634:                       PetscReal v2 = 0, fact = dim == 2 ? 2.0 * PETSC_PI * x[0] : 1, w = fact * wp_t[grid][tid][pp] * ctx->n_0 * ctx->masses[ctx->species_offset[grid]];
635:                       for (PetscInt i = 0; i < dim; ++i) v2 += PetscSqr(x[i]);
636:                       moments_0[0] += w;                   // not thread safe
637:                       moments_0[1] += w * ctx->v_0 * x[1]; // z-momentum
638:                       moments_0[2] += w * 0.5 * ctx->v_0 * ctx->v_0 * v2;
639:                     }
640:                   }
641:                 }
642:               }
643:             }
644:             if (dim == 2) { // add zero-weight particles to pin the bounding box
645:               PetscInt pp           = NNreal;
646:               wp_t[grid][tid][pp]   = 0;
647:               xx_t[grid][tid][pp]   = 1.e-7;
648:               yy_t[grid][tid][pp++] = hi[1] - 5.e-7;
649:               wp_t[grid][tid][pp]   = 0;
650:               xx_t[grid][tid][pp]   = hi[0] - 5.e-7;
651:               yy_t[grid][tid][pp++] = 0;
652:               wp_t[grid][tid][pp]   = 0;
653:               xx_t[grid][tid][pp]   = 1.e-7;
654:               yy_t[grid][tid][pp++] = lo[1] + 5.e-7;
655:             } else {
656:               const PetscInt p0 = NNreal;
657:               for (PetscInt pj = 0; pj < 6; pj++) xx_t[grid][tid][p0 + pj] = yy_t[grid][tid][p0 + pj] = zz_t[grid][tid][p0 + pj] = wp_t[grid][tid][p0 + pj] = 0;
658:               xx_t[grid][tid][p0 + 0] = lo[0];
659:               xx_t[grid][tid][p0 + 1] = hi[0];
660:               yy_t[grid][tid][p0 + 2] = lo[1];
661:               yy_t[grid][tid][p0 + 3] = hi[1];
662:               zz_t[grid][tid][p0 + 4] = lo[2];
663:               zz_t[grid][tid][p0 + 5] = hi[2];
664:             }
665:             PetscCall(PetscRandomDestroy(&rand));
666:           }
667:           // entropy init, need global n
668:           if (glb_v_id == v_target) {
669:             const PetscReal N_inv = 1 / moments_0[0];
670:             PetscCall(PetscInfo(pack, "Target %" PetscInt_FMT " with %" PetscInt_FMT " particles\n", glb_v_id, nTargetP[0]));
671:             for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
672:               const PetscInt NN = nTargetP[grid];
673:               for (PetscInt pp = 0; pp < NN; pp++) {
674:                 const PetscReal fact = dim == 2 ? 2.0 * PETSC_PI * xx_t[grid][tid][pp] : 1, w = fact * ctx->n_0 * ctx->masses[ctx->species_offset[grid]] * wp_t[grid][tid][pp], ww = w * N_inv;
675:                 if (w > PETSC_REAL_MIN) {
676:                   moments_0[3] -= ww * PetscLogReal(ww);
677:                   PetscCheck(ww < 1 - PETSC_MACHINE_EPSILON, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "ww (%g) > 1", (double)ww);
678:                 } else moments_0[4] -= w;
679:               }
680:             } // grid
681:           } // target
682:         } // active
683:       } // threads
684:       /* Create particle swarm */
685:       for (PetscInt tid = 0; tid < numthreads; tid++) {
686:         const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
687:         if (glb_v_id < num_vertices) {                             // the ragged edge of the last batch
688:           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add the same particles for all grids
689:             PetscSection section;
690:             PetscInt     Nf;
691:             DM           dm = grid_dm[grid];
692:             PetscCall(DMGetLocalSection(dm, &section));
693:             PetscCall(PetscSectionGetNumFields(section, &Nf));
694:             PetscCheck(Nf == 1, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Only one species per grid supported -- todo");
695:             PetscCall(DMViewFromOptions(dm, NULL, "-dm_view"));
696:             PetscCall(PetscInfo(pack, "call createSwarm [%" PetscInt_FMT ".%" PetscInt_FMT "] local block index %" PetscInt_FMT "\n", v_id, grid, LAND_PACK_IDX(v_id, grid)));
697:             PetscCall(createSwarm(dm, dim, &globSwarmArray[LAND_PACK_IDX(v_id, grid)]));
698:           }
699:         } // active
700:       } // threads
701:       PetscCheck(ierr != 9999, PETSC_COMM_WORLD, PETSC_ERR_PLIB, "Only support one species per grid");
702:       // make globMpArray
703:       PetscPragmaOMP(parallel for)
704:       for (PetscInt tid = 0; tid < numthreads; tid++) {
705:         const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
706:         if (glb_v_id < num_vertices) {
707:           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add the same particles for all grids
708:             // for (PetscInt sp = ctx->species_offset[grid], i0 = 0; sp < ctx->species_offset[grid + 1]; sp++, i0++) -- loop over species for Nf > 1 -- TODO
709:             PetscErrorCode ierr_t;
710:             DM             sw = globSwarmArray[LAND_PACK_IDX(v_id, grid)];
711:             ierr_t            = PetscInfo(pack, "makeSwarm %" PetscInt_FMT ".%" PetscInt_FMT ") for block %" PetscInt_FMT "\n", v_id, grid, LAND_PACK_IDX(v_id, grid));
712:             ierr_t            = makeSwarm(sw, dim, Np_t[grid][tid], xx_t[grid][tid], yy_t[grid][tid], zz_t[grid][tid]);
713:             if (ierr_t) ierr = ierr_t;
714:           }
715:         }
716:       }
717:       for (PetscInt tid = 0; tid < numthreads; tid++) {
718:         const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
719:         if (glb_v_id < num_vertices) {
720:           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add the same particles for all grids
721:             DM dm = grid_dm[grid];
722:             DM sw = globSwarmArray[LAND_PACK_IDX(v_id, grid)];
723:             PetscCall(PetscInfo(pack, "createMp %" PetscInt_FMT ".%" PetscInt_FMT ") for block %" PetscInt_FMT "\n", v_id, grid, LAND_PACK_IDX(v_id, grid)));
724:             PetscCall(createMp(dm, sw, &globMpArray[LAND_PACK_IDX(v_id, grid)]));
725:             PetscCall(MatViewFromOptions(globMpArray[LAND_PACK_IDX(v_id, grid)], NULL, "-mp_mat_view"));
726:           }
727:         }
728:       }
729:       // p --> g: set X
730:       // PetscPragmaOMP(parallel for)
731:       for (PetscInt tid = 0; tid < numthreads; tid++) {
732:         const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
733:         if (glb_v_id < num_vertices) {
734:           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add the same particles for all grids
735:             PetscErrorCode ierr_t;
736:             DM             dm   = grid_dm[grid];
737:             DM             sw   = globSwarmArray[LAND_PACK_IDX(v_id, grid)];
738:             Vec            subX = globXArray[LAND_PACK_IDX(v_id, grid)], work = t_fhat[grid][tid];
739:             ierr_t = PetscInfo(pack, "particlesToGrid %" PetscInt_FMT ".%" PetscInt_FMT ") for block %" PetscInt_FMT "\n", v_id, grid, LAND_PACK_IDX(v_id, grid));
740:             ierr_t = particlesToGrid(dm, sw, tid, dim, wp_t[grid][tid], subX, globMpArray[LAND_PACK_IDX(v_id, grid)]);
741:             if (ierr_t) ierr = ierr_t;
742:             // u = M^-1 f_w
743:             ierr_t = VecCopy(subX, work);
744:             ierr_t = KSPSolve(t_ksp[grid][tid], work, subX);
745:             if (ierr_t) ierr = ierr_t;
746:           }
747:         }
748:       }
749:       PetscCheck(!ierr, PETSC_COMM_WORLD, PETSC_ERR_PLIB, "Error in OMP loop. ierr = %d", (int)ierr);
750:       /* Cleanup */
751:       for (PetscInt tid = 0; tid < numthreads; tid++) {
752:         const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
753:         if (glb_v_id < num_vertices) {
754:           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add the same particles for all grids
755:             PetscCall(PetscFree4(xx_t[grid][tid], yy_t[grid][tid], wp_t[grid][tid], zz_t[grid][tid]));
756:           }
757:         } // active
758:       } // threads
759:     } // (fake) particle loop
760:     // standard view of initial conditions
761:     if (v_target >= global_vertex_id_0 && v_target < global_vertex_id_0 + ctx->batch_sz) {
762:       PetscCall(DMSetOutputSequenceNumber(ctx->plex[g_target], 0, 0.0));
763:       PetscCall(VecViewFromOptions(globXArray[LAND_PACK_IDX(v_target % ctx->batch_sz, g_target)], NULL, "-ex30_vec_view"));
764:       if (ctx->num_grids > g_target + 1) {
765:         PetscCall(DMSetOutputSequenceNumber(ctx->plex[g_target + 1], 0, 0.0));
766:         PetscCall(VecViewFromOptions(globXArray[LAND_PACK_IDX(v_target % ctx->batch_sz, g_target + 1)], NULL, "-ex30_vec_view2"));
767:       }
768:       PetscCall(MatViewFromOptions(globMpArray[LAND_PACK_IDX(v_target % ctx->batch_sz, g_target)], NULL, "-ex30_mass_mat_view"));
769:       PetscCall(DMViewFromOptions(globSwarmArray[LAND_PACK_IDX(v_target % ctx->batch_sz, g_target)], NULL, "-ex30_sw_view"));
770:       PetscCall(DMSwarmViewXDMF(globSwarmArray[LAND_PACK_IDX(v_target % ctx->batch_sz, g_target)], "initial_swarm.xmf")); // writes a file by default!!!
771:     }
772:     // coarse-grained moments_1a: bring f back from the grid before the advance
773:     if (v_target >= global_vertex_id_0 && v_target < global_vertex_id_0 + ctx->batch_sz && printCtx->print_entropy) {
774:       const PetscInt v_id = v_target % ctx->batch_sz;
775:       for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
776:         PetscDataType dtype;
777:         PetscReal    *wp, *coords;
778:         DM            sw = globSwarmArray[LAND_PACK_IDX(v_id, grid)];
779:         Vec           work, subX = globXArray[LAND_PACK_IDX(v_id, grid)];
780:         PetscInt      bs, NN;
781:         // C-G moments
782:         PetscCall(VecDuplicate(subX, &work));
783:         PetscCall(gridToParticles(grid_dm[grid], sw, subX, work, globMpArray[LAND_PACK_IDX(v_id, grid)], g_Mass[grid]));
784:         PetscCall(VecDestroy(&work));
785:         // moments
786:         PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
787:         PetscCall(DMSwarmGetLocalSize(sw, &NN));
788:         PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wp));
789:         for (PetscInt pp = 0; pp < NN; pp++) {
790:           PetscReal v2 = 0, fact = (dim == 2) ? 2.0 * PETSC_PI * coords[pp * dim + 0] : 1, w = fact * wp[pp] * ctx->n_0 * ctx->masses[ctx->species_offset[grid]];
791:           for (PetscInt i = 0; i < dim; ++i) v2 += PetscSqr(coords[pp * dim + i]);
792:           moments_1a[0] += w;
793:           moments_1a[1] += w * ctx->v_0 * coords[pp * dim + 1]; // z-momentum
794:           moments_1a[2] += w * 0.5 * ctx->v_0 * ctx->v_0 * v2;
795:         }
796:         PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
797:         PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wp));
798:       }
799:       // entropy
800:       const PetscReal N_inv = 1 / moments_1a[0];
801:       PetscCall(PetscInfo(pack, "Entropy batch %" PetscInt_FMT " of %" PetscInt_FMT ", n = %g\n", v_target, num_vertices, (double)(1 / N_inv)));
802:       for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
803:         PetscDataType dtype;
804:         PetscReal    *wp, *coords;
805:         DM            sw = globSwarmArray[LAND_PACK_IDX(v_id, grid)];
806:         PetscInt      bs, NN;
807:         PetscCall(DMSwarmGetLocalSize(sw, &NN));
808:         PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wp));
809:         PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
810:         for (PetscInt pp = 0; pp < NN; pp++) {
811:           PetscReal fact = (dim == 2) ? 2.0 * PETSC_PI * coords[pp * dim + 0] : 1, w = fact * wp[pp] * ctx->n_0 * ctx->masses[ctx->species_offset[grid]], ww = w * N_inv;
812:           if (w > PETSC_REAL_MIN) {
813:             moments_1a[3] -= ww * PetscLogReal(ww);
814:             PetscCheck(ww < 1 - PETSC_MACHINE_EPSILON, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "ww (%g) > 1", (double)ww);
815:           } else moments_1a[4] -= w;
816:         }
817:         PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wp));
818:         PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
819:       }
820:     }
821:     // restore vector
822:     PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
823:     // view initial grid
824:     if (v_target >= global_vertex_id_0 && v_target < global_vertex_id_0 + ctx->batch_sz) PetscCall(DMPlexLandauPrintNorms(X, 0));
825:     // advance
826:     PetscCall(TSSetSolution(ts, X));
827:     PetscCall(PetscInfo(pack, "Advance vertex %" PetscInt_FMT " to %" PetscInt_FMT "\n", global_vertex_id_0, global_vertex_id_0 + ctx->batch_sz));
828:     PetscCall(TSSetPostStep(ts, PostStep));
829:     PetscCall(PostStep(ts));
830:     PetscCall(TSSolve(ts, X));
831:     // view
832:     PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray));
833:     if (v_target >= global_vertex_id_0 && v_target < global_vertex_id_0 + ctx->batch_sz) {
834:       /* Visualize original particle field */
835:       DM  sw = globSwarmArray[LAND_PACK_IDX(v_target % ctx->batch_sz, g_target)];
836:       Vec f;
837:       PetscCall(DMSetOutputSequenceNumber(sw, 0, 0.0));
838:       PetscCall(DMViewFromOptions(grid_dm[g_target], NULL, "-weights_dm_view"));
839:       PetscCall(DMViewFromOptions(sw, NULL, "-weights_sw_view"));
840:       PetscCall(DMSwarmCreateGlobalVectorFromField(sw, "w_q", &f));
841:       PetscCall(PetscObjectSetName((PetscObject)f, "weights"));
842:       PetscCall(VecViewFromOptions(f, NULL, "-weights_vec_view"));
843:       PetscCall(DMSwarmDestroyGlobalVectorFromField(sw, "w_q", &f));
844:       //
845:       PetscCall(DMPlexLandauPrintNorms(X, 1));
846:     }
847:     if (!use_uniform_particle_grid) { // resample to uniform grid
848:       for (PetscInt v_id_0 = 0; v_id_0 < ctx->batch_sz; v_id_0 += numthreads) {
849:         PetscReal *xx_t[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS], *yy_t[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS], *zz_t[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS], *wp_t[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS];
850:         PetscInt   Np_t[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS];
851:         for (PetscInt tid = 0; tid < numthreads; tid++) {
852:           const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
853:           if (glb_v_id < num_vertices) {
854:             // create a smaller uniform particle grid, without weights
855:             PetscInt Npp0 = (a_Np + (glb_v_id % (a_Np / 10 + 1))) / 2, Nv; // 1/2 of uniform particle grid size
856:             for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
857:               // for (PetscInt sp = ctx->species_offset[grid], i0 = 0; sp < ctx->species_offset[grid + 1]; sp++, i0++)
858:               PetscReal lo[3] = {-ctx->radius[grid], -ctx->radius[grid], -ctx->radius[grid]}, hi[3] = {ctx->radius[grid], ctx->radius[grid], ctx->radius[grid]}, hp[3];
859:               PetscInt  Npi = Npp0, Npj = 2 * Npp0, Npk = 1, NN;
860:               // delete old particles and particle mass matrix
861:               PetscCall(DMDestroy(&globSwarmArray[LAND_PACK_IDX(v_id, grid)]));
862:               PetscCall(MatDestroy(&globMpArray[LAND_PACK_IDX(v_id, grid)]));
863:               // create fake particles in batches with threads
864:               PetscCall(MatGetLocalSize(g_Mass[grid], &Nv, NULL));
865:               if (dim == 2) lo[0] = 0;
866:               else Npi = Npj = Npk = Npp0;
867:               NN = Npi * Npj * Npk + (dim == 2 ? 3 : 6); // make a regular grid of particles Npp x Npp
868:               while (Npi * Npj * Npk < Nv) {             // keep more particles than grid vertices so the solve stays stable (no least-squares)
869:                 Npi++;
870:                 Npj++;
871:                 Npk++;
872:                 NN = Npi * Npj * Npk + (dim == 2 ? 3 : 6);
873:               }
874:               Np_t[grid][tid] = NN;
875:               PetscCall(PetscMalloc4(NN, &xx_t[grid][tid], NN, &yy_t[grid][tid], NN, &wp_t[grid][tid], dim == 2 ? 1 : NN, &zz_t[grid][tid]));
876:               hp[0] = (hi[0] - lo[0]) / Npi;
877:               hp[1] = (hi[1] - lo[1]) / Npj;
878:               hp[2] = (hi[2] - lo[2]) / Npk;
879:               if (dim == 2) hp[2] = 1;
880:               PetscCall(PetscInfo(pack, "Resampling %d particles, %d vertices\n", (int)NN, (int)Nv)); // temp
881:               for (PetscInt pj = 0, pp = 0; pj < Npj; pj++) {
882:                 for (PetscInt pk = 0; pk < Npk; pk++) {
883:                   for (PetscInt pi = 0; pi < Npi; pi++, pp++) {
884:                     wp_t[grid][tid][pp] = 0;
885:                     xx_t[grid][tid][pp] = lo[0] + hp[0] / 2.0 + pi * hp[0];
886:                     yy_t[grid][tid][pp] = lo[1] + hp[1] / 2.0 + pj * hp[1];
887:                     if (dim == 3) zz_t[grid][tid][pp] = lo[2] + hp[2] / 2.0 + pk * hp[2];
888:                   }
889:                 }
890:               }
891:               if (dim == 2) { // add zero-weight particles to pin the bounding box
892:                 PetscInt pp           = NN - 3;
893:                 wp_t[grid][tid][pp]   = 0;
894:                 xx_t[grid][tid][pp]   = 1.e-7;
895:                 yy_t[grid][tid][pp++] = hi[1] - 5.e-7;
896:                 wp_t[grid][tid][pp]   = 0;
897:                 xx_t[grid][tid][pp]   = hi[0] - 5.e-7;
898:                 yy_t[grid][tid][pp++] = 0;
899:                 wp_t[grid][tid][pp]   = 0;
900:                 xx_t[grid][tid][pp]   = 1.e-7;
901:                 yy_t[grid][tid][pp++] = lo[1] + 5.e-7;
902:               } else {
903:                 const PetscInt p0 = NN - 6;
904:                 for (PetscInt pj = 0; pj < 6; pj++) xx_t[grid][tid][p0 + pj] = yy_t[grid][tid][p0 + pj] = zz_t[grid][tid][p0 + pj] = wp_t[grid][tid][p0 + pj] = 0;
905:                 xx_t[grid][tid][p0 + 0] = lo[0];
906:                 xx_t[grid][tid][p0 + 1] = hi[0];
907:                 yy_t[grid][tid][p0 + 2] = lo[1];
908:                 yy_t[grid][tid][p0 + 3] = hi[1];
909:                 zz_t[grid][tid][p0 + 4] = lo[2];
910:                 zz_t[grid][tid][p0 + 5] = hi[2];
911:               }
912:             }
913:           } // active
914:         } // threads
915:         /* Create particle swarm */
916:         for (PetscInt tid = 0; tid < numthreads; tid++) {
917:           const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
918:           if (glb_v_id < num_vertices) {                             // the ragged edge of the last batch
919:             for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add the same particles for all grids
920:               // for (PetscInt sp = ctx->species_offset[grid], i0 = 0; sp < ctx->species_offset[grid + 1]; sp++, i0++) -- loop over species for Nf > 1 -- TODO
921:               PetscErrorCode ierr_t;
922:               PetscSection   section;
923:               PetscInt       Nf;
924:               DM             dm = grid_dm[grid];
925:               ierr_t            = DMGetLocalSection(dm, &section);
926:               ierr_t            = PetscSectionGetNumFields(section, &Nf);
927:               if (Nf != 1) ierr_t = (PetscErrorCode)9999;
928:               else {
929:                 ierr_t = DMViewFromOptions(dm, NULL, "-dm_view");
930:                 ierr_t = PetscInfo(pack, "call createSwarm [%" PetscInt_FMT ".%" PetscInt_FMT "] local block index %" PetscInt_FMT "\n", v_id, grid, LAND_PACK_IDX(v_id, grid));
931:                 ierr_t = createSwarm(dm, dim, &globSwarmArray[LAND_PACK_IDX(v_id, grid)]);
932:               }
933:               if (ierr_t) ierr = ierr_t;
934:             }
935:           } // active
936:         } // threads
937:         PetscCheck(ierr != 9999, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Only support one species per grid");
938:         PetscCheck(!ierr, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Error in OMP loop. ierr = %d", (int)ierr);
939:         // make globMpArray
940:         PetscPragmaOMP(parallel for)
941:         for (PetscInt tid = 0; tid < numthreads; tid++) {
942:           const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
943:           if (glb_v_id < num_vertices) {
944:             for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add the same particles for all grids
945:               // for (PetscInt sp = ctx->species_offset[grid], i0 = 0; sp < ctx->species_offset[grid + 1]; sp++, i0++) -- loop over species for Nf > 1 -- TODO
946:               PetscErrorCode ierr_t;
947:               DM             sw = globSwarmArray[LAND_PACK_IDX(v_id, grid)];
948:               ierr_t            = PetscInfo(pack, "makeSwarm [%" PetscInt_FMT ".%" PetscInt_FMT "] for block %" PetscInt_FMT "\n", v_id, grid, LAND_PACK_IDX(v_id, grid));
949:               ierr_t            = makeSwarm(sw, dim, Np_t[grid][tid], xx_t[grid][tid], yy_t[grid][tid], zz_t[grid][tid]);
950:               if (ierr_t) ierr = ierr_t;
951:             }
952:           } // active
953:         } // threads
954:         // create particle mass matrices
955:         //PetscPragmaOMP(parallel for)
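             // The pragma above is intentionally left disabled: createMp assembles a
             // particle mass matrix, and that assembly path is presumably not safe to
             // run threaded.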
956:         for (PetscInt tid = 0; tid < numthreads; tid++) {
957:           const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
958:           if (glb_v_id < num_vertices) {
959:             for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // one particle mass matrix per (vertex, grid) block
960:               PetscErrorCode ierr_t;
961:               DM             dm = grid_dm[grid];
962:               DM             sw = globSwarmArray[LAND_PACK_IDX(v_id, grid)];
963:               ierr_t            = PetscInfo(pack, "createMp [%" PetscInt_FMT ".%" PetscInt_FMT "] for block %" PetscInt_FMT "\n", v_id, grid, LAND_PACK_IDX(v_id, grid));
964:               ierr_t            = createMp(dm, sw, &globMpArray[LAND_PACK_IDX(v_id, grid)]);
965:               if (ierr_t) ierr = ierr_t;
966:             }
967:           } // active
968:         } // threads
969:         PetscCheck(!ierr, PETSC_COMM_WORLD, PETSC_ERR_PLIB, "Error in OMP loop. ierr = %d", (int)ierr);
970:         /* Cleanup */
971:         for (PetscInt tid = 0; tid < numthreads; tid++) {
972:           const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
973:           if (glb_v_id < num_vertices) {
974:             for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // free per-grid particle coordinate and weight buffers
975:               PetscCall(PetscFree4(xx_t[grid][tid], yy_t[grid][tid], wp_t[grid][tid], zz_t[grid][tid]));
976:             }
977:           } // active
978:         } // threads
979:       } // batch
980:       // view resampled particles for the target vertex and grid
981:       if (v_target >= global_vertex_id_0 && v_target < global_vertex_id_0 + ctx->batch_sz) {
982:         /* Visualize particle field */
983:         DM  sw = globSwarmArray[LAND_PACK_IDX(v_target % ctx->batch_sz, g_target)];
984:         Vec f;
985:         PetscCall(DMSetOutputSequenceNumber(sw, 0, 0.0));
986:         PetscCall(DMViewFromOptions(sw, NULL, "-resampled_weights_sw_view"));
987:         PetscCall(DMSwarmCreateGlobalVectorFromField(sw, "w_q", &f));
988:         PetscCall(PetscObjectSetName((PetscObject)f, "resampled_weights"));
989:         PetscCall(VecViewFromOptions(f, NULL, "-resampled_weights_vec_view"));
990:         PetscCall(DMSwarmDestroyGlobalVectorFromField(sw, "w_q", &f));
991:         PetscCall(DMSwarmViewXDMF(sw, "resampled.xmf"));
992:       }
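           // Tip: -resampled_weights_vec_view prints the projected weights, and the
           // resampled.xmf file written above can be loaded in ParaView or VisIt.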
993:     } // !uniform
994:     // particles to grid, compute moments and entropy, for target vertex only
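         // moments_* entries match the table printed below: [0] number density,
         // [1] parallel momentum, [2] energy, [3] entropy, [4] accumulated negative
         // particle weight (reported as a percentage of the density).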
995:     if (v_target >= global_vertex_id_0 && v_target < global_vertex_id_0 + ctx->batch_sz && printCtx->print_entropy) {
996:       PetscReal energy_error_rel;
997:       PetscCall(gridToParticles_private(grid_dm, globSwarmArray, dim, v_target, numthreads, num_vertices, global_vertex_id_0, globMpArray, g_Mass, t_fhat, moments_1b, globXArray, ctx));
998:       energy_error_rel = PetscAbsReal(moments_1b[2] - moments_0[2]) / moments_0[2];
999:       PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Particle Moments:\t number density      momentum (par)     energy             entropy            negative weights  : # OMP threads %" PetscInt_FMT "\n", numthreads));
1000:       PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\tInitial:         %18.12e %19.12e %18.12e %18.12e %g %%\n", (double)moments_0[0], (double)moments_0[1], (double)moments_0[2], (double)moments_0[3], 100 * (double)(moments_0[4] / moments_0[0])));
1001:       PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\tCoarse-graining: %18.12e %19.12e %18.12e %18.12e %g %%\n", (double)moments_1a[0], (double)moments_1a[1], (double)moments_1a[2], (double)moments_1a[3], 100 * (double)(moments_1a[4] / moments_0[0])));
1002:       PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\tLandau:          %18.12e %19.12e %18.12e %18.12e %g %%\n", (double)moments_1b[0], (double)moments_1b[1], (double)moments_1b[2], (double)moments_1b[3], 100 * (double)(moments_1b[4] / moments_0[0])));
1003:       PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Coarse-graining entropy generation = %e ; Landau entropy generation = %e\n", (double)(moments_1a[3] - moments_0[3]), (double)(moments_1b[3] - moments_0[3])));
1004:       PetscCall(PetscPrintf(PETSC_COMM_WORLD, "(relative) energy conservation: Coarse-graining = %e, Landau = %e (log10 = %g, rounded to %d)\n", (double)(moments_1a[2] - moments_0[2]) / (double)moments_0[2], (double)energy_error_rel, (double)PetscLog10Real(energy_error_rel), (int)(PetscLog10Real(energy_error_rel) + .5)));
1005:     }
1006:     // restore vector
1007:     PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
1008:     // cleanup
1009:     for (PetscInt v_id_0 = 0; v_id_0 < ctx->batch_sz; v_id_0 += numthreads) {
1010:       for (PetscInt tid = 0; tid < numthreads; tid++) {
1011:         const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
1012:         if (glb_v_id < num_vertices) {
1013:           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1014:             PetscCall(DMDestroy(&globSwarmArray[LAND_PACK_IDX(v_id, grid)]));
1015:             PetscCall(MatDestroy(&globMpArray[LAND_PACK_IDX(v_id, grid)]));
1016:           }
1017:         }
1018:       }
1019:     }
1020:   } // user batch, not used
1021:   /* Cleanup */
1022:   PetscCall(PetscFree(globXArray));
1023:   PetscCall(PetscFree(globSwarmArray));
1024:   PetscCall(PetscFree(globMpArray));
1025:   PetscCall(PetscFree(printCtx));
1026:   // clean up mass matrices
1027:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1028:     PetscCall(MatDestroy(&g_Mass[grid]));
1029:     for (PetscInt tid = 0; tid < numthreads; tid++) {
1030:       PetscCall(VecDestroy(&t_fhat[grid][tid]));
1031:       PetscCall(KSPDestroy(&t_ksp[grid][tid]));
1032:     }
1033:   }
1034:   PetscFunctionReturn(PETSC_SUCCESS);
1035: }

1037: int main(int argc, char **argv)
1038: {
1039:   DM         pack;
1040:   Vec        X;
1041:   PetscInt   dim = 2, num_vertices = 1, Np = 10, v_target = 0, g_target = 0;
1042:   TS         ts;
1043:   Mat        J;
1044:   LandauCtx *ctx;
1045:   PetscReal  shift                     = 0;
1046:   PetscBool  use_uniform_particle_grid = PETSC_TRUE;

1048:   PetscFunctionBeginUser;
1049:   PetscCall(PetscInitialize(&argc, &argv, NULL, help));
1050:   // process args
1051:   PetscOptionsBegin(PETSC_COMM_SELF, "", "Collision Options", "DMPLEX");
1052:   PetscCall(PetscOptionsInt("-dim", "Velocity space dimension", "ex30.c", dim, &dim, NULL));
1053:   PetscCall(PetscOptionsInt("-number_spatial_vertices", "Number of user spatial vertices to be batched for Landau", "ex30.c", num_vertices, &num_vertices, NULL));
1054:   PetscCall(PetscOptionsInt("-number_particles_per_dimension", "Number of particles in each dimension of the base Cartesian grid, per Landau grid, perturbed slightly for each spatial vertex", "ex30.c", Np, &Np, NULL));
1055:   PetscCall(PetscOptionsBool("-use_uniform_particle_grid", "Use uniform particle grid", "ex30.c", use_uniform_particle_grid, &use_uniform_particle_grid, NULL));
1056:   PetscCall(PetscOptionsInt("-vertex_view_target", "Global vertex for diagnostics", "ex30.c", v_target, &v_target, NULL));
1057:   PetscCall(PetscOptionsReal("-e_shift", "Bi-Maxwellian shift", "ex30.c", shift, &shift, NULL));
1058:   PetscCheck(v_target < num_vertices, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Vertex to view %" PetscInt_FMT " should be < number of vertices %" PetscInt_FMT, v_target, num_vertices);
1059:   PetscCall(PetscOptionsInt("-grid_view_target", "Grid to view with diagnostics", "ex30.c", g_target, &g_target, NULL));
1060:   PetscOptionsEnd();
1061:   /* Create the Landau velocity-space meshes, along with the solution vector X and Jacobian J */
1062:   PetscCall(DMPlexLandauCreateVelocitySpace(PETSC_COMM_SELF, dim, "", &X, &J, &pack));
1063:   PetscCall(DMGetApplicationContext(pack, &ctx));
1064:   PetscCall(DMSetUp(pack));
1065:   PetscCall(DMSetOutputSequenceNumber(pack, 0, 0.0));
1066:   PetscCheck(g_target < ctx->num_grids, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Grid to view %" PetscInt_FMT " should be < number of grids %" PetscInt_FMT, g_target, ctx->num_grids);
1067:   PetscCheck(ctx->batch_view_idx == v_target % ctx->batch_sz, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Global view index %" PetscInt_FMT " mod batch size %" PetscInt_FMT " != ctx->batch_view_idx %" PetscInt_FMT, v_target, ctx->batch_sz, ctx->batch_view_idx);
1068:   // PetscCheck(!use_uniform_particle_grid || !ctx->sphere, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Can not use -use_uniform_particle_grid and -dm_landau_sphere");
1069:   /* Create timestepping solver context */
1070:   PetscCall(TSCreate(PETSC_COMM_SELF, &ts));
1071:   PetscCall(TSSetDM(ts, pack));
1072:   PetscCall(TSSetIFunction(ts, NULL, DMPlexLandauIFunction, NULL));
1073:   PetscCall(TSSetIJacobian(ts, J, J, DMPlexLandauIJacobian, NULL));
1074:   PetscCall(TSSetExactFinalTime(ts, TS_EXACTFINALTIME_STEPOVER));
1075:   PetscCall(TSSetFromOptions(ts));
1076:   PetscCall(PetscObjectSetName((PetscObject)X, "X"));
1077:   // do particle advance
1078:   PetscCall(go(ts, X, num_vertices, Np, dim, v_target, g_target, shift, use_uniform_particle_grid));
1079:   PetscCall(MatZeroEntries(J)); // need to zero out so as to not reuse it in Landau's logic
1080:   /* clean up */
1081:   PetscCall(DMPlexLandauDestroyVelocitySpace(&pack));
1082:   PetscCall(TSDestroy(&ts));
1083:   PetscCall(VecDestroy(&X));
1084:   PetscCall(PetscFinalize());
1085:   return 0;
1086: }
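
/*
   Example invocation (a sketch assembled from the options exercised by the tests
   below, not a verified command line; see the first testset for a known-good
   configuration):

     ./ex30 -dim 2 -petscspace_degree 3 -dm_landau_num_species_grid 1,1,1 \
            -number_particles_per_dimension 20 -print_entropy
*/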

1088: /*TEST

1090:   build:
1091:     requires: !complex

1093:   testset:
1094:     requires: double defined(PETSC_USE_DMLANDAU_2D)
1095:     output_file: output/ex30_0.out
1096:     args: -dim 2 -petscspace_degree 3 -dm_landau_num_species_grid 1,1,1 -dm_refine 1 -number_particles_per_dimension 20 \
1097:           -dm_landau_batch_size 4 -number_spatial_vertices 6 -vertex_view_target 5 -grid_view_target 1 -dm_landau_batch_view_idx 1 \
1098:           -dm_landau_n 1.000018,1,1e-6 -dm_landau_thermal_temps 2,1,1 -dm_landau_ion_masses 2,180 -dm_landau_ion_charges 1,18 \
1099:           -ftop_ksp_rtol 1e-10 -ftop_ksp_type lsqr -ftop_pc_type bjacobi -ftop_sub_pc_factor_shift_type nonzero -ftop_sub_pc_type lu -ftop_ksp_error_if_not_converged \
1100:           -ksp_type gmres -ksp_error_if_not_converged -dm_landau_verbose 4 -print_entropy \
1101:           -ptof_ksp_type cg -ptof_pc_type jacobi -ptof_ksp_rtol 1e-12 -ptof_ksp_error_if_not_converged \
1102:           -snes_converged_reason -snes_monitor -snes_rtol 1e-12 -snes_stol 1e-12 \
1103:           -ts_time_step 0.01 -ts_rtol 1e-1 -ts_exact_final_time stepover -ts_max_snes_failures -1 -ts_max_steps 1 -ts_monitor -ts_type beuler
1104:     test:
1105:       suffix: cpu
1106:       args: -dm_landau_device_type cpu -pc_type jacobi
1107:     test:
1108:       suffix: kokkos
1109:       # failed on Sunspot@ALCF with sycl
1110:       requires: kokkos_kernels !openmp !sycl
1111:       args: -dm_landau_device_type kokkos -dm_mat_type aijkokkos -dm_vec_type kokkos -pc_type bjkokkos -pc_bjkokkos_ksp_type tfqmr -pc_bjkokkos_pc_type jacobi

1113:   testset:
1114:     requires: double !defined(PETSC_USE_DMLANDAU_2D)
1115:     output_file: output/ex30_3d.out
1116:     args: -dim 3 -petscspace_degree 2 -dm_landau_num_species_grid 1,1 -dm_refine 0 -number_particles_per_dimension 10 -dm_plex_hash_location \
1117:           -dm_landau_batch_size 1 -number_spatial_vertices 1 -vertex_view_target 0 -grid_view_target 0 -dm_landau_batch_view_idx 0 \
1118:           -dm_landau_n 1.000018,1 -dm_landau_thermal_temps 2,1 -dm_landau_ion_masses 2 -dm_landau_ion_charges 1 \
1119:           -ftop_ksp_type cg -ftop_pc_type jacobi -ftop_ksp_rtol 1e-12 -ftop_ksp_error_if_not_converged -ksp_type preonly -pc_type lu -ksp_error_if_not_converged \
1120:           -ptof_ksp_type cg -ptof_pc_type jacobi -ptof_ksp_rtol 1e-12 -ptof_ksp_error_if_not_converged \
1121:           -snes_converged_reason -snes_monitor -snes_rtol 1e-12 -snes_stol 1e-12 \
1122:           -ts_time_step 0.1 -ts_exact_final_time stepover -ts_max_snes_failures -1 -ts_max_steps 1 -ts_monitor -ts_type beuler -print_entropy
1123:     test:
1124:       suffix: cpu_3d
1125:       args: -dm_landau_device_type cpu
1126:     test:
1127:       suffix: kokkos_3d
1128:       requires: kokkos_kernels !openmp
1129:       args: -dm_landau_device_type kokkos -dm_mat_type aijkokkos -dm_vec_type kokkos -pc_type bjkokkos -pc_bjkokkos_ksp_type tfqmr -pc_bjkokkos_pc_type jacobi

1131:   test:
1132:     suffix: conserve
1133:     requires: !complex double defined(PETSC_USE_DMLANDAU_2D) !cuda
1134:     args: -dm_landau_batch_size 4 -dm_refine 0 -dm_landau_num_species_grid 1 -dm_landau_thermal_temps 1 -petscspace_degree 3 -snes_converged_reason -ts_type beuler -ts_time_step .1 \
1135:           -ts_max_steps 1 -ksp_type preonly -ksp_error_if_not_converged -snes_rtol 1e-14 -snes_stol 1e-14 -dm_landau_device_type cpu \
1136:           -ptof_ksp_type cg -ptof_pc_type jacobi -ptof_ksp_rtol 1e-14 -ptof_ksp_error_if_not_converged -pc_type lu -dm_landau_simplex 1 -use_uniform_particle_grid false -dm_landau_sphere -print_entropy -number_particles_per_dimension 50 -ftop_ksp_type cg -ftop_pc_type jacobi -ftop_ksp_rtol 1e-14

1138: TEST*/