Actual source code: ntr.c

  1: #include <../src/tao/unconstrained/impls/ntr/ntrimpl.h>

  3: #include <petscksp.h>

  5: #define NTR_INIT_CONSTANT      0
  6: #define NTR_INIT_DIRECTION     1
  7: #define NTR_INIT_INTERPOLATION 2
  8: #define NTR_INIT_TYPES         3

 10: #define NTR_UPDATE_REDUCTION     0
 11: #define NTR_UPDATE_INTERPOLATION 1
 12: #define NTR_UPDATE_TYPES         2

 14: static const char *NTR_INIT[64] = {"constant", "direction", "interpolation"};

 16: static const char *NTR_UPDATE[64] = {"reduction", "interpolation"};

 18: /*
 19:    TaoSolve_NTR - Implements Newton's Method with a trust region approach
 20:    for solving unconstrained minimization problems.

 22:    The basic algorithm is taken from MINPACK-2 (dstrn).

 24:    TaoSolve_NTR computes a local minimizer of a twice differentiable function
 25:    f by applying a trust region variant of Newton's method.  At each stage
 26:    of the algorithm, we use the preconditioned conjugate gradient method to
 27:    determine an approximate minimizer of the quadratic model

 29:         q(s) = 0.5 <s, Hs> + <g, s>

 31:    subject to the trust region constraint

 33:         || s ||_M <= radius,

 35:    where radius is the trust region radius and M is a symmetric positive
 36:    definite matrix (the preconditioner).  Here g is the gradient and H
 37:    is the Hessian matrix.

 39:    Note:  TaoSolve_NTR MUST use the iterative solver KSPNASH, KSPSTCG,
 40:           or KSPGLTR.  This routine checks the KSP type and raises an
 41:           error if any other solver has been selected.
 42: */
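/*
   Runtime selection of this solver and of its trust-region subproblem KSP is
   typically done through options (an illustrative sketch; the option names
   assume the default "tao_ntr_" prefix appended in TaoCreate_NTR() below):

       -tao_type ntr -tao_ntr_ksp_type gltr -tao_ntr_init_type interpolation -tao_ntr_update_type reduction
*/
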
 43: static PetscErrorCode TaoSolve_NTR(Tao tao)
 44: {
 45:   TAO_NTR           *tr = (TAO_NTR *)tao->data;
 46:   KSPType            ksp_type;
 47:   PetscBool          is_nash, is_stcg, is_gltr, is_bfgs, is_jacobi, is_symmetric, sym_set;
 48:   KSPConvergedReason ksp_reason;
 49:   PC                 pc;
 50:   PetscReal          fmin, ftrial, prered, actred, kappa, sigma, beta;
 51:   PetscReal          tau, tau_1, tau_2, tau_max, tau_min, max_radius;
 52:   PetscReal          f, gnorm;

 54:   PetscReal norm_d;
 55:   PetscInt  needH;

 57:   PetscInt i_max = 5;
 58:   PetscInt j_max = 1;
 59:   PetscInt i, j, N, n, its;

 61:   PetscFunctionBegin;
 62:   if (tao->XL || tao->XU || tao->ops->computebounds) PetscCall(PetscInfo(tao, "WARNING: Variable bounds have been set but will be ignored by the NTR algorithm\n"));

 64:   PetscCall(KSPGetType(tao->ksp, &ksp_type));
 65:   PetscCall(PetscStrcmp(ksp_type, KSPNASH, &is_nash));
 66:   PetscCall(PetscStrcmp(ksp_type, KSPSTCG, &is_stcg));
 67:   PetscCall(PetscStrcmp(ksp_type, KSPGLTR, &is_gltr));
 68:   PetscCheck(is_nash || is_stcg || is_gltr, PetscObjectComm((PetscObject)tao), PETSC_ERR_SUP, "TAO_NTR requires nash, stcg, or gltr for the KSP");

 70:   /* Initialize the radius and modify if it is too large or small */
 71:   tao->trust = tao->trust0;
 72:   tao->trust = PetscMax(tao->trust, tr->min_radius);
 73:   tao->trust = PetscMin(tao->trust, tr->max_radius);

 75:   /* Allocate the vectors needed for the BFGS approximation */
 76:   PetscCall(KSPGetPC(tao->ksp, &pc));
 77:   PetscCall(PetscObjectTypeCompare((PetscObject)pc, PCLMVM, &is_bfgs));
 78:   PetscCall(PetscObjectTypeCompare((PetscObject)pc, PCJACOBI, &is_jacobi));
 79:   if (is_bfgs) {
 80:     tr->bfgs_pre = pc;
 81:     PetscCall(PCLMVMGetMatLMVM(tr->bfgs_pre, &tr->M));
 82:     PetscCall(VecGetLocalSize(tao->solution, &n));
 83:     PetscCall(VecGetSize(tao->solution, &N));
 84:     PetscCall(MatSetSizes(tr->M, n, n, N, N));
 85:     PetscCall(MatLMVMAllocate(tr->M, tao->solution, tao->gradient));
 86:     PetscCall(MatIsSymmetricKnown(tr->M, &sym_set, &is_symmetric));
 87:     PetscCheck(sym_set && is_symmetric, PetscObjectComm((PetscObject)tao), PETSC_ERR_ARG_INCOMP, "LMVM matrix in the LMVM preconditioner must be symmetric.");
 88:   } else if (is_jacobi) PetscCall(PCJacobiSetUseAbs(pc, PETSC_TRUE));

 90:   /* Check convergence criteria */
 91:   PetscCall(TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient));
 92:   PetscCall(TaoGradientNorm(tao, tao->gradient, NORM_2, &gnorm));
 93:   PetscCheck(!PetscIsInfOrNanReal(f) && !PetscIsInfOrNanReal(gnorm), PetscObjectComm((PetscObject)tao), PETSC_ERR_USER, "User provided compute function generated Inf or NaN");
 94:   needH = 1;

 96:   tao->reason = TAO_CONTINUE_ITERATING;
 97:   PetscCall(TaoLogConvergenceHistory(tao, f, gnorm, 0.0, tao->ksp_its));
 98:   PetscCall(TaoMonitor(tao, tao->niter, f, gnorm, 0.0, 1.0));
 99:   PetscUseTypeMethod(tao, convergencetest, tao->cnvP);
100:   if (tao->reason != TAO_CONTINUE_ITERATING) PetscFunctionReturn(PETSC_SUCCESS);

102:   /*  Initialize trust-region radius */
103:   switch (tr->init_type) {
104:   case NTR_INIT_CONSTANT:
105:     /*  Use the initial radius specified */
106:     break;

108:   case NTR_INIT_INTERPOLATION:
109:     /*  Interpolate to determine the initial radius */
110:     max_radius = 0.0;

112:     for (j = 0; j < j_max; ++j) {
113:       fmin  = f;
114:       sigma = 0.0;

116:       if (needH) {
117:         PetscCall(TaoComputeHessian(tao, tao->solution, tao->hessian, tao->hessian_pre));
118:         needH = 0;
119:       }

121:       for (i = 0; i < i_max; ++i) {
122:         PetscCall(VecCopy(tao->solution, tr->W));
123:         PetscCall(VecAXPY(tr->W, -tao->trust / gnorm, tao->gradient));
124:         PetscCall(TaoComputeObjective(tao, tr->W, &ftrial));

126:         if (PetscIsInfOrNanReal(ftrial)) {
127:           tau = tr->gamma1_i;
128:         } else {
129:           if (ftrial < fmin) {
130:             fmin  = ftrial;
131:             sigma = -tao->trust / gnorm;
132:           }

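          /* Predicted reduction of the quadratic model for the steepest-descent
             trial step s = -(trust/gnorm) g, i.e.
                 prered = trust*gnorm - 0.5*trust^2*(g'Hg)/gnorm^2,
             with g'Hg computed from the two calls below. */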
134:           PetscCall(MatMult(tao->hessian, tao->gradient, tao->stepdirection));
135:           PetscCall(VecDot(tao->gradient, tao->stepdirection, &prered));

137:           prered = tao->trust * (gnorm - 0.5 * tao->trust * prered / (gnorm * gnorm));
138:           actred = f - ftrial;
139:           if ((PetscAbsScalar(actred) <= tr->epsilon) && (PetscAbsScalar(prered) <= tr->epsilon)) {
140:             kappa = 1.0;
141:           } else {
142:             kappa = actred / prered;
143:           }

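          /* tau_1 and tau_2 are candidate factors for rescaling the radius,
             obtained by interpolating the actual reduction against the predicted
             reduction with parameter theta_i; the tau selected below is kept
             within [gamma1_i, gamma4_i] according to how close kappa is to 1. */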
145:           tau_1   = tr->theta_i * gnorm * tao->trust / (tr->theta_i * gnorm * tao->trust + (1.0 - tr->theta_i) * prered - actred);
146:           tau_2   = tr->theta_i * gnorm * tao->trust / (tr->theta_i * gnorm * tao->trust - (1.0 + tr->theta_i) * prered + actred);
147:           tau_min = PetscMin(tau_1, tau_2);
148:           tau_max = PetscMax(tau_1, tau_2);

150:           if (PetscAbsScalar(kappa - (PetscReal)1.0) <= tr->mu1_i) {
151:             /*  Great agreement */
152:             max_radius = PetscMax(max_radius, tao->trust);

154:             if (tau_max < 1.0) {
155:               tau = tr->gamma3_i;
156:             } else if (tau_max > tr->gamma4_i) {
157:               tau = tr->gamma4_i;
158:             } else {
159:               tau = tau_max;
160:             }
161:           } else if (PetscAbsScalar(kappa - (PetscReal)1.0) <= tr->mu2_i) {
162:             /*  Good agreement */
163:             max_radius = PetscMax(max_radius, tao->trust);

165:             if (tau_max < tr->gamma2_i) {
166:               tau = tr->gamma2_i;
167:             } else if (tau_max > tr->gamma3_i) {
168:               tau = tr->gamma3_i;
169:             } else {
170:               tau = tau_max;
171:             }
172:           } else {
173:             /*  Not good agreement */
174:             if (tau_min > 1.0) {
175:               tau = tr->gamma2_i;
176:             } else if (tau_max < tr->gamma1_i) {
177:               tau = tr->gamma1_i;
178:             } else if ((tau_min < tr->gamma1_i) && (tau_max >= 1.0)) {
179:               tau = tr->gamma1_i;
180:             } else if ((tau_1 >= tr->gamma1_i) && (tau_1 < 1.0) && ((tau_2 < tr->gamma1_i) || (tau_2 >= 1.0))) {
181:               tau = tau_1;
182:             } else if ((tau_2 >= tr->gamma1_i) && (tau_2 < 1.0) && ((tau_1 < tr->gamma1_i) || (tau_1 >= 1.0))) {
183:               tau = tau_2;
184:             } else {
185:               tau = tau_max;
186:             }
187:           }
188:         }
189:         tao->trust = tau * tao->trust;
190:       }

192:       if (fmin < f) {
193:         f = fmin;
194:         PetscCall(VecAXPY(tao->solution, sigma, tao->gradient));
195:         PetscCall(TaoComputeGradient(tao, tao->solution, tao->gradient));

197:         PetscCall(TaoGradientNorm(tao, tao->gradient, NORM_2, &gnorm));
198:         PetscCheck(!PetscIsInfOrNanReal(f) && !PetscIsInfOrNanReal(gnorm), PetscObjectComm((PetscObject)tao), PETSC_ERR_USER, "User provided compute function generated Inf or NaN");
199:         needH = 1;

201:         PetscCall(TaoLogConvergenceHistory(tao, f, gnorm, 0.0, tao->ksp_its));
202:         PetscCall(TaoMonitor(tao, tao->niter, f, gnorm, 0.0, 1.0));
203:         PetscUseTypeMethod(tao, convergencetest, tao->cnvP);
204:         if (tao->reason != TAO_CONTINUE_ITERATING) PetscFunctionReturn(PETSC_SUCCESS);
205:       }
206:     }
207:     tao->trust = PetscMax(tao->trust, max_radius);

209:     /*  Modify the radius if it is too large or small */
210:     tao->trust = PetscMax(tao->trust, tr->min_radius);
211:     tao->trust = PetscMin(tao->trust, tr->max_radius);
212:     break;

214:   default:
215:     /*  Norm of the first direction will initialize radius */
216:     tao->trust = 0.0;
217:     break;
218:   }

220:   /* Have not converged; continue with Newton method */
221:   while (tao->reason == TAO_CONTINUE_ITERATING) {
222:     /* Call general purpose update function */
223:     if (tao->ops->update) {
224:       PetscUseTypeMethod(tao, update, tao->niter, tao->user_update);
225:       PetscCall(TaoComputeObjective(tao, tao->solution, &f));
226:     }
227:     ++tao->niter;
228:     tao->ksp_its = 0;
229:     /* Compute the Hessian */
230:     if (needH) {
231:       PetscCall(TaoComputeHessian(tao, tao->solution, tao->hessian, tao->hessian_pre));
232:       needH = 0;
233:     }

235:     if (tr->bfgs_pre) {
236:       /* Update the limited memory preconditioner */
237:       PetscCall(MatLMVMUpdate(tr->M, tao->solution, tao->gradient));
238:     }

240:     while (tao->reason == TAO_CONTINUE_ITERATING) {
241:       PetscCall(KSPSetOperators(tao->ksp, tao->hessian, tao->hessian_pre));

243:       /* Solve the trust region subproblem */
244:       PetscCall(KSPCGSetRadius(tao->ksp, tao->trust));
245:       PetscCall(KSPSolve(tao->ksp, tao->gradient, tao->stepdirection));
246:       PetscCall(KSPGetIterationNumber(tao->ksp, &its));
247:       tao->ksp_its += its;
248:       tao->ksp_tot_its += its;
249:       PetscCall(KSPCGGetNormD(tao->ksp, &norm_d));

251:       if (0.0 == tao->trust) {
252:         /* Radius was uninitialized; use the norm of the direction */
253:         if (norm_d > 0.0) {
254:           tao->trust = norm_d;

256:           /* Modify the radius if it is too large or small */
257:           tao->trust = PetscMax(tao->trust, tr->min_radius);
258:           tao->trust = PetscMin(tao->trust, tr->max_radius);
259:         } else {
260:           /* The direction was bad; set radius to default value and re-solve
261:              the trust-region subproblem to get a direction */
262:           tao->trust = tao->trust0;

264:           /* Modify the radius if it is too large or small */
265:           tao->trust = PetscMax(tao->trust, tr->min_radius);
266:           tao->trust = PetscMin(tao->trust, tr->max_radius);

268:           PetscCall(KSPCGSetRadius(tao->ksp, tao->trust));
269:           PetscCall(KSPSolve(tao->ksp, tao->gradient, tao->stepdirection));
270:           PetscCall(KSPGetIterationNumber(tao->ksp, &its));
271:           tao->ksp_its += its;
272:           tao->ksp_tot_its += its;
273:           PetscCall(KSPCGGetNormD(tao->ksp, &norm_d));

275:           PetscCheck(norm_d != 0.0, PetscObjectComm((PetscObject)tao), PETSC_ERR_PLIB, "Initial direction zero");
276:         }
277:       }
278:       PetscCall(VecScale(tao->stepdirection, -1.0));
279:       PetscCall(KSPGetConvergedReason(tao->ksp, &ksp_reason));
280:       if ((KSP_DIVERGED_INDEFINITE_PC == ksp_reason) && (tr->bfgs_pre)) {
281:         /* Preconditioner is numerically indefinite; reset the
282:            approximation if using BFGS preconditioning. */
283:         PetscCall(MatLMVMReset(tr->M, PETSC_FALSE));
284:         PetscCall(MatLMVMUpdate(tr->M, tao->solution, tao->gradient));
285:       }

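      /* Radius update and step acceptance: with NTR_UPDATE_REDUCTION the ratio
         kappa = actred/prered is compared against the eta thresholds and the radius
         is rescaled by the matching alpha factor; with NTR_UPDATE_INTERPOLATION the
         rescaling is derived from the interpolation factors tau_1 and tau_2.  An
         accepted step breaks out of this inner loop. */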
287:       if (NTR_UPDATE_REDUCTION == tr->update_type) {
288:         /* Get predicted reduction */
289:         PetscCall(KSPCGGetObjFcn(tao->ksp, &prered));
290:         if (prered >= 0.0) {
291:           /* The predicted reduction has the wrong sign.  This cannot
292:              happen in infinite precision arithmetic.  Step should
293:              be rejected! */
294:           tao->trust = tr->alpha1 * PetscMin(tao->trust, norm_d);
295:         } else {
296:           /* Compute trial step and function value */
297:           PetscCall(VecCopy(tao->solution, tr->W));
298:           PetscCall(VecAXPY(tr->W, 1.0, tao->stepdirection));
299:           PetscCall(TaoComputeObjective(tao, tr->W, &ftrial));

301:           if (PetscIsInfOrNanReal(ftrial)) {
302:             tao->trust = tr->alpha1 * PetscMin(tao->trust, norm_d);
303:           } else {
304:             /* Compute the actual reduction */
305:             actred = f - ftrial;
306:             prered = -prered;
307:             if ((PetscAbsScalar(actred) <= tr->epsilon) && (PetscAbsScalar(prered) <= tr->epsilon)) {
308:               kappa = 1.0;
309:             } else {
310:               kappa = actred / prered;
311:             }

313:             /* Accept or reject the step and update radius */
314:             if (kappa < tr->eta1) {
315:               /* Reject the step */
316:               tao->trust = tr->alpha1 * PetscMin(tao->trust, norm_d);
317:             } else {
318:               /* Accept the step */
319:               if (kappa < tr->eta2) {
320:                 /* Marginally bad step */
321:                 tao->trust = tr->alpha2 * PetscMin(tao->trust, norm_d);
322:               } else if (kappa < tr->eta3) {
323:                 /* Reasonable step */
324:                 tao->trust = tr->alpha3 * tao->trust;
325:               } else if (kappa < tr->eta4) {
326:                 /* Good step */
327:                 tao->trust = PetscMax(tr->alpha4 * norm_d, tao->trust);
328:               } else {
329:                 /* Very good step */
330:                 tao->trust = PetscMax(tr->alpha5 * norm_d, tao->trust);
331:               }
332:               break;
333:             }
334:           }
335:         }
336:       } else {
337:         /* Get predicted reduction */
338:         PetscCall(KSPCGGetObjFcn(tao->ksp, &prered));
339:         if (prered >= 0.0) {
340:           /* The predicted reduction has the wrong sign.  This cannot
341:              happen in infinite precision arithmetic.  Step should
342:              be rejected! */
343:           tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
344:         } else {
345:           PetscCall(VecCopy(tao->solution, tr->W));
346:           PetscCall(VecAXPY(tr->W, 1.0, tao->stepdirection));
347:           PetscCall(TaoComputeObjective(tao, tr->W, &ftrial));
348:           if (PetscIsInfOrNanReal(ftrial)) {
349:             tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
350:           } else {
351:             PetscCall(VecDot(tao->gradient, tao->stepdirection, &beta));
352:             actred = f - ftrial;
353:             prered = -prered;
354:             if ((PetscAbsScalar(actred) <= tr->epsilon) && (PetscAbsScalar(prered) <= tr->epsilon)) {
355:               kappa = 1.0;
356:             } else {
357:               kappa = actred / prered;
358:             }

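            /* beta = g'd is the first-order (linear) change of the model along the
               trial step; tau_1 and tau_2 are interpolation-based rescaling factors
               applied to norm_d or to the current radius in the cases below. */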
360:             tau_1   = tr->theta * beta / (tr->theta * beta - (1.0 - tr->theta) * prered + actred);
361:             tau_2   = tr->theta * beta / (tr->theta * beta + (1.0 + tr->theta) * prered - actred);
362:             tau_min = PetscMin(tau_1, tau_2);
363:             tau_max = PetscMax(tau_1, tau_2);

365:             if (kappa >= 1.0 - tr->mu1) {
366:               /* Great agreement; accept step and update radius */
367:               if (tau_max < 1.0) {
368:                 tao->trust = PetscMax(tao->trust, tr->gamma3 * norm_d);
369:               } else if (tau_max > tr->gamma4) {
370:                 tao->trust = PetscMax(tao->trust, tr->gamma4 * norm_d);
371:               } else {
372:                 tao->trust = PetscMax(tao->trust, tau_max * norm_d);
373:               }
374:               break;
375:             } else if (kappa >= 1.0 - tr->mu2) {
376:               /* Good agreement */

378:               if (tau_max < tr->gamma2) {
379:                 tao->trust = tr->gamma2 * PetscMin(tao->trust, norm_d);
380:               } else if (tau_max > tr->gamma3) {
381:                 tao->trust = PetscMax(tao->trust, tr->gamma3 * norm_d);
382:               } else if (tau_max < 1.0) {
383:                 tao->trust = tau_max * PetscMin(tao->trust, norm_d);
384:               } else {
385:                 tao->trust = PetscMax(tao->trust, tau_max * norm_d);
386:               }
387:               break;
388:             } else {
389:               /* Not good agreement */
390:               if (tau_min > 1.0) {
391:                 tao->trust = tr->gamma2 * PetscMin(tao->trust, norm_d);
392:               } else if (tau_max < tr->gamma1) {
393:                 tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
394:               } else if ((tau_min < tr->gamma1) && (tau_max >= 1.0)) {
395:                 tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
396:               } else if ((tau_1 >= tr->gamma1) && (tau_1 < 1.0) && ((tau_2 < tr->gamma1) || (tau_2 >= 1.0))) {
397:                 tao->trust = tau_1 * PetscMin(tao->trust, norm_d);
398:               } else if ((tau_2 >= tr->gamma1) && (tau_2 < 1.0) && ((tau_1 < tr->gamma1) || (tau_1 >= 1.0))) {
399:                 tao->trust = tau_2 * PetscMin(tao->trust, norm_d);
400:               } else {
401:                 tao->trust = tau_max * PetscMin(tao->trust, norm_d);
402:               }
403:             }
404:           }
405:         }
406:       }

408:       /* The step was not accepted and the radius was decreased.  Log progress
409:          and run the convergence test so that a shrinking radius can terminate the solve. */
410:       PetscCall(TaoLogConvergenceHistory(tao, f, gnorm, 0.0, tao->ksp_its));
411:       PetscCall(TaoMonitor(tao, tao->niter, f, gnorm, 0.0, tao->trust));
412:       PetscUseTypeMethod(tao, convergencetest, tao->cnvP);
413:     }

415:     /* The radius may have been increased; modify if it is too large */
416:     tao->trust = PetscMin(tao->trust, tr->max_radius);

418:     if (tao->reason == TAO_CONTINUE_ITERATING) {
419:       PetscCall(VecCopy(tr->W, tao->solution));
420:       f = ftrial;
421:       PetscCall(TaoComputeGradient(tao, tao->solution, tao->gradient));
422:       PetscCall(TaoGradientNorm(tao, tao->gradient, NORM_2, &gnorm));
423:       PetscCheck(!PetscIsInfOrNanReal(f) && !PetscIsInfOrNanReal(gnorm), PetscObjectComm((PetscObject)tao), PETSC_ERR_USER, "User provided compute function generated Inf or NaN");
424:       needH = 1;
425:       PetscCall(TaoLogConvergenceHistory(tao, f, gnorm, 0.0, tao->ksp_its));
426:       PetscCall(TaoMonitor(tao, tao->niter, f, gnorm, 0.0, tao->trust));
427:       PetscUseTypeMethod(tao, convergencetest, tao->cnvP);
428:     }
429:   }
430:   PetscFunctionReturn(PETSC_SUCCESS);
431: }

433: /*------------------------------------------------------------*/
434: static PetscErrorCode TaoSetUp_NTR(Tao tao)
435: {
436:   TAO_NTR *tr = (TAO_NTR *)tao->data;

438:   PetscFunctionBegin;
439:   if (!tao->gradient) PetscCall(VecDuplicate(tao->solution, &tao->gradient));
440:   if (!tao->stepdirection) PetscCall(VecDuplicate(tao->solution, &tao->stepdirection));
441:   if (!tr->W) PetscCall(VecDuplicate(tao->solution, &tr->W));

443:   tr->bfgs_pre = NULL;
444:   tr->M        = NULL;
445:   PetscFunctionReturn(PETSC_SUCCESS);
446: }

448: /*------------------------------------------------------------*/
449: static PetscErrorCode TaoDestroy_NTR(Tao tao)
450: {
451:   TAO_NTR *tr = (TAO_NTR *)tao->data;

453:   PetscFunctionBegin;
454:   if (tao->setupcalled) PetscCall(VecDestroy(&tr->W));
455:   PetscCall(KSPDestroy(&tao->ksp));
456:   PetscCall(PetscFree(tao->data));
457:   PetscFunctionReturn(PETSC_SUCCESS);
458: }

460: /*------------------------------------------------------------*/
461: static PetscErrorCode TaoSetFromOptions_NTR(Tao tao, PetscOptionItems *PetscOptionsObject)
462: {
463:   TAO_NTR *tr = (TAO_NTR *)tao->data;

465:   PetscFunctionBegin;
466:   PetscOptionsHeadBegin(PetscOptionsObject, "Newton trust region method for unconstrained optimization");
467:   PetscCall(PetscOptionsEList("-tao_ntr_init_type", "tao->trust initialization type", "", NTR_INIT, NTR_INIT_TYPES, NTR_INIT[tr->init_type], &tr->init_type, NULL));
468:   PetscCall(PetscOptionsEList("-tao_ntr_update_type", "radius update type", "", NTR_UPDATE, NTR_UPDATE_TYPES, NTR_UPDATE[tr->update_type], &tr->update_type, NULL));
469:   PetscCall(PetscOptionsReal("-tao_ntr_eta1", "step is unsuccessful if actual reduction < eta1 * predicted reduction", "", tr->eta1, &tr->eta1, NULL));
470:   PetscCall(PetscOptionsReal("-tao_ntr_eta2", "accepted step is marginal if actual reduction < eta2 * predicted reduction", "", tr->eta2, &tr->eta2, NULL));
471:   PetscCall(PetscOptionsReal("-tao_ntr_eta3", "accepted step is reasonable if actual reduction < eta3 * predicted reduction", "", tr->eta3, &tr->eta3, NULL));
472:   PetscCall(PetscOptionsReal("-tao_ntr_eta4", "accepted step is good if actual reduction < eta4 * predicted reduction (very good otherwise)", "", tr->eta4, &tr->eta4, NULL));
473:   PetscCall(PetscOptionsReal("-tao_ntr_alpha1", "radius scaling factor applied when a step is rejected", "", tr->alpha1, &tr->alpha1, NULL));
474:   PetscCall(PetscOptionsReal("-tao_ntr_alpha2", "radius scaling factor applied for a marginal step", "", tr->alpha2, &tr->alpha2, NULL));
475:   PetscCall(PetscOptionsReal("-tao_ntr_alpha3", "radius scaling factor applied for a reasonable step", "", tr->alpha3, &tr->alpha3, NULL));
476:   PetscCall(PetscOptionsReal("-tao_ntr_alpha4", "radius scaling factor applied for a good step", "", tr->alpha4, &tr->alpha4, NULL));
477:   PetscCall(PetscOptionsReal("-tao_ntr_alpha5", "radius scaling factor applied for a very good step", "", tr->alpha5, &tr->alpha5, NULL));
478:   PetscCall(PetscOptionsReal("-tao_ntr_mu1", "actual/predicted reduction >= 1 - mu1 indicates great agreement (interpolation update)", "", tr->mu1, &tr->mu1, NULL));
479:   PetscCall(PetscOptionsReal("-tao_ntr_mu2", "actual/predicted reduction >= 1 - mu2 indicates good agreement (interpolation update)", "", tr->mu2, &tr->mu2, NULL));
480:   PetscCall(PetscOptionsReal("-tao_ntr_gamma1", "smallest radius scaling factor (interpolation update)", "", tr->gamma1, &tr->gamma1, NULL));
481:   PetscCall(PetscOptionsReal("-tao_ntr_gamma2", "moderate radius scaling factor (interpolation update)", "", tr->gamma2, &tr->gamma2, NULL));
482:   PetscCall(PetscOptionsReal("-tao_ntr_gamma3", "radius expansion factor (interpolation update)", "", tr->gamma3, &tr->gamma3, NULL));
483:   PetscCall(PetscOptionsReal("-tao_ntr_gamma4", "largest radius expansion factor (interpolation update)", "", tr->gamma4, &tr->gamma4, NULL));
484:   PetscCall(PetscOptionsReal("-tao_ntr_theta", "interpolation parameter used to compute the radius scaling factors (interpolation update)", "", tr->theta, &tr->theta, NULL));
485:   PetscCall(PetscOptionsReal("-tao_ntr_mu1_i", "|actual/predicted reduction - 1| <= mu1_i indicates great agreement (interpolation initialization)", "", tr->mu1_i, &tr->mu1_i, NULL));
486:   PetscCall(PetscOptionsReal("-tao_ntr_mu2_i", "|actual/predicted reduction - 1| <= mu2_i indicates good agreement (interpolation initialization)", "", tr->mu2_i, &tr->mu2_i, NULL));
487:   PetscCall(PetscOptionsReal("-tao_ntr_gamma1_i", "smallest radius scaling factor (interpolation initialization)", "", tr->gamma1_i, &tr->gamma1_i, NULL));
488:   PetscCall(PetscOptionsReal("-tao_ntr_gamma2_i", "moderate radius scaling factor (interpolation initialization)", "", tr->gamma2_i, &tr->gamma2_i, NULL));
489:   PetscCall(PetscOptionsReal("-tao_ntr_gamma3_i", "radius expansion factor (interpolation initialization)", "", tr->gamma3_i, &tr->gamma3_i, NULL));
490:   PetscCall(PetscOptionsReal("-tao_ntr_gamma4_i", "largest radius expansion factor (interpolation initialization)", "", tr->gamma4_i, &tr->gamma4_i, NULL));
491:   PetscCall(PetscOptionsReal("-tao_ntr_theta_i", "interpolation parameter used to compute the radius scaling factors (interpolation initialization)", "", tr->theta_i, &tr->theta_i, NULL));
492:   PetscCall(PetscOptionsReal("-tao_ntr_min_radius", "lower bound on initial trust-region radius", "", tr->min_radius, &tr->min_radius, NULL));
493:   PetscCall(PetscOptionsReal("-tao_ntr_max_radius", "upper bound on trust-region radius", "", tr->max_radius, &tr->max_radius, NULL));
494:   PetscCall(PetscOptionsReal("-tao_ntr_epsilon", "tolerance used when computing actual and predicted reduction", "", tr->epsilon, &tr->epsilon, NULL));
495:   PetscOptionsHeadEnd();
496:   PetscCall(KSPSetFromOptions(tao->ksp));
497:   PetscFunctionReturn(PETSC_SUCCESS);
498: }

500: /*------------------------------------------------------------*/
501: /*MC
502:   TAONTR - Newton's method with trust region for unconstrained minimization.
503:   At each iteration, the Newton trust region method approximately solves the trust-region subproblem
504:             min_d  .5 dT Hk d + gkT d,  s.t.   ||d|| < Delta_k
505:   NTR expects a KSP solver with a trust region radius.

507:   Options Database Keys:
508: + -tao_ntr_init_type - "constant","direction","interpolation"
509: . -tao_ntr_update_type - "reduction","interpolation"
510: . -tao_ntr_min_radius - lower bound on trust region radius
511: . -tao_ntr_max_radius - upper bound on trust region radius
512: . -tao_ntr_epsilon - tolerance for accepting actual / predicted reduction
513: . -tao_ntr_mu1_i - mu1 interpolation init factor
514: . -tao_ntr_mu2_i - mu2 interpolation init factor
515: . -tao_ntr_gamma1_i - gamma1 interpolation init factor
516: . -tao_ntr_gamma2_i - gamma2 interpolation init factor
517: . -tao_ntr_gamma3_i - gamma3 interpolation init factor
518: . -tao_ntr_gamma4_i - gamma4 interpolation init factor
519: . -tao_ntr_theta_i - theta interpolation init factor
520: . -tao_ntr_eta1 - eta1 reduction update factor
521: . -tao_ntr_eta2 - eta2 reduction update factor
522: . -tao_ntr_eta3 - eta3 reduction update factor
523: . -tao_ntr_eta4 - eta4 reduction update factor
524: . -tao_ntr_alpha1 - alpha1 reduction update factor
525: . -tao_ntr_alpha2 - alpha2 reduction update factor
526: . -tao_ntr_alpha3 - alpha3 reduction update factor
527: . -tao_ntr_alpha4 - alpha4 reduction update factor
528: . -tao_ntr_alpha5 - alpha5 reduction update factor
529: . -tao_ntr_mu1 - mu1 interpolation update
530: . -tao_ntr_mu2 - mu2 interpolation update
531: . -tao_ntr_gamma1 - gamma1 interpolation update
532: . -tao_ntr_gamma2 - gamma2 interpolation update
533: . -tao_ntr_gamma3 - gamma3 interpolation update
534: . -tao_ntr_gamma4 - gamma4 interpolation update
535: - -tao_ntr_theta - theta interpolation update
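
  Example usage (a minimal sketch; x, H, FormFunctionGradient, FormHessian, and
  user are assumed to be supplied by the application):
.vb
  Tao tao;
  TaoCreate(PETSC_COMM_WORLD, &tao);
  TaoSetType(tao, TAONTR);
  TaoSetSolution(tao, x);
  TaoSetObjectiveAndGradient(tao, NULL, FormFunctionGradient, &user);
  TaoSetHessian(tao, H, H, FormHessian, &user);
  TaoSetFromOptions(tao);
  TaoSolve(tao);
  TaoDestroy(&tao);
.ve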

537:   Level: beginner
538: M*/

540: PETSC_EXTERN PetscErrorCode TaoCreate_NTR(Tao tao)
541: {
542:   TAO_NTR *tr;

544:   PetscFunctionBegin;
545:   PetscCall(PetscNew(&tr));

547:   tao->ops->setup          = TaoSetUp_NTR;
548:   tao->ops->solve          = TaoSolve_NTR;
549:   tao->ops->setfromoptions = TaoSetFromOptions_NTR;
550:   tao->ops->destroy        = TaoDestroy_NTR;

552:   /* Override default settings (unless already changed) */
553:   PetscCall(TaoParametersInitialize(tao));
554:   PetscObjectParameterSetDefault(tao, max_it, 50);
555:   PetscObjectParameterSetDefault(tao, trust0, 100.0);
556:   tao->data = (void *)tr;

558:   /*  Standard trust region update parameters */
559:   tr->eta1 = 1.0e-4;
560:   tr->eta2 = 0.25;
561:   tr->eta3 = 0.50;
562:   tr->eta4 = 0.90;

564:   tr->alpha1 = 0.25;
565:   tr->alpha2 = 0.50;
566:   tr->alpha3 = 1.00;
567:   tr->alpha4 = 2.00;
568:   tr->alpha5 = 4.00;

570:   /*  Interpolation trust region update parameters */
571:   tr->mu1 = 0.10;
572:   tr->mu2 = 0.50;

574:   tr->gamma1 = 0.25;
575:   tr->gamma2 = 0.50;
576:   tr->gamma3 = 2.00;
577:   tr->gamma4 = 4.00;

579:   tr->theta = 0.05;

581:   /*  Interpolation parameters for initialization */
582:   tr->mu1_i = 0.35;
583:   tr->mu2_i = 0.50;

585:   tr->gamma1_i = 0.0625;
586:   tr->gamma2_i = 0.50;
587:   tr->gamma3_i = 2.00;
588:   tr->gamma4_i = 5.00;

590:   tr->theta_i = 0.25;

592:   tr->min_radius = 1.0e-10;
593:   tr->max_radius = 1.0e10;
594:   tr->epsilon    = 1.0e-6;

596:   tr->init_type   = NTR_INIT_INTERPOLATION;
597:   tr->update_type = NTR_UPDATE_REDUCTION;

599:   /* Set linear solver to default for trust region */
600:   PetscCall(KSPCreate(((PetscObject)tao)->comm, &tao->ksp));
601:   PetscCall(PetscObjectIncrementTabLevel((PetscObject)tao->ksp, (PetscObject)tao, 1));
602:   PetscCall(KSPSetOptionsPrefix(tao->ksp, tao->hdr.prefix));
603:   PetscCall(KSPAppendOptionsPrefix(tao->ksp, "tao_ntr_"));
604:   PetscCall(KSPSetType(tao->ksp, KSPSTCG));
605:   PetscFunctionReturn(PETSC_SUCCESS);
606: }