Actual source code: bntr.c

  1: #include <../src/tao/bound/impls/bnk/bnk.h>
  2: #include <petscksp.h>

  4: /*
  5:  Implements Newton's method with a trust-region approach for solving
  6:  bound-constrained minimization problems.

  8:  x_0 = VecMedian(x_0)
  9:  f_0, g_0 = TaoComputeObjectiveAndGradient(x_0)
 10:  pg_0 = project(g_0)
 11:  check convergence at pg_0
 12:  needH = TaoBNKInitialize(default: BNK_INIT_INTERPOLATION)
 13:  niter = 0
 14:  step_accepted = false

 16:  while niter <= max_it

 18:     if needH
 19:       If max_cg_steps > 0
 20:         x_k, g_k, pg_k = TaoSolve(BNCG)
 21:       end

 23:       H_k = TaoComputeHessian(x_k)
 24:       if pc_type == BNK_PC_BFGS
 25:         add correction to BFGS approx
 26:         if scale_type == BNK_SCALE_AHESS
 27:           D = VecMedian(1e-6, abs(diag(H_k)), 1e6)
 28:           scale BFGS with VecReciprocal(D)
 29:         end
 30:       end
 31:       needH = False
 32:     end

 34:     if pc_type == BNK_PC_BFGS
 35:       B_k = BFGS
 36:     else
 37:       B_k = VecMedian(1e-6, abs(diag(H_k)), 1e6)
 38:       B_k = VecReciprocal(B_k)
 39:     end
 40:     w = x_k - VecMedian(x_k - 0.001*B_k*g_k)
 41:     eps = min(eps, norm2(w))
 42:     determine the active and inactive index sets such that
 43:       L = {i : (x_k)_i <= l_i + eps && (g_k)_i > 0}
 44:       U = {i : (x_k)_i >= u_i - eps && (g_k)_i < 0}
 45:       F = {i : l_i = (x_k)_i = u_i}
 46:       A = {L + U + F}
 47:       IA = {i : i not in A}

 49:     generate the reduced system Hr_k dr_k = -gr_k for variables in IA
 50:     if pc_type == BNK_PC_BFGS && scale_type == BNK_SCALE_PHESS
 51:       D = VecMedian(1e-6, abs(diag(Hr_k)), 1e6)
 52:       scale BFGS with VecReciprocal(D)
 53:     end

 55:     while !step_accepted
 56:       solve Hr_k dr_k = -gr_k
 57:       set d_k to (l - x) for variables in L, (u - x) for variables in U, and 0 for variables in F

 59:       x_{k+1} = VecMedian(x_k + d_k)
 60:       s = x_{k+1} - x_k
 61:       prered = dot(s, 0.5*gr_k - Hr_k*s)
 62:       f_{k+1} = TaoComputeObjective(x_{k+1})
 63:       actred = f_k - f_{k+1}

 65:       oldTrust = trust
 66:       step_accepted, trust = TaoBNKUpdateTrustRadius(default: BNK_UPDATE_REDUCTION)
 67:       if step_accepted
 68:         g_{k+1} = TaoComputeGradient(x_{k+1})
 69:         pg_{k+1} = project(g_{k+1})
 70:         count the accepted Newton step
 71:         needH = True
 72:       else
 73:         f_{k+1} = f_k
 74:         x_{k+1} = x_k
 75:         g_{k+1} = g_k
 76:         pg_{k+1} = pg_k
 77:         if trust == oldTrust
 78:           terminate because we cannot shrink the radius any further
 79:         end
 80:       end

 82:     end
 83:     check convergence at pg_{k+1}
 84:     niter += 1

 86:  end
 87: */
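The default reduction-based update (BNK_UPDATE_REDUCTION) accepts or rejects the step and resizes the radius from the ratio of the actual reduction f_k - f_{k+1} to the reduction predicted by the quadratic model. The standalone sketch below illustrates only that logic; the threshold and scaling constants are placeholders, not the parameters or option names used by TAO_BNK.

#include <math.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative reduction-based trust-radius update: accept the step when the
   actual-to-predicted reduction ratio is large enough, and grow or shrink the
   radius depending on how well the quadratic model predicted the objective
   change. The constants below are placeholders, not TAO_BNK's defaults. */
static bool trust_update(double actred, double prered, double snorm, double *trust)
{
  const double eta1 = 1.0e-4, eta2 = 0.25, eta3 = 0.75; /* acceptance/quality thresholds */
  const double shrink = 0.25, grow = 2.0;               /* radius scaling factors */
  double       ratio = (prered > 0.0) ? actred / prered : -1.0;
  bool         accept = (ratio > eta1);

  if (ratio < eta2) *trust = shrink * fmin(*trust, snorm);    /* poor model: shrink the radius */
  else if (ratio > eta3) *trust = fmax(*trust, grow * snorm); /* very good model: allow growth */
  return accept;
}

int main(void)
{
  double trust = 1.0;
  bool   ok    = trust_update(0.9, 1.0, 0.8, &trust); /* good agreement: accepted, radius may grow */

  printf("accepted = %d, new radius = %g\n", (int)ok, trust);
  return 0;
}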

 89: PetscErrorCode TaoSolve_BNTR(Tao tao)
 90: {
 91:   TAO_BNK           *bnk = (TAO_BNK *)tao->data;
 92:   KSPConvergedReason ksp_reason;

 94:   PetscReal oldTrust, prered, actred, steplen = 0.0, resnorm;
 95:   PetscBool cgTerminate, needH = PETSC_TRUE, stepAccepted, shift = PETSC_FALSE;
 96:   PetscInt  stepType, nDiff;

 98:   PetscFunctionBegin;
 99:   /* Initialize the preconditioner, KSP solver and trust radius/line search */
100:   tao->reason = TAO_CONTINUE_ITERATING;
101:   PetscCall(TaoBNKInitialize(tao, bnk->init_type, &needH));
102:   if (tao->reason != TAO_CONTINUE_ITERATING) PetscFunctionReturn(PETSC_SUCCESS);

104:   /* Have not converged; continue with Newton method */
105:   while (tao->reason == TAO_CONTINUE_ITERATING) {
106:     /* Call general purpose update function */
107:     if (tao->ops->update) {
108:       PetscUseTypeMethod(tao, update, tao->niter, tao->user_update);
109:       PetscCall(TaoComputeObjective(tao, tao->solution, &bnk->f));
110:     }

112:     if (needH && bnk->inactive_idx) {
113:       /* Take BNCG steps (if enabled) to trade off Hessian evaluations for more gradient evaluations */
114:       PetscCall(TaoBNKTakeCGSteps(tao, &cgTerminate));
115:       if (cgTerminate) {
116:         tao->reason = bnk->bncg->reason;
117:         PetscFunctionReturn(PETSC_SUCCESS);
118:       }
119:       /* Compute the Hessian and update the BFGS preconditioner at the new iterate */
120:       PetscCall((*bnk->computehessian)(tao));
121:       needH = PETSC_FALSE;
122:     }

124:     /* Store current solution before it changes */
125:     bnk->fold = bnk->f;
126:     PetscCall(VecCopy(tao->solution, bnk->Xold));
127:     PetscCall(VecCopy(tao->gradient, bnk->Gold));
128:     PetscCall(VecCopy(bnk->unprojected_gradient, bnk->unprojected_gradient_old));

130:     /* Enter into trust region loops */
131:     stepAccepted = PETSC_FALSE;
132:     while (!stepAccepted && tao->reason == TAO_CONTINUE_ITERATING) {
133:       tao->ksp_its = 0;

135:       /* Use the common BNK kernel to compute the Newton step (for inactive variables only) */
136:       PetscCall((*bnk->computestep)(tao, shift, &ksp_reason, &stepType));

138:       /* Temporarily accept the step and project it into the bounds */
139:       PetscCall(VecAXPY(tao->solution, 1.0, tao->stepdirection));
140:       PetscCall(TaoBoundSolution(tao->solution, tao->XL, tao->XU, 0.0, &nDiff, tao->solution));

142:       /* Check if the projection changed the step direction */
143:       if (nDiff > 0) {
144:         /* Projection changed the step, so we have to recompute the step and
145:            the predicted reduction. Leave the trust radius unchanged. */
146:         PetscCall(VecCopy(tao->solution, tao->stepdirection));
147:         PetscCall(VecAXPY(tao->stepdirection, -1.0, bnk->Xold));
148:         PetscCall(TaoBNKRecomputePred(tao, tao->stepdirection, &prered));
149:       } else {
150:         /* Step did not change, so we can just recover the pre-computed prediction */
151:         PetscCall(KSPCGGetObjFcn(tao->ksp, &prered));
152:       }
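      /* Both branches above leave prered holding the signed quadratic-model value
         for the step that was actually taken; the negation below turns it into the
         predicted reduction that is compared against the actual reduction. */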
153:       prered = -prered;

155:       /* Compute the actual reduction and update the trust radius */
156:       PetscCall(TaoComputeObjective(tao, tao->solution, &bnk->f));
157:       PetscCheck(!PetscIsInfOrNanReal(bnk->f), PetscObjectComm((PetscObject)tao), PETSC_ERR_USER, "User provided compute function generated infinity or NaN");
158:       actred   = bnk->fold - bnk->f;
159:       oldTrust = tao->trust;
160:       PetscCall(TaoBNKUpdateTrustRadius(tao, prered, actred, bnk->update_type, stepType, &stepAccepted));

162:       if (stepAccepted) {
163:         /* Step is good, evaluate the gradient and flip the need-Hessian switch */
164:         steplen = 1.0;
165:         needH   = PETSC_TRUE;
166:         ++bnk->newt;
167:         PetscCall(TaoComputeGradient(tao, tao->solution, bnk->unprojected_gradient));
168:         PetscCall(TaoBNKEstimateActiveSet(tao, bnk->as_type));
169:         PetscCall(VecCopy(bnk->unprojected_gradient, tao->gradient));
170:         if (bnk->active_idx) PetscCall(VecISSet(tao->gradient, bnk->active_idx, 0.0));
171:         PetscCall(TaoGradientNorm(tao, tao->gradient, NORM_2, &bnk->gnorm));
172:       } else {
173:         /* Step is bad, revert to the old solution and re-solve with the new radius */
174:         steplen = 0.0;
175:         needH   = PETSC_FALSE;
176:         bnk->f  = bnk->fold;
177:         PetscCall(VecCopy(bnk->Xold, tao->solution));
178:         PetscCall(VecCopy(bnk->Gold, tao->gradient));
179:         PetscCall(VecCopy(bnk->unprojected_gradient_old, bnk->unprojected_gradient));
180:         if (oldTrust == tao->trust) {
181:           /* Can't change the radius anymore so just terminate */
182:           tao->reason = TAO_DIVERGED_TR_REDUCTION;
183:         }
184:       }
185:     }
186:     /*  Check for termination */
187:     PetscCall(VecFischer(tao->solution, bnk->unprojected_gradient, tao->XL, tao->XU, bnk->W));
188:     PetscCall(VecNorm(bnk->W, NORM_2, &resnorm));
189:     PetscCheck(!PetscIsInfOrNanReal(resnorm), PetscObjectComm((PetscObject)tao), PETSC_ERR_USER, "User provided compute function generated infinity or NaN");
190:     ++tao->niter;
191:     PetscCall(TaoLogConvergenceHistory(tao, bnk->f, resnorm, 0.0, tao->ksp_its));
192:     PetscCall(TaoMonitor(tao, tao->niter, bnk->f, resnorm, 0.0, steplen));
193:     PetscUseTypeMethod(tao, convergencetest, tao->cnvP);
194:   }
195:   PetscFunctionReturn(PETSC_SUCCESS);
196: }
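The convergence residual computed above with VecFischer() is built from the Fischer-Burmeister function. As a scalar illustration (a sketch of the idea only, not the exact bound-constrained composition VecFischer() applies componentwise), phi(a, b) = sqrt(a^2 + b^2) - a - b vanishes exactly when a >= 0, b >= 0, and a*b = 0, which is the complementarity condition a bound-constrained stationary point must satisfy.

#include <math.h>
#include <stdio.h>

/* Scalar Fischer-Burmeister function: phi(a,b) = 0  <=>  a >= 0, b >= 0, a*b = 0 */
static double fischer_burmeister(double a, double b)
{
  return sqrt(a * a + b * b) - a - b;
}

int main(void)
{
  /* At an interior stationary point the gradient is zero; at an active lower
     bound the distance to the bound is zero and the gradient is nonnegative.
     Both cases drive the residual to zero; violations do not. */
  printf("interior point, zero gradient : %g\n", fischer_burmeister(0.7, 0.0)); /* = 0 */
  printf("active bound, positive grad   : %g\n", fischer_burmeister(0.0, 1.3)); /* = 0 */
  printf("interior point, nonzero grad  : %g\n", fischer_burmeister(0.7, 0.5)); /* != 0 */
  return 0;
}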

198: static PetscErrorCode TaoSetUp_BNTR(Tao tao)
199: {
200:   KSP       ksp;
201:   PetscBool valid;

203:   PetscFunctionBegin;
204:   PetscCall(TaoSetUp_BNK(tao));
205:   PetscCall(TaoGetKSP(tao, &ksp));
206:   PetscCall(PetscObjectHasFunction((PetscObject)ksp, "KSPCGSetRadius_C", &valid));
207:   PetscCheck(valid, PetscObjectComm((PetscObject)tao), PETSC_ERR_SUP, "Not for KSP type %s. Must use a trust-region CG method for KSP (e.g. KSPNASH, KSPSTCG, KSPGLTR)", ((PetscObject)ksp)->type_name);
208:   PetscFunctionReturn(PETSC_SUCCESS);
209: }
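TaoSetUp_BNTR() above rejects any KSP that does not provide KSPCGSetRadius(), so the inner solver must be one of the trust-region CG variants. A minimal sketch of selecting a compatible type programmatically follows; the helper name is illustrative, not part of the TAO API.

#include <petscksp.h>
#include <petsctao.h>

/* Sketch: force the inner KSP to a trust-region-capable type before TaoSolve().
   Any of KSPNASH, KSPSTCG, or KSPGLTR passes the check in TaoSetUp_BNTR(). */
static PetscErrorCode UseTrustRegionKSP(Tao tao)
{
  KSP ksp;

  PetscFunctionBegin;
  PetscCall(TaoGetKSP(tao, &ksp));
  PetscCall(KSPSetType(ksp, KSPSTCG));
  PetscFunctionReturn(PETSC_SUCCESS);
}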

211: static PetscErrorCode TaoSetFromOptions_BNTR(Tao tao, PetscOptionItems PetscOptionsObject)
212: {
213:   TAO_BNK *bnk = (TAO_BNK *)tao->data;

215:   PetscFunctionBegin;
216:   PetscCall(TaoSetFromOptions_BNK(tao, PetscOptionsObject));
217:   if (bnk->update_type == BNK_UPDATE_STEP) bnk->update_type = BNK_UPDATE_REDUCTION;
218:   PetscFunctionReturn(PETSC_SUCCESS);
219: }

221: /*MC
222:   TAOBNTR - Bounded Newton Trust Region for nonlinear minimization with bound constraints.

224:   Options Database Keys:
225: + -tao_bnk_max_cg_its - maximum number of bounded conjugate-gradient iterations taken in each Newton loop
226: . -tao_bnk_init_type - trust radius initialization method ("constant", "direction", "interpolation")
227: . -tao_bnk_update_type - trust radius update method ("step", "reduction", "interpolation")
228: - -tao_bnk_as_type - active-set estimation method ("none", "bertsekas")

230:   Level: beginner
231: M*/
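As a usage illustration, the sketch below drives TAOBNTR on a simple bound-constrained quadratic. It is not taken from the PETSc examples; it assumes the current TaoSetSolution()/TaoSetObjectiveAndGradient()/TaoSetHessian() calling sequence (older releases spell these TaoSetInitialVector(), TaoSetObjectiveAndGradientRoutine(), and TaoSetHessianRoutine()), and the callbacks and problem data are purely illustrative.

#include <petsctao.h>

/* f(x) = 0.5 * ||x - 2||^2 with bounds 0 <= x <= 1, so the minimizer sits on
   the upper bound. Illustrative callbacks, not part of the library. */
static PetscErrorCode FormFunctionGradient(Tao tao, Vec X, PetscReal *f, Vec G, void *ctx)
{
  PetscReal gnorm;

  PetscFunctionBegin;
  PetscCall(VecCopy(X, G));
  PetscCall(VecShift(G, -2.0)); /* G = x - 2 */
  PetscCall(VecNorm(G, NORM_2, &gnorm));
  *f = 0.5 * gnorm * gnorm;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* The Hessian of the quadratic above is the identity assembled in main(), so
   this callback does not need to modify it. */
static PetscErrorCode FormHessian(Tao tao, Vec X, Mat H, Mat Hpre, void *ctx)
{
  PetscFunctionBegin;
  PetscFunctionReturn(PETSC_SUCCESS);
}

int main(int argc, char **argv)
{
  Tao      tao;
  Vec      x, xl, xu;
  Mat      H;
  PetscInt n = 10, i;

  PetscCall(PetscInitialize(&argc, &argv, NULL, NULL));
  PetscCall(VecCreateSeq(PETSC_COMM_SELF, n, &x));
  PetscCall(VecDuplicate(x, &xl));
  PetscCall(VecDuplicate(x, &xu));
  PetscCall(VecSet(x, 0.5));
  PetscCall(VecSet(xl, 0.0));
  PetscCall(VecSet(xu, 1.0));
  PetscCall(MatCreateSeqAIJ(PETSC_COMM_SELF, n, n, 1, NULL, &H));
  for (i = 0; i < n; i++) PetscCall(MatSetValue(H, i, i, 1.0, INSERT_VALUES));
  PetscCall(MatAssemblyBegin(H, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(H, MAT_FINAL_ASSEMBLY));

  PetscCall(TaoCreate(PETSC_COMM_SELF, &tao));
  PetscCall(TaoSetType(tao, TAOBNTR));
  PetscCall(TaoSetSolution(tao, x));
  PetscCall(TaoSetVariableBounds(tao, xl, xu));
  PetscCall(TaoSetObjectiveAndGradient(tao, NULL, FormFunctionGradient, NULL));
  PetscCall(TaoSetHessian(tao, H, H, FormHessian, NULL));
  PetscCall(TaoSetFromOptions(tao)); /* picks up the -tao_bnk_* options listed above */
  PetscCall(TaoSolve(tao));

  PetscCall(TaoDestroy(&tao));
  PetscCall(MatDestroy(&H));
  PetscCall(VecDestroy(&xu));
  PetscCall(VecDestroy(&xl));
  PetscCall(VecDestroy(&x));
  PetscCall(PetscFinalize());
  return 0;
}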
232: PETSC_EXTERN PetscErrorCode TaoCreate_BNTR(Tao tao)
233: {
234:   TAO_BNK *bnk;

236:   PetscFunctionBegin;
237:   PetscCall(TaoCreate_BNK(tao));
238:   tao->ops->solve          = TaoSolve_BNTR;
239:   tao->ops->setup          = TaoSetUp_BNTR;
240:   tao->ops->setfromoptions = TaoSetFromOptions_BNTR;

242:   bnk              = (TAO_BNK *)tao->data;
243:   bnk->update_type = BNK_UPDATE_REDUCTION; /* trust region updates based on predicted/actual reduction */
244:   PetscFunctionReturn(PETSC_SUCCESS);
245: }