Actual source code: bntl.c
#include <../src/tao/bound/impls/bnk/bnk.h>
#include <petscksp.h>

/*
 Implements Newton's Method with a trust region approach for solving
 bound constrained minimization problems.

 In this variant, trust region failures trigger a line search with
 the existing Newton step instead of re-solving the step with a
 different radius.

 ------------------------------------------------------------

 x_0 = VecMedian(x_0)
 f_0, g_0 = TaoComputeObjectiveAndGradient(x_0)
 pg_0 = project(g_0)
 check convergence at pg_0
 needH = TaoBNKInitialize(default: BNK_INIT_INTERPOLATION)
 niter = 0
 step_accepted = true

 while niter <= max_it
    niter += 1

    if needH
      if max_cg_steps > 0
        x_k, g_k, pg_k = TaoSolve(BNCG)
      end

      H_k = TaoComputeHessian(x_k)
      if pc_type == BNK_PC_BFGS
        add correction to BFGS approx
        if scale_type == BNK_SCALE_AHESS
          D = VecMedian(1e-6, abs(diag(H_k)), 1e6)
          scale BFGS with VecReciprocal(D)
        end
      end
      needH = False
    end

    if pc_type == BNK_PC_BFGS
      B_k = BFGS
    else
      B_k = VecMedian(1e-6, abs(diag(H_k)), 1e6)
      B_k = VecReciprocal(B_k)
    end
    w = x_k - VecMedian(x_k - 0.001*B_k*g_k)
    eps = min(eps, norm2(w))
    determine the active and inactive index sets such that
      L = {i : (x_k)_i <= l_i + eps && (g_k)_i > 0}
      U = {i : (x_k)_i >= u_i - eps && (g_k)_i < 0}
      F = {i : l_i = (x_k)_i = u_i}
      A = {L + U + F}
      IA = {i : i not in A}

    generate the reduced system Hr_k dr_k = -gr_k for variables in IA
    if pc_type == BNK_PC_BFGS && scale_type == BNK_SCALE_PHESS
      D = VecMedian(1e-6, abs(diag(Hr_k)), 1e6)
      scale BFGS with VecReciprocal(D)
    end
    solve Hr_k dr_k = -gr_k
    set d_k to (l - x) for variables in L, (u - x) for variables in U, and 0 for variables in F

    x_{k+1} = VecMedian(x_k + d_k)
    s = x_{k+1} - x_k
    prered = dot(s, 0.5*gr_k - Hr_k*s)
    f_{k+1} = TaoComputeObjective(x_{k+1})
    actred = f_k - f_{k+1}

    oldTrust = trust
    step_accepted, trust = TaoBNKUpdateTrustRadius(default: BNK_UPDATE_REDUCTION)

    if step_accepted
      g_{k+1} = TaoComputeGradient(x_{k+1})
      pg_{k+1} = project(g_{k+1})
      count the accepted Newton step
    else
      if dot(d_k, pg_k) >= 0 || norm(d_k) == NaN || norm(d_k) == Inf
        dr_k = -BFGS*gr_k for variables in IA
        if dot(d_k, pg_k) >= 0 || norm(d_k) == NaN || norm(d_k) == Inf
          reset the BFGS preconditioner
          calculate scale delta and apply it to BFGS
          dr_k = -BFGS*gr_k for variables in IA
          if dot(d_k, pg_k) >= 0 || norm(d_k) == NaN || norm(d_k) == Inf
            dr_k = -gr_k for variables in IA
          end
        end
      end

      x_{k+1}, f_{k+1}, g_{k+1}, ls_failed = TaoBNKPerformLineSearch()
      if ls_failed
        f_{k+1} = f_k
        x_{k+1} = x_k
        g_{k+1} = g_k
        pg_{k+1} = pg_k
        terminate
      else
        pg_{k+1} = project(g_{k+1})
        trust = oldTrust
        trust = TaoBNKUpdateTrustRadius(BNK_UPDATE_STEP)
        count the accepted step type (Newton, BFGS, scaled grad or grad)
      end
    end

    check convergence at pg_{k+1}
 end
*/
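/*
  Note on the trust-region update above: with the default BNK_UPDATE_REDUCTION rule, the radius
  is managed from the ratio of actual to predicted reduction, rho = actred / prered. The step is
  accepted when rho is sufficiently positive, and the radius is shrunk or enlarged according to
  how well the quadratic model predicted the actual decrease; the exact thresholds are set by the
  BNK options.
*/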
PetscErrorCode TaoSolve_BNTL(Tao tao)
{
  TAO_BNK                     *bnk = (TAO_BNK *)tao->data;
  KSPConvergedReason           ksp_reason;
  TaoLineSearchConvergedReason ls_reason;

  PetscReal oldTrust, prered, actred, steplen, resnorm;
  PetscBool cgTerminate, needH = PETSC_TRUE, stepAccepted, shift = PETSC_FALSE;
  PetscInt  stepType, nDiff;

  PetscFunctionBegin;
  /* Initialize the preconditioner, KSP solver and trust radius/line search */
  tao->reason = TAO_CONTINUE_ITERATING;
  PetscCall(TaoBNKInitialize(tao, bnk->init_type, &needH));
  if (tao->reason != TAO_CONTINUE_ITERATING) PetscFunctionReturn(PETSC_SUCCESS);

  /* Have not converged; continue with Newton method */
  while (tao->reason == TAO_CONTINUE_ITERATING) {
    /* Call general purpose update function */
    if (tao->ops->update) {
      PetscUseTypeMethod(tao, update, tao->niter, tao->user_update);
      PetscCall(TaoComputeObjective(tao, tao->solution, &bnk->f));
    }

    if (needH && bnk->inactive_idx) {
      /* Take BNCG steps (if enabled) to trade off Hessian evaluations for more gradient evaluations */
      PetscCall(TaoBNKTakeCGSteps(tao, &cgTerminate));
      if (cgTerminate) {
        tao->reason = bnk->bncg->reason;
        PetscFunctionReturn(PETSC_SUCCESS);
      }
      /* Compute the Hessian and update the BFGS preconditioner at the new iterate */
      PetscCall((*bnk->computehessian)(tao));
      needH = PETSC_FALSE;
    }

    /* Use the common BNK kernel to compute the Newton step (for inactive variables only) */
    PetscCall((*bnk->computestep)(tao, shift, &ksp_reason, &stepType));

    /* Store current solution before it changes */
    oldTrust  = tao->trust;
    bnk->fold = bnk->f;
    PetscCall(VecCopy(tao->solution, bnk->Xold));
    PetscCall(VecCopy(tao->gradient, bnk->Gold));
    PetscCall(VecCopy(bnk->unprojected_gradient, bnk->unprojected_gradient_old));

    /* Temporarily accept the step and project it into the bounds */
    PetscCall(VecAXPY(tao->solution, 1.0, tao->stepdirection));
    PetscCall(TaoBoundSolution(tao->solution, tao->XL, tao->XU, 0.0, &nDiff, tao->solution));

    /* Check if the projection changed the step direction */
    if (nDiff > 0) {
      /* Projection changed the step, so we have to recompute the step and
         the predicted reduction. Leave the trust radius unchanged. */
      PetscCall(VecCopy(tao->solution, tao->stepdirection));
      PetscCall(VecAXPY(tao->stepdirection, -1.0, bnk->Xold));
      PetscCall(TaoBNKRecomputePred(tao, tao->stepdirection, &prered));
    } else {
      /* Step did not change, so we can just recover the pre-computed prediction */
      PetscCall(KSPCGGetObjFcn(tao->ksp, &prered));
    }
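    /* Both branches above compute the predicted change in the objective along the (projected)
       step; negate it so that prered holds the predicted reduction */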
    prered = -prered;

    /* Compute the actual reduction and update the trust radius */
    PetscCall(TaoComputeObjective(tao, tao->solution, &bnk->f));
    PetscCheck(!PetscIsInfOrNanReal(bnk->f), PetscObjectComm((PetscObject)tao), PETSC_ERR_USER, "User provided compute function generated Inf or NaN");
    actred = bnk->fold - bnk->f;
    PetscCall(TaoBNKUpdateTrustRadius(tao, prered, actred, bnk->update_type, stepType, &stepAccepted));

    if (stepAccepted) {
      /* Step is good, evaluate the gradient and the Hessian */
      steplen = 1.0;
      needH   = PETSC_TRUE;
      ++bnk->newt;
      PetscCall(TaoComputeGradient(tao, tao->solution, bnk->unprojected_gradient));
      PetscCall(TaoBNKEstimateActiveSet(tao, bnk->as_type));
      PetscCall(VecCopy(bnk->unprojected_gradient, tao->gradient));
      if (bnk->active_idx) PetscCall(VecISSet(tao->gradient, bnk->active_idx, 0.0));
      PetscCall(TaoGradientNorm(tao, tao->gradient, NORM_2, &bnk->gnorm));
    } else {
      /* Trust region rejected the step. Revert the solution. */
      bnk->f = bnk->fold;
      PetscCall(VecCopy(bnk->Xold, tao->solution));
      /* Trigger the line search */
      PetscCall(TaoBNKSafeguardStep(tao, ksp_reason, &stepType));
      PetscCall(TaoBNKPerformLineSearch(tao, &stepType, &steplen, &ls_reason));
      if (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER) {
        /* Line search failed, revert the solution and terminate */
        stepAccepted = PETSC_FALSE;
        needH        = PETSC_FALSE;
        bnk->f       = bnk->fold;
        PetscCall(VecCopy(bnk->Xold, tao->solution));
        PetscCall(VecCopy(bnk->Gold, tao->gradient));
        PetscCall(VecCopy(bnk->unprojected_gradient_old, bnk->unprojected_gradient));
        tao->trust  = 0.0;
        tao->reason = TAO_DIVERGED_LS_FAILURE;
      } else {
        /* New iterate, so we need to recompute the Hessian */
        needH = PETSC_TRUE;
        /* Compute the projected gradient */
        PetscCall(TaoBNKEstimateActiveSet(tao, bnk->as_type));
        PetscCall(VecCopy(bnk->unprojected_gradient, tao->gradient));
        if (bnk->active_idx) PetscCall(VecISSet(tao->gradient, bnk->active_idx, 0.0));
        PetscCall(TaoGradientNorm(tao, tao->gradient, NORM_2, &bnk->gnorm));
        /* Line search succeeded, so we update the trust radius based on the line-search step length */
        tao->trust = oldTrust;
        PetscCall(TaoBNKUpdateTrustRadius(tao, prered, actred, BNK_UPDATE_STEP, stepType, &stepAccepted));
        /* Count the accepted step type */
        PetscCall(TaoBNKAddStepCounts(tao, stepType));
      }
    }

    /* Check for termination */
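    /* The residual is the norm of the Fischer-Burmeister function, which is zero exactly at
       points satisfying the first-order optimality conditions of the bound-constrained problem */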
    PetscCall(VecFischer(tao->solution, bnk->unprojected_gradient, tao->XL, tao->XU, bnk->W));
    PetscCall(VecNorm(bnk->W, NORM_2, &resnorm));
    PetscCheck(!PetscIsInfOrNanReal(resnorm), PetscObjectComm((PetscObject)tao), PETSC_ERR_USER, "User provided compute function generated Inf or NaN");
    ++tao->niter;
    PetscCall(TaoLogConvergenceHistory(tao, bnk->f, resnorm, 0.0, tao->ksp_its));
    PetscCall(TaoMonitor(tao, tao->niter, bnk->f, resnorm, 0.0, steplen));
    PetscUseTypeMethod(tao, convergencetest, tao->cnvP);
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode TaoSetUp_BNTL(Tao tao)
{
  KSP       ksp;
  PetscBool valid;

  PetscFunctionBegin;
  PetscCall(TaoSetUp_BNK(tao));
  PetscCall(TaoGetKSP(tao, &ksp));
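  /* BNTL requires a KSP that can enforce a trust-region radius on the step, i.e. one that
     implements KSPCGSetRadius(), such as KSPNASH, KSPSTCG, or KSPGLTR */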
  PetscCall(PetscObjectHasFunction((PetscObject)ksp, "KSPCGSetRadius_C", &valid));
  PetscCheck(valid, PetscObjectComm((PetscObject)tao), PETSC_ERR_SUP, "Not for KSP type %s. Must use a trust-region CG method for KSP (e.g. KSPNASH, KSPSTCG, KSPGLTR)", ((PetscObject)ksp)->type_name);
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode TaoSetFromOptions_BNTL(Tao tao, PetscOptionItems *PetscOptionsObject)
{
  TAO_BNK *bnk = (TAO_BNK *)tao->data;

  PetscFunctionBegin;
  PetscCall(TaoSetFromOptions_BNK(tao, PetscOptionsObject));
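  /* BNK_UPDATE_STEP is reserved for the line-search fallback inside TaoSolve_BNTL, so it cannot
     drive the main trust-region loop; revert to the default reduction-based update if it was requested */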
  if (bnk->update_type == BNK_UPDATE_STEP) bnk->update_type = BNK_UPDATE_REDUCTION;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*MC
  TAOBNTL - Bounded Newton Trust Region method with line-search fallback for nonlinear
            minimization with bound constraints.

  Options Database Keys:
+ -tao_bnk_max_cg_its - maximum number of bounded conjugate-gradient iterations taken in each Newton loop
. -tao_bnk_init_type - trust radius initialization method ("constant", "direction", "interpolation")
. -tao_bnk_update_type - trust radius update method ("step", "reduction", "interpolation")
- -tao_bnk_as_type - active-set estimation method ("none", "bertsekas")

  Level: beginner

  Developer Note:
  One should control the maximum number of CG iterations through the standard pc_max_it option, not with a special
  ad hoc option.
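
  Example Usage:
  The following is a minimal sketch of driving this solver through the TAO interface. The vectors
  x, xl, xu, the matrix H, the callbacks FormFunctionGradient() and FormHessian(), and the user
  context are application-provided placeholders, and the calls assume the current TaoSetSolution()
  and TaoSetObjectiveAndGradient() interface.
.vb
  Tao tao;

  PetscCall(TaoCreate(PETSC_COMM_WORLD, &tao));
  PetscCall(TaoSetType(tao, TAOBNTL));
  PetscCall(TaoSetSolution(tao, x));                                            /* initial guess */
  PetscCall(TaoSetVariableBounds(tao, xl, xu));                                 /* bounds xl <= x <= xu */
  PetscCall(TaoSetObjectiveAndGradient(tao, NULL, FormFunctionGradient, &user));
  PetscCall(TaoSetHessian(tao, H, H, FormHessian, &user));
  PetscCall(TaoSetFromOptions(tao));                                            /* e.g. -tao_bnk_max_cg_its 3 */
  PetscCall(TaoSolve(tao));
  PetscCall(TaoDestroy(&tao));
.ve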
M*/

PETSC_EXTERN PetscErrorCode TaoCreate_BNTL(Tao tao)
{
  TAO_BNK *bnk;

  PetscFunctionBegin;
  PetscCall(TaoCreate_BNK(tao));
  tao->ops->solve          = TaoSolve_BNTL;
  tao->ops->setup          = TaoSetUp_BNTL;
  tao->ops->setfromoptions = TaoSetFromOptions_BNTL;

  bnk              = (TAO_BNK *)tao->data;
  bnk->update_type = BNK_UPDATE_REDUCTION; /* trust region updates based on predicted/actual reduction */
  PetscFunctionReturn(PETSC_SUCCESS);
}