ROL_AugmentedLagrangianStep.hpp
// @HEADER
// ************************************************************************
//
//               Rapid Optimization Library (ROL) Package
//                 Copyright (2014) Sandia Corporation
//
// Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
// license for use of this work by or on behalf of the U.S. Government.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact lead developers:
//              Drew Kouri   (dpkouri@sandia.gov) and
//              Denis Ridzal (dridzal@sandia.gov)
//
// ************************************************************************
// @HEADER

#ifndef ROL_AUGMENTEDLAGRANGIANSTEP_H
#define ROL_AUGMENTEDLAGRANGIANSTEP_H

#include "ROL_AugmentedLagrangian.hpp"
#include "ROL_Types.hpp"
#include "ROL_Algorithm.hpp"
#include "ROL_ParameterList.hpp"

// Step (bound constrained or unconstrained) includes
#include "ROL_LineSearchStep.hpp"
#include "ROL_TrustRegionStep.hpp"
#include "ROL_PrimalDualActiveSetStep.hpp"
#include "ROL_MoreauYosidaPenalty.hpp"
#include "ROL_BundleStep.hpp"
#include "ROL_InteriorPoint.hpp"

// StatusTest includes
#include "ROL_StatusTest.hpp"
#include "ROL_BundleStatusTest.hpp"

namespace ROL {

template<class Real>
class MoreauYosidaPenaltyStep;

template<class Real>
class InteriorPointStep;

/** \class ROL::AugmentedLagrangianStep
    \brief Provides the interface to compute augmented Lagrangian steps.
*/
template <class Real>
class AugmentedLagrangianStep : public Step<Real> {
private:
  ROL::Ptr<StatusTest<Real>>      status_;
  ROL::Ptr<Step<Real>>            step_;
  ROL::Ptr<Algorithm<Real>>       algo_;
  ROL::Ptr<Vector<Real>>          x_;
  ROL::Ptr<BoundConstraint<Real>> bnd_;

  ROL::ParameterList parlist_;
  // Lagrange multiplier update
  bool useDefaultInitPen_;
  bool scaleLagrangian_;
  Real minPenaltyReciprocal_;
  Real minPenaltyLowerBound_;
  Real penaltyUpdate_;
  Real maxPenaltyParam_;
  // Optimality tolerance update
  Real optIncreaseExponent_;
  Real optDecreaseExponent_;
  Real optToleranceInitial_;
  Real optTolerance_;
  // Feasibility tolerance update
  Real feasIncreaseExponent_;
  Real feasDecreaseExponent_;
  Real feasToleranceInitial_;
  Real feasTolerance_;
  // Subproblem information
  bool print_;
  int  maxit_;
  int  subproblemIter_;
  std::string subStep_;
  Real outerOptTolerance_;
  Real outerFeasTolerance_;
  Real outerStepTolerance_;
  // Scaling information
  bool useDefaultScaling_;
  Real fscale_;
  Real cscale_;
  // Verbosity flag
  int verbosity_;

  Real computeGradient(Vector<Real> &g, const Vector<Real> &x,
                       const Real mu, Objective<Real> &obj,
                       BoundConstraint<Real> &bnd) {
    AugmentedLagrangian<Real> &augLag
      = dynamic_cast<AugmentedLagrangian<Real>&>(obj);
    Real gnorm = 0., tol = std::sqrt(ROL_EPSILON<Real>());
    augLag.gradient(g,x,tol);
    if ( scaleLagrangian_ ) {
      g.scale(mu);
    }
    // Compute norm of projected gradient
    if (bnd.isActivated()) {
      x_->set(x);
      x_->axpy(static_cast<Real>(-1),g.dual());
      bnd.project(*x_);
      x_->axpy(static_cast<Real>(-1),x);
      gnorm = x_->norm();
    }
    else {
      gnorm = g.norm();
    }
    return gnorm;
  }

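  // Note: when bound constraints are active, computeGradient measures
  // criticality by the norm of the projected-gradient step,
  // ||x - P(x - dual(g))||, with P the projection onto the bound set;
  // otherwise it uses the plain gradient norm.
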
public:

  using Step<Real>::initialize;
  using Step<Real>::compute;
  using Step<Real>::update;

  AugmentedLagrangianStep(ROL::ParameterList &parlist)
    : Step<Real>(), algo_(ROL::nullPtr),
      x_(ROL::nullPtr), parlist_(parlist), subproblemIter_(0) {
    Real one(1), p1(0.1), p9(0.9), ten(1.e1), oe8(1.e8), oem8(1.e-8);
    ROL::ParameterList& sublist = parlist.sublist("Step").sublist("Augmented Lagrangian");
    useDefaultInitPen_ = sublist.get("Use Default Initial Penalty Parameter",true);
    Step<Real>::getState()->searchSize = sublist.get("Initial Penalty Parameter",ten);
    // Multiplier update parameters
    scaleLagrangian_      = sublist.get("Use Scaled Augmented Lagrangian",          false);
    minPenaltyLowerBound_ = sublist.get("Penalty Parameter Reciprocal Lower Bound", p1);
    minPenaltyReciprocal_ = p1;
    penaltyUpdate_        = sublist.get("Penalty Parameter Growth Factor",          ten);
    maxPenaltyParam_      = sublist.get("Maximum Penalty Parameter",                oe8);
    // Optimality tolerance update
    optIncreaseExponent_ = sublist.get("Optimality Tolerance Update Exponent",   one);
    optDecreaseExponent_ = sublist.get("Optimality Tolerance Decrease Exponent", one);
    optToleranceInitial_ = sublist.get("Initial Optimality Tolerance",           one);
    // Feasibility tolerance update
    feasIncreaseExponent_ = sublist.get("Feasibility Tolerance Update Exponent",   p1);
    feasDecreaseExponent_ = sublist.get("Feasibility Tolerance Decrease Exponent", p9);
    feasToleranceInitial_ = sublist.get("Initial Feasibility Tolerance",           one);
    // Subproblem information
    print_   = sublist.get("Print Intermediate Optimization History", false);
    maxit_   = sublist.get("Subproblem Iteration Limit",              1000);
    subStep_ = sublist.get("Subproblem Step Type",                    "Trust Region");
    parlist_.sublist("Step").set("Type",subStep_);
    parlist_.sublist("Status Test").set("Iteration Limit",maxit_);
    // Verbosity setting
    verbosity_ = parlist.sublist("General").get("Print Verbosity", 0);
    print_ = (verbosity_ > 0 ? true : print_);
    // Outer iteration tolerances
    outerFeasTolerance_ = parlist.sublist("Status Test").get("Constraint Tolerance", oem8);
    outerOptTolerance_  = parlist.sublist("Status Test").get("Gradient Tolerance",   oem8);
    outerStepTolerance_ = parlist.sublist("Status Test").get("Step Tolerance",       oem8);
    // Scaling
    useDefaultScaling_ = sublist.get("Use Default Problem Scaling", true);
    fscale_ = sublist.get("Objective Scaling",  1.0);
    cscale_ = sublist.get("Constraint Scaling", 1.0);
  }

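  // Example of the sublist this constructor reads (a sketch; the values
  // shown are the defaults coded above, and the "Status Test" entries set
  // the outer tolerances):
  //
  //   ROL::ParameterList parlist;
  //   ROL::ParameterList &al = parlist.sublist("Step").sublist("Augmented Lagrangian");
  //   al.set("Initial Penalty Parameter",       1.e1);
  //   al.set("Penalty Parameter Growth Factor", 1.e1);
  //   al.set("Maximum Penalty Parameter",       1.e8);
  //   al.set("Subproblem Step Type",            "Trust Region");
  //   al.set("Subproblem Iteration Limit",      1000);
  //   parlist.sublist("Status Test").set("Constraint Tolerance", 1.e-8);
  //   parlist.sublist("Status Test").set("Gradient Tolerance",   1.e-8);
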
  /** \brief Initialize step with equality constraint.
  */
  void initialize( Vector<Real> &x, const Vector<Real> &g, Vector<Real> &l,
                   const Vector<Real> &c, Objective<Real> &obj, Constraint<Real> &con,
                   AlgorithmState<Real> &algo_state ) {
    bnd_ = ROL::makePtr<BoundConstraint<Real>>();
    bnd_->deactivate();
    initialize(x,g,l,c,obj,con,*bnd_,algo_state);
  }

  /** \brief Initialize step with equality and bound constraints.
  */
  void initialize( Vector<Real> &x, const Vector<Real> &g, Vector<Real> &l,
                   const Vector<Real> &c, Objective<Real> &obj, Constraint<Real> &con,
                   BoundConstraint<Real> &bnd, AlgorithmState<Real> &algo_state ) {
    Real one(1), TOL(1.e-2);
    AugmentedLagrangian<Real> &augLag
      = dynamic_cast<AugmentedLagrangian<Real>&>(obj);
    // Initialize step state
    ROL::Ptr<StepState<Real> > state = Step<Real>::getState();
    state->descentVec    = x.clone();
    state->gradientVec   = g.clone();
    state->constraintVec = c.clone();
    // Initialize additional storage
    x_ = x.clone();
    // Initialize the algorithm state
    algo_state.nfval = 0;
    algo_state.ncval = 0;
    algo_state.ngrad = 0;
    // Project x onto the feasible set
    if ( bnd.isActivated() ) {
      bnd.project(x);
    }
    // Update objective and constraint
    augLag.update(x,true,algo_state.iter);
    if (useDefaultScaling_) {
      fscale_ = one/std::max(one,augLag.getObjectiveGradient(x)->norm());
      try {
        Real tol = std::sqrt(ROL_EPSILON<Real>());
        Ptr<Vector<Real>> ji = x.clone();
        Real maxji(0), normji(0);
        for (int i = 0; i < c.dimension(); ++i) {
          con.applyAdjointJacobian(*ji,*c.basis(i),x,tol);
          normji = ji->norm();
          maxji  = std::max(normji,maxji);
        }
        cscale_ = one/std::max(one,maxji);
      }
      catch (std::exception &e) {
        cscale_ = one;
      }
    }
    augLag.setScaling(fscale_,cscale_);
    algo_state.value = augLag.getObjectiveValue(x);
    algo_state.gnorm = computeGradient(*(state->gradientVec),x,state->searchSize,obj,bnd);
    augLag.getConstraintVec(*(state->constraintVec),x);
    algo_state.cnorm = (state->constraintVec)->norm();
    if (useDefaultInitPen_) {
      Step<Real>::getState()->searchSize
        = std::max(static_cast<Real>(1e-8),std::min(static_cast<Real>(10)*
            std::max(one,std::abs(fscale_*algo_state.value))
            /std::max(one,std::pow(cscale_*algo_state.cnorm,2)),
            static_cast<Real>(1e-2)*maxPenaltyParam_));
    }
    // Update evaluation counters
    algo_state.ncval += augLag.getNumberConstraintEvaluations();
    algo_state.nfval += augLag.getNumberFunctionEvaluations();
    algo_state.ngrad += augLag.getNumberGradientEvaluations();
    // Initialize intermediate stopping tolerances
    minPenaltyReciprocal_ = std::min(one/state->searchSize,minPenaltyLowerBound_);
    optTolerance_  = std::max<Real>(TOL*outerOptTolerance_,
                       optToleranceInitial_*std::pow(minPenaltyReciprocal_,optDecreaseExponent_));
    optTolerance_  = std::min<Real>(optTolerance_,TOL*algo_state.gnorm);
    feasTolerance_ = std::max<Real>(TOL*outerFeasTolerance_,
                       feasToleranceInitial_*std::pow(minPenaltyReciprocal_,feasDecreaseExponent_));
    if (verbosity_ > 0) {
      std::cout << std::endl;
      std::cout << "Augmented Lagrangian Initialize" << std::endl;
      std::cout << "Objective Scaling:  " << fscale_ << std::endl;
      std::cout << "Constraint Scaling: " << cscale_ << std::endl;
      std::cout << std::endl;
    }
  }

  /** \brief Compute step (equality constraint).
  */
  void compute( Vector<Real> &s, const Vector<Real> &x, const Vector<Real> &l,
                Objective<Real> &obj, Constraint<Real> &con,
                AlgorithmState<Real> &algo_state ) {
    compute(s,x,l,obj,con,*bnd_,algo_state);
  }

  /** \brief Compute step (equality and bound constraints).
  */
  void compute( Vector<Real> &s, const Vector<Real> &x, const Vector<Real> &l,
                Objective<Real> &obj, Constraint<Real> &con,
                BoundConstraint<Real> &bnd, AlgorithmState<Real> &algo_state ) {
    Real one(1);
    parlist_.sublist("Status Test").set("Gradient Tolerance",optTolerance_);
    parlist_.sublist("Status Test").set("Step Tolerance",1.e-6*optTolerance_);
    // Select the subproblem step and status test
    Ptr<Objective<Real>> penObj;
    if (subStep_ == "Bundle") {
      step_   = makePtr<BundleStep<Real>>(parlist_);
      status_ = makePtr<BundleStatusTest<Real>>(parlist_);
      penObj  = makePtrFromRef(obj);
    }
    else if (subStep_ == "Line Search") {
      step_   = makePtr<LineSearchStep<Real>>(parlist_);
      status_ = makePtr<StatusTest<Real>>(parlist_);
      penObj  = makePtrFromRef(obj);
    }
    else if (subStep_ == "Moreau-Yosida Penalty") {
      step_   = makePtr<MoreauYosidaPenaltyStep<Real>>(parlist_);
      status_ = makePtr<StatusTest<Real>>(parlist_);
      Ptr<Objective<Real>> raw_obj = makePtrFromRef(obj);
      penObj  = ROL::makePtr<MoreauYosidaPenalty<Real>>(raw_obj,bnd_,x,parlist_);
    }
    else if (subStep_ == "Primal Dual Active Set") {
      step_   = makePtr<PrimalDualActiveSetStep<Real>>(parlist_);
      status_ = makePtr<StatusTest<Real>>(parlist_);
      penObj  = makePtrFromRef(obj);
    }
    else if (subStep_ == "Trust Region") {
      step_   = makePtr<TrustRegionStep<Real>>(parlist_);
      status_ = makePtr<StatusTest<Real>>(parlist_);
      penObj  = makePtrFromRef(obj);
    }
    else if (subStep_ == "Interior Point") {
      step_   = makePtr<InteriorPointStep<Real>>(parlist_);
      status_ = makePtr<StatusTest<Real>>(parlist_);
      Ptr<Objective<Real>> raw_obj = makePtrFromRef(obj);
      penObj  = ROL::makePtr<InteriorPoint::PenalizedObjective<Real>>(raw_obj,bnd_,x,parlist_);
    }
    else {
      throw Exception::NotImplemented(">>> ROL::AugmentedLagrangianStep: Incompatible substep type!");
    }
    // Solve the subproblem
    algo_ = makePtr<Algorithm<Real>>(step_,status_,false);
    x_->set(x);
    if ( bnd.isActivated() ) {
      algo_->run(*x_,*penObj,bnd,print_);
    }
    else {
      algo_->run(*x_,*penObj,print_);
    }
    s.set(*x_); s.axpy(-one,x);
    subproblemIter_ = (algo_->getState())->iter;
  }

  /** \brief Update step, if successful (equality constraint).
  */
  void update( Vector<Real> &x, Vector<Real> &l, const Vector<Real> &s,
               Objective<Real> &obj, Constraint<Real> &con,
               AlgorithmState<Real> &algo_state ) {
    update(x,l,s,obj,con,*bnd_,algo_state);
  }

  /** \brief Update step, if successful (equality and bound constraints).
  */
  void update( Vector<Real> &x, Vector<Real> &l, const Vector<Real> &s,
               Objective<Real> &obj, Constraint<Real> &con,
               BoundConstraint<Real> &bnd, AlgorithmState<Real> &algo_state ) {
    Real one(1), oem2(1.e-2);
    AugmentedLagrangian<Real> &augLag
      = dynamic_cast<AugmentedLagrangian<Real>&>(obj);
    ROL::Ptr<StepState<Real> > state = Step<Real>::getState();
    state->SPiter = subproblemIter_;
    // Update the step and store in state
    x.plus(s);
    algo_state.iterateVec->set(x);
    state->descentVec->set(s);
    algo_state.snorm = s.norm();
    algo_state.iter++;
    // Update objective function value
    obj.update(x);
    algo_state.value = augLag.getObjectiveValue(x);
    // Update constraint value
    augLag.getConstraintVec(*(state->constraintVec),x);
    algo_state.cnorm = (state->constraintVec)->norm();
    // Compute gradient of the augmented Lagrangian
    algo_state.gnorm = computeGradient(*(state->gradientVec),x,state->searchSize,obj,bnd);
    algo_state.gnorm /= std::min(fscale_,cscale_);
    // Update evaluation counters
    algo_state.nfval += augLag.getNumberFunctionEvaluations();
    algo_state.ngrad += augLag.getNumberGradientEvaluations();
    algo_state.ncval += augLag.getNumberConstraintEvaluations();
    // Update objective function and constraints
    augLag.update(x,true,algo_state.iter);
    // Update multipliers
    minPenaltyReciprocal_ = std::min(one/state->searchSize,minPenaltyLowerBound_);
    if ( cscale_*algo_state.cnorm < feasTolerance_ ) {
      l.axpy(state->searchSize*cscale_,(state->constraintVec)->dual());
      if ( algo_->getState()->statusFlag == EXITSTATUS_CONVERGED ) {
        optTolerance_ = std::max(oem2*outerOptTolerance_,
                          optTolerance_*std::pow(minPenaltyReciprocal_,optIncreaseExponent_));
      }
      feasTolerance_ = std::max(oem2*outerFeasTolerance_,
                         feasTolerance_*std::pow(minPenaltyReciprocal_,feasIncreaseExponent_));
      // Update Algorithm State
      algo_state.snorm += state->searchSize*cscale_*algo_state.cnorm;
      algo_state.lagmultVec->set(l);
    }
    else {
      state->searchSize = std::min(penaltyUpdate_*state->searchSize,maxPenaltyParam_);
      optTolerance_  = std::max(oem2*outerOptTolerance_,
                         optToleranceInitial_*std::pow(minPenaltyReciprocal_,optDecreaseExponent_));
      feasTolerance_ = std::max(oem2*outerFeasTolerance_,
                         feasToleranceInitial_*std::pow(minPenaltyReciprocal_,feasDecreaseExponent_));
    }
    augLag.reset(l,state->searchSize);
  }

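  // Note: the multiplier/penalty branch above implements the classical
  // augmented Lagrangian update. If the scaled constraint norm meets the
  // current feasibility tolerance, the multiplier is advanced by the
  // first-order formula l <- l + mu*cscale*c(x) and the subproblem stopping
  // tolerances are tightened; otherwise the penalty parameter mu is
  // multiplied by penaltyUpdate_ (capped at maxPenaltyParam_) and the
  // tolerances are reset from their initial values.
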
  /** \brief Print iterate header.
  */
  std::string printHeader( void ) const {
    std::stringstream hist;

    if (verbosity_ > 0) {
      hist << std::string(114,'-') << std::endl;
      hist << "Augmented Lagrangian status output definitions" << std::endl << std::endl;
      hist << "  iter    - Number of iterates (steps taken)" << std::endl;
      hist << "  fval    - Objective function value" << std::endl;
      hist << "  cnorm   - Norm of the constraint violation" << std::endl;
      hist << "  gLnorm  - Norm of the gradient of the Lagrangian" << std::endl;
      hist << "  snorm   - Norm of the step" << std::endl;
      hist << "  penalty - Penalty parameter" << std::endl;
      hist << "  feasTol - Feasibility tolerance" << std::endl;
      hist << "  optTol  - Optimality tolerance" << std::endl;
      hist << "  #fval   - Number of times the objective was computed" << std::endl;
      hist << "  #grad   - Number of times the gradient was computed" << std::endl;
      hist << "  #cval   - Number of times the constraint was computed" << std::endl;
      hist << "  subIter - Number of iterations to solve subproblem" << std::endl;
      hist << std::string(114,'-') << std::endl;
    }
    hist << "  ";
    hist << std::setw(6)  << std::left << "iter";
    hist << std::setw(15) << std::left << "fval";
    hist << std::setw(15) << std::left << "cnorm";
    hist << std::setw(15) << std::left << "gLnorm";
    hist << std::setw(15) << std::left << "snorm";
    hist << std::setw(10) << std::left << "penalty";
    hist << std::setw(10) << std::left << "feasTol";
    hist << std::setw(10) << std::left << "optTol";
    hist << std::setw(8)  << std::left << "#fval";
    hist << std::setw(8)  << std::left << "#grad";
    hist << std::setw(8)  << std::left << "#cval";
    hist << std::setw(8)  << std::left << "subIter";
    hist << std::endl;
    return hist.str();
  }

  /** \brief Print step name.
  */
  std::string printName( void ) const {
    std::stringstream hist;
    hist << std::endl << " Augmented Lagrangian Solver";
    hist << std::endl;
    hist << "Subproblem Solver: " << subStep_ << std::endl;
    return hist.str();
  }

  /** \brief Print iterate status.
  */
  std::string print( AlgorithmState<Real> &algo_state, bool pHeader = false ) const {
    std::stringstream hist;
    hist << std::scientific << std::setprecision(6);
    if ( algo_state.iter == 0 ) {
      hist << printName();
    }
    if ( pHeader ) {
      hist << printHeader();
    }
    if ( algo_state.iter == 0 ) {
      hist << "  ";
      hist << std::setw(6)  << std::left << algo_state.iter;
      hist << std::setw(15) << std::left << algo_state.value;
      hist << std::setw(15) << std::left << algo_state.cnorm;
      hist << std::setw(15) << std::left << algo_state.gnorm;
      hist << std::setw(15) << std::left << " ";
      hist << std::scientific << std::setprecision(2);
      hist << std::setw(10) << std::left << Step<Real>::getStepState()->searchSize;
      hist << std::setw(10) << std::left << std::max(feasTolerance_,outerFeasTolerance_);
      hist << std::setw(10) << std::left << std::max(optTolerance_,outerOptTolerance_);
      hist << std::endl;
    }
    else {
      hist << "  ";
      hist << std::setw(6)  << std::left << algo_state.iter;
      hist << std::setw(15) << std::left << algo_state.value;
      hist << std::setw(15) << std::left << algo_state.cnorm;
      hist << std::setw(15) << std::left << algo_state.gnorm;
      hist << std::setw(15) << std::left << algo_state.snorm;
      hist << std::scientific << std::setprecision(2);
      hist << std::setw(10) << std::left << Step<Real>::getStepState()->searchSize;
      hist << std::setw(10) << std::left << feasTolerance_;
      hist << std::setw(10) << std::left << optTolerance_;
      hist << std::scientific << std::setprecision(6);
      hist << std::setw(8) << std::left << algo_state.nfval;
      hist << std::setw(8) << std::left << algo_state.ngrad;
      hist << std::setw(8) << std::left << algo_state.ncval;
      hist << std::setw(8) << std::left << subproblemIter_;
      hist << std::endl;
    }
    return hist.str();
  }

  /** \brief Compute step for bound constraints; here only to satisfy the
             interface requirements.
  */
  void compute( Vector<Real> &s, const Vector<Real> &x, Objective<Real> &obj,
                BoundConstraint<Real> &con, AlgorithmState<Real> &algo_state ) {}

  /** \brief Update step, for bound constraints; here only to satisfy the
             interface requirements.
  */
  void update( Vector<Real> &x, const Vector<Real> &s, Objective<Real> &obj,
               BoundConstraint<Real> &con, AlgorithmState<Real> &algo_state ) {}

}; // class AugmentedLagrangianStep

} // namespace ROL

#endif
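
A minimal driver sketch follows, mirroring the pattern used in ROL's equality-constrained tests. Because initialize() and update() dynamic_cast their Objective argument to AugmentedLagrangian, the caller wraps the raw objective and constraint in a ROL::AugmentedLagrangian before running. MyObjective and MyConstraint are hypothetical user-defined classes, and the AugmentedLagrangian and Algorithm constructor signatures shown are assumptions that may differ across ROL versions.

// Hypothetical user-defined problem classes (assumed, not part of this file):
//   MyObjective  : public ROL::Objective<RealT>
//   MyConstraint : public ROL::Constraint<RealT>
#include <iostream>
#include <vector>
#include "ROL_Algorithm.hpp"
#include "ROL_AugmentedLagrangian.hpp"
#include "ROL_StdVector.hpp"

int main() {
  using RealT = double;

  // Outer solver configuration.
  ROL::ParameterList parlist;
  parlist.sublist("Step").sublist("Augmented Lagrangian")
         .set("Subproblem Step Type", "Trust Region");
  parlist.sublist("Status Test").set("Constraint Tolerance", 1.e-8);
  parlist.sublist("Status Test").set("Gradient Tolerance",   1.e-8);
  parlist.sublist("Status Test").set("Iteration Limit",      100);

  // Optimization vector x, multiplier l, and constraint-space vector c.
  int n = 5, m = 3;
  ROL::Ptr<std::vector<RealT>> x_ptr = ROL::makePtr<std::vector<RealT>>(n, 1.0);
  ROL::Ptr<std::vector<RealT>> l_ptr = ROL::makePtr<std::vector<RealT>>(m, 0.0);
  ROL::Ptr<std::vector<RealT>> c_ptr = ROL::makePtr<std::vector<RealT>>(m, 0.0);
  ROL::StdVector<RealT> x(x_ptr), l(l_ptr), c(c_ptr);

  ROL::Ptr<ROL::Objective<RealT>>  obj = ROL::makePtr<MyObjective<RealT>>();
  ROL::Ptr<ROL::Constraint<RealT>> con = ROL::makePtr<MyConstraint<RealT>>();

  // Wrap objective and constraint: AugmentedLagrangianStep expects the
  // Objective it receives to be a ROL::AugmentedLagrangian.
  ROL::AugmentedLagrangian<RealT> augLag(obj, con, l, 1.0, x, c, parlist);

  // The string-based Algorithm constructor dispatches to AugmentedLagrangianStep.
  ROL::Algorithm<RealT> algo("Augmented Lagrangian", parlist, false);
  algo.run(x, l, augLag, *con, true, std::cout);
  return 0;
}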