#include "pism/inverse/IP_SSATaucTikhonovGNSolver.hh"
#include "pism/util/TerminationReason.hh"
#include "pism/util/pism_options.hh"
#include "pism/util/ConfigInterface.hh"
#include "pism/util/Grid.hh"
#include "pism/util/Context.hh"
#include "pism/util/petscwrappers/Vec.hh"
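// Constructor: the initializer list below allocates the design-space and
// state-space work vectors, all sharing the grid of the initial design d0.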
// ...
IP_SSATaucTikhonovGNSolver::IP_SSATaucTikhonovGNSolver(IP_SSATaucForwardProblem &ssaforward,
                                                       DesignVec &d0, StateVec &u_obs, double eta,
                                                       IPInnerProductFunctional<DesignVec> &designFunctional,
                                                       IPInnerProductFunctional<StateVec> &stateFunctional)
  : m_design_stencil_width(d0.stencil_width()),
    m_state_stencil_width(u_obs.stencil_width()),
    m_ssaforward(ssaforward),
    // ...
    m_tmp_D1Global(d0.grid(), "work vector"),
    m_tmp_D2Global(d0.grid(), "work vector"),
    m_tmp_D1Local(d0.grid(), "work vector"),
    m_tmp_D2Local(d0.grid(), "work vector"),
    m_tmp_S1Global(d0.grid(), "work vector"),
    m_tmp_S2Global(d0.grid(), "work vector"),
    m_tmp_S1Local(d0.grid(), "work vector"),
    m_tmp_S2Local(d0.grid(), "work vector"),
    m_GN_rhs(d0.grid(), "GN_rhs"),
    // ...
    m_dGlobal(d0.grid(), "d (sans ghosts)"),
    m_d_diff(d0.grid(), "d_diff"),
    m_d_diff_lin(d0.grid(), "d_diff linearized"),
    // ...
    m_hGlobal(d0.grid(), "h (sans ghosts)"),
    m_dalpha_rhs(d0.grid(), "dalpha rhs"),
    m_dh_dalpha(d0.grid(), "dh_dalpha"),
    m_dh_dalphaGlobal(d0.grid(), "dh_dalpha"),
    m_grad_design(d0.grid(), "grad design"),
    m_grad_state(d0.grid(), "grad state"),
    m_gradient(d0.grid(), "gradient"),
    // ...
    m_u_diff(d0.grid(), "du"),
    // ...
    m_designFunctional(designFunctional),
    m_stateFunctional(stateFunctional),
    // ...
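// Constructor body: fetch the shared grid and allocate the ghosted copy of
// the design variable.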
  std::shared_ptr<const Grid> grid = m_d0.grid();
  // ...
  m_d = std::make_shared<DesignVecGhosted>(grid, "d");
  // ...
  ierr = KSPSetOptionsPrefix(m_ksp, "inv_gn_");
  PISM_CHK(ierr, "KSPSetOptionsPrefix");

  double ksp_rtol = 1e-5;
  ierr = KSPSetTolerances(m_ksp, ksp_rtol, PETSC_DEFAULT, PETSC_DEFAULT, PETSC_DEFAULT);
  // ...
  ierr = KSPSetType(m_ksp, KSPCG);
  // ...
  ierr = KSPGetPC(m_ksp, &pc);
  // ...
  ierr = PCSetType(pc, PCNONE);
  // ...
  ierr = KSPSetFromOptions(m_ksp);
  // ...
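// The Gauss-Newton operator is applied matrix-free: MatCreateShell() wraps
// this solver object so the KSP can invoke apply_GN() without assembling a
// matrix (the elided arguments supply the global sizes and the callback).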
  int nLocalNodes  = grid->xm()*grid->ym();
  int nGlobalNodes = grid->Mx()*grid->My();
  ierr = MatCreateShell(grid->com, nLocalNodes, nLocalNodes, /* ... */);
  // ...
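// Stopping tolerances for the outer Tikhonov iteration, read from the
// configuration database.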
  m_tikhonov_atol = grid->ctx()->config()->get_number("inverse.tikhonov.atol");
  m_tikhonov_rtol = grid->ctx()->config()->get_number("inverse.tikhonov.rtol");
  m_tikhonov_ptol = grid->ctx()->config()->get_number("inverse.tikhonov.ptol");
  // ...
  ierr = DMGlobalToLocalBegin(*m_x.dm(), x, INSERT_VALUES, m_x.vec());
  PISM_CHK(ierr, "DMGlobalToLocalBegin");

  ierr = DMGlobalToLocalEnd(*m_x.dm(), x, INSERT_VALUES, m_x.vec());
  PISM_CHK(ierr, "DMGlobalToLocalEnd");
  // ...
  ierr = VecCopy(GNx.vec(), y);
  PISM_CHK(ierr, "VecCopy");
  // ...
  KSPConvergedReason ksp_reason;
  ierr = KSPGetConvergedReason(m_ksp, &ksp_reason);
  PISM_CHK(ierr, "KSPGetConvergedReason");
  // ...
  *value = m_alpha*dValue + sValue;
  // ...
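// check_convergence(): at a Tikhonov minimum the weighted design and state
// gradients cancel, so the norm of their sum should be small relative to
// the larger of the two individual norms.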
  double designNorm, stateNorm, sumNorm;
  double dWeight, sWeight;
  // ...
  designNorm *= dWeight;
  stateNorm  *= sWeight;
  // ...
              "----------------------------------------------------------\n");
  // ...
              "IP_SSATaucTikhonovGNSolver Iteration %d: misfit %g; functional %g\n",
              // ...
  double relsum = (sumNorm / std::max(designNorm, stateNorm));
  // ...
              "design norm %g; state norm %g; sum %g; relative difference %g\n",
              designNorm, stateNorm, sumNorm, relsum);
  // ...
  if (reason->failed()) {
    // ...
  }
  // ...
  double valDesign, valState;
  // ...
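// linesearch(): backtracking line search along the Gauss-Newton direction;
// the acceptance test below is an Armijo sufficient-decrease condition with
// constant 1e-3.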
  std::shared_ptr<TerminationReason> step_reason;
  // ...
  double descent_derivative;
  // ...
  if (descent_derivative >= 0) {
    printf("descent derivative: %g\n", descent_derivative);
    // ...
  }
  // ...
  if (step_reason->succeeded()) {
    if (m_value <= old_value + 1e-3*alpha*descent_derivative) {
      // ...
    }
  } else {
    printf("forward solve failed in linesearch. Shrinking.\n");
  }
  // ...
  printf("alpha = %g; derivative = %g\n", alpha, descent_derivative);
  // ...
              " IP_SSATaucTikhonovGNSolver::solve.");
  // ...
  double dlogalpha = 0;
  // ...
  std::shared_ptr<TerminationReason> step_reason, reason;
  // ...
  if (step_reason->failed()) {
    // ...
    reason->set_root_cause(step_reason);
    // ...
  }
  // ...
  if (reason->done()) {
    // ...
  }
  // ...
  if (step_reason->failed()) {
    // ...
    reason->set_root_cause(step_reason);
    // ...
  }
  // ...
  if (step_reason->failed()) {
    std::shared_ptr<TerminationReason> cause = reason;
    // ...
    reason->set_root_cause(step_reason);
    // ...
  }
  // ...
  if (step_reason->failed()) {
    std::shared_ptr<TerminationReason> cause = reason;
    // ...
    reason->set_root_cause(step_reason);
    // ...
  }
  // ...
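// compute_dlogalpha(): after the auxiliary Krylov solve for dh/dalpha,
// verify that the solver converged.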
  KSPConvergedReason ksp_reason;
  ierr = KSPGetConvergedReason(m_ksp, &ksp_reason);
  PISM_CHK(ierr, "KSPGetConvergedReason");
  // ...
  double ddisc_sq_dalpha;
  // ...
  if (ddisc_sq_dalpha <= 0) {
    // ...
                "Adaptive Tikhonov sanity check failed (dh/dalpha = %g <= 0)."
                " Tighten inv_gn_ksp_rtol?\n",
    // ...
    double ddisc_sq_dalpha_a;
    // ...
    double ddisc_sq_dalpha_b;
    // ...
    ddisc_sq_dalpha = 2*m_alpha*(ddisc_sq_dalpha_a + m_alpha*ddisc_sq_dalpha_b);
    // ...
                "Adaptive Tikhonov sanity check recovery attempt: dh/dalpha = %g.\n",
    // ...
  }
  // ...
  if (fabs(*dlogalpha) > stepmax) {
    double sgn = *dlogalpha > 0 ? 1 : -1;
    *dlogalpha = stepmax*sgn;
  }
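// Usage sketch (an assumption based on the public interface declared in
// IP_SSATaucTikhonovGNSolver.hh, not code from this file):
//
//   IP_SSATaucTikhonovGNSolver solver(ssaforward, d0, u_obs, eta,
//                                     designFunctional, stateFunctional);
//   solver.init();
//   std::shared_ptr<TerminationReason> reason = solver.solve();
//   if (reason->succeeded()) {
//     // use the recovered design variable
//   }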