#include "pism/inverse/IP_SSATaucTikhonovGNSolver.hh"
#include "pism/util/TerminationReason.hh"
#include "pism/util/pism_options.hh"
#include "pism/util/ConfigInterface.hh"
#include "pism/util/Grid.hh"
#include "pism/util/Context.hh"
#include "pism/util/petscwrappers/Vec.hh"
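// Constructor: the design-space and state-space work vectors used by the
// Gauss-Newton iteration are all allocated on the grid of the initial design d0.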
  : m_design_stencil_width(d0.stencil_width()),
    m_state_stencil_width(u_obs.stencil_width()),
    m_ssaforward(ssaforward),
    m_tmp_D1Global(d0.grid(), "work vector"),
    m_tmp_D2Global(d0.grid(), "work vector"),
    m_tmp_D1Local(d0.grid(), "work vector"),
    m_tmp_D2Local(d0.grid(), "work vector"),
    m_tmp_S1Global(d0.grid(), "work vector"),
    m_tmp_S2Global(d0.grid(), "work vector"),
    m_tmp_S1Local(d0.grid(), "work vector"),
    m_tmp_S2Local(d0.grid(), "work vector"),
    m_GN_rhs(d0.grid(), "GN_rhs"),
    m_dGlobal(d0.grid(), "d (sans ghosts)"),
    m_d_diff(d0.grid(), "d_diff"),
    m_d_diff_lin(d0.grid(), "d_diff linearized"),
    m_hGlobal(d0.grid(), "h (sans ghosts)"),
    m_dalpha_rhs(d0.grid(), "dalpha rhs"),
    m_dh_dalpha(d0.grid(), "dh_dalpha"),
    m_dh_dalphaGlobal(d0.grid(), "dh_dalpha"),
    m_grad_design(d0.grid(), "grad design"),
    m_grad_state(d0.grid(), "grad state"),
    m_gradient(d0.grid(), "gradient"),
    m_u_diff(d0.grid(), "du"),
    m_designFunctional(designFunctional),
    m_stateFunctional(stateFunctional),
  std::shared_ptr<const Grid> grid = m_d0.grid();

  m_d = std::make_shared<DesignVecGhosted>(grid, "d");
  ierr = KSPSetOptionsPrefix(m_ksp, "inv_gn_");
  PISM_CHK(ierr, "KSPSetOptionsPrefix");

  double ksp_rtol = 1e-5;
  ierr = KSPSetTolerances(m_ksp, ksp_rtol, PETSC_DEFAULT, PETSC_DEFAULT, PETSC_DEFAULT);

  ierr = KSPSetType(m_ksp, KSPCG);

  ierr = KSPGetPC(m_ksp, &pc);

  ierr = PCSetType(pc, PCNONE);

  ierr = KSPSetFromOptions(m_ksp);
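// The Gauss-Newton operator is never assembled as an explicit matrix: a PETSc
// shell (matrix-free) matrix sized like the design vector stands in for it,
// so that KSP matrix-vector products can be delegated to apply_GN().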
  int nLocalNodes  = grid->xm()*grid->ym();
  int nGlobalNodes = grid->Mx()*grid->My();
  ierr = MatCreateShell(grid->com, nLocalNodes, nLocalNodes,
  auto config = grid->ctx()->config();
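// Shell-matrix multiply wrapper: scatter the global PETSc vector x into the
// ghosted local work vector m_x, apply the Gauss-Newton operator, and copy
// the result into the output vector y.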
  ierr = DMGlobalToLocalBegin(*m_x.dm(), x, INSERT_VALUES, m_x.vec());
  PISM_CHK(ierr, "DMGlobalToLocalBegin");

  ierr = DMGlobalToLocalEnd(*m_x.dm(), x, INSERT_VALUES, m_x.vec());
  PISM_CHK(ierr, "DMGlobalToLocalEnd");

  ierr = VecCopy(GNx.vec(), y);
  PISM_CHK(ierr, "VecCopy");
  KSPConvergedReason ksp_reason;
  ierr = KSPGetConvergedReason(m_ksp, &ksp_reason);
  PISM_CHK(ierr, "KSPGetConvergedReason");
  *value = m_alpha*dValue + sValue;
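// Convergence test: compare the norm of the gradient of the combined
// functional ("sum") to the larger of the weighted design and state gradient
// norms; the iteration stops when this relative difference is small enough.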
  double designNorm, stateNorm, sumNorm;
  double dWeight, sWeight;

  designNorm *= dWeight;
  stateNorm  *= sWeight;

          "----------------------------------------------------------\n");
          "IP_SSATaucTikhonovGNSolver Iteration %d: misfit %g; functional %g \n",

  double relsum = (sumNorm / std::max(designNorm, stateNorm));
          "design norm %g stateNorm %g sum %g; relative difference %g\n",
          designNorm, stateNorm, sumNorm, relsum);
  if (reason->failed()) {

  double valDesign, valState;
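// Line search along the Gauss-Newton step: the step is accepted when it
// satisfies a sufficient-decrease (Armijo-type) condition on the Tikhonov
// functional, and is shrunk when that condition fails or the forward solve
// does not converge.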
  std::shared_ptr<TerminationReason> step_reason;

  double descent_derivative;

  if (descent_derivative >= 0) {
    printf("descent derivative: %g\n", descent_derivative);

  if (step_reason->succeeded()) {
    if (m_value <= old_value + 1e-3*alpha*descent_derivative) {

      printf("forward solve failed in linesearch. Shrinking.\n");

    printf("alpha= %g; derivative = %g\n", alpha, descent_derivative);

                                " IP_SSATaucTikhonovGNSolver::solve.");
  double dlogalpha = 0;

  std::shared_ptr<TerminationReason> step_reason, reason;

  if (step_reason->failed()) {
    reason->set_root_cause(step_reason);

  if (reason->done()) {

  if (step_reason->failed()) {
    reason->set_root_cause(step_reason);

  if (step_reason->failed()) {
    std::shared_ptr<TerminationReason> cause = reason;
    reason->set_root_cause(step_reason);

  if (step_reason->failed()) {
    std::shared_ptr<TerminationReason> cause = reason;
    reason->set_root_cause(step_reason);
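// compute_dlogalpha(): adjust the Tikhonov weight alpha adaptively.  The
// sensitivity dh/dalpha is obtained from an additional solve with the same
// inner KSP, so its convergence is checked as well.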
  KSPConvergedReason ksp_reason;
  ierr = KSPGetConvergedReason(m_ksp, &ksp_reason);
  PISM_CHK(ierr, "KSPGetConvergedReason");
  double ddisc_sq_dalpha;

  if (ddisc_sq_dalpha <= 0) {
            "Adaptive Tikhonov sanity check failed (dh/dalpha= %g <= 0)."
            " Tighten inv_gn_ksp_rtol?\n",

    double ddisc_sq_dalpha_a;
    double ddisc_sq_dalpha_b;
    ddisc_sq_dalpha = 2*m_alpha*(ddisc_sq_dalpha_a + m_alpha*ddisc_sq_dalpha_b);

            "Adaptive Tikhonov sanity check recovery attempt: dh/dalpha= %g. \n",

  if (fabs(*dlogalpha) > stepmax) {
    double sgn = *dlogalpha > 0 ? 1 : -1;
    *dlogalpha = stepmax*sgn;
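// Limit the change in log(alpha) to at most stepmax per iteration so that the
// adaptive adjustment of the regularization weight cannot overshoot.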