SSAFD.cc
// Copyright (C) 2004--2024 Constantine Khroulev, Ed Bueler and Jed Brown
//
// This file is part of PISM.
//
// PISM is free software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the Free Software
// Foundation; either version 3 of the License, or (at your option) any later
// version.
//
// PISM is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
// details.
//
// You should have received a copy of the GNU General Public License
// along with PISM; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

#include <cassert>
#include <memory>
#include <stdexcept>

#include "pism/geometry/Geometry.hh"
#include "pism/stressbalance/StressBalance.hh"
#include "pism/stressbalance/ssa/SSAFD.hh"
#include "pism/util/Grid.hh"
#include "pism/util/array/CellType.hh"
#include "pism/util/petscwrappers/DM.hh"
#include "pism/util/petscwrappers/Vec.hh"
#include "pism/util/pism_options.hh"
#include "pism/util/pism_utilities.hh"

namespace pism {
namespace stressbalance {

SSAFD::KSPFailure::KSPFailure(const char *reason)
    : RuntimeError(ErrorLocation(), std::string("SSAFD KSP (linear solver) failed: ") + reason) {
  // empty
}

SSAFD::PicardFailure::PicardFailure(const std::string &message)
    : RuntimeError(ErrorLocation(), "SSAFD Picard iterations failed: " + message) {
  // empty
}

/*!
Because the FD implementation of the SSA uses Picard iteration, a PETSc KSP
and Mat are used directly.  In particular we set up \f$A\f$
(Mat m_A) and a \f$b\f$ (= Vec m_rhs) and iteratively solve
linear systems
  \f[ A x = b \f]
where \f$x\f$ is the solution (= Vec m_velocity_global).  A PETSc SNES object is never created.
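
At each Picard step the effective viscosity is evaluated at the previous iterate, so
(schematically) the linear system solved is \f$ A\big(x^{(k)}\big)\, x^{(k+1)} = b \f$;
the iteration stops once \f$\nu H\f$ stops changing much (see picard_manager()).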
 */
SSAFD::SSAFD(std::shared_ptr<const Grid> grid, bool regional_mode)
    : SSAFDBase(grid, regional_mode),
      m_nuH_old(grid, "nuH_old"),
      m_velocity_old(grid, "velocity_old") {

  m_velocity_old.metadata(0)
      .long_name("old SSA velocity field; used for re-trying with a different epsilon")
      .units("m s^-1");

  m_nuH_old.metadata(0)
      .long_name("ice thickness times effective viscosity (before an update)")
      .units("Pa s m");

  // The nuH viewer:
  m_nuh_viewer = nullptr;

  // PETSc objects and settings
  {
    auto dm = m_velocity_global.dm();

    PetscErrorCode ierr;
    ierr = DMSetMatType(*dm, MATAIJ);
    PISM_CHK(ierr, "DMSetMatType");

    ierr = DMCreateMatrix(*dm, m_A.rawptr());
    PISM_CHK(ierr, "DMCreateMatrix");

    ierr = KSPCreate(m_grid->com, m_KSP.rawptr());
    PISM_CHK(ierr, "KSPCreate");

    ierr = KSPSetOptionsPrefix(m_KSP, "ssafd_");
    PISM_CHK(ierr, "KSPSetOptionsPrefix");

    // Use a non-zero initial guess (i.e. SSA velocities from the last
    // solve() call).
    ierr = KSPSetInitialGuessNonzero(m_KSP, PETSC_TRUE);
    PISM_CHK(ierr, "KSPSetInitialGuessNonzero");

    // Use the initial residual norm.
    ierr = KSPConvergedDefaultSetUIRNorm(m_KSP);
    PISM_CHK(ierr, "KSPConvergedDefaultSetUIRNorm");
  }
}

//! @note Uses `PetscErrorCode` *intentionally*.
void SSAFD::pc_setup_bjacobi() {
  PetscErrorCode ierr;
  PC pc;

  ierr = KSPSetType(m_KSP, KSPGMRES);
  PISM_CHK(ierr, "KSPSetType");

  ierr = KSPSetOperators(m_KSP, m_A, m_A);
  PISM_CHK(ierr, "KSPSetOperators");

  // Get the PC from the KSP solver:
  ierr = KSPGetPC(m_KSP, &pc);
  PISM_CHK(ierr, "KSPGetPC");

  // Set the PC type:
  ierr = PCSetType(pc, PCBJACOBI);
  PISM_CHK(ierr, "PCSetType");

  // Process options:
  ierr = KSPSetFromOptions(m_KSP);
  PISM_CHK(ierr, "KSPSetFromOptions");
}

//! @note Uses `PetscErrorCode` *intentionally*.
void SSAFD::pc_setup_asm() {
  PetscErrorCode ierr;
  PC pc, sub_pc;

  // Set parameters equivalent to
  // -ksp_type gmres -ksp_norm_type unpreconditioned -ksp_pc_side right -pc_type asm -sub_pc_type lu

  ierr = KSPSetType(m_KSP, KSPGMRES);
  PISM_CHK(ierr, "KSPSetType");

  ierr = KSPSetOperators(m_KSP, m_A, m_A);
  PISM_CHK(ierr, "KSPSetOperators");

  // Switch to using the "unpreconditioned" norm.
  ierr = KSPSetNormType(m_KSP, KSP_NORM_UNPRECONDITIONED);
  PISM_CHK(ierr, "KSPSetNormType");

  // Switch to "right" preconditioning.
  ierr = KSPSetPCSide(m_KSP, PC_RIGHT);
  PISM_CHK(ierr, "KSPSetPCSide");

  // Get the PC from the KSP solver:
  ierr = KSPGetPC(m_KSP, &pc);
  PISM_CHK(ierr, "KSPGetPC");

  // Set the PC type:
  ierr = PCSetType(pc, PCASM);
  PISM_CHK(ierr, "PCSetType");

  // Set the sub-KSP object to "preonly"
  KSP *sub_ksp;
  ierr = PCSetUp(pc);
  PISM_CHK(ierr, "PCSetUp");

  ierr = PCASMGetSubKSP(pc, NULL, NULL, &sub_ksp);
  PISM_CHK(ierr, "PCASMGetSubKSP");

  ierr = KSPSetType(*sub_ksp, KSPPREONLY);
  PISM_CHK(ierr, "KSPSetType");

  // Set the PC of the sub-KSP to "LU".
  ierr = KSPGetPC(*sub_ksp, &sub_pc);
  PISM_CHK(ierr, "KSPGetPC");

  ierr = PCSetType(sub_pc, PCLU);
  PISM_CHK(ierr, "PCSetType");

  // Let the user override all this by processing command-line options:
  ierr = KSPSetFromOptions(m_KSP);
  PISM_CHK(ierr, "KSPSetFromOptions");
}

void SSAFD::init_impl() {
  SSA::init_impl();

  m_log->message(2, "  [using the KSP-based finite difference implementation]\n");

  int nuh_viewer_size =
      static_cast<int>(m_config->get_number("stress_balance.ssa.fd.nuH_viewer_size"));

  if (nuh_viewer_size > 0) {
    m_nuh_viewer = std::make_shared<petsc::Viewer>(m_grid->com, "nuH", nuh_viewer_size,
                                                   m_grid->Lx(), m_grid->Ly());
  }

  if (m_config->get_flag("stress_balance.calving_front_stress_bc")) {
    m_log->message(2, "  using PISM-PIK calving-front stress boundary condition ...\n");
  }

  m_default_pc_failure_count     = 0;
  m_default_pc_failure_max_count = 5;
}

void SSAFD::assemble_matrix(const Inputs &inputs, const array::Vector1 &velocity,
                            const array::Staggered1 &nuH, const array::CellType1 &cell_type, Mat A) {
  fd_operator(*inputs.geometry, inputs.bc_mask, m_bc_scaling, *inputs.basal_yield_stress,
              m_basal_sliding_law, velocity.array(), nuH, cell_type, &A, nullptr);
}

//! \brief Compute the vertically-averaged horizontal velocity from the shallow
//! shelf approximation.
/*!
This is the main procedure in the SSAFD.  It manages the nonlinear solve process
and the Picard iteration.

The outer loop (over index `k`) is the nonlinear iteration.  In this loop the effective
viscosity is computed by compute_nuH() and then the linear system is
set up and solved.

Specifically, we call the PETSc procedure KSPSolve() to solve the linear system.
Solving the linear system is itself an iteration, but it occurs
inside KSPSolve().  The user has full control of the KSP solve through the PETSc
interface.  The default choices for the KSP type `-ksp_type` and preconditioner type
`-pc_type` are GMRES(30) and block Jacobi with ILU on the
blocks, respectively.  The defaults usually work.  These choices are important
but poorly understood.  The eigenvalues of the linearized
SSA vary with ice sheet geometry and temperature in ways that are not well-studied.
Nonetheless these eigenvalues determine the convergence of
this (inner) linear iteration.  A well-chosen preconditioner can put the eigenvalues
in the right place so that the KSP can converge quickly.

The preconditioner will behave differently on different numbers of
processors.  If the user wants the results of SSA calculations to be
independent of the number of processors, then `-pc_type none` could
be used, but performance will be poor.

If you want to test different KSP methods, it may be helpful to see how many
iterations were necessary.  Use `-ksp_monitor`.
Initial testing implies that CGS takes roughly half the iterations of
GMRES(30), but is not significantly faster because the iterations are each
roughly twice as slow.  The outputs of the PETSc options `-ksp_monitor_singular_value`,
`-ksp_compute_eigenvalues` and `-ksp_plot_eigenvalues -draw_pause N`
(the last holds plots for N seconds) may be useful for diagnosis.
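
For example, an illustrative invocation (hypothetical; `pism ...` stands for the
rest of the run command) that switches to CGS and monitors the inner iteration:
\code
pism ... -ssafd_ksp_type cgs -ssafd_ksp_monitor
\endcode
Note the `ssafd_` prefix, set via KSPSetOptionsPrefix() in the constructor: KSP
and PC options for this solver are spelled with that prefix.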

The outer loop terminates when the effective viscosity times thickness
is no longer changing much, according to the tolerance set by the
option `-ssafd_picard_rtol`.  The outer loop also terminates when a maximum
number of iterations is exceeded.  We save the velocity from the last
time step in order to have a better estimate of the effective
viscosity than the u=v=0 result.

In truth there is an "outer outer" loop (over index `l`).  This attempts to
over-regularize the effective viscosity if the nonlinear iteration (the "outer"
loop over `k`) is not converging with the default regularization.  The same
over-regularization is attempted if the KSP object reports that it has not
converged.

(An alternative for recovery in the KSP diverged case, suggested by Jed, is to
revert to a direct linear solve, either for the whole domain (not scalable) or
on the subdomains.  This recovery alternative requires a more nontrivial choice
but it may be worthwhile.  Note the user can already do `-pc_type asm
-sub_pc_type lu` at the command line, forcing subdomain direct solves.)

FIXME: update this doxygen comment
*/
void SSAFD::solve(const Inputs &inputs) {

  // These computations do not depend on the solution, so they need to
  // be done only once.
  initialize_iterations(inputs);

  // Store away the old SSA velocity (it might be needed in case a solver
  // fails).
  m_velocity_old.copy_from(m_velocity);

  for (unsigned int k = 0; k < 3; ++k) {
    try {
      if (k == 0) {
        // default strategy
        picard_iteration(inputs, m_config->get_number("stress_balance.ssa.epsilon"), 1.0);

        break;
      }
      if (k == 1) {
        // try under-relaxing the iteration
        const double underrelax =
            m_config->get_number("stress_balance.ssa.fd.nuH_iter_failure_underrelaxation");
        m_log->message(
            1, "  re-trying with effective viscosity under-relaxation (parameter = %.2f) ...\n",
            underrelax);
        picard_iteration(inputs, m_config->get_number("stress_balance.ssa.epsilon"), underrelax);

        break;
      }
      if (k == 2) {
        // try over-regularization
        picard_strategy_regularization(inputs);

        break;
      }

      // if we reached this, then all strategies above failed
      write_system_petsc("all_strategies_failed");
      throw RuntimeError(PISM_ERROR_LOCATION, "all SSAFD strategies failed");
    } catch (PicardFailure &f) {
      // proceed to the next strategy
    }
  }

  if (m_config->get_flag("stress_balance.ssa.fd.extrapolate_at_margins")) {
    extrapolate_velocity(inputs.geometry->cell_type, m_velocity);
  }

  // Post-process velocities if the user asked for it:
  if (m_config->get_flag("stress_balance.ssa.fd.brutal_sliding")) {
    const double brutal_sliding_scaleFactor =
        m_config->get_number("stress_balance.ssa.fd.brutal_sliding_scale");
    m_velocity.scale(brutal_sliding_scaleFactor);

    m_velocity.update_ghosts();
  }
}

void SSAFD::picard_iteration(const Inputs &inputs, double nuH_regularization,
                             double nuH_iter_failure_underrelax) {

  if (m_default_pc_failure_count < m_default_pc_failure_max_count) {
    // Give BJACOBI another shot if we haven't tried it enough yet

    try {
      pc_setup_bjacobi();
      picard_manager(inputs, nuH_regularization, nuH_iter_failure_underrelax);

    } catch (KSPFailure &f) {

      m_default_pc_failure_count += 1;

      m_log->message(1, "  re-trying using the Additive Schwarz preconditioner...\n");

      pc_setup_asm();

      m_velocity.copy_from(m_velocity_old);

      picard_manager(inputs, nuH_regularization, nuH_iter_failure_underrelax);
    }

  } else {
    // otherwise use ASM
    pc_setup_asm();

    picard_manager(inputs, nuH_regularization, nuH_iter_failure_underrelax);
  }
}

//! \brief Manages the Picard iteration loop.
void SSAFD::picard_manager(const Inputs &inputs, double nuH_regularization,
                           double nuH_iter_failure_underrelax) {
  PetscErrorCode ierr;
  // ksp_iterations should be a PetscInt because it is used in the
  // KSPGetIterationNumber() call below
  PetscInt ksp_iterations, ksp_iterations_total = 0, outer_iterations;
  KSPConvergedReason reason;

  int max_iterations =
      static_cast<int>(m_config->get_number("stress_balance.ssa.fd.max_iterations"));
  double ssa_relative_tolerance =
      m_config->get_number("stress_balance.ssa.fd.relative_convergence");
  bool verbose = m_log->get_threshold() >= 2, very_verbose = m_log->get_threshold() > 2;

  // set the initial guess:
  m_velocity_global.copy_from(m_velocity);

  m_stdout_ssa.clear();

  {
    compute_nuH(inputs.geometry->ice_thickness, m_cell_type, m_velocity.array(), m_hardness,
                nuH_regularization, m_nuH);
  }

  if (m_nuh_viewer != nullptr) {
    update_nuH_viewers(m_nuH);
  }

  // outer loop
  for (int k = 0; k < max_iterations; ++k) {

    if (very_verbose) {
      m_stdout_ssa += pism::printf("  %2d:", k);
    }

    // assemble (or re-assemble) the matrix, which depends on the updated viscosity
    assemble_matrix(inputs, m_velocity, m_nuH, m_cell_type, m_A);

    if (very_verbose) {
      m_stdout_ssa += "A:";
    }

    // Call PETSc to solve linear system by iterative method; "inner iteration":
    ierr = KSPSetOperators(m_KSP, m_A, m_A);
    PISM_CHK(ierr, "KSPSetOperators");

    ierr = KSPSolve(m_KSP, m_rhs.vec(), m_velocity_global.vec());
    PISM_CHK(ierr, "KSPSolve");

    // Check if diverged; report to standard out about iteration
    ierr = KSPGetConvergedReason(m_KSP, &reason);
    PISM_CHK(ierr, "KSPGetConvergedReason");

    if (reason < 0) {
      // KSP diverged
      m_log->message(1, "PISM WARNING: KSPSolve() reports 'diverged'; reason = %d = '%s'\n",
                     reason, KSPConvergedReasons[reason]);

      write_system_petsc("kspdivergederror");

      // Tell the caller that we failed. (The caller might try again,
      // though.)
      throw KSPFailure(KSPConvergedReasons[reason]);
    }

    // report on KSP success; the "inner" iteration is done
    ierr = KSPGetIterationNumber(m_KSP, &ksp_iterations);
    PISM_CHK(ierr, "KSPGetIterationNumber");

    ksp_iterations_total += ksp_iterations;

    if (very_verbose) {
      m_stdout_ssa += pism::printf("S:%d,%d: ", (int)ksp_iterations, reason);
    }

    // limit ice speed
    {
      auto max_speed = m_config->get_number("stress_balance.ssa.fd.max_speed", "m second-1");
      int high_speed_counter = 0;

      array::AccessScope list{&m_velocity_global};

      for (auto p = m_grid->points(); p; p.next()) {
        const int i = p.i(), j = p.j();

        auto speed = m_velocity_global(i, j).magnitude();

        if (speed > max_speed) {
          m_velocity_global(i, j) *= max_speed / speed;
          high_speed_counter += 1;
        }
      }

      high_speed_counter = GlobalSum(m_grid->com, high_speed_counter);

      if (high_speed_counter > 0) {
        m_log->message(2, "  SSA speed was capped at %d locations\n", high_speed_counter);
      }
    }

    // Communicate so that we have stencil width for evaluation of effective
    // viscosity on next "outer" iteration (and geometry etc. if done):
    // Note that copy_from() updates ghosts of m_velocity.
    m_velocity.copy_from(m_velocity_global);

    // update viscosity
    double nuH_norm = 0.0, nuH_norm_change = 0.0;
    {
      // in preparation for measuring the change of the effective viscosity:
      m_nuH_old.copy_from(m_nuH);

      {
        compute_nuH(inputs.geometry->ice_thickness, m_cell_type, m_velocity.array(), m_hardness,
                    nuH_regularization, m_nuH);
      }

      if (nuH_iter_failure_underrelax != 1.0) {
        m_nuH.scale(nuH_iter_failure_underrelax);
        m_nuH.add(1.0 - nuH_iter_failure_underrelax, m_nuH_old);
      }
      auto norm = compute_nuH_norm(m_nuH, m_nuH_old);
      nuH_norm = norm[0];
      nuH_norm_change = norm[1];
    }

    if (m_nuh_viewer != nullptr) {
      update_nuH_viewers(m_nuH);
    }

    if (very_verbose) {
      m_stdout_ssa += pism::printf("|nu|_2, |Delta nu|_2/|nu|_2 = %10.3e %10.3e\n", nuH_norm,
                                   nuH_norm_change / nuH_norm);

      // assume that high verbosity shows interest in immediate
      // feedback about SSA iterations
      m_log->message(2, m_stdout_ssa);

      m_stdout_ssa.clear();
    }

    outer_iterations = k + 1;

    // check for viscosity convergence
    if (nuH_norm == 0 || nuH_norm_change / nuH_norm < ssa_relative_tolerance) {
      goto done;
    }
  } // outer loop (k)

  // If we're here, it means that we exceeded max_iterations and still
  // failed.

  throw PicardFailure(pism::printf("effective viscosity not converged after %d iterations\n"
                                   "with nuH_regularization=%8.2e.",
                                   max_iterations, nuH_regularization));

done:

  if (very_verbose) {
    auto tempstr =
        pism::printf("... =%5d outer iterations, ~%3.1f KSP iterations each\n",
                     (int)outer_iterations, ((double)ksp_iterations_total) / outer_iterations);
    m_stdout_ssa += tempstr;
  } else if (verbose) {
    // at default verbosity, just record last nuH_norm_change and iterations
    auto tempstr =
        pism::printf("%5d outer iterations, ~%3.1f KSP iterations each\n", (int)outer_iterations,
                     ((double)ksp_iterations_total) / outer_iterations);

    m_stdout_ssa += tempstr;
  }

  if (verbose) {
    m_stdout_ssa = "  SSA: " + m_stdout_ssa;
  }
}

//! Old SSAFD recovery strategy: increase the SSA regularization parameter.
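//!
//! Each retry multiplies the regularization parameter by 4 before calling
//! picard_iteration(), so the attempted values are \f$4\epsilon, 4^2\epsilon,
//! \dots, 4^5\epsilon = 1024\,\epsilon\f$ (at most `max_tries` = 5 attempts).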
void SSAFD::picard_strategy_regularization(const Inputs &inputs) {
  // this has no units; epsilon goes up by this ratio when the previous value failed
  const double DEFAULT_EPSILON_MULTIPLIER_SSA = 4.0;
  double nuH_regularization = m_config->get_number("stress_balance.ssa.epsilon");
  unsigned int k = 0, max_tries = 5;

  if (nuH_regularization <= 0.0) {
    throw PicardFailure("will not attempt over-regularization of nuH\n"
                        "because nuH_regularization == 0.0.");
  }

  while (k < max_tries) {
    m_velocity.copy_from(m_velocity_old);
    m_log->message(1, "  re-trying with nuH_regularization multiplied by %8.2f...\n",
                   DEFAULT_EPSILON_MULTIPLIER_SSA);

    nuH_regularization *= DEFAULT_EPSILON_MULTIPLIER_SSA;

    try {
      // 1.0 is the under-relaxation parameter
      picard_iteration(inputs, nuH_regularization, 1.0);
      // if this call succeeded, stop over-regularizing
      break;
    } catch (PicardFailure &f) {
      k += 1;

      if (k == max_tries) {
        throw PicardFailure("exceeded the max. number of nuH over-regularization attempts");
      }
    }
  }
}

//! \brief Compute the norm of `nu H` and the norm of the change in `nu H`.
/*!
Verification and PST experiments
suggest that an \f$L^1\f$ criterion for convergence is best.  For verification
there seems to be little difference, presumably because the solutions are smooth
and the norms are roughly equivalent on a subspace of smooth functions.  For PST,
the \f$L^1\f$ criterion gives faster runs with essentially the same results.
Presumably that is because rapid (temporal and spatial) variation in
\f$\nu H\f$ occurs at margins, occupying very few horizontal grid cells.
For the significant (e.g. in terms of flux) parts of the flow, it is o.k. to ignore
a bit of bad behavior at these few places, and \f$L^1\f$ ignores it more than
\f$L^2\f$ (much less \f$L^\infty\f$, which might not work at all).
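
Schematically, the outer (Picard) iteration in picard_manager() stops once
\f[ \frac{\|\Delta(\nu H)\|_1}{\|\nu H\|_1} < \text{stress\_balance.ssa.fd.relative\_convergence}, \f]
where both norms combine the two staggered-grid components as computed below.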

Note: clobbers `nuH_old` -- it is used to compute the change in `nu H`.
 */
std::array<double, 2> SSAFD::compute_nuH_norm(const array::Staggered &nuH,
                                              array::Staggered &nuH_old) {

  const double area = m_grid->cell_area();
  const NormType MY_NORM = NORM_1;

  // Test for change in nu
  nuH_old.add(-1, nuH);

  std::vector<double> nuNorm = nuH.norm(MY_NORM), nuChange = nuH_old.norm(MY_NORM);

  nuChange[0] *= area;
  nuChange[1] *= area;
  nuNorm[0] *= area;
  nuNorm[1] *= area;

  double norm_change = sqrt(PetscSqr(nuChange[0]) + PetscSqr(nuChange[1]));
  double norm = sqrt(PetscSqr(nuNorm[0]) + PetscSqr(nuNorm[1]));

  return { norm, norm_change };
}


//! Update the nuH viewer, which shows log10(nu H).
void SSAFD::update_nuH_viewers(const array::Staggered &nuH) {

  if (not m_nuh_viewer) {
    return;
  }

  array::Scalar tmp(m_grid, "nuH");
  tmp.metadata(0)
      .long_name("log10 of (viscosity * thickness)")
      .units("Pa s m");

  array::AccessScope list{&nuH, &tmp};

  for (auto p = m_grid->points(); p; p.next()) {
    const int i = p.i(), j = p.j();

    double avg_nuH = 0.5 * (nuH(i, j, 0) + nuH(i, j, 1));
    if (avg_nuH > 1.0e14) {
      tmp(i, j) = log10(avg_nuH);
    } else {
      // floor the plotted value at log10(1e14) = 14
      tmp(i, j) = 14.0;
    }
  }

  tmp.view({ m_nuh_viewer });
}

void SSAFD::write_system_petsc(const std::string &namepart) {
  PetscErrorCode ierr;

  // write a file with a fixed filename; avoid zillions of files
  std::string filename = "SSAFD_" + namepart + ".petsc";
  m_log->message(1,
                 "  writing linear system to PETSc binary file %s ...\n", filename.c_str());

  petsc::Viewer viewer; // will be destroyed automatically
  ierr = PetscViewerBinaryOpen(m_grid->com, filename.c_str(), FILE_MODE_WRITE,
                               viewer.rawptr());
  PISM_CHK(ierr, "PetscViewerBinaryOpen");

  ierr = MatView(m_A, viewer);
  PISM_CHK(ierr, "MatView");

  ierr = VecView(m_rhs.vec(), viewer);
  PISM_CHK(ierr, "VecView");
}

} // end of namespace stressbalance
} // end of namespace pism