opm-simulators
Loading...
Searching...
No Matches
fvbaselinearizer.hh
Go to the documentation of this file.
1// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2// vi: set et ts=4 sw=4 sts=4:
3/*
4 This file is part of the Open Porous Media project (OPM).
5
6 OPM is free software: you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation, either version 2 of the License, or
9 (at your option) any later version.
10
11 OPM is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with OPM. If not, see <http://www.gnu.org/licenses/>.
18
19 Consult the COPYING file in the top-level source directory of this
20 module for the precise wording of the license and the list of
21 copyright holders.
22*/
28#ifndef EWOMS_FV_BASE_LINEARIZER_HH
29#define EWOMS_FV_BASE_LINEARIZER_HH
30
31#include <dune/common/fmatrix.hh>
32#include <dune/common/fvector.hh>
33#include <dune/common/version.hh>
34
35#include <dune/grid/common/gridenums.hh>
36
37#include <opm/common/Exceptions.hpp>
38#include <opm/common/TimingMacros.hpp>
39
40#include <opm/grid/utility/SparseTable.hpp>
41
42#include <opm/material/common/MathToolbox.hpp>
43
47
51
52#include <cstddef>
53#include <exception> // current_exception, rethrow_exception
54#include <iostream>
55#include <map>
56#include <memory>
57#include <mutex>
58#include <set>
59#include <vector>
60
61namespace Opm {
62
63// forward declarations
64template<class TypeTag>
66
76template<class TypeTag>
77class FvBaseLinearizer
78{
90
98
100
101 using Toolbox = MathToolbox<Evaluation>;
102
103 using Element = typename GridView::template Codim<0>::Entity;
104 using ElementIterator = typename GridView::template Codim<0>::Iterator;
105
106 using Vector = GlobalEqVector;
107
108 using IstlMatrix = typename SparseMatrixAdapter::IstlMatrix;
109
112
113 using MatrixBlock = typename SparseMatrixAdapter::MatrixBlock;
114 using VectorBlock = Dune::FieldVector<Scalar, numEq>;
115
116 static constexpr bool linearizeNonLocalElements = getPropValue<TypeTag, Properties::LinearizeNonLocalElements>();
117
119
120public:
    // Default-construct an uninitialized linearizer; init() must be called
    // before any linearization can take place.
    FvBaseLinearizer() = default;

    // copying the linearizer is not a good idea
    FvBaseLinearizer(const FvBaseLinearizer&) = delete;

    /*!
     * \brief Register all run-time parameters for the Jacobian linearizer.
     *
     * This linearizer declares no run-time parameters of its own.
     */
    static void registerParameters()
    {}
131
    /*!
     * \brief Initialize the linearizer.
     *
     * Stores a pointer to the simulator, discards any previously created
     * Jacobian matrix and per-thread element contexts, and creates the
     * full-domain descriptor used by the no-argument linearizeDomain().
     *
     * \param simulator The simulator object; must outlive this linearizer
     *                  (only a non-owning pointer is kept).
     */
    void init(Simulator& simulator)
    {
        simulatorPtr_ = &simulator;
        // force the Jacobian to be recreated on the next linearization
        eraseMatrix();
        elementCtx_.clear();
        fullDomain_ = std::make_unique<FullDomain>(simulator.gridView());
    }
148
157 {
158 jacobian_.reset();
159 }
160
171 {
174 }
175
187 {
188 linearizeDomain(*fullDomain_);
189 }
190
    /*!
     * \brief Linearize the part of the non-linear system of equations that
     *        is associated with a (sub-)domain of the spatial grid.
     *
     * The domain object must provide a `view` member (a grid view over the
     * elements to linearize). On the first call the Jacobian matrix is
     * created lazily, since the auxiliary modules assume a fully
     * initialized problem/model/grid.
     *
     * \throws NumericalProblem if any MPI rank fails to linearize; the
     *         failure is made collective via comm().min() so all ranks
     *         throw consistently.
     */
    template <class SubDomainType>
    void linearizeDomain(const SubDomainType& domain)
    {
        OPM_TIMEBLOCK(linearizeDomain);
        // we defer the initialization of the Jacobian matrix until here because the
        // auxiliary modules usually assume the problem, model and grid to be fully
        // initialized...
        if (!jacobian_) {
            initFirstIteration_();
        }

        // Called here because it is no longer called from linearize_().
        if (problem_().iterationContext().inLocalSolve()) {
            // local solve: only zero the rows belonging to this sub-domain
            resetSystem_(domain);
        }
        else {
            // global solve: zero the whole residual and Jacobian
            resetSystem_();
        }

        // int rather than bool so the flag can be reduced with comm().min()
        int succeeded;
        try {
            linearize_(domain);
            succeeded = 1;
        }
        catch (const std::exception& e)
        {
            std::cout << "rank " << simulator_().gridView().comm().rank()
                      << " caught an exception while linearizing:" << e.what()
                      << "\n" << std::flush;
            succeeded = 0;
        }
        catch (...)
        {
            std::cout << "rank " << simulator_().gridView().comm().rank()
                      << " caught an exception while linearizing"
                      << "\n" << std::flush;
            succeeded = 0;
        }
        // collective reduction: if any rank failed, every rank must observe
        // the failure so the non-linear solver can react uniformly
        succeeded = simulator_().gridView().comm().min(succeeded);

        if (!succeeded) {
            throw NumericalProblem("A process did not succeed in linearizing the system");
        }
    }
235
    // Finish assembly by delegating to the matrix backend's finalize().
    // Precondition: a linearization has been performed (jacobian_ non-null).
    void finalize()
    { jacobian_->finalize(); }
238
244 {
245 OPM_TIMEBLOCK(linearizeAuxiliaryEquations);
246 // flush possible local caches into matrix structure
247 jacobian_->commit();
248
249 auto& model = model_();
250 const auto& comm = simulator_().gridView().comm();
251 for (unsigned auxModIdx = 0; auxModIdx < model.numAuxiliaryModules(); ++auxModIdx) {
252 bool succeeded = true;
253 try {
254 model.auxiliaryModule(auxModIdx)->linearize(*jacobian_, residual_);
255 }
256 catch (const std::exception& e) {
257 succeeded = false;
258
259 std::cout << "rank " << simulator_().gridView().comm().rank()
260 << " caught an exception while linearizing:" << e.what()
261 << "\n" << std::flush;
262 }
263
264 succeeded = comm.min(succeeded);
265
266 if (!succeeded) {
267 throw NumericalProblem("linearization of an auxiliary equation failed");
268 }
269 }
270 }
271
    /*!
     * \brief Return constant reference to global Jacobian matrix backend.
     */
    const SparseMatrixAdapter& jacobian() const
    { return *jacobian_; }

    // mutable access to the Jacobian matrix backend
    SparseMatrixAdapter& jacobian()
    { return *jacobian_; }

    /*!
     * \brief Return constant reference to global residual vector.
     */
    const GlobalEqVector& residual() const
    { return residual_; }

    // mutable access to the global residual vector
    GlobalEqVector& residual()
    { return residual_; }
289
290 void setLinearizationType(LinearizationType linearizationType){
291 linearizationType_ = linearizationType;
292 };
293
294 const LinearizationType& getLinearizationType() const
295 { return linearizationType_; }
296
    // No-op: this linearizer stores no discretization parameters. Kept so
    // that all linearizer implementations expose the same interface.
    void updateDiscretizationParameters()
    {
        // This linearizer stores no such parameters.
    }

    // No-op: this linearizer stores no boundary-condition data.
    void updateBoundaryConditionData()
    {
        // This linearizer stores no such data.
    }

    // No-op: this linearizer stores no flows data (flowsInfo_/floresInfo_
    // are never populated here).
    void updateFlowsInfo()
    {
        // This linearizer stores no such data.
    }
311
    /*!
     * \brief Returns the map of constraint degrees of freedom.
     *
     * Only non-empty if the EnableConstraints property is true.
     */
    const std::map<unsigned, Constraints>& constraintsMap() const
    { return constraintsMap_; }

    /*!
     * \brief Return constant reference to the flowsInfo.
     *
     * NOTE(review): nothing in this class fills flowsInfo_; presumably a
     * derived/alternative linearizer populates it — confirm before relying
     * on its contents.
     */
    const auto& getFlowsInfo() const
    { return flowsInfo_; }

    /*!
     * \brief Return constant reference to the floresInfo.
     */
    const auto& getFloresInfo() const
    { return floresInfo_; }

    /*!
     * \brief Return constant reference to the velocityInfo.
     */
    const auto& getVelocityInfo() const
    { return velocityInfo_; }
344
345 template <class SubDomainType>
346 void resetSystem_(const SubDomainType& domain)
347 {
348 if (!jacobian_) {
349 initFirstIteration_();
350 }
351
352 // loop over selected elements
353 using GridViewType = decltype(domain.view);
354 ThreadedEntityIterator<GridViewType, /*codim=*/0> threadedElemIt(domain.view);
355#ifdef _OPENMP
356#pragma omp parallel
357#endif
358 {
359 const unsigned threadId = ThreadManager::threadId();
360 auto elemIt = threadedElemIt.beginParallel();
361 MatrixBlock zeroBlock;
362 zeroBlock = 0.0;
363 for (; !threadedElemIt.isFinished(elemIt); elemIt = threadedElemIt.increment()) {
364 const Element& elem = *elemIt;
365 ElementContext& elemCtx = *elementCtx_[threadId];
366 elemCtx.updatePrimaryStencil(elem);
367 // Set to zero the relevant residual and jacobian parts.
368 for (unsigned primaryDofIdx = 0;
369 primaryDofIdx < elemCtx.numPrimaryDof(/*timeIdx=*/0);
370 ++primaryDofIdx)
371 {
372 const unsigned globI = elemCtx.globalSpaceIndex(primaryDofIdx, /*timeIdx=*/0);
373 residual_[globI] = 0.0;
374 jacobian_->clearRow(globI, 0.0);
375 }
376 }
377 }
378 }
379
380private:
    // convenience access to the simulator this linearizer was init()'ed with
    Simulator& simulator_()
    { return *simulatorPtr_; }

    const Simulator& simulator_() const
    { return *simulatorPtr_; }

    // the problem object of the simulation
    Problem& problem_()
    { return simulator_().problem(); }

    const Problem& problem_() const
    { return simulator_().problem(); }

    // the model whose residual is being linearized
    Model& model_()
    { return simulator_().model(); }

    const Model& model_() const
    { return simulator_().model(); }

    // note: the grid view is obtained via the problem, not the simulator
    const GridView& gridView_() const
    { return problem_().gridView(); }

    const ElementMapper& elementMapper_() const
    { return model_().elementMapper(); }

    const DofMapper& dofMapper_() const
    { return model_().dofMapper(); }
407
    // One-time lazy setup performed before the first linearization: create
    // the matrix, size and zero the residual, and allocate one element
    // context per worker thread.
    void initFirstIteration_()
    {
        // initialize the BCRS matrix for the Jacobian of the residual function
        createMatrix_();

        // initialize the Jacobian matrix and the vector for the residual function
        residual_.resize(model_().numTotalDof());
        resetSystem_();

        // create the per-thread context objects
        elementCtx_.clear();
        elementCtx_.reserve(ThreadManager::maxThreads());
        for (unsigned threadId = 0; threadId != ThreadManager::maxThreads(); ++ threadId) {
            elementCtx_.push_back(std::make_unique<ElementContext>(simulator_()));
        }
    }
424
425 // Construct the BCRS matrix for the Jacobian of the residual function
426 void createMatrix_()
427 {
428 const auto& model = model_();
429 Stencil stencil(gridView_(), model_().dofMapper());
430
431 // for the main model, find out the global indices of the neighboring degrees of
432 // freedom of each primary degree of freedom
433 sparsityPattern_.clear();
434 sparsityPattern_.resize(model.numTotalDof());
435
436 for (const auto& elem : elements(gridView_())) {
437 stencil.update(elem);
438
439 for (unsigned primaryDofIdx = 0; primaryDofIdx < stencil.numPrimaryDof(); ++primaryDofIdx) {
440 const unsigned myIdx = stencil.globalSpaceIndex(primaryDofIdx);
441
442 for (unsigned dofIdx = 0; dofIdx < stencil.numDof(); ++dofIdx) {
443 const unsigned neighborIdx = stencil.globalSpaceIndex(dofIdx);
444 sparsityPattern_[myIdx].insert(neighborIdx);
445 }
446 }
447 }
448
449 // add the additional neighbors and degrees of freedom caused by the auxiliary
450 // equations
451 const std::size_t numAuxMod = model.numAuxiliaryModules();
452 for (unsigned auxModIdx = 0; auxModIdx < numAuxMod; ++auxModIdx) {
453 model.auxiliaryModule(auxModIdx)->addNeighbors(sparsityPattern_);
454 }
455
456 // allocate raw matrix
457 jacobian_ = std::make_unique<SparseMatrixAdapter>(simulator_());
458
459 // create matrix structure based on sparsity pattern
460 jacobian_->reserve(sparsityPattern_);
461 }
462
    // reset the global linear system of equations: zero the whole residual
    // vector and all entries of the Jacobian matrix.
    void resetSystem_()
    {
        residual_ = 0.0;
        // zero all matrix entries
        jacobian_->clear();
    }
470
471 // query the problem for all constraint degrees of freedom. note that this method is
472 // quite involved and is thus relatively slow.
473 void updateConstraintsMap_()
474 {
475 if (!enableConstraints_()) {
476 // constraints are not explictly enabled, so we don't need to consider them!
477 return;
478 }
479
480 constraintsMap_.clear();
481
482 // loop over all elements...
483 ThreadedEntityIterator<GridView, /*codim=*/0> threadedElemIt(gridView_());
484#ifdef _OPENMP
485#pragma omp parallel
486#endif
487 {
488 const unsigned threadId = ThreadManager::threadId();
489 ElementIterator elemIt = threadedElemIt.beginParallel();
490 for (; !threadedElemIt.isFinished(elemIt); elemIt = threadedElemIt.increment()) {
491 // create an element context (the solution-based quantities are not
492 // available here!)
493 const Element& elem = *elemIt;
494 ElementContext& elemCtx = *elementCtx_[threadId];
495 elemCtx.updateStencil(elem);
496
497 // check if the problem wants to constrain any degree of the current
498 // element's freedom. if yes, add the constraint to the map.
499 for (unsigned primaryDofIdx = 0;
500 primaryDofIdx < elemCtx.numPrimaryDof(/*timeIdx=*/0);
501 ++primaryDofIdx)
502 {
503 Constraints constraints;
504 elemCtx.problem().constraints(constraints,
505 elemCtx,
506 primaryDofIdx,
507 /*timeIdx=*/0);
508 if (constraints.isActive()) {
509 const unsigned globI = elemCtx.globalSpaceIndex(primaryDofIdx, /*timeIdx=*/0);
510 constraintsMap_[globI] = constraints;
511 continue;
512 }
513 }
514 }
515 }
516 }
517
    // linearize the whole or part of the system.
    //
    // Assembles the Jacobian/residual contributions of all elements of
    // `domain` in parallel, refreshing the constraints map on the first
    // global iteration and applying constraints before and after assembly.
    template <class SubDomainType>
    void linearize_(const SubDomainType& domain)
    {
        OPM_TIMEBLOCK(linearize_);

        // We do not call resetSystem_() here, since that will set
        // the full system to zero, not just our part.
        // Instead, that must be called before starting the linearization.

        // before the first iteration of each time step, we need to update the
        // constraints. (i.e., we assume that constraints can be time dependent, but they
        // can't depend on the solution.)
        if (problem_().iterationContext().isFirstGlobalIteration()) {
            updateConstraintsMap_();
        }

        applyConstraintsToSolution_();

        // to avoid a race condition if two threads handle an exception at the same time,
        // we use an explicit lock to control access to the exception storage object
        // amongst thread-local handlers
        std::mutex exceptionLock;

        // storage to any exception that needs to be bridged out of the
        // parallel block below. initialized to null to indicate no exception
        std::exception_ptr exceptionPtr = nullptr;

        // relinearize the elements...
        using GridViewType = decltype(domain.view);
        ThreadedEntityIterator<GridViewType, /*codim=*/0> threadedElemIt(domain.view);
#ifdef _OPENMP
#pragma omp parallel
#endif
        {
            auto elemIt = threadedElemIt.beginParallel();
            auto nextElemIt = elemIt;
            try {
                for (; !threadedElemIt.isFinished(elemIt); elemIt = nextElemIt) {
                    // give the model and the problem a chance to prefetch the data required
                    // to linearize the next element, but only if we need to consider it
                    nextElemIt = threadedElemIt.increment();
                    if (!threadedElemIt.isFinished(nextElemIt)) {
                        const auto& nextElem = *nextElemIt;
                        if (linearizeNonLocalElements ||
                            nextElem.partitionType() == Dune::InteriorEntity)
                        {
                            model_().prefetch(nextElem);
                            problem_().prefetch(nextElem);
                        }
                    }

                    // skip ghost/overlap elements unless explicitly requested
                    const auto& elem = *elemIt;
                    if (!linearizeNonLocalElements && elem.partitionType() != Dune::InteriorEntity) {
                        continue;
                    }

                    linearizeElement_(elem);
                }
            }
            // If an exception occurs in the parallel block, it won't escape the
            // block; terminate() is called instead of a handler outside! hence, we
            // tuck any exceptions that occur away in the pointer. If an exception
            // occurs in more than one thread at the same time, we must pick one of
            // them to be rethrown as we cannot have two active exceptions at the
            // same time. This solution essentially picks one at random. This will
            // only be a problem if two different kinds of exceptions are thrown, for
            // instance if one thread experiences a (recoverable) numerical issue
            // while another is out of memory.
            catch(...) {
                std::lock_guard<std::mutex> take(exceptionLock);
                exceptionPtr = std::current_exception();
                threadedElemIt.setFinished();
            }
        } // parallel block

        // after reduction from the parallel block, exceptionPtr will point to
        // a valid exception if one occurred in one of the threads; rethrow
        // it here to let the outer handler take care of it properly
        if (exceptionPtr) {
            std::rethrow_exception(exceptionPtr);
        }

        applyConstraintsToLinearization_();
    }
603
604 // linearize an element in the interior of the process' grid partition
605 template <class ElementType>
606 void linearizeElement_(const ElementType& elem)
607 {
608 const unsigned threadId = ThreadManager::threadId();
609
610 ElementContext& elementCtx = *elementCtx_[threadId];
611 auto& localLinearizer = model_().localLinearizer(threadId);
612
613 // the actual work of linearization is done by the local linearizer class
614 localLinearizer.linearize(elementCtx, elem);
615
616 // update the right hand side and the Jacobian matrix
618 globalMatrixMutex_.lock();
619 }
620
621 const std::size_t numPrimaryDof = elementCtx.numPrimaryDof(/*timeIdx=*/0);
622 for (unsigned primaryDofIdx = 0; primaryDofIdx < numPrimaryDof; ++primaryDofIdx) {
623 const unsigned globI = elementCtx.globalSpaceIndex(/*spaceIdx=*/primaryDofIdx, /*timeIdx=*/0);
624
625 // update the right hand side
626 residual_[globI] += localLinearizer.residual(primaryDofIdx);
627
628 // update the global Jacobian matrix
629 for (unsigned dofIdx = 0; dofIdx < elementCtx.numDof(/*timeIdx=*/0); ++dofIdx) {
630 const unsigned globJ = elementCtx.globalSpaceIndex(/*spaceIdx=*/dofIdx, /*timeIdx=*/0);
631
632 jacobian_->addToBlock(globJ, globI, localLinearizer.jacobian(dofIdx, primaryDofIdx));
633 }
634 }
635
637 globalMatrixMutex_.unlock();
638 }
639 }
640
    // apply the constraints to the solution. (i.e., the solution of constraint degrees
    // of freedom is set to the value of the constraint.)
    //
    // Both the current and the previous solution are overwritten —
    // presumably so that time-derivative terms of constrained DOFs vanish;
    // verify against the time discretization in use.
    void applyConstraintsToSolution_()
    {
        if (!enableConstraints_()) {
            return;
        }

        // TODO: assuming a history size of 2 only works for Euler time discretizations!
        auto& sol = model_().solution(/*timeIdx=*/0);
        auto& oldSol = model_().solution(/*timeIdx=*/1);

        for (const auto& constraint : constraintsMap_) {
            sol[constraint.first] = constraint.second;
            oldSol[constraint.first] = constraint.second;
        }
    }
658
659 // apply the constraints to the linearization. (i.e., for constrain degrees of
660 // freedom the Jacobian matrix maps to identity and the residual is zero)
661 void applyConstraintsToLinearization_()
662 {
663 if (!enableConstraints_()) {
664 return;
665 }
666
667 for (const auto& constraint : constraintsMap_) {
668 // reset the column of the Jacobian matrix
669 // put an identity matrix on the main diagonal of the Jacobian
670 jacobian_->clearRow(constraint.first, Scalar(1.0));
671
672 // make the right-hand side of constraint DOFs zero
673 residual_[constraint.first] = 0.0;
674 }
675 }
676
677 static bool enableConstraints_()
679
    Simulator* simulatorPtr_{}; // non-owning; set by init()

    // one element context per worker thread, indexed by thread id
    std::vector<std::unique_ptr<ElementContext>> elementCtx_;

    // The constraint equations (only non-empty if the
    // EnableConstraints property is true)
    std::map<unsigned, Constraints> constraintsMap_;

    // per-face flow record; nothing in this class fills these tables —
    // presumably a derived/alternative linearizer does (TODO confirm)
    struct FlowInfo
    {
        int faceId;
        VectorBlock flow;
        unsigned int nncId;
    };
    SparseTable<FlowInfo> flowsInfo_;
    SparseTable<FlowInfo> floresInfo_;

    // per-face velocity record; same caveat as FlowInfo above
    struct VelocityInfo
    {
        int faceId;
        VectorBlock velocity;
    };
    SparseTable<VelocityInfo> velocityInfo_;

    // the jacobian matrix
    std::unique_ptr<SparseMatrixAdapter> jacobian_;

    // the right-hand side
    GlobalEqVector residual_;

    LinearizationType linearizationType_;

    // serializes concurrent updates of the global matrix/residual from the
    // per-thread element linearizations
    std::mutex globalMatrixMutex_;

    // row -> set of coupled column indices, built by createMatrix_()
    std::vector<std::set<unsigned int>> sparsityPattern_;

    struct FullDomain
    {
        explicit FullDomain(const GridView& v) : view (v) {}
        GridView view;
        std::vector<bool> interior; // Should remain empty.
    };
    // Simple domain object used for full-domain linearization, it allows
    // us to have the same interface for sub-domain and full-domain work.
    // Pointer since it must defer construction, due to GridView member.
    std::unique_ptr<FullDomain> fullDomain_;
725};
726
727} // namespace Opm
728
729#endif
Base class for specifying auxiliary equations.
The base class for the element-centered finite-volume discretization scheme.
Definition ecfvdiscretization.hh:160
const auto & getFlowsInfo() const
Return constant reference to the flowsInfo.
Definition fvbaselinearizer.hh:325
const GlobalEqVector & residual() const
Return constant reference to global residual vector.
Definition fvbaselinearizer.hh:284
static void registerParameters()
Register all run-time parameters for the Jacobian linearizer.
Definition fvbaselinearizer.hh:129
void linearize()
Linearize the full system of non-linear equations.
Definition fvbaselinearizer.hh:170
void init(Simulator &simulator)
Initialize the linearizer.
Definition fvbaselinearizer.hh:141
const auto & getVelocityInfo() const
Return constant reference to the velocityInfo.
Definition fvbaselinearizer.hh:342
void linearizeDomain()
Linearize the part of the non-linear system of equations that is associated with the spatial domain.
Definition fvbaselinearizer.hh:186
const SparseMatrixAdapter & jacobian() const
Return constant reference to global Jacobian matrix backend.
Definition fvbaselinearizer.hh:275
const auto & getFloresInfo() const
Return constant reference to the floresInfo.
Definition fvbaselinearizer.hh:333
void linearizeAuxiliaryEquations()
Linearize the part of the non-linear system of equations that is associated with the spatial domain.
Definition fvbaselinearizer.hh:243
const std::map< unsigned, Constraints > & constraintsMap() const
Returns the map of constraint degrees of freedom.
Definition fvbaselinearizer.hh:317
void eraseMatrix()
Causes the Jacobian matrix to be recreated from scratch before the next iteration.
Definition fvbaselinearizer.hh:156
Definition matrixblock.hh:229
Manages the initializing and running of time dependent problems.
Definition simulator.hh:84
const GridView & gridView() const
Return the grid view for which the simulation is done.
Definition simulator.hh:246
Simplifies multi-threaded capabilities.
Definition threadmanager.hpp:36
static unsigned maxThreads()
Return the maximum number of threads of the current process.
Definition threadmanager.hpp:66
static unsigned threadId()
Return the index of the current OpenMP thread.
Definition threadmanager.cpp:84
Declare the properties used by the infrastructure code of the finite volume discretizations.
Provides data handles for parallel communication which operate on DOFs.
The common code for the linearizers of non-linear systems of equations.
This file contains a set of helper functions used by VFPProd / VFPInj.
Definition blackoilbioeffectsmodules.hh:45
typename Properties::Detail::GetPropImpl< TypeTag, Property >::type::type GetPropType
get the type alias defined in the property (equivalent to old macro GET_PROP_TYPE(....
Definition propertysystem.hh:233
ElementType
The types of reference elements available.
Definition vcfvstencil.hh:57
constexpr auto getPropValue()
get the value data member of a property
Definition propertysystem.hh:240
Provides an STL-iterator like interface to iterate over the entities of a GridView in OpenMP threaded a...
Simplifies multi-threaded capabilities.