    OPM_TIMEBLOCK(prec_construct);
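
    // Choose the MPI communicator: a self-communicator in the sequential case,
    // otherwise the communicator of the parallel information object.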
    if constexpr (std::is_same_v<Comm, Dune::Amg::SequentialInformation>) {
        mpi_comm = MPI_COMM_SELF;
    } else {
        mpi_comm = comm.communicator();
    }
    MPI_Comm_size(mpi_comm, &size);
    MPI_Comm_rank(mpi_comm, &rank);

    assert(size == comm.communicator().size());
    assert(rank == comm.communicator().rank());
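
    // Initialize Hypre for the chosen backend, then create the BoomerAMG solver
    // and set its parameters from the property tree.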
    HypreInterface::initialize(use_gpu_backend_);

    solver_ = HypreInterface::createAMGSolver();
    HypreInterface::setSolverParameters(solver_, prm, use_gpu_backend_);
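
    // Gather the parallel DOF information (owned rows, global DOF offset, Dune-to-Hypre
    // index mapping), then build the sparsity pattern in Hypre's global numbering.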
    par_info_ = HypreInterface::setupHypreParallelInfo(comm_, A_);

    sparsity_pattern_ = HypreInterface::setupSparsityPattern(A_, par_info_, par_info_.owner_first);
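
    // Per-row start offsets used when copying the Dune matrix values into Hypre.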
    host_arrays_.row_indexes = HypreInterface::computeRowIndexes(
        A_, sparsity_pattern_.ncols, par_info_.local_dune_to_local_hypre, par_info_.owner_first);
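
    // Global indices of the owned rows, starting at this rank's DOF offset;
    // used when transferring vectors to and from Hypre.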
    host_arrays_.indices.resize(par_info_.N_owned);
    std::iota(host_arrays_.indices.begin(), host_arrays_.indices.end(), par_info_.dof_offset);
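
    // Without owner-first ordering the owned values are not contiguous in the Dune
    // vector, so a scratch buffer is needed to gather them.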
    if (!par_info_.owner_first) {
        host_arrays_.continuous_vector_values.resize(par_info_.N_owned);
    }
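
    // GPU backend: allocate device-side copies of the assembly arrays and upload
    // the static data (sparsity pattern, row offsets, owned-row indices) once.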
    if (use_gpu_backend_) {
#if HYPRE_USING_CUDA || HYPRE_USING_HIP
        device_arrays_.ncols_device = hypre_CTAlloc(HYPRE_Int, par_info_.N_owned, HYPRE_MEMORY_DEVICE);
        device_arrays_.rows_device = hypre_CTAlloc(HYPRE_BigInt, par_info_.N_owned, HYPRE_MEMORY_DEVICE);
        device_arrays_.cols_device = hypre_CTAlloc(HYPRE_BigInt, sparsity_pattern_.nnz, HYPRE_MEMORY_DEVICE);
        device_arrays_.row_indexes_device = hypre_CTAlloc(HYPRE_Int, par_info_.N_owned, HYPRE_MEMORY_DEVICE);
        device_arrays_.indices_device = hypre_CTAlloc(HYPRE_BigInt, par_info_.N_owned, HYPRE_MEMORY_DEVICE);
        device_arrays_.vector_buffer_device = hypre_CTAlloc(HYPRE_Real, par_info_.N_owned, HYPRE_MEMORY_DEVICE);

        device_arrays_.matrix_buffer_device = hypre_CTAlloc(HYPRE_Real, A_.nonzeroes(), HYPRE_MEMORY_DEVICE);
        hypre_TMemcpy(device_arrays_.ncols_device, sparsity_pattern_.ncols.data(), HYPRE_Int, par_info_.N_owned, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
        hypre_TMemcpy(device_arrays_.rows_device, sparsity_pattern_.rows.data(), HYPRE_BigInt, par_info_.N_owned, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
        hypre_TMemcpy(device_arrays_.cols_device, sparsity_pattern_.cols.data(), HYPRE_BigInt, sparsity_pattern_.nnz, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
        hypre_TMemcpy(device_arrays_.row_indexes_device, host_arrays_.row_indexes.data(), HYPRE_Int, par_info_.N_owned, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
        hypre_TMemcpy(device_arrays_.indices_device, host_arrays_.indices.data(), HYPRE_BigInt, par_info_.N_owned, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
#endif
    }
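
    // Create the Hypre matrix and vectors, distributed by blocks of owned rows.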
    A_hypre_ = HypreInterface::createMatrix(par_info_.N_owned, par_info_.dof_offset, comm_);
    x_hypre_ = HypreInterface::createVector(par_info_.N_owned, par_info_.dof_offset, comm_);
    b_hypre_ = HypreInterface::createVector(par_info_.N_owned, par_info_.dof_offset, comm_);
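
    // Cleanup on destruction: free the device-side buffers when the GPU backend was used.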
    if (use_gpu_backend_) {
#if HYPRE_USING_CUDA || HYPRE_USING_HIP
        if (device_arrays_.ncols_device) {
            hypre_TFree(device_arrays_.ncols_device, HYPRE_MEMORY_DEVICE);
        }
        if (device_arrays_.rows_device) {
            hypre_TFree(device_arrays_.rows_device, HYPRE_MEMORY_DEVICE);
        }
        if (device_arrays_.cols_device) {
            hypre_TFree(device_arrays_.cols_device, HYPRE_MEMORY_DEVICE);
        }
        if (device_arrays_.row_indexes_device) {
            hypre_TFree(device_arrays_.row_indexes_device, HYPRE_MEMORY_DEVICE);
        }
        if (device_arrays_.indices_device) {
            hypre_TFree(device_arrays_.indices_device, HYPRE_MEMORY_DEVICE);
        }
        if (device_arrays_.vector_buffer_device) {
            hypre_TFree(device_arrays_.vector_buffer_device, HYPRE_MEMORY_DEVICE);
        }
        if (device_arrays_.matrix_buffer_device) {
            hypre_TFree(device_arrays_.matrix_buffer_device, HYPRE_MEMORY_DEVICE);
        }
#endif
    }
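
    // Destroy the Hypre solver, vector and matrix handles.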
    HypreInterface::destroySolver(solver_);
    HypreInterface::destroyVector(x_hypre_);
    HypreInterface::destroyVector(b_hypre_);
    HypreInterface::destroyMatrix(A_hypre_);
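
    // update(): copy the current matrix values into the Hypre matrix and redo the
    // BoomerAMG setup so the preconditioner reflects the latest linearization.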
    OPM_TIMEBLOCK(prec_update);
    HypreInterface::updateMatrixValues(
        A_, A_hypre_, sparsity_pattern_, host_arrays_, device_arrays_, use_gpu_backend_);
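
    // The setup call operates on the underlying ParCSR matrix and ParVector objects,
    // so extract them from the IJ wrappers first.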
    HYPRE_ParCSRMatrix parcsr_A;
    HYPRE_SAFE_CALL(HYPRE_IJMatrixGetObject(A_hypre_, reinterpret_cast<void**>(&parcsr_A)));

    HYPRE_ParVector par_x, par_b;
    HYPRE_SAFE_CALL(HYPRE_IJVectorGetObject(x_hypre_, reinterpret_cast<void**>(&par_x)));
    HYPRE_SAFE_CALL(HYPRE_IJVectorGetObject(b_hypre_, reinterpret_cast<void**>(&par_b)));

    HYPRE_SAFE_CALL(HYPRE_BoomerAMGSetup(solver_, parcsr_A, par_b, par_x));
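
    // apply(): approximately solve A v = d with BoomerAMG, writing the
    // preconditioned update into v.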
void apply(X& v, const Y& d) override
{
    OPM_TIMEBLOCK(prec_apply);
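
    // Copy the current update (initial guess) and the defect into the Hypre vectors.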
    HypreInterface::transferVectorToHypre(v, x_hypre_, host_arrays_, device_arrays_, par_info_, use_gpu_backend_);
    HypreInterface::transferVectorToHypre(d, b_hypre_, host_arrays_, device_arrays_, par_info_, use_gpu_backend_);
    HYPRE_ParCSRMatrix parcsr_A;
    HYPRE_ParVector par_x, par_b;
    HYPRE_SAFE_CALL(HYPRE_IJMatrixGetObject(A_hypre_, reinterpret_cast<void**>(&parcsr_A)));
    HYPRE_SAFE_CALL(HYPRE_IJVectorGetObject(x_hypre_, reinterpret_cast<void**>(&par_x)));
    HYPRE_SAFE_CALL(HYPRE_IJVectorGetObject(b_hypre_, reinterpret_cast<void**>(&par_b)));
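
    // Run BoomerAMG on the defect; par_x receives the preconditioned correction.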
    HYPRE_SAFE_CALL(HYPRE_BoomerAMGSolve(solver_, parcsr_A, par_b, par_x));
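
    // Copy the result back into the Dune vector.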
    HypreInterface::transferVectorFromHypre(x_hypre_, v, host_arrays_, device_arrays_, par_info_, use_gpu_backend_);
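
    // Propagate owner values to the overlap rows so v is consistent across processes.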
    comm_.copyOwnerToAll(v, v);
}