From 4f59eaa71c36b921ecd9762ba752ad53b924e4f3 Mon Sep 17 00:00:00 2001 From: Antonella Ritorto Date: Mon, 25 Nov 2024 08:52:01 +0100 Subject: [PATCH 1/2] Move id from addLgrs...(..) to adapt(..), allow global refine (once) --- opm/grid/CpGrid.hpp | 29 +- opm/grid/cpgrid/CpGrid.cpp | 264 ++++++++++-------- .../cpgrid/addLgrsOnDistributedGrid_test.cpp | 134 +++++++-- 3 files changed, 283 insertions(+), 144 deletions(-) diff --git a/opm/grid/CpGrid.hpp b/opm/grid/CpGrid.hpp index 73d310590..2a1ae600c 100644 --- a/opm/grid/CpGrid.hpp +++ b/opm/grid/CpGrid.hpp @@ -1083,23 +1083,26 @@ namespace Dune bool nonNNCsSelectedCellsLGR( const std::vector>& startIJK_vec, const std::vector>& endIJK_vec) const; - /// @brief Mark selected elements and assign them their corresponding level. + /// @brief Detect active LGRs, mark element and assign their level. /// - /// Given blocks of cells selected for refinement, Mark selected elements and assign them their corresponding - /// (refined) level (grid). When level zero grid is distributed before refinement, detect which LGRs are active - /// in each process. + /// Given blocks of cells selected for refinement on a level zero distributed grid, detect which LGRs are active + /// in each process. If "onlyDetectLgrs = false", additionally, mark selected elements and assign them their corresponding + /// (refined) level (grid). /// /// @param [in] startIJK_vec Vector of ijk values denoting the start of each block of cells selected for refinement. /// @param [in] endIJK_vec Vector of ijk values denoting the end of each block of cells selected for refinement. - /// @param [out] assignRefinedLevel Assign level for the refinement of each marked cell. Example: refined element from - /// LGR1 have level 1, refined element rfom LGR2 have level 2, etc. 
- /// @param [out] lgr_with_at_least_one_active_cell Determine if an LGR is not empty in a given process, we set - /// lgr_with_at_least_one_active_cell[in that level] to 1 if it contains - /// at least one active cell, and to 0 otherwise. - void markElemAssignLevelDetectActiveLgrs(const std::vector>& startIJK_vec, - const std::vector>& endIJK_vec, - std::vector& assignRefinedLevel, - std::vector& lgr_with_at_least_one_active_cell); + /// @param [in] onlyDetectLgrs If true, the return the second vector of the return pair, vector> is empty + /// since the method will only detect the active lgrs, and skip marking elements and assigning + /// their levels. If false, both pair.fisrt (representing active lgrs) and pair.second + /// (representing the assigned refined levels) will be populated + /// @return [lgr_with_at_least_one_active_cell, Determine if an LGR is not empty in a given process, we set + /// assignRefinedLevel] lgr_with_at_least_one_active_cell[in that level] to 1 if it contains + /// at least one active cell, and to 0 otherwise. + /// Assign level for the refinement of each marked cell. Example: refined + /// element from LGR1 have level 1, refined element rfom LGR2 have level 2, etc. + std::pair, std::vector> markElemAssignLevelDetectActiveLgrs(const std::vector>& startIJK_vec, + const std::vector>& endIJK_vec, + bool onlyDetectLgrs); /// @brief Predict minimum cell and point global ids per process. /// diff --git a/opm/grid/cpgrid/CpGrid.cpp b/opm/grid/cpgrid/CpGrid.cpp index f75af7c0c..10ee936d4 100644 --- a/opm/grid/cpgrid/CpGrid.cpp +++ b/opm/grid/cpgrid/CpGrid.cpp @@ -1020,19 +1020,28 @@ void CpGrid::globalRefine (int refCount) OPM_THROW(std::logic_error, "Global refinement of a mixed grid with coarse and refined cells is not supported yet."); } } - // Preventcalls of globalRefine on a distributed grid. 
- if ( !distributed_data_.empty() ) { - OPM_THROW(std::logic_error, "Global refinement of a distributed grid is not supported yet."); + // For a distributed grid, global refinement can be called only once - for now. + if ( (refCount>1) && (!distributed_data_.empty()) ) { + OPM_THROW(std::logic_error, "Multiple calls of global refinement on a distributed grid are not supported yet."); } if (refCount>0) { + const std::vector>& cells_per_dim_vec = {{2,2,2}}; // Arbitrary chosen values. for (int refinedLevel = 0; refinedLevel < refCount; ++refinedLevel) { - // Mark all the elements of the current leaf grid view for refinement - const auto& grid_view = this-> leafGridView(); - for(const auto& element: elements(grid_view)) { + + std::vector assignRefinedLevel(current_view_data_-> size(0)); + const auto& preAdaptMaxLevel = this ->maxLevel(); + + for(const auto& element: elements(this-> leafGridView())) { + // Mark all the elements of the current leaf grid view for refinement mark(1, element); + assignRefinedLevel[element.index()] = preAdaptMaxLevel +1; } + + std::vector lgr_name_vec = { "GR" + std::to_string(preAdaptMaxLevel +1) }; + bool isCARFIN = true; + const std::array& endIJK = currentData().back()->logicalCartesianSize(); preAdapt(); - adapt(); + adapt(cells_per_dim_vec, assignRefinedLevel, lgr_name_vec, isCARFIN, {{0,0,0}}, {endIJK}); postAdapt(); } } @@ -1251,11 +1260,30 @@ bool CpGrid::nonNNCsSelectedCellsLGR( const std::vector>& star return true; } -void CpGrid::markElemAssignLevelDetectActiveLgrs(const std::vector>& startIJK_vec, - const std::vector>& endIJK_vec, - std::vector& assignRefinedLevel, - std::vector& lgr_with_at_least_one_active_cell) +std::pair, std::vector> CpGrid::markElemAssignLevelDetectActiveLgrs(const std::vector>& startIJK_vec, + const std::vector>& endIJK_vec, + bool onlyDetectLgrs) { + // Auxiliary function. + // If onlyDetectLgrs == true, then only activeLgrs gets populated. + // Otherwise, both activeLgrs and assignLevel get populated. 
+ auto detectLgrs_maybeAssignLevel = [this](const Dune::cpgrid::Entity<0>& element, + int level, + std::vector& active_lgrs, + std::vector& assign_level, + bool only_detect_lgrs) + { + // shifted since starting grid is level 0, and refined grids levels are >= 1. + active_lgrs[level] = 1; + if (!only_detect_lgrs) + { + this-> mark(1, element); + assign_level[element.index()] = level+1; // shifted since starting grid is level 0, and refined grids levels are >= 1. + } + }; + + std::vector activeLgrs(startIJK_vec.size()); + std::vector assignLevel(onlyDetectLgrs ? 0 : currentData().back()->size(0)); // Find out which (ACTIVE) elements belong to the block cells defined by startIJK and endIJK values. for(const auto& element: elements(this->leafGridView())) { std::array ijk; @@ -1268,12 +1296,11 @@ void CpGrid::markElemAssignLevelDetectActiveLgrs(const std::vector mark(1, element); - assignRefinedLevel[element.index()] = level+1; // shifted since starting grid is level 0, and refined grids levels are >= 1. - lgr_with_at_least_one_active_cell[level] = 1; + detectLgrs_maybeAssignLevel(element, level, activeLgrs, assignLevel, onlyDetectLgrs); } } } + return std::make_pair, std::vector>(std::move(activeLgrs), std::move(assignLevel)); } void CpGrid::predictMinCellAndPointGlobalIdPerProcess([[maybe_unused]] const std::vector& assignRefinedLevel, @@ -1914,12 +1941,13 @@ bool CpGrid::adapt() const std::vector>& cells_per_dim_vec = {{2,2,2}}; // Arbitrary chosen values. std::vector assignRefinedLevel(current_view_data_-> size(0)); const auto& preAdaptMaxLevel = this ->maxLevel(); + for (int elemIdx = 0; elemIdx < current_view_data_->size(0); ++elemIdx) { const auto& element = cpgrid::Entity<0>(*current_view_data_, elemIdx, true); assignRefinedLevel[elemIdx] = (this->getMark(element) == 1) ? 
(preAdaptMaxLevel +1) : 0; } - const std::vector& lgr_name_vec = { "LGR" + std::to_string(preAdaptMaxLevel +1) }; - return this-> adapt(cells_per_dim_vec, assignRefinedLevel,lgr_name_vec); + std::vector lgr_name_vec = { "LGR" + std::to_string(preAdaptMaxLevel +1) }; + return this-> adapt(cells_per_dim_vec, assignRefinedLevel, lgr_name_vec); } bool CpGrid::adapt(const std::vector>& cells_per_dim_vec, @@ -1941,7 +1969,16 @@ bool CpGrid::adapt(const std::vector>& cells_per_dim_vec, const auto& preAdaptGrid_corner_history = (preAdaptMaxLevel>0) ? current_view_data_->corner_history_ : std::vector>(); auto& data = currentData(); // data pointed by current_view_data_ (data_ or distributed_data_[if loadBalance() has been invoked before adapt()]). - + + // To determine if an LGR is not empty in a given process, we set + // lgr_with_at_least_one_active_cell[in that level] to 1 if it contains + // at least one active cell, and to 0 otherwise. + // "rubissh" denotes an empty vector. The method 'markElemAssignLevelDetectActiveLgrs' skips marking elements and the assignment of their + // level when the passed boolean is 'true' ("onlyDetectLgrs"). + const auto& [lgr_with_at_least_one_active_cell, rubissh] = markElemAssignLevelDetectActiveLgrs(startIJK_vec, + endIJK_vec, + true /*onlyDetectLgrs*/); + // To store/build refined level grids. 
std::vector>> refined_data_vec(levels, data); std::vector> refined_grid_ptr_vec(levels); @@ -2366,104 +2403,6 @@ bool CpGrid::adapt(const std::vector>& cells_per_dim_vec, this->global_id_set_ptr_->insertIdSet(*data[refinedLevelGridIdx]); } - // Print total amount of cells on the adapted grid - Opm::OpmLog::info(std::to_string(markedElem_count) + " elements have been marked (in " + std::to_string(comm().rank()) + " rank).\n"); - Opm::OpmLog::info(std::to_string(levels) + " (new) refined level grid(s) (in " + std::to_string(comm().rank()) + " rank).\n"); - Opm::OpmLog::info(std::to_string(cell_count) + " total cells on the leaf grid view (in " + std::to_string(comm().rank()) + " rank).\n"); - - return preAdapt(); -} - -void CpGrid::postAdapt() -{ - // - Resize with the new amount of cells on the leaf grid view - // - Set marks equal to zero (representing 'doing nothing') - current_view_data_ -> postAdapt(); -} - -void CpGrid::addLgrsUpdateLeafView(const std::vector>& cells_per_dim_vec, - const std::vector>& startIJK_vec, - const std::vector>& endIJK_vec, - const std::vector& lgr_name_vec) -{ - // For parallel run, level zero grid is stored in distributed_data_[0]. If CpGrid::scatterGrid has been invoked, - // then current_view_data_ == distributed_data_[0]. - // For serial run, level zero grid is stored in data_[0]. In this case, current_view_data_ == data_[0]. - // Note: currentData() returns data_ (if grid is not distributed) or distributed_data_ otherwise. - - // Check startIJK_vec and endIJK_vec have same size, and "startIJK[patch][coordinate] < endIJK[patch][coordinate]" - current_view_data_->validStartEndIJKs(startIJK_vec, endIJK_vec); - - // Sizes of provided vectors (number of subivisions per cells and lgrs name) should coincide. 
- bool matchingSizeHasFailed = false; - if ( (cells_per_dim_vec.size() != startIJK_vec.size()) || (lgr_name_vec.size() != startIJK_vec.size())) { - matchingSizeHasFailed = true; - } - matchingSizeHasFailed = comm().max(matchingSizeHasFailed); - if (matchingSizeHasFailed) { - OPM_THROW(std::invalid_argument, "Sizes of provided vectors with subdivisions per cell and LGR names need to match."); - } - - // Compatibility of number of subdivisions of neighboring LGRs: Check shared faces on boundaries of LGRs. - // Not optimal since the code below does not take into account - // active/inactive cells, instead, relies on "ijk-computations". - // TO DO: improve/remove. - // To check "Compatibility of numbers of subdivisions of neighboring LGRs". - // The method compatibleSubdivision returns a bool. We convert it into an int since MPI within DUNE does not support bool directly. - int compatibleSubdivisions = current_view_data_->compatibleSubdivisions(cells_per_dim_vec, startIJK_vec, endIJK_vec); - compatibleSubdivisions = comm().min(compatibleSubdivisions); // 0 when at least one process returns false (un-compatible subdivisions). - if(!compatibleSubdivisions) { - if (comm().rank()==0){ - OPM_THROW(std::logic_error, "Subdivisions of neighboring LGRs sharing at least one face do not coincide. Not suppported yet."); - } - else{ - OPM_THROW_NOLOG(std::logic_error, "Subdivisions of neighboring LGRs sharing at least one face do not coincide. Not suppported yet."); - } - } - - // Non neighboring connections: Currently, adding LGRs whose cells have NNCs is not supported yet. - // The method nonNNCsSelectedCellsLGR returns a bool. We convert it into an int since MPI within DUNE does not support bool directly. - int nonNNCs = this->nonNNCsSelectedCellsLGR(startIJK_vec, endIJK_vec); - // To check "Non-NNCs (non non neighboring connections)" for all processes. 
- nonNNCs = comm().min(nonNNCs); // 0 when at least one process returns false (there are NNCs on a selected cell for refinement). - if(!nonNNCs) { - OPM_THROW(std::logic_error, "NNC face on a cell containing LGR is not supported yet."); - } - - // To determine if an LGR is not empty in a given process, we set - // lgr_with_at_least_one_active_cell[in that level] to 1 if it contains - // at least one active cell, and to 0 otherwise. - std::vector lgr_with_at_least_one_active_cell(startIJK_vec.size()); - // Determine the assigned level for the refinement of each marked cell - std::vector assignRefinedLevel(current_view_data_->size(0)); - - markElemAssignLevelDetectActiveLgrs(startIJK_vec, - endIJK_vec, - assignRefinedLevel, - lgr_with_at_least_one_active_cell); - - int non_empty_lgrs = 0; - for (std::size_t level = 0; level < startIJK_vec.size(); ++level) { - // Do not throw if all cells of an LGR are inactive in a parallel run (The process might not 'see' those cells.) - if (lgr_with_at_least_one_active_cell[level] == 0) { - Opm::OpmLog::warning("LGR" + std::to_string(level+1) + " contains only inactive cells (in " + std::to_string(comm().rank()) + " rank).\n"); - } - else { - ++non_empty_lgrs; - } - } - - // Notice that in a parallel run, non_empty_lgrs represents the local active lgrs, i.e. the lgrs containing active cells which also belong - // to the current process. 
- auto globalActiveLgrs = comm().sum(non_empty_lgrs); - if(globalActiveLgrs == 0) { - Opm::OpmLog::warning("All the LGRs contain only inactive cells.\n"); - } - - preAdapt(); - adapt(cells_per_dim_vec, assignRefinedLevel, lgr_name_vec, true, startIJK_vec, endIJK_vec); - postAdapt(); - // Only for parallel runs // - Define global ids for refined level grids (level 1, 2, ..., maxLevel) // - Define GlobalIdMapping (cellMapping, faceMapping, pointMapping required per level) @@ -2590,6 +2529,101 @@ void CpGrid::addLgrsUpdateLeafView(const std::vector>& cells_p #endif } // end-if-comm().size()>1 + + // Print total amount of cells on the adapted grid + Opm::OpmLog::info(std::to_string(markedElem_count) + " elements have been marked (in " + std::to_string(comm().rank()) + " rank).\n"); + Opm::OpmLog::info(std::to_string(levels) + " (new) refined level grid(s) (in " + std::to_string(comm().rank()) + " rank).\n"); + Opm::OpmLog::info(std::to_string(cell_count) + " total cells on the leaf grid view (in " + std::to_string(comm().rank()) + " rank).\n"); + + return preAdapt(); +} + +void CpGrid::postAdapt() +{ + // - Resize with the new amount of cells on the leaf grid view + // - Set marks equal to zero (representing 'doing nothing') + current_view_data_ -> postAdapt(); +} + +void CpGrid::addLgrsUpdateLeafView(const std::vector>& cells_per_dim_vec, + const std::vector>& startIJK_vec, + const std::vector>& endIJK_vec, + const std::vector& lgr_name_vec) +{ + // For parallel run, level zero grid is stored in distributed_data_[0]. If CpGrid::scatterGrid has been invoked, + // then current_view_data_ == distributed_data_[0]. + // For serial run, level zero grid is stored in data_[0]. In this case, current_view_data_ == data_[0]. + // Note: currentData() returns data_ (if grid is not distributed) or distributed_data_ otherwise. 
+ + // Check startIJK_vec and endIJK_vec have same size, and "startIJK[patch][coordinate] < endIJK[patch][coordinate]" + current_view_data_->validStartEndIJKs(startIJK_vec, endIJK_vec); + + // Sizes of provided vectors (number of subivisions per cells and lgrs name) should coincide. + bool matchingSizeHasFailed = false; + if ( (cells_per_dim_vec.size() != startIJK_vec.size()) || (lgr_name_vec.size() != startIJK_vec.size())) { + matchingSizeHasFailed = true; + } + matchingSizeHasFailed = comm().max(matchingSizeHasFailed); + if (matchingSizeHasFailed) { + OPM_THROW(std::invalid_argument, "Sizes of provided vectors with subdivisions per cell and LGR names need to match."); + } + + // Compatibility of number of subdivisions of neighboring LGRs: Check shared faces on boundaries of LGRs. + // Not optimal since the code below does not take into account + // active/inactive cells, instead, relies on "ijk-computations". + // TO DO: improve/remove. + // To check "Compatibility of numbers of subdivisions of neighboring LGRs". + // The method compatibleSubdivision returns a bool. We convert it into an int since MPI within DUNE does not support bool directly. + int compatibleSubdivisions = current_view_data_->compatibleSubdivisions(cells_per_dim_vec, startIJK_vec, endIJK_vec); + compatibleSubdivisions = comm().min(compatibleSubdivisions); // 0 when at least one process returns false (un-compatible subdivisions). + if(!compatibleSubdivisions) { + if (comm().rank()==0){ + OPM_THROW(std::logic_error, "Subdivisions of neighboring LGRs sharing at least one face do not coincide. Not suppported yet."); + } + else{ + OPM_THROW_NOLOG(std::logic_error, "Subdivisions of neighboring LGRs sharing at least one face do not coincide. Not suppported yet."); + } + } + + // Non neighboring connections: Currently, adding LGRs whose cells have NNCs is not supported yet. + // The method nonNNCsSelectedCellsLGR returns a bool. 
We convert it into an int since MPI within DUNE does not support bool directly. + int nonNNCs = this->nonNNCsSelectedCellsLGR(startIJK_vec, endIJK_vec); + // To check "Non-NNCs (non non neighboring connections)" for all processes. + nonNNCs = comm().min(nonNNCs); // 0 when at least one process returns false (there are NNCs on a selected cell for refinement). + if(!nonNNCs) { + OPM_THROW(std::logic_error, "NNC face on a cell containing LGR is not supported yet."); + } + + // To determine if an LGR is not empty in a given process, we set + // lgr_with_at_least_one_active_cell[in that level] to 1 if it contains + // at least one active cell, and to 0 otherwise. + // Determine the assigned level for the refinement of each marked cell + const auto& [lgr_with_at_least_one_active_cell, assignRefinedLevel] = markElemAssignLevelDetectActiveLgrs(startIJK_vec, + endIJK_vec, + false /*onlydetecLgrs*/); + + int non_empty_lgrs = 0; + for (std::size_t level = 0; level < startIJK_vec.size(); ++level) { + // Do not throw if all cells of an LGR are inactive in a parallel run (The process might not 'see' those cells.) + if (lgr_with_at_least_one_active_cell[level] == 0) { + Opm::OpmLog::warning("LGR" + std::to_string(level+1) + " contains only inactive cells (in " + std::to_string(comm().rank()) + " rank).\n"); + } + else { + ++non_empty_lgrs; + } + } + + // Notice that in a parallel run, non_empty_lgrs represents the local active lgrs, i.e. the lgrs containing active cells which also belong + // to the current process. 
+ auto globalActiveLgrs = comm().sum(non_empty_lgrs); + if(globalActiveLgrs == 0) { + Opm::OpmLog::warning("All the LGRs contain only inactive cells.\n"); + } + + preAdapt(); + adapt(cells_per_dim_vec, assignRefinedLevel, lgr_name_vec, true, startIJK_vec, endIJK_vec); + postAdapt(); + // Print total refined level grids and total cells on the leaf grid view Opm::OpmLog::info(std::to_string(non_empty_lgrs) + " (new) refined level grid(s) (in " + std::to_string(comm().rank()) + " rank).\n"); Opm::OpmLog::info(std::to_string(current_view_data_->size(0)) + " total cells on the leaf grid view (in " + std::to_string(comm().rank()) + " rank).\n"); diff --git a/tests/cpgrid/addLgrsOnDistributedGrid_test.cpp b/tests/cpgrid/addLgrsOnDistributedGrid_test.cpp index d3a523ca2..2bd36af85 100644 --- a/tests/cpgrid/addLgrsOnDistributedGrid_test.cpp +++ b/tests/cpgrid/addLgrsOnDistributedGrid_test.cpp @@ -182,7 +182,6 @@ void refinePatch_and_check(Dune::CpGrid& coarse_grid, BOOST_CHECK( *itMax < maxCartesianIdxLevel); } - // LGRs for (int cell = 0; cell < data[level]-> size(0); ++cell) { @@ -572,7 +571,7 @@ BOOST_AUTO_TEST_CASE(atLeastOneLgr_per_process_attempt) // Total global ids in leaf grid view for cells: 36-(6 marked cells) + 16 + 27 + 64 + 16 = 153 // Total global ids in leaf grid view for points: 80 + 33 + 56 + 117 + 33 = 319 refinePatch_and_check(grid, cells_per_dim_vec, startIJK_vec, endIJK_vec, lgr_name_vec); - + // Check global id is not duplicated for points for each LGR // LGR1 dim 2x4x2 -> 3x5x3 = 45 points // LGR2 dim 3x3x3 -> 4x4x4 = 64 points @@ -677,7 +676,37 @@ BOOST_AUTO_TEST_CASE(throw_not_fully_interior_lgr) } } -//Calling globalRefine on a distributed grid is not supported yet. +//Calling (only once, with argument equal to 1) globalRefine on a distributed grid is supported now. 
+BOOST_AUTO_TEST_CASE(globalRefine1) +{ + // Create a grid + Dune::CpGrid grid; + const std::array cell_sizes = {1.0, 1.0, 1.0}; + const std::array grid_dim = {4,2,1}; + grid.createCartesian(grid_dim, cell_sizes); + + std::vector parts(8); + std::vector> cells_per_rank = {{0,4},{1,5},{2,6}, {3,7}}; + for (int rank = 0; rank < 4; ++rank) { + for (const auto& elemIdx : cells_per_rank[rank]) { + parts[elemIdx] = rank; + } + } + // Distribute the grid + if(grid.comm().size()>1) + { + grid.loadBalance(); + + grid.globalRefine(1); + const std::vector> cells_per_dim_vec = {{2,2,2}}; + const std::vector> startIJK_vec = {{0,0,0}}; + const std::vector> endIJK_vec = {{4,2,1}}; + const std::vector lgr_name_vec = {"GR1"}; // GR stands for GLOBAL REFINEMENT + + refinePatch_and_check(grid, cells_per_dim_vec, startIJK_vec, endIJK_vec, lgr_name_vec); + } +} + BOOST_AUTO_TEST_CASE(globalRefine2) { // Create a grid @@ -685,12 +714,51 @@ BOOST_AUTO_TEST_CASE(globalRefine2) const std::array cell_sizes = {1.0, 1.0, 1.0}; const std::array grid_dim = {4,3,3}; grid.createCartesian(grid_dim, cell_sizes); + + std::vector parts(36); + std::vector> cells_per_rank = { {0,1,4,5,8,9,16,20,21}, + {12,13,17,24,25,28,29,32,33}, + {2,3,6,7,10,11,18,22,23}, + {14,15,19,26,27,30,31,34,35} }; + for (int rank = 0; rank < 4; ++rank) { + for (const auto& elemIdx : cells_per_rank[rank]) { + parts[elemIdx] = rank; + } + } // Distribute the grid if(grid.comm().size()>1) { grid.loadBalance(); - BOOST_CHECK_THROW(grid.globalRefine(1), std::logic_error); + grid.globalRefine(1); + const std::vector> cells_per_dim_vec = {{2,2,2}}; + const std::vector> startIJK_vec = {{0,0,0}}; + const std::vector> endIJK_vec = {{4,3,3}}; + const std::vector lgr_name_vec = {"GR1"}; // GR stands for GLOBAL REFINEMENT + refinePatch_and_check(grid, cells_per_dim_vec, startIJK_vec, endIJK_vec, lgr_name_vec); + } +} + +BOOST_AUTO_TEST_CASE(globalRefine_throw) +{ + // Create a grid + Dune::CpGrid grid; + const std::array cell_sizes = 
{1.0, 1.0, 1.0}; + const std::array grid_dim = {4,2,1}; + grid.createCartesian(grid_dim, cell_sizes); + + std::vector parts(8); + std::vector> cells_per_rank = {{0,4},{1,5},{2,6}, {3,7}}; + for (int rank = 0; rank < 4; ++rank) { + for (const auto& elemIdx : cells_per_rank[rank]) { + parts[elemIdx] = rank; + } + } + // Distribute the grid + if(grid.comm().size()>1) + { + grid.loadBalance(); + BOOST_CHECK_THROW(grid.globalRefine(2), std::logic_error); } } @@ -756,8 +824,8 @@ BOOST_AUTO_TEST_CASE(distributed_lgr) // That means that the "unfortunate" expected point ids count is 45 (desired value) + 5 (duplicated laying on // shared I_FACE) = 50. BOOST_CHECK( static_cast(all_point_ids_set.size()) == 50); - - /** Current approach avoids duplicated point ids when + + // Current approach avoids duplicated point ids when // 1. the LGR is distributed in P_{i_0}, ..., P_{i_n}, with n+1 < grid.comm().size(), // AND // 2. there is no coarse cell seen by a process P with P != P_{i_j}, j = 0, ..., n. @@ -934,8 +1002,7 @@ BOOST_AUTO_TEST_CASE(distributed_in_all_ranks_lgr) } } - -BOOST_AUTO_TEST_CASE(call_adapt_on_distributed_grid) +BOOST_AUTO_TEST_CASE(call_adapt_with_args_on_distributed_grid) { // Only for testing assignment of new global ids for refined entities (cells and point belonging to // refined level grids). @@ -958,11 +1025,6 @@ BOOST_AUTO_TEST_CASE(call_adapt_on_distributed_grid) if(grid.comm().size()>1) { grid.loadBalance(parts); - // grid.adapt(); It does not throw an exeption. Note: adapt() implements global refinement. - // - // The following test fails. TODO: Move the assignment of global IDs for refined level grids and the leaf grid view - // into the adapt(...) function, as it is invoked within 'addLgrsUpdateLeafGridView', not the other way around. 
- // refinePatch_and_check(grid, cells_per_dim_vec, startIJK_vec, endIJK_vec, lgr_name_vec); const std::vector> cells_per_dim_vec = {{2,2,2}}; const std::vector> startIJK_vec = {{1,0,0}}; @@ -986,9 +1048,49 @@ BOOST_AUTO_TEST_CASE(call_adapt_on_distributed_grid) true, startIJK_vec, endIJK_vec); - // The following test fails. TODO: Move the assignment of global IDs for refined level grids and the leaf grid view - // into the adapt(...) function, as it is invoked within 'addLgrsUpdateLeafGridView', not the other way around. - // refinePatch_and_check(grid, cells_per_dim_vec, startIJK_vec, endIJK_vec, lgr_name_vec); + + refinePatch_and_check(grid, cells_per_dim_vec, startIJK_vec, endIJK_vec, lgr_name_vec); } } +// Cannot call adapt() on a distribted grid +/*BOOST_AUTO_TEST_CASE(call_adapt_on_distributed_grid) +{ + // Only for testing assignment of new global ids for refined entities (cells and point belonging to + // refined level grids). + + // Create a grid + Dune::CpGrid grid; + const std::array cell_sizes = {1.0, 1.0, 1.0}; + const std::array grid_dim = {4,3,3}; + grid.createCartesian(grid_dim, cell_sizes); + std::vector parts(36); + std::vector> cells_per_rank = { {0,1,4,5,8,9,16,20,21}, + {12,13,17,24,25,28,29,32,33}, + {2,3,6,7,10,11,18,22,23}, + {14,15,19,26,27,30,31,34,35} }; + for (int rank = 0; rank < 4; ++rank) { + for (const auto& elemIdx : cells_per_rank[rank]) { + parts[elemIdx] = rank; + } + } + if(grid.comm().size()>1) + { + grid.loadBalance(parts); + // Mark all elements -> indirect global refinement + for (const auto& element : elements(grid.leafGridView())){ + grid.mark(1, element); + } + grid.preAdapt(); + grid.adapt(); + grid.postAdapt(); + + const std::vector> cells_per_dim_vec = {{2,2,2}}; + const std::vector> startIJK_vec = {{1,0,0}}; + const std::vector> endIJK_vec = {{4,3,3}}; + const std::vector lgr_name_vec = {"GR1"}; + + refinePatch_and_check(grid, cells_per_dim_vec, startIJK_vec, endIJK_vec, lgr_name_vec); + } + }*/ + From 
ae2fc6643b0c82369b9f9e41119b9b9abe164701 Mon Sep 17 00:00:00 2001 From: Markus Blatt Date: Tue, 3 Dec 2024 07:41:39 +0100 Subject: [PATCH 2/2] Use lambda without if (#17) * Revert "Reduce code duplication in markElem... detectActiveLgrs" This reverts commit bffee42652ab2f36de068ef6f67dec278856a902. * Use helper function with lambda --- opm/grid/CpGrid.hpp | 51 +++++--- opm/grid/cpgrid/CpGrid.cpp | 113 +++++++++++------- .../cpgrid/addLgrsOnDistributedGrid_test.cpp | 41 ++----- 3 files changed, 116 insertions(+), 89 deletions(-) diff --git a/opm/grid/CpGrid.hpp b/opm/grid/CpGrid.hpp index 2a1ae600c..8b599395d 100644 --- a/opm/grid/CpGrid.hpp +++ b/opm/grid/CpGrid.hpp @@ -1083,26 +1083,47 @@ namespace Dune bool nonNNCsSelectedCellsLGR( const std::vector>& startIJK_vec, const std::vector>& endIJK_vec) const; - /// @brief Detect active LGRs, mark element and assign their level. + /// @brief Detect active LGRs in each process. /// /// Given blocks of cells selected for refinement on a level zero distributed grid, detect which LGRs are active - /// in each process. If "onlyDetectLgrs = false", additionally, mark selected elements and assign them their corresponding - /// (refined) level (grid). + /// in each process. /// /// @param [in] startIJK_vec Vector of ijk values denoting the start of each block of cells selected for refinement. /// @param [in] endIJK_vec Vector of ijk values denoting the end of each block of cells selected for refinement. - /// @param [in] onlyDetectLgrs If true, the return the second vector of the return pair, vector> is empty - /// since the method will only detect the active lgrs, and skip marking elements and assigning - /// their levels. 
If false, both pair.fisrt (representing active lgrs) and pair.second - /// (representing the assigned refined levels) will be populated - /// @return [lgr_with_at_least_one_active_cell, Determine if an LGR is not empty in a given process, we set - /// assignRefinedLevel] lgr_with_at_least_one_active_cell[in that level] to 1 if it contains - /// at least one active cell, and to 0 otherwise. - /// Assign level for the refinement of each marked cell. Example: refined - /// element from LGR1 have level 1, refined element rfom LGR2 have level 2, etc. - std::pair, std::vector> markElemAssignLevelDetectActiveLgrs(const std::vector>& startIJK_vec, - const std::vector>& endIJK_vec, - bool onlyDetectLgrs); + /// @param [out] lgr_with_at_least_one_active_cell Determine if an LGR is not empty in a given process, we set + /// lgr_with_at_least_one_active_cell[in that level] to 1 if it contains + /// at least one active cell, and to 0 otherwise. + void detectActiveLgrs(const std::vector>& startIJK_vec, + const std::vector>& endIJK_vec, + std::vector& lgr_with_at_least_one_active_cell); + + /// @brief Mark selected elements, assign them their corresponding level, and detect active LGRs. + /// + /// Given blocks of cells selected for refinement, mark selected elements and assign them their corresponding + /// (refined) level (grid). When level zero grid is distributed before refinement, detect which LGRs are active + /// in each process. + /// + /// @param [in] startIJK_vec Vector of ijk values denoting the start of each block of cells selected for refinement. + /// @param [in] endIJK_vec Vector of ijk values denoting the end of each block of cells selected for refinement. + /// @param [out] assignRefinedLevel Assign level for the refinement of each marked cell. Example: refined element from + /// LGR1 have level 1, refined element from LGR2 have level 2, etc. 
+ /// @param [out] lgr_with_at_least_one_active_cell Determine if an LGR is not empty in a given process, we set + /// lgr_with_at_least_one_active_cell[in that level] to 1 if it contains + /// at least one active cell, and to 0 otherwise. + void markElemAssignLevelDetectActiveLgrs(const std::vector>& startIJK_vec, + const std::vector>& endIJK_vec, + std::vector& assignRefinedLevel, + std::vector& lgr_with_at_least_one_active_cell); + + /// @brief Auxiliary function to compute one or more properties on selected block of parent cells. + /// + /// @param [in] startIJK_vec Vector of ijk values denoting the start of each block of cells selected for refinement. + /// @param [in] endIJK_vec Vector of ijk values denoting the end of each block of cells selected for refinement. + /// @param [in] func Lambda expression/function that computes the desired properties for each parent cell. + template + void computeOnLgrParents(const std::vector>& startIJK_vec, + const std::vector>& endIJK_vec, + T func); /// @brief Predict minimum cell and point global ids per process. /// diff --git a/opm/grid/cpgrid/CpGrid.cpp b/opm/grid/cpgrid/CpGrid.cpp index 10ee936d4..d7c035f31 100644 --- a/opm/grid/cpgrid/CpGrid.cpp +++ b/opm/grid/cpgrid/CpGrid.cpp @@ -1020,16 +1020,15 @@ void CpGrid::globalRefine (int refCount) OPM_THROW(std::logic_error, "Global refinement of a mixed grid with coarse and refined cells is not supported yet."); } } - // For a distributed grid, global refinement can be called only once - for now. - if ( (refCount>1) && (!distributed_data_.empty()) ) { - OPM_THROW(std::logic_error, "Multiple calls of global refinement on a distributed grid are not supported yet."); - } if (refCount>0) { const std::vector>& cells_per_dim_vec = {{2,2,2}}; // Arbitrary chosen values. 
for (int refinedLevel = 0; refinedLevel < refCount; ++refinedLevel) { std::vector assignRefinedLevel(current_view_data_-> size(0)); const auto& preAdaptMaxLevel = this ->maxLevel(); + std::vector lgr_name_vec = { "GR" + std::to_string(preAdaptMaxLevel +1) }; + bool isCARFIN = true; + const std::array& endIJK = currentData().back()->logicalCartesianSize(); for(const auto& element: elements(this-> leafGridView())) { // Mark all the elements of the current leaf grid view for refinement @@ -1037,9 +1036,6 @@ void CpGrid::globalRefine (int refCount) assignRefinedLevel[element.index()] = preAdaptMaxLevel +1; } - std::vector lgr_name_vec = { "GR" + std::to_string(preAdaptMaxLevel +1) }; - bool isCARFIN = true; - const std::array& endIJK = currentData().back()->logicalCartesianSize(); preAdapt(); adapt(cells_per_dim_vec, assignRefinedLevel, lgr_name_vec, isCARFIN, {{0,0,0}}, {endIJK}); postAdapt(); @@ -1260,30 +1256,11 @@ bool CpGrid::nonNNCsSelectedCellsLGR( const std::vector>& star return true; } -std::pair, std::vector> CpGrid::markElemAssignLevelDetectActiveLgrs(const std::vector>& startIJK_vec, - const std::vector>& endIJK_vec, - bool onlyDetectLgrs) +template +void CpGrid::computeOnLgrParents(const std::vector>& startIJK_vec, + const std::vector>& endIJK_vec, + T func) { - // Auxiliary function. - // If onlyDetectLgrs == true, then only activeLgrs gets populated. - // Otherwise, both activeLgrs and assignLevel get populated. - auto detectLgrs_maybeAssignLevel = [this](const Dune::cpgrid::Entity<0>& element, - int level, - std::vector& active_lgrs, - std::vector& assign_level, - bool only_detect_lgrs) - { - // shifted since starting grid is level 0, and refined grids levels are >= 1. - active_lgrs[level] = 1; - if (!only_detect_lgrs) - { - this-> mark(1, element); - assign_level[element.index()] = level+1; // shifted since starting grid is level 0, and refined grids levels are >= 1. 
- } - }; - - std::vector activeLgrs(startIJK_vec.size()); - std::vector assignLevel(onlyDetectLgrs ? 0 : currentData().back()->size(0)); // Find out which (ACTIVE) elements belong to the block cells defined by startIJK and endIJK values. for(const auto& element: elements(this->leafGridView())) { std::array ijk; @@ -1296,11 +1273,37 @@ std::pair, std::vector> CpGrid::markElemAssignLevelDetectA break; } if(belongsToLevel) { - detectLgrs_maybeAssignLevel(element, level, activeLgrs, assignLevel, onlyDetectLgrs); + func(element, level); } } } - return std::make_pair, std::vector>(std::move(activeLgrs), std::move(assignLevel)); +} + +void CpGrid::detectActiveLgrs(const std::vector>& startIJK_vec, + const std::vector>& endIJK_vec, + std::vector& lgr_with_at_least_one_active_cell) +{ + auto markLgr = [&lgr_with_at_least_one_active_cell]([[maybe_unused]] const cpgrid::Entity<0>& element, int level) + { + // shifted since starting grid is level 0, and refined grids levels are >= 1. + lgr_with_at_least_one_active_cell[level] = 1; + }; + computeOnLgrParents(startIJK_vec, endIJK_vec, markLgr); +} + +void CpGrid::markElemAssignLevelDetectActiveLgrs(const std::vector>& startIJK_vec, + const std::vector>& endIJK_vec, + std::vector& assignRefinedLevel, + std::vector& lgr_with_at_least_one_active_cell) +{ + auto assignAndDetect = [this, &assignRefinedLevel, &lgr_with_at_least_one_active_cell](const cpgrid::Entity<0>& element, int level) + { + mark(1, element); + assignRefinedLevel[element.index()] = level+1; + // shifted since starting grid is level 0, and refined grids levels are >= 1. 
+ lgr_with_at_least_one_active_cell[level] = 1; + }; + computeOnLgrParents(startIJK_vec, endIJK_vec, assignAndDetect); } void CpGrid::predictMinCellAndPointGlobalIdPerProcess([[maybe_unused]] const std::vector& assignRefinedLevel, @@ -1942,11 +1945,37 @@ bool CpGrid::adapt() std::vector assignRefinedLevel(current_view_data_-> size(0)); const auto& preAdaptMaxLevel = this ->maxLevel(); + int local_marked_elem_count = 0; for (int elemIdx = 0; elemIdx < current_view_data_->size(0); ++elemIdx) { const auto& element = cpgrid::Entity<0>(*current_view_data_, elemIdx, true); assignRefinedLevel[elemIdx] = (this->getMark(element) == 1) ? (preAdaptMaxLevel +1) : 0; + if (this->getMark(element) == 1) { + ++local_marked_elem_count; + } } + + // Check if it's a global refinement + bool is_global_refine = false; std::vector lgr_name_vec = { "LGR" + std::to_string(preAdaptMaxLevel +1) }; + // Rewrite if global refinement +#if HAVE_MPI + auto global_marked_elem_count = comm().sum(local_marked_elem_count); + auto global_cell_count_before_adapt = comm().sum(current_view_data_-> size(0)); // Recall overlap cells are also marked + if (global_marked_elem_count == global_cell_count_before_adapt) { + // GR stands for GLOBAL REFINEMENT + lgr_name_vec = { "GR" + std::to_string(preAdaptMaxLevel +1) }; + is_global_refine = true; // parallel + } +#endif + if ( (comm().size() == 0) && (local_marked_elem_count == current_view_data_-> size(0)) ) { + // GR stands for GLOBAL REFINEMENT + lgr_name_vec = { "GR" + std::to_string(preAdaptMaxLevel +1) }; + is_global_refine = true; // sequential + } + if (is_global_refine) { // parallel or sequential + const std::array& endIJK = currentData().back()->logicalCartesianSize(); + return this->adapt(cells_per_dim_vec, assignRefinedLevel, lgr_name_vec, is_global_refine, {{0,0,0}}, {endIJK}); + } return this-> adapt(cells_per_dim_vec, assignRefinedLevel, lgr_name_vec); } @@ -1973,11 +2002,8 @@ bool CpGrid::adapt(const std::vector>& cells_per_dim_vec, // To 
determine if an LGR is not empty in a given process, we set // lgr_with_at_least_one_active_cell[in that level] to 1 if it contains // at least one active cell, and to 0 otherwise. - // "rubissh" denotes an empty vector. The method 'markElemAssignLevelDetectActiveLgrs' skips marking elements and the assignment of their - // level when the passed boolean is 'true' ("onlyDetectLgrs"). - const auto& [lgr_with_at_least_one_active_cell, rubissh] = markElemAssignLevelDetectActiveLgrs(startIJK_vec, - endIJK_vec, - true /*onlyDetectLgrs*/); + std::vector lgr_with_at_least_one_active_cell(levels); + detectActiveLgrs(startIJK_vec, endIJK_vec, lgr_with_at_least_one_active_cell); // To store/build refined level grids. std::vector>> refined_data_vec(levels, data); @@ -2403,7 +2429,7 @@ bool CpGrid::adapt(const std::vector>& cells_per_dim_vec, this->global_id_set_ptr_->insertIdSet(*data[refinedLevelGridIdx]); } - // Only for parallel runs + // Only for parallel runs // - Define global ids for refined level grids (level 1, 2, ..., maxLevel) // - Define GlobalIdMapping (cellMapping, faceMapping, pointMapping required per level) // - Define ParallelIndex for overlap cells and their neighbors @@ -2594,13 +2620,16 @@ void CpGrid::addLgrsUpdateLeafView(const std::vector>& cells_p OPM_THROW(std::logic_error, "NNC face on a cell containing LGR is not supported yet."); } + // Determine the assigned level for the refinement of each marked cell + std::vector assignRefinedLevel(current_view_data_->size(0)); // To determine if an LGR is not empty in a given process, we set // lgr_with_at_least_one_active_cell[in that level] to 1 if it contains // at least one active cell, and to 0 otherwise. 
- // Determine the assigned level for the refinement of each marked cell - const auto& [lgr_with_at_least_one_active_cell, assignRefinedLevel] = markElemAssignLevelDetectActiveLgrs(startIJK_vec, - endIJK_vec, - false /*onlydetecLgrs*/); + std::vector lgr_with_at_least_one_active_cell(startIJK_vec.size()); + markElemAssignLevelDetectActiveLgrs(startIJK_vec, + endIJK_vec, + assignRefinedLevel, + lgr_with_at_least_one_active_cell); int non_empty_lgrs = 0; for (std::size_t level = 0; level < startIJK_vec.size(); ++level) { diff --git a/tests/cpgrid/addLgrsOnDistributedGrid_test.cpp b/tests/cpgrid/addLgrsOnDistributedGrid_test.cpp index 2bd36af85..73824abda 100644 --- a/tests/cpgrid/addLgrsOnDistributedGrid_test.cpp +++ b/tests/cpgrid/addLgrsOnDistributedGrid_test.cpp @@ -676,7 +676,7 @@ BOOST_AUTO_TEST_CASE(throw_not_fully_interior_lgr) } } -//Calling (only once, with argument equal to 1) globalRefine on a distributed grid is supported now. +//Calling globalRefine on a distributed grid is supported now. 
BOOST_AUTO_TEST_CASE(globalRefine1) { // Create a grid @@ -715,7 +715,7 @@ BOOST_AUTO_TEST_CASE(globalRefine2) const std::array grid_dim = {4,3,3}; grid.createCartesian(grid_dim, cell_sizes); - std::vector parts(36); + std::vector parts(36); std::vector> cells_per_rank = { {0,1,4,5,8,9,16,20,21}, {12,13,17,24,25,28,29,32,33}, {2,3,6,7,10,11,18,22,23}, @@ -739,29 +739,6 @@ } } -BOOST_AUTO_TEST_CASE(globalRefine_throw) -{ - // Create a grid - Dune::CpGrid grid; - const std::array cell_sizes = {1.0, 1.0, 1.0}; - const std::array grid_dim = {4,2,1}; - grid.createCartesian(grid_dim, cell_sizes); - - std::vector parts(8); - std::vector> cells_per_rank = {{0,4},{1,5},{2,6}, {3,7}}; - for (int rank = 0; rank < 4; ++rank) { - for (const auto& elemIdx : cells_per_rank[rank]) { - parts[elemIdx] = rank; - } - } - // Distribute the grid - if(grid.comm().size()>1) - { - grid.loadBalance(); - BOOST_CHECK_THROW(grid.globalRefine(2), std::logic_error); - } -} - BOOST_AUTO_TEST_CASE(distributed_lgr) { // Create a grid @@ -1053,8 +1030,8 @@ BOOST_AUTO_TEST_CASE(call_adapt_with_args_on_distributed_grid) } } -// Cannot call adapt() on a distribted grid -/*BOOST_AUTO_TEST_CASE(call_adapt_on_distributed_grid) +// Call adapt() on a distributed grid, marking all the elements (equivalent to calling globalRefine). +BOOST_AUTO_TEST_CASE(call_adapt_on_distributed_grid) { // Only for testing assignment of new global ids for refined entities (cells and point belonging to // refined level grids. 
@@ -1066,9 +1043,9 @@ BOOST_AUTO_TEST_CASE(call_adapt_with_args_on_distributed_grid) grid.createCartesian(grid_dim, cell_sizes); std::vector parts(36); std::vector> cells_per_rank = { {0,1,4,5,8,9,16,20,21}, - {12,13,17,24,25,28,29,32,33}, - {2,3,6,7,10,11,18,22,23}, - {14,15,19,26,27,30,31,34,35} }; + {12,13,17,24,25,28,29,32,33}, + {2,3,6,7,10,11,18,22,23}, + {14,15,19,26,27,30,31,34,35} }; for (int rank = 0; rank < 4; ++rank) { for (const auto& elemIdx : cells_per_rank[rank]) { parts[elemIdx] = rank; @@ -1077,7 +1054,7 @@ BOOST_AUTO_TEST_CASE(call_adapt_with_args_on_distributed_grid) if(grid.comm().size()>1) { grid.loadBalance(parts); - // Mark all elements -> indirect global refinement + // Mark all elements -> 'indirect' global refinement for (const auto& element : elements(grid.leafGridView())){ grid.mark(1, element); } @@ -1092,5 +1069,5 @@ BOOST_AUTO_TEST_CASE(call_adapt_with_args_on_distributed_grid) refinePatch_and_check(grid, cells_per_dim_vec, startIJK_vec, endIJK_vec, lgr_name_vec); } - }*/ +}