ThunderEgg 1.0.0
InterLevelComm.h
/***************************************************************************
 * ThunderEgg, a library for solvers on adaptively refined block-structured
 * Cartesian grids.
 *
 * Copyright (c) 2018-2021 Scott Aiton
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 ***************************************************************************/

#ifndef THUNDEREGG_GMG_INTERLEVELCOMM_H
#define THUNDEREGG_GMG_INTERLEVELCOMM_H

#include <ThunderEgg/RuntimeError.h>
#include <ThunderEgg/Vector.h>

namespace ThunderEgg::GMG {
/**
 * @brief Facilitates communication between a finer domain and a coarser domain.
 */
template<int D>
class InterLevelComm
{
private:
  /** @brief the communicator (taken from the coarser domain) */
  Communicator comm;
  /** @brief the coarser Domain */
  Domain<D> coarser_domain;
  /** @brief the finer Domain */
  Domain<D> finer_domain;
  /** @brief the number of cells in each direction of a patch */
  std::array<int, D> ns;
  /** @brief the number of ghost cells on each side of a patch */
  int num_ghost_cells;
  /** @brief the number of distinct off-rank (ghost) parent patches */
  int num_ghost_patches;
  /** @brief the number of cells in a patch, including ghost cells */
  int patch_size;
  /** @brief pairs of (local index of the parent in the coarser vector, finer PatchInfo) */
  std::vector<std::pair<int, std::reference_wrapper<const PatchInfo<D>>>> patches_with_local_parent;
  /** @brief pairs of (local index of the parent in the ghost vector, finer PatchInfo) */
  std::vector<std::pair<int, std::reference_wrapper<const PatchInfo<D>>>> patches_with_ghost_parent;
  /** @brief for each neighboring rank, the local indexes in the coarser vector communicated with it */
  std::vector<std::pair<int, std::vector<int>>> rank_and_local_indexes_for_vector;
  /** @brief for each neighboring rank, the local indexes in the ghost vector communicated with it */
  std::vector<std::pair<int, std::vector<int>>> rank_and_local_indexes_for_ghost_vector;
  bool communicating = false;
  bool sending = false;

  const Vector<D>* current_vector = nullptr;
  const Vector<D>* current_ghost_vector = nullptr;

  std::vector<std::vector<double>> recv_buffers;
  std::vector<MPI_Request> recv_requests;
  std::vector<std::vector<double>> send_buffers;
  std::vector<MPI_Request> send_requests;

public:
  /**
   * @brief Create a new InterLevelComm object.
   *
   * @param coarser_domain the coarser Domain
   * @param finer_domain the finer Domain
   */
  InterLevelComm(const Domain<D>& coarser_domain, const Domain<D>& finer_domain)
    : comm(coarser_domain.getCommunicator())
    , coarser_domain(coarser_domain)
    , finer_domain(finer_domain)
    , ns(finer_domain.getNs())
    , num_ghost_cells(finer_domain.getNumGhostCells())
  {
    int my_patch_size = 1;
    for (size_t axis = 0; axis < D; axis++) {
      my_patch_size *= ns[axis] + 2 * num_ghost_cells;
    }
    patch_size = my_patch_size;

    // sort into patches with local parents and patches with ghost parents
    std::deque<std::pair<int, std::reference_wrapper<const PatchInfo<D>>>> local_parents;
    std::deque<std::reference_wrapper<const PatchInfo<D>>> ghost_parents;
    std::set<int> ghost_parents_ids;

    int rank;
    MPI_Comm_rank(comm.getMPIComm(), &rank);
    // TODO this has to be changed when domain class is updated
    std::map<int, int> coarser_domain_id_to_local_index_map;
    for (const PatchInfo<D>& pinfo : this->coarser_domain.getPatchInfoVector()) {
      coarser_domain_id_to_local_index_map[pinfo.id] = pinfo.local_index;
    }
    for (const PatchInfo<D>& patch : this->finer_domain.getPatchInfoVector()) {
      if (patch.parent_rank == rank) {
        local_parents.emplace_back(coarser_domain_id_to_local_index_map[patch.parent_id], patch);
      } else {
        ghost_parents.push_back(patch);
        ghost_parents_ids.insert(patch.parent_id);
      }
    }
    num_ghost_patches = ghost_parents_ids.size();

    // fill in local vector
    patches_with_local_parent =
      std::vector<std::pair<int, std::reference_wrapper<const PatchInfo<D>>>>(local_parents.begin(),
                                                                              local_parents.end());
    // find local coarse patches that are ghost patches on other ranks
    std::map<int, std::map<int, int>>
      ranks_and_local_patches; // map from rank -> (map of ids -> local indexes). The inner map is
                               // used so that when the local indexes are iterated over, they are
                               // sorted by their corresponding id
    for (const PatchInfo<D>& pinfo : this->coarser_domain.getPatchInfoVector()) {
      for (int child_rank : pinfo.child_ranks) {
        if (child_rank != -1 && child_rank != rank)
          ranks_and_local_patches[child_rank][pinfo.id] = pinfo.local_index;
      }
    }

    rank_and_local_indexes_for_vector.reserve(ranks_and_local_patches.size());
    for (auto pair : ranks_and_local_patches) {
      std::vector<int> local_indexes;
      // the map should have sorted patches by id; the other processors will expect things in
      // this order
      local_indexes.reserve(pair.second.size());
      for (auto id_local_index_pair : pair.second) {
        local_indexes.push_back(id_local_index_pair.second);
      }
      rank_and_local_indexes_for_vector.emplace_back(pair.first, local_indexes);
    }

    // fill in ghost vector
    // first, assign local indexes in the ghost vector for ghost patches
    std::map<int, int> id_ghost_vector_local_index_map;
    int index = 0;
    for (int id : ghost_parents_ids) {
      id_ghost_vector_local_index_map[id] = index;
      index++;
    }

    std::map<int, std::map<int, int>>
      ranks_and_ghost_patches; // map from rank -> (map of ids -> ghost indexes). The inner map is
                               // used so that when the local indexes are iterated over, they are
                               // sorted by their corresponding id
    patches_with_ghost_parent.reserve(ghost_parents.size());
    for (auto patch_ref_wrap : ghost_parents) {
      const PatchInfo<D>& patch = patch_ref_wrap.get();
      int ghost_local_index = id_ghost_vector_local_index_map[patch.parent_id];

      ranks_and_ghost_patches[patch.parent_rank][patch.parent_id] = ghost_local_index;

      patches_with_ghost_parent.emplace_back(ghost_local_index, patch);
    }

    rank_and_local_indexes_for_ghost_vector.reserve(ranks_and_ghost_patches.size());
    for (auto pair : ranks_and_ghost_patches) {
      // the map should have sorted patches by id; the other processors will expect things in
      // this order
      std::vector<int> local_indexes;
      local_indexes.reserve(pair.second.size());
      for (auto id_local_index_pair : pair.second) {
        local_indexes.push_back(id_local_index_pair.second);
      }
      rank_and_local_indexes_for_ghost_vector.emplace_back(pair.first, local_indexes);
    }
  }
  /**
   * @brief Destroy the InterLevelComm object.
   */
  ~InterLevelComm()
  {
    if (!(send_requests.empty() && recv_requests.empty())) {
      // the destructor is being called with unfinished communication;
      // finish the communication (this will free MPI-allocated resources)
      MPI_Waitall(send_requests.size(), send_requests.data(), MPI_STATUSES_IGNORE);
      MPI_Waitall(recv_requests.size(), recv_requests.data(), MPI_STATUSES_IGNORE);
    }
  }

  /**
   * @brief Allocate a new vector for ghost patch values.
   *
   * @param num_components the number of components in the vector
   * @return the newly allocated vector
   */
  Vector<D> getNewGhostVector(int num_components) const
  {
    return Vector<D>(
      finer_domain.getCommunicator(), ns, num_components, num_ghost_patches, num_ghost_cells);
  }

  /**
   * @brief Get the vector of finer patches that have a local parent.
   *
   * Each pair holds the local index of the parent patch in the coarser vector and a reference to
   * the finer patch's PatchInfo.
   */
  const std::vector<std::pair<int, std::reference_wrapper<const PatchInfo<D>>>>&
  getPatchesWithLocalParent() const
  {
    return patches_with_local_parent;
  }

  /**
   * @brief Get the vector of finer patches that have a ghost parent.
   *
   * Each pair holds the local index of the parent patch in the ghost vector and a reference to
   * the finer patch's PatchInfo.
   */
  const std::vector<std::pair<int, std::reference_wrapper<const PatchInfo<D>>>>&
  getPatchesWithGhostParent() const
  {
    return patches_with_ghost_parent;
  }

  /**
   * @brief Start the communication for sending ghost values.
   *
   * The values in the ghost vector are sent to the ranks that own the corresponding parent
   * patches, where they are added to the given vector when sendGhostPatchesFinish is called.
   *
   * @param vector the vector on the coarser domain
   * @param ghost_vector the associated ghost vector
   */
  void sendGhostPatchesStart(Vector<D>& vector, const Vector<D>& ghost_vector)
  {
    if (communicating) {
      if (sending) {
        throw RuntimeError("InterLevelComm has a sendGhostPatches posted that is unfinished");
      } else {
        throw RuntimeError("InterLevelComm has a getGhostPatches posted that is unfinished");
      }
    }

    // keep track of which vectors we are using
    current_ghost_vector = &ghost_vector;
    current_vector = &vector;

    // post receives
    recv_buffers.reserve(rank_and_local_indexes_for_vector.size());
    recv_requests.reserve(rank_and_local_indexes_for_vector.size());
    for (auto rank_indexes_pair : rank_and_local_indexes_for_vector) {
      // allocate buffer
      recv_buffers.emplace_back(vector.getNumComponents() * patch_size *
                                rank_indexes_pair.second.size());

      // post the receive
      int rank = rank_indexes_pair.first;
      recv_requests.emplace_back();
      MPI_Irecv(recv_buffers.back().data(),
                recv_buffers.back().size(),
                MPI_DOUBLE,
                rank,
                0,
                comm.getMPIComm(),
                &recv_requests.back());
    }
    send_buffers.reserve(rank_and_local_indexes_for_ghost_vector.size());
    send_requests.reserve(rank_and_local_indexes_for_ghost_vector.size());
    // post sends
    for (auto rank_indexes_pair : rank_and_local_indexes_for_ghost_vector) {
      // allocate buffer
      send_buffers.emplace_back(vector.getNumComponents() * patch_size *
                                rank_indexes_pair.second.size());

      // fill buffer with values
      int buffer_idx = 0;
      for (int local_index : rank_indexes_pair.second) {
        PatchView<const double, D> view = ghost_vector.getPatchView(local_index);
        Loop::OverAllIndexes<D + 1>(view, [&](const std::array<int, D + 1>& coord) {
          send_buffers.back()[buffer_idx] = view[coord];
          buffer_idx++;
        });
      }

      // post the send
      int rank = rank_indexes_pair.first;
      send_requests.emplace_back();
      MPI_Isend(send_buffers.back().data(),
                send_buffers.back().size(),
                MPI_DOUBLE,
                rank,
                0,
                comm.getMPIComm(),
                &send_requests.back());
    }

    // set state
    communicating = true;
    sending = true;
  }
  /**
   * @brief Finish the communication for sending ghost values.
   *
   * The received ghost patch values are added to their parent patches in the given vector.
   *
   * @param vector the vector on the coarser domain
   * @param ghost_vector the associated ghost vector
   */
  void sendGhostPatchesFinish(Vector<D>& vector, const Vector<D>& ghost_vector)
  {
    if (!communicating) {
      throw RuntimeError(
        "InterLevelComm cannot finish sendGhostPatches since communication was not started");
    } else if (!sending) {
      throw RuntimeError("InterLevelComm sendGhostPatchesFinish is being called after "
                         "getGhostPatchesStart was called");
    }
    if (&vector != current_vector) {
      throw RuntimeError("InterLevelComm sendGhostPatchesFinish is being called with a different "
                         "vector than when sendGhostPatchesStart was called");
    }
    if (&ghost_vector != current_ghost_vector) {
      throw RuntimeError("InterLevelComm sendGhostPatchesFinish is being called with a different "
                         "ghost vector than when sendGhostPatchesStart was called");
    }

    // finish the receives
    for (size_t i = 0; i < rank_and_local_indexes_for_vector.size(); i++) {
      int finished_idx;
      MPI_Waitany(recv_requests.size(), recv_requests.data(), &finished_idx, MPI_STATUS_IGNORE);

      // get the local indexes for the buffer that was received
      const std::vector<int>& local_indexes =
        rank_and_local_indexes_for_vector.at(finished_idx).second;

      // add the values in the buffer to the vector
      std::vector<double>& buffer = recv_buffers.at(finished_idx);
      int buffer_idx = 0;
      for (int local_index : local_indexes) {
        PatchView<double, D> view = vector.getPatchView(local_index);
        Loop::OverAllIndexes<D + 1>(view, [&](const std::array<int, D + 1>& coord) {
          view[coord] += buffer[buffer_idx];
          buffer_idx++;
        });
      }
    }

    // wait for the sends to finish
    MPI_Waitall(send_requests.size(), send_requests.data(), MPI_STATUSES_IGNORE);

    // clear buffers
    recv_requests.clear();
    recv_buffers.clear();
    send_requests.clear();
    send_buffers.clear();

    // set state
    communicating = false;
    current_ghost_vector = nullptr;
    current_vector = nullptr;
  }
  /**
   * @brief Start the communication for getting ghost values.
   *
   * The values of off-rank parent patches in the given vector are sent to the ranks that need
   * them and copied into the ghost vector when getGhostPatchesFinish is called.
   *
   * @param vector the vector on the coarser domain
   * @param ghost_vector the associated ghost vector
   */
  void getGhostPatchesStart(const Vector<D>& vector, Vector<D>& ghost_vector)
  {
    if (communicating) {
      if (sending) {
        throw RuntimeError("InterLevelComm has a sendGhostPatches posted that is unfinished");
      } else {
        throw RuntimeError("InterLevelComm has a getGhostPatches posted that is unfinished");
      }
    }

    // keep track of which vectors we are using
    current_ghost_vector = &ghost_vector;
    current_vector = &vector;

    // post receives
    recv_buffers.reserve(rank_and_local_indexes_for_ghost_vector.size());
    recv_requests.reserve(rank_and_local_indexes_for_ghost_vector.size());
    for (auto rank_indexes_pair : rank_and_local_indexes_for_ghost_vector) {
      // allocate buffer
      recv_buffers.emplace_back(vector.getNumComponents() * patch_size *
                                rank_indexes_pair.second.size());

      // post the receive
      int rank = rank_indexes_pair.first;
      recv_requests.emplace_back();
      MPI_Irecv(recv_buffers.back().data(),
                recv_buffers.back().size(),
                MPI_DOUBLE,
                rank,
                0,
                comm.getMPIComm(),
                &recv_requests.back());
    }
    send_buffers.reserve(rank_and_local_indexes_for_vector.size());
    send_requests.reserve(rank_and_local_indexes_for_vector.size());
    // post sends
    for (auto rank_indexes_pair : rank_and_local_indexes_for_vector) {
      // allocate buffer
      send_buffers.emplace_back(vector.getNumComponents() * patch_size *
                                rank_indexes_pair.second.size());

      // fill buffer with values
      int buffer_idx = 0;
      for (int local_index : rank_indexes_pair.second) {
        PatchView<const double, D> local_view = vector.getPatchView(local_index);
        Loop::OverAllIndexes<D + 1>(local_view, [&](const std::array<int, D + 1>& coord) {
          send_buffers.back()[buffer_idx] = local_view[coord];
          buffer_idx++;
        });
      }

      // post the send
      int rank = rank_indexes_pair.first;
      send_requests.emplace_back();
      MPI_Isend(send_buffers.back().data(),
                send_buffers.back().size(),
                MPI_DOUBLE,
                rank,
                0,
                comm.getMPIComm(),
                &send_requests.back());
    }

    // set state
    communicating = true;
    sending = false;
  }
  /**
   * @brief Finish the communication for getting ghost values.
   *
   * The received parent patch values are copied into the ghost vector.
   *
   * @param vector the vector on the coarser domain
   * @param ghost_vector the associated ghost vector
   */
  void getGhostPatchesFinish(const Vector<D>& vector, Vector<D>& ghost_vector)
  {
    if (!communicating) {
      throw RuntimeError(
        "InterLevelComm cannot finish getGhostPatches since communication was not started");
    } else if (sending) {
      throw RuntimeError("InterLevelComm getGhostPatchesFinish is being called after "
                         "sendGhostPatchesStart was called");
    }
    if (&vector != current_vector) {
      throw RuntimeError("InterLevelComm getGhostPatchesFinish is being called with a different "
                         "vector than when getGhostPatchesStart was called");
    }
    if (&ghost_vector != current_ghost_vector) {
      throw RuntimeError("InterLevelComm getGhostPatchesFinish is being called with a different "
                         "ghost vector than when getGhostPatchesStart was called");
    }

    // finish the receives
    for (size_t i = 0; i < rank_and_local_indexes_for_ghost_vector.size(); i++) {
      int finished_idx;
      MPI_Waitany(recv_requests.size(), recv_requests.data(), &finished_idx, MPI_STATUS_IGNORE);

      // get the local indexes for the buffer that was received
      const std::vector<int>& local_indexes =
        rank_and_local_indexes_for_ghost_vector.at(finished_idx).second;

      // copy the values in the buffer into the ghost vector
      std::vector<double>& buffer = recv_buffers.at(finished_idx);
      int buffer_idx = 0;
      for (int local_index : local_indexes) {
        PatchView<double, D> local_view = ghost_vector.getPatchView(local_index);
        Loop::OverAllIndexes<D + 1>(local_view, [&](const std::array<int, D + 1>& coord) {
          local_view[coord] = buffer[buffer_idx];
          buffer_idx++;
        });
      }
    }

    // wait for the sends to finish
    MPI_Waitall(send_requests.size(), send_requests.data(), MPI_STATUSES_IGNORE);

    // clear buffers
    recv_requests.clear();
    recv_buffers.clear();
    send_requests.clear();
    send_buffers.clear();

    // set state
    communicating = false;
    current_ghost_vector = nullptr;
    current_vector = nullptr;
  }
  /**
   * @brief Get the coarser Domain.
   */
  const Domain<D>& getCoarserDomain() const { return coarser_domain; }

  /**
   * @brief Get the finer Domain.
   */
  const Domain<D>& getFinerDomain() const { return finer_domain; }
};
extern template class InterLevelComm<2>;
extern template class InterLevelComm<3>;
} // namespace ThunderEgg::GMG
#endif
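
A short usage sketch may make the Start/Finish protocol above clearer. The code below is not part of InterLevelComm.h; it assumes the header is installed as <ThunderEgg/GMG/InterLevelComm.h>, that an InterLevelComm<2> has already been constructed from the coarser and finer domains, and that coarse_vec is a vector defined on the coarser domain. The function name gatherCoarseParentValues and the interpolation placeholders are purely illustrative.

// Usage sketch (hypothetical caller code, not part of InterLevelComm.h)
#include <ThunderEgg/GMG/InterLevelComm.h>

using namespace ThunderEgg;

// Gather the coarse parent values needed by each fine patch, overlapping the
// MPI traffic with work on patches whose parents are already on this rank.
void gatherCoarseParentValues(GMG::InterLevelComm<2>& ilc, const Vector<2>& coarse_vec)
{
  // one ghost patch is allocated for every off-rank parent
  Vector<2> ghost_vec = ilc.getNewGhostVector(coarse_vec.getNumComponents());

  // post the non-blocking sends and receives
  ilc.getGhostPatchesStart(coarse_vec, ghost_vec);

  // parents that live on this rank can be read directly from coarse_vec while
  // the messages are in flight
  for (const auto& pair : ilc.getPatchesWithLocalParent()) {
    PatchView<const double, 2> parent_view = coarse_vec.getPatchView(pair.first);
    const PatchInfo<2>& fine_pinfo = pair.second.get();
    // ... interpolate from parent_view onto the fine patch described by fine_pinfo ...
  }

  // wait for the communication; off-rank parent values are copied into ghost_vec
  ilc.getGhostPatchesFinish(coarse_vec, ghost_vec);

  for (const auto& pair : ilc.getPatchesWithGhostParent()) {
    PatchView<double, 2> parent_view = ghost_vec.getPatchView(pair.first);
    const PatchInfo<2>& fine_pinfo = pair.second.get();
    // ... interpolate from parent_view onto the fine patch described by fine_pinfo ...
  }
}

The sending direction follows the same pattern with sendGhostPatchesStart/sendGhostPatchesFinish, except that the communicated ghost patch values are added into the coarse vector rather than copied into the ghost vector.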