21 #ifndef THUNDEREGG_GMG_INTERLEVELCOMM_H
22 #define THUNDEREGG_GMG_INTERLEVELCOMM_H
// Number of cells along each axis of a patch in the finer domain.
70 std::array<int, D> ns;
// Count of distinct coarser-level parent patches living on other ranks;
// sizes the ghost vector produced by getNewGhostVector().
78 int num_ghost_patches;
// Finer patches whose parent patch is on this rank, paired with the parent's
// local index in a coarser-domain vector.
89 std::vector<std::pair<int, std::reference_wrapper<const PatchInfo<D>>>> patches_with_local_parent;
// Finer patches whose parent patch is on another rank, paired with the
// parent's local index in the ghost vector.
96 std::vector<std::pair<int, std::reference_wrapper<const PatchInfo<D>>>> patches_with_ghost_parent;
// For each peer rank: local indexes (into a coarser-domain vector) of the
// patches exchanged with that rank.
101 std::vector<std::pair<int, std::vector<int>>> rank_and_local_indexes_for_vector;
// For each peer rank: local indexes (into the ghost vector) of the patches
// exchanged with that rank.
106 std::vector<std::pair<int, std::vector<int>>> rank_and_local_indexes_for_ghost_vector;
// True while a send/get exchange has been started but not yet finished.
107 bool communicating =
false;
// True when the in-flight exchange was started by sendGhostPatchesStart
// (as opposed to getGhostPatchesStart).
108 bool sending =
false;
// Non-owning pointers to the vectors given to *Start, so the matching
// *Finish call can verify it receives the same objects; reset to nullptr
// when the exchange completes.
110 const Vector<D>* current_vector =
nullptr;
111 const Vector<D>* current_ghost_vector =
nullptr;
// MPI staging state for the exchange in flight: one buffer and one request
// per peer rank, in matching order.
113 std::vector<std::vector<double>> recv_buffers;
114 std::vector<MPI_Request> recv_requests;
115 std::vector<std::vector<double>> send_buffers;
116 std::vector<MPI_Request> send_requests;
// Constructor (listing starts at the initializer list): record the
// communicator, both domain levels, and the finer domain's patch geometry.
126 : comm(coarser_domain.getCommunicator())
127 , coarser_domain(coarser_domain)
128 , finer_domain(finer_domain)
129 , ns(finer_domain.getNs())
130 , num_ghost_cells(finer_domain.getNumGhostCells())
// Total values in one component of one patch, ghost cells included.
132 int my_patch_size = 1;
133 for (
size_t axis = 0; axis < D; axis++) {
134 my_patch_size *= ns[axis] + 2 * num_ghost_cells;
136 patch_size = my_patch_size;
// Classify every finer patch by where its coarser parent lives.
139 std::deque<std::pair<int, std::reference_wrapper<const PatchInfo<D>>>> local_parents;
140 std::deque<std::reference_wrapper<const PatchInfo<D>>> ghost_parents;
141 std::set<int> ghost_parents_ids;
// Coarser-domain patch id -> local index map (the loop filling it is
// elided in this listing; presumably over the coarser domain's patches).
146 std::map<int, int> coarser_domain_id_to_local_index_map;
148 coarser_domain_id_to_local_index_map[pinfo.id] = pinfo.local_index;
// NOTE(review): loop header elided — presumably iterating the finer
// domain's patches: parent on this rank -> local_parents, else ghost.
151 if (patch.parent_rank == rank) {
152 local_parents.emplace_back(coarser_domain_id_to_local_index_map[patch.parent_id], patch);
154 ghost_parents.push_back(patch);
155 ghost_parents_ids.insert(patch.parent_id);
// The std::set above counts each remote parent once even when several
// children share it.
158 num_ghost_patches = ghost_parents_ids.size();
161 patches_with_local_parent =
162 std::vector<std::pair<int, std::reference_wrapper<const PatchInfo<D>>>>(local_parents.begin(),
163 local_parents.end());
// child rank -> (patch id -> local index); std::map keeps both levels
// sorted, so the ordering matches what the peer rank computes independently.
165 std::map<int, std::map<int, int>>
166 ranks_and_local_patches;
// For each coarser patch (outer loop header elided), note the remote ranks
// that own one of its children.
171 for (
int child_rank : pinfo.child_ranks) {
172 if (child_rank != -1 && child_rank != rank)
173 ranks_and_local_patches[child_rank][pinfo.id] = pinfo.local_index;
// Flatten the nested map into (rank, local index list) pairs.
177 rank_and_local_indexes_for_vector.reserve(ranks_and_local_patches.size());
178 for (
auto pair : ranks_and_local_patches) {
179 std::vector<int> local_indexes;
182 local_indexes.reserve(pair.second.size());
183 for (
auto id_local_index_pair : pair.second) {
184 local_indexes.push_back(id_local_index_pair.second);
186 rank_and_local_indexes_for_vector.emplace_back(pair.first, local_indexes);
// Give each distinct remote parent id a slot in the ghost vector; iterating
// the std::set yields ids in ascending order.
191 std::map<int, int> id_ghost_vector_local_index_map;
193 for (
int id : ghost_parents_ids) {
194 id_ghost_vector_local_index_map[id] = index;
198 std::map<int, std::map<int, int>>
199 ranks_and_ghost_patches;
// Pair each child patch with its remote parent's ghost-vector index.
203 patches_with_ghost_parent.reserve(ghost_parents.size());
204 for (
auto patch_ref_wrap : ghost_parents) {
206 int ghost_local_index = id_ghost_vector_local_index_map[patch.
parent_id];
210 patches_with_ghost_parent.emplace_back(ghost_local_index, patch);
// Flatten ranks_and_ghost_patches into (rank, local index list) pairs; the
// std::map ordering gives a deterministic rank and index order that matches
// the peer side of the eventual MPI exchange.
// FIX: the reserve was sized from ranks_and_local_patches (the map for the
// local-parent side); use ranks_and_ghost_patches, the map actually iterated.
213 rank_and_local_indexes_for_ghost_vector.reserve(ranks_and_ghost_patches.size());
214 for (
auto pair : ranks_and_ghost_patches) {
217 std::vector<int> local_indexes;
218 local_indexes.reserve(pair.second.size());
219 for (
auto id_local_index_pair : pair.second) {
220 local_indexes.push_back(id_local_index_pair.second);
222 rank_and_local_indexes_for_ghost_vector.emplace_back(pair.first, local_indexes);
// Destructor fragment: if an exchange is still in flight, drain every
// outstanding request before the buffers they reference are destroyed.
230 if (!(send_requests.empty() && recv_requests.empty())) {
// FIX: MPI_Waitall takes an array-of-statuses argument; the constant for
// ignoring them all is MPI_STATUSES_IGNORE. MPI_STATUS_IGNORE is only valid
// for single-status calls such as MPI_Wait/MPI_Waitany.
233 MPI_Waitall(send_requests.size(), send_requests.data(), MPI_STATUSES_IGNORE);
234 MPI_Waitall(recv_requests.size(), recv_requests.data(), MPI_STATUSES_IGNORE);
// getNewGhostVector (tail of the call; signature elided in this listing):
// builds a Vector sized to hold one patch per distinct remote parent.
246 finer_domain.
getCommunicator(), ns, num_components, num_ghost_patches, num_ghost_cells);
// Accessor (name elided in this listing): patches whose coarser parent is on
// this rank, paired with the parent's local index in a coarser-domain vector.
259 const std::vector<std::pair<int, std::reference_wrapper<const PatchInfo<D>>>>&
262 return patches_with_local_parent;
// Accessor (name elided in this listing): patches whose coarser parent is on
// another rank, paired with the parent's local index in the ghost vector.
275 const std::vector<std::pair<int, std::reference_wrapper<const PatchInfo<D>>>>&
278 return patches_with_ghost_parent;
// sendGhostPatchesStart fragment: begin sending ghost-parent patch data back
// to the ranks that own those parents. Refuse to start if another exchange
// is already in flight.
297 throw RuntimeError(
"InterLevelComm has a sendGhostPatches posted that is unfinished");
299 throw RuntimeError(
"InterLevelComm has a getGhostPatches posted that is unfinished");
// Remember the vectors so the Finish call can check it gets the same pair.
304 current_ghost_vector = &ghost_vector;
305 current_vector = &vector;
// Post one receive per peer rank holding children of our local patches.
308 recv_buffers.reserve(rank_and_local_indexes_for_vector.size());
309 recv_requests.reserve(rank_and_local_indexes_for_vector.size());
310 for (
auto rank_indexes_pair : rank_and_local_indexes_for_vector) {
// Buffer sized for every component of every patch exchanged with this rank.
312 recv_buffers.emplace_back(vector.getNumComponents() * patch_size *
313 rank_indexes_pair.second.size())
316 int rank = rank_indexes_pair.first;
317 recv_requests.emplace_back();
318 MPI_Irecv(recv_buffers.back().data(),
319 recv_buffers.back().size(),
324 &recv_requests.back());
// Pack the ghost vector's patches into per-rank buffers and send them.
326 send_buffers.reserve(rank_and_local_indexes_for_ghost_vector.size());
327 send_requests.reserve(rank_and_local_indexes_for_ghost_vector.size());
329 for (
auto rank_indexes_pair : rank_and_local_indexes_for_ghost_vector) {
331 send_buffers.emplace_back(vector.getNumComponents() * patch_size *
332 rank_indexes_pair.second.size());
336 for (
int local_index : rank_indexes_pair.second) {
// NOTE(review): `view` acquisition elided — presumably the ghost vector's
// view for local_index; copied linearly into the send buffer.
338 Loop::OverAllIndexes<D + 1>(view, [&](
const std::array<int, D + 1>& coord) {
339 send_buffers.back()[buffer_idx] = view[coord];
345 int rank = rank_indexes_pair.first;
346 send_requests.emplace_back();
347 MPI_Isend(send_buffers.back().data(),
348 send_buffers.back().size(),
353 &send_requests.back());
// Mark the exchange as in flight (the `sending` flag assignment is elided
// from this listing).
357 communicating =
true;
// sendGhostPatchesFinish fragment: state checks — an exchange must be in
// flight, and it must have been started by sendGhostPatchesStart.
374 if (!communicating) {
376 "InterLevelComm cannot finish sendGhostPatches since communication was not started");
377 }
else if (!sending) {
378 throw RuntimeError(
"InterLevelComm sendGhostPatchesFinish is being called after "
379 "getGhostPatchesStart was called");
// The Finish call must be given the same vector objects as Start.
381 if (&vector != current_vector) {
382 throw RuntimeError(
"InterLevelComm sendGhostPatchesFinish is being called with a different "
383 "vector than when sendGhostPatchesStart was called");
// The Finish call must also be given the same ghost vector as Start.
385 if (&ghost_vector != current_ghost_vector) {
386 throw RuntimeError(
// FIX: corrected typo "senGhostPatchesFinish" -> "sendGhostPatchesFinish".
"InterLevelComm sendGhostPatchesFinish is being called with a different "
387 "ghost vector than when sendGhostPatchesStart was called");
// Process receives as they complete: for each finished peer, accumulate its
// buffer into the corresponding local coarse patches.
391 for (
size_t i = 0; i < rank_and_local_indexes_for_vector.size(); i++) {
393 MPI_Waitany(recv_requests.size(), recv_requests.data(), &finished_idx, MPI_STATUS_IGNORE);
// finished_idx identifies the peer; its index list and buffer share ordering.
396 const std::vector<int>& local_indexes =
397 rank_and_local_indexes_for_vector.at(finished_idx).second;
400 std::vector<double>& buffer = recv_buffers.at(finished_idx);
402 for (
int local_index : local_indexes) {
// Contributions are accumulated (+=): several children on different ranks
// may contribute to the same parent patch.
404 Loop::OverAllIndexes<D + 1>(view, [&](
const std::array<int, D + 1>& coord) {
405 view[coord] += buffer[buffer_idx];
// Wait for all sends to complete before freeing the send buffers.
// FIX: MPI_Waitall takes an array of statuses; use MPI_STATUSES_IGNORE
// (MPI_STATUS_IGNORE is the single-status variant for MPI_Wait/Waitany).
412 MPI_Waitall(send_requests.size(), send_requests.data(), MPI_STATUSES_IGNORE);
// Release staging buffers/requests and reset the in-flight markers.
415 recv_requests.clear();
416 recv_buffers.clear();
417 send_requests.clear();
418 send_buffers.clear();
421 communicating =
false;
422 current_ghost_vector =
nullptr;
423 current_vector =
nullptr;
// getGhostPatchesStart fragment: begin filling the ghost vector with
// parent-patch data from the ranks that own the parents — the reverse
// direction of sendGhostPatchesStart. Refuse to start if another exchange
// is already in flight.
441 throw RuntimeError(
"InterLevelComm has a sendGhostPatches posted that is unfinished");
443 throw RuntimeError(
"InterLevelComm has a getGhostPatches posted that is unfinished");
// Remember the vectors so the Finish call can check it gets the same pair.
448 current_ghost_vector = &ghost_vector;
449 current_vector = &vector;
// Post one receive per peer rank that owns a parent of one of our patches.
452 recv_buffers.reserve(rank_and_local_indexes_for_ghost_vector.size());
453 recv_requests.reserve(rank_and_local_indexes_for_ghost_vector.size());
454 for (
auto rank_indexes_pair : rank_and_local_indexes_for_ghost_vector) {
// Buffer sized for every component of every patch exchanged with this rank.
456 recv_buffers.emplace_back(vector.getNumComponents() * patch_size *
457 rank_indexes_pair.second.size());
460 int rank = rank_indexes_pair.first;
461 recv_requests.emplace_back();
462 MPI_Irecv(recv_buffers.back().data(),
463 recv_buffers.back().size(),
468 &recv_requests.back());
// Pack our local coarse patches and send them to each rank holding their
// children.
470 send_buffers.reserve(rank_and_local_indexes_for_vector.size());
471 send_requests.reserve(rank_and_local_indexes_for_vector.size());
473 for (
auto rank_indexes_pair : rank_and_local_indexes_for_vector) {
475 send_buffers.emplace_back(vector.getNumComponents() * patch_size *
476 rank_indexes_pair.second.size());
480 for (
int local_index : rank_indexes_pair.second) {
// NOTE(review): `local_view` acquisition elided — presumably the coarse
// vector's view for local_index; copied linearly into the send buffer.
482 Loop::OverAllIndexes<D + 1>(local_view, [&](
const std::array<int, D + 1>& coord) {
483 send_buffers.back()[buffer_idx] = local_view[coord];
489 int rank = rank_indexes_pair.first;
490 send_requests.emplace_back();
491 MPI_Isend(send_buffers.back().data(),
492 send_buffers.back().size(),
497 &send_requests.back());
// Mark the exchange as in flight (the `sending` flag assignment is elided
// from this listing).
501 communicating =
true;
// getGhostPatchesFinish fragment: an exchange must actually be in flight.
518 if (!communicating) {
// FIX: the error text named "sendGhostPatches", but this is the
// getGhostPatches path — corrected the message.
520 "InterLevelComm cannot finish getGhostPatches since communication was not started");
521 }
// The in-flight exchange must have been started by getGhostPatchesStart.
else if (sending) {
522 throw RuntimeError(
"InterLevelComm getGhostPatchesFinish is being called after "
523 "sendGhostPatchesStart was called");
// The Finish call must be given the same vector objects as Start.
525 if (&vector != current_vector) {
526 throw RuntimeError(
"InterLevelComm getGhostPatchesFinish is being called with a different "
527 "vector than when getGhostPatchesStart was called");
529 if (&ghost_vector != current_ghost_vector) {
530 throw RuntimeError(
"InterLevelComm getGhostPatchesFinish is being called with a different "
531 "ghost vector than when getGhostPatchesStart was called");
// Drain receives as they complete and copy each peer's buffer into the
// ghost vector. Plain assignment here (contrast the += accumulation in
// sendGhostPatchesFinish): each ghost patch has exactly one source rank.
535 for (
size_t i = 0; i < rank_and_local_indexes_for_ghost_vector.size(); i++) {
537 MPI_Waitany(recv_requests.size(), recv_requests.data(), &finished_idx, MPI_STATUS_IGNORE);
// finished_idx identifies the peer; its index list and buffer share ordering.
540 const std::vector<int>& local_indexes =
541 rank_and_local_indexes_for_ghost_vector.at(finished_idx).second;
544 std::vector<double>& buffer = recv_buffers.at(finished_idx);
546 for (
int local_index : local_indexes) {
548 Loop::OverAllIndexes<D + 1>(local_view, [&](
const std::array<int, D + 1>& coord) {
549 local_view[coord] = buffer[buffer_idx];
// Wait for all sends to complete before freeing the send buffers.
// FIX: MPI_Waitall takes an array of statuses; use MPI_STATUSES_IGNORE
// (MPI_STATUS_IGNORE is the single-status variant for MPI_Wait/Waitany).
556 MPI_Waitall(send_requests.size(), send_requests.data(), MPI_STATUSES_IGNORE);
// Release staging buffers/requests and reset the in-flight markers.
559 recv_requests.clear();
560 recv_buffers.clear();
561 send_requests.clear();
562 send_buffers.clear();
565 communicating =
false;
566 current_ghost_vector =
nullptr;
567 current_vector =
nullptr;
// Suppress implicit instantiation in client translation units; the 2D and 3D
// instantiations are compiled once in the library.
582 extern template class InterLevelComm<2>;
583 extern template class InterLevelComm<3>;