HiCR: include/hicr/backends/mpi/communicationManager.hpp
/*
 * Copyright 2025 Huawei Technologies Co., Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <mpi.h>
#include <set>
#include <cstring>
#include <memory>
#include <vector>
#include <hicr/core/definitions.hpp>
#include <hicr/core/communicationManager.hpp>
#include "localMemorySlot.hpp"
#include "globalMemorySlot.hpp"

namespace HiCR::backend::mpi
{

class CommunicationManager final : public HiCR::CommunicationManager
{
  public:

  CommunicationManager(MPI_Comm comm = MPI_COMM_WORLD)
    : HiCR::CommunicationManager(),
      _comm(comm)
  {
    MPI_Comm_size(_comm, &_size);
    MPI_Comm_rank(_comm, &_rank);
  }
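
  // Usage sketch (hedged; not part of the original header): constructing the manager
  // over a duplicated communicator, so HiCR's RMA traffic cannot collide with the
  // application's own use of MPI_COMM_WORLD. `dupComm` and `cm` are illustrative names:
  //
  //   MPI_Comm dupComm;
  //   MPI_Comm_dup(MPI_COMM_WORLD, &dupComm);
  //   HiCR::backend::mpi::CommunicationManager cm(dupComm);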

  ~CommunicationManager() override = default;

  /// Returns the MPI communicator used by this manager
  [[nodiscard]] const MPI_Comm getComm() const { return _comm; }

  /// Returns the size of the MPI communicator
  [[nodiscard]] const int getSize() const { return _size; }

  /// Returns the rank of the caller within the communicator
  [[nodiscard]] const int getRank() const { return _rank; }

  private:

  const MPI_Comm _comm;

  int _size{};

  int _rank{};

  // Map of deregistered-but-not-yet-destroyed global memory slots, kept so the collective destroy routine can still find them
  HiCR::CommunicationManager::globalMemorySlotTagKeyMap_t _deregisteredGlobalMemorySlotsTagKeyMap{};

  __INLINE__ void lockMPIWindow(int rank, MPI_Win *window, int MPILockType, int MPIAssert)
  {
    // Locking the MPI window to ensure the message arrives before returning.
    // The call is serialized through the manager's mutex and retried until it succeeds.
    int mpiStatus = 0;
    do {
      lock();
      mpiStatus = MPI_Win_lock(MPILockType, rank, MPIAssert, *window);
      unlock();
    }
    while (mpiStatus != MPI_SUCCESS);
  }

  __INLINE__ void unlockMPIWindow(int rank, MPI_Win *window)
  {
    // Unlocking the window after the copy is completed.
    // As above, the call is serialized and retried until it succeeds.
    int mpiStatus = 0;
    do {
      lock();
      mpiStatus = MPI_Win_unlock(rank, *window);
      unlock();
    }
    while (mpiStatus != MPI_SUCCESS);
  }

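  // The two helpers above implement MPI passive-target synchronization: an access
  // epoch on a window is opened with MPI_Win_lock and closed with MPI_Win_unlock,
  // which also completes all RMA operations issued inside the epoch. A minimal
  // standalone sketch of the pattern (illustrative only; `win`, `peer`, and `buf`
  // are assumed names, not part of this header):
  //
  //   MPI_Win_lock(MPI_LOCK_SHARED, peer, MPI_MODE_NOCHECK, win);
  //   MPI_Get(buf, 1, MPI_BYTE, peer, 0, 1, MPI_BYTE, win);
  //   MPI_Win_unlock(peer, win); // epoch closed: buf now holds the remote byte
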
  __INLINE__ void increaseWindowCounter(int rank, MPI_Win *window)
  {
    // This update should be possible in a single MPI_Accumulate or MPI_Fetch_and_op call without an exclusive lock.
    // However, the current OpenMPI implementation deadlocks on those stand-alone operations, so the exclusive
    // window lock is taken manually around the update instead.

    // Locking the MPI window to ensure the message arrives before returning
    lockMPIWindow(rank, window, MPI_LOCK_EXCLUSIVE, 0);

    // Using an atomic MPI operation to increment the counter
    const size_t one   = 1;
    size_t       value = 0;

    // There is no MPI datatype for size_t (the counters), but
    // MPI_AINT is supposed to be large enough and portable
    lock();
    auto status = MPI_Fetch_and_op(&one, &value, MPI_AINT, rank, 0, MPI_SUM, *window);
    unlock();

    // Checking execution status
    if (status != MPI_SUCCESS) HICR_THROW_RUNTIME("Failed to increase remote message counter (on operation: MPI_Fetch_and_op) for rank %d, MPI Window pointer %p", rank, window);

    // Unlocking window after the update is completed
    unlockMPIWindow(rank, window);
  }

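  // Note on the counter datatype: the counter windows hold size_t values, but the
  // transfer above uses MPI_AINT, the MPI datatype corresponding to MPI_Aint. A
  // defensive compile-time check one could add (a suggestion, not present in the
  // original):
  //
  //   static_assert(sizeof(size_t) == sizeof(MPI_Aint),
  //                 "size_t and MPI_AINT widths differ; counters would be corrupted");
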
  __INLINE__ void memcpyImpl(const std::shared_ptr<HiCR::LocalMemorySlot> &destination,
                             const size_t                                  dst_offset,
                             const std::shared_ptr<HiCR::LocalMemorySlot> &source,
                             const size_t                                  src_offset,
                             const size_t                                  size) override
  {
    // Getting slot pointers
    const auto srcPtr = source->getPointer();
    const auto dstPtr = destination->getPointer();

    // Calculating actual offsets
    const auto actualSrcPtr = (void *)(static_cast<uint8_t *>(srcPtr) + src_offset);
    const auto actualDstPtr = (void *)(static_cast<uint8_t *>(dstPtr) + dst_offset);

    // Running memcpy now
    std::memcpy(actualDstPtr, actualSrcPtr, size);

    // Increasing recv/send counters
    increaseMessageRecvCounter(*destination);
    increaseMessageSentCounter(*source);
  }

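  // A hedged usage sketch of this local-to-local path through the public interface
  // (assuming the base HiCR::CommunicationManager exposes a `memcpy` entry point
  // that dispatches to memcpyImpl; `cm`, `dst`, and `src` are illustrative names):
  //
  //   cm.memcpy(dst, /* dst_offset */ 0, src, /* src_offset */ 0, src->getSize());
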
  __INLINE__ void memcpyImpl(const std::shared_ptr<HiCR::LocalMemorySlot>  &destinationSlot,
                             size_t                                         dst_offset,
                             const std::shared_ptr<HiCR::GlobalMemorySlot> &sourceSlotPtr,
                             size_t                                         sourceOffset,
                             size_t                                         size) override
  {
    // Getting up-casted pointer for the memory slot
    auto source = dynamic_pointer_cast<mpi::GlobalMemorySlot>(sourceSlotPtr);

    // Checking whether the memory slot passed is compatible with this backend
    if (source == nullptr) HICR_THROW_LOGIC("The passed source memory slot is not supported by this backend\n");

    // Getting the rank of the source process
    const auto sourceRank = source->getRank();

    // Checking if we already acquired a lock on the memory slot
    bool isSourceSlotLockAcquired = source->getLockAcquiredValue();

    // Calculating pointer
    auto destinationPointer = (void *)(static_cast<uint8_t *>(destinationSlot->getPointer()) + dst_offset);

    // Getting data window for the involved processes
    auto sourceDataWindow = source->getDataWindow().get();

    // Getting sent message count window for the source process
    auto sourceSentMessageWindow = source->getSentMessageCountWindow().get();

    // Locking the MPI window to ensure the message arrives before returning. This will not exclude other processes from accessing the data (MPI_LOCK_SHARED)
    if (isSourceSlotLockAcquired == false) lockMPIWindow(sourceRank, sourceDataWindow, MPI_LOCK_SHARED, MPI_MODE_NOCHECK);

    // Executing the get operation
    {
      lock();
      auto status = MPI_Get(destinationPointer, (int)size, MPI_BYTE, sourceRank, (MPI_Aint)sourceOffset, (int)size, MPI_BYTE, *sourceDataWindow);
      unlock();
      if (status != MPI_SUCCESS) HICR_THROW_RUNTIME("Failed to run MPI_Get");
    }

    // Making sure the operation finished
    {
      lock();
      auto status = MPI_Win_flush(sourceRank, *sourceDataWindow);
      unlock();
      if (status != MPI_SUCCESS) HICR_THROW_RUNTIME("Failed to run MPI_Win_flush");
    }

    // Unlocking window, if taken, after the copy is completed
    if (isSourceSlotLockAcquired == false) unlockMPIWindow(sourceRank, sourceDataWindow);

    // Increasing the remote sent message counter and the local received message counter
    increaseWindowCounter(sourceRank, sourceSentMessageWindow);
    increaseMessageRecvCounter(*destinationSlot);
  }

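  // The get path above follows the standard passive-target RMA read recipe: open a
  // shared epoch (unless an exclusive lock on the slot is already held), issue
  // MPI_Get, force remote completion with MPI_Win_flush, then close the epoch.
  // Condensed standalone sketch (illustrative names, not from this header):
  //
  //   MPI_Win_lock(MPI_LOCK_SHARED, srcRank, MPI_MODE_NOCHECK, win);
  //   MPI_Get(localBuf, n, MPI_BYTE, srcRank, offset, n, MPI_BYTE, win);
  //   MPI_Win_flush(srcRank, win); // localBuf is now valid
  //   MPI_Win_unlock(srcRank, win);
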
  __INLINE__ void memcpyImpl(const std::shared_ptr<HiCR::GlobalMemorySlot> &destinationSlotPtr,
                             size_t                                         dst_offset,
                             const std::shared_ptr<HiCR::LocalMemorySlot>  &sourceSlot,
                             size_t                                         sourceOffset,
                             size_t                                         size) override
  {
    // Getting up-casted pointer for the memory slot
    auto destination = dynamic_pointer_cast<mpi::GlobalMemorySlot>(destinationSlotPtr);

    // Checking whether the memory slot passed is compatible with this backend
    if (destination == nullptr) HICR_THROW_LOGIC("The passed destination memory slot is not supported by this backend\n");

    // Getting the rank of the destination process
    const auto destinationRank = destination->getRank();

    // Checking if we already acquired a lock on the memory slot
    bool isDestinationSlotLockAcquired = destination->getLockAcquiredValue();

    // Calculating pointer
    auto sourcePointer = (void *)(static_cast<uint8_t *>(sourceSlot->getPointer()) + sourceOffset);

    // Getting data window for the involved processes
    auto destinationDataWindow = destination->getDataWindow().get();

    // Getting recv message count window for the destination process
    auto destinationRecvMessageWindow = destination->getRecvMessageCountWindow().get();

    // Locking the MPI window to ensure the message arrives before returning. This will not exclude other processes from accessing the data (MPI_LOCK_SHARED)
    if (isDestinationSlotLockAcquired == false) lockMPIWindow(destinationRank, destinationDataWindow, MPI_LOCK_SHARED, MPI_MODE_NOCHECK);

    // Executing the put operation
    {
      lock();
      auto status = MPI_Put(sourcePointer, (int)size, MPI_BYTE, destinationRank, (MPI_Aint)dst_offset, (int)size, MPI_BYTE, *destinationDataWindow);
      unlock();
      if (status != MPI_SUCCESS) HICR_THROW_RUNTIME("Failed to run data MPI_Put");
    }

    // Making sure the operation finished
    {
      lock();
      auto status = MPI_Win_flush(destinationRank, *destinationDataWindow);
      unlock();
      if (status != MPI_SUCCESS) HICR_THROW_RUNTIME("Failed to run data MPI_Win_flush");
    }

    // Unlocking window, if taken, after the copy is completed
    if (isDestinationSlotLockAcquired == false) unlockMPIWindow(destinationRank, destinationDataWindow);

    // Increasing the local sent message counter and the remote received message counter
    increaseMessageSentCounter(*sourceSlot);
    increaseWindowCounter(destinationRank, destinationRecvMessageWindow);
  }

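  // The put path mirrors the get path: MPI_Put inside a shared passive-target epoch,
  // followed by MPI_Win_flush, which completes the transfer both at the origin and
  // at the target. Condensed standalone sketch (illustrative names):
  //
  //   MPI_Win_lock(MPI_LOCK_SHARED, dstRank, MPI_MODE_NOCHECK, win);
  //   MPI_Put(localBuf, n, MPI_BYTE, dstRank, offset, n, MPI_BYTE, win);
  //   MPI_Win_flush(dstRank, win); // remote buffer is now updated
  //   MPI_Win_unlock(dstRank, win);
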
  __INLINE__ void queryMemorySlotUpdatesImpl(std::shared_ptr<HiCR::LocalMemorySlot> memorySlot) override {}

  __INLINE__ void deregisterGlobalMemorySlotImpl(const std::shared_ptr<HiCR::GlobalMemorySlot> &memorySlot) override
  {
    // Getting up-casted pointer for the slot
    auto slot = dynamic_pointer_cast<mpi::GlobalMemorySlot>(memorySlot);

    // Checking whether the slot passed is compatible with this backend
    if (slot == nullptr) HICR_THROW_LOGIC("The memory slot is not supported by this backend\n");

    // Getting the slot information
    const auto tag = slot->getGlobalTag();
    const auto key = slot->getGlobalKey();

    // Storing the deregistered slot; the successful cast above guarantees its (MPI) type is correct
    _deregisteredGlobalMemorySlotsTagKeyMap[tag][key] = slot;
  }

  __INLINE__ void fenceImpl(HiCR::GlobalMemorySlot::tag_t tag) override
  {
    MPI_Barrier(_comm);

    // Calling the slot destruction collective routine
    destroyGlobalMemorySlotsCollectiveImpl(tag);
  }

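  // Usage note (hedged): fenceImpl is collective over the manager's communicator, so
  // every rank must reach the corresponding base-class fence call for the same tag or
  // the barrier will hang. Illustrative caller-side sketch, assuming the base class
  // exposes `fence`:
  //
  //   cm.fence(tag); // called on every rank; also destroys slots queued for this tag
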
  __INLINE__ void destroyGlobalMemorySlotsCollectiveImpl(HiCR::GlobalMemorySlot::tag_t tag)
  {
    // Destruction of global memory slots marked for destruction.
    // Note: MPI expects int (not size_t) count arguments for the allgather used here, so we work with int
    int              localDestroySlotsCount = (int)getGlobalMemorySlotsToDestroyPerTag()[tag].size();
    std::vector<int> perProcessDestroySlotCount(_size);

    // Obtaining the number of slots to destroy per process in the communicator
    MPI_Allgather(&localDestroySlotsCount, 1, MPI_INT, perProcessDestroySlotCount.data(), 1, MPI_INT, _comm);

    // Calculating respective offsets; TODO fix offset types for both this method and exchangeGlobalMemorySlotsImpl
    std::vector<int> perProcessSlotOffsets(_size);
    int              currentOffset = 0;
    for (int i = 0; i < _size; i++)
    {
      perProcessSlotOffsets[i] = currentOffset;
      currentOffset += perProcessDestroySlotCount[i];
    }

    // Calculating the total number of global slots to destroy
    int globalDestroySlotsCount = 0;
    for (const auto count : perProcessDestroySlotCount) globalDestroySlotsCount += count;

    // If there are no slots to destroy from any instance, return to avoid a second round of collectives
    if (globalDestroySlotsCount == 0) return;

    // Allocating storage for global memory slot keys
    std::vector<HiCR::GlobalMemorySlot::globalKey_t> localDestroySlotKeys(localDestroySlotsCount);
    std::vector<HiCR::GlobalMemorySlot::globalKey_t> globalDestroySlotKeys(globalDestroySlotsCount);

    // Filling in the local keys storage
    for (auto i = 0; i < localDestroySlotsCount; i++)
    {
      const auto memorySlot   = getGlobalMemorySlotsToDestroyPerTag()[tag][i];
      const auto key          = memorySlot->getGlobalKey();
      localDestroySlotKeys[i] = key;
    }

    // Exchanging global keys
    MPI_Allgatherv(localDestroySlotKeys.data(),
                   localDestroySlotsCount,
                   MPI_UNSIGNED_LONG,
                   globalDestroySlotKeys.data(),
                   perProcessDestroySlotCount.data(),
                   perProcessSlotOffsets.data(),
                   MPI_UNSIGNED_LONG,
                   _comm);

    // Deduplicating the global keys, as more than one process might want to destroy the same key
    std::set<HiCR::GlobalMemorySlot::globalKey_t> globalDestroySlotKeysSet(globalDestroySlotKeys.begin(), globalDestroySlotKeys.end());

    // Now we can iterate over the global slots to destroy, one by one
    for (auto key : globalDestroySlotKeysSet)
    {
      std::shared_ptr<HiCR::GlobalMemorySlot> memorySlot = nullptr;

      // Getting the memory slot to destroy: first check the standard map
      if (getGlobalMemorySlotTagKeyMap()[tag].contains(key))
      {
        memorySlot = getGlobalMemorySlotTagKeyMap()[tag].at(key);

        // Deregister here, because a later destroy would otherwise try (and fail) to destroy the slot again
        getGlobalMemorySlotTagKeyMap()[tag].erase(key);
      }
      // If not found, check the deregistered map
      else if (_deregisteredGlobalMemorySlotsTagKeyMap[tag].contains(key))
      {
        memorySlot = _deregisteredGlobalMemorySlotsTagKeyMap[tag].at(key);
        _deregisteredGlobalMemorySlotsTagKeyMap[tag].erase(key);
      }
      else
        HICR_THROW_FATAL("Could not find memory slot to destroy in this backend. Tag: %d, Key: %lu", tag, key);

      // Destroying the memory slot collectively
      destroyGlobalMemorySlotImpl(memorySlot);
    }
  }

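  // The offset computation above is an exclusive prefix sum over the per-process
  // counts, yielding the displacement vector MPI_Allgatherv expects. An equivalent
  // standalone sketch using the standard library (illustrative; requires <numeric>):
  //
  //   std::vector<int> displs(counts.size(), 0);
  //   std::exclusive_scan(counts.begin(), counts.end(), displs.begin(), 0);
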
  __INLINE__ void exchangeGlobalMemorySlotsImpl(HiCR::GlobalMemorySlot::tag_t tag, const std::vector<globalKeyMemorySlotPair_t> &memorySlots) override
  {
    // Obtaining the number of local slots to exchange
    int localSlotCount = (int)memorySlots.size();

    // Obtaining the number of slots to exchange per process in the communicator
    std::vector<int> perProcessSlotCount(_size);
    lock();
    MPI_Allgather(&localSlotCount, 1, MPI_INT, perProcessSlotCount.data(), 1, MPI_INT, _comm);
    unlock();

    // Calculating respective offsets
    std::vector<int> perProcessSlotOffsets(_size);
    int              currentOffset = 0;
    for (int i = 0; i < _size; i++)
    {
      perProcessSlotOffsets[i] = currentOffset;
      currentOffset += perProcessSlotCount[i];
    }

    // Calculating the total number of global slots
    int globalSlotCount = 0;
    for (const auto count : perProcessSlotCount) globalSlotCount += count;

    // Allocating storage for local and global memory slot sizes, keys and process ids
    std::vector<size_t>                              localSlotSizes(localSlotCount);
    std::vector<size_t>                              globalSlotSizes(globalSlotCount);
    std::vector<HiCR::GlobalMemorySlot::globalKey_t> localSlotKeys(localSlotCount);
    std::vector<HiCR::GlobalMemorySlot::globalKey_t> globalSlotKeys(globalSlotCount);
    std::vector<int>                                 localSlotProcessId(localSlotCount);
    std::vector<int>                                 globalSlotProcessId(globalSlotCount);

    // Filling in the local sizes and keys storage
    for (size_t i = 0; i < memorySlots.size(); i++)
    {
      const auto key        = memorySlots[i].first;
      const auto memorySlot = std::dynamic_pointer_cast<HiCR::backend::mpi::LocalMemorySlot>(memorySlots[i].second);
      if (memorySlot.get() == nullptr) HICR_THROW_LOGIC("Trying to use MPI to promote a non-MPI local memory slot.");
      localSlotSizes[i]     = memorySlot->getSize();
      localSlotKeys[i]      = key;
      localSlotProcessId[i] = _rank;
    }

    // Exchanging global sizes, keys and process ids
    lock();
    MPI_Allgatherv(
      localSlotSizes.data(), localSlotCount, MPI_UNSIGNED_LONG, globalSlotSizes.data(), perProcessSlotCount.data(), perProcessSlotOffsets.data(), MPI_UNSIGNED_LONG, _comm);
    MPI_Allgatherv(
      localSlotKeys.data(), localSlotCount, MPI_UNSIGNED_LONG, globalSlotKeys.data(), perProcessSlotCount.data(), perProcessSlotOffsets.data(), MPI_UNSIGNED_LONG, _comm);
    MPI_Allgatherv(localSlotProcessId.data(), localSlotCount, MPI_INT, globalSlotProcessId.data(), perProcessSlotCount.data(), perProcessSlotOffsets.data(), MPI_INT, _comm);
    unlock();

    // Also creating a pointer vector to remember local pointers, when required for memcpys
    std::vector<void **>                                globalSlotPointers(globalSlotCount);
    std::vector<std::shared_ptr<HiCR::LocalMemorySlot>> globalSourceSlots(globalSlotCount);
    size_t                                              localPointerPos = 0;
    for (size_t i = 0; i < globalSlotPointers.size(); i++)
    {
      // If the rank associated with this slot is remote, don't store the pointer; otherwise, store it
      if (globalSlotProcessId[i] != _rank)
      {
        globalSlotPointers[i] = nullptr;
        globalSourceSlots[i]  = nullptr;
      }
      else
      {
        const auto memorySlot = memorySlots[localPointerPos++].second;
        globalSlotPointers[i] = &memorySlot->getPointer();
        globalSourceSlots[i]  = memorySlot;
      }
    }

    // Now creating global slots and their MPI windows
    for (size_t i = 0; i < globalSlotProcessId.size(); i++)
    {
      // Creating new memory slot object
      auto memorySlot = std::make_shared<mpi::GlobalMemorySlot>(globalSlotProcessId[i], tag, globalSlotKeys[i], globalSourceSlots[i]);

      // Allocating MPI windows
      memorySlot->getDataWindow()             = std::make_unique<MPI_Win>();
      memorySlot->getRecvMessageCountWindow() = std::make_unique<MPI_Win>();
      memorySlot->getSentMessageCountWindow() = std::make_unique<MPI_Win>();

      // Temporary storage for the pointer returned by MPI_Win_allocate. We will assign it as the new internal storage of the local memory slot
      void *ptr = nullptr;

      // Creating MPI window for data transfers
      lock();
      auto status = MPI_Win_allocate(globalSlotProcessId[i] == _rank ? (int)globalSlotSizes[i] : 0, 1, MPI_INFO_NULL, _comm, &ptr, memorySlot->getDataWindow().get());
      MPI_Win_set_errhandler(*memorySlot->getDataWindow(), MPI_ERRORS_RETURN);
      unlock();

      // Checking the allocation status before touching ptr below
      if (status != MPI_SUCCESS) HICR_THROW_RUNTIME("Failed to create MPI data window on exchange global memory slots.");

      // Unfortunately, we need to do an effective duplication of the original local memory slot storage,
      // since no modern MPI library supports MPI_Win_create over user-allocated storage anymore
      if (globalSlotProcessId[i] == _rank)
      {
        // Copying existing data over to the new storage
        std::memcpy(ptr, *(globalSlotPointers[i]), globalSlotSizes[i]);

        // Freeing up the old storage
        lock();
        MPI_Free_mem(*(globalSlotPointers[i]));
        unlock();

        // Swapping pointers
        *(globalSlotPointers[i]) = ptr;
      }

      // Creating MPI window for the received message count
      lock();
      status = MPI_Win_allocate(globalSlotProcessId[i] == _rank ? sizeof(size_t) : 0, 1, MPI_INFO_NULL, _comm, &ptr, memorySlot->getRecvMessageCountWindow().get());
      MPI_Win_set_errhandler(*memorySlot->getRecvMessageCountWindow(), MPI_ERRORS_RETURN);
      unlock();

      if (status != MPI_SUCCESS) HICR_THROW_RUNTIME("Failed to create MPI received message count window on exchange global memory slots.");

      // Creating MPI window for the sent message count
      lock();
      status = MPI_Win_allocate(globalSlotProcessId[i] == _rank ? sizeof(size_t) : 0, 1, MPI_INFO_NULL, _comm, &ptr, memorySlot->getSentMessageCountWindow().get());
      MPI_Win_set_errhandler(*memorySlot->getSentMessageCountWindow(), MPI_ERRORS_RETURN);
      unlock();

      if (status != MPI_SUCCESS) HICR_THROW_RUNTIME("Failed to create MPI sent message count window on exchange global memory slots.");

      // Registering global slot
      registerGlobalMemorySlot(memorySlot);
    }
  }

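  // End-to-end usage sketch of the exchange protocol (hedged: assumes the base class
  // exposes exchangeGlobalMemorySlots/fence/getGlobalMemorySlot entry points that
  // dispatch to the Impl methods, and a HiCR memory manager for local registration;
  // all names are illustrative):
  //
  //   auto local = memoryManager.registerLocalMemorySlot(memorySpace, buffer, size);
  //   cm.exchangeGlobalMemorySlots(tag, {{myKey, local}}); // collective over the communicator
  //   cm.fence(tag);
  //   auto remote = cm.getGlobalMemorySlot(tag, peerKey);  // look up a peer's promoted slot
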
  __INLINE__ void destroyGlobalMemorySlotImpl(std::shared_ptr<HiCR::GlobalMemorySlot> memorySlotPtr) override
  {
    // Getting up-casted pointer for the memory slot
    auto memorySlot = dynamic_pointer_cast<mpi::GlobalMemorySlot>(memorySlotPtr);

    // Checking whether the memory slot passed is compatible with this backend
    if (memorySlot == nullptr) HICR_THROW_LOGIC("The memory slot is not supported by this backend\n");

    auto status = MPI_Win_free(memorySlot->getDataWindow().get());
    if (status != MPI_SUCCESS) HICR_THROW_RUNTIME("On destroy global memory slot, could not free MPI data window");

    status = MPI_Win_free(memorySlot->getRecvMessageCountWindow().get());
    if (status != MPI_SUCCESS) HICR_THROW_RUNTIME("On destroy global memory slot, could not free MPI recv message count window");

    status = MPI_Win_free(memorySlot->getSentMessageCountWindow().get());
    if (status != MPI_SUCCESS) HICR_THROW_RUNTIME("On destroy global memory slot, could not free MPI sent message count window");
  }

  __INLINE__ bool acquireGlobalLockImpl(std::shared_ptr<HiCR::GlobalMemorySlot> memorySlot) override
  {
    // Getting up-casted pointer for the memory slot
    auto m = dynamic_pointer_cast<mpi::GlobalMemorySlot>(memorySlot);

    // Checking whether the memory slot passed is compatible with this backend
    if (m == nullptr) HICR_THROW_LOGIC("The passed memory slot is not supported by this backend\n");

    // Locking access to all relevant memory slot windows
    lockMPIWindow(m->getRank(), m->getDataWindow().get(), MPI_LOCK_EXCLUSIVE, 0);

    // Setting memory slot lock as acquired
    m->setLockAcquiredValue(true);

    // This function is assumed to always succeed
    return true;
  }

  __INLINE__ void releaseGlobalLockImpl(std::shared_ptr<HiCR::GlobalMemorySlot> memorySlot) override
  {
    // Getting up-casted pointer for the memory slot
    auto m = dynamic_pointer_cast<mpi::GlobalMemorySlot>(memorySlot);

    // Checking whether the memory slot passed is compatible with this backend
    if (m == nullptr) HICR_THROW_LOGIC("The passed memory slot is not supported by this backend\n");

    // Releasing access to all relevant memory slot windows
    unlockMPIWindow(m->getRank(), m->getDataWindow().get());

    // Setting memory slot lock as released
    m->setLockAcquiredValue(false);
  }

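  // Usage sketch for the global lock pair (hedged: assumes the base class exposes
  // acquireGlobalLock/releaseGlobalLock entry points; names are illustrative). While
  // the exclusive epoch is held, the memcpy paths above skip their own per-operation
  // window locking, as signalled by getLockAcquiredValue():
  //
  //   cm.acquireGlobalLock(remoteSlot);          // exclusive epoch on the data window
  //   cm.memcpy(localSlot, 0, remoteSlot, 0, n); // runs inside the held epoch
  //   cm.releaseGlobalLock(remoteSlot);
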
  std::shared_ptr<HiCR::GlobalMemorySlot> getGlobalMemorySlotImpl(HiCR::GlobalMemorySlot::tag_t tag, HiCR::GlobalMemorySlot::globalKey_t globalKey) override { return nullptr; }
};

} // namespace HiCR::backend::mpi