GCC Code Coverage Report


Directory: ./
File: tasks/guseva_a_hypercube/mpi/include/hypercube.hpp
Date: 2026-01-10 02:40:41
            Exec  Total  Coverage
Lines:        36     37     97.3%
Functions:     3      3    100.0%
Branches:     13     20     65.0%

Line Branch Exec Source
1 #pragma once
2
3 #include <mpi.h>
4
5 #include <cmath>
6 #include <memory>
7 #include <stdexcept>
8 namespace guseva_a_hypercube {
9
10 class Hypercube {
11 private:
12 int rank_ = 0;
13 int size_ = 0;
14 int ndims_ = 0;
15 MPI_Comm comm_{};
16
17 16 explicit Hypercube(MPI_Comm comm) : comm_(comm) {
18 16 MPI_Comm_rank(comm_, &rank_);
19 16 MPI_Comm_size(comm_, &size_);
20 // NOLINTNEXTLINE // to suppress "ndims_ should be initialized in a member initializer of the constructor"
21 16 ndims_ = static_cast<int>(std::floor(std::log2(size_)));
22 16 }
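
The constructor derives the cube dimension from the communicator size as ndims_ = floor(log2(size_)): 16 ranks give a 4-dimensional cube, and a non-power-of-two size rounds down to the largest cube that fits. A minimal standalone sketch of the same computation (no MPI needed; the sample sizes are illustrative):

    #include <cmath>
    #include <iostream>

    int main() {
      // Largest hypercube dimension that fits in `size` ranks:
      // 16 -> 4, 20 -> 4 (rounds down), 8 -> 3, 5 -> 2.
      for (int size : {16, 20, 8, 5}) {
        int ndims = static_cast<int>(std::floor(std::log2(size)));
        std::cout << "size=" << size << " -> ndims=" << ndims << '\n';
      }
    }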
23
24 public:
25 16 static std::unique_ptr<Hypercube> Create() {
26 16 int world_size = 0;
27 16 int world_rank = 0;
28
29 16 MPI_Comm_size(MPI_COMM_WORLD, &world_size);
30 16 MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
31
32 16 int ndims = static_cast<int>(std::floor(std::log2(world_size)));
33 16 int real_size = static_cast<int>(std::pow(2, ndims));
34
35 1/2 16 int color = (world_rank < real_size) ? 0 : MPI_UNDEFINED;
          ✗ Branch 0 not taken.
          ✓ Branch 1 taken 16 times.
36 16 MPI_Comm hypercube_comm{};
37 16 MPI_Comm_split(MPI_COMM_WORLD, color, world_rank, &hypercube_comm);
38
39 1/2 16 if (hypercube_comm == MPI_COMM_NULL) {
          ✗ Branch 0 not taken.
          ✓ Branch 1 taken 16 times.
40 return nullptr;
41 }
42
43 1/2 16 return std::unique_ptr<Hypercube>(new Hypercube(hypercube_comm));
          ✓ Branch 2 taken 16 times.
          ✗ Branch 3 not taken.
44 }
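
Create() carves the largest power-of-two subset out of MPI_COMM_WORLD via MPI_Comm_split: ranks below real_size pass color 0 and join the hypercube communicator, the rest pass MPI_UNDEFINED, receive MPI_COMM_NULL, and are handed nullptr. A minimal driver sketch, assuming the header above is on the include path (the include line is illustrative):

    #include <mpi.h>
    #include <iostream>
    #include "hypercube.hpp"

    int main(int argc, char **argv) {
      MPI_Init(&argc, &argv);
      {
        // Ranks outside the power-of-two subset get nullptr and skip the cube.
        auto cube = guseva_a_hypercube::Hypercube::Create();
        if (cube) {
          std::cout << "rank " << cube->GetRank() << " of " << cube->GetSize() << '\n';
        }
      }  // scope ends here, so ~Hypercube frees the communicator before MPI_Finalize
      MPI_Finalize();
      return 0;
    }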
45
46 [[nodiscard]] int GetRank() const {
47 1/2 16 return rank_;
          ✓ Branch 0 taken 16 times.
          ✗ Branch 1 not taken.
48 }
49
50 [[nodiscard]] int GetSize() const {
51 1/2 16 return size_;
          ✓ Branch 0 taken 16 times.
          ✗ Branch 1 not taken.
52 }
53
54 template <typename DataType>
55 16 void ReduceSum(DataType local_value, MPI_Datatype mpi_datatype, DataType &result_buf, int root = 0) const {
56 1/2 16 if (comm_ == MPI_COMM_NULL) {
          ✗ Branch 0 not taken.
          ✓ Branch 1 taken 16 times.
57 return;
58 }
59
60 16 DataType current_value = local_value;
61 2/2 32 for (int dim = 0; dim < ndims_; ++dim) {
          ✓ Branch 0 taken 16 times.
          ✓ Branch 1 taken 16 times.
62 16 int neighbor = rank_ ^ (1 << dim);
63
64 16 DataType received_value = 0;
65 MPI_Status status;
66 2/2 16 if ((rank_ >> dim) & 1) {
          ✓ Branch 0 taken 8 times.
          ✓ Branch 1 taken 8 times.
67 8 MPI_Send(&current_value, 1, mpi_datatype, neighbor, dim, comm_);
68 8 MPI_Recv(&received_value, 1, mpi_datatype, neighbor, dim, comm_, &status);
69 } else {
70 8 MPI_Recv(&received_value, 1, mpi_datatype, neighbor, dim, comm_, &status);
71 8 MPI_Send(&current_value, 1, mpi_datatype, neighbor, dim, comm_);
72 }
73
74 16 current_value += received_value;
75 }
76 2/2 16 if (rank_ == root) {
          ✓ Branch 0 taken 8 times.
          ✓ Branch 1 taken 8 times.
77 8 result_buf = current_value;
78 }
79 }
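
ReduceSum is a recursive-doubling exchange: in step dim every rank pairs with rank_ XOR (1 << dim), the partner whose bit is set sends first to avoid deadlock, and both add what they receive. After ndims_ steps each rank holds the full sum, and only root copies it into result_buf. A usage sketch extending the driver above, assuming cube came from Create() on 16 ranks:

    // Each rank contributes its own id; with 16 ranks the sum is
    // 0 + 1 + ... + 15 = 120, delivered to root (rank 0).
    int local = cube->GetRank();
    int sum = 0;
    cube->ReduceSum(local, MPI_INT, sum, /*root=*/0);
    if (cube->GetRank() == 0) {
      std::cout << "sum = " << sum << '\n';
    }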
80
81 template <typename DataType>
82 void SendRecv(int sender, int receiver, DataType message, MPI_Datatype mpi_datatype, DataType &dst_buff) const {
83 if (sender < 0 || receiver < 0 || sender >= size_ || receiver >= size_) {
84 throw std::invalid_argument("Sender or receiver is out of hypercube.");
85 }
86 int current = sender;
87 DataType current_msg = message;
88
89 while (current != receiver) {
90 int route = current ^ receiver;
91 int dim = 0;
92 while ((route & (1 << dim)) == 0) {
93 dim++;
94 }
95
96 int next = current ^ (1 << dim);
97
98 DataType recv_msg = 0;
99 MPI_Status status;
100
101 if (rank_ == current) {
102 MPI_Send(&current_msg, 1, mpi_datatype, next, 0, comm_);
103 }
104 if (rank_ == next) {
105 MPI_Recv(&recv_msg, 1, mpi_datatype, current, 0, comm_, &status);
106 }
107
108 current = next;
109 current_msg = recv_msg;
110 }
111
112 if (rank_ == receiver) {
113 dst_buff = current_msg;
114 }
115 }
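
SendRecv routes a message with dimension-order (e-cube) routing: route = current XOR receiver marks the address bits that still differ, and each iteration corrects the lowest such bit with one hop. Every rank walks the same route, so the call must be made on all ranks of the cube, and only the sender's message value is actually forwarded. A usage sketch continuing the same driver, with hypothetical endpoints on a cube of at least 8 ranks:

    // Route 2 -> 5 on a 3-D cube: 010 ^ 101 = 111, so the hops fix
    // bit 0, then bit 1, then bit 2: 2 -> 3 -> 1 -> 5.
    double payload = 3.14;  // only rank 2's value matters here
    double out = 0.0;
    cube->SendRecv(/*sender=*/2, /*receiver=*/5, payload, MPI_DOUBLE, out);
    if (cube->GetRank() == 5) {
      std::cout << "received " << out << '\n';
    }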
116
117 ~Hypercube() {
118 1/2 16 if (comm_ != MPI_COMM_NULL) {
           ✓ Branch 0 taken 16 times.
           ✗ Branch 1 not taken.
119 16 MPI_Comm_free(&comm_);
120 }
121 16 }
122 };
123 } // namespace guseva_a_hypercube
124