GCC Code Coverage Report


Directory: ./
File: tasks/kolotukhin_a_hypercube/mpi/src/ops_mpi.cpp
Date: 2026-01-27 01:59:34
            Exec   Total   Coverage
Lines:        95     121      78.5%
Functions:    11      14      78.6%
Branches:     45     100      45.0%

Line Branch Exec Source
1 #include "kolotukhin_a_hypercube/mpi/include/ops_mpi.hpp"
2
3 #include <mpi.h>
4
5 #include <cstddef>
6 #include <numeric>
7 #include <utility>
8 #include <vector>
9
10 #include "kolotukhin_a_hypercube/common/include/common.hpp"
11
12 namespace kolotukhin_a_hypercube {
13
14 1/2 14 KolotukhinAHypercubeMPI::KolotukhinAHypercubeMPI(const InType &in) {
    ✓ Branch 1 taken 14 times.
    ✗ Branch 2 not taken.
15 SetTypeOfTask(GetStaticTypeOfTask());
16 1/2 14 GetInput() = in;
    ✓ Branch 1 taken 14 times.
    ✗ Branch 2 not taken.
17 14 GetOutput() = 0;
18 14 }
19
20 int KolotukhinAHypercubeMPI::GetNeighbor(int rank, int dim) {
21 int neighbor = rank ^ (1 << dim);
22 return neighbor;
23 }
24
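Note on GetNeighbor above: flipping bit dim of the rank yields the neighbor along that dimension of the hypercube. A minimal standalone sketch, not part of the covered file (the ranks below are illustrative only):

    #include <cassert>

    int GetNeighbor(int rank, int dim) { return rank ^ (1 << dim); }

    int main() {
      // rank 5 is 0b101; flipping dimension 1 toggles the middle bit.
      assert(GetNeighbor(5, 1) == 7);  // 0b101 -> 0b111
      assert(GetNeighbor(5, 0) == 4);  // 0b101 -> 0b100
      // Applying the same flip twice returns to the original rank.
      assert(GetNeighbor(GetNeighbor(5, 2), 2) == 5);
      return 0;
    }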
25 int KolotukhinAHypercubeMPI::CalculateHypercubeDimension(int num_processes) {
26 if (num_processes <= 1) {
27 return 0;
28 }
29 int dimension = 0;
30 int capacity = 1;
31 2/4 28 while (capacity < num_processes) {
    ✓ Branch 0 taken 14 times.
    ✓ Branch 1 taken 14 times.
    ✗ Branch 2 not taken.
    ✗ Branch 3 not taken.
32 14 dimension++;
33 14 capacity *= 2;
34 }
35 return dimension;
36 }
37
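For reference, the dimension computed above is the smallest d with 2^d >= num_processes. A standalone check that mirrors the covered function (the process counts below are illustrative only):

    #include <cassert>

    int CalculateHypercubeDimension(int num_processes) {
      if (num_processes <= 1) { return 0; }
      int dimension = 0;
      int capacity = 1;
      while (capacity < num_processes) {  // smallest power of two covering all ranks
        dimension++;
        capacity *= 2;
      }
      return dimension;
    }

    int main() {
      assert(CalculateHypercubeDimension(1) == 0);
      assert(CalculateHypercubeDimension(4) == 2);
      assert(CalculateHypercubeDimension(5) == 3);  // non-power-of-two rounds up
      assert(CalculateHypercubeDimension(8) == 3);
      return 0;
    }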
38 14 void KolotukhinAHypercubeMPI::PerformComputeLoad(int iterations) {
39 14 volatile int dummy = 0;
40 2/2 2100014 for (int i = 0; i < iterations; i++) {
    ✓ Branch 0 taken 2100000 times.
    ✓ Branch 1 taken 14 times.
41 2100000 dummy += (i * 3) / 7;
42 2100000 dummy ^= (i << 3);
43 2100000 dummy = dummy % 10007;
44 }
45 14 [[maybe_unused]] int final_value = dummy;
46 14 }
47
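The 2,100,000 loop-body executions recorded above are consistent with the 14 recorded calls to PerformComputeLoad at 150,000 iterations each; a one-line compile-time check:

    static_assert(14 * 150000 == 2100000, "loop count matches calls x iterations");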
48 7 void KolotukhinAHypercubeMPI::SendData(std::vector<int> &data, int next_neighbor) {
49 7 int data_size = static_cast<int>(data.size());
50 7 MPI_Send(&data_size, 1, MPI_INT, next_neighbor, 0, MPI_COMM_WORLD);
51 2/2 7 if (data_size > 0) {
    ✓ Branch 0 taken 6 times.
    ✓ Branch 1 taken 1 times.
52 6 MPI_Send(data.data(), data_size, MPI_INT, next_neighbor, 1, MPI_COMM_WORLD);
53 }
54 7 }
55
56 7 void KolotukhinAHypercubeMPI::RecvData(std::vector<int> &data, int prev_neighbor) {
57 7 int data_size = 0;
58 7 MPI_Recv(&data_size, 1, MPI_INT, prev_neighbor, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
59 7 data.resize(static_cast<size_t>(data_size));
60 2/2 7 if (data_size > 0) {
    ✓ Branch 0 taken 6 times.
    ✓ Branch 1 taken 1 times.
61 6 MPI_Recv(data.data(), data_size, MPI_INT, prev_neighbor, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
62 }
63 7 }
64
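The SendData/RecvData pair above uses a two-message handshake: the payload size on tag 0, then the payload itself on tag 1, with the second message skipped entirely when the buffer is empty. A minimal standalone sketch of the same pattern between two ranks (hypothetical driver, not part of the covered file; run with at least two ranks, e.g. mpirun -np 2):

    #include <mpi.h>
    #include <vector>

    int main(int argc, char **argv) {
      MPI_Init(&argc, &argv);
      int rank = 0;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      if (rank == 0) {
        std::vector<int> data(6, 1);
        int size = static_cast<int>(data.size());
        MPI_Send(&size, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);             // size first (tag 0)
        if (size > 0) {
          MPI_Send(data.data(), size, MPI_INT, 1, 1, MPI_COMM_WORLD);  // payload second (tag 1)
        }
      } else if (rank == 1) {
        int size = 0;
        MPI_Recv(&size, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        std::vector<int> data(static_cast<size_t>(size));
        if (size > 0) {
          MPI_Recv(data.data(), size, MPI_INT, 0, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        }
      }
      MPI_Finalize();
      return 0;
    }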
65 14 void KolotukhinAHypercubeMPI::CalcPositions(int my_rank, std::vector<int> &path, int &my_pos, int &next, int &prev) {
66 1/2 21 for (int i = 0; std::cmp_less(i, static_cast<int>(path.size())); i++) {
    ✓ Branch 0 taken 21 times.
    ✗ Branch 1 not taken.
67 2/2 21 if (my_rank == path[i]) {
    ✓ Branch 0 taken 14 times.
    ✓ Branch 1 taken 7 times.
68 14 my_pos = i;
69 2/2 14 if (i > 0) {
    ✓ Branch 0 taken 7 times.
    ✓ Branch 1 taken 7 times.
70 7 prev = path[i - 1];
71 }
72 2/2 14 if (std::cmp_less(i, static_cast<int>(path.size() - 1))) {
    ✓ Branch 0 taken 7 times.
    ✓ Branch 1 taken 7 times.
73 7 next = path[i + 1];
74 }
75 break;
76 }
77 }
78 14 }
79
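CalcPositions above scans the routing path for the caller's rank and reports its index plus the ranks immediately before and after it (or -1 at either end). A standalone illustration that mirrors the covered logic, with a made-up path (values are illustrative only):

    #include <cassert>
    #include <vector>

    // Mirrors the covered logic: find my_rank in path, record its index and
    // the ranks immediately before and after it (left at -1 at the ends).
    void CalcPositions(int my_rank, const std::vector<int> &path,
                       int &my_pos, int &next, int &prev) {
      for (size_t i = 0; i < path.size(); i++) {
        if (my_rank == path[i]) {
          my_pos = static_cast<int>(i);
          if (i > 0) { prev = path[i - 1]; }
          if (i + 1 < path.size()) { next = path[i + 1]; }
          break;
        }
      }
    }

    int main() {
      std::vector<int> path{0, 1, 3, 7};
      int pos = -1, next = -1, prev = -1;
      CalcPositions(3, path, pos, next, prev);
      assert(pos == 2 && prev == 1 && next == 7);   // interior hop
      pos = next = prev = -1;
      CalcPositions(0, path, pos, next, prev);
      assert(pos == 0 && prev == -1 && next == 1);  // source end of the path
      return 0;
    }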
80 14 std::vector<int> KolotukhinAHypercubeMPI::CalcPathLowToHigh(int source, int dest, int dimensions, int xor_val) {
81 14 std::vector<int> path;
82 1/2 14 int current = source;
    ✓ Branch 1 taken 14 times.
    ✗ Branch 2 not taken.
83 path.push_back(current);
84 1/2 14 for (int dim = 0; dim < dimensions; dim++) {
    ✓ Branch 0 taken 14 times.
    ✗ Branch 1 not taken.
85 14 int mask = 1 << dim;
86 1/2 14 if ((xor_val & mask) != 0) {
    ✓ Branch 0 taken 14 times.
    ✗ Branch 1 not taken.
87 1/2 14 current = current ^ mask;
    ✗ Branch 0 not taken.
    ✓ Branch 1 taken 14 times.
88 path.push_back(current);
89 1/2 14 if (current == dest) {
    ✗ Branch 0 not taken.
    ✓ Branch 1 taken 14 times.
90 break;
91 }
92 }
93 }
94 14 return path;
95 }
96
97 std::vector<int> KolotukhinAHypercubeMPI::CalcPathHighToLow(int source, int dest, int dimensions, int xor_val) {
98 std::vector<int> path;
99 int current = source;
100 path.push_back(current);
101 for (int dim = dimensions - 1; dim >= 0; dim--) {
102 int mask = 1 << dim;
103 if ((xor_val & mask) != 0) {
104 current = current ^ mask;
105 path.push_back(current);
106 if (current == dest) {
107 break;
108 }
109 }
110 }
111 return path;
112 }
113
114 14 std::vector<int> KolotukhinAHypercubeMPI::CalcPath(int source, int dest, int dimensions) {
115 14 int xor_val = source ^ dest;
116 1/2 14 if (source > dest) {
    ✗ Branch 0 not taken.
    ✓ Branch 1 taken 14 times.
117 return CalcPathHighToLow(source, dest, dimensions, xor_val);
118 }
119 14 return CalcPathLowToHigh(source, dest, dimensions, xor_val);
120 }
121
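CalcPath above corrects the address bits in which source and dest differ one dimension at a time, lowest bit first when source <= dest and highest bit first otherwise, recording each intermediate rank. A standalone sketch of the low-to-high case (example ranks are illustrative only):

    #include <cassert>
    #include <vector>

    // Mirrors CalcPathLowToHigh: flip each differing bit of `source`, lowest
    // dimension first, recording every intermediate rank until `dest` is reached.
    std::vector<int> CalcPathLowToHigh(int source, int dest, int dimensions) {
      int xor_val = source ^ dest;
      std::vector<int> path{source};
      int current = source;
      for (int dim = 0; dim < dimensions && current != dest; dim++) {
        if ((xor_val & (1 << dim)) != 0) {
          current ^= (1 << dim);
          path.push_back(current);
        }
      }
      return path;
    }

    int main() {
      // 0 -> 5 in a 3-cube: bits 0 and 2 differ, so the route is 0 -> 1 -> 5.
      std::vector<int> expected{0, 1, 5};
      assert(CalcPathLowToHigh(0, 5, 3) == expected);
      return 0;
    }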
122 14 bool KolotukhinAHypercubeMPI::ValidationImpl() {
123 14 int world_size = 0;
124 14 MPI_Comm_size(MPI_COMM_WORLD, &world_size);
125 auto &input = GetInput();
126 5/8 14 if ((input[0] < 0) || (input[0] > world_size - 1) || ((input[1] < 0) && (input[1] != -2)) ||
    ✓ Branch 0 taken 6 times.
    ✓ Branch 1 taken 8 times.
    ✗ Branch 2 not taken.
    ✓ Branch 3 taken 6 times.
    ✗ Branch 4 not taken.
    ✗ Branch 5 not taken.
    ✓ Branch 6 taken 4 times.
    ✓ Branch 7 taken 2 times.
127 1/2 14 (input[1] > world_size - 1) || (world_size <= 0)) {
    ✓ Branch 0 taken 14 times.
    ✗ Branch 1 not taken.
128 12 input[0] = 0;
129 12 input[1] = world_size - 1;
130 }
131 14 return true;
132 }
133
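ValidationImpl above falls back to source = 0 and dest = world_size - 1 whenever the requested endpoints are out of range, with -2 apparently accepted as a special value for dest. A standalone sketch of the same fallback rule (NormalizeRoute is a hypothetical helper; the values below are illustrative only):

    #include <cassert>
    #include <utility>

    // Hypothetical helper reproducing the fallback rule: invalid endpoints
    // are replaced by the default route 0 -> world_size - 1.
    std::pair<int, int> NormalizeRoute(int source, int dest, int world_size) {
      if ((source < 0) || (source > world_size - 1) ||
          ((dest < 0) && (dest != -2)) || (dest > world_size - 1) || (world_size <= 0)) {
        return {0, world_size - 1};
      }
      return {source, dest};
    }

    int main() {
      assert(NormalizeRoute(9, 1, 4) == std::make_pair(0, 3));    // source out of range
      assert(NormalizeRoute(1, -2, 4) == std::make_pair(1, -2));  // -2 passes through
      assert(NormalizeRoute(1, 2, 4) == std::make_pair(1, 2));    // valid route kept
      return 0;
    }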
134 14 bool KolotukhinAHypercubeMPI::PreProcessingImpl() {
135 14 GetOutput() = 0;
136 14 return true;
137 }
138
139 14 bool KolotukhinAHypercubeMPI::RunImpl() {
140 14 int rank = 0;
141 14 int world_size = 0;
142 14 MPI_Comm_rank(MPI_COMM_WORLD, &rank);
143 14 MPI_Comm_size(MPI_COMM_WORLD, &world_size);
144
145 const auto &input = GetInput();
146 14 int source = input[0];
147 14 int dest = input[1];
148
149 14 std::vector<int> data{};
150 14 int data_size = 0;
151
152 int dimensions = 0;
153 1/2 14 dimensions = CalculateHypercubeDimension(world_size);
    ✓ Branch 0 taken 14 times.
    ✗ Branch 1 not taken.
154 2/2 14 if (rank == source) {
    ✓ Branch 0 taken 7 times.
    ✓ Branch 1 taken 7 times.
155 7 data_size = input[2];
156 1/4 7 data.resize(static_cast<size_t>(data_size), 1);
    ✓ Branch 1 taken 7 times.
    ✗ Branch 2 not taken.
    ✗ Branch 3 not taken.
    ✗ Branch 4 not taken.
157 }
158
159 1/2 14 if (source == dest) {
    ✗ Branch 0 not taken.
    ✓ Branch 1 taken 14 times.
160 MPI_Bcast(&data_size, 1, MPI_INT, dest, MPI_COMM_WORLD);
161 if (rank != dest) {
162 data.resize(static_cast<size_t>(data_size));
163 }
164 MPI_Bcast(data.data(), data_size, MPI_INT, dest, MPI_COMM_WORLD);
165 GetOutput() = std::accumulate(data.begin(), data.end(), 0);
166 return true;
167 }
168
169 1/2 14 std::vector<int> path = CalcPath(source, dest, dimensions);
    ✓ Branch 1 taken 14 times.
    ✗ Branch 2 not taken.
170
171 14 int my_position = -1;
172 14 int prev_neighbor = -1;
173 14 int next_neighbor = -1;
174 14 CalcPositions(rank, path, my_position, next_neighbor, prev_neighbor);
175 1/2 14 if (my_position != -1) {
    ✓ Branch 0 taken 14 times.
    ✗ Branch 1 not taken.
176 2/2 14 if (rank == source) {
    ✓ Branch 0 taken 7 times.
    ✓ Branch 1 taken 7 times.
177 7 PerformComputeLoad(150000);
178 1/2 7 SendData(data, next_neighbor);
    ✓ Branch 1 taken 7 times.
    ✗ Branch 2 not taken.
179 1/2 7 } else if (rank == dest) {
    ✓ Branch 0 taken 7 times.
    ✗ Branch 1 not taken.
180 1/2 7 RecvData(data, prev_neighbor);
    ✓ Branch 1 taken 7 times.
    ✗ Branch 2 not taken.
181 7 data_size = static_cast<int>(data.size());
182 7 PerformComputeLoad(150000);
183 } else {
184 RecvData(data, prev_neighbor);
185 data_size = static_cast<int>(data.size());
186 PerformComputeLoad(150000);
187 SendData(data, next_neighbor);
188 }
189 }
190
191 1/2 14 MPI_Bcast(&data_size, 1, MPI_INT, dest, MPI_COMM_WORLD);
    ✓ Branch 1 taken 14 times.
    ✗ Branch 2 not taken.
192
193 1/2 14 if (my_position == -1) {
    ✗ Branch 0 not taken.
    ✓ Branch 1 taken 14 times.
194 data.resize(static_cast<size_t>(data_size));
195 }
196
197 1/2 14 MPI_Bcast(data.data(), data_size, MPI_INT, dest, MPI_COMM_WORLD);
    ✓ Branch 1 taken 14 times.
    ✗ Branch 2 not taken.
198
199 14 GetOutput() = std::accumulate(data.begin(), data.end(), 0);
200 1/2 14 MPI_Barrier(MPI_COMM_WORLD);
    ✓ Branch 1 taken 14 times.
    ✗ Branch 2 not taken.
201 return true;
202 }
203
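End to end, RunImpl above fills the source rank's buffer with input[2] ones, forwards it hop by hop along the hypercube path, broadcasts it from dest, and stores the element sum, so the expected output on every rank equals input[2]. A standalone check of that invariant (the buffer size below is illustrative only):

    #include <cassert>
    #include <numeric>
    #include <vector>

    int main() {
      const int data_size = 6;              // plays the role of input[2]
      std::vector<int> data(data_size, 1);  // source fills the buffer with ones
      // Forwarding and broadcasting do not change the contents, so every rank
      // ends up summing data_size ones.
      const int output = std::accumulate(data.begin(), data.end(), 0);
      assert(output == data_size);
      return 0;
    }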
204 14 bool KolotukhinAHypercubeMPI::PostProcessingImpl() {
205 14 return true;
206 }
207
208 } // namespace kolotukhin_a_hypercube
209