GCC Code Coverage Report

Directory: ./
File: tasks/fatehov_k_reshetka_tor/mpi/src/ops_mpi.cpp
Date: 2026-01-10 02:40:41

            Exec  Total  Coverage
Lines:        96    105     91.4%
Functions:    10     10    100.0%
Branches:     50     84     59.5%

Line  Branch  Exec  Source
1 #include "fatehov_k_reshetka_tor/mpi/include/ops_mpi.hpp"
2
3 #include <mpi.h>
4
5 #include <algorithm>
6 #include <cmath>
7 #include <complex>
8 #include <cstddef>
9 #include <iostream>
10 #include <vector>
11
12 #include "fatehov_k_reshetka_tor/common/include/common.hpp"
13
14 namespace fatehov_k_reshetka_tor {
15
16  1/2  6  FatehovKReshetkaTorMPI::FatehovKReshetkaTorMPI(const InType &in) {
      [✓ Branch 1 taken 6 times; ✗ Branch 2 not taken]
17 SetTypeOfTask(GetStaticTypeOfTask());
18 GetInput() = in;
19 6 GetOutput() = 0;
20 6 }
21
22 6 bool FatehovKReshetkaTorMPI::ValidationImpl() {
23 auto &data = GetInput();
24 6 size_t rows = std::get<0>(data);
25 6 size_t cols = std::get<1>(data);
26 auto &vec = std::get<2>(data);
27
28  2/4  6  return (rows > 0 && rows <= kMaxRows) && (cols > 0 && cols <= kMaxCols) && (rows * cols <= kMaxMatrixSize) &&
      [✗ Branch 0 not taken; ✓ Branch 1 taken 6 times; ✗ Branch 2 not taken; ✓ Branch 3 taken 6 times]
29  2/4  12  (vec.size() == rows * cols) && (!vec.empty());
      [✓ Branch 0 taken 6 times; ✗ Branch 1 not taken; ✗ Branch 2 not taken; ✓ Branch 3 taken 6 times]
30 }
31
32 6 bool FatehovKReshetkaTorMPI::PreProcessingImpl() {
33 6 return true;
34 }
35
36 namespace {
37
38 46 double ProcessValue(double val) {
39 double heavy_val = val;
40  2/2  4646  for (int k = 0; k < 100; ++k) {
      [✓ Branch 0 taken 4600 times; ✓ Branch 1 taken 46 times]
41  1/2  4600  heavy_val = (std::sin(heavy_val) * std::cos(heavy_val)) + std::exp(std::complex<double>(0, heavy_val).real()) +
      [✗ Branch 0 not taken; ✓ Branch 1 taken 4600 times]
42  1/2  4600  std::sqrt(std::abs(heavy_val) + 1.0);
      [✗ Branch 0 not taken; ✓ Branch 1 taken 4600 times]
43  1/2  4600  if (std::isinf(heavy_val)) {
      [✗ Branch 0 not taken; ✓ Branch 1 taken 4600 times]
44 heavy_val = val;
45 }
46 }
47 46 return heavy_val;
48 }
49
50  1/2  6  double FindLocalMax(const std::vector<double> &matrix) {
      [✓ Branch 0 taken 6 times; ✗ Branch 1 not taken]
51  1/2  6  if (matrix.empty()) {
      [✓ Branch 0 taken 6 times; ✗ Branch 1 not taken]
52 return -1e18;
53 }
54
55 6 double local_max = -1e18;
56  2/2  52  for (double val : matrix) {
      [✓ Branch 0 taken 46 times; ✓ Branch 1 taken 6 times]
57 46 double processed_val = ProcessValue(val);
58 46 local_max = std::max(processed_val, local_max);
59 }
60 6 return local_max;
61 }
62
63 6 void CalculateGridDimensions(int world_size, int &grid_rows, int &grid_cols) {
64 6 grid_rows = static_cast<int>(std::sqrt(world_size));
65  1/2  6  while (world_size % grid_rows != 0) {
      [✗ Branch 0 not taken; ✓ Branch 1 taken 6 times]
66 grid_rows--;
67 }
68 6 grid_cols = world_size / grid_rows;
69
70  1/2  6  if (grid_rows == 0) {
      [✗ Branch 0 not taken; ✓ Branch 1 taken 6 times]
71 grid_rows = 1;
72 grid_cols = world_size;
73 }
74 6 }
75
76 void GetGridCoordinates(int world_rank, int grid_cols, int &row, int &col) {
77 15 row = world_rank / grid_cols;
78 15 col = world_rank % grid_cols;
79 }
80
81 int GetTorNeighborRank(int world_rank, int grid_rows, int grid_cols, int delta_row, int delta_col) {
82 int row = 0;
83 int col = 0;
84 GetGridCoordinates(world_rank, grid_cols, row, col);
85
86 6 row = (row + delta_row + grid_rows) % grid_rows;
87 6 col = (col + delta_col + grid_cols) % grid_cols;
88
89 6 return (row * grid_cols) + col;
90 }
91
92 void CalculateLocalBlockSize(int world_rank, int grid_rows, int grid_cols, size_t total_rows, size_t total_cols,
93 size_t &my_rows, size_t &my_cols, size_t &start_row, size_t &start_col) {
94 int row = 0;
95 int col = 0;
96 GetGridCoordinates(world_rank, grid_cols, row, col);
97
98 9 size_t rows_per_proc = total_rows / grid_rows;
99 9 size_t rem_rows = total_rows % grid_rows;
100
101 9 auto proc_row = static_cast<size_t>(row);
102 9 auto proc_col = static_cast<size_t>(col);
103
104  1/2  6  start_row = (proc_row * rows_per_proc) + std::min<size_t>(proc_row, rem_rows);
      [✓ Branch 0 taken 6 times; ✗ Branch 1 not taken]
105  2/4  9  my_rows = rows_per_proc + (proc_row < rem_rows ? 1 : 0);
      [✓ Branch 0 taken 6 times; ✗ Branch 1 not taken; ✓ Branch 2 taken 3 times; ✗ Branch 3 not taken]
106
107 9 size_t cols_per_proc = total_cols / grid_cols;
108 9 size_t rem_cols = total_cols % grid_cols;
109
110  2/2  6  start_col = (proc_col * cols_per_proc) + std::min<size_t>(proc_col, rem_cols);
      [✓ Branch 0 taken 4 times; ✓ Branch 1 taken 2 times]
111  3/4  9  my_cols = cols_per_proc + (proc_col < rem_cols ? 1 : 0);
      [✓ Branch 0 taken 4 times; ✓ Branch 1 taken 2 times; ✓ Branch 2 taken 3 times; ✗ Branch 3 not taken]
112 }
113
114 6 void DistributeMatrixData(int world_rank, int world_size, const std::vector<double> &global_matrix, size_t total_rows,
115 size_t total_cols, int grid_rows, int grid_cols, std::vector<double> &local_matrix) {
116  2/2  6  if (world_rank == 0) {
      [✓ Branch 0 taken 3 times; ✓ Branch 1 taken 3 times]
117  2/2  9  for (int dest = 0; dest < world_size; ++dest) {
      [✓ Branch 0 taken 6 times; ✓ Branch 1 taken 3 times]
118 size_t dest_rows = 0;
119 size_t dest_cols = 0;
120 size_t start_row = 0;
121 size_t start_col = 0;
122 CalculateLocalBlockSize(dest, grid_rows, grid_cols, total_rows, total_cols, dest_rows, dest_cols, start_row,
123 start_col);
124
125 6 std::vector<double> buffer(dest_rows * dest_cols);
126  2/2  28  for (size_t i = 0; i < dest_rows; ++i) {
      [✓ Branch 0 taken 22 times; ✓ Branch 1 taken 6 times]
127  2/2  68  for (size_t j = 0; j < dest_cols; ++j) {
      [✓ Branch 0 taken 46 times; ✓ Branch 1 taken 22 times]
128 46 size_t global_i = start_row + i;
129 46 size_t global_j = start_col + j;
130 46 buffer[(i * dest_cols) + j] = global_matrix[(global_i * total_cols) + global_j];
131 }
132 }
133
134  2/2  6  if (dest == 0) {
      [✓ Branch 0 taken 3 times; ✓ Branch 1 taken 3 times]
135  1/2  3  local_matrix = buffer;
      [✓ Branch 1 taken 3 times; ✗ Branch 2 not taken]
136 } else {
137 3 int buffer_size = static_cast<int>(buffer.size());
138  1/2  3  MPI_Send(buffer.data(), buffer_size, MPI_DOUBLE, dest, 0, MPI_COMM_WORLD);
      [✓ Branch 1 taken 3 times; ✗ Branch 2 not taken]
139 }
140 }
141 } else {
142 size_t my_rows = 0;
143 size_t my_cols = 0;
144 size_t start_row = 0;
145 size_t start_col = 0;
146 CalculateLocalBlockSize(world_rank, grid_rows, grid_cols, total_rows, total_cols, my_rows, my_cols, start_row,
147 start_col);
148
149 3 local_matrix.resize(my_rows * my_cols);
150 3 int local_size = static_cast<int>(local_matrix.size());
151 3 MPI_Recv(local_matrix.data(), local_size, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
152 }
153 6 }
154
155 6 void TorusAllReduce(double &local_value, int world_rank, int grid_rows, int grid_cols) {
156 6 double current_value = local_value;
157
158  2/2  12  for (int col_offset = 1; col_offset < grid_cols; col_offset *= 2) {
      [✓ Branch 0 taken 6 times; ✓ Branch 1 taken 6 times]
159 int left_neighbor = GetTorNeighborRank(world_rank, grid_rows, grid_cols, 0, -col_offset);
160 int right_neighbor = GetTorNeighborRank(world_rank, grid_rows, grid_cols, 0, col_offset);
161
162 6 double received_value = 0.0;
163 MPI_Status status;
164
165 6 MPI_Sendrecv(&current_value, 1, MPI_DOUBLE, right_neighbor, 0, &received_value, 1, MPI_DOUBLE, left_neighbor, 0,
166 MPI_COMM_WORLD, &status);
167
168 6 current_value = std::max(current_value, received_value);
169 }
170
171  1/2  6  for (int row_offset = 1; row_offset < grid_rows; row_offset *= 2) {
      [✗ Branch 0 not taken; ✓ Branch 1 taken 6 times]
172 int up_neighbor = GetTorNeighborRank(world_rank, grid_rows, grid_cols, -row_offset, 0);
173 int down_neighbor = GetTorNeighborRank(world_rank, grid_rows, grid_cols, row_offset, 0);
174
175 double received_value = 0.0;
176 MPI_Status status;
177
178 MPI_Sendrecv(&current_value, 1, MPI_DOUBLE, down_neighbor, 0, &received_value, 1, MPI_DOUBLE, up_neighbor, 0,
179 MPI_COMM_WORLD, &status);
180
181 current_value = std::max(current_value, received_value);
182 }
183
184 6 local_value = current_value;
185 6 }
186
187 } // namespace
188
189 6 bool FatehovKReshetkaTorMPI::RunImpl() {
190 6 int world_rank = 0;
191 6 int world_size = 0;
192 6 MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
193 6 MPI_Comm_size(MPI_COMM_WORLD, &world_size);
194
195 6 size_t total_rows = 0;
196 6 size_t total_cols = 0;
197 6 std::vector<double> global_matrix;
198
199  2/2  6  if (world_rank == 0) {
      [✓ Branch 0 taken 3 times; ✓ Branch 1 taken 3 times]
200 auto &data = GetInput();
201 3 total_rows = std::get<0>(data);
202  1/2  3  total_cols = std::get<1>(data);
      [✓ Branch 1 taken 3 times; ✗ Branch 2 not taken]
203  1/2  3  global_matrix = std::get<2>(data);
      [✓ Branch 1 taken 3 times; ✗ Branch 2 not taken]
204 }
205
206  1/2  6  MPI_Bcast(&total_rows, 1, MPI_UNSIGNED_LONG, 0, MPI_COMM_WORLD);
      [✓ Branch 1 taken 6 times; ✗ Branch 2 not taken]
207  1/2  6  MPI_Bcast(&total_cols, 1, MPI_UNSIGNED_LONG, 0, MPI_COMM_WORLD);
      [✓ Branch 1 taken 6 times; ✗ Branch 2 not taken]
208
209 6 int grid_rows = 0;
210 int grid_cols = 0;
211 6 CalculateGridDimensions(world_size, grid_rows, grid_cols);
212
213  1/2  6  if (grid_rows * grid_cols != world_size) {
      [✗ Branch 0 not taken; ✓ Branch 1 taken 6 times]
214 if (world_rank == 0) {
215 std::cerr << "Error: Cannot create grid with " << world_size << " processes" << '\n';
216 }
217 return false;
218 }
219
220 6 std::vector<double> local_matrix;
221  1/2  6  DistributeMatrixData(world_rank, world_size, global_matrix, total_rows, total_cols, grid_rows, grid_cols,
      [✓ Branch 1 taken 6 times; ✗ Branch 2 not taken]
222 local_matrix);
223
224 6 double local_max = FindLocalMax(local_matrix);
225
226  1/2  6  TorusAllReduce(local_max, world_rank, grid_rows, grid_cols);
      [✓ Branch 1 taken 6 times; ✗ Branch 2 not taken]
227
228 6 double global_max = local_max;
229  1/2  6  MPI_Bcast(&global_max, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
      [✓ Branch 1 taken 6 times; ✗ Branch 2 not taken]
230
231  1/2  6  GetOutput() = global_max;
      [✓ Branch 0 taken 6 times; ✗ Branch 1 not taken]
232 return true;
233 }
234
235 6 bool FatehovKReshetkaTorMPI::PostProcessingImpl() {
236 6 return true;
237 }
238
239 } // namespace fatehov_k_reshetka_tor
240
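
Note on the listing: GetTorNeighborRank (source lines 81-90) turns the process grid into a torus by wrapping both coordinates modulo the grid dimensions, so every rank has a neighbour in each direction even at the grid edges. The following is a minimal standalone sketch of that wrap-around arithmetic, with hypothetical names and no MPI dependency; it is an illustration, not part of the measured source.

#include <cstdio>

// Sketch only: mirrors the wrap-around arithmetic used by GetTorNeighborRank
// in the listing above. Coordinates wrap in both dimensions of the grid.
int TorusNeighbor(int rank, int grid_rows, int grid_cols, int d_row, int d_col) {
  int row = rank / grid_cols;
  int col = rank % grid_cols;
  row = (row + d_row + grid_rows) % grid_rows;  // wrap vertically
  col = (col + d_col + grid_cols) % grid_cols;  // wrap horizontally
  return (row * grid_cols) + col;
}

int main() {
  // On a 2x3 torus, the left neighbour of rank 3 (row 1, col 0) wraps to rank 5 (row 1, col 2).
  std::printf("%d\n", TorusNeighbor(3, 2, 3, 0, -1));  // prints 5
  return 0;
}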
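
Most of the untaken branches sit in code the recorded runs never reach: the row phase of TorusAllReduce (source lines 172-181) and the grid-validation error path (source lines 214-217) show no execution counts, and the 1/2 result on the loop condition at source line 171 is consistent with grid_rows being 1 in every run, so only the column phase was exercised. Both phases use the same doubling-offset exchange; the sketch below shows that pattern on a single ring, using a hypothetical per-rank stand-in value instead of the real local maximum.

#include <mpi.h>

#include <algorithm>
#include <cstdio>

// Sketch only: the column phase of TorusAllReduce reduced to one ring.
// Each rank exchanges its current maximum with ring neighbours at doubling
// offsets via MPI_Sendrecv and keeps the larger value.
int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);
  int rank = 0;
  int size = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  double current = static_cast<double>(rank);  // hypothetical stand-in for the local maximum
  for (int offset = 1; offset < size; offset *= 2) {
    int right = (rank + offset) % size;
    int left = (rank - offset + size) % size;
    double received = 0.0;
    MPI_Sendrecv(&current, 1, MPI_DOUBLE, right, 0, &received, 1, MPI_DOUBLE, left, 0, MPI_COMM_WORLD,
                 MPI_STATUS_IGNORE);
    current = std::max(current, received);
  }
  std::printf("rank %d sees max %f\n", rank, current);
  MPI_Finalize();
  return 0;
}

Launched under an MPI runner (e.g. mpirun -np 4), every rank should print the same maximum, which is the property the torus reduction in the listing relies on.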