GCC Code Coverage Report


Directory: ./
File: tasks/zagryadskov_m_allreduce/mpi/src/allreduce.cpp
Date: 2026-01-09 01:27:18
             Exec   Total   Coverage
Lines:         82     104      78.8%
Functions:      7       7     100.0%
Branches:      39     100      39.0%

Line Branch Exec Source
1 #include "zagryadskov_m_allreduce/mpi/include/allreduce.hpp"
2
3 #include <mpi.h>
4
5 #include <cstddef>
6 #include <cstring>
7 #include <stdexcept>
8 #include <vector>
9
10 #include "zagryadskov_m_allreduce/common/include/common.hpp"
11 #include "zagryadskov_m_allreduce/seq/include/allreduce.hpp"
12
13 namespace zagryadskov_m_allreduce {
14
15
1/2
✓ Branch 1 taken 6 times.
✗ Branch 2 not taken.
6 ZagryadskovMAllreduceMPI::ZagryadskovMAllreduceMPI(const InType &in) {
16 SetTypeOfTask(GetStaticTypeOfTask());
17 6 int world_rank = 0;
18 int err_code = 0;
19
1/2
✓ Branch 1 taken 6 times.
✗ Branch 2 not taken.
6 err_code = MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
20
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 6 times.
6 if (err_code != MPI_SUCCESS) {
21 throw std::runtime_error("MPI_Comm_rank failed");
22 }
23
2/2
✓ Branch 0 taken 3 times.
✓ Branch 1 taken 3 times.
6 if (world_rank == 0) {
24 GetInput() = in;
25 }
26 6 }
27
28 6 bool ZagryadskovMAllreduceMPI::ValidationImpl() {
29 bool res = false;
30 6 int world_rank = 0;
31 6 int world_size = 0;
32 int err_code = 0;
33 6 err_code = MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
34
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 6 times.
6 if (err_code != MPI_SUCCESS) {
35 throw std::runtime_error("MPI_Comm_rank failed");
36 }
37 6 err_code = MPI_Comm_size(MPI_COMM_WORLD, &world_size);
38
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 6 times.
6 if (err_code != MPI_SUCCESS) {
35 throw std::runtime_error("MPI_Comm_size failed");
40 }
41
2/2
✓ Branch 0 taken 3 times.
✓ Branch 1 taken 3 times.
6 if (world_rank == 0) {
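// Input tuple layout: <0> source data vector, <1> per-rank element count, <2> reduction-operation index (0..2).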
42 auto &param1 = std::get<0>(GetInput());
43 3 int param2 = std::get<1>(GetInput());
44
1/2
✓ Branch 0 taken 3 times.
✗ Branch 1 not taken.
3 int param3 = std::get<2>(GetInput());
45
46
2/4
✓ Branch 0 taken 3 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 3 times.
✗ Branch 3 not taken.
3 res = (!param1.empty()) && (param3 >= 0) && (param3 <= 2) && (param2 > 0) &&
47
1/2
✓ Branch 0 taken 3 times.
✗ Branch 1 not taken.
3 (param1.size() >= static_cast<size_t>(param2) * static_cast<size_t>(world_size));
48 } else {
49 res = true;
50 }
51 6 return res;
52 }
53
54 6 bool ZagryadskovMAllreduceMPI::PreProcessingImpl() {
55 6 int world_rank = 0;
56 int err_code = 0;
57 6 int count = 0;
58 6 err_code = MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
59
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 6 times.
6 if (err_code != MPI_SUCCESS) {
60 throw std::runtime_error("MPI_Comm_rank failed");
61 }
62
2/2
✓ Branch 0 taken 3 times.
✓ Branch 1 taken 3 times.
6 if (world_rank == 0) {
63 3 count = std::get<1>(GetInput());
64 }
65
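// Broadcast the per-rank element count, then scatter that many elements from the root to every rank.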
66 6 err_code = MPI_Bcast(&count, 1, MPI_INT, 0, MPI_COMM_WORLD);
67
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 6 times.
6 if (err_code != MPI_SUCCESS) {
68 throw std::runtime_error("MPI_Bcast failed");
69 }
70 6 temp_vec_.resize(count);
71
72 err_code =
73 6 MPI_Scatter(std::get<0>(GetInput()).data(), count, MPI_INT, temp_vec_.data(), count, MPI_INT, 0, MPI_COMM_WORLD);
74
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 6 times.
6 if (err_code != MPI_SUCCESS) {
75 throw std::runtime_error("MPI_Scatter failed");
76 }
77
78 6 err_code = MPI_Barrier(MPI_COMM_WORLD);
79
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 6 times.
6 if (err_code != MPI_SUCCESS) {
80 throw std::runtime_error("MPI_Barrier failed");
81 }
82 6 return true;
83 }
84
85 6 void ZagryadskovMAllreduceMPI::ApplyOp(void *recvbuf, const void *tempbuf, int count, MPI_Datatype type, MPI_Op op,
86 MPI_Comm comm) {
87
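// Dispatch to the typed ApplyOp<T> helper matching the MPI datatype; unsupported datatypes abort the communicator.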
1/2
✓ Branch 0 taken 6 times.
✗ Branch 1 not taken.
6 if (type == MPI_INT) {
88 6 ApplyOp<int>(recvbuf, tempbuf, count, op, comm);
89 } else if (type == MPI_DOUBLE) {
90 ApplyOp<double>(recvbuf, tempbuf, count, op, comm);
91 } else if (type == MPI_FLOAT) {
92 ApplyOp<float>(recvbuf, tempbuf, count, op, comm);
93 } else {
94 MPI_Abort(comm, 1);
95 }
96 6 }
97
98 6 int ZagryadskovMAllreduceMPI::MyAllreduce(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
99 MPI_Op op, MPI_Comm comm) {
100 6 int rank = 0;
101 6 int size = 0;
102 6 MPI_Comm_rank(comm, &rank);
103 6 MPI_Comm_size(comm, &size);
104
105 6 int type_size = 0;
106 6 MPI_Type_size(datatype, &type_size);
107 6 std::vector<unsigned char> container_buf(static_cast<size_t>(count) * static_cast<size_t>(type_size));
108 void *tempbuf = reinterpret_cast<void *>(container_buf.data());
109
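// recvbuf starts out holding this rank's own contribution.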
110 6 std::memcpy(recvbuf, sendbuf, static_cast<size_t>(count) * static_cast<size_t>(type_size));
111
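// p2 becomes the largest power of two that does not exceed the communicator size.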
112 int p2 = 1;
113
2/2
✓ Branch 0 taken 6 times.
✓ Branch 1 taken 6 times.
12 while (p2 << 1 <= size) {
114 p2 <<= 1;
115 }
116
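// Ranks outside the power-of-two group hand their data to a partner inside it and then just wait for the final result.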
117
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 6 times.
6 if (rank >= p2) {
118 int partner = rank - p2;
119
120 MPI_Send(recvbuf, count, datatype, partner, 0, comm);
121 MPI_Recv(recvbuf, count, datatype, partner, 0, comm, MPI_STATUS_IGNORE);
122
123 return MPI_SUCCESS;
124 }
125
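// Lower-group ranks that have a partner above the power-of-two boundary fold that partner's data in first.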
126
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 6 times.
6 if (rank + p2 < size) {
127 int partner = rank + p2;
128 MPI_Recv(tempbuf, count, datatype, partner, 0, comm, MPI_STATUS_IGNORE);
129 ApplyOp(recvbuf, tempbuf, count, datatype, op, comm);
130 }
131
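// Recursive doubling within the power-of-two group: at step k, exchange buffers with the rank differing in bit k and combine.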
132
2/2
✓ Branch 0 taken 6 times.
✓ Branch 1 taken 6 times.
12 for (int step = 0; (1 << step) < p2; step++) {
133 6 int partner = rank ^ (1 << step);
134
135 6 MPI_Request request = MPI_REQUEST_NULL;
136
1/2
✓ Branch 1 taken 6 times.
✗ Branch 2 not taken.
6 MPI_Isend(recvbuf, count, datatype, partner, 0, comm, &request);
137
1/2
✓ Branch 1 taken 6 times.
✗ Branch 2 not taken.
6 MPI_Recv(tempbuf, count, datatype, partner, 0, comm, MPI_STATUS_IGNORE);
138
1/2
✓ Branch 1 taken 6 times.
✗ Branch 2 not taken.
6 MPI_Wait(&request, MPI_STATUS_IGNORE);
139
140
1/2
✓ Branch 1 taken 6 times.
✗ Branch 2 not taken.
6 ApplyOp(recvbuf, tempbuf, count, datatype, op, comm);
141 }
142
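// Send the fully reduced result back to the folded-out upper-group partner.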
143
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 6 times.
6 if (rank + p2 < size) {
144 int partner = rank + p2;
145 MPI_Send(recvbuf, count, datatype, partner, 0, comm);
146 }
147
148 return MPI_SUCCESS;
149 }
150
151 6 bool ZagryadskovMAllreduceMPI::RunImpl() {
152 6 int world_size = 0;
153 6 int world_rank = 0;
154 int err_code = 0;
155 6 int iop = 0;
156 6 err_code = MPI_Comm_size(MPI_COMM_WORLD, &world_size);
157
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 6 times.
6 if (err_code != MPI_SUCCESS) {
158 throw std::runtime_error("MPI_Comm_size failed");
159 }
160 6 err_code = MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
161
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 6 times.
6 if (err_code != MPI_SUCCESS) {
162 throw std::runtime_error("MPI_Comm_rank failed");
163 }
164
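// The root rank reads the reduction-operation selector and broadcasts it before running the custom allreduce.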
2/2
✓ Branch 0 taken 3 times.
✓ Branch 1 taken 3 times.
6 if (world_rank == 0) {
165 3 iop = std::get<2>(GetInput());
166 }
167 6 err_code = MPI_Bcast(&iop, 1, MPI_INT, 0, MPI_COMM_WORLD);
168
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 6 times.
6 if (err_code != MPI_SUCCESS) {
169 throw std::runtime_error("MPI_Bcast failed");
170 }
171
172 6 GetOutput().resize(temp_vec_.size());
173 6 MPI_Op op = ZagryadskovMAllreduceSEQ::GetOp(iop);
174 6 ZagryadskovMAllreduceMPI::MyAllreduce(temp_vec_.data(), GetOutput().data(), static_cast<int>(temp_vec_.size()),
175 MPI_INT, op, MPI_COMM_WORLD);
176
177 6 err_code = MPI_Barrier(MPI_COMM_WORLD);
178
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 6 times.
6 if (err_code != MPI_SUCCESS) {
179 throw std::runtime_error("MPI_Barrier failed");
180 }
181 6 return true;
182 }
183
184 6 bool ZagryadskovMAllreduceMPI::PostProcessingImpl() {
185 bool result = false;
186 6 int world_rank = 0;
187 6 MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
188
2/2
✓ Branch 0 taken 3 times.
✓ Branch 1 taken 3 times.
6 if (world_rank == 0) {
189 3 result = !GetOutput().empty();
190 } else {
191 result = true;
192 }
193 6 return result;
194 }
195
196 } // namespace zagryadskov_m_allreduce
197