GCC Code Coverage Report


Directory: ./
File: tasks/kutergin_a_allreduce/mpi/src/ops_mpi.cpp
Date: 2026-01-10 02:40:41
            Exec   Total   Coverage
Lines:        42      42     100.0%
Functions:     6       6     100.0%
Branches:     20      30      66.7%

Line Branch Exec Source
1 #include "../include/ops_mpi.hpp"
2
3 #include <mpi.h>
4
5 #include <cstdint>
6 #include <cstring>
7 #include <numeric>
8 #include <vector>
9
10 #include "../../common/include/common.hpp"
11
12 namespace kutergin_a_allreduce {
13
14 namespace {
15
16 void ApplyOp(void *a, const void *b, int count, MPI_Datatype datatype, MPI_Op op) {
17 2/4 4 if (op == MPI_SUM && datatype == MPI_INT) {
      ✓ Branch 0 taken 4 times.
      ✗ Branch 1 not taken.
      ✓ Branch 2 taken 4 times.
      ✗ Branch 3 not taken.
18 2/2 8 for (int i = 0; i < count; ++i) {
      ✓ Branch 0 taken 4 times.
      ✓ Branch 1 taken 4 times.
19 4 reinterpret_cast<int *>(a)[i] += reinterpret_cast<const int *>(b)[i];
20 }
21 }
22 }
23
24 } // namespace
25
26 1/2 8 AllreduceMPI::AllreduceMPI(const InType &in) {
      ✓ Branch 1 taken 8 times.
      ✗ Branch 2 not taken.
27 SetTypeOfTask(GetStaticTypeOfTask());
28 GetInput() = in;
29 8 GetOutput() = 0;
30 8 }
31
32 8 int Allreduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm) {
33 8 int rank = 0;
34 8 int size = 0;
35 8 MPI_Comm_rank(comm, &rank);
36 8 MPI_Comm_size(comm, &size);
37
38 8 int type_size = 0;
39 8 MPI_Type_size(datatype, &type_size);
40
41 8 std::memcpy(recvbuf, sendbuf, static_cast<size_t>(count) * type_size);
42
43 2/2 12 for (int mask = 1; mask < size; mask <<= 1) {
      ✓ Branch 0 taken 8 times.
      ✓ Branch 1 taken 4 times.
44 2/2 8 if ((rank & mask) != 0) {
      ✓ Branch 0 taken 4 times.
      ✓ Branch 1 taken 4 times.
45 4 MPI_Send(recvbuf, count, datatype, rank - mask, 0, comm);
46 break;
47 }
48
49 1/2 4 if (rank + mask < size) {
      ✓ Branch 0 taken 4 times.
      ✗ Branch 1 not taken.
50 4 std::vector<uint8_t> tmp(static_cast<size_t>(count) * type_size);
51 1/2 4 MPI_Recv(tmp.data(), count, datatype, rank + mask, 0, comm, MPI_STATUS_IGNORE);
      ✓ Branch 1 taken 4 times.
      ✗ Branch 2 not taken.
52 ApplyOp(recvbuf, tmp.data(), count, datatype, op);
53 }
54 }
55
56 2/2 16 for (int mask = 1; mask < size; mask <<= 1) {
      ✓ Branch 0 taken 8 times.
      ✓ Branch 1 taken 8 times.
57 3/4 8 if (rank < mask && rank + mask < size) {
      ✓ Branch 0 taken 4 times.
      ✓ Branch 1 taken 4 times.
      ✓ Branch 2 taken 4 times.
      ✗ Branch 3 not taken.
58 4 MPI_Send(recvbuf, count, datatype, rank + mask, 0, comm);
59 2/4 4 } else if (rank >= mask && rank < 2 * mask) {
      ✓ Branch 0 taken 4 times.
      ✗ Branch 1 not taken.
      ✓ Branch 2 taken 4 times.
      ✗ Branch 3 not taken.
60 4 MPI_Recv(recvbuf, count, datatype, rank - mask, 0, comm, MPI_STATUS_IGNORE);
61 }
62 }
63
64 8 return MPI_SUCCESS;
65 }
66
67 8 bool AllreduceMPI::ValidationImpl() {
68 8 return true;
69 }
70
71 8 bool AllreduceMPI::PreProcessingImpl() {
72 8 return true;
73 }
74
75 8 bool AllreduceMPI::RunImpl() {
76 8 int rank = 0;
77 8 MPI_Comm_rank(MPI_COMM_WORLD, &rank);
78
79 const auto &input_struct = GetInput();
80
81 1/2 8 int local_sum = 0;
      ✓ Branch 0 taken 8 times.
      ✗ Branch 1 not taken.
82 1/2 8 if (!input_struct.elements.empty()) {
      ✓ Branch 0 taken 8 times.
      ✗ Branch 1 not taken.
83 8 local_sum = std::accumulate(input_struct.elements.begin(), input_struct.elements.end(), 0);
84 }
85
86 8 int global_sum = 0;
87
88 8 Allreduce(&local_sum, &global_sum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
89
90 8 GetOutput() = global_sum;
91
92 8 return true;
93 }
94
95 8 bool AllreduceMPI::PostProcessingImpl() {
96 8 return true;
97 }
98
99 } // namespace kutergin_a_allreduce
100
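
Note on the 66.7% branch coverage: the untaken branches group naturally. On line 17, ApplyOp is only ever called with MPI_SUM on MPI_INT, so the short-circuit branches for any other op/datatype combination (which ApplyOp silently ignores) are never taken. The counts in the two tree loops appear consistent with all recorded runs using two ranks, so the false side of rank + mask < size (lines 49 and 57) and the false sides of both conditions on line 59 never fire. The remaining 1/2 entries (lines 26, 51, 81 and 82) are most likely compiler-generated exception or cleanup edges rather than logic in the source. A minimal standalone probe along the following lines, run with an odd number of ranks (e.g. mpirun -np 3), should cover the untaken tree branches. This is a hypothetical sketch, not part of the original test suite, and it assumes Allreduce is declared in ops_mpi.hpp (the report only shows the .cpp).

// Hypothetical coverage probe (not part of the original test suite). The include
// path mirrors the one in ops_mpi.cpp; the declaration of Allreduce in the header
// is assumed.
#include <mpi.h>

#include <cstdio>

#include "../include/ops_mpi.hpp"

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);

  int rank = 0;
  int size = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  int local = rank + 1;  // rank i contributes i + 1
  int global = 0;

  // With three ranks: rank 2 sees `rank + mask < size` false at mask == 1 (line 49),
  // rank 1 sees it false at mask == 2 in the broadcast loop (line 57), and ranks 1
  // and 2 between them hit the false sides of both conditions on line 59.
  kutergin_a_allreduce::Allreduce(&local, &global, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

  if (rank == 0) {
    std::printf("sum = %d, expected %d\n", global, size * (size + 1) / 2);
  }

  MPI_Finalize();
  return 0;
}

Exercising the line 17 branches would additionally require a call with some other op or datatype; because ApplyOp skips such combinations, that call would just propagate rank 0's buffer, so such a test exercises the branch but cannot check a meaningful reduction result.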