GCC Code Coverage Report


Directory: ./
File: tasks/zorin_d_ruler/mpi/src/ops_mpi.cpp
Date: 2026-01-27 01:59:34
            Exec   Total   Coverage
Lines:        49      51      96.1%
Functions:     6       6     100.0%
Branches:     20      24      83.3%

Line Branch Exec Source
1 #include "zorin_d_ruler/mpi/include/ops_mpi.hpp"
2
3 #include <mpi.h>
4
5 #include <algorithm>
6 #include <cstdint>
7
8 #include "zorin_d_ruler/common/include/common.hpp"
9
10 namespace zorin_d_ruler {
11
12 namespace {
13
14 inline std::int64_t DoHeavyWork(int n, int i_start, int i_end) {
15 std::int64_t acc = 0;
16 2/2 166 for (int i = i_start; i < i_end; ++i) {
    ✓ Branch 0 taken 160 times.
    ✓ Branch 1 taken 6 times.
17 2/2 12760 for (int j = 0; j < n; ++j) {
    ✓ Branch 0 taken 12600 times.
    ✓ Branch 1 taken 160 times.
18 2/2 1138600 for (int k = 0; k < n; ++k) {
    ✓ Branch 0 taken 1126000 times.
    ✓ Branch 1 taken 12600 times.
19 1126000 acc += (static_cast<std::int64_t>(i) * 31) + (static_cast<std::int64_t>(j) * 17) +
20 1126000 (static_cast<std::int64_t>(k) * 13);
21 1126000 acc ^= (acc << 1);
22 1126000 acc += (acc >> 3);
23 }
24 }
25 }
26 return acc;
27 }
28
29 6 inline std::int64_t LineAllSum(std::int64_t local, int rank, int size, MPI_Comm comm) {
30 6 std::int64_t partial = local;
31
32 2/2 6 if (rank > 0) {
    ✓ Branch 0 taken 3 times.
    ✓ Branch 1 taken 3 times.
33 3 std::int64_t left = 0;
34 3 MPI_Recv(&left, 1, MPI_INT64_T, rank - 1, 100, comm, MPI_STATUS_IGNORE);
35 3 partial += left;
36 }
37 2/2 6 if (rank < size - 1) {
    ✓ Branch 0 taken 3 times.
    ✓ Branch 1 taken 3 times.
38 3 MPI_Send(&partial, 1, MPI_INT64_T, rank + 1, 100, comm);
39 }
40
41 6 std::int64_t global = 0;
42 2/2 6 if (rank == size - 1) {
    ✓ Branch 0 taken 3 times.
    ✓ Branch 1 taken 3 times.
43 3 global = partial;
44 }
45 2/2 6 if (rank < size - 1) {
    ✓ Branch 0 taken 3 times.
    ✓ Branch 1 taken 3 times.
46 3 MPI_Recv(&global, 1, MPI_INT64_T, rank + 1, 101, comm, MPI_STATUS_IGNORE);
47 }
48 2/2 6 if (rank > 0) {
    ✓ Branch 0 taken 3 times.
    ✓ Branch 1 taken 3 times.
49 3 MPI_Send(&global, 1, MPI_INT64_T, rank - 1, 101, comm);
50 }
51
52 6 return global;
53 }
54
55 } // namespace
56
57 6 ZorinDRulerMPI::ZorinDRulerMPI(const InType &in) {
58 SetTypeOfTask(GetStaticTypeOfTask());
59 6 GetInput() = in;
60 GetOutput() = 0;
61 6 }
62
63 6 bool ZorinDRulerMPI::ValidationImpl() {
64 6 return GetInput() > 0;
65 }
66
67 6 bool ZorinDRulerMPI::PreProcessingImpl() {
68 6 GetOutput() = 0;
69 6 return true;
70 }
71
72 6 bool ZorinDRulerMPI::RunImpl() {
73 6 int rank = 0;
74 6 int size = 0;
75 6 MPI_Comm_rank(MPI_COMM_WORLD, &rank);
76 6 MPI_Comm_size(MPI_COMM_WORLD, &size);
77
78 6 const int n = GetInput();
79 1/2 6 if (n <= 0) {
    ✗ Branch 0 not taken.
    ✓ Branch 1 taken 6 times.
80 return false;
81 }
82
83 6 const int base = n / size;
84 6 const int rem = n % size;
85
86 1/2 6 const int i_start = (rank * base) + std::min(rank, rem);
    ✓ Branch 0 taken 6 times.
    ✗ Branch 1 not taken.
87 1/2 12 const int i_end = i_start + base + (rank < rem ? 1 : 0);
    ✓ Branch 0 taken 6 times.
    ✗ Branch 1 not taken.
88
89 const std::int64_t local_work = DoHeavyWork(n, i_start, i_end);
90
91 6 const std::int64_t global_work = LineAllSum(local_work, rank, size, MPI_COMM_WORLD);
92
93 1/2 6 if (global_work == -1) {
    ✗ Branch 0 not taken.
    ✓ Branch 1 taken 6 times.
94 GetOutput() = -1;
95 return false;
96 }
97
98 6 GetOutput() = n;
99 6 return true;
100 }
101
102 6 bool ZorinDRulerMPI::PostProcessingImpl() {
103 6 GetOutput() = GetInput();
104 6 return true;
105 }
106
107 } // namespace zorin_d_ruler
108
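
Note (not part of the generated report): the LineAllSum helper covered above forwards partial sums up the rank chain with point-to-point messages (tag 100) and then propagates the total back down (tag 101). A minimal sketch of the same end result with a single collective is shown below; the helper name ChainSumViaAllreduce and the demo main are hypothetical and only illustrate the equivalence, they are not taken from ops_mpi.cpp.

// Sketch only, assuming an MPI environment; ChainSumViaAllreduce is a
// hypothetical helper, not part of ops_mpi.cpp.
#include <mpi.h>

#include <cstdint>
#include <iostream>

std::int64_t ChainSumViaAllreduce(std::int64_t local, MPI_Comm comm) {
  std::int64_t global = 0;
  // Sum every rank's partial value and deliver the total to all ranks,
  // which matches what the send/receive chain in LineAllSum computes.
  MPI_Allreduce(&local, &global, 1, MPI_INT64_T, MPI_SUM, comm);
  return global;
}

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);
  int rank = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  // Each rank contributes its rank index as a stand-in for local work.
  const std::int64_t total = ChainSumViaAllreduce(rank, MPI_COMM_WORLD);
  std::cout << "rank " << rank << " total " << total << std::endl;
  MPI_Finalize();
  return 0;
}

With 4 ranks each contributing its rank index, every rank prints total 6 (0 + 1 + 2 + 3).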