GCC Code Coverage Report


Directory: ./
File: tasks/gaivoronskiy_m_average_vector_sum/mpi/src/ops_mpi.cpp
Date: 2026-01-27 01:59:34
             Exec   Total   Coverage
Lines:         40      40     100.0%
Functions:      5       5     100.0%
Branches:      22      32      68.8%
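Note on the branch figure: the per-line entries below marked "1/2" or "1/4" are lines that executed on every run but whose remaining branch edges were never taken. In C++ built with g++ --coverage, such edges are often compiler-generated exception/cleanup paths around calls and container operations, plus conditionals whose other arm is never needed; gcovr provides --exclude-throw-branches to filter the former. A minimal, hypothetical snippet (not part of the covered file) that reproduces the effect:

#include <vector>

int main() {
  // Executes on the happy path only: the allocation never throws in this run,
  // so the compiler-generated exception/cleanup edges stay "not taken".
  std::vector<double> values(8, 1.5);

  double sum = 0.0;
  for (double v : values) {  // loop condition: both outcomes taken (2/2)
    sum += v;
  }

  // Only the "true" arm is ever taken, so gcov reports this line as 1/2
  // branches covered even though line coverage is 100%.
  return sum > 0.0 ? 0 : 1;
}

Compiled and run once under coverage instrumentation, a program like this shows full line coverage with partial branch coverage, the same pattern as the summary above.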

Line Branch Exec Source
1 #include "gaivoronskiy_m_average_vector_sum/mpi/include/ops_mpi.hpp"
2
3 #include <mpi.h>
4
5 #include <cmath>
6 #include <cstddef>
7 #include <numeric>
8 #include <utility>
9 #include <vector>
10
11 #include "gaivoronskiy_m_average_vector_sum/common/include/common.hpp"
12
13 namespace gaivoronskiy_m_average_vector_sum {
14
15
1/2
✓ Branch 1 taken 12 times.
✗ Branch 2 not taken.
12 GaivoronskiyMAverageVecSumMPI::GaivoronskiyMAverageVecSumMPI(const InType &in) {
16 SetTypeOfTask(GetStaticTypeOfTask());
17
1/2
✓ Branch 1 taken 12 times.
✗ Branch 2 not taken.
12 GetInput() = in;
18 12 GetOutput() = 0.0;
19 12 }
20
21 12 bool GaivoronskiyMAverageVecSumMPI::ValidationImpl() {
22 12 return !GetInput().empty();
23 }
24
25 12 bool GaivoronskiyMAverageVecSumMPI::PreProcessingImpl() {
26 12 MPI_Comm_rank(MPI_COMM_WORLD, &world_rank_);
27 12 MPI_Comm_size(MPI_COMM_WORLD, &world_size_);
28
29 12 total_size_ = 0;
30
2/2
✓ Branch 0 taken 6 times.
✓ Branch 1 taken 6 times.
12 if (world_rank_ == 0) {
31 6 distributed_values_ = GetInput();
32 6 total_size_ = distributed_values_.size();
33 } else {
34 6 distributed_values_.clear();
35 }
36
37 12 int size_to_share = static_cast<int>(total_size_);
38 12 MPI_Bcast(&size_to_share, 1, MPI_INT, 0, MPI_COMM_WORLD);
39 12 total_size_ = static_cast<std::size_t>(size_to_share);
40
41 12 local_sum_ = 0.0;
42 12 global_sum_ = 0.0;
43 12 return total_size_ > 0;
44 }
45
46 12 bool GaivoronskiyMAverageVecSumMPI::RunImpl() {
47
2/2
✓ Branch 0 taken 10 times.
✓ Branch 1 taken 2 times.
12 if (total_size_ == 0) {
48 2 return false;
49 }
50
51 10 std::vector<int> send_counts(world_size_, 0);
52
1/4
✓ Branch 1 taken 10 times.
✗ Branch 2 not taken.
✗ Branch 3 not taken.
✗ Branch 4 not taken.
10 std::vector<int> displs(world_size_, 0);
53
54 10 const std::size_t base_chunk = total_size_ / static_cast<std::size_t>(world_size_);
55 10 const std::size_t remainder = total_size_ % static_cast<std::size_t>(world_size_);
56
57
2/2
✓ Branch 0 taken 20 times.
✓ Branch 1 taken 10 times.
30 for (int rank = 0; rank < world_size_; rank++) {
58 20 std::size_t chunk = base_chunk + (std::cmp_less(rank, remainder) ? 1 : 0);
59
2/2
✓ Branch 0 taken 10 times.
✓ Branch 1 taken 10 times.
20 send_counts[rank] = static_cast<int>(chunk);
60
2/2
✓ Branch 0 taken 10 times.
✓ Branch 1 taken 10 times.
20 if (rank > 0) {
61 10 displs[rank] = displs[rank - 1] + send_counts[rank - 1];
62 }
63 }
64
65
1/2
✓ Branch 1 taken 10 times.
✗ Branch 2 not taken.
10 const int recv_count = send_counts[world_rank_];
66
1/2
✓ Branch 1 taken 10 times.
✗ Branch 2 not taken.
10 local_buffer_.resize(recv_count > 0 ? static_cast<std::size_t>(recv_count) : 0);
67
68
2/2
✓ Branch 0 taken 5 times.
✓ Branch 1 taken 5 times.
10 const double *send_buffer = world_rank_ == 0 ? distributed_values_.data() : nullptr;
69
2/2
✓ Branch 0 taken 9 times.
✓ Branch 1 taken 1 times.
10 double *recv_buffer = !local_buffer_.empty() ? local_buffer_.data() : nullptr;
70
1/2
✓ Branch 1 taken 10 times.
✗ Branch 2 not taken.
10 MPI_Scatterv(send_buffer, send_counts.data(), displs.data(), MPI_DOUBLE, recv_buffer, recv_count, MPI_DOUBLE, 0,
71 MPI_COMM_WORLD);
72
73 10 local_sum_ = std::accumulate(local_buffer_.begin(), local_buffer_.end(), 0.0);
74
75
1/2
✓ Branch 1 taken 10 times.
✗ Branch 2 not taken.
10 MPI_Allreduce(&local_sum_, &global_sum_, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
76
77
1/2
✓ Branch 0 taken 10 times.
✗ Branch 1 not taken.
10 GetOutput() = global_sum_ / static_cast<double>(total_size_);
78 10 return true;
79 }
80
81 12 bool GaivoronskiyMAverageVecSumMPI::PostProcessingImpl() {
82 12 return std::isfinite(GetOutput());
83 }
84
85 } // namespace gaivoronskiy_m_average_vector_sum
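As context for the counts on source lines 54-63 above: the send_counts/displs loop implements a plain block distribution of total_size_ elements over world_size_ ranks, with the first `remainder` ranks receiving one extra element. A standalone sketch of the same arithmetic, using illustrative values that are assumptions rather than data from these test runs:

#include <cstddef>
#include <iostream>
#include <utility>  // std::cmp_less needs C++20, matching the covered code
#include <vector>

int main() {
  const std::size_t total_size = 7;  // assumed example input length
  const int world_size = 2;          // assumed number of MPI ranks

  const std::size_t base_chunk = total_size / static_cast<std::size_t>(world_size);
  const std::size_t remainder = total_size % static_cast<std::size_t>(world_size);

  std::vector<int> send_counts(world_size, 0);
  std::vector<int> displs(world_size, 0);
  for (int rank = 0; rank < world_size; rank++) {
    // Ranks below `remainder` take one extra element so every element is assigned.
    const std::size_t chunk = base_chunk + (std::cmp_less(rank, remainder) ? 1 : 0);
    send_counts[rank] = static_cast<int>(chunk);
    if (rank > 0) {
      displs[rank] = displs[rank - 1] + send_counts[rank - 1];
    }
  }

  // Expected output: rank 0 -> count 4, displ 0; rank 1 -> count 3, displ 4.
  for (int rank = 0; rank < world_size; rank++) {
    std::cout << "rank " << rank << ": count=" << send_counts[rank]
              << " displ=" << displs[rank] << '\n';
  }
  return 0;
}

These counts and displacements are the shape MPI_Scatterv consumes on the root rank, which is why the covered code can pass nullptr as the send buffer on non-root ranks.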
86