GCC Code Coverage Report


Directory: ./
File: tasks/frolova_s_sum_elem_matrix/mpi/src/ops_mpi.cpp
Date: 2026-01-10 02:40:41
              Exec   Total   Coverage
Lines:          58      63      92.1%
Functions:       7       7     100.0%
Branches:       29      46      63.0%

Line Branch Exec Source
1 #include "frolova_s_sum_elem_matrix/mpi/include/ops_mpi.hpp"
2
3 #include <mpi.h>
4
5 #include <limits>
6 #include <tuple>
7 #include <vector>
8
9 #include "frolova_s_sum_elem_matrix/common/include/common.hpp"
10
11 namespace frolova_s_sum_elem_matrix {
12
13 1/2 20 FrolovaSSumElemMatrixMPI::FrolovaSSumElemMatrixMPI(const InType &in) {
          ✓ Branch 1 taken 20 times.  ✗ Branch 2 not taken.
14 SetTypeOfTask(GetStaticTypeOfTask());
15 GetInput() = in;
16 20 GetOutput() = 0.0;
17 20 }
18
19 20 bool FrolovaSSumElemMatrixMPI::ValidationImpl() {
20 const auto &in = GetInput();
21
22 const auto &my_matrix = std::get<0>(in);
23 20 int param_dim1 = std::get<1>(in);
24 20 int param_dim2 = std::get<2>(in);
25
26 2/4 20 return (param_dim1 > 0 && param_dim2 > 0 && static_cast<int>(my_matrix.size()) == (param_dim1 * param_dim2));
          ✓ Branch 0 taken 20 times.  ✗ Branch 1 not taken.  ✗ Branch 2 not taken.  ✓ Branch 3 taken 20 times.
27 }
28
29 20 bool FrolovaSSumElemMatrixMPI::PreProcessingImpl() {
30 20 return true;
31 }
32
33 20 bool FrolovaSSumElemMatrixMPI::RunImpl() {
34 20 int process_n = 0;
35 20 MPI_Comm_size(MPI_COMM_WORLD, &process_n);
36
37 20 int proc_rank = 0;
38 20 MPI_Comm_rank(MPI_COMM_WORLD, &proc_rank);
39
40 20 int total_size = 0;
41 20 std::vector<double> vect_data;
42
43 2/2 20 if (proc_rank == 0) {
          ✓ Branch 0 taken 10 times.  ✓ Branch 1 taken 10 times.
44 1/2 10 vect_data = std::get<0>(GetInput());
          ✓ Branch 1 taken 10 times.  ✗ Branch 2 not taken.
45 10 total_size = static_cast<int>(vect_data.size());
46
47 1/4 10 if (process_n > total_size && total_size > 0) {
          ✗ Branch 0 not taken.  ✓ Branch 1 taken 10 times.  ✗ Branch 2 not taken.  ✗ Branch 3 not taken.
48 process_n = total_size;
49 }
50 }
51
52 1/2 20 MPI_Bcast(&process_n, 1, MPI_INT, 0, MPI_COMM_WORLD);
          ✓ Branch 1 taken 20 times.  ✗ Branch 2 not taken.
53 1/2 20 MPI_Bcast(&total_size, 1, MPI_INT, 0, MPI_COMM_WORLD);
          ✓ Branch 1 taken 20 times.  ✗ Branch 2 not taken.
54
55 1/2 20 if (total_size == 0) {
          ✗ Branch 0 not taken.  ✓ Branch 1 taken 20 times.
56 if (proc_rank == 0) {
57 GetOutput() = 0.0;
58 }
59 return true;
60 }
61
62 2/2 20 if (proc_rank == 0) {
          ✓ Branch 0 taken 10 times.  ✓ Branch 1 taken 10 times.
63 1/2 10 GetOutput() = ProcessMaster(process_n, vect_data);
          ✓ Branch 1 taken 10 times.  ✗ Branch 2 not taken.
64 1/2 10 } else if (proc_rank < process_n) {
          ✓ Branch 0 taken 10 times.  ✗ Branch 1 not taken.
65 1/2 10 ProcessWorker();
          ✓ Branch 1 taken 10 times.  ✗ Branch 2 not taken.
66 10 GetOutput() = std::numeric_limits<double>::max();
67 } else {
68 GetOutput() = std::numeric_limits<double>::max();
69 }
70
71 return true;
72 }
73
74 20 bool FrolovaSSumElemMatrixMPI::PostProcessingImpl() {
75 20 return true;
76 }
77
78 10 double FrolovaSSumElemMatrixMPI::ProcessMaster(int process_n, const std::vector<double> &vect_data) {
79 10 const int n = static_cast<int>(vect_data.size());
80 10 const int base = n / process_n;
81 10 int remain = n % process_n;
82
83 int start_id = 0;
84
85 2/2 20 for (int worker = 1; worker < process_n; worker++) {
          ✓ Branch 0 taken 10 times.  ✓ Branch 1 taken 10 times.
86 2/2 10 int part_size = base + (remain > 0 ? 1 : 0);
          ✓ Branch 0 taken 5 times.  ✓ Branch 1 taken 5 times.
87 2/2 10 if (remain > 0) {
          ✓ Branch 0 taken 5 times.  ✓ Branch 1 taken 5 times.
88 5 remain--;
89 }
90
91 10 MPI_Send(&part_size, 1, MPI_INT, worker, 0, MPI_COMM_WORLD);
92 10 MPI_Send(vect_data.data() + start_id, part_size, MPI_DOUBLE, worker, 0, MPI_COMM_WORLD);
93
94 10 start_id += part_size;
95 }
96
97 double total_sum = 0.0;
98 2/2 2093 for (int i = start_id; i < n; ++i) {
            ✓ Branch 0 taken 2083 times.  ✓ Branch 1 taken 10 times.
99 2083 total_sum += vect_data[i];
100 }
101
102 2/2 20 for (int worker = 1; worker < process_n; worker++) {
           ✓ Branch 0 taken 10 times.  ✓ Branch 1 taken 10 times.
103 10 double worker_sum = 0.0;
104 10 MPI_Recv(&worker_sum, 1, MPI_DOUBLE, worker, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
105 10 total_sum += worker_sum;
106 }
107
108 10 return total_sum;
109 }
110
111 10 void FrolovaSSumElemMatrixMPI::ProcessWorker() {
112 10 int part_size = 0;
113 10 MPI_Recv(&part_size, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
114
115 10 std::vector<double> local_data(part_size);
116 1/2 10 MPI_Recv(local_data.data(), part_size, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
           ✓ Branch 1 taken 10 times.  ✗ Branch 2 not taken.
117
118 10 double local_sum = 0.0;
119 2/2 2098 for (double val : local_data) {
             ✓ Branch 0 taken 2088 times.  ✓ Branch 1 taken 10 times.
120 2088 local_sum += val;
121 }
122
123 1/2 10 MPI_Send(&local_sum, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD);
           ✓ Branch 1 taken 10 times.  ✗ Branch 2 not taken.
124 10 }
125
126 } // namespace frolova_s_sum_elem_matrix
127
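
Note on the untaken branches: the listing above shows three paths that these runs never exercised, which accounts for most of the 63.0% branch figure: the total_size == 0 early return (line 55), the process_n > total_size clamp (line 47), and the final else for ranks that receive no work (lines 67-68). The following is a minimal standalone sketch, not the project's test harness; the helper name SumWithClamp and the driver are invented for illustration, and idle ranks here simply return 0.0 instead of setting their output to std::numeric_limits<double>::max() as the original does. It mirrors the RunImpl()/ProcessMaster()/ProcessWorker() structure, so launching it with an empty input and with more ranks than elements (e.g. mpirun -np 4) walks exactly those three paths.

#include <mpi.h>

#include <cstdio>
#include <vector>

namespace {

// Mirrors the decision structure of RunImpl(): rank 0 clamps the number of
// participating processes, broadcasts the sizes, then either returns early
// on an empty input or runs the master/worker distribution.
double SumWithClamp(const std::vector<double> &data_on_root) {
  int process_n = 0;
  MPI_Comm_size(MPI_COMM_WORLD, &process_n);
  int proc_rank = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &proc_rank);

  int total_size = 0;
  if (proc_rank == 0) {
    total_size = static_cast<int>(data_on_root.size());
    if (process_n > total_size && total_size > 0) {
      process_n = total_size;  // clamp path: marked ✗ in the report above
    }
  }
  MPI_Bcast(&process_n, 1, MPI_INT, 0, MPI_COMM_WORLD);
  MPI_Bcast(&total_size, 1, MPI_INT, 0, MPI_COMM_WORLD);

  if (total_size == 0) {
    return 0.0;  // empty-input early return: marked ✗ in the report above
  }

  if (proc_rank == 0) {
    // Master: hand out chunks, sum the tail locally, collect partial sums.
    const int base = total_size / process_n;
    int remain = total_size % process_n;
    int start_id = 0;
    for (int worker = 1; worker < process_n; worker++) {
      int part_size = base + (remain > 0 ? 1 : 0);
      if (remain > 0) {
        remain--;
      }
      MPI_Send(&part_size, 1, MPI_INT, worker, 0, MPI_COMM_WORLD);
      MPI_Send(data_on_root.data() + start_id, part_size, MPI_DOUBLE, worker, 0, MPI_COMM_WORLD);
      start_id += part_size;
    }
    double total_sum = 0.0;
    for (int i = start_id; i < total_size; ++i) {
      total_sum += data_on_root[i];
    }
    for (int worker = 1; worker < process_n; worker++) {
      double worker_sum = 0.0;
      MPI_Recv(&worker_sum, 1, MPI_DOUBLE, worker, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
      total_sum += worker_sum;
    }
    return total_sum;
  }
  if (proc_rank < process_n) {
    // Worker: receive a chunk, sum it, send the partial sum back.
    int part_size = 0;
    MPI_Recv(&part_size, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    std::vector<double> local_data(part_size);
    MPI_Recv(local_data.data(), part_size, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    double local_sum = 0.0;
    for (double val : local_data) {
      local_sum += val;
    }
    MPI_Send(&local_sum, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD);
  }
  // Ranks beyond the clamped process_n do no work (the report's untaken else).
  return 0.0;
}

}  // namespace

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);
  int rank = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  // Case 1: empty matrix -> the total_size == 0 early return on every rank.
  double empty_sum = SumWithClamp({});

  // Case 2: fewer elements than ranks (run with more than two processes) ->
  // rank 0 clamps process_n and the surplus ranks take the idle path.
  std::vector<double> tiny;
  if (rank == 0) {
    tiny = {1.0, 2.0};
  }
  double tiny_sum = SumWithClamp(tiny);

  if (rank == 0) {
    std::printf("empty: %f, tiny: %f\n", empty_sum, tiny_sum);
  }
  MPI_Finalize();
  return 0;
}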