GCC Code Coverage Report


Directory: ./
File: tasks/gutyansky_a_monte_carlo_multi_dimension/mpi/src/ops_mpi.cpp
Date: 2026-01-10 02:40:41
Exec Total Coverage
Lines: 81 82 98.8%
Functions: 8 8 100.0%
Branches: 24 34 70.6%

Line Branch Exec Source
1 #include "gutyansky_a_monte_carlo_multi_dimension/mpi/include/ops_mpi.hpp"
2
3 #include <mpi.h>
4
5 #include <cstddef>
6 #include <cstdint>
7 #include <random>
8 #include <utility>
9 #include <vector>
10
11 #include "gutyansky_a_monte_carlo_multi_dimension/common/include/common.hpp"
12 #include "gutyansky_a_monte_carlo_multi_dimension/common/include/function_registry.hpp"
13
14 namespace gutyansky_a_monte_carlo_multi_dimension {
15
16 namespace {}
17
18 1/2 16 GutyanskyAMonteCarloMultiDimensionMPI::GutyanskyAMonteCarloMultiDimensionMPI(const InType &in) {
✓ Branch 1 taken 16 times.
✗ Branch 2 not taken.
19 SetTypeOfTask(GetStaticTypeOfTask());
20
21 1/2 16 GetInput() = in;
✓ Branch 1 taken 16 times.
✗ Branch 2 not taken.
22 16 GetOutput() = {};
23 16 }
24
25 16 bool GutyanskyAMonteCarloMultiDimensionMPI::ValidationImpl() {
26 16 int rank = -1;
27 16 MPI_Comm_rank(MPI_COMM_WORLD, &rank);
28
29 2/2 16 if (rank == 0) {
✓ Branch 0 taken 8 times.
✓ Branch 1 taken 8 times.
30 8 return GetInput().IsValid();
31 }
32
33 return true;
34 }
35
36 16 bool GutyanskyAMonteCarloMultiDimensionMPI::PreProcessingImpl() {
37 16 return true;
38 }
39
40 16 int GutyanskyAMonteCarloMultiDimensionMPI::ComputePackedTaskSize(size_t n_dims) {
41 int size = 0;
42 16 int tmp = 0;
43
44 16 MPI_Pack_size(1, MPI_UINT64_T, MPI_COMM_WORLD, &tmp); // func_id
45 16 size += tmp;
46
47 16 MPI_Pack_size(1, MPI_UINT64_T, MPI_COMM_WORLD, &tmp); // n_dims
48 16 size += tmp;
49
50 16 MPI_Pack_size(1, MPI_UINT64_T, MPI_COMM_WORLD, &tmp); // n_points
51 16 size += tmp;
52
53 16 MPI_Pack_size(static_cast<int>(n_dims), MPI_DOUBLE, MPI_COMM_WORLD, &tmp); // lower_bounds
54 16 size += tmp;
55
56 16 MPI_Pack_size(static_cast<int>(n_dims), MPI_DOUBLE, MPI_COMM_WORLD, &tmp); // upper_bounds
57 16 size += tmp;
58
59 16 return size;
60 }
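
A concrete sizing example for the helper above (the value 3 is only illustrative; the report aggregates runs with different dimensions). With n_dims = 3, the packed task needs at least

    3 × 8 bytes      (func_id, n_dims, n_points as MPI_UINT64_T)
  + 2 × 3 × 8 bytes  (lower_bounds and upper_bounds as MPI_DOUBLE)
  = 72 bytes

MPI_Pack_size returns an upper bound on the packed increment, so the computed total may exceed 72 bytes but can never be too small. Because every rank evaluates the same sums once n_dims is known, all ranks allocate buffers of identical size, which is what lets RunImpl below broadcast the buffer with a matching MPI_PACKED count on every rank.
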
61
62 8 void GutyanskyAMonteCarloMultiDimensionMPI::PackTaskData(const IntegrationTask &task, std::vector<uint8_t> &buffer) {
63 8 int position = 0;
64
65 8 MPI_Pack(&task.func_id, 1, MPI_UINT64_T, buffer.data(), static_cast<int>(buffer.size()), &position, MPI_COMM_WORLD);
66 8 MPI_Pack(&task.n_dims, 1, MPI_UINT64_T, buffer.data(), static_cast<int>(buffer.size()), &position, MPI_COMM_WORLD);
67 8 MPI_Pack(&task.n_points, 1, MPI_UINT64_T, buffer.data(), static_cast<int>(buffer.size()), &position, MPI_COMM_WORLD);
68 8 MPI_Pack(task.lower_bounds.data(), static_cast<int>(task.n_dims), MPI_DOUBLE, buffer.data(),
69 static_cast<int>(buffer.size()), &position, MPI_COMM_WORLD);
70 8 MPI_Pack(task.upper_bounds.data(), static_cast<int>(task.n_dims), MPI_DOUBLE, buffer.data(),
71 static_cast<int>(buffer.size()), &position, MPI_COMM_WORLD);
72 8 }
73
74 8 void GutyanskyAMonteCarloMultiDimensionMPI::UnpackTaskData(const std::vector<uint8_t> &buffer, IntegrationTask &task) {
75 8 int position = 0;
76 8 MPI_Unpack(buffer.data(), static_cast<int>(buffer.size()), &position, &task.func_id, 1, MPI_UINT64_T, MPI_COMM_WORLD);
77 8 MPI_Unpack(buffer.data(), static_cast<int>(buffer.size()), &position, &task.n_dims, 1, MPI_UINT64_T, MPI_COMM_WORLD);
78 8 MPI_Unpack(buffer.data(), static_cast<int>(buffer.size()), &position, &task.n_points, 1, MPI_UINT64_T,
79 MPI_COMM_WORLD);
80 8 task.lower_bounds.resize(task.n_dims);
81 8 MPI_Unpack(buffer.data(), static_cast<int>(buffer.size()), &position, task.lower_bounds.data(),
82 8 static_cast<int>(task.n_dims), MPI_DOUBLE, MPI_COMM_WORLD);
83 8 task.upper_bounds.resize(task.n_dims);
84 8 MPI_Unpack(buffer.data(), static_cast<int>(buffer.size()), &position, task.upper_bounds.data(),
85 8 static_cast<int>(task.n_dims), MPI_DOUBLE, MPI_COMM_WORLD);
86 8 }
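
The two helpers above are the serialize and deserialize halves of the byte-buffer protocol that RunImpl below drives: rank 0 packs, every rank broadcasts the raw bytes, and the non-root ranks unpack. Below is a minimal, self-contained sketch of the same MPI_Pack_size / MPI_Pack / MPI_Bcast(MPI_PACKED) / MPI_Unpack round trip with a simplified payload (a count plus that many doubles); all names in the sketch are illustrative and not taken from the covered file.

    // Minimal sketch (not from the covered file): pack on the root,
    // broadcast the raw bytes, unpack on every other rank.
    #include <mpi.h>

    #include <cstdint>
    #include <vector>

    int main(int argc, char **argv) {
      MPI_Init(&argc, &argv);

      int rank = 0;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      std::uint64_t n = (rank == 0) ? 3 : 0;  // only the root knows the payload size
      std::vector<double> values;
      if (rank == 0) {
        values = {1.0, 2.0, 3.0};
      }

      // Every rank must learn n first so it can size an identical packed buffer.
      MPI_Bcast(&n, 1, MPI_UINT64_T, 0, MPI_COMM_WORLD);

      int size = 0;
      int tmp = 0;
      MPI_Pack_size(1, MPI_UINT64_T, MPI_COMM_WORLD, &tmp);                  // count field
      size += tmp;
      MPI_Pack_size(static_cast<int>(n), MPI_DOUBLE, MPI_COMM_WORLD, &tmp);  // payload
      size += tmp;

      std::vector<std::uint8_t> buffer(size);

      if (rank == 0) {
        int position = 0;
        MPI_Pack(&n, 1, MPI_UINT64_T, buffer.data(), size, &position, MPI_COMM_WORLD);
        MPI_Pack(values.data(), static_cast<int>(n), MPI_DOUBLE, buffer.data(), size,
                 &position, MPI_COMM_WORLD);
      }

      // The raw bytes travel as MPI_PACKED; the count matches on every rank.
      MPI_Bcast(buffer.data(), size, MPI_PACKED, 0, MPI_COMM_WORLD);

      if (rank != 0) {
        int position = 0;
        MPI_Unpack(buffer.data(), size, &position, &n, 1, MPI_UINT64_T, MPI_COMM_WORLD);
        values.resize(n);
        MPI_Unpack(buffer.data(), size, &position, values.data(), static_cast<int>(n),
                   MPI_DOUBLE, MPI_COMM_WORLD);
      }

      MPI_Finalize();
      return 0;
    }

The only ordering constraint is that the payload size must be known on every rank before the buffer is allocated, which is why RunImpl broadcasts n_dims (line 94) before the packed broadcast (line 102).
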
87
88 16 bool GutyanskyAMonteCarloMultiDimensionMPI::RunImpl() {
89 16 int world_size = 0;
90 16 int rank = 0;
91 16 MPI_Comm_size(MPI_COMM_WORLD, &world_size);
92 16 MPI_Comm_rank(MPI_COMM_WORLD, &rank);
93
94 16 MPI_Bcast(&GetInput().n_dims, 1, MPI_UINT64_T, 0, MPI_COMM_WORLD);
95
96 16 std::vector<uint8_t> buffer(ComputePackedTaskSize(GetInput().n_dims));
97
98 2/2 16 if (rank == 0) {
✓ Branch 0 taken 8 times.
✓ Branch 1 taken 8 times.
99 1/2 8 PackTaskData(GetInput(), buffer);
✓ Branch 1 taken 8 times.
✗ Branch 2 not taken.
100 }
101
102 1/2 16 MPI_Bcast(buffer.data(), static_cast<int>(buffer.size()), MPI_PACKED, 0, MPI_COMM_WORLD);
✓ Branch 1 taken 16 times.
✗ Branch 2 not taken.
103
104 2/2 16 if (rank != 0) {
✓ Branch 0 taken 8 times.
✓ Branch 1 taken 8 times.
105 1/2 8 UnpackTaskData(buffer, GetInput());
✓ Branch 1 taken 8 times.
✗ Branch 2 not taken.
106 }
107
108 16 size_t n_points = GetInput().n_points;
109 16 size_t n_dims = GetInput().n_dims;
110
111 16 size_t chunk_size = n_points / static_cast<size_t>(world_size);
112 16 size_t remainder_size = n_points % static_cast<size_t>(world_size);
113
114 size_t size = 0;
115
116 1/2 16 if (std::cmp_less(rank, remainder_size)) {
✓ Branch 0 taken 16 times.
✗ Branch 1 not taken.
117 size = chunk_size + 1;
118 } else {
119 size = chunk_size;
120 }
121
122 1/2 16 FunctionRegistry::IntegralFunction function = GetInput().GetFunction();
✓ Branch 1 taken 16 times.
✗ Branch 2 not taken.
123
124 1/2 16 std::random_device rd;
✓ Branch 1 taken 16 times.
✗ Branch 2 not taken.
125 16 std::mt19937 gen(rd() + static_cast<unsigned>(rank));
126 std::uniform_real_distribution<double> distr(0.0, 1.0);
127 1/2 16 std::vector<double> random_point(n_dims);
✓ Branch 1 taken 16 times.
✗ Branch 2 not taken.
128
129 16 double sum = 0.0;
130
131 2/2 4000016 for (size_t i = 0; i < size; i++) {
✓ Branch 0 taken 4000000 times.
✓ Branch 1 taken 16 times.
132 2/2 15500000 for (size_t j = 0; j < n_dims; j++) {
✓ Branch 0 taken 11500000 times.
✓ Branch 1 taken 4000000 times.
133 11500000 double lb = GetInput().lower_bounds[j];
134 11500000 double rb = GetInput().upper_bounds[j];
135
136 11500000 random_point[j] = lb + (distr(gen) * (rb - lb));
137 }
138
139 4000000 sum += function(random_point);
140 }
141
142 16 double total_sum = 0.0;
143 1/2 16 MPI_Reduce(&sum, &total_sum, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
✓ Branch 1 taken 16 times.
✗ Branch 2 not taken.
144
145 2/2 16 if (rank == 0) {
✓ Branch 0 taken 8 times.
✓ Branch 1 taken 8 times.
146 double volume = 1.0;
147
148 2/2 31 for (size_t i = 0; i < n_dims; i++) {
✓ Branch 0 taken 23 times.
✓ Branch 1 taken 8 times.
149 23 volume *= GetInput().upper_bounds[i] - GetInput().lower_bounds[i];
150 }
151
152 8 GetOutput() = volume * (total_sum / static_cast<double>(n_points));
153 }
154
155 16 return true;
156 }
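
For reference, line 152 above is the standard mean-value Monte Carlo estimator. The sampling loop (lines 131-140) draws each coordinate uniformly from [lower_bounds[j], upper_bounds[j]], each rank accumulates a partial sum over its share of the points, and MPI_Reduce (line 143) adds the partial sums into total_sum on rank 0, so the value stored in GetOutput() is

    \int_D f(x)\,dx \approx V \cdot \frac{1}{N} \sum_{i=1}^{N} f(x_i),
    \qquad V = \prod_{j=1}^{d} (b_j - a_j),

where N = n_points, d = n_dims, and [a_j, b_j] are the per-dimension bounds; this is exactly volume * (total_sum / n_points). The points are divided as N / world_size per rank, with the first N mod world_size ranks taking one extra point (lines 111-120), and each rank offsets its seed by rank (line 125) so the ranks do not draw identical streams.
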
157
158 16 bool GutyanskyAMonteCarloMultiDimensionMPI::PostProcessingImpl() {
159 16 return true;
160 }
161
162 } // namespace gutyansky_a_monte_carlo_multi_dimension
163
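
A closing note on the serialization scheme: the integrand itself never crosses MPI; only func_id travels in the packed buffer, and each rank resolves it locally through GetInput().GetFunction() (line 122). The real FunctionRegistry interface lives in function_registry.hpp and is not part of this report, so the sketch below is purely hypothetical, with every name invented, to illustrate why an integer id is the thing worth broadcasting.

    // Hypothetical id-to-callable table (none of these names are from FunctionRegistry):
    // an integer id is cheap to pack and broadcast, and every rank maps it back to
    // the same local function object.
    #include <cstdint>
    #include <functional>
    #include <vector>

    using Integrand = std::function<double(const std::vector<double> &)>;

    inline Integrand LookupIntegrand(std::uint64_t id) {
      switch (id) {
        case 0:  // unit function: its integral equals the box volume
          return [](const std::vector<double> &) { return 1.0; };
        case 1:  // sum of squares
          return [](const std::vector<double> &x) {
            double s = 0.0;
            for (double v : x) {
              s += v * v;
            }
            return s;
          };
        default:  // unknown id
          return [](const std::vector<double> &) { return 0.0; };
      }
    }

Broadcasting an id keeps the packed message a fixed, easily sized layout (three integers plus the bounds), whereas a callable has no portable wire representation.
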