GCC Code Coverage Report


Directory: ./
File: tasks/kulik_a_radix_sort_double_simple_merge/mpi/src/ops_mpi.cpp
Date: 2026-02-02 01:14:38
Exec Total Coverage
Lines: 0 118 0.0%
Functions: 0 10 0.0%
Branches: 0 118 0.0%

Line Branch Exec Source
1 #include "kulik_a_radix_sort_double_simple_merge/mpi/include/ops_mpi.hpp"
2
3 #include <mpi.h>
4
5 #include <algorithm>
6 #include <cmath>
7 #include <cstddef>
8 #include <cstdint>
9 #include <utility>
10 #include <vector>
11
12 #include "kulik_a_radix_sort_double_simple_merge/common/include/common.hpp"
13
14 namespace kulik_a_radix_sort_double_simple_merge {
15
16 KulikARadixSortDoubleSimpleMergeMPI::KulikARadixSortDoubleSimpleMergeMPI(const InType &in) {
17 SetTypeOfTask(GetStaticTypeOfTask());
18 int proc_rank = 0;
19 MPI_Comm_rank(MPI_COMM_WORLD, &proc_rank);
20 if (proc_rank == 0) {
21 GetInput() = in;
22 } else {
23 GetInput() = InType{};
24 }
25 }
26
27 bool KulikARadixSortDoubleSimpleMergeMPI::ValidationImpl() {
28 int proc_rank = 0;
29 MPI_Comm_rank(MPI_COMM_WORLD, &proc_rank);
30 if (proc_rank == 0) {
31 return (!GetInput().empty());
32 }
33 return true;
34 }
35
36 bool KulikARadixSortDoubleSimpleMergeMPI::PreProcessingImpl() {
37 return true;
38 }
39
40 double *KulikARadixSortDoubleSimpleMergeMPI::LSDSortBytes(double *arr, double *buffer, size_t size) {
41 double *parr = arr;
42 double *pbuffer = buffer;
43 for (uint64_t byte = 0; byte < sizeof(double); ++byte) {
44 std::vector<uint64_t> count(256, 0);
45 auto *bytes = reinterpret_cast<unsigned char *>(parr);
46 for (size_t i = 0; i < size; ++i) {
47 count[bytes[(sizeof(double) * i) + byte]]++;
48 }
49 uint64_t pos = 0;
50 for (uint64_t i = 0; i < 256; ++i) {
51 uint64_t temp = count[i];
52 count[i] = pos;
53 pos += temp;
54 }
55 for (size_t i = 0; i < size; ++i) {
56 unsigned char byte_value = bytes[(sizeof(double) * i) + byte];
57 uint64_t new_pos = count[byte_value]++;
58 pbuffer[new_pos] = parr[i];
59 }
60 std::swap(parr, pbuffer);
61 }
62 return parr;
63 }
64
65 void KulikARadixSortDoubleSimpleMergeMPI::AdjustNegativeNumbers(std::vector<double> &arr, size_t size) {
66 size_t neg_start = 0;
67 while (neg_start < size && arr[neg_start] >= 0.0) {
68 ++neg_start;
69 }
70 if (neg_start < size) {
71 for (size_t i = neg_start, j = size - 1; i < j; ++i, --j) {
72 std::swap(arr[i], arr[j]);
73 }
74 std::vector<double> temp(size);
75 size_t index = 0;
76 for (size_t i = neg_start; i < size; ++i) {
77 temp[index++] = arr[i];
78 }
79 for (size_t i = 0; i < neg_start; ++i) {
80 temp[index++] = arr[i];
81 }
82 arr = std::move(temp);
83 }
84 }
85
86 void KulikARadixSortDoubleSimpleMergeMPI::LSDSortLocal(std::vector<double> &local_arr) {
87 size_t size = local_arr.size();
88 if (size <= 1) {
89 return;
90 }
91 std::vector<double> buffer(size);
92 double *sorted_ptr = LSDSortBytes(local_arr.data(), buffer.data(), size);
93 if (sorted_ptr == buffer.data()) {
94 std::ranges::copy(buffer, local_arr.begin());
95 }
96 AdjustNegativeNumbers(local_arr, size);
97 }
98
99 std::vector<double> KulikARadixSortDoubleSimpleMergeMPI::SimpleMerge(
100 const std::vector<std::vector<double>> &sorted_arrays) {
101 if (sorted_arrays.empty()) {
102 return {};
103 }
104 size_t global_size = 0;
105 for (const auto &arr : sorted_arrays) {
106 global_size += arr.size();
107 }
108 std::vector<double> result;
109 result.reserve(global_size);
110 std::vector<size_t> indices(sorted_arrays.size(), 0);
111 while (result.size() < global_size) {
112 int min_idx = -1;
113 double min_val = 0.0;
114 bool first = true;
115 for (size_t i = 0; i < sorted_arrays.size(); ++i) {
116 if (indices[i] < sorted_arrays[i].size()) {
117 double val = sorted_arrays[i][indices[i]];
118 if (first || val < min_val) {
119 min_val = val;
120 min_idx = static_cast<int>(i);
121 first = false;
122 }
123 }
124 }
125 if (min_idx != -1) {
126 result.push_back(min_val);
127 indices[min_idx]++;
128 }
129 }
130 return result;
131 }
132
// Distributed sort: rank 0 scatters `arr` in near-equal blocks, every rank
// radix-sorts its block locally, the sorted blocks are gathered back on
// rank 0 and merged there. Collective-call order must be identical on all
// ranks — do not reorder the MPI calls. On exit only rank 0's `arr` holds
// the fully sorted data; other ranks' vectors are left untouched (empty).
void KulikARadixSortDoubleSimpleMergeMPI::LSDSortDouble(std::vector<double> &arr) {
  int proc_rank = 0;
  int proc_num = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &proc_rank);
  MPI_Comm_size(MPI_COMM_WORLD, &proc_num);
  size_t global_size = arr.size();
  // Only rank 0 knows the real size; broadcast it so every rank can compute
  // its own chunk length. NOTE(review): assumes size_t is 64-bit to match
  // MPI_UINT64_T — confirm on the target platform.
  MPI_Bcast(&global_size, 1, MPI_UINT64_T, 0, MPI_COMM_WORLD);
  // Block distribution: the first `r` ranks take one extra element.
  size_t local_size = global_size / proc_num;
  size_t r = global_size % proc_num;
  if (std::cmp_less(proc_rank, r)) {
    local_size++;
  }
  std::vector<double> local_arr(local_size);
  std::vector<int> sendcounts(proc_num);
  std::vector<int> displs(proc_num);
  if (proc_rank == 0) {
    // Root builds per-rank counts/offsets mirroring the same distribution
    // rule as above; these arrays are ignored on non-root ranks.
    int offset = 0;
    for (int i = 0; i < proc_num; ++i) {
      sendcounts[i] = std::cmp_less(i, r) ? static_cast<int>((global_size / proc_num) + 1)
                                          : static_cast<int>(global_size / proc_num);
      displs[i] = offset;
      offset += sendcounts[i];
    }
  }
  MPI_Scatterv(arr.data(), sendcounts.data(), displs.data(), MPI_DOUBLE, local_arr.data(), static_cast<int>(local_size),
               MPI_DOUBLE, 0, MPI_COMM_WORLD);
  // Each rank sorts its own chunk independently.
  LSDSortLocal(local_arr);
  // Gather each rank's (possibly uneven) chunk size back to root so root
  // can compute the receive displacements for Gatherv.
  std::vector<int> recv_counts(proc_num);
  int local_size_int = static_cast<int>(local_size);
  MPI_Gather(&local_size_int, 1, MPI_INT, recv_counts.data(), 1, MPI_INT, 0, MPI_COMM_WORLD);
  std::vector<int> recv_displs(proc_num);
  if (proc_rank == 0) {
    recv_displs[0] = 0;
    for (int i = 1; i < proc_num; ++i) {
      recv_displs[i] = recv_displs[i - 1] + recv_counts[i - 1];
    }
  }
  if (proc_rank == 0) {
    // Only root needs the full-size receive buffer (non-root ranks were
    // constructed with an empty input vector).
    arr.resize(global_size);
  }
  MPI_Gatherv(local_arr.data(), local_size_int, MPI_DOUBLE, arr.data(), recv_counts.data(), recv_displs.data(),
              MPI_DOUBLE, 0, MPI_COMM_WORLD);
  if (proc_rank == 0) {
    // Slice the gathered buffer back into per-rank sorted runs and merge
    // them into the final sorted array.
    std::vector<std::vector<double>> sorted_parts;
    for (int i = 0; i < proc_num; ++i) {
      std::vector<double> part(arr.begin() + recv_displs[i], arr.begin() + recv_displs[i] + recv_counts[i]);
      sorted_parts.push_back(std::move(part));
    }
    arr = SimpleMerge(sorted_parts);
  }
}
184
185 bool KulikARadixSortDoubleSimpleMergeMPI::RunImpl() {
186 GetOutput() = GetInput();
187 LSDSortDouble(GetOutput());
188 return true;
189 }
190
191 bool KulikARadixSortDoubleSimpleMergeMPI::PostProcessingImpl() {
192 int proc_rank = 0;
193 MPI_Comm_rank(MPI_COMM_WORLD, &proc_rank);
194 size_t size = 0;
195 if (proc_rank == 0) {
196 size = GetOutput().size();
197 }
198 MPI_Bcast(&size, 1, MPI_UINT64_T, 0, MPI_COMM_WORLD);
199 GetOutput().resize(size);
200 MPI_Bcast(GetOutput().data(), static_cast<int>(size), MPI_DOUBLE, 0, MPI_COMM_WORLD);
201 return (!GetOutput().empty());
202 }
203
204 } // namespace kulik_a_radix_sort_double_simple_merge
205