GCC Code Coverage Report


Directory: ./
File: tasks/petrov_e_allreduce/seq/src/ops_seq.cpp
Date: 2026-01-09 01:27:18
            Exec  Total  Coverage
Lines:         0     39      0.0%
Functions:     0      5      0.0%
Branches:      0     36      0.0%

Source (no lines executed):
#include "petrov_e_allreduce/seq/include/ops_seq.hpp"

#include <mpi.h>

#include <algorithm>
#include <limits>
#include <type_traits>
#include <utility>
#include <vector>

#include "petrov_e_allreduce/common/include/common.hpp"

namespace petrov_e_allreduce {

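// Registers the task type and copies the input; the output starts empty.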
PetrovEAllreduceMPI::PetrovEAllreduceMPI(const InType &in) {
  SetTypeOfTask(GetStaticTypeOfTask());
  GetInput() = in;
  GetOutput() = {};
}

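// The input is well-formed when the flattened matrix holds exactly n * m
// elements and no output has been produced yet.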
bool PetrovEAllreduceMPI::ValidationImpl() {
  return (std::get<0>(GetInput()) * std::get<1>(GetInput()) == static_cast<int>(std::get<2>(GetInput()).size())) &&
         (GetOutput().empty());
}

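// No preparation is needed beyond re-checking the size invariant.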
bool PetrovEAllreduceMPI::PreProcessingImpl() {
  return (std::get<0>(GetInput()) * std::get<1>(GetInput()) == static_cast<int>(std::get<2>(GetInput()).size()));
}

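// Computes the maximum of each of the m columns (stored contiguously, n
// elements per column): ranks reduce disjoint blocks of columns locally,
// then combine the partial results with MPI_Allreduce(MPI_MAX).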
bool PetrovEAllreduceMPI::RunImpl() {
  if (std::get<0>(GetInput()) * std::get<1>(GetInput()) != static_cast<int>(std::get<2>(GetInput()).size())) {
    return false;
  }

  auto &n = std::get<0>(GetInput());
  auto &m = std::get<1>(GetInput());
  auto &matrix = std::get<2>(GetInput());
  OutType &res = GetOutput();
  using MatrixElemType = std::remove_reference_t<decltype(matrix[0])>;
  MPI_Datatype mpi_matrix_elem_type = GetMPIDatatype<MatrixElemType>();
  if (mpi_matrix_elem_type == MPI_DATATYPE_NULL) {
    return false;
  }
  int proc_num = 0;
  int proc_rank = 0;
  MPI_Comm_size(MPI_COMM_WORLD, &proc_num);
  MPI_Comm_rank(MPI_COMM_WORLD, &proc_rank);
  res.resize(m);
  int col_num_per_proc = m / proc_num;
  int col_num_wo_proc = m % proc_num;

  std::vector<int> start(proc_num);
  std::vector<int> end(proc_num);

  int proc_start = 0;
  int proc_end = 0;

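  // Rank 0 partitions the m columns into contiguous, nearly equal blocks:
  // the first m % proc_num ranks receive one extra column each.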
  if (proc_rank == 0) {
    for (int i = 0; i < proc_num; i++) {
      const int extra = (i < col_num_wo_proc) ? 1 : 0;
      start[i] = (i * col_num_per_proc) + std::min(i, col_num_wo_proc);
      end[i] = start[i] + col_num_per_proc + extra;
    }
  }

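  // Hand each rank its own [proc_start, proc_end) column range.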
  MPI_Scatter(start.data(), 1, MPI_INT, &proc_start, 1, MPI_INT, 0, MPI_COMM_WORLD);
  MPI_Scatter(end.data(), 1, MPI_INT, &proc_end, 1, MPI_INT, 0, MPI_COMM_WORLD);

  OutType proc_res(m, std::numeric_limits<MatrixElemType>::lowest());

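  // Reduce the local block of columns; entries owned by other ranks stay at
  // lowest(), so they never win the MPI_MAX reduction below. The read of the
  // first element assumes each column has at least one entry (n >= 1).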
  for (int i = proc_start; std::cmp_less(i, proc_end); i++) {
    MatrixElemType max = matrix[i * n];
    for (int j = 1; j < n; j++) {
      max = std::max(matrix[(i * n) + j], max);
    }
    proc_res[i] = max;
  }

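  // Merge the per-rank partial maxima so that every rank receives the full
  // result vector.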
  MPI_Allreduce(proc_res.data(), res.data(), m, mpi_matrix_elem_type, MPI_MAX, MPI_COMM_WORLD);

  return true;
}

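// Nothing to finalize; RunImpl already left the result in GetOutput().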
bool PetrovEAllreduceMPI::PostProcessingImpl() {
  return true;
}

} // namespace petrov_e_allreduce
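
Note: the start/end arithmetic in RunImpl is a standard even block partition. A
minimal standalone sketch of the same computation (hypothetical m and proc_num,
no MPI required):

    #include <algorithm>
    #include <cstdio>

    int main() {
      const int m = 10;        // number of columns (hypothetical)
      const int proc_num = 4;  // number of ranks (hypothetical)
      const int per_proc = m / proc_num;  // base block size: 2
      const int leftover = m % proc_num;  // 2, so two ranks get one extra column
      for (int i = 0; i < proc_num; i++) {
        const int extra = (i < leftover) ? 1 : 0;
        const int start = (i * per_proc) + std::min(i, leftover);
        const int end = start + per_proc + extra;
        // Prints: rank 0: [0, 3)  rank 1: [3, 6)  rank 2: [6, 8)  rank 3: [8, 10)
        std::printf("rank %d: [%d, %d)\n", i, start, end);
      }
      return 0;
    }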