GCC Code Coverage Report


Directory: ./
File: tasks/chetverikova_e_lattice_torus/mpi/src/ops_mpi.cpp
Date: 2026-01-09 01:27:18
            Exec  Total  Coverage
Lines:         0     89      0.0%
Functions:     0     12      0.0%
Branches:      0     78      0.0%

Line Branch Exec Source
1 #include "chetverikova_e_lattice_torus/mpi/include/ops_mpi.hpp"
2
3 #include <mpi.h>
4
5 #include <algorithm>
6 #include <cmath>
7 #include <iterator>
8 #include <tuple>
9 #include <utility>
10 #include <vector>
11
12 #include "chetverikova_e_lattice_torus/common/include/common.hpp"
13
14 namespace chetverikova_e_lattice_torus {
15
16 ChetverikovaELatticeTorusMPI::ChetverikovaELatticeTorusMPI(const InType &in) {
17 SetTypeOfTask(GetStaticTypeOfTask());
18 GetInput() = in;
19 GetOutput() = std::make_tuple(std::vector<double>{}, std::vector<int>{});
20 }
21
22 bool ChetverikovaELatticeTorusMPI::ValidationImpl() {
23 return true;
24 }
25
26 bool ChetverikovaELatticeTorusMPI::PreProcessingImpl() {
27 MPI_Comm_size(MPI_COMM_WORLD, &world_size_);
28 MPI_Comm_rank(MPI_COMM_WORLD, &rank_);
29
30 DetermineGridDimensions();
31 return rows_ * cols_ == world_size_;
32 }
33
34 void ChetverikovaELatticeTorusMPI::DetermineGridDimensions() {  // pick the factorization of world_size_ closest to a square (rows_ <= cols_)
35 int best_rows = 1;
36 int min_diff = world_size_;
37 for (int row = 1; row * row <= world_size_; ++row) {
38 if (world_size_ % row == 0) {
39 int c = world_size_ / row;
40 int diff = std::abs(row - c);
41 if (diff < min_diff) {
42 min_diff = diff;
43 best_rows = row;
44 }
45 }
46 }
47 rows_ = best_rows;
48 cols_ = world_size_ / rows_;
49 }
50
51 int ChetverikovaELatticeTorusMPI::GetRank(int row, int col) const {  // wrap (row, col) onto the torus and map to a linear rank
52 row = ((row % rows_) + rows_) % rows_;
53 col = ((col % cols_) + cols_) % cols_;
54 return (row * cols_) + col;
55 }
56
57 int ChetverikovaELatticeTorusMPI::GetOptimalDirection(int start, int end, int size) {  // shorter way around a ring of the given size: +1 forward, -1 backward
58 int forward = (end - start + size) % size;
59 int backward = (start - end + size) % size;
60 return (forward <= backward) ? 1 : -1;
61 }
62
63 int ChetverikovaELatticeTorusMPI::ComputeNextNode(int curr, int end) const {  // one routing hop: align the column first, then the row; -1 if already at end
64 if (curr == end) {
65 return -1;
66 }
67
68 int curr_row = curr / cols_;
69 int curr_col = curr % cols_;
70 int dest_row = end / cols_;
71 int dest_col = end % cols_;
72
73 if (curr_col != dest_col) {
74 int dir = GetOptimalDirection(curr_col, dest_col, cols_);
75 return GetRank(curr_row, curr_col + dir);
76 }
77
78 if (curr_row != dest_row) {
79 int dir = GetOptimalDirection(curr_row, dest_row, rows_);
80 return GetRank(curr_row + dir, curr_col);
81 }
82
83 return -1;
84 }
85
86 std::vector<int> ChetverikovaELatticeTorusMPI::ComputeFullPath(int start, int end) const {  // hop-by-hop route from start to end, inclusive
87 std::vector<int> path;
88 path.push_back(start);
89 int curr = start;
90 while (curr != end) {
91 int next = ComputeNextNode(curr, end);
92 if (next == -1) {
93 break;
94 }
95 path.push_back(next);
96 curr = next;
97 }
98 return path;
99 }
100
101 std::vector<double> ChetverikovaELatticeTorusMPI::ReceiveData(int sender) {  // receive the size (tag 0), then the payload (tag 1)
102 int recv_size = 0;
103 MPI_Recv(&recv_size, 1, MPI_INT, sender, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
104
105 std::vector<double> data;
106 if (recv_size > 0) {
107 data.resize(recv_size);
108 MPI_Recv(data.data(), recv_size, MPI_DOUBLE, sender, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
109 }
110 return data;
111 }
112
113 void ChetverikovaELatticeTorusMPI::SendDataToNext(const std::vector<double> &data, int next_node) {  // send the size (tag 0), then the payload (tag 1)
114 int data_size = static_cast<int>(data.size());
115 MPI_Send(&data_size, 1, MPI_INT, next_node, 0, MPI_COMM_WORLD);
116
117 if (data_size > 0) {
118 MPI_Send(data.data(), data_size, MPI_DOUBLE, next_node, 1, MPI_COMM_WORLD);
119 }
120 }
121
122 bool ChetverikovaELatticeTorusMPI::RunImpl() {
123 int start = 0;
124 int end = 0;
125
126 if (rank_ == 0) {
127 start = std::get<0>(GetInput()); // sender
128 end = std::get<1>(GetInput()); // receiver
129 }
130 MPI_Bcast(&start, 1, MPI_INT, 0, MPI_COMM_WORLD);
131 MPI_Bcast(&end, 1, MPI_INT, 0, MPI_COMM_WORLD);
132 bool not_correct = start >= world_size_ || end >= world_size_ || start < 0 || end < 0;
133 if (not_correct) {
134 GetOutput() = std::make_tuple(std::vector<double>{}, std::vector<int>{});
135 return true;
136 }
137
138 std::vector<int> path = ComputeFullPath(start, end);
139 auto it = std::ranges::find(path, rank_);
140 bool is_on_path = (it != path.end());
141
142 std::vector<double> recv_data;
143
144 if (rank_ == start) {
145 recv_data = std::get<2>(GetInput());
146 if (start != end) {
147 SendDataToNext(recv_data, path[1]);
148 }
149 } else if (is_on_path) {
150 int index = static_cast<int>(std::distance(path.begin(), it));
151 int prev_node = path[index - 1];
152
153 recv_data = ReceiveData(prev_node);
154
155 if (rank_ != end && (index + 1) < static_cast<int>(path.size())) {
156 int next_node = path[index + 1];
157 SendDataToNext(recv_data, next_node);
158 }
159 }
160 if (rank_ == end) {
161 GetOutput() = std::make_tuple(std::move(recv_data), std::move(path));
162 } else {
163 GetOutput() = std::make_tuple(std::vector<double>{}, std::vector<int>{});
164 }
165
166 MPI_Barrier(MPI_COMM_WORLD);
167 return true;
168 }
169
170 bool ChetverikovaELatticeTorusMPI::PostProcessingImpl() {
171 return true;
172 }
173 } // namespace chetverikova_e_lattice_torus
174