GCC Code Coverage Report


Directory: ./
File: tasks/romanov_a_scatter/mpi/src/ops_mpi.cpp
Date: 2026-01-10 02:40:41
            Exec   Total   Coverage
Lines:        63      69      91.3%
Functions:     6       6     100.0%
Branches:     29      58      50.0%

Line Branch Exec Source
1 #include "romanov_a_scatter/mpi/include/ops_mpi.hpp"
2
3 #include <mpi.h>
4
5 #include <cstddef>
6 #include <cstring>
7 #include <vector>
8
9 #include "romanov_a_scatter/common/include/common.hpp"
10
11 namespace romanov_a_scatter {
12
13 10 int MyMPIScatter(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount,
14 MPI_Datatype recvtype, int root, MPI_Comm comm) {
15 10 int sendtype_size = 0;
16 10 MPI_Type_size(sendtype, &sendtype_size);
17
18 10 int recvtype_size = 0;
19 10 MPI_Type_size(recvtype, &recvtype_size);
20
21 1/2 10 if (sendcount * sendtype_size < recvcount * recvtype_size) {
        ✓ Branch 0 taken 10 times.
        ✗ Branch 1 not taken.
22 return MPI_ERR_ARG;
23 }
24
25 10 int rank = 0;
26 10 MPI_Comm_rank(comm, &rank);
27
28 10 int num_processes = 0;
29 10 MPI_Comm_size(comm, &num_processes);
30
31 10 int bytes_per_block = recvcount * recvtype_size;
32
33 10 int blocks_in_subtree = 0;
34 10 std::vector<char> data;
35
36 2/2 10 if (rank == root) {
        ✓ Branch 0 taken 5 times.
        ✓ Branch 1 taken 5 times.
37 1/2 5 if (rank == 0) {
        ✓ Branch 0 taken 5 times.
        ✗ Branch 1 not taken.
38 5 blocks_in_subtree = num_processes;
39 5 data.assign(static_cast<char *>(sendbuf),
40 1/2 5 static_cast<char *>(sendbuf) + static_cast<ptrdiff_t>(blocks_in_subtree * bytes_per_block));
        ✓ Branch 1 taken 5 times.
        ✗ Branch 2 not taken.
41 } else {
42 int total_blocks = num_processes;
43 MPI_Send(&total_blocks, 1, MPI_INT, 0, 0, comm);
44 MPI_Send(sendbuf, total_blocks * bytes_per_block, MPI_BYTE, 0, 1, comm);
45 }
46 }
47
48 2/2 10 if (rank == 0) {
        ✓ Branch 0 taken 5 times.
        ✓ Branch 1 taken 5 times.
49 1/2 5 if (data.empty()) {
        ✗ Branch 0 not taken.
        ✓ Branch 1 taken 5 times.
50 MPI_Recv(&blocks_in_subtree, 1, MPI_INT, root, 0, comm, MPI_STATUS_IGNORE);
51 data.resize(static_cast<size_t>(blocks_in_subtree) * bytes_per_block);
52 MPI_Recv(data.data(), blocks_in_subtree * bytes_per_block, MPI_BYTE, root, 1, comm, MPI_STATUS_IGNORE);
53 }
54 } else {
55 5 int parent_rank = rank & (rank - 1);
56 1/2 5 MPI_Recv(&blocks_in_subtree, 1, MPI_INT, parent_rank, 0, comm, MPI_STATUS_IGNORE);
        ✓ Branch 1 taken 5 times.
        ✗ Branch 2 not taken.
57 1/2 5 data.resize(static_cast<size_t>(blocks_in_subtree) * bytes_per_block);
        ✓ Branch 1 taken 5 times.
        ✗ Branch 2 not taken.
58 1/2 5 MPI_Recv(data.data(), blocks_in_subtree * bytes_per_block, MPI_BYTE, parent_rank, 1, comm, MPI_STATUS_IGNORE);
        ✓ Branch 1 taken 5 times.
        ✗ Branch 2 not taken.
59 }
60
61 10 std::memcpy(recvbuf, data.data(), bytes_per_block);
62
63 int mask = 1;
64 10 int rank_copy = rank;
65 4/4 15 while (((rank_copy & 1) == 0) && (mask < blocks_in_subtree)) {
        ✓ Branch 0 taken 10 times.
        ✓ Branch 1 taken 5 times.
        ✓ Branch 2 taken 5 times.
        ✓ Branch 3 taken 5 times.
66 5 mask <<= 1;
67 5 rank_copy >>= 1;
68 }
69 10 mask >>= 1;
70
71 // mask holds the highest bit such that all bits to the right of it (including itself) are zero
72
73 2/2 15 while (mask > 0) {
        ✓ Branch 0 taken 5 times.
        ✓ Branch 1 taken 10 times.
74 5 int child_rank = rank | mask;
75 1/2 5 if (child_rank < num_processes) {
        ✓ Branch 0 taken 5 times.
        ✗ Branch 1 not taken.
76 5 int send_blocks = blocks_in_subtree - mask;
77 1/2 5 MPI_Send(&send_blocks, 1, MPI_INT, child_rank, 0, comm);
        ✓ Branch 1 taken 5 times.
        ✗ Branch 2 not taken.
78 5 MPI_Send(data.data() + static_cast<ptrdiff_t>(mask * recvcount * recvtype_size),
79 1/2 5 send_blocks * recvcount * recvtype_size, MPI_BYTE, child_rank, 1, comm);
        ✓ Branch 1 taken 5 times.
        ✗ Branch 2 not taken.
80 5 blocks_in_subtree = mask;
81 }
82 5 mask >>= 1;
83 }
84
85 return MPI_SUCCESS;
86 }
87
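The bit tricks in MyMPIScatter implement a binomial-tree scatter rooted at rank 0: each non-zero rank receives its subtree's blocks from parent rank & (rank - 1) (its own rank with the lowest set bit cleared), and the mask loops enumerate its children rank | mask, one per bit position below that lowest set bit. A minimal standalone sketch of the parent/child arithmetic (no MPI; the eight-process world size is assumed purely for illustration):

#include <cstdio>

int main() {
  const int num_processes = 8;  // illustrative only; any world size works

  for (int rank = 0; rank < num_processes; ++rank) {
    // Parent: clear the lowest set bit; rank 0 is the root of the tree.
    const int parent = rank & (rank - 1);

    // Mask computation as in MyMPIScatter (which bounds the loop by
    // blocks_in_subtree; num_processes is the equivalent bound for rank 0).
    int mask = 1;
    int rank_copy = rank;
    while (((rank_copy & 1) == 0) && (mask < num_processes)) {
      mask <<= 1;
      rank_copy >>= 1;
    }
    mask >>= 1;

    std::printf("rank %d: parent %d, children:", rank, parent);
    // Children: one per bit below the lowest set bit of rank, high to low.
    for (; mask > 0; mask >>= 1) {
      const int child = rank | mask;
      if (child < num_processes) {
        std::printf(" %d", child);
      }
    }
    std::printf("\n");
  }
  return 0;
}

With eight ranks this prints the classic binomial tree: rank 0 feeds 4, 2 and 1; rank 4 feeds 6 and 5; ranks 2 and 6 feed 3 and 7.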
88 1/2 10 RomanovAScatterMPI::RomanovAScatterMPI(const InType &in) {
        ✓ Branch 1 taken 10 times.
        ✗ Branch 2 not taken.
89 SetTypeOfTask(GetStaticTypeOfTask());
90 GetInput() = in;
91 10 GetOutput() = std::vector<int>{};
92 10 }
93
94 10 bool RomanovAScatterMPI::ValidationImpl() {
95 10 return true;
96 }
97
98 10 bool RomanovAScatterMPI::PreProcessingImpl() {
99 10 return true;
100 }
101
102 10 bool RomanovAScatterMPI::RunImpl() {
103 10 int rank = 0;
104 10 MPI_Comm_rank(MPI_COMM_WORLD, &rank);
105
106 10 int num_processes = 0;
107 10 MPI_Comm_size(MPI_COMM_WORLD, &num_processes);
108
109 // These values must be known to every process for the Scatter
110 10 int root = std::get<2>(GetInput());
111 10 int sendcount = std::get<1>(GetInput());
112
113 10 std::vector<int> sendbuf;
114 2/2 10 if (rank == root) {
        ✓ Branch 0 taken 5 times.
        ✓ Branch 1 taken 5 times.
115 1/2 5 sendbuf = std::get<0>(GetInput());
        ✓ Branch 1 taken 5 times.
        ✗ Branch 2 not taken.
116 1/2 5 sendbuf.resize(static_cast<size_t>(num_processes) * static_cast<size_t>(sendcount));
        ✓ Branch 1 taken 5 times.
        ✗ Branch 2 not taken.
117 }
118
119 2/6 10 std::vector<int> recvbuf(sendcount);
        ✓ Branch 1 taken 10 times.
        ✗ Branch 2 not taken.
        ✓ Branch 4 taken 10 times.
        ✗ Branch 5 not taken.
        ✗ Branch 6 not taken.
        ✗ Branch 7 not taken.
120
121 1/2 10 MyMPIScatter(sendbuf.data(), sendcount, MPI_INT, recvbuf.data(), sendcount, MPI_INT, root, MPI_COMM_WORLD);
        ✓ Branch 1 taken 10 times.
        ✗ Branch 2 not taken.
122
123 1/2 10 GetOutput() = recvbuf;
        ✓ Branch 1 taken 10 times.
        ✗ Branch 2 not taken.
124
125 10 return true;
126 }
127
128 10 bool RomanovAScatterMPI::PostProcessingImpl() {
129 10 return true;
130 }
131
132 } // namespace romanov_a_scatter
133
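For reference, a hypothetical driver that exercises MyMPIScatter directly and cross-checks it against the library's MPI_Scatter. It is a sketch under stated assumptions, not code from the task: the include path is copied from the listing above, an MPI launcher such as mpirun is assumed, and the root is deliberately non-zero so that the relay branches reported untaken above (source lines 42-44 and 50-52) would also execute.

#include <mpi.h>

#include <cstdio>
#include <vector>

#include "romanov_a_scatter/mpi/include/ops_mpi.hpp"

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);

  int rank = 0;
  int num_processes = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &num_processes);

  const int root = 1 % num_processes;  // non-zero root (given >= 2 ranks) hits the relay path
  const int count = 4;

  // Only the root's send buffer is read, but allocate everywhere for simplicity.
  std::vector<int> sendbuf(static_cast<size_t>(num_processes) * count);
  if (rank == root) {
    for (size_t i = 0; i < sendbuf.size(); ++i) {
      sendbuf[i] = static_cast<int>(i);
    }
  }

  std::vector<int> mine(count);
  std::vector<int> reference(count);
  romanov_a_scatter::MyMPIScatter(sendbuf.data(), count, MPI_INT, mine.data(), count, MPI_INT, root,
                                  MPI_COMM_WORLD);
  MPI_Scatter(sendbuf.data(), count, MPI_INT, reference.data(), count, MPI_INT, root, MPI_COMM_WORLD);

  if (mine != reference) {
    std::printf("rank %d: mismatch against MPI_Scatter\n", rank);
  }

  MPI_Finalize();
  return 0;
}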