| Line | Branch | Exec | Source |
|---|---|---|---|
| 1 | #include "yakimov_i_linear_virtual_topology/mpi/include/ops_mpi.hpp" | ||
| 2 | |||
| 3 | #include <mpi.h> | ||
| 4 | |||
| 5 | #include <cstddef> | ||
| 6 | #include <filesystem> | ||
| 7 | #include <fstream> | ||
| 8 | #include <iostream> | ||
| 9 | #include <string> | ||
| 10 | #include <vector> | ||
| 11 | |||
| 12 | #include "yakimov_i_linear_virtual_topology/common/include/common.hpp" | ||
| 13 | |||
| 14 | namespace yakimov_i_linear_virtual_topology { | ||
| 15 | |||
namespace {
// Upper bound (exclusive) on valid process ids encoded in the data file.
constexpr int kMaxProcesses = 20;

// A process id is valid when it lies in the half-open range [0, kMaxProcesses).
bool IsValidProcess(int process_id) {
  return process_id >= 0 && process_id < kMaxProcesses;
}

// Adds data_value to local_sum when this rank is the receiver of a
// well-formed (sender, receiver) pair; triplets with out-of-range process
// ids are silently ignored.
void ProcessTriplet(int sender, int receiver, int data_value, int rank, int &local_sum) {
  const bool triplet_ok = IsValidProcess(sender) && IsValidProcess(receiver);
  if (triplet_ok && rank == receiver) {
    local_sum += data_value;
  }
}
}  // namespace
| 33 | |||
| 34 | ✗ | YakimovILinearVirtualTopologyMPI::YakimovILinearVirtualTopologyMPI(const InType &in) { | |
| 35 | SetTypeOfTask(GetStaticTypeOfTask()); | ||
| 36 | ✗ | GetInput() = in; | |
| 37 | GetOutput() = 0; | ||
| 38 | ✗ | std::filesystem::path base_path = std::filesystem::current_path(); | |
| 39 | ✗ | while (base_path.filename() != "ppc-2025-processes-engineers") { | |
| 40 | ✗ | base_path = base_path.parent_path(); | |
| 41 | } | ||
| 42 | data_filename_ = | ||
| 43 | ✗ | base_path.string() + "/tasks/yakimov_i_linear_virtual_topology/data/" + std::to_string(GetInput()) + ".txt"; | |
| 44 | ✗ | } | |
| 45 | |||
| 46 | ✗ | bool YakimovILinearVirtualTopologyMPI::ValidationImpl() { | |
| 47 | ✗ | int rank = 0; | |
| 48 | ✗ | MPI_Comm_rank(MPI_COMM_WORLD, &rank); | |
| 49 | ✗ | if (rank == 0) { | |
| 50 | bool result = false; | ||
| 51 | ✗ | result = (GetInput() > 0); | |
| 52 | ✗ | return result; | |
| 53 | } | ||
| 54 | return true; | ||
| 55 | } | ||
| 56 | |||
| 57 | ✗ | bool YakimovILinearVirtualTopologyMPI::PreProcessingImpl() { | |
| 58 | ✗ | int rank = 0; | |
| 59 | ✗ | MPI_Comm_rank(MPI_COMM_WORLD, &rank); | |
| 60 | |||
| 61 | ✗ | if (rank == 0) { | |
| 62 | ✗ | ReadDataFromFile(data_filename_); | |
| 63 | } | ||
| 64 | |||
| 65 | ✗ | local_sum_ = 0; | |
| 66 | ✗ | total_sum_ = 0; | |
| 67 | ✗ | MPI_Barrier(MPI_COMM_WORLD); | |
| 68 | ✗ | return true; | |
| 69 | } | ||
| 70 | |||
| 71 | ✗ | void YakimovILinearVirtualTopologyMPI::ReadDataFromFile(const std::string &filename) { | |
| 72 | ✗ | std::ifstream file(filename); | |
| 73 | |||
| 74 | ✗ | int value = 0; | |
| 75 | data_.clear(); | ||
| 76 | |||
| 77 | ✗ | while (file >> value) { | |
| 78 | ✗ | data_.push_back(value); | |
| 79 | } | ||
| 80 | |||
| 81 | ✗ | file.close(); | |
| 82 | ✗ | } | |
| 83 | |||
// Produces the communicator used as the "linear topology".
// NOTE(review): this duplicates MPI_COMM_WORLD rather than building a true
// MPI virtual topology (e.g. MPI_Cart_create with one dimension), so ranks
// keep their world order — confirm this matches the task's intent.
// Ownership: the duplicate must be released with MPI_Comm_free; here that
// happens in ExchangeDataInTopology.
void YakimovILinearVirtualTopologyMPI::CreateLinearTopology(MPI_Comm &linear_comm) {
  MPI_Comm_dup(MPI_COMM_WORLD, &linear_comm);
}
| 87 | |||
| 88 | ✗ | void YakimovILinearVirtualTopologyMPI::ProcessDataInTopology(int rank, MPI_Comm &linear_comm) { | |
| 89 | ✗ | int rank_in_linear = 0; | |
| 90 | ✗ | int size_in_linear = 0; | |
| 91 | ✗ | MPI_Comm_rank(linear_comm, &rank_in_linear); | |
| 92 | ✗ | MPI_Comm_size(linear_comm, &size_in_linear); | |
| 93 | |||
| 94 | ✗ | for (size_t i = 0; i + 2 < data_.size(); i += 3) { | |
| 95 | ✗ | int sender = data_[i]; | |
| 96 | ✗ | int receiver = data_[i + 1]; | |
| 97 | ✗ | int data_value = data_[i + 2]; | |
| 98 | |||
| 99 | ProcessTriplet(sender, receiver, data_value, rank, local_sum_); | ||
| 100 | } | ||
| 101 | |||
| 102 | ✗ | MPI_Barrier(linear_comm); | |
| 103 | ✗ | } | |
| 104 | |||
| 105 | ✗ | void YakimovILinearVirtualTopologyMPI::ExchangeDataInTopology(MPI_Comm &linear_comm) { | |
| 106 | ✗ | int all_sum = 0; | |
| 107 | ✗ | MPI_Allreduce(&local_sum_, &all_sum, 1, MPI_INT, MPI_SUM, linear_comm); | |
| 108 | ✗ | total_sum_ = all_sum; | |
| 109 | |||
| 110 | ✗ | MPI_Comm_free(&linear_comm); | |
| 111 | ✗ | } | |
| 112 | |||
| 113 | ✗ | bool YakimovILinearVirtualTopologyMPI::RunImpl() { | |
| 114 | ✗ | int rank = 0; | |
| 115 | ✗ | int size = 0; | |
| 116 | ✗ | MPI_Comm_rank(MPI_COMM_WORLD, &rank); | |
| 117 | ✗ | MPI_Comm_size(MPI_COMM_WORLD, &size); | |
| 118 | |||
| 119 | ✗ | int data_size = 0; | |
| 120 | ✗ | if (rank == 0) { | |
| 121 | ✗ | data_size = static_cast<int>(data_.size()); | |
| 122 | } | ||
| 123 | ✗ | MPI_Bcast(&data_size, 1, MPI_INT, 0, MPI_COMM_WORLD); | |
| 124 | |||
| 125 | ✗ | if (rank != 0) { | |
| 126 | ✗ | data_.resize(static_cast<size_t>(data_size)); | |
| 127 | } | ||
| 128 | |||
| 129 | ✗ | MPI_Bcast(data_.data(), data_size, MPI_INT, 0, MPI_COMM_WORLD); | |
| 130 | |||
| 131 | ✗ | MPI_Comm linear_comm = MPI_COMM_NULL; | |
| 132 | CreateLinearTopology(linear_comm); | ||
| 133 | ✗ | ProcessDataInTopology(rank, linear_comm); | |
| 134 | ✗ | ExchangeDataInTopology(linear_comm); | |
| 135 | |||
| 136 | ✗ | return true; | |
| 137 | } | ||
| 138 | |||
| 139 | ✗ | bool YakimovILinearVirtualTopologyMPI::PostProcessingImpl() { | |
| 140 | ✗ | int rank = 0; | |
| 141 | ✗ | MPI_Comm_rank(MPI_COMM_WORLD, &rank); | |
| 142 | |||
| 143 | ✗ | if (rank == 0) { | |
| 144 | ✗ | GetOutput() = total_sum_; | |
| 145 | } | ||
| 146 | |||
| 147 | ✗ | OutType final_result = GetOutput(); | |
| 148 | ✗ | MPI_Bcast(&final_result, 1, MPI_INT, 0, MPI_COMM_WORLD); | |
| 149 | ✗ | GetOutput() = final_result; | |
| 150 | |||
| 151 | ✗ | return true; | |
| 152 | } | ||
| 153 | |||
| 154 | } // namespace yakimov_i_linear_virtual_topology | ||
| 155 |