#include "mpi_routing.hpp"
#include "mpi.hpp"
#include "node.hpp"
#include "elt.hpp"
#include "timerRemap.hpp"
#include <iostream>
#ifdef _usingEP
#include "ep_declaration.hpp"
#endif

namespace sphereRemap {

const int verbose = 0;

CMPIRouting::CMPIRouting(MPI_Comm comm) : communicator(comm)
{
  MPI_Comm_rank(comm, &mpiRank);
  MPI_Comm_size(comm, &mpiSize);
}

/* sparse alltoallv when it is known that only a subset of ranks will communicate,
   but message lengths are *known* to receiver */
template <typename T>
void alltoalls_known(const vector<vector<T> >& send, vector<vector<T> >& recv, const vector<int>& ranks, MPI_Comm communicator)
{
  vector<MPI_Request> request(ranks.size() * 2);
  vector<MPI_Status> status(ranks.size() * 2);

  // communicate data
  int nbRequest = 0;
  for (int i = 0; i < ranks.size(); i++)
    if (recv[i].size())
      MPI_Irecv(&recv[i][0], recv[i].size() * sizeof(T), MPI_CHAR, ranks[i], 0, communicator, &request[nbRequest++]);
  for (int i = 0; i < ranks.size(); i++)
    if (send[i].size())
      MPI_Isend((void *) &send[i][0], send[i].size() * sizeof(T), MPI_CHAR, ranks[i], 0, communicator, &request[nbRequest++]);
  MPI_Waitall(nbRequest, &request[0], &status[0]);
}

/* sparse alltoallv when it is known that only a subset of ranks will communicate,
   but message lengths are *unknown* to receiver */
template <typename T>
void alltoalls_unknown(const vector<vector<T> >& send, vector<vector<T> >& recv, const vector<int>& ranks, MPI_Comm communicator)
{
  vector<MPI_Request> request(ranks.size() * 2);
  vector<MPI_Status> status(ranks.size() * 2);

  // communicate sizes
  int nbRequest = 0;
  vector<int> sendSizes(ranks.size());
  vector<int> recvSizes(ranks.size());
  for (int i = 0; i < ranks.size(); i++)
    sendSizes[i] = send[i].size();
  for (int i = 0; i < ranks.size(); i++)
    MPI_Irecv(&recvSizes[i], 1, MPI_INT, ranks[i], 0, communicator, &request[nbRequest++]);
  for (int i = 0; i < ranks.size(); i++)
    MPI_Isend(&sendSizes[i], 1, MPI_INT, ranks[i], 0, communicator, &request[nbRequest++]);
  MPI_Waitall(nbRequest, &request[0], &status[0]);

  // allocate
  for (int i = 0; i < ranks.size(); i++)
    if (recvSizes[i]) recv[i].resize(recvSizes[i]);
  // communicate data
  alltoalls_known(send, recv, ranks, communicator);
}
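
/* Usage sketch for the sparse alltoall helpers above (illustrative only: this
   function is not called anywhere in the library, and the ring topology and
   payload are assumptions made for the example). Each process exchanges one
   variable-length vector of doubles with its two neighbours; the receiver does
   not need the message lengths in advance, alltoalls_unknown negotiates them. */
static void exampleSparseExchange(MPI_Comm comm)
{
  int rank, size;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &size);

  // the subset of ranks this process talks to: left and right ring neighbours
  vector<int> ranks;
  ranks.push_back((rank + size - 1) % size);
  ranks.push_back((rank + 1) % size);

  // send[i] pairs with ranks[i]; lengths may differ per peer
  vector<vector<double> > send(ranks.size()), recv(ranks.size());
  send[0].assign(3, double(rank));
  send[1].assign(5, double(rank));

  alltoalls_unknown(send, recv, ranks, comm);
  // recv[i] now holds the vector sent to this process by ranks[i]
}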

/* a route is either a single destination rank or a list of destination ranks;
   these overloads count how many elements go to each rank */
void setElementsSendCnt(int route, vector<int>& sendCnt)
{
  sendCnt[route]++;
}

void setElementsSendCnt(const vector<int>& route, vector<int>& sendCnt)
{
  for (int i = 0; i < route.size(); i++)
    sendCnt[route[i]]++;
}

/* translate destination ranks into per-target buffer indices, again for both
   single- and multi-destination routes */
void setTargetElementIndex(int route, vector<int>& indices, const int *rankToIndex)
{
  int index = rankToIndex[route];
  indices.push_back(index);
}

void setTargetElementIndex(const vector<int>& route, vector<int>& indices, const int *rankToIndex)
{
  for (int i = 0; i < route.size(); i++)
  {
    int index = rankToIndex[route[i]];
    indices.push_back(index);
  }
}

template<typename T>
void CMPIRouting::init(const vector<T>& route, CMPICascade *cascade)
{
  vector<int> nbElementSend(mpiSize);
  int *toSend = new int[mpiSize];
  int *recvCount = new int[mpiSize];
  int *targetRankToIndex;

  for (int i = 0; i < route.size(); i++)
    setElementsSendCnt(route[i], nbElementSend);

  nbTarget = 0;
  vector<int> destRanks;
  for (int i = 0; i < mpiSize; i++)
  {
    if (nbElementSend[i] == 0)
      toSend[i] = 0;
    else
    {
      destRanks.push_back(i);
      toSend[i] = 1;
      nbTarget++;
    }
    recvCount[i] = 1;
  }
  //int recvCntDeb = (stree) ? stree->scatter_reduce(destRanks) : -1;

  MPI_Barrier(communicator);
  CTimer::get("CMPIRouting::init(reduce_scatter)").reset();
  CTimer::get("CMPIRouting::init(reduce_scatter)").resume();
  MPI_Reduce_scatter(toSend, &nbSource, recvCount, MPI_INT, MPI_SUM, communicator);
  CTimer::get("CMPIRouting::init(reduce_scatter)").suspend();
  CTimer::get("CMPIRouting::init(reduce_scatter)").print();

  MPI_Alloc_mem(nbTarget * sizeof(int), MPI_INFO_NULL, &targetRank);
  MPI_Alloc_mem(nbSource * sizeof(int), MPI_INFO_NULL, &sourceRank);

  targetRankToIndex = new int[mpiSize];
  int index = 0;
  for (int i = 0; i < mpiSize; i++)
  {
    if (toSend[i] == 1)
    {
      targetRank[index] = i;
      targetRankToIndex[i] = index;
      index++;
    }
  }

  MPI_Barrier(communicator);
  CTimer::get("CMPIRouting::init(get_source)").reset();
  CTimer::get("CMPIRouting::init(get_source)").resume();

  MPI_Request *request = new MPI_Request[nbSource + nbTarget];
  MPI_Status *status = new MPI_Status[nbSource + nbTarget];

  int indexRequest = 0;
  if (verbose) cout << "CMPIRouting::init nbSource : " << nbSource << " nbTarget : " << nbTarget << endl;
  // assert(recvCntDeb == -1 || recvCntDeb == nbSource);
  //cout << mpiRank << "DEB : " << recvCntDeb << " nbSource " << nbSource << " nbTarget : " << nbTarget << endl;
  for (int i = 0; i < nbSource; i++)
  {
#ifdef _usingEP
    MPI_Irecv(&sourceRank[i], 1, MPI_INT, -1, 0, communicator, &request[indexRequest]);
#else
    MPI_Irecv(&sourceRank[i], 1, MPI_INT, MPI_ANY_SOURCE, 0, communicator, &request[indexRequest]);
#endif
    indexRequest++;
  }
  MPI_Barrier(communicator);
  for (int i = 0; i < nbTarget; i++)
  {
    MPI_Isend(&mpiRank, 1, MPI_INT, targetRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }
  MPI_Waitall(indexRequest, request, status);
  MPI_Barrier(communicator);  //needed
  CTimer::get("CMPIRouting::init(get_source)").suspend();
  CTimer::get("CMPIRouting::init(get_source)").print();

  // the same rank exchange is repeated so that it can be timed a second time
  CTimer::get("CMPIRouting::init(get_source)").reset();
  CTimer::get("CMPIRouting::init(get_source)").resume();

  indexRequest = 0;
  for (int i = 0; i < nbSource; i++)
  {
#ifdef _usingEP
    MPI_Irecv(&sourceRank[i], 1, MPI_INT, -1, 0, communicator, &request[indexRequest]);
#else
    MPI_Irecv(&sourceRank[i], 1, MPI_INT, MPI_ANY_SOURCE, 0, communicator, &request[indexRequest]);
#endif
    indexRequest++;
  }

  for (int i = 0; i < nbTarget; i++)
  {
    MPI_Isend(&mpiRank, 1, MPI_INT, targetRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }
  MPI_Waitall(indexRequest, request, status);
  MPI_Barrier(communicator);
  CTimer::get("CMPIRouting::init(get_source)").suspend();
  CTimer::get("CMPIRouting::init(get_source)").print();

  CTimer::get("CMPIRouting::init(send_element)").reset();
  CTimer::get("CMPIRouting::init(send_element)").resume();

  nbTargetElement.resize(nbTarget);
  nbSourceElement.resize(nbSource);

  for (int i = 0; i < route.size(); i++)
    setTargetElementIndex(route[i], targetElementIndex, targetRankToIndex);

  for (int i = 0; i < targetElementIndex.size(); i++)
    nbTargetElement[targetElementIndex[i]]++;

  indexRequest = 0;
  totalSourceElement = 0;
  totalTargetElement = 0;
  for (int i = 0; i < nbSource; i++)
  {
    MPI_Irecv(&nbSourceElement[i], 1, MPI_INT, sourceRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  for (int i = 0; i < nbTarget; i++)
  {
    totalTargetElement += nbTargetElement[i];
    MPI_Isend(&nbTargetElement[i], 1, MPI_INT, targetRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  MPI_Waitall(indexRequest, request, status);

  CTimer::get("CMPIRouting::init(send_element)").suspend();
  CTimer::get("CMPIRouting::init(send_element)").print();

  totalSourceElement = 0;
  for (int i = 0; i < nbSource; i++)
    totalSourceElement += nbSourceElement[i];

  sourceElementIndex.resize(totalSourceElement);

  totalSourceElement = 0;
  for (int i = 0; i < nbSource; i++)
  {
    for (int j = 0; j < nbSourceElement[i]; j++)
    {
      sourceElementIndex[totalSourceElement] = i;
      totalSourceElement++;
    }
  }

  delete[] toSend;
  delete[] recvCount;
  delete[] targetRankToIndex;
  delete[] request;
  delete[] status;
}

int CMPIRouting::getTotalSourceElement(void)
{
  return totalSourceElement;
}

template<typename T>
void CMPIRouting::transferToTarget(T* targetElements, T* sourceElements)
{
  char** targetBuffer = new char*[nbTarget];
  int* indexTargetBuffer = new int[nbTarget];

  for (int i = 0; i < nbTarget; i++)
  {
    targetBuffer[i] = new char[sizeof(T) * nbTargetElement[i]];
    indexTargetBuffer[i] = 0;
  }

  char** sourceBuffer = new char*[nbSource];
  int* indexSourceBuffer = new int[nbSource];

  for (int i = 0; i < nbSource; i++)
  {
    sourceBuffer[i] = new char[sizeof(T) * nbSourceElement[i]];
    indexSourceBuffer[i] = 0;
  }

  // pack the data
  int index;
  for (int i = 0; i < totalTargetElement; i++)
  {
    index = targetElementIndex[i];
    *((T*) &(targetBuffer[index][indexTargetBuffer[index]])) = targetElements[i];
    indexTargetBuffer[index] += sizeof(T);
  }

  MPI_Request* request = new MPI_Request[nbSource + nbTarget];
  MPI_Status* status = new MPI_Status[nbSource + nbTarget];
  int indexRequest = 0;

  MPI_Barrier(communicator);
  CTimer::get("CMPIRouting::transferToTarget").reset();
  CTimer::get("CMPIRouting::transferToTarget").resume();

  for (int i = 0; i < nbSource; i++)
  {
    MPI_Irecv(sourceBuffer[i], nbSourceElement[i] * sizeof(T), MPI_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  for (int i = 0; i < nbTarget; i++)
  {
    MPI_Isend(targetBuffer[i], nbTargetElement[i] * sizeof(T), MPI_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  MPI_Waitall(indexRequest, request, status);

  CTimer::get("CMPIRouting::transferToTarget").suspend();
  CTimer::get("CMPIRouting::transferToTarget").print();
  MPI_Barrier(communicator);

  // unpack the data
  for (int i = 0; i < totalSourceElement; i++)
  {
    index = sourceElementIndex[i];
    sourceElements[i] = *((T*) &(sourceBuffer[index][indexSourceBuffer[index]]));
    indexSourceBuffer[index] += sizeof(T);
  }

  for (int i = 0; i < nbTarget; i++) delete[] targetBuffer[i];
  for (int i = 0; i < nbSource; i++) delete[] sourceBuffer[i];
  delete[] targetBuffer;
  delete[] indexTargetBuffer;
  delete[] sourceBuffer;
  delete[] indexSourceBuffer;
  delete[] request;
  delete[] status;
}
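
/* End-to-end sketch of the routing lifecycle (illustrative only: this function
   is not part of the library, and the route values and NULL cascade are
   assumptions made for the example). Each local element is tagged with the
   rank that must receive it, init() negotiates who talks to whom and how many
   elements travel on each link, and transferToTarget() moves the payload. */
static void exampleRouting(MPI_Comm comm)
{
  int rank, size;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &size);

  // route[i] is the destination rank of local element i: here, the next rank
  vector<int> route(4, (rank + 1) % size);

  CMPIRouting router(comm);
  router.init(route, NULL);  // no cascade in this sketch

  // payload paired with route; the receive side is sized from the negotiated counts
  vector<int> toSend(route.size(), rank);
  vector<int> received(router.getTotalSourceElement());

  // "target" is the send side and "source" the receive side, as in the methods above
  router.transferToTarget(&toSend[0], &received[0]);
}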

template<typename T, typename t_pack, typename t_unpack>
void CMPIRouting::transferToTarget(T* targetElements, T* sourceElements, t_pack pack, t_unpack unpack)
{
  char** targetBuffer = new char*[nbTarget];
  int* indexTargetBuffer = new int[nbTarget];
  int* targetMessageSize = new int[nbTarget];
  int* sourceMessageSize = new int[nbSource];
  int index;

  // compute target buffer size: packing with a NULL buffer only accumulates sizes
  for (int i = 0; i < nbTarget; i++)
    targetMessageSize[i] = 0;

  for (int i = 0; i < totalTargetElement; i++)
  {
    index = targetElementIndex[i];
    pack(targetElements[i], NULL, targetMessageSize[index]);
  }

  MPI_Request *request = new MPI_Request[nbSource + nbTarget];
  MPI_Status *status = new MPI_Status[nbSource + nbTarget];
  int indexRequest = 0;

  MPI_Barrier(communicator);
  CTimer::get("CMPIRouting::transferToTarget(messageSize)").reset();
  CTimer::get("CMPIRouting::transferToTarget(messageSize)").resume();

  for (int i = 0; i < nbSource; i++)
  {
    MPI_Irecv(&sourceMessageSize[i], 1, MPI_INT, sourceRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  for (int i = 0; i < nbTarget; i++)
  {
    MPI_Isend(&targetMessageSize[i], 1, MPI_INT, targetRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  MPI_Waitall(indexRequest, request, status);

  MPI_Barrier(communicator);
  CTimer::get("CMPIRouting::transferToTarget(messageSize)").suspend();
  CTimer::get("CMPIRouting::transferToTarget(messageSize)").print();

  for (int i = 0; i < nbTarget; i++)
  {
    targetBuffer[i] = new char[targetMessageSize[i]];
    indexTargetBuffer[i] = 0;
  }

  char** sourceBuffer = new char*[nbSource];
  int* indexSourceBuffer = new int[nbSource];

  for (int i = 0; i < nbSource; i++)
  {
    sourceBuffer[i] = new char[sourceMessageSize[i]];
    indexSourceBuffer[i] = 0;
  }

  // pack the data
  for (int i = 0; i < totalTargetElement; i++)
  {
    index = targetElementIndex[i];
    pack(targetElements[i], targetBuffer[index], indexTargetBuffer[index]);
  }

  indexRequest = 0;

  MPI_Barrier(communicator);
  CTimer::get("CMPIRouting::transferToTarget(data)").reset();
  CTimer::get("CMPIRouting::transferToTarget(data)").resume();
  for (int i = 0; i < nbSource; i++)
  {
    MPI_Irecv(sourceBuffer[i], sourceMessageSize[i], MPI_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  for (int i = 0; i < nbTarget; i++)
  {
    MPI_Isend(targetBuffer[i], targetMessageSize[i], MPI_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  MPI_Waitall(indexRequest, request, status);

  MPI_Barrier(communicator);
  CTimer::get("CMPIRouting::transferToTarget(data)").suspend();
  CTimer::get("CMPIRouting::transferToTarget(data)").print();

  // unpack the data
  for (int i = 0; i < totalSourceElement; i++)
  {
    index = sourceElementIndex[i];
    unpack(sourceElements[i], sourceBuffer[index], indexSourceBuffer[index]);
  }

  for (int i = 0; i < nbTarget; i++) delete[] targetBuffer[i];
  for (int i = 0; i < nbSource; i++) delete[] sourceBuffer[i];
  delete[] targetBuffer;
  delete[] indexTargetBuffer;
  delete[] targetMessageSize;
  delete[] sourceBuffer;
  delete[] indexSourceBuffer;
  delete[] sourceMessageSize;
  delete[] request;
  delete[] status;
}
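
/* Sketch of a pack/unpack pair fitting the convention used above (illustrative;
   these are not the callbacks used elsewhere in the library). When buffer is
   NULL the pack callback must only advance the size counter, so the same
   function serves both the size-negotiation pass and the real packing pass.
   The signatures match the std::vector<int> instantiation at the end of this
   file, serializing a length prefix followed by the payload. */
static void examplePackVector(const vector<int>& e, char *buffer, int& size)
{
  int n = e.size();
  if (buffer != NULL)
  {
    *(int *) &buffer[size] = n;
    for (int i = 0; i < n; i++)
      *(int *) &buffer[size + sizeof(int) * (i + 1)] = e[i];
  }
  size += (n + 1) * sizeof(int);  // length prefix plus payload
}

static void exampleUnpackVector(vector<int>& e, const char *buffer, int& size)
{
  int n = *(const int *) &buffer[size];
  e.resize(n);
  for (int i = 0; i < n; i++)
    e[i] = *(const int *) &buffer[size + sizeof(int) * (i + 1)];
  size += (n + 1) * sizeof(int);
}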

template<typename T>
void CMPIRouting::transferFromSource(T* targetElements, T* sourceElements)
{
  char** targetBuffer = new char*[nbTarget];
  int* indexTargetBuffer = new int[nbTarget];

  for (int i = 0; i < nbTarget; i++)
  {
    targetBuffer[i] = new char[sizeof(T) * nbTargetElement[i]];
    indexTargetBuffer[i] = 0;
  }

  char** sourceBuffer = new char*[nbSource];
  int* indexSourceBuffer = new int[nbSource];

  for (int i = 0; i < nbSource; i++)
  {
    sourceBuffer[i] = new char[sizeof(T) * nbSourceElement[i]];
    indexSourceBuffer[i] = 0;
  }

  // pack the data
  int index;
  for (int i = 0; i < totalSourceElement; i++)
  {
    index = sourceElementIndex[i];
    *((T*) &(sourceBuffer[index][indexSourceBuffer[index]])) = sourceElements[i];
    indexSourceBuffer[index] += sizeof(T);
  }

  MPI_Request* request = new MPI_Request[nbSource + nbTarget];
  MPI_Status* status = new MPI_Status[nbSource + nbTarget];
  int indexRequest = 0;

  for (int i = 0; i < nbSource; i++)
  {
    MPI_Isend(sourceBuffer[i], nbSourceElement[i] * sizeof(T), MPI_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  for (int i = 0; i < nbTarget; i++)
  {
    MPI_Irecv(targetBuffer[i], nbTargetElement[i] * sizeof(T), MPI_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  MPI_Waitall(indexRequest, request, status);

  // unpack the data
  for (int i = 0; i < totalTargetElement; i++)
  {
    index = targetElementIndex[i];
    targetElements[i] = *((T*) &(targetBuffer[index][indexTargetBuffer[index]]));
    indexTargetBuffer[index] += sizeof(T);
  }

  for (int i = 0; i < nbTarget; i++) delete[] targetBuffer[i];
  for (int i = 0; i < nbSource; i++) delete[] sourceBuffer[i];
  delete[] targetBuffer;
  delete[] indexTargetBuffer;
  delete[] sourceBuffer;
  delete[] indexSourceBuffer;
  delete[] request;
  delete[] status;
}

/* number of source and target elements is known from previous call to init() */
template<typename T, typename t_pack, typename t_unpack>
void CMPIRouting::transferFromSource(T *targetElements, T *sourceElements, t_pack pack, t_unpack unpack)
{
  char **targetBuffer = new char*[nbTarget];
  int *indexTargetBuffer = new int[nbTarget];
  int *targetMessageSize = new int[nbTarget];
  int *sourceMessageSize = new int[nbSource];
  int index;

  // compute source buffer size: packing with a NULL buffer only accumulates sizes
  for (int i = 0; i < nbSource; i++) sourceMessageSize[i] = 0;

  for (int i = 0; i < totalSourceElement; i++)
  {
    index = sourceElementIndex[i];
    pack(sourceElements[i], NULL, sourceMessageSize[index]);
  }

  MPI_Request *request = new MPI_Request[nbSource + nbTarget];
  MPI_Status *status = new MPI_Status[nbSource + nbTarget];
  int indexRequest = 0;
  for (int i = 0; i < nbSource; i++)
  {
    MPI_Isend(&sourceMessageSize[i], 1, MPI_INT, sourceRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }
  for (int i = 0; i < nbTarget; i++)
  {
    MPI_Irecv(&targetMessageSize[i], 1, MPI_INT, targetRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }
  MPI_Waitall(indexRequest, request, status);

  for (int i = 0; i < nbTarget; i++)
  {
    targetBuffer[i] = new char[targetMessageSize[i]];
    indexTargetBuffer[i] = 0;
  }

  char **sourceBuffer = new char*[nbSource];
  int *indexSourceBuffer = new int[nbSource];

  for (int i = 0; i < nbSource; i++)
  {
    sourceBuffer[i] = new char[sourceMessageSize[i]];
    indexSourceBuffer[i] = 0;
  }

  // pack the data
  for (int i = 0; i < totalSourceElement; i++)
  {
    index = sourceElementIndex[i];
    pack(sourceElements[i], sourceBuffer[index], indexSourceBuffer[index]);
  }

  indexRequest = 0;
  for (int i = 0; i < nbSource; i++)
  {
    MPI_Isend(sourceBuffer[i], sourceMessageSize[i], MPI_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }
  for (int i = 0; i < nbTarget; i++)
  {
    MPI_Irecv(targetBuffer[i], targetMessageSize[i], MPI_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }
  MPI_Waitall(indexRequest, request, status);

  // unpack the data
  for (int i = 0; i < totalTargetElement; i++)
  {
    index = targetElementIndex[i];
    unpack(targetElements[i], targetBuffer[index], indexTargetBuffer[index]);
  }

  for (int i = 0; i < nbTarget; i++) delete[] targetBuffer[i];
  for (int i = 0; i < nbSource; i++) delete[] sourceBuffer[i];
  delete[] targetBuffer;
  delete[] indexTargetBuffer;
  delete[] targetMessageSize;
  delete[] sourceBuffer;
  delete[] indexSourceBuffer;
  delete[] sourceMessageSize;
  delete[] request;
  delete[] status;
}

CMPIRouting::~CMPIRouting()
{
}

template void CMPIRouting::init(const std::vector<int>& route, CMPICascade *cascade);
template void CMPIRouting::init(const std::vector<vector<int> >& route, CMPICascade *cascade);

template void CMPIRouting::transferToTarget(int *targetElements, int *sourceElements);
template void CMPIRouting::transferToTarget(int *targetElements, int *sourceElements, void (*pack)(int&, char*, int&), void (*unpack)(int&, char *, int&));
template void CMPIRouting::transferFromSource(int *targetElements, int *sourceElements);
template void CMPIRouting::transferFromSource(int *targetElements, int *sourceElements, void (*pack)(int&, char*, int&), void (*unpack)(int&, char *, int&));

template void CMPIRouting::transferToTarget(Node* targetElements, Node* sourceElements, void (*pack)(Node&, char*, int&), void (*unpack)(Node&, char *, int&));
template void CMPIRouting::transferToTarget(Elt **targetElements, Elt **sourceElements, void (*pack)(Elt *, char*, int&), void (*unpack)(Elt *, char *, int&));
template void CMPIRouting::transferFromSource(std::vector<int> *targetElements, std::vector<int> *sourceElements, void (*pack)(const std::vector<int>&, char*, int&), void (*unpack)(std::vector<int>&, const char *, int&));

struct NES { int cnt; int risc; int rank; };

template void alltoalls_unknown(const std::vector<std::vector<NES> >& send, std::vector<std::vector<NES> >& recv,
                                const std::vector<int>& ranks, MPI_Comm communicator);

template void alltoalls_known(const std::vector<std::vector<int> >& send, std::vector<std::vector<int> >& recv,
                              const std::vector<int>& ranks, MPI_Comm communicator);

}