Changeset 1602 for XIOS/dev/dev_trunk_omp/extern/remap/src/mapper.cpp
- Timestamp:
- 11/19/18 16:28:03 (5 years ago)
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
XIOS/dev/dev_trunk_omp/extern/remap/src/mapper.cpp
Diff r1158 → r1602 (reconstructed from the changeset viewer's side-by-side table; old/new gutter numbers removed):

--- XIOS/dev/dev_trunk_omp/extern/remap/src/mapper.cpp (r1158)
+++ XIOS/dev/dev_trunk_omp/extern/remap/src/mapper.cpp (r1602)
@@ -12,6 +12,14 @@
 
 #include "mapper.hpp"
+using namespace ep_lib;
 
 namespace sphereRemap {
+
+extern CRemapGrid srcGrid;
+#pragma omp threadprivate(srcGrid)
+
+extern CRemapGrid tgtGrid;
+#pragma omp threadprivate(tgtGrid)
+
 
 /* A subdivition of an array into N sub-arays
@@ -303,12 +311,12 @@
 MPI_Issend(sendValue[rank], nbRecvElement[rank], MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
 nbSendRequest++;
-MPI_Issend(sendArea[rank], nbRecvElement[rank], MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
+MPI_Issend(sendArea[rank], nbRecvElement[rank], MPI_DOUBLE, rank, 1, communicator, &sendRequest[nbSendRequest]);
 nbSendRequest++;
 if (order == 2)
 {
 MPI_Issend(sendGrad[rank], 3*nbRecvElement[rank]*(NMAX+1),
-MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
+MPI_DOUBLE, rank, 2, communicator, &sendRequest[nbSendRequest]);
 nbSendRequest++;
-MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank]*(NMAX+1), MPI_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
+MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank]*(NMAX+1), MPI_INT, rank, 3, communicator, &sendRequest[nbSendRequest]);
 //ym --> attention taille GloId
 nbSendRequest++;
@@ -316,5 +324,5 @@
 else
 {
-MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank], MPI_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
+MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank], MPI_INT, rank, 4, communicator, &sendRequest[nbSendRequest]);
 //ym --> attention taille GloId
 nbSendRequest++;
@@ -325,12 +333,12 @@
 MPI_Irecv(recvValue[rank], nbSendElement[rank], MPI_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
 nbRecvRequest++;
-MPI_Irecv(recvArea[rank], nbSendElement[rank], MPI_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+MPI_Irecv(recvArea[rank], nbSendElement[rank], MPI_DOUBLE, rank, 1, communicator, &recvRequest[nbRecvRequest]);
 nbRecvRequest++;
 if (order == 2)
 {
 MPI_Irecv(recvGrad[rank], 3*nbSendElement[rank]*(NMAX+1),
-MPI_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+MPI_DOUBLE, rank, 2, communicator, &recvRequest[nbRecvRequest]);
 nbRecvRequest++;
-MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank]*(NMAX+1), MPI_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank]*(NMAX+1), MPI_INT, rank, 3, communicator, &recvRequest[nbRecvRequest]);
 //ym --> attention taille GloId
 nbRecvRequest++;
@@ -338,5 +346,5 @@
 else
 {
-MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank], MPI_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank], MPI_INT, rank, 4, communicator, &recvRequest[nbRecvRequest]);
 //ym --> attention taille GloId
 nbRecvRequest++;
Note: See TracChangeset
for help on using the changeset viewer.