- Timestamp:
- 01/23/19 10:31:44 (5 years ago)
- File:
- 1 edited
Legend:
- Unmodified
- Added
- Removed
XIOS/dev/branch_openmp/src/transformation/axis_algorithm_inverse.cpp
r1545 r1642 15 15 #include "inverse_axis.hpp" 16 16 #include "client_client_dht_template.hpp" 17 using namespace ep_lib;18 17 19 18 namespace xios { … … 28 27 std::map<int, int>& elementPositionInGridDst2AxisPosition, 29 28 std::map<int, int>& elementPositionInGridDst2DomainPosition) 29 TRY 30 30 { 31 31 std::vector<CAxis*> axisListDestP = gridDst->getAxis(); … … 38 38 return (new CAxisAlgorithmInverse(axisListDestP[axisDstIndex], axisListSrcP[axisSrcIndex], inverseAxis)); 39 39 } 40 CATCH 40 41 41 42 bool CAxisAlgorithmInverse::registerTrans() 43 TRY 42 44 { 43 45 CGridTransformationFactory<CAxis>::registerTransformation(TRANS_INVERSE_AXIS, create); 44 46 } 45 47 CATCH 46 48 47 49 CAxisAlgorithmInverse::CAxisAlgorithmInverse(CAxis* axisDestination, CAxis* axisSource, CInverseAxis* inverseAxis) 48 50 : CAxisAlgorithmTransformation(axisDestination, axisSource) 51 TRY 49 52 { 50 53 if (axisDestination->n_glo.getValue() != axisSource->n_glo.getValue()) … … 56 59 } 57 60 } 61 CATCH 58 62 59 63 void CAxisAlgorithmInverse::computeIndexSourceMapping_(const std::vector<CArray<double,1>* >& dataAuxInputs) 64 TRY 60 65 { 61 66 this->transformationMapping_.resize(1); … … 83 88 } 84 89 } 90 CATCH 85 91 86 92 /*! 
… … 89 95 */ 90 96 void CAxisAlgorithmInverse::updateAxisValue() 97 TRY 91 98 { 92 99 CContext* context = CContext::getCurrent(); … … 154 161 sendRankSizeMap[itIndex->first] = sendSize; 155 162 } 156 MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm);163 ep_lib::MPI_Allgather(&connectedClient,1,EP_INT,recvCount,1,EP_INT,client->intraComm); 157 164 158 165 displ[0]=0 ; … … 161 168 int* recvRankBuff=new int[recvSize]; 162 169 int* recvSizeBuff=new int[recvSize]; 163 MPI_Allgatherv(sendRankBuff,connectedClient,MPI_INT,recvRankBuff,recvCount,displ,MPI_INT,client->intraComm);164 MPI_Allgatherv(sendSizeBuff,connectedClient,MPI_INT,recvSizeBuff,recvCount,displ,MPI_INT,client->intraComm);170 ep_lib::MPI_Allgatherv(sendRankBuff,connectedClient,EP_INT,recvRankBuff,recvCount,displ,EP_INT,client->intraComm); 171 ep_lib::MPI_Allgatherv(sendSizeBuff,connectedClient,EP_INT,recvSizeBuff,recvCount,displ,EP_INT,client->intraComm); 165 172 for (int i = 0; i < nbClient; ++i) 166 173 { … … 174 181 175 182 // Sending global index of grid source to corresponding process as well as the corresponding mask 176 std::vector< MPI_Request> requests;177 std::vector< MPI_Status> status;183 std::vector<ep_lib::MPI_Request> requests; 184 std::vector<ep_lib::MPI_Status> status; 178 185 std::unordered_map<int, unsigned long* > recvGlobalIndexSrc; 179 186 std::unordered_map<int, double* > sendValueToDest; … … 185 192 sendValueToDest[recvRank] = new double [recvSize]; 186 193 187 requests.push_back( MPI_Request());188 MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back());194 requests.push_back(ep_lib::MPI_Request()); 195 ep_lib::MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, EP_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back()); 189 196 } 190 197 … … 207 214 208 215 // Send global index source and mask 209 requests.push_back( MPI_Request());210 MPI_Isend(sendGlobalIndexSrc[sendRank], 
sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back());216 requests.push_back(ep_lib::MPI_Request()); 217 ep_lib::MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, EP_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back()); 211 218 } 212 219 213 220 status.resize(requests.size()); 214 MPI_Waitall(requests.size(), &requests[0], &status[0]);215 216 217 std::vector< MPI_Request>().swap(requests);218 std::vector< MPI_Status>().swap(status);221 ep_lib::MPI_Waitall(requests.size(), &requests[0], &status[0]); 222 223 224 std::vector<ep_lib::MPI_Request>().swap(requests); 225 std::vector<ep_lib::MPI_Status>().swap(status); 219 226 220 227 // Okie, on destination side, we will wait for information of masked index of source … … 224 231 int recvSize = itSend->second; 225 232 226 requests.push_back( MPI_Request());227 MPI_Irecv(recvValueFromSrc[recvRank], recvSize, MPI_DOUBLE, recvRank, 48, client->intraComm, &requests.back());233 requests.push_back(ep_lib::MPI_Request()); 234 ep_lib::MPI_Irecv(recvValueFromSrc[recvRank], recvSize, EP_DOUBLE, recvRank, 48, client->intraComm, &requests.back()); 228 235 } 229 236 … … 242 249 } 243 250 // Okie, now inform the destination which source index are masked 244 requests.push_back( MPI_Request());245 MPI_Isend(sendValueToDest[recvRank], recvSize, MPI_DOUBLE, recvRank, 48, client->intraComm, &requests.back());251 requests.push_back(ep_lib::MPI_Request()); 252 ep_lib::MPI_Isend(sendValueToDest[recvRank], recvSize, EP_DOUBLE, recvRank, 48, client->intraComm, &requests.back()); 246 253 } 247 254 status.resize(requests.size()); 248 MPI_Waitall(requests.size(), &requests[0], &status[0]);255 ep_lib::MPI_Waitall(requests.size(), &requests[0], &status[0]); 249 256 250 257 … … 283 290 delete [] itLong->second; 284 291 } 285 286 } 292 CATCH 293 294 }
Note: See TracChangeset for help on using the changeset viewer.