Changeset 1053 for XIOS/dev/branch_yushan/src
- Timestamp: 02/17/17 17:55:37
- Location: XIOS/dev/branch_yushan/src
- Files: 50 edited
Legend: lines marked "-" come from the old revision, lines marked "+" are new in r1053; unmarked lines are unchanged context, and "..." separates hunks.
XIOS/dev/branch_yushan/src/buffer_server.hpp
r717 → r1053:
 #include "xios_spl.hpp"
 #include "buffer.hpp"
-#include "mpi.hpp"
+#include "mpi_std.hpp"
 #include "cxios.hpp"
XIOS/dev/branch_yushan/src/client.cpp
r1037 → r1053:
 else is_MPI_Initialized=false ;

-//return;
-
 // don't use OASIS
 if (!CXios::usingOasis)
XIOS/dev/branch_yushan/src/client.hpp
r1037 → r1053:
 static void initialize(const string& codeId, MPI_Comm& localComm, MPI_Comm& returnComm);
 static void finalize(void);
-static void registerContext(const string& id, MPI_Comm contextComm);
+static void registerContext(const string& id, ep_lib::MPI_Comm contextComm);

 static MPI_Comm intraComm;
XIOS/dev/branch_yushan/src/client_client_dht_template.hpp
r941 → r1053:
 #include "xios_spl.hpp"
 #include "array_new.hpp"
-#include "mpi.hpp"
+#include "mpi_std.hpp"
 #include "policy.hpp"
 #include <boost/unordered_map.hpp>
...
 public:
   CClientClientDHTTemplate(const Index2InfoTypeMap& indexInfoInitMap,
-                           const MPI_Comm& clientIntraComm);
+                           const ep_lib::MPI_Comm& clientIntraComm);

   CClientClientDHTTemplate(const Index2VectorInfoTypeMap& indexInfoInitMap,
-                           const MPI_Comm& clientIntraComm);
+                           const ep_lib::MPI_Comm& clientIntraComm);

   void computeIndexInfoMapping(const CArray<size_t,1>& indices);
...
 protected:
-  CClientClientDHTTemplate(const MPI_Comm& clientIntraComm);
+  CClientClientDHTTemplate(const ep_lib::MPI_Comm& clientIntraComm);
...
   // Redistribute index and info among clients
   void computeDistributedIndex(const Index2InfoTypeMap& indexInfoInitMap,
-                               const MPI_Comm& intraCommLevel,
+                               const ep_lib::MPI_Comm& intraCommLevel,
                                int level);

   void computeDistributedIndex(const Index2VectorInfoTypeMap& indexInfoInitMap,
-                               const MPI_Comm& intraCommLevel,
+                               const ep_lib::MPI_Comm& intraCommLevel,
                                int level);
...
   void computeIndexInfoMappingLevel(const CArray<size_t,1>& indices,
-                                    const MPI_Comm& intraCommLevel,
+                                    const ep_lib::MPI_Comm& intraCommLevel,
                                     int level);
...
   // Send information to clients
   void sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize,
-                         const MPI_Comm& clientIntraComm,
-                         std::vector<MPI_Request>& requestSendInfo);
+                         const ep_lib::MPI_Comm& clientIntraComm,
+                         std::vector<ep_lib::MPI_Request>& requestSendInfo);

   void recvInfoFromClients(int clientSrcRank, unsigned char* info, int infoSize,
-                           const MPI_Comm& clientIntraComm,
-                           std::vector<MPI_Request>& requestRecvInfo);
+                           const ep_lib::MPI_Comm& clientIntraComm,
+                           std::vector<ep_lib::MPI_Request>& requestRecvInfo);

   // Send global index to clients
   void sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize,
-                          const MPI_Comm& clientIntraComm,
-                          std::vector<MPI_Request>& requestSendIndexGlobal);
+                          const ep_lib::MPI_Comm& clientIntraComm,
+                          std::vector<ep_lib::MPI_Request>& requestSendIndexGlobal);

   void recvIndexFromClients(int clientSrcRank, size_t* indices, size_t indiceSize,
-                            const MPI_Comm& clientIntraComm,
-                            std::vector<MPI_Request>& requestRecvIndex);
+                            const ep_lib::MPI_Comm& clientIntraComm,
+                            std::vector<ep_lib::MPI_Request>& requestRecvIndex);

   void sendRecvOnReturn(const std::vector<int>& sendNbRank, std::vector<int>& sendNbElements,
XIOS/dev/branch_yushan/src/client_client_dht_template_impl.hpp
r1037 → r1053:
 template<typename T, typename H>
-CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const MPI_Comm& clientIntraComm)
+CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const ep_lib::MPI_Comm& clientIntraComm)
   : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0)
...
 CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const Index2InfoTypeMap& indexInfoMap,
-                                                        const MPI_Comm& clientIntraComm)
+                                                        const ep_lib::MPI_Comm& clientIntraComm)
   : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0)
...
 CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const Index2VectorInfoTypeMap& indexInfoMap,
-                                                        const MPI_Comm& clientIntraComm)
+                                                        const ep_lib::MPI_Comm& clientIntraComm)
   : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0)
...
 void CClientClientDHTTemplate<T,H>::computeIndexInfoMappingLevel(const CArray<size_t,1>& indices,
-                                                                 const MPI_Comm& commLevel,
+                                                                 const ep_lib::MPI_Comm& commLevel,
                                                                  int level)
...
 recvIndexBuff = new unsigned long[recvNbIndexCount];
-std::vector<MPI_Request> request;
+std::vector<ep_lib::MPI_Request> request;
...
 sendIndexToClients(itIndex->first, (itIndex->second), sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request);
-std::vector<MPI_Status> status(request.size());
+std::vector<ep_lib::MPI_Status> status(request.size());
...
-std::vector<MPI_Request> requestOnReturn;
+std::vector<ep_lib::MPI_Request> requestOnReturn;
 currentIndex = 0;
...
-std::vector<MPI_Status> statusOnReturn(requestOnReturn.size());
+std::vector<ep_lib::MPI_Status> statusOnReturn(requestOnReturn.size());
 MPI_Waitall(requestOnReturn.size(), &requestOnReturn[0], &statusOnReturn[0]);
...
 void CClientClientDHTTemplate<T,H>::computeDistributedIndex(const Index2VectorInfoTypeMap& indexInfoMap,
-                                                            const MPI_Comm& commLevel,
+                                                            const ep_lib::MPI_Comm& commLevel,
                                                             int level)
...
 // it will send a message to the correct clients.
 // Contents of the message are index and its corresponding informatioin
-std::vector<MPI_Request> request;
+std::vector<ep_lib::MPI_Request> request;
...
-std::vector<MPI_Status> status(request.size());
+std::vector<ep_lib::MPI_Status> status(request.size());
 MPI_Waitall(request.size(), &request[0], &status[0]);
...
 void CClientClientDHTTemplate<T,H>::sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize,
-                                                       const MPI_Comm& clientIntraComm,
-                                                       std::vector<MPI_Request>& requestSendIndex)
+                                                       const ep_lib::MPI_Comm& clientIntraComm,
+                                                       std::vector<ep_lib::MPI_Request>& requestSendIndex)
 {
-  MPI_Request request;
+  ep_lib::MPI_Request request;
   requestSendIndex.push_back(request);
...
 void CClientClientDHTTemplate<T,H>::recvIndexFromClients(int clientSrcRank, size_t* indices, size_t indiceSize,
-                                                         const MPI_Comm& clientIntraComm,
-                                                         std::vector<MPI_Request>& requestRecvIndex)
+                                                         const ep_lib::MPI_Comm& clientIntraComm,
+                                                         std::vector<ep_lib::MPI_Request>& requestRecvIndex)
 {
-  MPI_Request request;
+  ep_lib::MPI_Request request;
   requestRecvIndex.push_back(request);
...
 void CClientClientDHTTemplate<T,H>::sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize,
-                                                      const MPI_Comm& clientIntraComm,
-                                                      std::vector<MPI_Request>& requestSendInfo)
+                                                      const ep_lib::MPI_Comm& clientIntraComm,
+                                                      std::vector<ep_lib::MPI_Request>& requestSendInfo)
 {
-  MPI_Request request;
+  ep_lib::MPI_Request request;
   requestSendInfo.push_back(request);
...
 void CClientClientDHTTemplate<T,H>::recvInfoFromClients(int clientSrcRank, unsigned char* info, int infoSize,
-                                                        const MPI_Comm& clientIntraComm,
-                                                        std::vector<MPI_Request>& requestRecvInfo)
+                                                        const ep_lib::MPI_Comm& clientIntraComm,
+                                                        std::vector<ep_lib::MPI_Request>& requestRecvInfo)
 {
-  MPI_Request request;
+  ep_lib::MPI_Request request;
   requestRecvInfo.push_back(request);
...
 recvNbElements.resize(recvNbRank.size());
-std::vector<MPI_Request> request(sendNbRank.size()+recvNbRank.size());
-std::vector<MPI_Status> requestStatus(sendNbRank.size()+recvNbRank.size());
+std::vector<ep_lib::MPI_Request> request(sendNbRank.size()+recvNbRank.size());
+std::vector<ep_lib::MPI_Status> requestStatus(sendNbRank.size()+recvNbRank.size());
 int nRequest = 0;
...
 std::vector<int> recvBuff(recvBuffSize*2,0);
-std::vector<MPI_Request> request(sendBuffSize+recvBuffSize);
-std::vector<MPI_Status> requestStatus(sendBuffSize+recvBuffSize);
+std::vector<ep_lib::MPI_Request> request(sendBuffSize+recvBuffSize);
+std::vector<ep_lib::MPI_Status> requestStatus(sendBuffSize+recvBuffSize);
 int nRequest = 0;
XIOS/dev/branch_yushan/src/client_server_mapping.hpp
r1037 → r1053:
 static std::map<int,int> computeConnectedClients(int nbServer, int nbClient,
-                                                 MPI_Comm& clientIntraComm,
+                                                 ep_lib::MPI_Comm& clientIntraComm,
                                                  const std::vector<int>& connectedServerRank);
XIOS/dev/branch_yushan/src/client_server_mapping_distributed.hpp
r835 → r1053:
 /** Default constructor */
 CClientServerMappingDistributed(const boost::unordered_map<size_t,int>& globalIndexOfServer,
-                                const MPI_Comm& clientIntraComm,
+                                const ep_lib::MPI_Comm& clientIntraComm,
                                 bool isDataDistributed = true);
XIOS/dev/branch_yushan/src/context_client.cpp
r1037 → r1053:
 \cxtSer [in] cxtSer Pointer to context of server side. (It is only used on case of attached mode)
 */
-CContextClient::CContextClient(CContext* parent, MPI_Comm intraComm_, MPI_Comm interComm_, CContext* cxtSer)
+CContextClient::CContextClient(CContext* parent, ep_lib::MPI_Comm intraComm_, ep_lib::MPI_Comm interComm_, CContext* cxtSer)
   : mapBufferSize_(), parentServer(cxtSer), maxBufferedEvents(4)
...
 for (itBuffer = bufferList.begin(), itSize = sizeList.begin(); itBuffer != bufferList.end(); itBuffer++, itSize++)
 {
-  retBuffer.push_back((*itBuffer)->getBuffer(*itSize));
+  CBufferOut* m_buf = (*itBuffer)->getBuffer(*itSize);
+  //retBuffer.push_back((*itBuffer)->getBuffer(*itSize));
+  //int m_size = retBuffer.size();
+  //retBuffer.resize(m_size+1);
+  //m_size = retBuffer.size();
+  retBuffer.push_back(m_buf);
 }
 return retBuffer;
XIOS/dev/branch_yushan/src/context_client.hpp
r1037 → r1053:
 public:
   // Contructor
-  CContextClient(CContext* parent, MPI_Comm intraComm, MPI_Comm interComm, CContext* parentServer = 0);
+  CContextClient(CContext* parent, ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* parentServer = 0);

   // Send event to server
...
   int serverSize; //!< Size of server group

-  MPI_Comm interComm; //!< Communicator of server group
+  ep_lib::MPI_Comm interComm; //!< Communicator of server group

-  MPI_Comm intraComm; //!< Communicator of client group
+  ep_lib::MPI_Comm intraComm; //!< Communicator of client group

   map<int,CClientBuffer*> buffers; //!< Buffers for connection to servers
XIOS/dev/branch_yushan/src/context_server.cpp
r1037 → r1053:
 #include "file.hpp"
 #include "grid.hpp"
-#include "mpi.hpp"
+#include "mpi_std.hpp"
 #include "tracer.hpp"
 #include "timer.hpp"
...
-CContextServer::CContextServer(CContext* parent, MPI_Comm intraComm_, MPI_Comm interComm_)
+CContextServer::CContextServer(CContext* parent, ep_lib::MPI_Comm intraComm_, ep_lib::MPI_Comm interComm_)
 {
   context=parent;
...
 int count;
 char * addr;
-MPI_Status status;
+ep_lib::MPI_Status status;
 map<int,CServerBuffer*>::iterator it;
...
 addr=(char*)it->second->getBuffer(count);
-MPI_Irecv(addr,count,MPI_CHAR,rank,20,interComm,&pendingRequest[rank]);
+ep_lib::MPI_Irecv(addr,count,MPI_CHAR,rank,20,interComm,&pendingRequest[rank]);
 bufferRequest[rank]=addr;
...
 void CContextServer::checkPendingRequest(void)
 {
-  map<int,MPI_Request>::iterator it;
+  map<int,ep_lib::MPI_Request>::iterator it;
   list<int> recvRequest;
   list<int>::iterator itRecv;
   int flag;
   int count;
-  MPI_Status status;
+  ep_lib::MPI_Status status;
XIOS/dev/branch_yushan/src/context_server.hpp
r1037 → r1053:
 public:
-  CContextServer(CContext* parent, MPI_Comm intraComm, MPI_Comm interComm) ;
+  CContextServer(CContext* parent, ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm) ;
   bool eventLoop(void) ;
   void listen(void) ;
...
   bool hasFinished(void);

-  MPI_Comm intraComm ;
+  ep_lib::MPI_Comm intraComm ;
   int intraCommSize ;
   int intraCommRank ;

-  MPI_Comm interComm ;
+  ep_lib::MPI_Comm interComm ;
   int commSize ;

   map<int,CServerBuffer*> buffers ;
-  map<int,MPI_Request> pendingRequest ;
+  map<int,ep_lib::MPI_Request> pendingRequest ;
   map<int,char*> bufferRequest ;
XIOS/dev/branch_yushan/src/cxios.cpp
r1037 → r1053:
 MPI_Info info;
 MPI_Comm *ep_comm;
-MPI_Comm_create_endpoints(MPI_COMM_WORLD, num_ep, info, ep_comm);
+MPI_Comm_create_endpoints(MPI_COMM_WORLD, num_ep, info, ep_comm); // servers should reach here too.

 globalComm = ep_comm[0];
XIOS/dev/branch_yushan/src/cxios.hpp
r1037 → r1053:
 public:
   static void initialize(void) ;
-  static void initClientSide(const string & codeId, MPI_Comm& localComm, MPI_Comm& returnComm) ;
+  static void initClientSide(const string & codeId, ep_lib::MPI_Comm& localComm, ep_lib::MPI_Comm& returnComm) ;
   static void initServerSide(void) ;
   static void clientFinalize(void) ;
XIOS/dev/branch_yushan/src/dht_auto_indexing.cpp
r1037 → r1053:
 CDHTAutoIndexing::CDHTAutoIndexing(const CArray<size_t,1>& hashValue,
-                                   const MPI_Comm& clientIntraComm)
+                                   const ep_lib::MPI_Comm& clientIntraComm)
   : CClientClientDHTTemplate<size_t>(clientIntraComm)
...
 */
 CDHTAutoIndexing::CDHTAutoIndexing(Index2VectorInfoTypeMap& hashInitMap,
-                                   const MPI_Comm& clientIntraComm)
+                                   const ep_lib::MPI_Comm& clientIntraComm)
   : CClientClientDHTTemplate<size_t>(clientIntraComm)
XIOS/dev/branch_yushan/src/dht_auto_indexing.hpp
r1037 → r1053:
 CDHTAutoIndexing(const CArray<size_t,1>& hashValue,
-                 const MPI_Comm& clientIntraComm);
+                 const ep_lib::MPI_Comm& clientIntraComm);

 CDHTAutoIndexing(Index2VectorInfoTypeMap& hashInitMap,
-                 const MPI_Comm& clientIntraComm);
+                 const ep_lib::MPI_Comm& clientIntraComm);

 size_t getNbIndexesGlobal() const;
XIOS/dev/branch_yushan/src/filter/filter.cpp
r1037 → r1053:
 CDataPacketPtr outputPacket = engine->apply(data);
 if (outputPacket)
+{
+  printf("filter/filter.cpp : deliverOuput(outputPacket)\n");
   deliverOuput(outputPacket);
+  printf("filter/filter.cpp : deliverOuput(outputPacket) OKOK\n");
+}
 }
 } // namespace xios
XIOS/dev/branch_yushan/src/filter/input_pin.cpp
r1037 → r1053:
 // Unregister before calling onInputReady in case the filter registers again
 gc.unregisterFilter(this, packet->timestamp);
+printf("filter/input_pin.cpp : onInputReady\n");
 onInputReady(it->second.packets);
+printf("filter/input_pin.cpp : onInputReady OKOK\n");
 inputs.erase(it);
XIOS/dev/branch_yushan/src/filter/output_pin.cpp
r1037 → r1053:
 for (it = outputs.begin(), itEnd = outputs.end(); it != itEnd; ++it)
 {
+  printf("filter/output_pin.cpp : setInput\n");
   it->first->setInput(it->second, packet);
+  printf("filter/output_pin.cpp : setInput OKOK\n");
 }
XIOS/dev/branch_yushan/src/filter/source_filter.cpp
r1037 → r1053:
 grid->inputField(data, packet->data);

+printf("filter/source_filter.cpp : deliverOuput(packet) \n");
 deliverOuput(packet);
+printf("filter/source_filter.cpp : deliverOuput(packet) OKOK\n");
XIOS/dev/branch_yushan/src/filter/spatial_transform_filter.cpp
r1037 → r1053:
 idxSendBuff = 0;
-std::vector<MPI_Request> sendRecvRequest;
+std::vector<ep_lib::MPI_Request> sendRecvRequest;
 for (itSend = itbSend; itSend != iteSend; ++itSend, ++idxSendBuff)
 {
...
   sendBuff[idxSendBuff][idx] = dataCurrentSrc(localIndex_p(idx));
 }
-sendRecvRequest.push_back(MPI_Request());
+sendRecvRequest.push_back(ep_lib::MPI_Request());
 MPI_Isend(sendBuff[idxSendBuff], countSize, MPI_DOUBLE, destRank, 12, client->intraComm, &sendRecvRequest.back());
...
 int srcRank = itRecv->first;
 int countSize = itRecv->second.size();
-sendRecvRequest.push_back(MPI_Request());
+sendRecvRequest.push_back(ep_lib::MPI_Request());
 MPI_Irecv(recvBuff + currentBuff, countSize, MPI_DOUBLE, srcRank, 12, client->intraComm, &sendRecvRequest.back());
 currentBuff += countSize;
 }
-std::vector<MPI_Status> status(sendRecvRequest.size());
+std::vector<ep_lib::MPI_Status> status(sendRecvRequest.size());
 MPI_Waitall(sendRecvRequest.size(), &sendRecvRequest[0], &status[0]);
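The hunk above is typical of why the request and status containers in this changeset change type: the element type of the vectors has to match whatever the nonblocking calls expect once the endpoint layer is in place, while the idiom itself (push a fresh request, hand &vector.back() to MPI_Isend/MPI_Irecv, then MPI_Waitall on the whole vector) stays the same. A minimal, self-contained sketch of that idiom against plain MPI; under _usingEP the same code would use ep_lib::MPI_Request and ep_lib::MPI_Status:

    // Sketch of the request-vector idiom, written against plain MPI.
    #include <mpi.h>
    #include <vector>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank, size;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &size);

      std::vector<MPI_Request> requests;
      double sendBuf = rank, recvBuf = -1.0;

      if (size >= 2 && rank < 2)
      {
        int peer = 1 - rank;
        requests.push_back(MPI_Request());   // reserve a slot; the Isend/Irecv fills it in place
        MPI_Isend(&sendBuf, 1, MPI_DOUBLE, peer, 12, MPI_COMM_WORLD, &requests.back());
        requests.push_back(MPI_Request());
        MPI_Irecv(&recvBuf, 1, MPI_DOUBLE, peer, 12, MPI_COMM_WORLD, &requests.back());
      }

      std::vector<MPI_Status> status(requests.size());
      if (!requests.empty())
        MPI_Waitall(static_cast<int>(requests.size()), &requests[0], &status[0]);

      MPI_Finalize();
      return 0;
    }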
XIOS/dev/branch_yushan/src/interface/c/icdata.cpp
r1037 → r1053:
 #include "context.hpp"
 #include "context_client.hpp"
-#include "mpi.hpp"
+#include "mpi_std.hpp"
 #include "timer.hpp"
 #include "array_new.hpp"
...
 std::string str;
-MPI_Comm local_comm;
-MPI_Comm return_comm;
+ep_lib::MPI_Comm local_comm;
+ep_lib::MPI_Comm return_comm;

-fc_comm_map.clear();
+ep_lib::fc_comm_map.clear();

 if (!cstr2string(client_id, len_client_id, str)) return;

 int initialized;
 MPI_Initialized(&initialized);
-if (initialized) local_comm=MPI_Comm_f2c(*f_local_comm);
+//if (initialized) local_comm.mpi_comm = MPI_Comm_f2c(*f_local_comm);
+if (initialized) local_comm = ep_lib::EP_Comm_f2c(static_cast< int >(*f_local_comm));
 else local_comm = MPI_COMM_NULL;
...
 CXios::initClientSide(str, local_comm, return_comm);
-*f_return_comm = MPI_Comm_c2f(return_comm);
+
+*f_return_comm = ep_lib::EP_Comm_c2f(return_comm);

 printf("in icdata.cpp, f_return_comm = %d\n", *f_return_comm);
...
 std::string str;
-MPI_Comm comm;
+ep_lib::MPI_Comm comm;

 if (!cstr2string(context_id, len_context_id, str)) return;
 CTimer::get("XIOS").resume();
 CTimer::get("XIOS init context").resume();
-comm = MPI_Comm_f2c(*f_comm);
+comm = ep_lib::EP_Comm_f2c(static_cast< int >(*f_comm));

 CClient::registerContext(str, comm);

-//printf("client register context OK\n");
+printf("icdata.cpp: client register context OK\n");

 CTimer::get("XIOS init context").suspend();
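The C binding now routes Fortran communicator handles through ep_lib::EP_Comm_f2c / EP_Comm_c2f instead of the standard MPI_Comm_f2c / MPI_Comm_c2f, and clears ep_lib::fc_comm_map at initialisation, which suggests the endpoint library keeps its own table mapping Fortran integer handles to endpoint communicators. The following is a purely hypothetical illustration of such a table; the names EP_Comm_f2c, EP_Comm_c2f and fc_comm_map come from the hunk above, but the bodies are guesses, not the actual ep_lib implementation:

    // Hypothetical illustration only, not ep_lib code.
    #include <map>

    namespace ep_lib_sketch
    {
      struct MPI_Comm { void* internal; };        // stand-in for the endpoint communicator type

      std::map<int, MPI_Comm> fc_comm_map;        // Fortran integer handle -> endpoint communicator

      MPI_Comm EP_Comm_f2c(int f_handle)
      {
        std::map<int, MPI_Comm>::iterator it = fc_comm_map.find(f_handle);
        if (it != fc_comm_map.end()) return it->second;
        return MPI_Comm();                        // unknown handle: return a null communicator
      }

      int EP_Comm_c2f(const MPI_Comm& comm)
      {
        int handle = static_cast<int>(fc_comm_map.size()) + 1;   // naive handle allocation
        fc_comm_map[handle] = comm;
        return handle;
      }
    }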
XIOS/dev/branch_yushan/src/interface/c/oasis_cinterface.cpp
r1037 → r1053:
 fxios_oasis_get_localcomm(&f_comm) ;
-comm=MPI_Comm_f2c(f_comm) ;
+//comm=MPI_Comm_f2c(f_comm) ;
 }
...
 fxios_oasis_get_intracomm(&f_comm,server_id.data(),server_id.size()) ;
-comm_client_server=MPI_Comm_f2c(f_comm) ;
+//comm_client_server=MPI_Comm_f2c(f_comm) ;
 }
...
 fxios_oasis_get_intercomm(&f_comm,server_id.data(),server_id.size()) ;
-comm_client_server=MPI_Comm_f2c(f_comm) ;
+//comm_client_server=MPI_Comm_f2c(f_comm) ;
 }
 }
XIOS/dev/branch_yushan/src/interface/fortran/idata.F90
r1037 → r1053:
 !print*, "in fortran, world_f = ", MPI_COMM_WORLD

+print*, "in fortran, f_return_comm = ", f_return_comm
XIOS/dev/branch_yushan/src/io/inetcdf4.cpp
r948 → r1053:
 }
 mpi = comm && !multifile;
+MPI_Info m_info;

 // The file format will be detected automatically by NetCDF, it is safe to always set NC_MPIIO
 // even if Parallel NetCDF ends up being used.
 if (mpi)
-  CNetCdfInterface::openPar(filename, NC_NOWRITE | NC_MPIIO, *comm, MPI_INFO_NULL, this->ncidp);
+  CNetCdfInterface::openPar(filename, NC_NOWRITE | NC_MPIIO, *comm, m_info, this->ncidp);
 else
   CNetCdfInterface::open(filename, NC_NOWRITE, this->ncidp);
XIOS/dev/branch_yushan/src/io/inetcdf4.hpp
r802 → r1053:
 #include "array_new.hpp"

-#include "mpi.hpp"
+#include "mpi_std.hpp"
 #include "netcdf.hpp"
XIOS/dev/branch_yushan/src/io/nc4_data_output.cpp
r1037 → r1053:
 CNc4DataOutput::CNc4DataOutput
    (const StdString & filename, bool exist, bool useClassicFormat, bool useCFConvention,
-    MPI_Comm comm_file, bool multifile, bool isCollective, const StdString& timeCounterName)
+    ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective, const StdString& timeCounterName)
    : SuperClass()
    , SuperClassWriter(filename, exist, useClassicFormat, useCFConvention, &comm_file, multifile, timeCounterName)
...
 StdString domainName = domain->name;
 domain->assignMesh(domainName, domain->nvertex);
-domain->mesh->createMeshEpsilon(server->intraComm, domain->lonvalue_srv, domain->latvalue_srv, domain->bounds_lon_srv, domain->bounds_lat_srv);
+domain->mesh->createMeshEpsilon(static_cast<MPI_Comm>(server->intraComm.mpi_comm), domain->lonvalue_srv, domain->latvalue_srv, domain->bounds_lon_srv, domain->bounds_lat_srv);

 StdString node_x = domainName + "_node_x";
XIOS/dev/branch_yushan/src/io/nc4_data_output.hpp
r887 → r1053:
 (const StdString & filename, bool exist, bool useClassicFormat,
  bool useCFConvention,
-  MPI_Comm comm_file, bool multifile, bool isCollective = true,
+  ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective = true,
  const StdString& timeCounterName = "time_counter");
...
 /// Propriétés privées ///
-MPI_Comm comm_file;
+ep_lib::MPI_Comm comm_file;
 const StdString filename;
 std::map<Time, StdSize> timeToRecordCache;
XIOS/dev/branch_yushan/src/io/netCdfInterface.hpp
r1037 → r1053:
 #endif

-#include "mpi.hpp"
-//#include <mpi.h>
+#include "mpi_std.hpp"
 #include "netcdf.hpp"
XIOS/dev/branch_yushan/src/io/netcdf.hpp
r685 → r1053:
 #ifndef __XIOS_NETCDF_HPP__
 #define __XIOS_NETCDF_HPP__
-#include "mpi.hpp"
+#include "mpi_std.hpp"
 #define MPI_INCLUDED
 #include <netcdf.h>
...
 extern "C"
 {
-#include <netcdf_par.h>
+  #include <netcdf_par.h>
 }
 # endif
...
 namespace xios
 {
-  inline int nc_create_par(const char *path, int cmode, MPI_Comm comm, MPI_Info info,int *ncidp)
+  inline int nc_create_par(const char *path, int cmode, ep_lib::MPI_Comm comm, MPI_Info info,int *ncidp)
   {
 #if defined(USING_NETCDF_PAR)
-    return ::nc_create_par(path, cmode, comm, info, ncidp) ;
+    return ::nc_create_par(path, cmode, static_cast<MPI_Comm>(comm.mpi_comm), info, ncidp) ;
 #else
     ERROR("int nc_create_par(const char *path, int cmode, MPI_Comm comm, MPI_Info info,int *ncidp)",
...
-  inline int nc_open_par(const char *path, int mode, MPI_Comm comm, MPI_Info info,int *ncidp)
+  inline int nc_open_par(const char *path, int mode, ep_lib::MPI_Comm comm, MPI_Info info,int *ncidp)
   {
 #if defined(USING_NETCDF_PAR)
-    return ::nc_open_par(path, mode, comm, info, ncidp) ;
+    return ::nc_open_par(path, mode, static_cast<MPI_Comm>(comm.mpi_comm), info, ncidp) ;
 #else
     ERROR("int nc_open_par(const char *path, int mode, MPI_Comm comm, MPI_Info info,int *ncidp)",
XIOS/dev/branch_yushan/src/io/onetcdf4.cpp
r1037 → r1053:
 #include "onetcdf4.hpp"
 #include "group_template.hpp"
-//#include "mpi_std.hpp"
 #include "netcdf.hpp"
 #include "netCdfInterface.hpp"
...
 /// ////////////////////// Définitions ////////////////////// ///

-CONetCDF4::CONetCDF4(const StdString& filename, bool append, bool useClassicFormat,
-                     bool useCFConvention,
-                     const MPI_Comm* comm, bool multifile, const StdString& timeCounterName)
+CONetCDF4::CONetCDF4(const StdString& filename, bool append, bool useClassicFormat, bool useCFConvention,
+                     const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName)
   : path()
   , wmpi(false)
...
 void CONetCDF4::initialize(const StdString& filename, bool append, bool useClassicFormat, bool useCFConvention,
-                           const MPI_Comm* comm, bool multifile, const StdString& timeCounterName)
+                           const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName)
 {
   this->useClassicFormat = useClassicFormat;
...
 if (wmpi)
-  CNetCdfInterface::createPar(filename, mode, *comm, info_null, this->ncidp);
+{
+  CNetCdfInterface::createPar(filename, mode, static_cast<MPI_Comm>(comm->mpi_comm), info_null, this->ncidp);
+  printf("creating file with createPar\n");
+}
 else
+{
   CNetCdfInterface::create(filename, mode, this->ncidp);
+  printf("creating file with create\n");
+}

 this->appendMode = false;
...
 mode |= NC_WRITE;
 if (wmpi)
-  CNetCdfInterface::openPar(filename, mode, *comm, info_null, this->ncidp);
+{
+  CNetCdfInterface::openPar(filename, mode, static_cast<MPI_Comm>(comm->mpi_comm), info_null, this->ncidp);
+  printf("opening file with openPar\n");
+}
 else
+{
   CNetCdfInterface::open(filename, mode, this->ncidp);
+  printf("opening file with open\n");
+}

 this->appendMode = true;
XIOS/dev/branch_yushan/src/io/onetcdf4.hpp
r1037 → r1053:
 #include "data_output.hpp"
 #include "array_new.hpp"
-#include "mpi.hpp"
-//#include <mpi.h>
+#include "mpi_std.hpp"
 #include "netcdf.hpp"
...
 CONetCDF4(const StdString& filename, bool append, bool useClassicFormat = false,
           bool useCFConvention = true,
-          const MPI_Comm* comm = NULL, bool multifile = true,
+          const ep_lib::MPI_Comm* comm = NULL, bool multifile = true,
           const StdString& timeCounterName = "time_counter");
...
 /// Initialisation ///
 void initialize(const StdString& filename, bool append, bool useClassicFormat, bool useCFConvention,
-                const MPI_Comm* comm, bool multifile, const StdString& timeCounterName);
+                const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName);
 void close(void);
 void sync(void);
XIOS/dev/branch_yushan/src/mpi.hpp
r1037 → r1053:
 #ifdef _usingEP
-#include "../extern/src_ep/ep_lib.hpp"
+#include "../extern/src_ep_dev/ep_lib.hpp"
 using namespace ep_lib;
 #elif _usingMPI
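This one-line change points the _usingEP build at the newer endpoint-library sources (src_ep_dev). Several other hunks in this changeset unwrap an endpoint communicator with static_cast<MPI_Comm>(comm.mpi_comm) before calling libraries that only understand plain MPI (netcdf.hpp, nc4_data_output.cpp, file.cpp, domain_algorithm_interpolate.cpp), which implies ep_lib::MPI_Comm is a wrapper carrying the underlying MPI communicator in an mpi_comm member. A deliberately simplified illustration of that shape; this is not the real ep_lib definition, which carries additional per-endpoint state:

    // Simplified sketch of the wrapper shape implied by the casts in this changeset.
    #include <mpi.h>

    namespace ep_lib_sketch
    {
      struct MPI_Comm
      {
        ::MPI_Comm mpi_comm;   // underlying "real" MPI communicator shared by all endpoints

        MPI_Comm() : mpi_comm(MPI_COMM_NULL) {}
        explicit MPI_Comm(::MPI_Comm c) : mpi_comm(c) {}
      };
    }

    // Usage mirroring the changeset: unwrap before calling a library that only
    // understands plain MPI communicators (NetCDF, for instance).
    void open_with_plain_mpi(const ep_lib_sketch::MPI_Comm& comm)
    {
      ::MPI_Comm raw = comm.mpi_comm;   // the changeset spells this static_cast<MPI_Comm>(comm.mpi_comm)
      (void)raw;                        // e.g. nc_open_par(path, mode, raw, info, &ncid);
    }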
XIOS/dev/branch_yushan/src/node/axis.cpp
r1037 → r1053:
 CContextServer* server = CContext::getCurrent()->server;
 axis->numberWrittenIndexes_ = axis->indexesToWrite.size();
-MPI_Allreduce(&axis->numberWrittenIndexes_, &axis->totalNumberWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm);
-MPI_Scan(&axis->numberWrittenIndexes_, &axis->offsetWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm);
+ep_lib::MPI_Allreduce(&axis->numberWrittenIndexes_, &axis->totalNumberWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm);
+ep_lib::MPI_Scan(&axis->numberWrittenIndexes_, &axis->offsetWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm);
 axis->offsetWrittenIndexes_ -= axis->numberWrittenIndexes_;
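The two collective calls above compute, per server, the global number of written indexes and this server's starting offset: MPI_Scan returns an inclusive prefix sum, so subtracting the local count afterwards turns it into the exclusive offset. A small stand-alone sketch of the idiom with plain MPI (the branch simply calls the ep_lib versions of the same routines):

    // The inclusive-scan-minus-own-count idiom from the hunk above, with plain MPI.
    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      int numberWritten = rank + 1;   // pretend each rank writes a different number of indexes
      int totalWritten = 0, offset = 0;

      MPI_Allreduce(&numberWritten, &totalWritten, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
      MPI_Scan(&numberWritten, &offset, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
      offset -= numberWritten;        // inclusive prefix sum -> this rank's start offset

      std::printf("rank %d writes %d indexes at offset %d of %d\n",
                  rank, numberWritten, offset, totalWritten);

      MPI_Finalize();
      return 0;
    }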
XIOS/dev/branch_yushan/src/node/context.cpp
r1037 → r1053:
 //! Initialize client side
-void CContext::initClient(MPI_Comm intraComm, MPI_Comm interComm, CContext* cxtServer /*= 0*/)
+void CContext::initClient(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtServer /*= 0*/)
 {
   hasClient=true;
...
   registryOut->setPath(getId()) ;

-  MPI_Comm intraCommServer, interCommServer;
+  ep_lib::MPI_Comm intraCommServer, interCommServer;
   if (cxtServer) // Attached mode
   {
...
 //! Initialize server
-void CContext::initServer(MPI_Comm intraComm, MPI_Comm interComm, CContext* cxtClient /*= 0*/)
+void CContext::initServer(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtClient /*= 0*/)
 {
   hasServer=true;
...
   registryOut->setPath(getId()) ;

-  MPI_Comm intraCommClient, interCommClient;
+  ep_lib::MPI_Comm intraCommClient, interCommClient;
   if (cxtClient) // Attached mode
   {
...
   closeAllFile();
   registryOut->hierarchicalGatherRegistry() ;
+  //registryOut->gatherRegistry() ;
   if (server->intraCommRank==0) CXios::globalRegistry->mergeRegistry(*registryOut) ;
 }

-for (std::list<MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it)
+for (std::list<ep_lib::MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it)
   MPI_Comm_free(&(*it));
 comms.clear();
XIOS/dev/branch_yushan/src/node/context.hpp
r1037 → r1053:
 public :
   // Initialize server or client
-  void initServer(MPI_Comm intraComm, MPI_Comm interComm, CContext* cxtClient = 0);
-  void initClient(MPI_Comm intraComm, MPI_Comm interComm, CContext* cxtServer = 0);
+  void initServer(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtClient = 0);
+  void initClient(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtServer = 0);
   bool isInitialized(void);
...
   StdString idServer_;
   CGarbageCollector garbageCollector;
-  std::list<MPI_Comm> comms; //!< Communicators allocated internally
+  std::list<ep_lib::MPI_Comm> comms; //!< Communicators allocated internally

 public: // Some function maybe removed in the near future
XIOS/dev/branch_yushan/src/node/domain.cpp
r1037 → r1053 (whitespace-only change in this hunk):
 {
   CContext* context = CContext::getCurrent();
   CContextClient* client = context->client;
   lon_g.resize(ni_glo) ;
   lat_g.resize(nj_glo) ;
XIOS/dev/branch_yushan/src/node/field_impl.hpp
r1037 → r1053:
 if (clientSourceFilter)
 {
+  printf("file_impl.hpp : clientSourceFilter->streamData\n");
   clientSourceFilter->streamData(CContext::getCurrent()->getCalendar()->getCurrentDate(), _data);
+  printf("file_impl.hpp : clientSourceFilter->streamData OKOK\n");
 }
 else if (!field_ref.isEmpty() || !content.empty())
+{
   ERROR("void CField::setData(const CArray<double, N>& _data)",
         << "Impossible to receive data from the model for a field [ id = " << getId() << " ] with a reference or an arithmetic operation.");
+}
XIOS/dev/branch_yushan/src/node/file.cpp
r1037 → r1053:
 if (isOpen) data_out->closeFile();
-if (time_counter_name.isEmpty()) data_in = shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), fileComm, multifile, isCollective));
-else data_in = shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), fileComm, multifile, isCollective, time_counter_name));
+if (time_counter_name.isEmpty()) data_in = shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), static_cast<MPI_Comm>(fileComm.mpi_comm), multifile, isCollective));
+else data_in = shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), static_cast<MPI_Comm>(fileComm.mpi_comm), multifile, isCollective, time_counter_name));
 isOpen = true;
XIOS/dev/branch_yushan/src/node/file.hpp
r1037 → r1053:
 bool isOpen;
 bool allDomainEmpty;
-MPI_Comm fileComm;
+ep_lib::MPI_Comm fileComm;

 private :
XIOS/dev/branch_yushan/src/node/mesh.cpp
r1037 → r1053:
  * \param [in] bounds_lat Array of boundary latitudes. Its size depends on the element type.
  */
-void CMesh::createMeshEpsilon(const MPI_Comm& comm,
+void CMesh::createMeshEpsilon(const ep_lib::MPI_Comm& comm,
                               const CArray<double, 1>& lonvalue, const CArray<double, 1>& latvalue,
                               const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat)
...
-void CMesh::getGloNghbFacesNodeType(const MPI_Comm& comm, const CArray<int, 1>& face_idx,
+void CMesh::getGloNghbFacesNodeType(const ep_lib::MPI_Comm& comm, const CArray<int, 1>& face_idx,
                                     const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat,
                                     CArray<int, 2>& nghbFaces)
...
-void CMesh::getGloNghbFacesEdgeType(const MPI_Comm& comm, const CArray<int, 1>& face_idx,
+void CMesh::getGloNghbFacesEdgeType(const ep_lib::MPI_Comm& comm, const CArray<int, 1>& face_idx,
                                     const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat,
                                     CArray<int, 2>& nghbFaces)
...
-void CMesh::getGlobalNghbFaces(const int nghbType, const MPI_Comm& comm,
+void CMesh::getGlobalNghbFaces(const int nghbType, const ep_lib::MPI_Comm& comm,
                                const CArray<int, 1>& face_idx,
                                const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat,
XIOS/dev/branch_yushan/src/node/mesh.hpp
r931 → r1053:
-void createMeshEpsilon(const MPI_Comm&,
+void createMeshEpsilon(const ep_lib::MPI_Comm&,
                        const CArray<double, 1>&, const CArray<double, 1>&,
                        const CArray<double, 2>&, const CArray<double, 2>& );

-void getGlobalNghbFaces(const int, const MPI_Comm&, const CArray<int, 1>&,
+void getGlobalNghbFaces(const int, const ep_lib::MPI_Comm&, const CArray<int, 1>&,
                         const CArray<double, 2>&, const CArray<double, 2>&,
                         CArray<int, 2>&);
...
 CClientClientDHTSizet* pNodeGlobalIndex; // pointer to a map <nodeHash, nodeIdxGlo>
 CClientClientDHTSizet* pEdgeGlobalIndex; // pointer to a map <edgeHash, edgeIdxGlo>
-void getGloNghbFacesNodeType(const MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&);
-void getGloNghbFacesEdgeType(const MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&);
+void getGloNghbFacesNodeType(const ep_lib::MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&);
+void getGloNghbFacesEdgeType(const ep_lib::MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&);
 void getLocNghbFacesNodeType(const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&, CArray<int, 1>&);
 void getLocNghbFacesEdgeType(const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&, CArray<int, 1>&);
XIOS/dev/branch_yushan/src/policy.hpp
r855 → r1053:
 {
 protected:
-  DivideAdaptiveComm(const MPI_Comm& mpiComm);
+  DivideAdaptiveComm(const ep_lib::MPI_Comm& mpiComm);

   void computeMPICommLevel();
...
 protected:
-  const MPI_Comm& internalComm_;
+  const ep_lib::MPI_Comm& internalComm_;
   std::vector<std::vector<int> > groupParentsBegin_;
   std::vector<std::vector<int> > nbInGroupParents_;
XIOS/dev/branch_yushan/src/registry.cpp
r1037 → r1053:
 #include "registry.hpp"
 #include "type.hpp"
-#include <mpi.hpp>
 #include <fstream>
 #include <sstream>
...
 void CRegistry::hierarchicalGatherRegistry(void)
 {
-  //hierarchicalGatherRegistry(communicator) ;
+  hierarchicalGatherRegistry(communicator) ;
 }
...
 if (mpiRank==0 || mpiRank==mpiSize/2+mpiSize%2) color=0 ;
 else color=1 ;
+
 MPI_Comm_split(comm,color,mpiRank,&commDown) ;
+
 if (color==0) gatherRegistry(commDown) ;
+printf("gatherRegistry OKOK\n");
 MPI_Comm_free(&commDown) ;
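hierarchicalGatherRegistry(), re-enabled above, relies on MPI_Comm_split: the head rank of each half of the communicator gets color 0, and the gather then proceeds among those head ranks. The sketch below only illustrates the split step with plain MPI, using the same coloring rule as the hunk; it is not the full recursive gather:

    // Illustration of the MPI_Comm_split step only: ranks passing the same color
    // end up in the same sub-communicator, ordered by the key argument.
    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int mpiRank, mpiSize;
      MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
      MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);

      // Same coloring rule as the hunk: the head rank of each half gets color 0.
      int color = (mpiRank == 0 || mpiRank == mpiSize / 2 + mpiSize % 2) ? 0 : 1;

      MPI_Comm commDown;
      MPI_Comm_split(MPI_COMM_WORLD, color, mpiRank, &commDown);

      int subRank, subSize;
      MPI_Comm_rank(commDown, &subRank);
      MPI_Comm_size(commDown, &subSize);
      std::printf("world rank %d -> color %d, sub rank %d/%d\n", mpiRank, color, subRank, subSize);

      MPI_Comm_free(&commDown);
      MPI_Finalize();
      return 0;
    }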
XIOS/dev/branch_yushan/src/registry.hpp
r1037 → r1053:
 /** Constructor, the communicator is used for bcast or gather operation between MPI processes */

-CRegistry(const MPI_Comm& comm=MPI_COMM_WORLD) : communicator(comm) {}
+CRegistry(const ep_lib::MPI_Comm& comm=MPI_COMM_WORLD) : communicator(comm) {}
...
 /** MPI communicator used for broadcast and gather operation */
-MPI_Comm communicator ;
+ep_lib::MPI_Comm communicator ;
 } ;
XIOS/dev/branch_yushan/src/test/test_client.f90
r1037 → r1053:
 CALL MPI_COMM_RANK(comm,rank,ierr)
-print*, "test_client MPI_COMM_RANK OK"
+print*, "test_client MPI_COMM_RANK OK", rank
 CALL MPI_COMM_SIZE(comm,size,ierr)
+print*, "test_client MPI_COMM_SIZE OK", size
...
 PRINT*,"field field_A is active ? ",xios_field_is_active("field_A")
 !DO ts=1,24*10
-DO ts=1,24
+DO ts=1,6
   CALL xios_update_calendar(ts)
   print*, "xios_update_calendar OK, ts = ", ts
XIOS/dev/branch_yushan/src/transformation/axis_algorithm_inverse.cpp
r1037 → r1053:
 // Sending global index of grid source to corresponding process as well as the corresponding mask
-std::vector<MPI_Request> requests;
-std::vector<MPI_Status> status;
+std::vector<ep_lib::MPI_Request> requests;
+std::vector<ep_lib::MPI_Status> status;
 boost::unordered_map<int, unsigned long* > recvGlobalIndexSrc;
 boost::unordered_map<int, double* > sendValueToDest;
...
 sendValueToDest[recvRank] = new double [recvSize];

-requests.push_back(MPI_Request());
+requests.push_back(ep_lib::MPI_Request());
 MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back());
...
 // Send global index source and mask
-requests.push_back(MPI_Request());
+requests.push_back(ep_lib::MPI_Request());
 MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back());
...
 //printf("(%d) src/transformation/axis_algorithm_inverse 1st waitall OK\n", clientRank);
-std::vector<MPI_Request>().swap(requests);
-std::vector<MPI_Status>().swap(status);
+std::vector<ep_lib::MPI_Request>().swap(requests);
+std::vector<ep_lib::MPI_Status>().swap(status);

 // Okie, on destination side, we will wait for information of masked index of source
...
 int recvSize = itSend->second;

-requests.push_back(MPI_Request());
+requests.push_back(ep_lib::MPI_Request());
 MPI_Irecv(recvValueFromSrc[recvRank], recvSize, MPI_DOUBLE, recvRank, 48, client->intraComm, &requests.back());
...
 // Okie, now inform the destination which source index are masked
-requests.push_back(MPI_Request());
+requests.push_back(ep_lib::MPI_Request());
 MPI_Isend(sendValueToDest[recvRank], recvSize, MPI_DOUBLE, recvRank, 48, client->intraComm, &requests.back());
XIOS/dev/branch_yushan/src/transformation/axis_algorithm_inverse.hpp
r933 → r1053:
 #include "axis_algorithm_transformation.hpp"
 #include "transformation.hpp"
+#ifdef _usingEP
+#include "ep_declaration.hpp"
+#endif

 namespace xios {
XIOS/dev/branch_yushan/src/transformation/domain_algorithm_interpolate.cpp
r1037 → r1053:
 CContextClient* client=context->client;

-MPI_Comm poleComme(MPI_COMM_NULL);
-MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? MPI_UNDEFINED : 1, 0, &poleComme);
+ep_lib::MPI_Comm poleComme(MPI_COMM_NULL);
+ep_lib::MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? MPI_UNDEFINED : 1, 0, &poleComme);
 if (MPI_COMM_NULL != poleComme)
 {
   int nbClientPole;
-  MPI_Comm_size(poleComme, &nbClientPole);
+  ep_lib::MPI_Comm_size(poleComme, &nbClientPole);

   std::map<int,std::vector<std::pair<int,double> > >::iterator itePole = interMapValuePole.end(), itPole,
...
 double* sendWeightBuff = new double [sendBuffSize];

-std::vector<MPI_Request> sendRequest;
+std::vector<ep_lib::MPI_Request> sendRequest;

 int sendOffSet = 0, l = 0;
...
-sendRequest.push_back(MPI_Request());
+sendRequest.push_back(ep_lib::MPI_Request());
 MPI_Isend(sendIndexDestBuff + sendOffSet, k, …, client->intraComm, &sendRequest.back());
-sendRequest.push_back(MPI_Request());
+sendRequest.push_back(ep_lib::MPI_Request());
 MPI_Isend(sendIndexSrcBuff + sendOffSet, k, …, client->intraComm, &sendRequest.back());
-sendRequest.push_back(MPI_Request());
+sendRequest.push_back(ep_lib::MPI_Request());
 MPI_Isend(sendWeightBuff + sendOffSet, k, …, client->intraComm, &sendRequest.back());
...
 while (receivedSize < recvBuffSize)
 {
-  MPI_Status recvStatus;
+  ep_lib::MPI_Status recvStatus;
   MPI_Recv((recvIndexDestBuff + receivedSize), recvBuffSize, …);
...
-std::vector<MPI_Status> requestStatus(sendRequest.size());
-MPI_Waitall(sendRequest.size(), &sendRequest[0], MPI_STATUS_IGNORE);
+std::vector<ep_lib::MPI_Status> requestStatus(sendRequest.size());
+ep_lib::MPI_Status stat_ignore;
+MPI_Waitall(sendRequest.size(), &sendRequest[0], &stat_ignore);
+//MPI_Waitall(sendRequest.size(), &sendRequest[0], MPI_STATUS_IGNORE);

 delete [] sendIndexDestBuff;
...
 MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, MPI_LONG, MPI_SUM, client->intraComm);
-MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, client->intraComm);
+ep_lib::MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, client->intraComm);

 std::vector<StdSize> start(1, startIndex - localNbWeight);
 std::vector<StdSize> count(1, localNbWeight);

-WriteNetCdf netCdfWriter(filename, client->intraComm);
+WriteNetCdf netCdfWriter(filename, static_cast<MPI_Comm>(client->intraComm.mpi_comm));
XIOS/dev/branch_yushan/src/transformation/domain_algorithm_interpolate.hpp
r1037 → r1053:
 #include "transformation.hpp"
 #include "nc4_data_output.hpp"
+#ifdef _usingEP
+#include "ep_declaration.hpp"
+#endif

 namespace xios {
XIOS/dev/branch_yushan/src/transformation/grid_transformation.cpp
r1037 → r1053:
 // Sending global index of grid source to corresponding process as well as the corresponding mask
-std::vector<MPI_Request> requests;
-std::vector<MPI_Status> status;
+std::vector<ep_lib::MPI_Request> requests;
+std::vector<ep_lib::MPI_Status> status;
 boost::unordered_map<int, unsigned char* > recvMaskDst;
 boost::unordered_map<int, unsigned long* > recvGlobalIndexSrc;
...
 recvGlobalIndexSrc[recvRank] = new unsigned long [recvSize];

-requests.push_back(MPI_Request());
+requests.push_back(ep_lib::MPI_Request());
 MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back());
-requests.push_back(MPI_Request());
+requests.push_back(ep_lib::MPI_Request());
 MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests.back());
...
 // Send global index source and mask
-requests.push_back(MPI_Request());
+requests.push_back(ep_lib::MPI_Request());
 MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back());
-requests.push_back(MPI_Request());
+requests.push_back(ep_lib::MPI_Request());
 MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests.back());
...
 // Okie, now use the mask to identify which index source we need to send, then also signal the destination which masked index we will return
-std::vector<MPI_Request>().swap(requests);
-std::vector<MPI_Status>().swap(status);
+std::vector<ep_lib::MPI_Request>().swap(requests);
+std::vector<ep_lib::MPI_Status>().swap(status);
 // Okie, on destination side, we will wait for information of masked index of source
 for (std::map<int,int>::const_iterator itSend = sendRankSizeMap.begin(); itSend != sendRankSizeMap.end(); ++itSend)
...
 int recvSize = itSend->second;

-requests.push_back(MPI_Request());
+requests.push_back(ep_lib::MPI_Request());
 MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
...
 // Okie, now inform the destination which source index are masked
-requests.push_back(MPI_Request());
+requests.push_back(ep_lib::MPI_Request());
 MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());