Changeset 1661
- Timestamp: 05/15/19 17:19:08
- Location: XIOS/dev/dev_trunk_omp
- Files: 29 edited
XIOS/dev/dev_trunk_omp/bld.cfg
(r1646 → r1661)
   52  52    bld::target test_remap.exe
   53  53    bld::target test_remap_omp.exe
   54       - #bld::target test_complete.exe
       54   + bld::target test_complete.exe
   55  55    bld::target test_complete_omp.exe
   56  56    #bld::target test_client.exe
The test_complete.exe build target is re-enabled (its comment marker is removed).
XIOS/dev/dev_trunk_omp/extern/remap/src/mpi_routing.cpp
(r1646 → r1661) In both receive loops over nbSource, the wildcard source rank is now chosen at compile time: -2 under the endpoint (EP) library, MPI_ANY_SOURCE under plain MPI.
  153 153    for (int i = 0; i < nbSource; i++)
  154 154    {
      155  + #ifdef _usingEP
  155 156      MPI_Irecv(&sourceRank[i], 1, MPI_INT, -2, 0, communicator, &request[indexRequest++]);
      157  + #endif
      158  +
      159  + #ifdef _usingMPI
      160  +   MPI_Irecv(&sourceRank[i], 1, MPI_INT, MPI_ANY_SOURCE, 0, communicator, &request[indexRequest++]);
      161  + #endif
  156 162    }
  157 163    MPI_Barrier(communicator);
The same #ifdef _usingEP / #ifdef _usingMPI split is applied to the second receive loop (old lines 171-175, new lines 177-186), where the request index is incremented on a separate line.
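The change above is worth a self-contained illustration: the loop posts one non-blocking wildcard receive per expected source, and only the encoding of "any source" differs between the two builds (-2 is the wildcard value used by the EP wrapper in this diff; plain MPI uses MPI_ANY_SOURCE). A minimal sketch, assuming a caller-provided request array of at least nbSource entries:

    #include <mpi.h>

    // Post one non-blocking receive per expected source rank; the sender's
    // rank is not known in advance, so a wildcard source is used.
    void postSourceRankReceives(int nbSource, int* sourceRank,
                                MPI_Comm communicator, MPI_Request* request)
    {
      int indexRequest = 0;
      for (int i = 0; i < nbSource; i++)
      {
    #ifdef _usingEP
        // EP build: the wrapper encodes "any source" as -2 (value taken from the diff).
        MPI_Irecv(&sourceRank[i], 1, MPI_INT, -2, 0, communicator, &request[indexRequest++]);
    #endif
    #ifdef _usingMPI
        // Plain MPI build: use the portable wildcard constant.
        MPI_Irecv(&sourceRank[i], 1, MPI_INT, MPI_ANY_SOURCE, 0, communicator, &request[indexRequest++]);
    #endif
      }
      MPI_Barrier(communicator);
    }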
XIOS/dev/dev_trunk_omp/extern/remap/src/timerRemap.cpp
(r1646 → r1661) Three commented-out remnants of the old static map CTimer::allTimer are deleted; the active code keeps the lazily allocated per-thread pointer allTimer_ptr:
   14       - //map<string,CTimer*> CTimer::allTimer;
   15  14     map<string,CTimer*> *CTimer::allTimer_ptr = 0;
   62  61     if(allTimer_ptr == 0) allTimer_ptr = new map<string,CTimer*>;
   63       - //it=allTimer.find(name);
   64  62     it=allTimer_ptr->find(name);
   65       - //if (it==allTimer.end()) it=allTimer.insert(pair<string,CTimer*>(name,new CTimer(name))).first;
   66  63     if (it==allTimer_ptr->end()) it=allTimer_ptr->insert(pair<string,CTimer*>(name,new CTimer(name))).first;
XIOS/dev/dev_trunk_omp/extern/remap/src/timerRemap.hpp
(r1602 → r1661) The matching commented-out declaration is removed from the header:
   26  26    double getCumulatedTime(void);
   27  27    void print(void);
   28       - //static map<string,CTimer*> allTimer;
   29  28    static map<string,CTimer*> *allTimer_ptr;
   30  29    #pragma omp threadprivate(allTimer_ptr)
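The pointer-plus-threadprivate pattern above gives each OpenMP thread its own timer registry, allocated on first use so no cross-thread locking or initialization order is needed. A condensed sketch of the pattern as it appears across timerRemap.hpp/.cpp (the real CTimer has more members; only the registry logic is shown):

    #include <map>
    #include <string>

    class CTimer
    {
      public:
        explicit CTimer(const std::string& name) : name_(name) {}

        // One registry per OpenMP thread, allocated lazily on first access.
        static std::map<std::string, CTimer*>* allTimer_ptr;
        #pragma omp threadprivate(allTimer_ptr)

        static CTimer& get(const std::string& name)
        {
          if (allTimer_ptr == 0) allTimer_ptr = new std::map<std::string, CTimer*>;
          std::map<std::string, CTimer*>::iterator it = allTimer_ptr->find(name);
          if (it == allTimer_ptr->end())
            it = allTimer_ptr->insert(std::make_pair(name, new CTimer(name))).first;
          return *(it->second);
        }

      private:
        std::string name_;
    };

    std::map<std::string, CTimer*>* CTimer::allTimer_ptr = 0;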
XIOS/dev/dev_trunk_omp/inputs/COMPLETE/context_atmosphere.xml
(r1650 → r1661) The top-level field_definition of the atmosphere context is disabled by default:
    5       - <field_definition level="1" enabled=".TRUE." default_value="9.96921e+36">
        5   + <field_definition level="1" enabled=".FALSE." default_value="9.96921e+36">
    6   6     <field id="field_A_atm" name="field_A_atm_origin" operation="average" freq_op="1ts" grid_ref="grid_A_atm" />
    7   7     <field id="field_A_atm_zoom" name="field_A_atm" operation="average" freq_op="1ts" field_ref="field_A_atm" grid_ref="grid_A_atm_zoom" />
XIOS/dev/dev_trunk_omp/inputs/COMPLETE/iodef.xml
(r1646 → r1661) The diagnostic verbosity of the COMPLETE input set is raised:
   20  20    <variable_group id="parameters" >
   21       - <variable id="info_level" type="int">50</variable>
       21   + <variable id="info_level" type="int">100</variable>
   22  22    <variable id="print_file" type="bool">true</variable>
   23  23    </variable_group>
XIOS/dev/dev_trunk_omp/src/attribute_enum.hpp
(r1646 → r1661) A French comment heading is replaced by its English spelling:
   16       - /// ////////////////////// Déclarations ////////////////////// ///
       16   + /// ////////////////////// Declarations ////////////////////// ///
XIOS/dev/dev_trunk_omp/src/calendar_util.cpp
(r1630 → r1661) The direct include of calendar.hpp is dropped:
    1   1    #include "calendar_util.hpp"
    2        - #include "calendar.hpp"
    4   3    namespace xios
XIOS/dev/dev_trunk_omp/src/client_client_dht_template_impl.hpp
(r1601 → r1661) The explicit ep_lib:: qualification is dropped from every MPI call in this file; the calls now use the plain MPI_* names, while the ep_lib::MPI_Comm, ep_lib::MPI_Request and ep_lib::MPI_Status types are kept. Representative change:
   19       - ep_lib::MPI_Comm_size(clientIntraComm, &nbClient_);
       19   + MPI_Comm_size(clientIntraComm, &nbClient_);
The same substitution is applied to MPI_Comm_size (lines 19, 39, 64), MPI_Comm_rank (lines 101, 396), MPI_Waitall (lines 202, 326, 510, 781, 830), MPI_Isend (lines 568, 639, 776, 826) and MPI_Irecv (lines 603, 676, 769, 813).
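All of the touched call sites follow the same non-blocking exchange pattern: post one MPI_Irecv per expected peer, one MPI_Isend per destination, then complete everything with a single MPI_Waitall. A minimal, self-contained sketch of that pattern in plain MPI (peer list, tag and payload are illustrative, not the XIOS ones):

    #include <mpi.h>
    #include <vector>

    // Exchange one integer with each peer using non-blocking calls, then wait
    // for all requests at once.
    void exchangeCounts(const std::vector<int>& peers, int sendValue,
                        std::vector<int>& recvValues, MPI_Comm comm)
    {
      if (peers.empty()) return;

      const int tag = 0;                                    // illustrative tag
      recvValues.assign(peers.size(), 0);
      std::vector<int> sendValues(peers.size(), sendValue); // one send buffer per peer
      std::vector<MPI_Request> request(2 * peers.size());

      int nRequest = 0;
      for (std::size_t i = 0; i < peers.size(); ++i)
        MPI_Irecv(&recvValues[i], 1, MPI_INT, peers[i], tag, comm, &request[nRequest++]);
      for (std::size_t i = 0; i < peers.size(); ++i)
        MPI_Isend(&sendValues[i], 1, MPI_INT, peers[i], tag, comm, &request[nRequest++]);

      std::vector<MPI_Status> status(request.size());
      MPI_Waitall((int)request.size(), &request[0], &status[0]);
    }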
XIOS/dev/dev_trunk_omp/src/cxios.cpp
(r1646 → r1661) A commented-out debug print is removed from the critical section around parseFile:
   54  54    #pragma omp critical
   55  55    {
   56       - //std::cout<<"thread "<<tmp_rank<<"("<<omp_get_thread_num()<<")"<<" parsing rootfile"<<std::endl;
   57  56    parseFile(rootFile);
   58  57    std::cout<<"thread "<<tmp_rank<<"("<<omp_get_thread_num()<<")"<<" parsed rootfile"<<std::endl;
XIOS/dev/dev_trunk_omp/src/filter/spatial_transform_filter.cpp
(r1646 → r1661) Four blank lines (old lines 75-78) before the CSpatialTemporalFilter constructor are removed:
   73  73    }
   74  74
   75-78    - (blank lines)
   79  75    CSpatialTemporalFilter::CSpatialTemporalFilter(CGarbageCollector& gc, CSpatialTransformFilterEngine* engine, CGridTransformation* gridTransformation, double outputValue, size_t inputSlotsCount)
   80  76    : CSpatialTransformFilter(gc, engine, outputValue, inputSlotsCount), record(0)
XIOS/dev/dev_trunk_omp/src/io/netCdfInterface.cpp
(r1646 → r1661) A status check is added before the block that follows the nc_inq_grpname_full logging:
  568 568    info(200)<<"start nc_inq_grpname_full"<<std::endl;
  569 569    }
  570 570
      571  + if (NC_NOERR != status)
  571 572    {
XIOS/dev/dev_trunk_omp/src/mpi.hpp
(r1650 → r1661) A commented-out using-directive is removed from the EP branch of the MPI header:
   14  14    #include "ep_lib.hpp"
   15  15    #include "ep_declaration.hpp"
   16       - //using namespace ep_lib;
   17  16    #elif _usingMPI
   18  17    #include <mpi.h>
XIOS/dev/dev_trunk_omp/src/node/axis.cpp
(r1646 → r1661) Same clean-up as in the DHT template: the ep_lib:: qualification is dropped from the MPI types of the written-index interfaces and from the MPI calls. Representative change:
  151      - int CAxis::getNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom)
      151  + int CAxis::getNumberWrittenIndexes(MPI_Comm writtenCom)
Affected: the writtenCom/writtenComm parameters of getNumberWrittenIndexes (line 151), getTotalNumberWrittenIndexes (164), getOffsetWrittenIndexes (177), getCompressedIndexToWriteOnServer (186) and computeWrittenCompressedIndex (789); the MPI_Comm_size calls inside them (lines 155, 168, 181, 190, 793); and the MPI_Allreduce / MPI_Scan pair that computes the total and the offset of written indexes (lines 871-872), after which the offset is made exclusive by subtracting the local count.
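The MPI_Allreduce / MPI_Scan pair at lines 871-872 is the usual way to lay out per-rank output: Allreduce gives the global total, MPI_Scan gives an inclusive prefix sum, and subtracting the local count turns it into this rank's exclusive starting offset. A small stand-alone illustration of the idiom (generic names, not the CAxis members):

    #include <mpi.h>

    // Compute the global total and this rank's starting offset (exclusive
    // prefix sum) from the number of indexes it will write.
    void writtenIndexLayout(int localCount, MPI_Comm comm, int& total, int& offset)
    {
      MPI_Allreduce(&localCount, &total, 1, MPI_INT, MPI_SUM, comm);  // grand total
      MPI_Scan(&localCount, &offset, 1, MPI_INT, MPI_SUM, comm);      // inclusive prefix sum
      offset -= localCount;                                           // make it exclusive
    }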
XIOS/dev/dev_trunk_omp/src/node/axis.hpp
(r1646 → r1661) A leftover commented-out member is removed:
  182 182    static std::map<StdString, ETranformationType> *transformationMapList_ptr;
  183 183    #pragma omp threadprivate(transformationMapList_ptr)
  184       - //static bool dummyTransformationMapList_;
  186 185    DECLARE_REF_FUNC(Axis,axis)
XIOS/dev/dev_trunk_omp/src/node/context.cpp
(r1646 → r1661) ep_lib:: qualification dropped from the communicator management calls:
  289      - ep_lib::MPI_Comm_dup(intraComm, &intraCommServer);
      289  + MPI_Comm_dup(intraComm, &intraCommServer);
The same substitution is applied to MPI_Comm_dup at lines 289, 291, 314, 316, 415 and 417 (duplication of intraComm/interComm for the server and client context communicators) and to MPI_Comm_free at lines 508, 556 and 575 (the loops that free the internally allocated communicators stored in comms).
XIOS/dev/dev_trunk_omp/src/node/context.hpp
(r1646 → r1661) A stray blank line (old line 147) is removed before the "Send context close definition" comment:
  145 145    void distributeFileOverMemoryBandwith() ;
  146 146
  147       - (blank line)
  148 147    // Send context close definition
  149 148    void sendCloseDefinition(void);
XIOS/dev/dev_trunk_omp/src/node/domain.cpp
(r1646 → r1661) Same ep_lib:: clean-up as in axis.cpp, applied to CDomain:
  118      - int CDomain::getNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom)
      118  + int CDomain::getNumberWrittenIndexes(MPI_Comm writtenCom)
Affected: the writtenCom/writtenComm parameters of getNumberWrittenIndexes (line 118), getTotalNumberWrittenIndexes (131), getOffsetWrittenIndexes (144), getCompressedIndexToWriteOnServer (153) and computeWrittenCompressedIndex (old line 2024), and the MPI_Comm_size calls inside them (lines 122, 135, 148, 157, 2028); the MPI_Allgather / MPI_Allgatherv calls that assemble ibegin/jbegin/ni/nj and the global lon/lat arrays (lines 709-718); the MPI_Gather / MPI_Gatherv pair that collects the list of connected servers on rank 0 (around lines 1951-1958, the Gatherv call being joined onto a single line); and the MPI_Allreduce / MPI_Scan pair computing the total and offset of written indexes (old lines 2085-2086).
XIOS/dev/dev_trunk_omp/src/node/file.cpp
(r1646 → r1661)
  309      - ep_lib::MPI_Comm_split(server->intraComm, color, server->intraCommRank, &fileComm);
      309  + MPI_Comm_split(server->intraComm, color, server->intraCommRank, &fileComm);
  310      - if (allZoneEmpty) ep_lib::MPI_Comm_free(&fileComm);
      310  + if (allZoneEmpty) MPI_Comm_free(&fileComm);
The ep_lib:: prefix is likewise dropped from the MPI_Comm_size / MPI_Comm_rank pairs on fileComm (lines 559-560) and readComm (lines 685-686). In addition, the commented-out communicator release at the end of the close path is reinstated under a compile-time guard:
  730      - //if (fileComm != MPI_COMM_NULL) MPI_Comm_free(&fileComm);
      730  + #ifdef _usingMPI
      731  + if (fileComm != MPI_COMM_NULL) MPI_Comm_free(&fileComm);
      732  + #endif
XIOS/dev/dev_trunk_omp/src/node/grid.cpp
(r1646 → r1661)
  699      - CContextClient* client = context->client; // Here it's not important which contextClient to recuperate
      699  + CContextClient* client = context->client;
  858      - ep_lib::MPI_Gather(&localCount, 1, MPI_INT, &counts[0], 1, MPI_INT, 0, client->intraComm) ;
      858  + MPI_Gather(&localCount, 1, MPI_INT, &counts[0], 1, MPI_INT, 0, client->intraComm) ;
  866      - ep_lib::MPI_Gatherv(&(connectedServerRank_[receiverSize])[0], localCount, MPI_INT, &allConnectedServers[0], &counts[0], &displs[0], MPI_INT, 0, client->intraComm);
      864  + MPI_Gatherv(&(connectedServerRank_[receiverSize])[0], localCount, MPI_INT, &allConnectedServers[0], &counts[0], &displs[0], MPI_INT, 0, client->intraComm);
The blank lines surrounding the Gather/Gatherv calls are removed at the same time.
XIOS/dev/dev_trunk_omp/src/object_factory_impl.hpp
(r1628 → r1661) Two commented-out lines are removed from GetObjectVector:
  141 141    if(U::AllVectObj_ptr)
  142 142    {
  143       - //const std::vector<std::shared_ptr<U> > temp;
  144 143    return (*U::AllVectObj_ptr)[context];
  145       - //return std::vector<std::shared_ptr<U> > (0);
  146 144    }
XIOS/dev/dev_trunk_omp/src/policy.cpp
(r1646 → r1661) A ~35-line block of commented-out code (old lines 18-52) is deleted: the disabled DivideCommByTwo::computeMPICommLevel and DivideCommByTwo::divideMPICommLevel implementations, which recursively split an MPI communicator in two per hierarchy level. The file now goes straight from the namespace opening to the DivideAdaptiveComm constructor.
XIOS/dev/dev_trunk_omp/src/policy.hpp
(r1601 → r1661) The matching commented-out declarations are removed from the header: the disabled DivideCommByTwo class (old lines 18-29) and two commented-out members of DivideAdaptiveComm (old lines 51-52, the per-level child_ list and nbChild_ count). The active members (e.g. nbInGroup_, computed_) are unchanged.
XIOS/dev/dev_trunk_omp/src/server.cpp
(r1646 → r1661) Commented-out manual MPI initialization is removed from CServer::initialize: old lines 52-55 (the disabled MPI_Initialized check setting is_MPI_Initialized) and old lines 62-65 (the disabled conditional MPI_Init(NULL, NULL) call). The surviving code keeps the rank declaration and CTimer::get("XIOS").resume().
XIOS/dev/dev_trunk_omp/src/test/test_complete_omp.f90
(r1650 → r1661) The global grid of the OpenMP complete test is shrunk from 1000x1000 to 100x100:
   14  14    TYPE(xios_duration) :: dtime
   15  15    TYPE(xios_context) :: ctx_hdl
   16       - INTEGER,PARAMETER :: ni_glo=1000
       16   + INTEGER,PARAMETER :: ni_glo=100
   17       - INTEGER,PARAMETER :: nj_glo=1000
       17   + INTEGER,PARAMETER :: nj_glo=100
   18  18    INTEGER,PARAMETER :: llm=5
   19  19    DOUBLE PRECISION :: lval(llm)=1
XIOS/dev/dev_trunk_omp/src/tracer.cpp
(r501 → r1661) The tracing hooks are generalised from VampirTrace only to three instrumentation back-ends selected at compile time: VampirTrace (VTRACE), Score-P (SCOREP) and Intel ITAC (ITAC).

Includes: the single "#ifdef VTRACE / #include <vt_user.h> / #endif" block becomes
      #if defined(VTRACE)
      #include <vt_user.h>
      #elif defined(SCOREP)
      #include <scorep/SCOREP_User.h>
      #elif defined(ITAC)
      #include <VT.h>
      #endif
and <string>, <map> and <iostream> are now included. Two file-scope variables are added for the ITAC back-end:
      std::map<std::string,int> regionId ;
      int count=0 ;

traceOn() / traceOff(): besides VT_ON()/VT_OFF(), the functions now call SCOREP_RECORDING_ON()/SCOREP_RECORDING_OFF() under SCOREP and VT_traceon()/VT_traceoff() under ITAC.

traceBegin(const string& name): under SCOREP it calls SCOREP_USER_REGION_BY_NAME_BEGIN(name.c_str(), SCOREP_USER_REGION_TYPE_COMMON); under ITAC it registers the region name with VT_symdef on first use (handle taken from the running counter count and cached in regionId), then calls VT_begin(classhandle) and prints a "VT_begin" debug line. See the condensed sketch after this entry.

traceEnd(const string& name): under SCOREP it calls SCOREP_USER_REGION_BY_NAME_END(name.c_str()); under ITAC it looks the name up in regionId and calls VT_end(classhandle) with a "VT_end" debug line, returning immediately if the name is unknown (the VT_classdef registration placed after that return is unreachable as committed).
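The ITAC branch added above follows a "define once, reuse the handle" idiom: a region name is registered with VT_symdef the first time it is seen and the integer handle is cached for later VT_begin/VT_end calls. A condensed sketch of that logic (the cout diagnostics of the committed version are omitted):

    #include <map>
    #include <string>
    #include <VT.h>   // Intel Trace Analyzer and Collector user API

    static std::map<std::string, int> regionId;  // region name -> ITAC handle
    static int count = 0;                        // next free handle

    void traceBegin(const std::string& name)
    {
      int classhandle;
      std::map<std::string, int>::iterator it = regionId.find(name);
      if (it == regionId.end())
      {
        classhandle = count++;                        // allocate a new handle
        VT_symdef(classhandle, name.c_str(), "XIOS"); // register the region once
        regionId[name] = classhandle;
      }
      else classhandle = it->second;
      VT_begin(classhandle);
    }

    void traceEnd(const std::string& name)
    {
      std::map<std::string, int>::iterator it = regionId.find(name);
      if (it == regionId.end()) return;   // unknown region: nothing to close
      VT_end(it->second);
    }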
XIOS/dev/dev_trunk_omp/src/transformation/domain_algorithm_interpolate.cpp
(r1646 → r1661) ep_lib:: qualification dropped throughout the interpolation weight exchange:
  440      - ep_lib::MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? 0 : 1, 0, &poleComme);
      440  + MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? 0 : 1, 0, &poleComme);
The same substitution is applied to the pole-handling collectives (MPI_Comm_size line 444, MPI_Allgather line 455, MPI_Allgatherv lines 477-478), to the weight redistribution (MPI_Allreduce line 635, the MPI_Isend calls at lines 663, 670 and 677, the MPI_Recv calls at lines 696, 712 and 720, MPI_Waitall line 737) and to the final MPI_Allreduce / MPI_Scan pair that computes globalNbWeight and startIndex (lines 845-846, the Scan call being joined onto one line).
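The pole handling above is a classic split-and-gather pattern: ranks owning pole weights are split into their own sub-communicator, exchange their counts with MPI_Allgather, build displacements, then gather the variable-length arrays with MPI_Allgatherv. A compact stand-alone sketch of the pattern (generic names, an integer payload instead of the index/weight buffers):

    #include <mpi.h>
    #include <vector>

    // Gather variable-length contributions from the ranks that have any,
    // inside a dedicated sub-communicator.
    std::vector<int> gatherFromContributors(std::vector<int>& myData, MPI_Comm comm)
    {
      MPI_Comm subComm = MPI_COMM_NULL;
      int color = myData.empty() ? 0 : 1;              // contributors get color 1
      MPI_Comm_split(comm, color, 0, &subComm);

      std::vector<int> all;
      if (color == 1)
      {
        int size, myCount = (int)myData.size();
        MPI_Comm_size(subComm, &size);

        std::vector<int> recvCount(size, 0), displ(size, 0);
        MPI_Allgather(&myCount, 1, MPI_INT, &recvCount[0], 1, MPI_INT, subComm);
        for (int n = 1; n < size; ++n) displ[n] = displ[n-1] + recvCount[n-1];

        all.resize(displ[size-1] + recvCount[size-1]);
        MPI_Allgatherv(&myData[0], myCount, MPI_INT,
                       &all[0], &recvCount[0], &displ[0], MPI_INT, subComm);
      }
      MPI_Comm_free(&subComm);
      return all;
    }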
XIOS/dev/dev_trunk_omp/src/transformation/generic_algorithm_transformation.cpp
(r1646 → r1661)
  138      - ep_lib::MPI_Allreduce(&distributed,&distributed_glo, 1, MPI_INT, MPI_LOR, client->intraComm) ;
      138  + MPI_Allreduce(&distributed,&distributed_glo, 1, MPI_INT, MPI_LOR, client->intraComm) ;
The same substitution is applied to the second MPI_LOR reduction on the axis branch (line 144) and to the MPI_SUM reduction that decides whether any process needs computeGlobalIndexOnProc (line 240).
XIOS/dev/dev_trunk_omp/src/transformation/grid_transformation.cpp
(r1646 → r1661)
  516      - ep_lib::MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm);
      516  + MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm);
The same substitution is applied to the MPI_Allgatherv calls collecting the send rank/size maps (lines 523-524), to the MPI_Irecv calls posting the receives for global source indexes and destination masks (lines 548-549 and 601), to the matching MPI_Isend calls (lines 584-585 and 637), and to the final MPI_Waitall (line 640).