Changeset 1601 for XIOS/dev/dev_trunk_omp/src/server.cpp
Timestamp: 11/19/18 15:52:54 (5 years ago)
Files: 1 edited
Legend:
- Unmodified: no prefix
- Added: prefixed with +
- Removed: prefixed with -
XIOS/dev/dev_trunk_omp/src/server.cpp
r1587 → r1601

 #include "event_scheduler.hpp"
 #include "string_tools.hpp"
+using namespace ep_lib;

 namespace xios
…
 void CServer::initialize(void)
 {
-  int initialized ;
-  MPI_Initialized(&initialized) ;
-  if (initialized) is_MPI_Initialized=true ;
-  else is_MPI_Initialized=false ;
+  //int initialized ;
+  //MPI_Initialized(&initialized) ;
+  //if (initialized) is_MPI_Initialized=true ;
+  //else is_MPI_Initialized=false ;
   int rank ;
…
 {

-  if (!is_MPI_Initialized)
-  {
-    MPI_Init(NULL, NULL);
-  }
+  //if (!is_MPI_Initialized)
+  //{
+  //  MPI_Init(NULL, NULL);
+  //}
   CTimer::get("XIOS").resume() ;
…
 if (serverLevel==2)
 {
+  #pragma omp critical (_output)
   info(50)<<"The number of secondary server pools is "<< sndServerGlobalRanks.size() <<endl ;
   for (i=0; i<sndServerGlobalRanks.size(); i++)
…
   MPI_Comm_size(intraComm,&intraCommSize) ;
   MPI_Comm_rank(intraComm,&intraCommRank) ;
-  info(50)<<"intercommCreate::server (classical mode) "<<rank_<<" intraCommSize : "<<intraCommSize
-          <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ;

   MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ;
+  #pragma omp critical (_output)
+  {
+    info(50)<<"intercommCreate::server (classical mode) "<<rank_<<" intraCommSize : "<<intraCommSize
+            <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ;
+  }
+
   interCommLeft.push_back(newComm) ;
 }
…
   MPI_Comm_size(intraComm, &intraCommSize) ;
   MPI_Comm_rank(intraComm, &intraCommRank) ;
-  info(50)<<"intercommCreate::server (server level 1) "<<rank_<<" intraCommSize : "<<intraCommSize
-          <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ;
+
   MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ;
+  #pragma omp critical (_output)
+  {
+    info(50)<<"intercommCreate::server (server level 1) "<<rank_<<" intraCommSize : "<<intraCommSize
+            <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ;
+  }
   interCommLeft.push_back(newComm) ;
 }
…
   MPI_Comm_size(intraComm, &intraCommSize) ;
   MPI_Comm_rank(intraComm, &intraCommRank) ;
-  info(50)<<"intercommCreate::client (server level 1) "<<rank_<<" intraCommSize : "<<intraCommSize
-          <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< sndServerGlobalRanks[i]<<endl ;
+
   MPI_Intercomm_create(intraComm, 0, CXios::globalComm, sndServerGlobalRanks[i], 1, &newComm) ;
+  #pragma omp critical (_output)
+  {
+    info(50)<<"intercommCreate::client (server level 1) "<<rank_<<" intraCommSize : "<<intraCommSize
+            <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< sndServerGlobalRanks[i]<<endl ;
+  }
   interCommRight.push_back(newComm) ;
 }
…
   MPI_Comm_size(intraComm, &intraCommSize) ;
   MPI_Comm_rank(intraComm, &intraCommRank) ;
-  info(50)<<"intercommCreate::server (server level 2) "<<rank_<<" intraCommSize : "<<intraCommSize
-          <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ;

   MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 1, &newComm) ;
+  #pragma omp critical (_output)
+  {
+    info(50)<<"intercommCreate::server (server level 2) "<<rank_<<" intraCommSize : "<<intraCommSize
+            <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ;
+  }
+
   interCommLeft.push_back(newComm) ;
 }
…
 {
   if (CXios::usingOasis) oasis_finalize();
-  else MPI_Finalize() ;
+  //else MPI_Finalize() ;
 }
+
 report(0)<<"Performance report : Time spent for XIOS : "<<CTimer::get("XIOS server").getCumulatedTime()<<endl ;
 report(0)<<"Performance report : Time spent in processing events : "<<CTimer::get("Process events").getCumulatedTime()<<endl ;
…
 {
   traceOff() ;
-  MPI_Iprobe( MPI_ANY_SOURCE,1,CXios::globalComm, &flag, &status) ;
+  MPI_Iprobe(-2,1,CXios::globalComm, &flag, &status) ;
   traceOn() ;
   if (flag==true)
   {
+#ifdef _usingMPI
     rank=status.MPI_SOURCE ;
+#elif _usingEP
+    rank=status.ep_src;
+#endif
     MPI_Get_count(&status,MPI_CHAR,&count) ;
     buffer=new char[count] ;
…
   if (flag==true)
   {
+#ifdef _usingMPI
     rank=status.MPI_SOURCE ;
+#elif _usingEP
+    rank=status.ep_src;
+#endif
     MPI_Get_count(&status,MPI_CHAR,&count) ;
     recvContextMessage((void*)buffer,count) ;
…
   MPI_Get_count(&status,MPI_CHAR,&(counts.back())) ;
   buffers.push_back(new char[counts.back()]) ;
+  MPI_Irecv((void*)(buffers.back()),counts.back(),MPI_CHAR,root,2,intraComm,&request) ;
   requests.push_back(request);
-  MPI_Irecv((void*)(buffers.back()),counts.back(),MPI_CHAR,root,2,intraComm,&(requests.back())) ;
   isEventRegistered.push_back(false);
   isEventQueued.push_back(false);
…
 {
   // (2) If context id is received, register an event
-  MPI_Test(&requests[ctxNb],&flag,&status) ;
+  if(!isEventRegistered[ctxNb]) MPI_Test(&requests[ctxNb],&flag,&status) ;
   if (flag==true && !isEventRegistered[ctxNb])
   {
…
   MPI_Intercomm_merge(contextInterComm,1,&inter);
   MPI_Barrier(inter);
-  MPI_Comm_free(&inter);
   context->initServer(intraComm,contextInterComm);
   contextInterComms.push_back(contextInterComm);

+  MPI_Comm_free(&inter);
 }
 // Secondary server: create communication channel with a primary server
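The recurring change in this changeset wraps the info(50) diagnostics in a named OpenMP critical section, #pragma omp critical (_output), so that when several OpenMP threads act as MPI endpoints (the ep_lib layer brought in by "using namespace ep_lib;") their log lines are not interleaved. Below is a minimal stand-alone sketch of that pattern; std::cout stands in for XIOS's info() stream and the OpenMP thread id/count stand in for the intra-communicator rank/size, both placeholders for illustration only.

// Sketch of the named critical-section logging pattern used in this changeset
// (compile with e.g. g++ -fopenmp). std::cout is a stand-in for info(50);
// thread id/count are stand-ins for intraCommRank/intraCommSize.
#include <iostream>
#include <omp.h>

int main()
{
  #pragma omp parallel
  {
    int intraCommRank = omp_get_thread_num();   // stand-in for MPI_Comm_rank result
    int intraCommSize = omp_get_num_threads();  // stand-in for MPI_Comm_size result

    // Every writer uses the same named critical section, so only one thread at
    // a time touches the shared output stream and lines are never interleaved.
    #pragma omp critical (_output)
    {
      std::cout << "intercommCreate::server " << intraCommRank
                << " intraCommSize : " << intraCommSize << std::endl;
    }
  }
  return 0;
}

The other edits appear to follow from the same endpoint-MPI layer: the message source is read from status.MPI_SOURCE only under #ifdef _usingMPI and from status.ep_src under _usingEP, MPI_ANY_SOURCE is replaced by the literal -2 (presumably the wildcard value expected by the ep_lib wrapper), and the MPI_Init/MPI_Finalize calls are commented out, presumably because initialization and finalization are handled outside this routine in the threaded build.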