Changeset 1638 for XIOS/trunk/src/server.cpp
- Timestamp: 01/22/19 16:15:03 (5 years ago)
- File: XIOS/trunk/src/server.cpp (1 edited)
XIOS/trunk/src/server.cpp
--- XIOS/trunk/src/server.cpp (r1587)
+++ XIOS/trunk/src/server.cpp (r1638)
@@ -18,9 +18,9 @@
 namespace xios
 {
-    MPI_Comm CServer::intraComm ;
-    std::list<MPI_Comm> CServer::interCommLeft ;
-    std::list<MPI_Comm> CServer::interCommRight ;
-    std::list<MPI_Comm> CServer::contextInterComms;
-    std::list<MPI_Comm> CServer::contextIntraComms;
+    ep_lib::MPI_Comm CServer::intraComm ;
+    std::list<ep_lib::MPI_Comm> CServer::interCommLeft ;
+    std::list<ep_lib::MPI_Comm> CServer::interCommRight ;
+    std::list<ep_lib::MPI_Comm> CServer::contextInterComms;
+    std::list<ep_lib::MPI_Comm> CServer::contextIntraComms;
     int CServer::serverLevel = 0 ;
     int CServer::nbContexts = 0;
@@ -48,5 +48,5 @@
     {
       int initialized ;
-      MPI_Initialized(&initialized) ;
+      ep_lib::MPI_Initialized(&initialized) ;
       if (initialized) is_MPI_Initialized=true ;
       else is_MPI_Initialized=false ;
@@ -59,5 +59,5 @@
       if (!is_MPI_Initialized)
       {
-        MPI_Init(NULL, NULL);
+        ep_lib::MPI_Init(NULL, NULL);
       }
       CTimer::get("XIOS").resume() ;
@@ -72,11 +72,11 @@
         int myColor ;
         int i,c ;
-        MPI_Comm newComm;
+        ep_lib::MPI_Comm newComm;
 
-        MPI_Comm_size(CXios::globalComm, &size) ;
-        MPI_Comm_rank(CXios::globalComm, &rank_);
+        ep_lib::MPI_Comm_size(CXios::globalComm, &size) ;
+        ep_lib::MPI_Comm_rank(CXios::globalComm, &rank_);
 
         hashAll=new unsigned long[size] ;
-        MPI_Allgather(&hashServer, 1, MPI_LONG, hashAll, 1, MPI_LONG, CXios::globalComm) ;
+        ep_lib::MPI_Allgather(&hashServer, 1, EP_LONG, hashAll, 1, EP_LONG, CXios::globalComm) ;
 
         map<unsigned long, int> colors ;
@@ -174,5 +174,5 @@
         // (2) Create intraComm
         if (serverLevel != 2) myColor=colors[hashServer];
-        MPI_Comm_split(CXios::globalComm, myColor, rank_, &intraComm) ;
+        ep_lib::MPI_Comm_split(CXios::globalComm, myColor, rank_, &intraComm) ;
 
         // (3) Create interComm
@@ -186,10 +186,10 @@
             clientLeader=it->second ;
             int intraCommSize, intraCommRank ;
-            MPI_Comm_size(intraComm,&intraCommSize) ;
-            MPI_Comm_rank(intraComm,&intraCommRank) ;
+            ep_lib::MPI_Comm_size(intraComm,&intraCommSize) ;
+            ep_lib::MPI_Comm_rank(intraComm,&intraCommRank) ;
             info(50)<<"intercommCreate::server (classical mode) "<<rank_<<" intraCommSize : "<<intraCommSize
                     <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ;
 
-            MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ;
+            ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ;
             interCommLeft.push_back(newComm) ;
           }
@@ -207,9 +207,9 @@
             clientLeader=it->second ;
             int intraCommSize, intraCommRank ;
-            MPI_Comm_size(intraComm, &intraCommSize) ;
-            MPI_Comm_rank(intraComm, &intraCommRank) ;
+            ep_lib::MPI_Comm_size(intraComm, &intraCommSize) ;
+            ep_lib::MPI_Comm_rank(intraComm, &intraCommRank) ;
             info(50)<<"intercommCreate::server (server level 1) "<<rank_<<" intraCommSize : "<<intraCommSize
                     <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ;
-            MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ;
+            ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ;
             interCommLeft.push_back(newComm) ;
           }
@@ -219,9 +219,9 @@
           {
             int intraCommSize, intraCommRank ;
-            MPI_Comm_size(intraComm, &intraCommSize) ;
-            MPI_Comm_rank(intraComm, &intraCommRank) ;
+            ep_lib::MPI_Comm_size(intraComm, &intraCommSize) ;
+            ep_lib::MPI_Comm_rank(intraComm, &intraCommRank) ;
             info(50)<<"intercommCreate::client (server level 1) "<<rank_<<" intraCommSize : "<<intraCommSize
                     <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< sndServerGlobalRanks[i]<<endl ;
-            MPI_Intercomm_create(intraComm, 0, CXios::globalComm, sndServerGlobalRanks[i], 1, &newComm) ;
+            ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, sndServerGlobalRanks[i], 1, &newComm) ;
             interCommRight.push_back(newComm) ;
           }
@@ -232,10 +232,10 @@
           clientLeader = leaders[hashString(CXios::xiosCodeId)];
           int intraCommSize, intraCommRank ;
-          MPI_Comm_size(intraComm, &intraCommSize) ;
-          MPI_Comm_rank(intraComm, &intraCommRank) ;
+          ep_lib::MPI_Comm_size(intraComm, &intraCommSize) ;
+          ep_lib::MPI_Comm_rank(intraComm, &intraCommRank) ;
           info(50)<<"intercommCreate::server (server level 2) "<<rank_<<" intraCommSize : "<<intraCommSize
                   <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ;
 
-          MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 1, &newComm) ;
+          ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 1, &newComm) ;
           interCommLeft.push_back(newComm) ;
         }
@@ -253,20 +253,20 @@
 
       CTimer::get("XIOS").resume() ;
-      MPI_Comm localComm;
+      ep_lib::MPI_Comm localComm;
       oasis_get_localcomm(localComm);
-      MPI_Comm_rank(localComm,&rank_) ;
+      ep_lib::MPI_Comm_rank(localComm,&rank_) ;
 
       // (1) Create server intraComm
       if (!CXios::usingServer2)
       {
-        MPI_Comm_dup(localComm, &intraComm);
+        ep_lib::MPI_Comm_dup(localComm, &intraComm);
       }
       else
       {
         int globalRank;
-        MPI_Comm_size(localComm,&size) ;
-        MPI_Comm_rank(CXios::globalComm,&globalRank) ;
+        ep_lib::MPI_Comm_size(localComm,&size) ;
+        ep_lib::MPI_Comm_rank(CXios::globalComm,&globalRank) ;
         srvGlobalRanks = new int[size] ;
-        MPI_Allgather(&globalRank, 1, MPI_INT, srvGlobalRanks, 1, MPI_INT, localComm) ;
+        ep_lib::MPI_Allgather(&globalRank, 1, EP_INT, srvGlobalRanks, 1, EP_INT, localComm) ;
 
         int reqNbProc = size*CXios::ratioServer2/100.;
@@ -276,5 +276,5 @@
               << "It is impossible to dedicate the requested number of processes = "<<reqNbProc
               <<" to secondary server. XIOS will run in the classical server mode."<<endl;
-          MPI_Comm_dup(localComm, &intraComm);
+          ep_lib::MPI_Comm_dup(localComm, &intraComm);
         }
         else
@@ -339,5 +339,5 @@
           }
           if (serverLevel != 2) myColor=0;
-          MPI_Comm_split(localComm, myColor, rank_, &intraComm) ;
+          ep_lib::MPI_Comm_split(localComm, myColor, rank_, &intraComm) ;
         }
       }
@@ -348,7 +348,7 @@
       vector<string>::iterator it ;
 
-      MPI_Comm newComm ;
+      ep_lib::MPI_Comm newComm ;
       int globalRank ;
-      MPI_Comm_rank(CXios::globalComm,&globalRank);
+      ep_lib::MPI_Comm_rank(CXios::globalComm,&globalRank);
 
       // (2) Create interComms with models
@@ -359,5 +359,5 @@
         {
           interCommLeft.push_back(newComm) ;
-          if (rank_==0) MPI_Send(&globalRank,1,MPI_INT,0,0,newComm) ;
+          if (rank_==0) ep_lib::MPI_Send(&globalRank,1,EP_INT,0,0,newComm) ;
         }
       }
@@ -365,6 +365,6 @@
       // (3) Create interComms between primary and secondary servers
       int intraCommSize, intraCommRank ;
-      MPI_Comm_size(intraComm,&intraCommSize) ;
-      MPI_Comm_rank(intraComm, &intraCommRank) ;
+      ep_lib::MPI_Comm_size(intraComm,&intraCommSize) ;
+      ep_lib::MPI_Comm_rank(intraComm, &intraCommRank) ;
 
       if (serverLevel == 1)
@@ -375,5 +375,5 @@
           info(50)<<"intercommCreate::client (server level 1) "<<globalRank<<" intraCommSize : "<<intraCommSize
                   <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< srvSndLeader<<endl ;
-          MPI_Intercomm_create(intraComm, 0, CXios::globalComm, srvSndLeader, 0, &newComm) ;
+          ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, srvSndLeader, 0, &newComm) ;
           interCommRight.push_back(newComm) ;
         }
@@ -383,5 +383,5 @@
         info(50)<<"intercommCreate::server (server level 2)"<<globalRank<<" intraCommSize : "<<intraCommSize
                 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< srvGlobalRanks[0] <<endl ;
-        MPI_Intercomm_create(intraComm, 0, CXios::globalComm, srvGlobalRanks[0], 0, &newComm) ;
+        ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, srvGlobalRanks[0], 0, &newComm) ;
         interCommLeft.push_back(newComm) ;
       }
@@ -393,5 +393,5 @@
 
 
-      MPI_Comm_rank(intraComm, &rank) ;
+      ep_lib::MPI_Comm_rank(intraComm, &rank) ;
       if (rank==0) isRoot=true;
       else isRoot=false;
@@ -406,9 +406,9 @@
       delete eventScheduler ;
 
-      for (std::list<MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); it++)
-        MPI_Comm_free(&(*it));
+      for (std::list<ep_lib::MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); it++)
+        ep_lib::MPI_Comm_free(&(*it));
 
-      for (std::list<MPI_Comm>::iterator it = contextIntraComms.begin(); it != contextIntraComms.end(); it++)
-        MPI_Comm_free(&(*it));
+      for (std::list<ep_lib::MPI_Comm>::iterator it = contextIntraComms.begin(); it != contextIntraComms.end(); it++)
+        ep_lib::MPI_Comm_free(&(*it));
 
       // for (std::list<MPI_Comm>::iterator it = interComm.begin(); it != interComm.end(); it++)
@@ -418,13 +418,13 @@
       //   MPI_Comm_free(&(*it));
 
-      for (std::list<MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++)
-        MPI_Comm_free(&(*it));
+      for (std::list<ep_lib::MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++)
+        ep_lib::MPI_Comm_free(&(*it));
 
-      MPI_Comm_free(&intraComm);
+      ep_lib::MPI_Comm_free(&intraComm);
 
       if (!is_MPI_Initialized)
       {
         if (CXios::usingOasis) oasis_finalize();
-        else MPI_Finalize() ;
+        else ep_lib::MPI_Finalize() ;
       }
       report(0)<<"Performance report : Time spent for XIOS : "<<CTimer::get("XIOS server").getCumulatedTime()<<endl ;
@@ -465,5 +465,5 @@
     void CServer::listenFinalize(void)
     {
-      list<MPI_Comm>::iterator it, itr;
+      list<ep_lib::MPI_Comm>::iterator it, itr;
       int msg ;
       int flag ;
@@ -471,18 +471,18 @@
       for(it=interCommLeft.begin();it!=interCommLeft.end();it++)
       {
-        MPI_Status status ;
+        ep_lib::MPI_Status status ;
         traceOff() ;
-        MPI_Iprobe(0,0,*it,&flag,&status) ;
+        ep_lib::MPI_Iprobe(0,0,*it,&flag,&status) ;
         traceOn() ;
         if (flag==true)
         {
-          MPI_Recv(&msg,1,MPI_INT,0,0,*it,&status) ;
+          ep_lib::MPI_Recv(&msg,1,EP_INT,0,0,*it,&status) ;
           info(20)<<" CServer : Receive client finalize"<<endl ;
           // Sending server finalize message to secondary servers (if any)
           for(itr=interCommRight.begin();itr!=interCommRight.end();itr++)
           {
-            MPI_Send(&msg,1,MPI_INT,0,0,*itr) ;
+            ep_lib::MPI_Send(&msg,1,EP_INT,0,0,*itr) ;
           }
-          MPI_Comm_free(&(*it));
+          ep_lib::MPI_Comm_free(&(*it));
           interCommLeft.erase(it) ;
           break ;
@@ -493,10 +493,10 @@
         {
           int i,size ;
-          MPI_Comm_size(intraComm,&size) ;
-          MPI_Request* requests= new MPI_Request[size-1] ;
-          MPI_Status* status= new MPI_Status[size-1] ;
+          ep_lib::MPI_Comm_size(intraComm,&size) ;
+          ep_lib::MPI_Request* requests= new ep_lib::MPI_Request[size-1] ;
+          ep_lib::MPI_Status* status= new ep_lib::MPI_Status[size-1] ;
 
-          for(int i=1;i<size;i++) MPI_Isend(&msg,1,MPI_INT,i,4,intraComm,&requests[i-1]) ;
-          MPI_Waitall(size-1,requests,status) ;
+          for(int i=1;i<size;i++) ep_lib::MPI_Isend(&msg,1,EP_INT,i,4,intraComm,&requests[i-1]) ;
+          ep_lib::MPI_Waitall(size-1,requests,status) ;
 
           finished=true ;
@@ -510,13 +510,13 @@
     {
       int flag ;
-      MPI_Status status ;
+      ep_lib::MPI_Status status ;
       int msg ;
 
       traceOff() ;
-      MPI_Iprobe(0,4,intraComm, &flag, &status) ;
+      ep_lib::MPI_Iprobe(0,4,intraComm, &flag, &status) ;
       traceOn() ;
       if (flag==true)
       {
-        MPI_Recv(&msg,1,MPI_INT,0,4,intraComm,&status) ;
+        ep_lib::MPI_Recv(&msg,1,EP_INT,0,4,intraComm,&status) ;
         finished=true ;
       }
@@ -534,18 +534,18 @@
     {
       int flag ;
-      MPI_Status status ;
-      list<MPI_Comm>::iterator it;
+      ep_lib::MPI_Status status ;
+      list<ep_lib::MPI_Comm>::iterator it;
       int msg ;
       static int nbCompound=0 ;
       int size ;
       static bool sent=false ;
-      static MPI_Request* allRequests ;
-      static MPI_Status* allStatus ;
+      static ep_lib::MPI_Request* allRequests ;
+      static ep_lib::MPI_Status* allStatus ;
 
 
       if (sent)
       {
-        MPI_Comm_size(intraComm,&size) ;
-        MPI_Testall(size,allRequests, &flag, allStatus) ;
+        ep_lib::MPI_Comm_size(intraComm,&size) ;
+        ep_lib::MPI_Testall(size,allRequests, &flag, allStatus) ;
         if (flag==true)
         {
@@ -559,22 +559,22 @@
       for(it=interCommLeft.begin();it!=interCommLeft.end();it++)
       {
-        MPI_Status status ;
+        ep_lib::MPI_Status status ;
         traceOff() ;
-        MPI_Iprobe(0,5,*it,&flag,&status) ; // tags oasis_endded = 5
+        ep_lib::MPI_Iprobe(0,5,*it,&flag,&status) ; // tags oasis_endded = 5
         traceOn() ;
         if (flag==true)
         {
-          MPI_Recv(&msg,1,MPI_INT,0,5,*it,&status) ; // tags oasis_endded = 5
+          ep_lib::MPI_Recv(&msg,1,EP_INT,0,5,*it,&status) ; // tags oasis_endded = 5
           nbCompound++ ;
           if (nbCompound==interCommLeft.size())
           {
-            for (std::list<MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++)
+            for (std::list<ep_lib::MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++)
            {
-              MPI_Send(&msg,1,MPI_INT,0,5,*it) ; // tags oasis_endded = 5
+              ep_lib::MPI_Send(&msg,1,EP_INT,0,5,*it) ; // tags oasis_endded = 5
            }
-            MPI_Comm_size(intraComm,&size) ;
-            allRequests= new MPI_Request[size] ;
-            allStatus= new MPI_Status[size] ;
-            for(int i=0;i<size;i++) MPI_Isend(&msg,1,MPI_INT,i,5,intraComm,&allRequests[i]) ; // tags oasis_endded = 5
+            ep_lib::MPI_Comm_size(intraComm,&size) ;
+            allRequests= new ep_lib::MPI_Request[size] ;
+            allStatus= new ep_lib::MPI_Status[size] ;
+            for(int i=0;i<size;i++) ep_lib::MPI_Isend(&msg,1,EP_INT,i,5,intraComm,&allRequests[i]) ; // tags oasis_endded = 5
             sent=true ;
           }
@@ -590,5 +590,5 @@
     {
       int flag ;
-      MPI_Status status ;
+      ep_lib::MPI_Status status ;
       const int root=0 ;
       int msg ;
@@ -607,9 +607,9 @@
 
       traceOff() ;
-      MPI_Iprobe(root,5,intraComm, &flag, &status) ;
+      ep_lib::MPI_Iprobe(root,5,intraComm, &flag, &status) ;
       traceOn() ;
       if (flag==true)
       {
-        MPI_Recv(&msg,1,MPI_INT,root,5,intraComm,&status) ; // tags oasis_endded = 5
+        ep_lib::MPI_Recv(&msg,1,EP_INT,root,5,intraComm,&status) ; // tags oasis_endded = 5
         boost::hash<string> hashString;
         size_t hashId = hashString("oasis_enddef");
@@ -626,8 +626,8 @@
     {
 
-      MPI_Status status ;
+      ep_lib::MPI_Status status ;
       int flag ;
       static char* buffer ;
-      static MPI_Request request ;
+      static ep_lib::MPI_Request request ;
       static bool recept=false ;
       int rank ;
@@ -637,12 +637,20 @@
       {
         traceOff() ;
-        MPI_Iprobe(MPI_ANY_SOURCE,1,CXios::globalComm, &flag, &status) ;
+        #ifdef _usingMPI
+        ep_lib::MPI_Iprobe(MPI_ANY_SOURCE,1,CXios::globalComm, &flag, &status) ;
+        #elif _usingEP
+        ep_lib::MPI_Iprobe(-2,1,CXios::globalComm, &flag, &status) ;
+        #endif
         traceOn() ;
         if (flag==true)
         {
+          #ifdef _usingMPI
           rank=status.MPI_SOURCE ;
-          MPI_Get_count(&status,MPI_CHAR,&count) ;
+          #elif _usingEP
+          rank=status.ep_src ;
+          #endif
+          ep_lib::MPI_Get_count(&status,EP_CHAR,&count) ;
           buffer=new char[count] ;
-          MPI_Irecv((void*)buffer,count,MPI_CHAR,rank,1,CXios::globalComm,&request) ;
+          ep_lib::MPI_Irecv((void*)buffer,count,EP_CHAR,rank,1,CXios::globalComm,&request) ;
           recept=true ;
         }
@@ -651,10 +659,14 @@
       {
         traceOff() ;
-        MPI_Test(&request,&flag,&status) ;
+        ep_lib::MPI_Test(&request,&flag,&status) ;
         traceOn() ;
         if (flag==true)
         {
+          #ifdef _usingMPI
           rank=status.MPI_SOURCE ;
-          MPI_Get_count(&status,MPI_CHAR,&count) ;
+          #elif _usingEP
+          rank=status.ep_src ;
+          #endif
+          ep_lib::MPI_Get_count(&status,EP_CHAR,&count) ;
           recvContextMessage((void*)buffer,count) ;
           delete [] buffer ;
@@ -689,9 +701,9 @@
     {
       int size ;
-      MPI_Comm_size(intraComm,&size) ;
+      ep_lib::MPI_Comm_size(intraComm,&size) ;
       // MPI_Request* requests= new MPI_Request[size-1] ;
       // MPI_Status* status= new MPI_Status[size-1] ;
-      MPI_Request* requests= new MPI_Request[size] ;
-      MPI_Status* status= new MPI_Status[size] ;
+      ep_lib::MPI_Request* requests= new ep_lib::MPI_Request[size] ;
+      ep_lib::MPI_Status* status= new ep_lib::MPI_Status[size] ;
 
       CMessage msg ;
@@ -705,5 +717,5 @@
       for(int i=0; i<size; i++)
       {
-        MPI_Isend(sendBuff,sendBuffer.count(),MPI_CHAR,i,2,intraComm,&requests[i]) ;
+        ep_lib::MPI_Isend(sendBuff,sendBuffer.count(),EP_CHAR,i,2,intraComm,&requests[i]) ;
       }
 
@@ -717,12 +729,12 @@
     void CServer::listenRootContext(void)
     {
-      MPI_Status status ;
+      ep_lib::MPI_Status status ;
       int flag ;
       static std::vector<void*> buffers;
-      static std::vector<MPI_Request> requests ;
+      static std::vector<ep_lib::MPI_Request> requests ;
       static std::vector<int> counts ;
       static std::vector<bool> isEventRegistered ;
       static std::vector<bool> isEventQueued ;
-      MPI_Request request;
+      ep_lib::MPI_Request request;
 
       int rank ;
@@ -733,13 +745,13 @@
       // (1) Receive context id from the root, save it into a buffer
       traceOff() ;
-      MPI_Iprobe(root,2,intraComm, &flag, &status) ;
+      ep_lib::MPI_Iprobe(root,2,intraComm, &flag, &status) ;
       traceOn() ;
       if (flag==true)
       {
        counts.push_back(0);
-        MPI_Get_count(&status,MPI_CHAR,&(counts.back())) ;
+        ep_lib::MPI_Get_count(&status,EP_CHAR,&(counts.back())) ;
        buffers.push_back(new char[counts.back()]) ;
        requests.push_back(request);
-        MPI_Irecv((void*)(buffers.back()),counts.back(),MPI_CHAR,root,2,intraComm,&(requests.back())) ;
+        ep_lib::MPI_Irecv((void*)(buffers.back()),counts.back(),EP_CHAR,root,2,intraComm,&(requests.back())) ;
        isEventRegistered.push_back(false);
        isEventQueued.push_back(false);
@@ -750,5 +762,5 @@
       {
        // (2) If context id is received, register an event
-        MPI_Test(&requests[ctxNb],&flag,&status) ;
+        ep_lib::MPI_Test(&requests[ctxNb],&flag,&status) ;
        if (flag==true && !isEventRegistered[ctxNb])
        {
@@ -787,12 +799,12 @@
       // (1) create interComm (with a client)
       // (2) initialize client and server (contextClient and contextServer)
-      MPI_Comm inter;
+      ep_lib::MPI_Comm inter;
       if (serverLevel < 2)
       {
-        MPI_Comm contextInterComm;
-        MPI_Intercomm_create(intraComm, 0, CXios::globalComm, leaderRank, 10+leaderRank, &contextInterComm);
-        MPI_Intercomm_merge(contextInterComm,1,&inter);
-        MPI_Barrier(inter);
-        MPI_Comm_free(&inter);
+        ep_lib::MPI_Comm contextInterComm;
+        ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, leaderRank, 10+leaderRank, &contextInterComm);
+        ep_lib::MPI_Intercomm_merge(contextInterComm,1,&inter);
+        ep_lib::MPI_Barrier(inter);
+        ep_lib::MPI_Comm_free(&inter);
         context->initServer(intraComm,contextInterComm);
         contextInterComms.push_back(contextInterComm);
@@ -807,5 +819,5 @@
       else if (serverLevel == 2)
       {
-        MPI_Comm_dup(interCommLeft.front(), &inter);
+        ep_lib::MPI_Comm_dup(interCommLeft.front(), &inter);
         contextInterComms.push_back(inter);
         context->initServer(intraComm, contextInterComms.back());
@@ -818,6 +830,6 @@
       {
         int i = 0, size;
-        MPI_Comm_size(intraComm, &size) ;
-        for (std::list<MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++, ++i)
+        ep_lib::MPI_Comm_size(intraComm, &size) ;
+        for (std::list<ep_lib::MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++, ++i)
         {
           StdString str = contextId +"_server_" + boost::lexical_cast<string>(i);
@@ -829,8 +841,8 @@
           CBufferOut buffer(buff,messageSize) ;
           buffer<<msg ;
-          MPI_Send(buff, buffer.count(), MPI_CHAR, sndServerGlobalRanks[i], 1, CXios::globalComm) ;
-          MPI_Comm_dup(*it, &inter);
+          ep_lib::MPI_Send(buff, buffer.count(), EP_CHAR, sndServerGlobalRanks[i], 1, CXios::globalComm) ;
+          ep_lib::MPI_Comm_dup(*it, &inter);
           contextInterComms.push_back(inter);
-          MPI_Comm_dup(intraComm, &inter);
+          ep_lib::MPI_Comm_dup(intraComm, &inter);
           contextIntraComms.push_back(inter);
           context->initClient(contextIntraComms.back(), contextInterComms.back()) ;
@@ -862,5 +874,5 @@
     {
       int rank;
-      MPI_Comm_rank(intraComm,&rank);
+      ep_lib::MPI_Comm_rank(intraComm,&rank);
       return rank;
     }
@@ -885,5 +897,5 @@
       int size = 0;
       int id;
-      MPI_Comm_size(CXios::globalComm, &size);
+      ep_lib::MPI_Comm_size(CXios::globalComm, &size);
       while (size)
       {
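The change is almost entirely mechanical: every MPI type, constant, and entry point used by server.cpp is routed through the ep_lib namespace, with EP_INT, EP_LONG and EP_CHAR replacing the MPI_ datatype constants, so the same source can be built against plain MPI or against the endpoint (EP) threading layer. The ep_lib headers themselves are not part of this changeset; the following is a minimal sketch of what the pass-through side of such a wrapper can look like in a pure-MPI build. Apart from the names ep_lib::MPI_*, EP_INT, EP_LONG and EP_CHAR, which appear in the diff, everything here is an illustrative assumption, not the actual XIOS implementation.

// Sketch only: a hypothetical pass-through ep_lib layer for a pure-MPI
// build (_usingMPI defined).
#include <mpi.h>

#define EP_INT  MPI_INT   // assumption: EP_* constants alias the MPI datatypes
#define EP_LONG MPI_LONG
#define EP_CHAR MPI_CHAR

namespace ep_lib
{
  // Re-export the MPI handle types so declarations such as
  // ep_lib::MPI_Comm or std::list<ep_lib::MPI_Comm> compile unchanged.
  typedef ::MPI_Comm    MPI_Comm;
  typedef ::MPI_Status  MPI_Status;
  typedef ::MPI_Request MPI_Request;

  // Thin forwarders for a few of the calls touched by the changeset;
  // each simply delegates to the global MPI function of the same name.
  inline int MPI_Comm_size(MPI_Comm comm, int* size)    { return ::MPI_Comm_size(comm, size); }
  inline int MPI_Comm_rank(MPI_Comm comm, int* rank)    { return ::MPI_Comm_rank(comm, rank); }
  inline int MPI_Comm_dup(MPI_Comm comm, MPI_Comm* out) { return ::MPI_Comm_dup(comm, out); }
  inline int MPI_Comm_free(MPI_Comm* comm)              { return ::MPI_Comm_free(comm); }
  inline int MPI_Iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status)
  { return ::MPI_Iprobe(source, tag, comm, flag, status); }
}

With such a layer in place, a call like ep_lib::MPI_Comm_rank(intraComm,&rank_) compiles down to a direct MPI call in MPI builds, while an EP build can substitute its own definitions of the same names.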
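The only non-mechanical edits are in listenContext(): under the EP layer the any-source wildcard is the literal -2 rather than MPI_ANY_SOURCE, and the message source is read from status.ep_src rather than status.MPI_SOURCE, hence the #ifdef _usingMPI / #elif _usingEP blocks in the diff. A small pair of helpers could confine those two differences to one place instead of repeating the preprocessor blocks at every probe site. This is a sketch of that idea, not code from the changeset:

// Hypothetical helpers (not in r1638) centralizing the two MPI/EP
// differences visible in the diff: the any-source wildcard and the
// source-rank field of the status object.
namespace ep_lib
{
#ifdef _usingMPI
  const int ANY_SOURCE = MPI_ANY_SOURCE;  // MPI's own wildcard
  inline int source_rank(const MPI_Status& s) { return s.MPI_SOURCE; }
#elif defined(_usingEP)
  const int ANY_SOURCE = -2;              // EP wildcard value used in the diff
  inline int source_rank(const MPI_Status& s) { return s.ep_src; }
#endif
}

// listenContext() could then probe uniformly, with no #ifdef at the call site:
//   ep_lib::MPI_Iprobe(ep_lib::ANY_SOURCE, 1, CXios::globalComm, &flag, &status);
//   if (flag) rank = ep_lib::source_rank(status);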