- Timestamp:
- 02/23/17 15:51:56 (7 years ago)
- Location:
- XIOS/dev/branch_yushan
- Files:
-
- 15 edited
Legend:
- Unmodified
- Added
- Removed
-
XIOS/dev/branch_yushan/bld.cfg
r1058 r1060 36 36 #test_remap.exe 37 37 #bld::target test_new_features.exe test_unstruct_complete.exe 38 bld::target test_client.exe test_complete.exe38 bld::target test_client.exe #test_complete.exe 39 39 bld::exe_dep 40 40 -
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_create.cpp
r1037 r1060 185 185 } 186 186 187 printf("ep_lib::MPI_Comm_create_endpoints() OK from void*\n");187 //printf("ep_lib::MPI_Comm_create_endpoints() OK from void*\n"); 188 188 189 189 return 0; -
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_fortran.cpp
r1056 r1060 32 32 { 33 33 fc_comm_map.insert(std::make_pair( std::make_pair( fint, omp_get_thread_num()) , comm)); 34 printf("EP_Comm_c2f : MAP insert: %d, %d, %p\n", fint, omp_get_thread_num(), &comm);34 //printf("EP_Comm_c2f : MAP insert: %d, %d, %p\n", fint, omp_get_thread_num(), &comm); 35 35 } 36 36 … … 52 52 MPI_Comm comm_ptr; 53 53 comm_ptr = it->second; 54 printf("EP_Comm_f2c : MAP find: %d, %d, %p\n", it->first.first, it->first.second, &comm_ptr);54 //printf("EP_Comm_f2c : MAP find: %d, %d, %p\n", it->first.first, it->first.second, &comm_ptr); 55 55 return comm_ptr; 56 56 } -
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_intercomm.cpp
r1053 r1060 56 56 if( leader_ranks[1] * leader_ranks[4] == 1) 57 57 { 58 if(ep_rank == local_leader) printf("calling MPI_Intercomm_create_unique_leader\n");58 if(ep_rank == local_leader) Debug("calling MPI_Intercomm_create_unique_leader\n"); 59 59 local_comm.ep_comm_ptr->comm_label = -99; 60 60 … … 64 64 { 65 65 // change leader 66 if(ep_rank == local_leader) printf("calling MPI_Intercomm_create_from_world\n");66 if(ep_rank == local_leader) Debug("calling MPI_Intercomm_create_from_world\n"); 67 67 68 68 int new_local_leader; … … 179 179 } 180 180 181 if(ep_rank == local_leader) printf("calling MPI_Intercomm_create_kernel\n");181 if(ep_rank == local_leader) Debug("calling MPI_Intercomm_create_kernel\n"); 182 182 183 183 return MPI_Intercomm_create_kernel(local_comm, local_leader, peer_comm, remote_leader, tag, newintercomm); -
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_lib.cpp
r1037 r1060 52 52 if( comm.ep_comm_ptr->intercomm->remote_rank_map->at(i).first == inter_rank ) 53 53 { 54 printf("get_ep_rank for intercomm, ep_rank_loc = %d, mpi_rank = %d => ep_src = %d\n", ep_rank_loc, mpi_rank, i);54 //printf("get_ep_rank for intercomm, ep_rank_loc = %d, mpi_rank = %d => ep_src = %d\n", ep_rank_loc, mpi_rank, i); 55 55 return i; 56 56 } -
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_probe.cpp
r1053 r1060 36 36 37 37 #pragma omp critical (_query) 38 if( comm.ep_comm_ptr->message_queue->size() > 0)38 if(!comm.ep_comm_ptr->message_queue->empty()) 39 39 { 40 for(Message_list::iterator it = comm.ep_comm_ptr->message_queue->begin(); it!= comm.ep_comm_ptr->message_queue->end(); it++)40 for(Message_list::iterator it = comm.ep_comm_ptr->message_queue->begin(); it!= comm.ep_comm_ptr->message_queue->end(); ++it) 41 41 { 42 42 bool src_matched = src<0? true: it->ep_src == src; … … 107 107 108 108 #pragma omp critical (_query) 109 if( comm.ep_comm_ptr->message_queue->size() > 0)109 if(! comm.ep_comm_ptr->message_queue->empty()) 110 110 { 111 for(Message_list::iterator it = comm.ep_comm_ptr->message_queue->begin(); it!= comm.ep_comm_ptr->message_queue->end(); it++)111 for(Message_list::iterator it = comm.ep_comm_ptr->message_queue->begin(); it!= comm.ep_comm_ptr->message_queue->end(); ++it) 112 112 { 113 113 bool src_matched = src<0? true: it->ep_src == src; -
XIOS/dev/branch_yushan/inputs/iodef.xml
r1056 r1060 12 12 13 13 14 <file_definition type=" one_file" par_access="collective" output_freq="6h" output_level="10" enabled=".TRUE.">14 <file_definition type="multiple_file" par_access="collective" output_freq="6h" output_level="10" enabled=".TRUE."> 15 15 <file id="output" name="output"> 16 16 <field field_ref="field_A_zoom" name="field_A" /> -
XIOS/dev/branch_yushan/src/client.cpp
r1053 r1060 24 24 StdOFStream CClient::m_errorStream; 25 25 26 void CClient::initialize(const string& codeId, MPI_Comm& localComm,MPI_Comm& returnComm)26 void CClient::initialize(const string& codeId, MPI_Comm& localComm, MPI_Comm& returnComm) 27 27 { 28 28 int initialized ; … … 161 161 162 162 if (!CXios::isServer) 163 { 164 165 166 163 { 167 164 int size,rank,globalRank ; 168 165 size_t message_size ; … … 191 188 delete [] buff ; 192 189 193 printf("====== Client: begin context_init \n");190 //printf("====== Client: begin context_init \n"); 194 191 195 192 … … 197 194 info(10)<<"Register new Context : "<<id<<endl ; 198 195 199 cout<<"Register new Context : "<<id<<endl ;196 //cout<<"Register new Context : "<<id<<endl ; 200 197 201 198 202 MPI_Comm inter ;203 MPI_Intercomm_merge(contextInterComm,0,&inter) ;204 MPI_Barrier(inter) ;199 // MPI_Comm inter ; 200 // MPI_Intercomm_merge(contextInterComm,0,&inter) ; 201 // MPI_Barrier(inter) ; 205 202 206 203 207 204 context->initClient(contextComm,contextInterComm) ; 208 205 209 printf("====== Client: context_init OK\n");206 //printf("====== Client: context_init OK\n"); 210 207 211 208 contextInterComms.push_back(contextInterComm); 212 MPI_Comm_free(&inter);209 // MPI_Comm_free(&inter); 213 210 } 214 211 else … … 252 249 for (std::list<MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); it++) 253 250 MPI_Comm_free(&(*it)); 251 254 252 MPI_Comm_free(&interComm); 255 253 MPI_Comm_free(&intraComm); -
XIOS/dev/branch_yushan/src/context_client.cpp
r1053 r1060 199 199 map<int,CClientBuffer*>::iterator itBuff; 200 200 bool pending = false; 201 if(! buffers.empty()) 201 202 for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++) pending |= itBuff->second->checkBuffer(); 202 203 return pending; … … 307 308 checkBuffers(); 308 309 stop = false; 309 for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++) stop |= itBuff->second->hasPendingRequest();310 for (itBuff = buffers.begin(); itBuff != buffers.end(); ++itBuff) stop |= itBuff->second->hasPendingRequest(); 310 311 } 311 312 CTimer::get("Blocking time").suspend(); -
XIOS/dev/branch_yushan/src/context_server.cpp
r1053 r1060 122 122 123 123 //printf("enter checkPendingRequest\n"); 124 124 if(!pendingRequest.empty()) 125 125 for(it=pendingRequest.begin();it!=pendingRequest.end();it++) 126 126 { -
XIOS/dev/branch_yushan/src/cxios.cpp
r1053 r1060 33 33 //! Parse configuration file and create some objects from it 34 34 void CXios::initialize() 35 { 36 37 35 { 38 36 set_new_handler(noMemory); 39 37 parseFile(rootFile); … … 85 83 int tmp_size; 86 84 MPI_Comm_size(globalComm, &tmp_size); 87 printf("globalcomm size = %d\n", tmp_size); 85 if(isClient) printf("Client : globalcomm size = %d\n", tmp_size); 86 if(isServer) printf("Server : globalcomm size = %d\n", tmp_size); 88 87 89 88 … … 176 175 // Initialize all aspects MPI 177 176 CServer::initialize(); 177 178 178 if (CServer::getRank()==0) globalRegistry = new CRegistry(CServer::intraComm) ; 179 179 … … 193 193 CServer::eventLoop(); 194 194 195 printf("server eventloop OK\n");195 printf("server start finalize \n"); 196 196 197 197 // Finalize -
XIOS/dev/branch_yushan/src/interface/c/icdata.cpp
r1053 r1060 63 63 int initialized; 64 64 MPI_Initialized(&initialized); 65 //if (initialized) local_comm.mpi_comm = MPI_Comm_f2c(*f_local_comm); 65 66 66 if (initialized) local_comm = ep_lib::EP_Comm_f2c(static_cast< int >(*f_local_comm)); 67 67 else local_comm = MPI_COMM_NULL; … … 73 73 *f_return_comm = ep_lib::EP_Comm_c2f(return_comm); 74 74 75 printf("in icdata.cpp, f_return_comm = %d\n", *f_return_comm);75 //printf("in icdata.cpp, f_return_comm = %d\n", *f_return_comm); 76 76 77 77 CTimer::get("XIOS init").suspend(); … … 91 91 CClient::registerContext(str, comm); 92 92 93 printf("icdata.cpp: client register context OK\n");93 //printf("icdata.cpp: client register context OK\n"); 94 94 95 95 CTimer::get("XIOS init context").suspend(); … … 128 128 129 129 CContext* context = CContext::getCurrent(); 130 //printf("CContext* context = CContext::getCurrent();\n");131 130 context->finalize(); 132 131 133 //printf("client context_finalize OK\n");132 printf("client context_finalize OK\n"); 134 133 135 134 CTimer::get("XIOS context finalize").suspend(); -
XIOS/dev/branch_yushan/src/node/context.cpp
r1056 r1060 369 369 closeAllFile(); 370 370 registryOut->hierarchicalGatherRegistry() ; 371 //registryOut->gatherRegistry() ;372 371 if (server->intraCommRank==0) CXios::globalRegistry->mergeRegistry(*registryOut) ; 373 372 } -
XIOS/dev/branch_yushan/src/server.cpp
r1037 r1060 32 32 void CServer::initialize(void) 33 33 { 34 // int initialized ; 35 // MPI_Initialized(&initialized) ; 36 // if (initialized) is_MPI_Initialized=true ; 37 // else is_MPI_Initialized=false ; 38 34 39 // Not using OASIS 35 40 if (!CXios::usingOasis) 36 41 { 42 // if (!is_MPI_Initialized) 43 // { 44 // MPI_Init(NULL, NULL); 45 // } 37 46 38 47 CTimer::get("XIOS").resume() ; … 79 88 80 89 serverLeader=leaders[hashServer] ; 81 for(it=leaders.begin();it!=leaders.end(); it++)90 for(it=leaders.begin();it!=leaders.end();++it) 82 91 { 83 92 if (it->first!=hashServer) … 92 101 MPI_Intercomm_create(intraComm,0,CXios::globalComm,clientLeader,0,&newComm) ; 93 102 interComm.push_back(newComm) ; 94 printf("after inter create, interComm.size = %lu\n", interComm.size());103 //printf("after inter create, interComm.size = %lu\n", interComm.size()); 95 104 } 96 105 } … 131 140 } 132 141 133 // int rank;134 142 MPI_Comm_rank(intraComm,&rank) ; 135 143 if (rank==0) isRoot=true; … 147 155 148 156 149 for (std::list<MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); it++)157 for (std::list<MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); ++it) 150 158 MPI_Comm_free(&(*it)); 151 159 152 for (std::list<MPI_Comm>::iterator it = interComm.begin(); it != interComm.end(); it++)160 for (std::list<MPI_Comm>::iterator it = interComm.begin(); it != interComm.end(); ++it) 153 161 MPI_Comm_free(&(*it)); 154 162 … 214 222 { 215 223 MPI_Recv(&msg,1,MPI_INT,0,0,*it,&status) ; 216 printf(" CServer : Receive client finalize\n");224 printf(" CServer : Receive finalize sign from client 0\n"); 217 225 info(20)<<" CServer : Receive client finalize"<<endl ; … 410 418 MPI_Intercomm_create(intraComm,0,CXios::globalComm,leaderRank,10+leaderRank,&contextIntercomm); 411 419 412 MPI_Comm inter;413 MPI_Intercomm_merge(contextIntercomm,1,&inter);414 MPI_Barrier(inter);420 // MPI_Comm inter; 421 // MPI_Intercomm_merge(contextIntercomm,1,&inter); 422 // MPI_Barrier(inter); 415 423 416 424 … 424 432 425 433 426 MPI_Comm_free(&inter);427 428 printf(" **** server: register context OK\n");434 // MPI_Comm_free(&inter); 435 436 //printf(" **** server: register context OK\n"); 429 437 } 430 438 -
XIOS/dev/branch_yushan/src/xios_server.f90
r501 r1060 1 1 PROGRAM server_main 2 2 USE xios 3 USE mod_wait 3 4 IMPLICIT NONE 4 5 INCLUDE "mpif.h" 5 6 INTEGER :: ierr 6 7 8 CALL MPI_INIT(ierr) 9 CALL init_wait 7 10 CALL xios_init_server 8 11
Note: See TracChangeset for help on using the changeset viewer.