Changeset 1053
- Timestamp: 02/17/17 17:55:37
- Location: XIOS/dev/branch_yushan
- Files: 89 edited
XIOS/dev/branch_yushan/arch/arch-GCC_LINUX.env
    (diff r395 → r1053)
    - export HDF5_INC_DIR=$HOME/hdf5/include
    - export HDF5_LIB_DIR=$HOME/hdf5/lib
    - export NETCDF_INC_DIR=$HOME/netcdf4/include
    - export NETCDF_LIB_DIR=$HOME/netcdf4/lib
    + module unload netcdf
    + module unload hdf5
    + export HDF5_INC_DIR=$HOME/lib/hdf5/include
    + export HDF5_LIB_DIR=$HOME/lib/hdf5/lib
    + export NETCDF_INC_DIR=$HOME/lib/netcdf/include
    + export NETCDF_LIB_DIR=$HOME/lib/netcdf/lib
XIOS/dev/branch_yushan/arch/arch-GCC_LINUX.fcm
    (diff r591 → r1053)
    - %CCOMPILER mpicc
    - %FCOMPILER mpif90
    - %LINKER mpif90
    + %CCOMPILER mpicc -fopenmp -D_openmpi -D_usingEP
    + %FCOMPILER mpif90 -fopenmp
    + %LINKER mpif90 -fopenmp -D_openmpi -D_usingEP
XIOS/dev/branch_yushan/arch/arch-GCC_LINUX.path
    (diff r475 → r1053)
    - NETCDF_INCDIR="-I $NETCDF_INC_DIR "
    - NETCDF_LIBDIR="-L $NETCDF_LIB_DIR "
    - NETCDF_LIB="-lnetcdff -lnetcdf"
    + NETCDF_INCDIR="-I $NETCDF_INC_DIR -I $HOME/lib/netcdf_f/include"
    + NETCDF_LIBDIR="-L $NETCDF_LIB_DIR -L $HOME/lib/netcdf_f/lib"
    + NETCDF_LIB="-lnetcdf"
    ...
    - HDF5_LIB="-lhdf5_hl -lhdf5 -lhdf5 -lz "
    + HDF5_LIB="-lhdf5_hl -lhdf5 -lhdf5 -lz -ldl -lcurl"
XIOS/dev/branch_yushan/arch/arch-X64_CURIE.fcm
    (diff r1002 → r1053)
    - %CCOMPILER mpicc
    - %FCOMPILER mpif90
    + %CCOMPILER mpicc -openmp -D_openmpi -D_usingEP
    + %FCOMPILER mpif90 -openmp -D_openmpi -D_usingEP
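The four arch-file changes above all do the same thing: compile with OpenMP and define the preprocessor symbols _openmpi and _usingEP, which the endpoint (EP) sources below test in order to choose between plain MPI types and the ep_lib wrappers. A minimal sketch of how such flags are typically consumed follows; the alias name comm_t and the surrounding code are illustrative, not part of XIOS.

    // Illustrative only: the compile-time switch enabled by
    // "-D_usingEP -D_openmpi" in the arch files above.
    #ifdef _usingEP
      #include "ep_declaration.hpp"     // ep_lib wrapper types (endpoint mode)
      typedef ep_lib::MPI_Comm comm_t;  // hypothetical alias used by client code
    #else
      #include "mpi.hpp"                // plain MPI types
      typedef MPI_Comm comm_t;
    #endif

    #ifdef _openmpi
      // OpenMPI selected: handles are pointers and wildcard constants differ
      // (see the ep_lib.hpp and ep_type.hpp changes further down), so the
      // wrappers provide void* constructors and OpenMPI-specific values.
    #endif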
XIOS/dev/branch_yushan/extern/remap/src/mapper.cpp
    (diff r923 → r1053)
    Whitespace only: a blank line is added after #include "mapper.hpp", before namespace sphereRemap.
XIOS/dev/branch_yushan/extern/remap/src/mapper.hpp
    (diff r844 → r1053)
      #include "mpi.hpp"
    + #ifdef _usingEP
    + #include "ep_declaration.hpp"
    + #endif
    ...
    - Mapper(MPI_Comm comm=MPI_COMM_WORLD) : communicator(comm), verbose(SILENT), neighbourElements(NULL), sstree(comm) {}
    + Mapper(ep_lib::MPI_Comm comm=MPI_COMM_WORLD) : communicator(comm), verbose(SILENT), neighbourElements(NULL), sstree(comm) {}
    ...
    - MPI_Comm communicator ;
    + ep_lib::MPI_Comm communicator ;
XIOS/dev/branch_yushan/extern/remap/src/mpi_routing.cpp
    (diff r694 → r1053)
      #include <iostream>
    + #ifdef _usingEP
    + #include "ep_declaration.hpp"
    + #endif
    ...
    - MPI_Alloc_mem(nbTarget *sizeof(int), MPI_INFO_NULL, &targetRank);
    - MPI_Alloc_mem(nbSource *sizeof(int), MPI_INFO_NULL, &sourceRank);
    + MPI_Info info_null;
    +
    + MPI_Alloc_mem(nbTarget *sizeof(int), info_null, &targetRank);
    + MPI_Alloc_mem(nbSource *sizeof(int), info_null, &sourceRank);
XIOS/dev/branch_yushan/extern/remap/src/parallel_tree.hpp
    (diff r694 → r1053)
      #include "mpi.hpp"
    + #ifdef _usingEP
    + #include "ep_declaration.hpp"
    + #endif
    ...
    - CParallelTree(MPI_Comm comm);
    + CParallelTree(ep_lib::MPI_Comm comm);
    ...
    - MPI_Comm communicator ;
    + ep_lib::MPI_Comm communicator ;
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_alltoall.cpp
    (diff r1037 → r1053)
    - ::MPI_Type_get_extent(sendtype, &llb, &typesize);
    + ::MPI_Type_get_extent(static_cast< ::MPI_Datatype>(sendtype), &llb, &typesize);
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_declaration.cpp
    (diff r1037 → r1053)
      #undef MPI_COMM_NULL
    + #undef MPI_STATUS_IGNORE
    + //#undef MPI_INFO_NULL
    + #undef MPI_REQUEST_NULL
    +
    + #ifdef _openmpi
    + //#undef MPI_Fint
    + #endif
    ...
      extern ::MPI_Comm MPI_COMM_NULL_STD;
    + extern ::MPI_Status MPI_STATUS_IGNORE_STD;
    + //extern ::MPI_Info MPI_INFO_NULL_STD;
    + extern ::MPI_Request MPI_REQUEST_NULL_STD;
    ...
      ep_lib::MPI_Comm MPI_COMM_NULL(MPI_COMM_NULL_STD);
    + //ep_lib::MPI_Info MPI_INFO_NULL(MPI_INFO_NULL_STD);
    + ep_lib::MPI_Request MPI_REQUEST_NULL(MPI_REQUEST_NULL_STD);
    + //ep_lib::MPI_Status MPI_STATUS_IGNORE_STD = MPI_STATUS_IGNORE_STD;
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_declaration.hpp
    (diff r1037 → r1053)
      extern ::MPI_Comm MPI_COMM_NULL_STD;
    + extern ::MPI_Status MPI_STATUS_IGNORE_STD;
    + //extern ::MPI_Info MPI_INFO_NULL_STD;
    + extern ::MPI_Request MPI_REQUEST_NULL_STD;
    ...
      #undef MPI_COMM_NULL
    + //#undef MPI_INFO_NULL
    + #undef MPI_REQUEST_NULL
    +
    + #ifdef _openmpi
    + //#undef MPI_Fint
    + #endif
    +
    + #undef MPI_STATUS_IGNORE
    ...
      extern ep_lib::MPI_Comm MPI_COMM_NULL;
    + extern ep_lib::MPI_Status MPI_STATUS_IGNORE;
    + //extern ep_lib::MPI_Info MPI_INFO_NULL;
    + extern ep_lib::MPI_Request MPI_REQUEST_NULL;
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_exscan.cpp
    (diff r1037 → r1053)
    - ::MPI_Exscan( sendbuf, recvbuf, count, static_cast< ::MPI_Datatype>(datatype),
    + ::MPI_Exscan(const_cast<void*>(sendbuf), recvbuf, count, static_cast< ::MPI_Datatype>(datatype),
                   static_cast< ::MPI_Op>(op), static_cast< ::MPI_Comm>(comm.mpi_comm));
    ...
    The "#ifdef _serialized / #pragma omp critical (_mpi_call) / #endif" guard around the
    second ::MPI_Exscan call (on local_sum) is removed.
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_finalize.cpp
    (diff r1037 → r1053)
    The "#ifdef _serialized / #pragma omp critical (_mpi_call) / #endif" guards around
    ::MPI_Finalize() and ::MPI_Abort(mpi_comm, errorcode) are removed.
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_fortran.cpp
    (diff r1037 → r1053)
    + #ifdef _intelmpi
      #undef MPI_Comm_f2c(comm)
      #undef MPI_Comm_c2f(comm)
    + #endif
    +
    + #ifdef _openmpi
    + //#undef MPI_Fint
    + #endif
    ...
    Two functions are added to namespace ep_lib. EP_Comm_c2f(MPI_Comm) computes the Fortran
    handle fint ((::MPI_Fint)(comm.mpi_comm) under _intelmpi, ::MPI_Comm_c2f(...) under
    _openmpi), inserts (fint, omp_get_thread_num()) -> comm into fc_comm_map if absent, and
    returns fint. EP_Comm_f2c(int) looks (comm, omp_get_thread_num()) up in fc_comm_map and
    returns the cached communicator, otherwise builds one whose mpi_comm is
    ::MPI_Comm_f2c(comm). The existing MPI_Comm_c2f()/MPI_Comm_f2c() definitions are kept
    under "#ifdef _intelmpi", and an "#elif _openmpi" branch adds int MPI_Comm_c2f(MPI_Comm)
    and ep_lib::MPI_Comm MPI_Comm_f2c(MPI_Fint), which use ::MPI_Comm_c2f / a cast from the
    Fortran handle together with the same fc_comm_map cache.
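The new EP_Comm_c2f / EP_Comm_f2c pair caches each communicator in fc_comm_map keyed by (Fortran integer handle, OpenMP thread number), so a handle passed back from Fortran resolves to the endpoint communicator of the calling thread. Below is a self-contained sketch of that caching idea; "Comm" and the function names are stand-ins, not the XIOS API, and the snippet assumes an OpenMP-enabled build (omp.h).

    // Sketch only: (Fortran handle, OpenMP thread) -> communicator cache,
    // mirroring the fc_comm_map usage added in ep_fortran.cpp above.
    #include <map>
    #include <utility>
    #include <omp.h>

    struct Comm { void* mpi_comm; };                        // stand-in wrapper type

    static std::map<std::pair<int, int>, Comm> fc_comm_map; // (fint, thread) -> comm

    int comm_to_fortran(int fint, const Comm& comm)         // register on the c2f path
    {
      std::pair<int, int> key(fint, omp_get_thread_num());
      if (fc_comm_map.find(key) == fc_comm_map.end())
        fc_comm_map.insert(std::make_pair(key, comm));      // remember this thread's comm
      return fint;                                          // Fortran only sees the integer
    }

    bool comm_from_fortran(int fint, Comm& out)             // look up on the f2c path
    {
      std::map<std::pair<int, int>, Comm>::iterator it =
          fc_comm_map.find(std::make_pair(fint, omp_get_thread_num()));
      if (it == fc_comm_map.end()) return false;            // caller falls back to ::MPI_Comm_f2c
      out = it->second;
      return true;
    }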
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_free.cpp
    (diff r1037 → r1053)
    In MPI_Comm_free(): the early "if(comm == NULL) return 0;" test is commented out;
    implicit pointer/handle tests are made explicit ("if(x)" becomes "if(x != NULL)",
    "if(comm->mpi_comm)" becomes "if(comm->mpi_comm != MPI_COMM_NULL_STD)",
    "if(!comm->mpi_comm) return 0;" becomes "if(comm->mpi_comm == MPI_COMM_NULL_STD) return 0;",
    and likewise for mpi_inter_comm, ep_barrier, rank_map, message_queue and ep_comm_ptr);
    the assignments "mpi_inter_comm = NULL" and "comm->mpi_comm = NULL" after the
    ::MPI_Comm_free calls are commented out; and the "#ifdef _serialized / #pragma omp
    critical (_mpi_call) / #endif" guard around the final ::MPI_Comm_free is removed.
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_gather.cpp
    (diff r1037 → r1053)
    The direct ::MPI_Gather and ::MPI_Allgather calls now pass const_cast<void*>(sendbuf),
    and all "#ifdef _serialized / #pragma omp critical (_mpi_call) / #endif" guards around
    the underlying ::MPI_Gather, ::MPI_Type_get_extent, ::MPI_Allgather, ::MPI_Gatherv and
    ::MPI_Allgatherv calls are removed.
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_gatherv.cpp
    (diff r1037 → r1053)
    The direct ::MPI_Gatherv call now passes const_cast<void*>(sendbuf),
    const_cast<int*>(recvcounts) and const_cast<int*>(displs), and all
    "#ifdef _serialized / #pragma omp critical (_mpi_call) / #endif" guards around the
    underlying ::MPI_Gatherv, ::MPI_Allgatherv, ::MPI_Allgather and ::MPI_Type_get_extent
    calls are removed.
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_init.cpp
    (diff r1037 → r1053)
    The debug printf calls ("MPI_Init_thread", "MPI_init called", "MPI_initialized called")
    in MPI_Init_thread, MPI_Init and MPI_Initialized are commented out.
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_intercomm.cpp
    (diff r1037 → r1053)
    The "_serialized" critical-section guard around ::MPI_Comm_rank(MPI_COMM_WORLD_STD,
    &leader_in_world[0]) is removed. In MPI_Comm_test_inter(): "*flag = false;" is set on
    entry, the non-EP branch becomes "else if(comm.mpi_comm != MPI_COMM_NULL_STD)", the
    guard around ::MPI_Comm_test_inter is removed, and a final "return 0;" is added.
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_intercomm_kernel.cpp
    (diff r1037 → r1053)
    All "#ifdef _serialized / #pragma omp critical (_mpi_call) / #endif" guards around the
    underlying MPI calls (::MPI_Comm_rank, ::MPI_Allgather, ::MPI_Bcast, ::MPI_Comm_group,
    ::MPI_Group_incl, ::MPI_Comm_create, ::MPI_Barrier, ::MPI_Intercomm_create,
    ::MPI_Comm_dup) are removed; the guarded block around the two initial ::MPI_Comm_rank
    calls is unwrapped into plain calls.
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_intercomm_world.cpp
    (diff r1037 → r1053)
    - MPI_Send(send_buf.data(), 3*size_info[2], MPI_INT_STD, mpi_remote_leader, tag, peer_comm);
    - MPI_Recv(recv_buf.data(), 3*size_info[3], MPI_INT_STD, mpi_remote_leader, tag, peer_comm, &mpi_status);
    + ::MPI_Send(send_buf.data(), 3*size_info[2], MPI_INT_STD, mpi_remote_leader, tag, peer_comm);
    + ::MPI_Recv(recv_buf.data(), 3*size_info[3], MPI_INT_STD, mpi_remote_leader, tag, peer_comm, &mpi_status);
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_lib.hpp
    (diff r1037 → r1053)
    + #ifdef _intelmpi
      #define MPI_ANY_SOURCE -2
      #define MPI_ANY_TAG -1
    + #elif _openmpi
    + #define MPI_ANY_SOURCE -1
    + #define MPI_ANY_TAG -1
    + #endif
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_lib_fortran.hpp
    (diff r1037 → r1053)
    - MPI_Fint MPI_Comm_c2f(MPI_Comm comm);
    - //int MPI_Comm_c2f(MPI_Comm comm);
    - MPI_Comm MPI_Comm_f2c(MPI_Fint comm);
    - //void MPI_Comm_f2c(MPI_Fint comm);
    + #ifdef _intelmpi
    + MPI_Fint MPI_Comm_c2f(MPI_Comm comm);
    + MPI_Comm MPI_Comm_f2c(MPI_Fint comm);
    + #elif _openmpi
    + int MPI_Comm_c2f(MPI_Comm comm);
    + ep_lib::MPI_Comm MPI_Comm_f2c(MPI_Fint comm);
    + #endif
    + int EP_Comm_c2f(MPI_Comm comm);
    + MPI_Comm EP_Comm_f2c(int comm);
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_memory.cpp
    (diff r1037 → r1053)
    - ::MPI_Alloc_mem(size.mpi_aint, info.mpi_info, baseptr);
    + ::MPI_Alloc_mem(size.mpi_aint, static_cast< ::MPI_Info>(info.mpi_info), baseptr);
    ...
    - ::MPI_Alloc_mem(size, info.mpi_info, baseptr);
    + ::MPI_Alloc_mem(size, static_cast< ::MPI_Info>(info.mpi_info), baseptr);
    The "_serialized" critical-section guards around both calls are removed.
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_merge.cpp
    (diff r1037 → r1053)
    MPI_Intercomm_merge_unique_leader() and MPI_Intercomm_merge() are reorganised; most of
    the listing is code that the diff shows as moved between the two functions rather than
    changed. The identifiable changes are the removal of the "#ifdef _serialized /
    #pragma omp critical (_mpi_call) / #endif" guards around ::MPI_Intercomm_merge and
    ::MPI_Bcast, plus blank-line adjustments. In MPI_Intercomm_merge(), the local leaders
    exchange local_high/remote_high, an intercomm_high value is broadcast over the
    inter-communicator, the master endpoint of each process merges the underlying
    mpi_inter_comm with ::MPI_Intercomm_merge and creates num_ep endpoints on the result
    with MPI_Comm_create_endpoints (published through comm_list->mem_bridge), and the rank
    map of the new intra-communicator is then permuted with the gathered reorder array.
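After the merge, every endpoint ends up with a new global rank (my_ep_rank); the listing above rebuilds the rank map by gathering those ranks into a reorder array and permuting the map so that entry reorder[i] receives what used to be entry i. A standalone sketch of that permutation step follows; the names are simplified stand-ins for the ep_lib internals.

    // Sketch of the rank-map permutation shown in the ep_merge.cpp listing above.
    #include <cstddef>
    #include <utility>
    #include <vector>

    void permute_rank_map(std::vector< std::pair<int, int> >& rank_map,
                          const std::vector<int>& reorder)
    {
      std::vector< std::pair<int, int> > tmp(rank_map.size());
      for (std::size_t i = 0; i < rank_map.size(); ++i)
        tmp[reorder[i]] = rank_map[i];   // slot i moves to its new global rank
      rank_map.swap(tmp);                // same trick as rank_map->swap(tmp_rank_map)
    }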
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_probe.cpp
    (diff r1037 → r1053)
    - #pragma critical (_query)
    + #pragma omp critical (_query)
    (in both probe paths: the original pragma was missing the "omp" keyword), and in the
    message-dequeue block:
    - #pragma omp critical (_query )
    + #pragma omp critical (_query2)
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_rank.cpp
    (diff r1037 → r1053)
    - if(comm.mpi_comm)
    + if(comm != MPI_COMM_NULL)
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_recv.cpp
    (diff r1037 → r1053)
    - request->mpi_request = NULL;
    + request->mpi_request = MPI_REQUEST_NULL_STD;
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_reduce.cpp
    (diff r1037 → r1053)
    All "#ifdef _serialized / #pragma omp critical (_mpi_call) / #endif" guards around the
    underlying ::MPI_Reduce, ::MPI_Allreduce, ::MPI_Reduce_scatter and ::MPI_Type_get_extent
    calls are removed; no other change.
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_scan.cpp
    (diff r1037 → r1053)
    The "_serialized" critical-section guards around ::MPI_Scan, ::MPI_Type_get_extent and
    ::MPI_Exscan are removed; no other change.
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_scatter.cpp
    (diff r1037 → r1053)
    The "_serialized" critical-section guards around ::MPI_Scatter, ::MPI_Type_get_extent,
    ::MPI_Gather and both ::MPI_Scatterv calls are removed; no other change.
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_scatterv.cpp
    (diff r1037 → r1053)
    The "_serialized" critical-section guards around the outer ::MPI_Scatterv,
    ::MPI_Type_get_extent, ::MPI_Gather and both inner ::MPI_Scatterv calls are removed;
    no other change.
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_send.cpp
    (diff r1037 → r1053)
    The four check_sum_send(buf, count, datatype, dest, tag, comm, 1) debug calls (in the
    intracomm and intercomm Isend/Issend paths) are commented out.
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_size.cpp
    (diff r1037 → r1053)
    - if(comm.mpi_comm )
    + if(comm.mpi_comm != MPI_COMM_NULL_STD)
    (in both MPI_Comm_size and MPI_Comm_remote_size), and the "_serialized" guard around
    ::MPI_Comm_remote_size is removed.
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_split.cpp
    (diff r1037 → r1053)
    The "_serialized" critical-section guard around ::MPI_Comm_split is removed.
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_test.cpp
    (diff r1037 → r1053)
    Indentation of the "status->ep_tag" and "status->ep_datatype" assignments is fixed, and
    the check_sum_recv(...) debug call is commented out.
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_type.cpp
    (diff r1037 → r1053)
    The "#undef MPI_COMM_WORLD" and "#undef MPI_COMM_NULL" directives are re-enabled
    (previously commented out), and the request handle is captured the same way:
    + ::MPI_Request MPI_REQUEST_NULL_STD = MPI_REQUEST_NULL;
    + #undef MPI_REQUEST_NULL
    (with the equivalent MPI_INFO_NULL lines added as comments). An
    "#ifdef _openmpi / //#undef MPI_Fint / #endif" block is added, and the large
    commented-out blocks (duplicate ep_lib::MPI_Datatype / MPI_Op definitions and the old
    MPI_Comm copy-constructor sketch) are deleted.
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_type.hpp
    (diff r1037 → r1053)
    "#include <memory.h>" is commented out, as are the "#define MPI_STATUS_IGNORE NULL" and
    "#define MPI_INFO_NULL MPI_Info()" macros. The default constructors of MPI_Message,
    MPI_Comm, MPI_Info and MPI_Request no longer zero their handle. The existing
    MPI_Comm(int comm) constructor is wrapped in "#ifdef _intelmpi" and an "#elif _openmpi"
    MPI_Comm(void* comm) overload is added; MPI_Message, MPI_Info and MPI_Request gain the
    same pair of conversion constructors, e.g.
    + #ifdef _intelmpi
    +   MPI_Info(int info): mpi_info(info) {}
    + #elif _openmpi
    +   MPI_Info(void* info): mpi_info(info) {}
    + #endif
    MPI_Aint gains "MPI_Aint() {}" and "MPI_Aint(int a): mpi_aint(a) {}", MPI_Fint gains
    "MPI_Fint() {}" and "MPI_Fint(int f): mpi_fint(f) {}", with MPI_Aint now declared before
    MPI_Fint. The commented-out MPI_Comm copy-constructor declaration is removed.
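The ep_type.hpp change above reflects that Intel MPI exposes its handles as integers while OpenMPI exposes them as pointers, so each wrapper gains an int constructor under _intelmpi and a void* constructor under _openmpi. Below is a reduced sketch of that pattern; "Handle" is a stand-in for the ep_lib wrapper classes, not actual XIOS code.

    // Reduced sketch of the wrapper pattern: each ep_lib class stores one vendor
    // MPI handle and offers a constructor matching the native handle type selected
    // at compile time.
    #ifdef _intelmpi
      typedef int   native_handle_t;   // Intel MPI: handles are plain integers
    #else
      typedef void* native_handle_t;   // OpenMPI: handles are opaque pointers
    #endif

    class Handle
    {
    public:
      native_handle_t mpi_handle;                   // stored vendor handle

      Handle() {}                                   // left uninitialised, as in r1053
      Handle(native_handle_t h) : mpi_handle(h) {}  // construct from the native handle
    };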
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_wait.cpp
    (diff r1037 → r1053)
    The four check_sum_recv(...) debug calls in MPI_Wait, the request-completion path and
    MPI_Waitall are commented out.
XIOS/dev/branch_yushan/src/buffer_server.hpp
    (diff r717 → r1053)
    - #include "mpi.hpp"
    + #include "mpi_std.hpp"
XIOS/dev/branch_yushan/src/client.cpp
    (diff r1037 → r1053)
    The leftover "//return;" line (and the blank line after it) following the MPI
    initialisation check is removed.
XIOS/dev/branch_yushan/src/client.hpp
    (diff r1037 → r1053)
    - static void registerContext(const string& id, MPI_Comm contextComm);
    + static void registerContext(const string& id, ep_lib::MPI_Comm contextComm);
XIOS/dev/branch_yushan/src/client_client_dht_template.hpp
    (diff r941 → r1053)
    - #include "mpi.hpp"
    + #include "mpi_std.hpp"
    and every MPI_Comm / MPI_Request in the class declaration (constructors,
    computeDistributedIndex, computeIndexInfoMappingLevel, sendInfoToClients,
    recvInfoFromClients, sendIndexToClients, recvIndexFromClients) is qualified as
    ep_lib::MPI_Comm / ep_lib::MPI_Request.
XIOS/dev/branch_yushan/src/client_client_dht_template_impl.hpp
r1037 r1053 18 18 { 19 19 template<typename T, typename H> 20 CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const MPI_Comm& clientIntraComm)20 CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const ep_lib::MPI_Comm& clientIntraComm) 21 21 : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0) 22 22 { … … 38 38 template<typename T, typename H> 39 39 CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const Index2InfoTypeMap& indexInfoMap, 40 const MPI_Comm& clientIntraComm)40 const ep_lib::MPI_Comm& clientIntraComm) 41 41 : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0) 42 42 { … … 68 68 template<typename T, typename H> 69 69 CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const Index2VectorInfoTypeMap& indexInfoMap, 70 const MPI_Comm& clientIntraComm)70 const ep_lib::MPI_Comm& clientIntraComm) 71 71 : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0) 72 72 { … … 104 104 template<typename T, typename H> 105 105 void CClientClientDHTTemplate<T,H>::computeIndexInfoMappingLevel(const CArray<size_t,1>& indices, 106 const MPI_Comm& commLevel,106 const ep_lib::MPI_Comm& commLevel, 107 107 int level) 108 108 { … … 178 178 recvIndexBuff = new unsigned long[recvNbIndexCount]; 179 179 180 std::vector< MPI_Request> request;180 std::vector<ep_lib::MPI_Request> request; 181 181 std::vector<int>::iterator itbRecvIndex = recvRankClient.begin(), itRecvIndex, 182 182 iteRecvIndex = recvRankClient.end(), … … 199 199 sendIndexToClients(itIndex->first, (itIndex->second), sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request); 200 200 201 std::vector< MPI_Status> status(request.size());201 std::vector<ep_lib::MPI_Status> status(request.size()); 202 202 203 203 //printf("1(%d): calling wait all for %lu requests\n", clientRank, request.size()); … … 259 259 } 260 260 261 std::vector< MPI_Request> requestOnReturn;261 std::vector<ep_lib::MPI_Request> requestOnReturn; 262 262 currentIndex = 0; 263 263 for (int idx = 0; idx < recvRankOnReturn.size(); ++idx) … … 310 310 } 311 311 312 std::vector< MPI_Status> statusOnReturn(requestOnReturn.size());312 std::vector<ep_lib::MPI_Status> statusOnReturn(requestOnReturn.size()); 313 313 //printf("2(%d): calling wait all for %lu requests\n", clientRank, requestOnReturn.size()); 314 314 MPI_Waitall(requestOnReturn.size(), &requestOnReturn[0], &statusOnReturn[0]); … … 380 380 template<typename T, typename H> 381 381 void CClientClientDHTTemplate<T,H>::computeDistributedIndex(const Index2VectorInfoTypeMap& indexInfoMap, 382 const MPI_Comm& commLevel,382 const ep_lib::MPI_Comm& commLevel, 383 383 int level) 384 384 { … … 465 465 // it will send a message to the correct clients. 466 466 // Contents of the message are index and its corresponding informatioin 467 std::vector< MPI_Request> request;467 std::vector<ep_lib::MPI_Request> request; 468 468 int currentIndex = 0; 469 469 int nbRecvClient = recvRankClient.size(); … … 504 504 505 505 //printf("check 8 OK. 
clientRank = %d\n", clientRank); 506 std::vector< MPI_Status> status(request.size());506 std::vector<ep_lib::MPI_Status> status(request.size()); 507 507 508 508 MPI_Waitall(request.size(), &request[0], &status[0]); … … 564 564 template<typename T, typename H> 565 565 void CClientClientDHTTemplate<T,H>::sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize, 566 const MPI_Comm& clientIntraComm,567 std::vector< MPI_Request>& requestSendIndex)568 { 569 MPI_Request request;566 const ep_lib::MPI_Comm& clientIntraComm, 567 std::vector<ep_lib::MPI_Request>& requestSendIndex) 568 { 569 ep_lib::MPI_Request request; 570 570 requestSendIndex.push_back(request); 571 571 … … 583 583 template<typename T, typename H> 584 584 void CClientClientDHTTemplate<T,H>::recvIndexFromClients(int clientSrcRank, size_t* indices, size_t indiceSize, 585 const MPI_Comm& clientIntraComm,586 std::vector< MPI_Request>& requestRecvIndex)587 { 588 MPI_Request request;585 const ep_lib::MPI_Comm& clientIntraComm, 586 std::vector<ep_lib::MPI_Request>& requestRecvIndex) 587 { 588 ep_lib::MPI_Request request; 589 589 requestRecvIndex.push_back(request); 590 590 … … 603 603 template<typename T, typename H> 604 604 void CClientClientDHTTemplate<T,H>::sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize, 605 const MPI_Comm& clientIntraComm,606 std::vector< MPI_Request>& requestSendInfo)607 { 608 MPI_Request request;605 const ep_lib::MPI_Comm& clientIntraComm, 606 std::vector<ep_lib::MPI_Request>& requestSendInfo) 607 { 608 ep_lib::MPI_Request request; 609 609 requestSendInfo.push_back(request); 610 610 //printf("MPI_IsendInfo(info, infoSize, MPI_CHAR,... char count = %d, dest = %d, buf_size = %d\n", infoSize, clientDestRank, sizeof(*info) ); … … 623 623 template<typename T, typename H> 624 624 void CClientClientDHTTemplate<T,H>::recvInfoFromClients(int clientSrcRank, unsigned char* info, int infoSize, 625 const MPI_Comm& clientIntraComm,626 std::vector< MPI_Request>& requestRecvInfo)627 { 628 MPI_Request request;625 const ep_lib::MPI_Comm& clientIntraComm, 626 std::vector<ep_lib::MPI_Request>& requestRecvInfo) 627 { 628 ep_lib::MPI_Request request; 629 629 requestRecvInfo.push_back(request); 630 630 … … 699 699 { 700 700 recvNbElements.resize(recvNbRank.size()); 701 std::vector< MPI_Request> request(sendNbRank.size()+recvNbRank.size());702 std::vector< MPI_Status> requestStatus(sendNbRank.size()+recvNbRank.size());701 std::vector<ep_lib::MPI_Request> request(sendNbRank.size()+recvNbRank.size()); 702 std::vector<ep_lib::MPI_Status> requestStatus(sendNbRank.size()+recvNbRank.size()); 703 703 704 704 int nRequest = 0; … … 751 751 std::vector<int> recvBuff(recvBuffSize*2,0); 752 752 753 std::vector< MPI_Request> request(sendBuffSize+recvBuffSize);754 std::vector< MPI_Status> requestStatus(sendBuffSize+recvBuffSize);753 std::vector<ep_lib::MPI_Request> request(sendBuffSize+recvBuffSize); 754 std::vector<ep_lib::MPI_Status> requestStatus(sendBuffSize+recvBuffSize); 755 755 756 756 int nRequest = 0; -
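Most of this file's change is mechanical: every MPI_Request, MPI_Status and MPI_Comm used by the DHT exchange becomes its ep_lib:: counterpart, so the endpoint layer owns request tracking. The surrounding idiom is the usual request-vector exchange; below is a minimal sketch in plain MPI, with placeholder ranks, tag and payload (the real code sends index and info buffers of varying size):

    #include <mpi.h>
    #include <cstddef>
    #include <vector>

    // Post all receives, then all sends, and complete everything with one MPI_Waitall.
    void exchangeIndices(MPI_Comm comm, const std::vector<int>& recvRanks,
                         const std::vector<int>& sendRanks, int tag)
    {
      std::vector<MPI_Request> requests;
      std::vector<unsigned long> recvBuf(recvRanks.size(), 0ul);
      std::vector<unsigned long> sendBuf(sendRanks.size(), 0ul);

      for (std::size_t i = 0; i < recvRanks.size(); ++i)
      {
        requests.push_back(MPI_Request());
        MPI_Irecv(&recvBuf[i], 1, MPI_UNSIGNED_LONG, recvRanks[i], tag, comm, &requests.back());
      }
      for (std::size_t i = 0; i < sendRanks.size(); ++i)
      {
        requests.push_back(MPI_Request());
        MPI_Isend(&sendBuf[i], 1, MPI_UNSIGNED_LONG, sendRanks[i], tag, comm, &requests.back());
      }

      if (!requests.empty())
      {
        std::vector<MPI_Status> status(requests.size());
        MPI_Waitall(static_cast<int>(requests.size()), &requests[0], &status[0]);
      }
    }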
XIOS/dev/branch_yushan/src/client_server_mapping.hpp
r1037 r1053 41 41 42 42 static std::map<int,int> computeConnectedClients(int nbServer, int nbClient, 43 MPI_Comm& clientIntraComm,43 ep_lib::MPI_Comm& clientIntraComm, 44 44 const std::vector<int>& connectedServerRank); 45 45 -
XIOS/dev/branch_yushan/src/client_server_mapping_distributed.hpp
r835 r1053 35 35 /** Default constructor */ 36 36 CClientServerMappingDistributed(const boost::unordered_map<size_t,int>& globalIndexOfServer, 37 const MPI_Comm& clientIntraComm,37 const ep_lib::MPI_Comm& clientIntraComm, 38 38 bool isDataDistributed = true); 39 39 -
XIOS/dev/branch_yushan/src/context_client.cpp
r1037 r1053 20 20 \cxtSer [in] cxtSer Pointer to context of server side. (It is only used on case of attached mode) 21 21 */ 22 CContextClient::CContextClient(CContext* parent, MPI_Comm intraComm_,MPI_Comm interComm_, CContext* cxtSer)22 CContextClient::CContextClient(CContext* parent, ep_lib::MPI_Comm intraComm_, ep_lib::MPI_Comm interComm_, CContext* cxtSer) 23 23 : mapBufferSize_(), parentServer(cxtSer), maxBufferedEvents(4) 24 24 { … … 163 163 for (itBuffer = bufferList.begin(), itSize = sizeList.begin(); itBuffer != bufferList.end(); itBuffer++, itSize++) 164 164 { 165 retBuffer.push_back((*itBuffer)->getBuffer(*itSize)); 165 CBufferOut* m_buf = (*itBuffer)->getBuffer(*itSize); 166 //retBuffer.push_back((*itBuffer)->getBuffer(*itSize)); 167 //int m_size = retBuffer.size(); 168 //retBuffer.resize(m_size+1); 169 //m_size = retBuffer.size(); 170 retBuffer.push_back(m_buf); 166 171 } 167 172 return retBuffer; -
XIOS/dev/branch_yushan/src/context_client.hpp
r1037 r1053 31 31 public: 32 32 // Contructor 33 CContextClient(CContext* parent, MPI_Comm intraComm,MPI_Comm interComm, CContext* parentServer = 0);33 CContextClient(CContext* parent, ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* parentServer = 0); 34 34 35 35 // Send event to server … … 66 66 int serverSize; //!< Size of server group 67 67 68 MPI_Comm interComm; //!< Communicator of server group68 ep_lib::MPI_Comm interComm; //!< Communicator of server group 69 69 70 MPI_Comm intraComm; //!< Communicator of client group70 ep_lib::MPI_Comm intraComm; //!< Communicator of client group 71 71 72 72 map<int,CClientBuffer*> buffers; //!< Buffers for connection to servers -
XIOS/dev/branch_yushan/src/context_server.cpp
r1037 r1053 10 10 #include "file.hpp" 11 11 #include "grid.hpp" 12 #include "mpi .hpp"12 #include "mpi_std.hpp" 13 13 #include "tracer.hpp" 14 14 #include "timer.hpp" … … 23 23 { 24 24 25 CContextServer::CContextServer(CContext* parent, MPI_Comm intraComm_,MPI_Comm interComm_)25 CContextServer::CContextServer(CContext* parent, ep_lib::MPI_Comm intraComm_, ep_lib::MPI_Comm interComm_) 26 26 { 27 27 context=parent; … … 71 71 int count; 72 72 char * addr; 73 MPI_Status status;73 ep_lib::MPI_Status status; 74 74 map<int,CServerBuffer*>::iterator it; 75 75 … … 101 101 { 102 102 addr=(char*)it->second->getBuffer(count); 103 MPI_Irecv(addr,count,MPI_CHAR,rank,20,interComm,&pendingRequest[rank]);103 ep_lib::MPI_Irecv(addr,count,MPI_CHAR,rank,20,interComm,&pendingRequest[rank]); 104 104 bufferRequest[rank]=addr; 105 105 //printf("find message, i-receiving to buffer %p, rank = %d, commSize = %d\n", addr, rank, commSize); … … 113 113 void CContextServer::checkPendingRequest(void) 114 114 { 115 map<int, MPI_Request>::iterator it;115 map<int,ep_lib::MPI_Request>::iterator it; 116 116 list<int> recvRequest; 117 117 list<int>::iterator itRecv; … … 119 119 int flag; 120 120 int count; 121 MPI_Status status;121 ep_lib::MPI_Status status; 122 122 123 123 //printf("enter checkPendingRequest\n"); -
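The context server keeps one outstanding receive per client rank in a map<int, MPI_Request>, and checkPendingRequest polls that map; here too the change is only the switch to ep_lib:: types (plus the explicitly qualified ep_lib::MPI_Irecv). A small plain-MPI sketch of the poll step follows; the function name and the choice to return completed ranks rather than process buffers in place are illustrative:

    #include <mpi.h>
    #include <list>
    #include <map>

    // Test every pending receive once and report the ranks whose message arrived.
    std::list<int> checkPending(std::map<int, MPI_Request>& pendingRequest)
    {
      std::list<int> completed;
      for (std::map<int, MPI_Request>::iterator it = pendingRequest.begin();
           it != pendingRequest.end(); ++it)
      {
        int flag = 0;
        MPI_Status status;
        MPI_Test(&it->second, &flag, &status);   // non-blocking completion check
        if (flag) completed.push_back(it->first);
      }
      // The caller would then consume the matching buffers and erase the entries.
      return completed;
    }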
XIOS/dev/branch_yushan/src/context_server.hpp
r1037 r1053 14 14 public: 15 15 16 CContextServer(CContext* parent, MPI_Comm intraComm,MPI_Comm interComm) ;16 CContextServer(CContext* parent, ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm) ; 17 17 bool eventLoop(void) ; 18 18 void listen(void) ; … … 25 25 bool hasFinished(void); 26 26 27 MPI_Comm intraComm ;27 ep_lib::MPI_Comm intraComm ; 28 28 int intraCommSize ; 29 29 int intraCommRank ; 30 30 31 MPI_Comm interComm ;31 ep_lib::MPI_Comm interComm ; 32 32 int commSize ; 33 33 34 34 map<int,CServerBuffer*> buffers ; 35 map<int, MPI_Request> pendingRequest ;35 map<int,ep_lib::MPI_Request> pendingRequest ; 36 36 map<int,char*> bufferRequest ; 37 37 -
XIOS/dev/branch_yushan/src/cxios.cpp
r1037 r1053 79 79 MPI_Info info; 80 80 MPI_Comm *ep_comm; 81 MPI_Comm_create_endpoints(MPI_COMM_WORLD, num_ep, info, ep_comm); 81 MPI_Comm_create_endpoints(MPI_COMM_WORLD, num_ep, info, ep_comm); // servers should reach here too. 82 82 83 83 globalComm = ep_comm[0]; -
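MPI_Comm_create_endpoints is not standard MPI; it is the entry point of the endpoint library carried in extern/src_ep_dev, and the comment added above notes that server processes must also reach this call. How it is meant to be used can only be inferred from the call site, so the sketch below is an assumption-laden illustration: the signature, the ownership of the ep_comm array and the header name are taken from the fragments visible in this changeset, not from a verified API reference.

    #include <omp.h>
    #include "ep_lib.hpp"            // endpoint-MPI layer from extern/src_ep_dev (path assumed)
    using namespace ep_lib;          // MPI_* below resolve to the ep_lib wrappers

    void createEndpoints()
    {
      int num_ep = omp_get_max_threads();   // one endpoint per OpenMP thread
      MPI_Info info;
      MPI_Comm* ep_comm;                     // allocated and filled by the endpoint layer

      MPI_Comm_create_endpoints(MPI_COMM_WORLD, num_ep, info, ep_comm);

      #pragma omp parallel
      {
        // Each thread then communicates on its own endpoint, as if it were a rank.
        MPI_Comm myComm = ep_comm[omp_get_thread_num()];
        (void)myComm;
      }
    }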
XIOS/dev/branch_yushan/src/cxios.hpp
r1037 r1053 19 19 public: 20 20 static void initialize(void) ; 21 static void initClientSide(const string & codeId, MPI_Comm& localComm,MPI_Comm& returnComm) ;21 static void initClientSide(const string & codeId, ep_lib::MPI_Comm& localComm, ep_lib::MPI_Comm& returnComm) ; 22 22 static void initServerSide(void) ; 23 23 static void clientFinalize(void) ; -
XIOS/dev/branch_yushan/src/dht_auto_indexing.cpp
r1037 r1053 22 22 23 23 CDHTAutoIndexing::CDHTAutoIndexing(const CArray<size_t,1>& hashValue, 24 const MPI_Comm& clientIntraComm)24 const ep_lib::MPI_Comm& clientIntraComm) 25 25 : CClientClientDHTTemplate<size_t>(clientIntraComm) 26 26 { … … 58 58 */ 59 59 CDHTAutoIndexing::CDHTAutoIndexing(Index2VectorInfoTypeMap& hashInitMap, 60 const MPI_Comm& clientIntraComm)60 const ep_lib::MPI_Comm& clientIntraComm) 61 61 : CClientClientDHTTemplate<size_t>(clientIntraComm) 62 62 { -
XIOS/dev/branch_yushan/src/dht_auto_indexing.hpp
r1037 r1053 28 28 29 29 CDHTAutoIndexing(const CArray<size_t,1>& hashValue, 30 const MPI_Comm& clientIntraComm);30 const ep_lib::MPI_Comm& clientIntraComm); 31 31 32 32 CDHTAutoIndexing(Index2VectorInfoTypeMap& hashInitMap, 33 const MPI_Comm& clientIntraComm);33 const ep_lib::MPI_Comm& clientIntraComm); 34 34 35 35 size_t getNbIndexesGlobal() const; -
XIOS/dev/branch_yushan/src/filter/filter.cpp
r1037 r1053 14 14 CDataPacketPtr outputPacket = engine->apply(data); 15 15 if (outputPacket) 16 { 17 printf("filter/filter.cpp : deliverOuput(outputPacket)\n"); 16 18 deliverOuput(outputPacket); 19 printf("filter/filter.cpp : deliverOuput(outputPacket) OKOK\n"); 20 } 17 21 } 18 22 } // namespace xios -
XIOS/dev/branch_yushan/src/filter/input_pin.cpp
r1037 r1053 33 33 // Unregister before calling onInputReady in case the filter registers again 34 34 gc.unregisterFilter(this, packet->timestamp); 35 printf("filter/input_pin.cpp : onInputReady\n"); 35 36 onInputReady(it->second.packets); 37 printf("filter/input_pin.cpp : onInputReady OKOK\n"); 36 38 inputs.erase(it); 37 39 } -
XIOS/dev/branch_yushan/src/filter/output_pin.cpp
r1037 r1053 22 22 for (it = outputs.begin(), itEnd = outputs.end(); it != itEnd; ++it) 23 23 { 24 printf("filter/output_pin.cpp : setInput\n"); 24 25 it->first->setInput(it->second, packet); 26 printf("filter/output_pin.cpp : setInput OKOK\n"); 25 27 } 26 28 } -
XIOS/dev/branch_yushan/src/filter/source_filter.cpp
r1037 r1053 29 29 grid->inputField(data, packet->data); 30 30 31 printf("filter/source_filter.cpp : deliverOuput(packet) \n"); 31 32 deliverOuput(packet); 33 printf("filter/source_filter.cpp : deliverOuput(packet) OKOK\n"); 32 34 } 33 35 -
XIOS/dev/branch_yushan/src/filter/spatial_transform_filter.cpp
r1037 r1053 150 150 151 151 idxSendBuff = 0; 152 std::vector< MPI_Request> sendRecvRequest;152 std::vector<ep_lib::MPI_Request> sendRecvRequest; 153 153 for (itSend = itbSend; itSend != iteSend; ++itSend, ++idxSendBuff) 154 154 { … … 160 160 sendBuff[idxSendBuff][idx] = dataCurrentSrc(localIndex_p(idx)); 161 161 } 162 sendRecvRequest.push_back( MPI_Request());162 sendRecvRequest.push_back(ep_lib::MPI_Request()); 163 163 MPI_Isend(sendBuff[idxSendBuff], countSize, MPI_DOUBLE, destRank, 12, client->intraComm, &sendRecvRequest.back()); 164 164 } … … 178 178 int srcRank = itRecv->first; 179 179 int countSize = itRecv->second.size(); 180 sendRecvRequest.push_back( MPI_Request());180 sendRecvRequest.push_back(ep_lib::MPI_Request()); 181 181 MPI_Irecv(recvBuff + currentBuff, countSize, MPI_DOUBLE, srcRank, 12, client->intraComm, &sendRecvRequest.back()); 182 182 currentBuff += countSize; 183 183 } 184 std::vector< MPI_Status> status(sendRecvRequest.size());184 std::vector<ep_lib::MPI_Status> status(sendRecvRequest.size()); 185 185 MPI_Waitall(sendRecvRequest.size(), &sendRecvRequest[0], &status[0]); 186 186 -
XIOS/dev/branch_yushan/src/interface/c/icdata.cpp
r1037 r1053 23 23 #include "context.hpp" 24 24 #include "context_client.hpp" 25 #include "mpi .hpp"25 #include "mpi_std.hpp" 26 26 #include "timer.hpp" 27 27 #include "array_new.hpp" … … 54 54 { 55 55 std::string str; 56 MPI_Comm local_comm;57 MPI_Comm return_comm;56 ep_lib::MPI_Comm local_comm; 57 ep_lib::MPI_Comm return_comm; 58 58 59 fc_comm_map.clear();59 ep_lib::fc_comm_map.clear(); 60 60 61 61 if (!cstr2string(client_id, len_client_id, str)) return; … … 63 63 int initialized; 64 64 MPI_Initialized(&initialized); 65 if (initialized) local_comm=MPI_Comm_f2c(*f_local_comm); 65 //if (initialized) local_comm.mpi_comm = MPI_Comm_f2c(*f_local_comm); 66 if (initialized) local_comm = ep_lib::EP_Comm_f2c(static_cast< int >(*f_local_comm)); 66 67 else local_comm = MPI_COMM_NULL; 67 68 … … 69 70 70 71 CXios::initClientSide(str, local_comm, return_comm); 71 *f_return_comm = MPI_Comm_c2f(return_comm); 72 73 *f_return_comm = ep_lib::EP_Comm_c2f(return_comm); 72 74 73 75 printf("in icdata.cpp, f_return_comm = %d\n", *f_return_comm); … … 80 82 { 81 83 std::string str; 82 MPI_Comm comm;84 ep_lib::MPI_Comm comm; 83 85 84 86 if (!cstr2string(context_id, len_context_id, str)) return; 85 87 CTimer::get("XIOS").resume(); 86 88 CTimer::get("XIOS init context").resume(); 87 comm =MPI_Comm_f2c(*f_comm);89 comm = ep_lib::EP_Comm_f2c(static_cast< int >(*f_comm)); 88 90 89 91 CClient::registerContext(str, comm); 90 92 91 //printf("client register context OK\n");93 printf("icdata.cpp: client register context OK\n"); 92 94 93 95 CTimer::get("XIOS init context").suspend(); -
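With endpoints, a communicator handed across the Fortran/C boundary is an ep_lib object rather than a plain MPI handle, so the C interface now goes through ep_lib::EP_Comm_f2c / ep_lib::EP_Comm_c2f (and, judging from the fc_comm_map.clear() call above, the endpoint layer keeps its own Fortran-to-C handle map). For comparison, the standard MPI conversion the previous code relied on:

    #include <mpi.h>

    // Plain MPI handle conversion: a Fortran INTEGER communicator handle becomes a
    // C MPI_Comm and back.  The EP build replaces these two calls with
    // ep_lib::EP_Comm_f2c / ep_lib::EP_Comm_c2f.
    MPI_Comm commFromFortran(MPI_Fint f_comm)
    {
      return MPI_Comm_f2c(f_comm);
    }

    MPI_Fint commToFortran(MPI_Comm c_comm)
    {
      return MPI_Comm_c2f(c_comm);
    }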
XIOS/dev/branch_yushan/src/interface/c/oasis_cinterface.cpp
r1037 r1053 26 26 27 27 fxios_oasis_get_localcomm(&f_comm) ; 28 comm=MPI_Comm_f2c(f_comm) ;28 //comm=MPI_Comm_f2c(f_comm) ; 29 29 } 30 30 … … 34 34 35 35 fxios_oasis_get_intracomm(&f_comm,server_id.data(),server_id.size()) ; 36 comm_client_server=MPI_Comm_f2c(f_comm) ;36 //comm_client_server=MPI_Comm_f2c(f_comm) ; 37 37 } 38 38 … … 42 42 43 43 fxios_oasis_get_intercomm(&f_comm,server_id.data(),server_id.size()) ; 44 comm_client_server=MPI_Comm_f2c(f_comm) ;44 //comm_client_server=MPI_Comm_f2c(f_comm) ; 45 45 } 46 46 } -
XIOS/dev/branch_yushan/src/interface/fortran/idata.F90
r1037 r1053 476 476 477 477 !print*, "in fortran, world_f = ", MPI_COMM_WORLD 478 478 479 print*, "in fortran, f_return_comm = ", f_return_comm 479 480 -
XIOS/dev/branch_yushan/src/io/inetcdf4.cpp
r948 r1053 18 18 } 19 19 mpi = comm && !multifile; 20 MPI_Info m_info; 20 21 21 22 // The file format will be detected automatically by NetCDF, it is safe to always set NC_MPIIO 22 23 // even if Parallel NetCDF ends up being used. 23 24 if (mpi) 24 CNetCdfInterface::openPar(filename, NC_NOWRITE | NC_MPIIO, *comm, MPI_INFO_NULL, this->ncidp);25 CNetCdfInterface::openPar(filename, NC_NOWRITE | NC_MPIIO, *comm, m_info, this->ncidp); 25 26 else 26 27 CNetCdfInterface::open(filename, NC_NOWRITE, this->ncidp); -
XIOS/dev/branch_yushan/src/io/inetcdf4.hpp
r802 r1053 7 7 #include "array_new.hpp" 8 8 9 #include "mpi .hpp"9 #include "mpi_std.hpp" 10 10 #include "netcdf.hpp" 11 11 -
XIOS/dev/branch_yushan/src/io/nc4_data_output.cpp
r1037 r1053 26 26 CNc4DataOutput::CNc4DataOutput 27 27 (const StdString & filename, bool exist, bool useClassicFormat, bool useCFConvention, 28 MPI_Comm comm_file, bool multifile, bool isCollective, const StdString& timeCounterName)28 ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective, const StdString& timeCounterName) 29 29 : SuperClass() 30 30 , SuperClassWriter(filename, exist, useClassicFormat, useCFConvention, &comm_file, multifile, timeCounterName) … … 450 450 StdString domainName = domain->name; 451 451 domain->assignMesh(domainName, domain->nvertex); 452 domain->mesh->createMeshEpsilon(s erver->intraComm, domain->lonvalue_srv, domain->latvalue_srv, domain->bounds_lon_srv, domain->bounds_lat_srv);452 domain->mesh->createMeshEpsilon(static_cast<MPI_Comm>(server->intraComm.mpi_comm), domain->lonvalue_srv, domain->latvalue_srv, domain->bounds_lon_srv, domain->bounds_lat_srv); 453 453 454 454 StdString node_x = domainName + "_node_x"; -
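NetCDF (and HDF5 underneath it) still needs a genuine MPI_Comm, so wherever the branch hands a communicator to the I/O layer it unwraps the endpoint communicator through its mpi_comm member: static_cast<MPI_Comm>(server->intraComm.mpi_comm). The same pattern recurs in netcdf.hpp, onetcdf4.cpp, file.cpp and domain_algorithm_interpolate.cpp below. A tiny illustration with a hypothetical wrapper type, assuming an MPI implementation whose MPI_Comm is a pointer (the -D_openmpi flag in the arch files suggests Open MPI):

    #include <mpi.h>

    struct EpComm          // hypothetical stand-in for ep_lib::MPI_Comm
    {
      void* mpi_comm;      // the real communicator, stored opaquely
    };

    void passToNetcdf(const EpComm& intraComm)
    {
      // Unwrap before calling any library that expects a genuine MPI_Comm:
      MPI_Comm raw = static_cast<MPI_Comm>(intraComm.mpi_comm);
      (void)raw;           // e.g. nc_create_par(path, cmode, raw, MPI_INFO_NULL, &ncid);
    }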
XIOS/dev/branch_yushan/src/io/nc4_data_output.hpp
r887 r1053 27 27 (const StdString & filename, bool exist, bool useClassicFormat, 28 28 bool useCFConvention, 29 MPI_Comm comm_file, bool multifile, bool isCollective = true,29 ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective = true, 30 30 const StdString& timeCounterName = "time_counter"); 31 31 … … 116 116 117 117 /// Propriétés privées /// 118 MPI_Comm comm_file;118 ep_lib::MPI_Comm comm_file; 119 119 const StdString filename; 120 120 std::map<Time, StdSize> timeToRecordCache; -
XIOS/dev/branch_yushan/src/io/netCdfInterface.hpp
r1037 r1053 16 16 #endif 17 17 18 #include "mpi.hpp" 19 //#include <mpi.h> 18 #include "mpi_std.hpp" 20 19 #include "netcdf.hpp" 21 20 -
XIOS/dev/branch_yushan/src/io/netcdf.hpp
r685 r1053 1 1 #ifndef __XIOS_NETCDF_HPP__ 2 2 #define __XIOS_NETCDF_HPP__ 3 #include "mpi .hpp"3 #include "mpi_std.hpp" 4 4 #define MPI_INCLUDED 5 5 #include <netcdf.h> … … 18 18 extern "C" 19 19 { 20 #include <netcdf_par.h>20 #include <netcdf_par.h> 21 21 } 22 22 # endif … … 30 30 namespace xios 31 31 { 32 inline int nc_create_par(const char *path, int cmode, MPI_Comm comm, MPI_Info info,int *ncidp)32 inline int nc_create_par(const char *path, int cmode, ep_lib::MPI_Comm comm, MPI_Info info,int *ncidp) 33 33 { 34 34 #if defined(USING_NETCDF_PAR) 35 return ::nc_create_par(path, cmode, comm, info, ncidp) ;35 return ::nc_create_par(path, cmode, static_cast<MPI_Comm>(comm.mpi_comm), info, ncidp) ; 36 36 #else 37 37 ERROR("int nc_create_par(const char *path, int cmode, MPI_Comm comm, MPI_Info info,int *ncidp)", … … 41 41 } 42 42 43 inline int nc_open_par(const char *path, int mode, MPI_Comm comm, MPI_Info info,int *ncidp)43 inline int nc_open_par(const char *path, int mode, ep_lib::MPI_Comm comm, MPI_Info info,int *ncidp) 44 44 { 45 45 #if defined(USING_NETCDF_PAR) 46 return ::nc_open_par(path, mode, comm, info, ncidp) ;46 return ::nc_open_par(path, mode, static_cast<MPI_Comm>(comm.mpi_comm), info, ncidp) ; 47 47 #else 48 48 ERROR("int nc_open_par(const char *path, int mode, MPI_Comm comm, MPI_Info info,int *ncidp)", -
XIOS/dev/branch_yushan/src/io/onetcdf4.cpp
r1037 r1053 3 3 #include "onetcdf4.hpp" 4 4 #include "group_template.hpp" 5 //#include "mpi_std.hpp"6 5 #include "netcdf.hpp" 7 6 #include "netCdfInterface.hpp" … … 12 11 /// ////////////////////// Définitions ////////////////////// /// 13 12 14 CONetCDF4::CONetCDF4(const StdString& filename, bool append, bool useClassicFormat, 15 bool useCFConvention, 16 const MPI_Comm* comm, bool multifile, const StdString& timeCounterName) 13 CONetCDF4::CONetCDF4(const StdString& filename, bool append, bool useClassicFormat, bool useCFConvention, 14 const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName) 17 15 : path() 18 16 , wmpi(false) … … 32 30 33 31 void CONetCDF4::initialize(const StdString& filename, bool append, bool useClassicFormat, bool useCFConvention, 34 const MPI_Comm* comm, bool multifile, const StdString& timeCounterName)32 const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName) 35 33 { 36 34 this->useClassicFormat = useClassicFormat; … … 58 56 { 59 57 if (wmpi) 60 CNetCdfInterface::createPar(filename, mode, *comm, info_null, this->ncidp); 58 { 59 CNetCdfInterface::createPar(filename, mode, static_cast<MPI_Comm>(comm->mpi_comm), info_null, this->ncidp); 60 printf("creating file with createPar\n"); 61 } 61 62 else 63 { 62 64 CNetCdfInterface::create(filename, mode, this->ncidp); 65 printf("creating file with create\n"); 66 } 67 63 68 64 69 this->appendMode = false; … … 68 73 mode |= NC_WRITE; 69 74 if (wmpi) 70 CNetCdfInterface::openPar(filename, mode, *comm, info_null, this->ncidp); 75 { 76 CNetCdfInterface::openPar(filename, mode, static_cast<MPI_Comm>(comm->mpi_comm), info_null, this->ncidp); 77 printf("opening file with openPar\n"); 78 } 71 79 else 80 { 72 81 CNetCdfInterface::open(filename, mode, this->ncidp); 82 printf("opening file with open\n"); 83 } 73 84 74 85 this->appendMode = true; -
XIOS/dev/branch_yushan/src/io/onetcdf4.hpp
r1037 r1053 7 7 #include "data_output.hpp" 8 8 #include "array_new.hpp" 9 #include "mpi.hpp" 10 //#include <mpi.h> 9 #include "mpi_std.hpp" 11 10 #include "netcdf.hpp" 12 11 … … 29 28 CONetCDF4(const StdString& filename, bool append, bool useClassicFormat = false, 30 29 bool useCFConvention = true, 31 const MPI_Comm* comm = NULL, bool multifile = true,30 const ep_lib::MPI_Comm* comm = NULL, bool multifile = true, 32 31 const StdString& timeCounterName = "time_counter"); 33 32 … … 38 37 /// Initialisation /// 39 38 void initialize(const StdString& filename, bool append, bool useClassicFormat, bool useCFConvention, 40 const MPI_Comm* comm, bool multifile, const StdString& timeCounterName);39 const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName); 41 40 void close(void); 42 41 void sync(void); -
XIOS/dev/branch_yushan/src/mpi.hpp
r1037 r1053 12 12 13 13 #ifdef _usingEP 14 #include "../extern/src_ep /ep_lib.hpp"14 #include "../extern/src_ep_dev/ep_lib.hpp" 15 15 using namespace ep_lib; 16 16 #elif _usingMPI -
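src/mpi.hpp is the single switch between the two backends, and this hunk only retargets the include from extern/src_ep to extern/src_ep_dev. For orientation, the selection logic looks roughly like this; it is reconstructed from the fragments visible in this changeset (the include guard name is a guess), not quoted verbatim:

    // Approximate shape of src/mpi.hpp after this changeset.
    #ifndef __XIOS_MPI_HPP__
    #define __XIOS_MPI_HPP__

    #ifdef _usingEP
      // Endpoint MPI: MPI_* symbols resolve to the ep_lib wrappers, so OpenMP
      // threads can act as individual MPI endpoints.
      #include "../extern/src_ep_dev/ep_lib.hpp"
      using namespace ep_lib;
    #elif _usingMPI
      // Plain vendor MPI.
      #include <mpi.h>
    #endif

    #endif // __XIOS_MPI_HPP__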
XIOS/dev/branch_yushan/src/node/axis.cpp
r1037 r1053 742 742 CContextServer* server = CContext::getCurrent()->server; 743 743 axis->numberWrittenIndexes_ = axis->indexesToWrite.size(); 744 MPI_Allreduce(&axis->numberWrittenIndexes_, &axis->totalNumberWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm);745 MPI_Scan(&axis->numberWrittenIndexes_, &axis->offsetWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm);744 ep_lib::MPI_Allreduce(&axis->numberWrittenIndexes_, &axis->totalNumberWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm); 745 ep_lib::MPI_Scan(&axis->numberWrittenIndexes_, &axis->offsetWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm); 746 746 axis->offsetWrittenIndexes_ -= axis->numberWrittenIndexes_; 747 747 } -
XIOS/dev/branch_yushan/src/node/context.cpp
r1037 r1053 236 236 237 237 //! Initialize client side 238 void CContext::initClient( MPI_Comm intraComm,MPI_Comm interComm, CContext* cxtServer /*= 0*/)238 void CContext::initClient(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtServer /*= 0*/) 239 239 { 240 240 hasClient=true; … … 248 248 registryOut->setPath(getId()) ; 249 249 250 MPI_Comm intraCommServer, interCommServer;250 ep_lib::MPI_Comm intraCommServer, interCommServer; 251 251 if (cxtServer) // Attached mode 252 252 { … … 311 311 312 312 //! Initialize server 313 void CContext::initServer( MPI_Comm intraComm,MPI_Comm interComm, CContext* cxtClient /*= 0*/)313 void CContext::initServer(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtClient /*= 0*/) 314 314 { 315 315 hasServer=true; … … 323 323 registryOut->setPath(getId()) ; 324 324 325 MPI_Comm intraCommClient, interCommClient;325 ep_lib::MPI_Comm intraCommClient, interCommClient; 326 326 if (cxtClient) // Attached mode 327 327 { … … 369 369 closeAllFile(); 370 370 registryOut->hierarchicalGatherRegistry() ; 371 //registryOut->gatherRegistry() ; 371 372 if (server->intraCommRank==0) CXios::globalRegistry->mergeRegistry(*registryOut) ; 372 373 } 373 374 374 for (std::list< MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it)375 for (std::list<ep_lib::MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it) 375 376 MPI_Comm_free(&(*it)); 376 377 comms.clear(); -
XIOS/dev/branch_yushan/src/node/context.hpp
r1037 r1053 88 88 public : 89 89 // Initialize server or client 90 void initServer( MPI_Comm intraComm,MPI_Comm interComm, CContext* cxtClient = 0);91 void initClient( MPI_Comm intraComm,MPI_Comm interComm, CContext* cxtServer = 0);90 void initServer(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtClient = 0); 91 void initClient(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtServer = 0); 92 92 bool isInitialized(void); 93 93 … … 229 229 StdString idServer_; 230 230 CGarbageCollector garbageCollector; 231 std::list< MPI_Comm> comms; //!< Communicators allocated internally231 std::list<ep_lib::MPI_Comm> comms; //!< Communicators allocated internally 232 232 233 233 public: // Some function maybe removed in the near future -
XIOS/dev/branch_yushan/src/node/domain.cpp
r1037 r1053 475 475 { 476 476 CContext* context = CContext::getCurrent(); 477 477 CContextClient* client = context->client; 478 478 lon_g.resize(ni_glo) ; 479 479 lat_g.resize(nj_glo) ; -
XIOS/dev/branch_yushan/src/node/field_impl.hpp
r1037 r1053 20 20 if (clientSourceFilter) 21 21 { 22 printf("file_impl.hpp : clientSourceFilter->streamData\n"); 22 23 clientSourceFilter->streamData(CContext::getCurrent()->getCalendar()->getCurrentDate(), _data); 24 printf("file_impl.hpp : clientSourceFilter->streamData OKOK\n"); 23 25 } 24 26 else if (!field_ref.isEmpty() || !content.empty()) 27 { 25 28 ERROR("void CField::setData(const CArray<double, N>& _data)", 26 29 << "Impossible to receive data from the model for a field [ id = " << getId() << " ] with a reference or an arithmetic operation."); 30 } 27 31 } 28 32 -
XIOS/dev/branch_yushan/src/node/file.cpp
r1037 r1053 564 564 565 565 if (isOpen) data_out->closeFile(); 566 if (time_counter_name.isEmpty()) data_in = shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), fileComm, multifile, isCollective));567 else data_in = shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), fileComm, multifile, isCollective, time_counter_name));566 if (time_counter_name.isEmpty()) data_in = shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), static_cast<MPI_Comm>(fileComm.mpi_comm), multifile, isCollective)); 567 else data_in = shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), static_cast<MPI_Comm>(fileComm.mpi_comm), multifile, isCollective, time_counter_name)); 568 568 isOpen = true; 569 569 } -
XIOS/dev/branch_yushan/src/node/file.hpp
r1037 r1053 159 159 bool isOpen; 160 160 bool allDomainEmpty; 161 MPI_Comm fileComm;161 ep_lib::MPI_Comm fileComm; 162 162 163 163 private : -
XIOS/dev/branch_yushan/src/node/mesh.cpp
r1037 r1053 493 493 * \param [in] bounds_lat Array of boundary latitudes. Its size depends on the element type. 494 494 */ 495 void CMesh::createMeshEpsilon(const MPI_Comm& comm,495 void CMesh::createMeshEpsilon(const ep_lib::MPI_Comm& comm, 496 496 const CArray<double, 1>& lonvalue, const CArray<double, 1>& latvalue, 497 497 const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat) … … 1534 1534 */ 1535 1535 1536 void CMesh::getGloNghbFacesNodeType(const MPI_Comm& comm, const CArray<int, 1>& face_idx,1536 void CMesh::getGloNghbFacesNodeType(const ep_lib::MPI_Comm& comm, const CArray<int, 1>& face_idx, 1537 1537 const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat, 1538 1538 CArray<int, 2>& nghbFaces) … … 1690 1690 */ 1691 1691 1692 void CMesh::getGloNghbFacesEdgeType(const MPI_Comm& comm, const CArray<int, 1>& face_idx,1692 void CMesh::getGloNghbFacesEdgeType(const ep_lib::MPI_Comm& comm, const CArray<int, 1>& face_idx, 1693 1693 const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat, 1694 1694 CArray<int, 2>& nghbFaces) … … 1871 1871 */ 1872 1872 1873 void CMesh::getGlobalNghbFaces(const int nghbType, const MPI_Comm& comm,1873 void CMesh::getGlobalNghbFaces(const int nghbType, const ep_lib::MPI_Comm& comm, 1874 1874 const CArray<int, 1>& face_idx, 1875 1875 const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat, -
XIOS/dev/branch_yushan/src/node/mesh.hpp
r931 r1053 60 60 const CArray<double, 2>&, const CArray<double, 2>& ); 61 61 62 void createMeshEpsilon(const MPI_Comm&,62 void createMeshEpsilon(const ep_lib::MPI_Comm&, 63 63 const CArray<double, 1>&, const CArray<double, 1>&, 64 64 const CArray<double, 2>&, const CArray<double, 2>& ); 65 65 66 void getGlobalNghbFaces(const int, const MPI_Comm&, const CArray<int, 1>&,66 void getGlobalNghbFaces(const int, const ep_lib::MPI_Comm&, const CArray<int, 1>&, 67 67 const CArray<double, 2>&, const CArray<double, 2>&, 68 68 CArray<int, 2>&); … … 84 84 CClientClientDHTSizet* pNodeGlobalIndex; // pointer to a map <nodeHash, nodeIdxGlo> 85 85 CClientClientDHTSizet* pEdgeGlobalIndex; // pointer to a map <edgeHash, edgeIdxGlo> 86 void getGloNghbFacesNodeType(const MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&);87 void getGloNghbFacesEdgeType(const MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&);86 void getGloNghbFacesNodeType(const ep_lib::MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&); 87 void getGloNghbFacesEdgeType(const ep_lib::MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&); 88 88 void getLocNghbFacesNodeType(const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&, CArray<int, 1>&); 89 89 void getLocNghbFacesEdgeType(const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&, CArray<int, 1>&); -
XIOS/dev/branch_yushan/src/policy.hpp
r855 r1053 31 31 { 32 32 protected: 33 DivideAdaptiveComm(const MPI_Comm& mpiComm);33 DivideAdaptiveComm(const ep_lib::MPI_Comm& mpiComm); 34 34 35 35 void computeMPICommLevel(); … … 41 41 42 42 protected: 43 const MPI_Comm& internalComm_;43 const ep_lib::MPI_Comm& internalComm_; 44 44 std::vector<std::vector<int> > groupParentsBegin_; 45 45 std::vector<std::vector<int> > nbInGroupParents_; -
XIOS/dev/branch_yushan/src/registry.cpp
r1037 r1053 1 1 #include "registry.hpp" 2 2 #include "type.hpp" 3 #include <mpi.hpp>4 3 #include <fstream> 5 4 #include <sstream> … … 261 260 void CRegistry::hierarchicalGatherRegistry(void) 262 261 { 263 //hierarchicalGatherRegistry(communicator) ;262 hierarchicalGatherRegistry(communicator) ; 264 263 } 265 264 … … 288 287 if (mpiRank==0 || mpiRank==mpiSize/2+mpiSize%2) color=0 ; 289 288 else color=1 ; 289 290 290 MPI_Comm_split(comm,color,mpiRank,&commDown) ; 291 291 292 if (color==0) gatherRegistry(commDown) ; 293 printf("gatherRegistry OKOK\n"); 292 294 MPI_Comm_free(&commDown) ; 293 295 } -
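hierarchicalGatherRegistry is re-enabled here (the previously commented-out call is restored) and a temporary "gatherRegistry OKOK" print is added after the lower-half gather. The visible part of the algorithm splits the communicator so that only rank 0 and the middle rank get colour 0, and the registry gather then runs on that small communicator. Below is a plain-MPI sketch of just that split step, with the real CRegistry::gatherRegistry replaced by a placeholder:

    #include <mpi.h>

    void gatherPlaceholder(MPI_Comm) { /* merge registries across this communicator */ }

    // Ranks 0 and mid form the colour-0 communicator on which the gather happens;
    // the real code recurses so each half is already collected onto its leader first.
    void leadersExchange(MPI_Comm comm)
    {
      int rank, size;
      MPI_Comm_rank(comm, &rank);
      MPI_Comm_size(comm, &size);

      int mid = size / 2 + size % 2;
      int color = (rank == 0 || rank == mid) ? 0 : 1;

      MPI_Comm commDown;
      MPI_Comm_split(comm, color, rank, &commDown);
      if (color == 0) gatherPlaceholder(commDown);
      MPI_Comm_free(&commDown);
    }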
XIOS/dev/branch_yushan/src/registry.hpp
r1037 r1053 28 28 /** Constructor, the communicator is used for bcast or gather operation between MPI processes */ 29 29 30 CRegistry(const MPI_Comm& comm=MPI_COMM_WORLD) : communicator(comm) {}30 CRegistry(const ep_lib::MPI_Comm& comm=MPI_COMM_WORLD) : communicator(comm) {} 31 31 32 32 … … 127 127 128 128 /** MPI communicator used for broadcast and gather operation */ 129 MPI_Comm communicator ;129 ep_lib::MPI_Comm communicator ; 130 130 } ; 131 131 -
XIOS/dev/branch_yushan/src/test/test_client.f90
r1037 r1053 42 42 43 43 CALL MPI_COMM_RANK(comm,rank,ierr) 44 print*, "test_client MPI_COMM_RANK OK" 44 print*, "test_client MPI_COMM_RANK OK", rank 45 45 CALL MPI_COMM_SIZE(comm,size,ierr) 46 print*, "test_client MPI_COMM_SIZE OK", size 46 47 47 48 … … 138 139 PRINT*,"field field_A is active ? ",xios_field_is_active("field_A") 139 140 !DO ts=1,24*10 140 DO ts=1, 24141 DO ts=1,6 141 142 CALL xios_update_calendar(ts) 142 143 print*, "xios_update_calendar OK, ts = ", ts -
XIOS/dev/branch_yushan/src/transformation/axis_algorithm_inverse.cpp
r1037 r1053 173 173 174 174 // Sending global index of grid source to corresponding process as well as the corresponding mask 175 std::vector< MPI_Request> requests;176 std::vector< MPI_Status> status;175 std::vector<ep_lib::MPI_Request> requests; 176 std::vector<ep_lib::MPI_Status> status; 177 177 boost::unordered_map<int, unsigned long* > recvGlobalIndexSrc; 178 178 boost::unordered_map<int, double* > sendValueToDest; … … 184 184 sendValueToDest[recvRank] = new double [recvSize]; 185 185 186 requests.push_back( MPI_Request());186 requests.push_back(ep_lib::MPI_Request()); 187 187 MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back()); 188 188 } … … 206 206 207 207 // Send global index source and mask 208 requests.push_back( MPI_Request());208 requests.push_back(ep_lib::MPI_Request()); 209 209 MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back()); 210 210 } … … 215 215 //printf("(%d) src/transformation/axis_algorithm_inverse 1st waitall OK\n", clientRank); 216 216 217 std::vector< MPI_Request>().swap(requests);218 std::vector< MPI_Status>().swap(status);217 std::vector<ep_lib::MPI_Request>().swap(requests); 218 std::vector<ep_lib::MPI_Status>().swap(status); 219 219 220 220 // Okie, on destination side, we will wait for information of masked index of source … … 224 224 int recvSize = itSend->second; 225 225 226 requests.push_back( MPI_Request());226 requests.push_back(ep_lib::MPI_Request()); 227 227 MPI_Irecv(recvValueFromSrc[recvRank], recvSize, MPI_DOUBLE, recvRank, 48, client->intraComm, &requests.back()); 228 228 } … … 242 242 } 243 243 // Okie, now inform the destination which source index are masked 244 requests.push_back( MPI_Request());244 requests.push_back(ep_lib::MPI_Request()); 245 245 MPI_Isend(sendValueToDest[recvRank], recvSize, MPI_DOUBLE, recvRank, 48, client->intraComm, &requests.back()); 246 246 } -
XIOS/dev/branch_yushan/src/transformation/axis_algorithm_inverse.hpp
r933 r1053 12 12 #include "axis_algorithm_transformation.hpp" 13 13 #include "transformation.hpp" 14 14 #ifdef _usingEP 15 #include "ep_declaration.hpp" 16 #endif 17 15 18 namespace xios { 16 19 -
XIOS/dev/branch_yushan/src/transformation/domain_algorithm_interpolate.cpp
r1037 r1053 371 371 CContextClient* client=context->client; 372 372 373 MPI_Comm poleComme(MPI_COMM_NULL);374 MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? MPI_UNDEFINED : 1, 0, &poleComme);373 ep_lib::MPI_Comm poleComme(MPI_COMM_NULL); 374 ep_lib::MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? MPI_UNDEFINED : 1, 0, &poleComme); 375 375 if (MPI_COMM_NULL != poleComme) 376 376 { 377 377 int nbClientPole; 378 MPI_Comm_size(poleComme, &nbClientPole);378 ep_lib::MPI_Comm_size(poleComme, &nbClientPole); 379 379 380 380 std::map<int,std::vector<std::pair<int,double> > >::iterator itePole = interMapValuePole.end(), itPole, … … 541 541 double* sendWeightBuff = new double [sendBuffSize]; 542 542 543 std::vector< MPI_Request> sendRequest;543 std::vector<ep_lib::MPI_Request> sendRequest; 544 544 545 545 int sendOffSet = 0, l = 0; … … 562 562 } 563 563 564 sendRequest.push_back( MPI_Request());564 sendRequest.push_back(ep_lib::MPI_Request()); 565 565 MPI_Isend(sendIndexDestBuff + sendOffSet, 566 566 k, … … 570 570 client->intraComm, 571 571 &sendRequest.back()); 572 sendRequest.push_back( MPI_Request());572 sendRequest.push_back(ep_lib::MPI_Request()); 573 573 MPI_Isend(sendIndexSrcBuff + sendOffSet, 574 574 k, … … 578 578 client->intraComm, 579 579 &sendRequest.back()); 580 sendRequest.push_back( MPI_Request());580 sendRequest.push_back(ep_lib::MPI_Request()); 581 581 MPI_Isend(sendWeightBuff + sendOffSet, 582 582 k, … … 597 597 while (receivedSize < recvBuffSize) 598 598 { 599 MPI_Status recvStatus;599 ep_lib::MPI_Status recvStatus; 600 600 MPI_Recv((recvIndexDestBuff + receivedSize), 601 601 recvBuffSize, … … 637 637 } 638 638 639 std::vector<MPI_Status> requestStatus(sendRequest.size()); 640 MPI_Waitall(sendRequest.size(), &sendRequest[0], MPI_STATUS_IGNORE); 639 std::vector<ep_lib::MPI_Status> requestStatus(sendRequest.size()); 640 ep_lib::MPI_Status stat_ignore; 641 MPI_Waitall(sendRequest.size(), &sendRequest[0], &stat_ignore); 642 //MPI_Waitall(sendRequest.size(), &sendRequest[0], MPI_STATUS_IGNORE); 641 643 642 644 delete [] sendIndexDestBuff; … … 724 726 725 727 MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, MPI_LONG, MPI_SUM, client->intraComm); 726 MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, client->intraComm);728 ep_lib::MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, client->intraComm); 727 729 728 730 std::vector<StdSize> start(1, startIndex - localNbWeight); 729 731 std::vector<StdSize> count(1, localNbWeight); 730 732 731 WriteNetCdf netCdfWriter(filename, client->intraComm);733 WriteNetCdf netCdfWriter(filename, static_cast<MPI_Comm>(client->intraComm.mpi_comm)); 732 734 733 735 // netCdfWriter = CONetCDF4(filename, false, false, true, client->intraComm, false); -
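One detail here goes beyond type renaming: the MPI_Waitall that ends the weight exchange no longer passes MPI_STATUS_IGNORE but a concrete ep_lib::MPI_Status object, presumably because the endpoint wrappers do not accept the ignore constant (an assumption; the old call is kept as a comment). For reference, the two standard-MPI ways of writing that completion, noting that for Waitall the dedicated constant is MPI_STATUSES_IGNORE:

    #include <mpi.h>
    #include <vector>

    void completeAll(std::vector<MPI_Request>& sendRequest)
    {
      if (sendRequest.empty()) return;

      // Either collect one status per request...
      std::vector<MPI_Status> status(sendRequest.size());
      MPI_Waitall(static_cast<int>(sendRequest.size()), &sendRequest[0], &status[0]);

      // ...or, when the statuses are not needed:
      // MPI_Waitall(static_cast<int>(sendRequest.size()), &sendRequest[0], MPI_STATUSES_IGNORE);
    }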
XIOS/dev/branch_yushan/src/transformation/domain_algorithm_interpolate.hpp
r1037 r1053 13 13 #include "transformation.hpp" 14 14 #include "nc4_data_output.hpp" 15 #ifdef _usingEP 16 #include "ep_declaration.hpp" 17 #endif 15 18 16 19 namespace xios { -
XIOS/dev/branch_yushan/src/transformation/grid_transformation.cpp
r1037 r1053 475 475 476 476 // Sending global index of grid source to corresponding process as well as the corresponding mask 477 std::vector< MPI_Request> requests;478 std::vector< MPI_Status> status;477 std::vector<ep_lib::MPI_Request> requests; 478 std::vector<ep_lib::MPI_Status> status; 479 479 boost::unordered_map<int, unsigned char* > recvMaskDst; 480 480 boost::unordered_map<int, unsigned long* > recvGlobalIndexSrc; … … 486 486 recvGlobalIndexSrc[recvRank] = new unsigned long [recvSize]; 487 487 488 requests.push_back( MPI_Request());488 requests.push_back(ep_lib::MPI_Request()); 489 489 MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back()); 490 requests.push_back( MPI_Request());490 requests.push_back(ep_lib::MPI_Request()); 491 491 MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests.back()); 492 492 } … … 524 524 525 525 // Send global index source and mask 526 requests.push_back( MPI_Request());526 requests.push_back(ep_lib::MPI_Request()); 527 527 MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back()); 528 requests.push_back( MPI_Request());528 requests.push_back(ep_lib::MPI_Request()); 529 529 MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests.back()); 530 530 } … … 536 536 537 537 // Okie, now use the mask to identify which index source we need to send, then also signal the destination which masked index we will return 538 std::vector< MPI_Request>().swap(requests);539 std::vector< MPI_Status>().swap(status);538 std::vector<ep_lib::MPI_Request>().swap(requests); 539 std::vector<ep_lib::MPI_Status>().swap(status); 540 540 // Okie, on destination side, we will wait for information of masked index of source 541 541 for (std::map<int,int>::const_iterator itSend = sendRankSizeMap.begin(); itSend != sendRankSizeMap.end(); ++itSend) … … 544 544 int recvSize = itSend->second; 545 545 546 requests.push_back( MPI_Request());546 requests.push_back(ep_lib::MPI_Request()); 547 547 MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back()); 548 548 } … … 581 581 582 582 // Okie, now inform the destination which source index are masked 583 requests.push_back( MPI_Request());583 requests.push_back(ep_lib::MPI_Request()); 584 584 MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back()); 585 585 }