Changeset 1067

Timestamp: 03/08/17 16:55:00 (8 years ago)
Location: XIOS/dev/branch_yushan
Files: 16 edited
Legend: unchanged lines are unmarked, added lines are prefixed with "+", removed lines with "-", and "…" marks elided context between hunks.
XIOS/dev/branch_yushan/bld.cfg
r1060 → r1067

      #test_remap.exe
      #bld::target test_new_features.exe test_unstruct_complete.exe
    - bld::target test_client.exe #test_complete.exe
    + bld::target test_client.exe test_complete.exe
      bld::exe_dep
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_dup.cpp
r1063 → r1067

      namespace ep_lib
      {
    -
    -   int MPI_Comm_dup_dev(MPI_Comm comm, MPI_Comm *newcomm)
    -   {
    -     if(!comm.is_ep)
    -     {
    -       Debug("Comm_dup MPI\n");
    -       newcomm = new MPI_Comm;
    -       newcomm->is_ep = comm.is_ep;
    -
    -       ::MPI_Comm input = static_cast< ::MPI_Comm>(comm.mpi_comm);
    -       ::MPI_Comm output;
    -
    -       ::MPI_Comm_dup(input, &output);
    -
    -       newcomm->mpi_comm = output;
    -
    -       return 0;
    -     }
    -
    -     if(!comm.mpi_comm) return 0;
    -
    -     int my_rank = comm.ep_comm_ptr->size_rank_info[1].first;
    -     int num_ep  = comm.ep_comm_ptr->size_rank_info[1].second;
    -
    -     if(0 == my_rank)
    -     {
    -       MPI_Info info;
    -       ::MPI_Comm mpi_dup;
    -
    -       ::MPI_Comm in_comm = static_cast< ::MPI_Comm>(comm.mpi_comm);
    -
    -       ::MPI_Comm_dup(in_comm, &mpi_dup);
    -
    -       MPI_Comm_create_endpoints(mpi_dup, num_ep, info, newcomm);
    -       comm.ep_comm_ptr->comm_list->mem_bridge = newcomm;
    -     }
    -
    -     MPI_Barrier_local(comm);
    -
    -     newcomm = &(comm.ep_comm_ptr->comm_list->mem_bridge[my_rank]);
    -
    -     return MPI_SUCCESS;
    -   }
    -
      int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *newcomm)
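For reference, the duplication that the removed MPI_Comm_dup_dev() wrapper performed on the underlying communicator corresponds to the standard MPI_Comm_dup call. A minimal standalone sketch using only the plain MPI C API (not the ep_lib wrappers) might look like this:

    // Minimal sketch of plain MPI_Comm_dup usage (standard MPI API, not ep_lib).
    // Build with an MPI compiler wrapper, e.g. mpicxx.
    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);

      MPI_Comm dup;                        // duplicate with its own communication context
      MPI_Comm_dup(MPI_COMM_WORLD, &dup);

      int rank;
      MPI_Comm_rank(dup, &rank);
      std::printf("rank %d on duplicated communicator\n", rank);

      MPI_Comm_free(&dup);                 // duplicates must be freed explicitly
      MPI_Finalize();
      return 0;
    }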
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_fortran.cpp
r1060 → r1067

    - //#include "ep_lib.hpp"
    + #include "ep_lib.hpp"
      #include "ep_lib_fortran.hpp"
      #include <mpi.h>
      #include <map>
      #include <utility>
    + #include "ep_declaration.hpp"

      #ifdef _intelmpi
    …
      fint = ::MPI_Comm_c2f(static_cast< ::MPI_Comm>(comm.mpi_comm));
      #endif
    - std::map<std::pair<int, int>, MPI_Comm […]
    + std::map<std::pair<int, int>, MPI_Comm > ::iterator it;

      it = fc_comm_map.find(std::make_pair(fint, omp_get_thread_num()));
    …
      {
        fc_comm_map.insert(std::make_pair( std::make_pair( fint, omp_get_thread_num()) , comm));
    -   //printf("EP_Comm_c2f : MAP insert: %d, %d, %p\n", fint, omp_get_thread_num(), &comm);
    +   printf("EP_Comm_c2f : MAP insert: %d, %d, %p\n", fint, omp_get_thread_num(), &comm);
      }
    …
    - std::map<std::pair<int, int>, MPI_Comm […]
    + std::map<std::pair<int, int>, MPI_Comm > ::iterator it;

      it = fc_comm_map.find(std::make_pair(comm, omp_get_thread_num()));
    …
      {
        MPI_Comm comm_ptr;
    -   comm_ptr = […]
    -   //printf("EP_Comm_f2c : MAP find: %d, %d, %p\n", it->first.first, it->first.second, &comm_ptr);
    +   comm_ptr = it->second;
    +   printf("EP_Comm_f2c : MAP find: %d, %d, %p\n", it->first.first, it->first.second, it->second);
        return comm_ptr;
      }
    …
      {
        MPI_Comm return_comm;
    -   return_comm.mpi_comm = ::MPI_Comm_f2c(comm);
    +   if(omp_get_thread_num() == 0)
    +   {
    +     ::MPI_Comm base_comm = ::MPI_Comm_f2c(comm);
    +     if(base_comm != MPI_COMM_NULL_STD)
    +     {
    +       int num_ep = omp_get_num_threads();
    +       MPI_Comm *new_comm;
    +       MPI_Info info;
    +       MPI_Comm_create_endpoints(base_comm, num_ep, info, new_comm);
    +       return_comm = new_comm[omp_get_thread_num()];
    +     }
    +     return MPI_COMM_NULL;
    +   }
    +
        return return_comm;
      }
    …
      fint = ::MPI_Comm_c2f(static_cast< ::MPI_Comm>(comm.mpi_comm));

    - std::map<std::pair<int, int>, MPI_Comm […]
    + std::map<std::pair<int, int>, MPI_Comm > ::iterator it;

      it = fc_comm_map.find(std::make_pair(fint, omp_get_thread_num()));
    …
    - std::map<std::pair<int, int>, MPI_Comm […]
    + std::map<std::pair<int, int>, MPI_Comm > ::iterator it;

      it = fc_comm_map.find(std::make_pair(comm, omp_get_thread_num()));
    …
        MPI_Comm comm_ptr;
        comm_ptr = it->second;
    -   printf("MAP find: %d, %d, %p\n", it->first.first, it->first.second, &comm_ptr);
    +   printf("MAP find: %d, %d, %p\n", it->first.first, it->first.second, comm_ptr);
        return comm_ptr;
      }

(The "[…]" markers stand for tails of removed lines that are not recoverable from the flattened diff.)
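The fc_comm_map used above caches one communicator per (Fortran integer handle, OpenMP thread id) pair, so that each endpoint thread resolves its own communicator. A minimal sketch of that lookup pattern, with an illustrative stand-in type instead of the ep_lib MPI_Comm:

    // Sketch of a per-(handle, thread) communicator cache; Comm is a toy
    // stand-in, not the ep_lib type. Build with OpenMP enabled (e.g. -fopenmp).
    #include <cstdio>
    #include <map>
    #include <utility>
    #include <omp.h>

    struct Comm { int payload; };                          // stand-in for ep_lib's MPI_Comm

    static std::map<std::pair<int, int>, Comm> comm_map;   // <Fortran handle, thread id> -> Comm

    int main()
    {
      const int fint = 42;                                 // pretend Fortran handle

      #pragma omp parallel
      {
        int tid = omp_get_thread_num();
        #pragma omp critical (comm_map_access)             // std::map is not thread-safe
        {
          std::pair<int, int> key(fint, tid);
          if (comm_map.find(key) == comm_map.end())
            comm_map.insert(std::make_pair(key, Comm{tid}));
          std::printf("thread %d -> payload %d\n", tid, comm_map[key].payload);
        }
      }
      return 0;
    }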
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_intercomm_kernel.cpp
r1053 → r1067

      #pragma omp critical (write_to_tag_list)
      tag_list.push_back(make_pair( make_pair(tag, min(leader_info[0], leader_info[1])) , ep_intercomm));
    + printf("tag_list size = %lu\n", tag_list.size());
      }
    …
      *newintercomm = iter->second[my_position];
      found = true;
    + tag_list.erase(iter);
      break;
      }
    …
      MPI_Barrier_local(local_comm);
    +
    + // if(is_proc_master)
    + // {
    + //   for(std::list<std::pair < std::pair<int,int>, MPI_Comm* > >::iterator iter = tag_list.begin(); iter!=tag_list.end(); iter++)
    + //   {
    + //     if((*iter).first == make_pair(tag, min(leader_info[0], leader_info[1])))
    + //     {
    + //       tag_list.erase(iter);
    + //       break;
    + //     }
    + //   }
    + // }

      int intercomm_ep_rank, intercomm_ep_rank_loc, intercomm_mpi_rank;
    …
      *newintercomm = iter->second[my_position];
      found = true;
    + tag_list.erase(iter);
      break;
      }
    …
      }
      }
    +
    + // if(leader_rank_in_peer[0] < leader_rank_in_peer[1])
    + // {
    + //   for(std::list<std::pair < std::pair<int,int>, MPI_Comm* > >::iterator iter = tag_list.begin(); iter!=tag_list.end(); iter++)
    + //   {
    + //     if((*iter).first == make_pair(tag_label[0], tag_label[1]))
    + //     {
    + //       tag_list.erase(iter);
    + //     }
    + //   }
    + // }
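The new tag_list.erase(iter) calls remove the matched entry directly inside the search loop, which makes the commented-out second cleanup pass unnecessary; std::list::erase invalidates only the erased iterator, so breaking out of the loop immediately afterwards is safe. A minimal sketch of that erase-and-break pattern (toy data, not the XIOS tag_list type):

    // Erase the matched element and stop, instead of rescanning the list later.
    #include <cstdio>
    #include <list>
    #include <utility>

    int main()
    {
      std::list<std::pair<int, int>> tag_list = { {1, 10}, {2, 20}, {3, 30} };
      const std::pair<int, int> wanted(2, 20);

      for (auto iter = tag_list.begin(); iter != tag_list.end(); ++iter)
      {
        if (*iter == wanted)
        {
          tag_list.erase(iter);   // iter is now invalid ...
          break;                  // ... so leave the loop immediately
        }
      }

      std::printf("remaining entries: %zu\n", tag_list.size());
      return 0;
    }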
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_intercomm_world.cpp
r1053 → r1067

      found = true;
    + tag_list.erase(iter);
      break;
      }
    …
      MPI_Barrier_local(local_comm);
    +
    + // if(is_proc_master)
    + // {
    + //   for(std::list<std::pair < std::pair<int,int>, MPI_Comm* > >::iterator iter = tag_list.begin(); iter!=tag_list.end(); iter++)
    + //   {
    + //     if((*iter).first == make_pair(tag, min(leader_info[0], leader_info[1])))
    + //     {
    + //       tag_list.erase(iter);
    + //     }
    + //   }
    + // }

      int intercomm_ep_rank, intercomm_ep_rank_loc, intercomm_mpi_rank;
    …
      found = true;
    + tag_list.erase(iter);
      break;
      }
    …
      MPI_Barrier_local(local_comm);
    +
    + // if(is_proc_master)
    + // {
    + //   for(std::list<std::pair < std::pair<int,int>, MPI_Comm* > >::iterator iter = tag_list.begin(); iter!=tag_list.end(); iter++)
    + //   {
    + //     if((*iter).first == make_pair(tag, min(leader_info[0], leader_info[1])))
    + //     {
    + //       tag_list.erase(iter);
    + //     }
    + //   }
    + // }

      int intercomm_ep_rank, intercomm_ep_rank_loc, intercomm_mpi_rank;
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_message.cpp
r1037 → r1067

    (no textual difference is recoverable from the flattened hunk; old and new line numbers are identical, so the change appears to be whitespace-only)

      if(comm.is_intercomm)
      {
        return Message_Check_intercomm(comm);
      }
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_test.cpp
r1053 → r1067

      status->ep_tag = request->ep_tag;
      status->ep_datatype = request->ep_datatype;
    - int count;
    - MPI_Get_count(status, request->ep_datatype, &count);
    + //int count;
    + //MPI_Get_count(status, request->ep_datatype, &count);
      //check_sum_recv(request->buf, count, request->ep_datatype, request->ep_src, request->ep_tag, request->comm, 2);
      }
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_type.hpp
r1053 → r1067

      MPI_Comm()
      {
    -   is_ep = false;
    +   is_ep = true;
        is_intercomm = false;
        my_buffer = NULL;
    …
      bool c = mpi_comm != right.mpi_comm;
      bool d = is_ep ? ep_comm_ptr != right.ep_comm_ptr : true;
    +
      return a||b||c||d;
      }
    …
      static std::list<std::pair<std::pair<int, int>, MPI_Comm * > > tag_list;

    - static std::map<std::pair<int, int>, MPI_Comm […]
    + static std::map<std::pair<int, int>, MPI_Comm > fc_comm_map;
      // <MPI_Fint,thread_num> EP_Comm

(The "[…]" marker stands for the tail of a removed line that is not recoverable from the flattened diff.)
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_wait.cpp
r1053 → r1067

      status->ep_datatype = request->ep_datatype;

    - int count;
    - MPI_Get_count(status, request->ep_datatype, &count);
    + //int count;
    + //MPI_Get_count(status, request->ep_datatype, &count);
      //check_sum_recv(request->buf, count, request->ep_datatype, request->ep_src, request->ep_tag, request->comm, 2);
      }
    …
      if(array_of_requests[i].type == 3)
      {
    -   int check_count;
    -   MPI_Get_count(&array_of_statuses[i], array_of_requests[i].ep_datatype, &check_count);
    +   //int check_count;
    +   //MPI_Get_count(&array_of_statuses[i], array_of_requests[i].ep_datatype, &check_count);
        //check_sum_recv(array_of_requests[i].buf, count, array_of_requests[i].ep_datatype, array_of_requests[i].ep_src, array_of_requests[i].ep_tag, array_of_requests[i].comm, 2);
      }
XIOS/dev/branch_yushan/src/client.cpp
r1063 → r1067

      int myColor ;
      int i,c ;
    - MPI_Comm newComm ;
    + //MPI_Comm newComm ;

      MPI_Comm_size(CXios::globalComm,&size) ;
    …
      MPI_Send(buff,buffer.count(),MPI_CHAR,serverLeader,1,CXios::globalComm) ;
      delete [] buff ;
    -
    - //printf("====== Client: begin context_init \n");

      MPI_Intercomm_create(contextComm,0,CXios::globalComm,serverLeader,10+globalRank,&contextInterComm) ;
      info(10)<<"Register new Context : "<<id<<endl ;
    -
    - //cout<<"Register new Context : "<<id<<endl ;
    -
    -
    - // MPI_Comm inter ;
    - // MPI_Intercomm_merge(contextInterComm,0,&inter) ;
    - // MPI_Barrier(inter) ;
    +
    +
    + MPI_Comm inter ;
    + MPI_Intercomm_merge(contextInterComm,0,&inter) ;
    + MPI_Barrier(inter) ;


      context->initClient(contextComm,contextInterComm) ;

    - //printf("====== Client: context_init OK\n");

      contextInterComms.push_back(contextInterComm);
    - //MPI_Comm_free(&inter);
    + MPI_Comm_free(&inter);
      }
      else
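The client now actually performs the MPI_Intercomm_merge / MPI_Barrier / MPI_Comm_free sequence after MPI_Intercomm_create instead of leaving it commented out. A minimal standalone sketch of that sequence with plain MPI calls (the XIOS communicators are replaced here by a split of MPI_COMM_WORLD; run with at least 2 ranks):

    // Two groups create an intercommunicator, merge it, synchronise, and clean up.
    #include <mpi.h>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);

      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      // Split the world into two groups playing "client" and "server".
      int color = rank % 2;
      MPI_Comm localComm;
      MPI_Comm_split(MPI_COMM_WORLD, color, rank, &localComm);

      // World rank of the other group's leader: rank 1 for group 0, rank 0 for group 1.
      int remoteLeader = (color == 0) ? 1 : 0;

      MPI_Comm interComm, merged;
      MPI_Intercomm_create(localComm, 0, MPI_COMM_WORLD, remoteLeader, /*tag=*/10, &interComm);
      MPI_Intercomm_merge(interComm, /*high=*/color, &merged);  // collective over both groups
      MPI_Barrier(merged);                                      // synchronise both sides

      MPI_Comm_free(&merged);
      MPI_Comm_free(&interComm);
      MPI_Comm_free(&localComm);
      MPI_Finalize();
      return 0;
    }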
XIOS/dev/branch_yushan/src/context_server.cpp
r1063 → r1067

      {
        traceOff();
    -   MPI_Iprobe(rank,20,interComm,&flag,&status);
    +   ep_lib::MPI_Iprobe(rank,20,interComm,&flag,&status);
        traceOn();
        if (flag==true)
    …
        {
          StdSize buffSize = 0;
    -     MPI_Recv(&buffSize, 1, MPI_LONG, rank, 20, interComm, &status);
    +     ep_lib::MPI_Recv(&buffSize, 1, MPI_LONG, rank, 20, interComm, &status);
          mapBufferSize_.insert(std::make_pair(rank, buffSize));
          it=(buffers.insert(pair<int,CServerBuffer*>(rank,new CServerBuffer(buffSize)))).first;
    …
        {
    -     MPI_Get_count(&status,MPI_CHAR,&count);
    +     ep_lib::MPI_Get_count(&status,MPI_CHAR,&count);
          if (it->second->isBufferFree(count))
          {
    …
      ep_lib::MPI_Status status;

    - //printf("enter checkPendingRequest\n");
      if(!pendingRequest.empty())
      for(it=pendingRequest.begin();it!=pendingRequest.end();++it)
    …
        rank=it->first;
        traceOff();
    -   MPI_Test(& it->second, &flag, &status);
    +   ep_lib::MPI_Test(& it->second, &flag, &status);
        traceOn();
        if (flag==true)
        {
          recvRequest.push_back(rank);
    -     MPI_Get_count(&status,MPI_CHAR,&count);
    +     ep_lib::MPI_Get_count(&status,MPI_CHAR,&count);
          processRequest(rank,bufferRequest[rank],count);
        }
    …
      CBufferIn buffer(buff,count);
    - char* startBuffer,endBuffer;
    - int size, offset;
    + //char* startBuffer,endBuffer;
    + int size;
    + //int offset;
      size_t timeLine;
      map<size_t,CEventServer*>::iterator it;
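The calls above are now written with an explicit ep_lib:: prefix so that the endpoint wrappers are selected rather than the global MPI symbols of the same name. A toy sketch of why explicit qualification matters; the function name and bodies here are illustrative stand-ins, not the real ep_lib API:

    // Two functions with the same name: a global one and one inside a wrapper
    // namespace. Qualifying the call picks the wrapper unambiguously.
    #include <cstdio>

    int MPI_Probe_like(int source) { std::printf("global version, source=%d\n", source); return 0; }

    namespace ep_lib
    {
      int MPI_Probe_like(int source) { std::printf("ep_lib version, source=%d\n", source); return 0; }
    }

    int main()
    {
      MPI_Probe_like(1);          // global (plain MPI-style) function
      ep_lib::MPI_Probe_like(1);  // explicitly the endpoint wrapper
      return 0;
    }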
XIOS/dev/branch_yushan/src/interface/c/icdata.cpp
r1060 → r1067

      /* ************************************************************************** *
    - * Copyright ©IPSL/LSCE, xios, Avril 2010 - Octobre 2011 *
    + * Copyright IPSL/LSCE, xios, Avril 2010 - Octobre 2011 *
      * ************************************************************************** */
    …
      extern "C"
      {
    - // /////////////////////////////// Définitions ////////////////////////////// //
    -
    - // ----------------------- Redéfinition de types ----------------------------
    + // /////////////////////////////// Dfinitions ////////////////////////////// //
    +
    + // ----------------------- Redfinition de types ----------------------------

      typedef enum { NETCDF4 = 0 } XFileType;

      typedef xios::CContext* XContextPtr;

    - // -------------------- Traitement des données ------------------------------
    + // -------------------- Traitement des donnes ------------------------------

      // This function is not exported to the public Fortran interface,
    …
      CTimer::get("XIOS init context").resume();
      comm = ep_lib::EP_Comm_f2c(static_cast< int >(*f_comm));
    +
    + //ep_lib::MPI_Comm ctx_comm;
    + //ep_lib::MPI_Comm_dup(comm, &ctx_comm);

    - CClient::registerContext(str, […]
    + CClient::registerContext(str,comm);

    - //printf("icdata.cpp: client register context OK\n");
    + printf("icdata.cpp: client register context %s : %p\n", context_id, &comm);

      CTimer::get("XIOS init context").suspend();
    …
    - // ---------------------- Ecriture des données ------------------------------
    + // ---------------------- Ecriture des donnes ------------------------------

      void cxios_write_data_k80(const char* fieldid, int fieldid_size, double* data_k8, int data_Xsize)
    …
      }

    - // ---------------------- Lecture des données ------------------------------
    + // ---------------------- Lecture des donnes ------------------------------

      void cxios_read_data_k80(const char* fieldid, int fieldid_size, double* data_k8, int data_Xsize)

(The "[…]" marker stands for the tail of a removed line that is not recoverable from the flattened diff; the removed comment lines carried accented characters that the new revision strips.)
XIOS/dev/branch_yushan/src/node/context.cpp
r1063 → r1067

      comms.push_back(interCommServer);

    - printf("comm_dup OK\n");

      server = new CContextServer(this,intraCommServer,interCommServer);
XIOS/dev/branch_yushan/src/server.cpp
r1063 → r1067

      {
        MPI_Recv(&msg,1,MPI_INT,0,0,*it,&status) ;
    -   printf(" CServer : Receive finalize sign from client 0\n");
        info(20)<<" CServer : Receive client finalize"<<endl ;
    …
      MPI_Get_count(&status,MPI_CHAR,&count) ;
      recvContextMessage(buffer,count) ;
    - printf("listerContext register context OK, interComm size = %lu\n", interComm.size());

      delete [] buffer ;
    …
      MPI_Waitall(size-1,requests,status) ;
      registerContext(buff,count,it->second.leaderRank) ;
    - printf("recvContextMessage register context OK\n");

      recvContextId.erase(it) ;
    …
      MPI_Get_count(&status,MPI_CHAR,&count) ;
      registerContext(buffer,count) ;
    - printf("listenRootContext register context OK, interComm size = %lu\n", interComm.size());
      delete [] buffer ;
      recept=false ;
    …
      MPI_Intercomm_create(intraComm,0,CXios::globalComm,leaderRank,10+leaderRank,&contextIntercomm);

    - // MPI_Comm inter;
    - // MPI_Intercomm_merge(contextIntercomm,1,&inter);
    - // MPI_Barrier(inter);
    + MPI_Comm inter;
    + MPI_Intercomm_merge(contextIntercomm,1,&inter);
    + MPI_Barrier(inter);
    +
    + info(20) << "CServer : MPI_Intercomm_merge and MPI_Barrier " << contextId << endl;

    …
    - //MPI_Comm_free(&inter);
    -
    - //printf(" **** server: register context OK\n");
    + MPI_Comm_free(&inter);
    +
    + info(20) << "CServer : Register new Context OKOK " << contextId << endl;
      }
XIOS/dev/branch_yushan/src/test/test_client.f90
r1057 → r1067

      CALL MPI_INIT(ierr)
      CALL init_wait
    +
    + CALL MPI_COMM_RANK(MPI_COMM_WORLD,rank,ierr)
    + if(rank < 2) then

      CALL xios_initialize(id,return_comm=comm)
    …
      CALL xios_finalize()

    + else
    +
    + CALL xios_init_server
    +
    + endif
    +
    +
      CALL MPI_FINALIZE(ierr)
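The test now splits MPI ranks by role: ranks below 2 run the XIOS client code, the remaining ranks call xios_init_server. A minimal sketch of that rank-based split in C++ with plain MPI (the XIOS calls are replaced here by prints):

    // Rank-based role split: low ranks take the client path, the rest the server path.
    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);

      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      if (rank < 2)
        std::printf("rank %d: client path (xios_initialize / xios_finalize)\n", rank);
      else
        std::printf("rank %d: server path (xios_init_server)\n", rank);

      MPI_Finalize();
      return 0;
    }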
XIOS/dev/branch_yushan/src/test/test_complete.f90
r1058 → r1067

      CHARACTER(len=*),PARAMETER :: id="client"
    - INTEGER :: comm
    + INTEGER :: comm, comm2
      TYPE(xios_duration) :: dtime
      TYPE(xios_context) :: ctx_hdl
    …
      !###########################################################################

    - !!! Initialisation des coordonnées globales et locales pour la grille régulière
    + !!! Initialisation des coordonnes globales et locales pour la grille rgulire

      DO j=1,nj_glo
    …
      CALL xios_context_initialize("atmosphere",comm)
    + print*, "init context atmosphere comm = ", comm
    +
      CALL xios_get_handle("atmosphere",ctx_hdl)
      CALL xios_set_current_context(ctx_hdl)
    …
      CALL xios_set_fieldgroup_attr("field_definition",enabled=.TRUE.)

    - !!! Création d un nouveau champ
    + !!! Cration d un nouveau champ

      CALL xios_get_handle("field_definition",fieldgroup_hdl)
    …
      CALL xios_set_timestep(timestep=dtime)

    - !!! Recupration des valeurs des longitudes et de taille des domaines locaux (pour test de fonctionnalité)
    + !!! Recupration des valeurs des longitudes et de taille des domaines locaux (pour test de fonctionnalit)

      ni=0 ; lonvalue(:,:)=0
    …
      !###########################################################################

    - !!! Initialisation des coordonnées globales et locales pour la grille indexee (1 point sur 2)
    + !!! Initialisation des coordonnes globales et locales pour la grille indexee (1 point sur 2)

      nb_pt=ni*nj/2
    …
      CALL xios_context_initialize("surface",comm)
    + print*, "init context surface comm = ", comm
    +
      CALL xios_get_handle("surface",ctx_hdl)
      CALL xios_set_current_context(ctx_hdl)
    …
      CALL xios_set_domain_attr("domain_srf",lonvalue_2D=lon,latvalue_2D=lat)

    - !!! Création d un nouveau champ
    + !!! Cration d un nouveau champ

      CALL xios_get_handle("field_definition",fieldgroup_hdl)
    …
      CALL xios_set_timestep(timestep=dtime)

    - !!! Recupration des valeurs des longitudes et de taille des domaines locaux (pour test de fonctionnalité)
    + !!! Recupration des valeurs des longitudes et de taille des domaines locaux (pour test de fonctionnalit)

      ni=0 ; lonvalue(:,:)=0
    …
      CALL xios_close_context_definition()

    -
    + print *, "xios_close_context_definition(surface)"

    …
      !####################################################################################

    - !DO ts=1,24*10
    - DO ts=1,24
    + DO ts=1,24*10
    + !DO ts=1,24

      CALL xios_get_handle("atmosphere",ctx_hdl)
    …
      !!! Fin des contextes

    - print *, "start : xios_context_finalize(surface)"
    + !print *, "start : xios_context_finalize(surface)"

      CALL xios_get_handle("surface",ctx_hdl)
    - print *, "xios_get_handleOK"
    + !print *, "xios_get_handle (surface) OK"
      CALL xios_set_current_context(ctx_hdl)
    - print *, "xios_set_current_contextOK"
    + !print *, "xios_set_current_context (surface) OK"
      CALL xios_context_finalize()

      print *, "xios_context_finalize(surface)"

    - CALL xios_get_handle("atmosphere",ctx_hdl)
    - CALL xios_set_current_context(ctx_hdl)
    - CALL xios_context_finalize()
    + CALL xios_get_handle("atmosphere",ctx_hdl)
    + ! !print *, "xios_get_handle (atmosphere) OK"
    + CALL xios_set_current_context(ctx_hdl)
    + ! !print *, "xios_set_current_context (atmosphere) OK"
    + CALL xios_context_finalize()

      print *, "xios_context_finalize(atmosphere)"

      DEALLOCATE(lon, lat, field_A_atm, lonvalue)
    - DEALLOCATE(kindex, field_A_srf)
    + ! DEALLOCATE(kindex, field_A_srf)

      !!! Fin de XIOS

(The removed comment lines carried accented characters that the new revision strips; minor intra-line details lost in the flattened diff are reproduced as shown.)