- Timestamp: 03/09/17 18:31:07 (7 years ago)
- Location: XIOS/dev/branch_yushan/src
- Files: 14 edited

Legend:
- Unmodified (no prefix)
- Added (+)
- Removed (-)
XIOS/dev/branch_yushan/src/buffer_client.cpp
r1037 → r1070:
    traceOn();
    if (flag == true) pending = false;
-
-   //printf("\ncheckbuffer, flag = %d, pending = %d; request = %p, count = %d\n", flag, pending, &request, count);
  }
XIOS/dev/branch_yushan/src/client.cpp
r1068 → r1070:
    int myColor ;
    int i,c ;
-   //MPI_Comm newComm ;

    MPI_Comm_size(CXios::globalComm,&size) ;
…
    MPI_Comm_rank(intraComm,&rank) ;
-
-   //printf("CClient::finalize called isServer = %d\n", CXios::isServer);

    if (!CXios::isServer)
    {
…
      {
        MPI_Send(&msg,1,MPI_INT,0,0,interComm) ;
-       //printf(" CClient : send finalize sign to server 0\n");
      }
    }
…
    {
      if (CXios::usingOasis) oasis_finalize();
-     else {MPI_Finalize() ; printf("CClient::finalize called MPI_finalize\n");}
+     else MPI_Finalize();
    }
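As background for the hunks above, the path being cleaned up is CClient's finalize sequence: the client notifies the server that it is done and then shuts MPI down when OASIS is not in use. A minimal stand-alone sketch of that handshake, assuming the usual rank-0 guard around the send (the function name clientFinalize and the free-function form are illustrative, not the XIOS code):

    #include <mpi.h>

    // Illustrative sketch only: client rank 0 sends a "finalize" signal to server
    // process 0 over the inter-communicator, then MPI is finalized unless a coupler
    // (OASIS in XIOS) owns the finalization.
    void clientFinalize(MPI_Comm intraComm, MPI_Comm interComm, bool isServer, bool usingOasis)
    {
      int rank;
      MPI_Comm_rank(intraComm, &rank);

      if (!isServer)
      {
        if (rank == 0)                               // rank-0 guard assumed here
        {
          int msg = 0;
          MPI_Send(&msg, 1, MPI_INT, 0, 0, interComm);
        }
        if (!usingOasis) MPI_Finalize();             // oasis_finalize() in the real code otherwise
      }
    }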
XIOS/dev/branch_yushan/src/client_client_dht_template_impl.hpp
r1053 → r1070:
      int level)
  {
-   int clientRank;
-   MPI_Comm_rank(commLevel,&clientRank);
    int groupRankBegin = this->getGroupBegin()[level];
    int nbClient = this->getNbInGroup()[level];
…
    std::vector<ep_lib::MPI_Status> status(request.size());
-
-   //printf("1(%d): calling wait all for %lu requests\n", clientRank, request.size());
-
    MPI_Waitall(request.size(), &request[0], &status[0]);
-
-   //printf(" 1(%d): calling wait all for %lu requests OK\n", clientRank, request.size());

    CArray<size_t,1>* tmpGlobalIndex;
…
    std::vector<ep_lib::MPI_Status> statusOnReturn(requestOnReturn.size());
-   //printf("2(%d): calling wait all for %lu requests\n", clientRank, requestOnReturn.size());
    MPI_Waitall(requestOnReturn.size(), &requestOnReturn[0], &statusOnReturn[0]);

-   //printf(" 2(%d): calling wait all for %lu requests OK\n", clientRank, requestOnReturn.size());

    Index2VectorInfoTypeMap indexToInfoMapping;
…
      int level)
  {
-   //printf("in computeDistributedIndex(const Index2VectorInfoTypeMap& indexInfoMap, const MPI_Comm& commLevel, int level)\n");
    int clientRank;
    MPI_Comm_rank(commLevel,&clientRank);
…
      {
        client2ClientIndex[indexClient + groupRankBegin][sendNbIndexBuff[indexClient]] = it->first;;
-       // ProcessDHTElement<InfoType>::packElement(it->second, client2ClientInfo[indexClient + groupRankBegin], sendNbInfo[indexClient]);
        ProcessDHTElement<InfoType>::packElement(infoTmp[idx], client2ClientInfo[indexClient + groupRankBegin], sendNbInfo[indexClient]);
        ++sendNbIndexBuff[indexClient];
      }
    }
-
-   //printf("check 4 OK. clientRank = %d\n", clientRank);

    // Calculate from how many clients each client receive message.
…
    sendRecvRank(level, sendBuff, sendNbIndexBuff,
                 recvRankClient, recvNbIndexClientCount);
-   //printf("sendRecvRank OK\n");

    int recvNbIndexCount = 0;
…
      recvInfoBuff = new unsigned char[recvNbIndexCount*ProcessDHTElement<InfoType>::typeSize()];
    }
-
-   //printf("check 5 OK. clientRank = %d\n", clientRank);

    // If a client holds information about index and the corresponding which don't belong to it,
…
    }

-   //printf("check 6 OK. clientRank = %d\n", clientRank);
-
    boost::unordered_map<int, size_t* >::iterator itbIndex = client2ClientIndex.begin(), itIndex,
                                                  iteIndex = client2ClientIndex.end();
…
    }

-   //printf("check 7 OK. clientRank = %d\n", clientRank);
-
    boost::unordered_map<int, unsigned char*>::iterator itbInfo = client2ClientInfo.begin(), itInfo,
                                                        iteInfo = client2ClientInfo.end();
…
    }

-   //printf("check 8 OK. clientRank = %d\n", clientRank);
    std::vector<ep_lib::MPI_Status> status(request.size());
…
    }

-   //printf("check 9 OK. clientRank = %d\n", clientRank);
-
    if (0 != recvNbIndexCount)
    {
…
      delete [] it->second;

-   //printf("check 10 OK. clientRank = %d\n", clientRank);
    // Ok, now do something recursive
    if (0 < level)
…
    ep_lib::MPI_Request request;
    requestSendInfo.push_back(request);
-   //printf("MPI_IsendInfo(info, infoSize, MPI_CHAR,... char count = %d, dest = %d, buf_size = %d\n", infoSize, clientDestRank, sizeof(*info) );
    MPI_Isend(info, infoSize, MPI_CHAR,
              clientDestRank, MPI_DHT_INFO, clientIntraComm, &(requestSendInfo.back()));
…
    }

-   int clientRank;
-   MPI_Comm_rank(this->internalComm_,&clientRank);
-   //printf("4(%d): calling wait all for %lu requests\n", clientRank, sendNbRank.size()+recvNbRank.size());

    MPI_Waitall(sendNbRank.size()+recvNbRank.size(), &request[0], &requestStatus[0]);
-   //printf(" 4(%d): calling wait all for %lu requests OK\n", clientRank, sendNbRank.size()+recvNbRank.size());
  }
…
      std::vector<int>& recvNbRank, std::vector<int>& recvNbElements)
  {
-   int myRank;
-   MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
-   //printf("myRank = %d, in sendRecvRank(int level, const std::vector<int>& sendNbRank, const std::vector<int>& sendNbElements, std::vector<int>& recvNbRank, std::vector<int>& recvNbElements)\n", myRank);
    int groupBegin = this->getGroupBegin()[level];
…
    for (int idx = 0; idx < recvBuffSize; ++idx)
    {
-     //printf("myRank = %d starts irecv with src = %d, tag = %d, idx = %d\n", myRank, recvRank[idx], MPI_DHT_INDEX_0, idx);
      MPI_Irecv(&recvBuff[0]+2*idx, 2, MPI_INT,
                recvRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]);
-     //printf("myRank = %d MPI_Irecv OK, idx = %d, nRequest = %d\n", myRank, idx, nRequest);
      ++nRequest;
    }

-   //printf("myRank = %d, check 1 OK\n", myRank);

    for (int idx = 0; idx < sendBuffSize; ++idx)
…
    for (int idx = 0; idx < sendBuffSize; ++idx)
    {
-     //printf("myRank = %d starts isend with dest = %d, tag = %d, idx = %d\n", myRank, sendRank[idx], MPI_DHT_INDEX_0, idx);
      MPI_Isend(&sendBuff[idx*2], 2, MPI_INT,
                sendRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]);
-     //printf("myRank = %d MPI_Isend OK, idx = %d, nRequest = %d\n", myRank, idx, nRequest);
      ++nRequest;
    }
…
    MPI_Barrier(this->internalComm_);

-   //printf("myRank = %d, check 2 OK\n", myRank);
-
-   int clientRank;
-   MPI_Comm_rank(this->internalComm_,&clientRank);
-
-   //printf("5(%d): calling wait all for %lu requests\n", myRank, sendBuffSize+recvBuffSize);

    MPI_Waitall(sendBuffSize+recvBuffSize, &request[0], &requestStatus[0]);
-   //printf(" 5(%d): calling wait all for %lu requests OK\n", myRank, sendBuffSize+recvBuffSize);
-   //printf("check 3 OK\n");

    int nbRecvRank = 0, nbRecvElements = 0;
…
      }
    }
-   //printf("check 4 OK\n");
  }

  }
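Most of the deletions in this file are progress printf calls wrapped around one recurring communication pattern: post non-blocking MPI_Irecv/MPI_Isend calls into a request array, then complete them all with a single MPI_Waitall. A stripped-down sketch of that pattern, assuming the caller has sized the buffers (peer lists, tags, and the two-int payload are illustrative, not lifted from the class):

    #include <mpi.h>
    #include <vector>

    // Illustrative sketch of the Irecv/Isend + Waitall exchange used throughout
    // the DHT implementation. recvBuff and sendBuff must hold 2 ints per peer.
    void exchangeCounts(MPI_Comm comm,
                        const std::vector<int>& recvRank, std::vector<int>& recvBuff,
                        const std::vector<int>& sendRank, std::vector<int>& sendBuff)
    {
      std::vector<MPI_Request> request(recvRank.size() + sendRank.size());
      if (request.empty()) return;   // nothing to exchange
      int nRequest = 0;

      // Post all receives first so the matching sends cannot block.
      for (std::size_t idx = 0; idx < recvRank.size(); ++idx)
        MPI_Irecv(&recvBuff[2*idx], 2, MPI_INT, recvRank[idx], 0, comm, &request[nRequest++]);

      // Then post the sends.
      for (std::size_t idx = 0; idx < sendRank.size(); ++idx)
        MPI_Isend(&sendBuff[2*idx], 2, MPI_INT, sendRank[idx], 0, comm, &request[nRequest++]);

      // Nothing may touch the buffers until every request has completed.
      std::vector<MPI_Status> status(request.size());
      MPI_Waitall(static_cast<int>(request.size()), &request[0], &status[0]);
    }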
XIOS/dev/branch_yushan/src/context_client.cpp
r1060 → r1070:
    while (parentServer->server->hasPendingEvent())
    {
-     parentServer->server->eventLoop(); //printf("parentServer->server->eventLoop()\n");
+     parentServer->server->eventLoop();
    }
XIOS/dev/branch_yushan/src/context_server.cpp
r1067 → r1070:
    for(rank=0;rank<commSize;rank++)
    {
-     //printf("in CContextServer::listen, rank = %d, commSize = %d, pendingRequest.find(rank) = %d\n", rank, commSize, pendingRequest.find(rank));
      if (pendingRequest.find(rank)==pendingRequest.end())
      {
…
        mapBufferSize_.insert(std::make_pair(rank, buffSize));
        it=(buffers.insert(pair<int,CServerBuffer*>(rank,new CServerBuffer(buffSize)))).first;
-       //printf("find message, is buffer end, receiving, buffSize = %d, rank = %d, commSize = %d\n", buffSize, rank, commSize);
      }
      else
…
        ep_lib::MPI_Irecv(addr,count,MPI_CHAR,rank,20,interComm,&pendingRequest[rank]);
        bufferRequest[rank]=addr;
-       //printf("find message, i-receiving to buffer %p, rank = %d, commSize = %d\n", addr, rank, commSize);
      }
    }
…
    CBufferIn buffer(buff,count);
-   //char* startBuffer,endBuffer;
    int size;
-   //int offset;
    size_t timeLine;
    map<size_t,CEventServer*>::iterator it;
XIOS/dev/branch_yushan/src/cxios.cpp
r1069 → r1070:
    globalComm = passage[omp_get_thread_num()];
-
-   // int tmp_size;
-   // MPI_Comm_size(globalComm, &tmp_size);
-   // if(isClient) printf("Client : globalcomm size = %d\n", tmp_size);
-   // if(isServer) printf("Server : globalcomm size = %d\n", tmp_size);
-
  }
XIOS/dev/branch_yushan/src/interface/c/icdata.cpp
r1069 → r1070:
    comm = ep_lib::EP_Comm_f2c(static_cast< int >(*f_comm));

    CClient::registerContext(str,comm);
-
-   //printf("icdata.cpp: client register context %s : %p\n", context_id, &comm);
-
    CTimer::get("XIOS init context").suspend();
    CTimer::get("XIOS").suspend();
…
    {
      CVariable::get(context->getId(), varIdStr)->setData<double>(data);
-     //CVariable::get(context->getId(), varIdStr)->sendValue();
    }
…
    {
      CVariable::get(context->getId(), varIdStr)->setData<float>(data);
-     //CVariable::get(context->getId(), varIdStr)->sendValue();
    }
…
    {
      CVariable::get(context->getId(), varIdStr)->setData<int>(data);
-     //CVariable::get(context->getId(), varIdStr)->sendValue();
    }
…
    {
      CVariable::get(context->getId(), varIdStr)->setData<bool>(data);
-     //CVariable::get(context->getId(), varIdStr)->sendValue();
    }
…
    {
      CVariable::get(context->getId(), varIdStr)->setData<string>(dataStr);
-     //CVariable::get(context->getId(), varIdStr)->sendValue();
    }
XIOS/dev/branch_yushan/src/io/onetcdf4.cpp
r1069 → r1070:
    wmpi = comm && !multifile;

-   //ep_lib::MPI_Info info_null;

    if (wmpi)
…
    if (wmpi)
    {
-     // printf("start creating file with createPar\n");
-     //CNetCdfInterface::createPar(filename, mode, static_cast<MPI_Comm>(comm->mpi_comm), static_cast<MPI_Info>(info_null.mpi_info), this->ncidp);
      CNetCdfInterface::createPar(filename, mode, static_cast<MPI_Comm>(comm->mpi_comm), MPI_INFO_NULL_STD, this->ncidp);
-     // printf("creating file with createPar\n");
    }
    else
    {
      CNetCdfInterface::create(filename, mode, this->ncidp);
-     // printf("creating file with create\n");
    }
…
    if (wmpi)
    {
-     // printf("start opening file with openPar\n");
-     // CNetCdfInterface::openPar(filename, mode, static_cast<MPI_Comm>(comm->mpi_comm), static_cast<MPI_Info>(info_null.mpi_info), this->ncidp);
      CNetCdfInterface::openPar(filename, mode, static_cast<MPI_Comm>(comm->mpi_comm), MPI_INFO_NULL_STD, this->ncidp);
-     // printf("opening file with openPar\n");
    }
    else
    {
      CNetCdfInterface::open(filename, mode, this->ncidp);
-     // printf("opening file with open\n");
    }
XIOS/dev/branch_yushan/src/node/context.cpp
r1067 → r1070:
  void CContext::closeDefinition(void)
  {
-   int myRank;
-   MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
-
-   //printf("myRank = %d, hasClient = %d, hasServer = %d\n", myRank, hasClient, hasServer);

    // There is nothing client need to send to server
…
      // After xml is parsed, there are some more works with post processing
      postProcessing();
-     //printf("myRank = %d, postProcessing OK\n", myRank);
    }
-   setClientServerBuffer(); //printf("myRank = %d, setClientServerBuffer OK\n", myRank);
-
-   //printf("hasClient = %d, hasServer = %d\n", hasClient, hasServer);
+   setClientServerBuffer();

    if (hasClient && !hasServer)
    {
      // Send all attributes of current context to server
-     this->sendAllAttributesToServer(); //printf("myRank = %d, this->sendAllAttributesToServer OK\n", myRank);
+     this->sendAllAttributesToServer();

      // Send all attributes of current calendar
      CCalendarWrapper::get(CCalendarWrapper::GetDefName())->sendAllAttributesToServer();
-     //printf("myRank = %d, CCalendarWrapper::get(CCalendarWrapper::GetDefName())->sendAllAttributesToServer OK\n", myRank);

      // We have enough information to send to server
      // First of all, send all enabled files
-     sendEnabledFiles(); //printf("myRank = %d, sendEnabledFiles OK\n", myRank);
+     sendEnabledFiles();

      // Then, send all enabled fields
-     sendEnabledFields(); //printf("myRank = %d, sendEnabledFields OK\n", myRank);
+     sendEnabledFields();

      // At last, we have all info of domain and axis, then send them
-     sendRefDomainsAxis(); //printf("myRank = %d, sendRefDomainsAxis OK\n", myRank);
-
+     sendRefDomainsAxis();
      // After that, send all grid (if any)
-     sendRefGrid(); //printf("myRank = %d, sendRefGrid OK\n", myRank);
+     sendRefGrid();
    }
…
    if (hasClient)
    {
-     this->buildFilterGraphOfEnabledFields(); //printf("myRank = %d, buildFilterGraphOfEnabledFields OK\n", myRank);
-     buildFilterGraphOfFieldsWithReadAccess(); //printf("myRank = %d, buildFilterGraphOfFieldsWithReadAccess OK\n", myRank);
-     this->solveAllRefOfEnabledFields(true); //printf("myRank = %d, solveAllRefOfEnabledFields OK\n", myRank);
+     this->buildFilterGraphOfEnabledFields();
+     buildFilterGraphOfFieldsWithReadAccess();
+     this->solveAllRefOfEnabledFields(true);
    }
…
    if (hasClient)
    {
-     sendCreateFileHeader(); //printf("myRank = %d, sendCreateFileHeader OK\n", myRank);
-
-     startPrefetchingOfEnabledReadModeFiles(); //printf("myRank = %d, startPrefetchingOfEnabledReadModeFiles OK\n", myRank);
+     sendCreateFileHeader();
+
+     startPrefetchingOfEnabledReadModeFiles();
    }
  }
XIOS/dev/branch_yushan/src/node/domain.cpp
r1053 → r1070:
  void CDomain::checkAttributesOnClientAfterTransformation()
  {
-   int myRank;
-   MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
-
-   CContext* context=CContext::getCurrent() ; //printf("myRank = %d, CContext::getCurrent OK\n", myRank);
-
-   //printf("myRank = %d, this->isClientAfterTransformationChecked = %d\n", myRank, this->isClientAfterTransformationChecked);
+
+   CContext* context=CContext::getCurrent() ;
+
    if (this->isClientAfterTransformationChecked) return;
-   //printf("myRank = %d, context->hasClient = %d\n", myRank, context->hasClient);
    if (context->hasClient)
    {
-     this->checkMask(); //printf("myRank = %d, this->checkMask OK\n", myRank);
-     //printf("myRank = %d, hasLonLat = %d, hasArea = %d, isCompressible_ = %d\n", myRank, hasLonLat, hasArea, isCompressible_);
+     this->checkMask();
      if (hasLonLat || hasArea || isCompressible_)
      {
-       //printf("myRank = %d, start this->computeConnectedServer\n", myRank);
        this->computeConnectedServer();
-       //printf("myRank = %d, this->computeConnectedServer OK\n", myRank);
      }
-     //printf("myRank = %d, hasLonLat = %d\n", myRank, hasLonLat);
      if (hasLonLat)
      {
        this->completeLonLatClient();
-       //printf("myRank = %d, this->completeLonLatClient OK\n", myRank);
      }
    }
…
  void CDomain::computeConnectedServer(void)
  {
-   int myRank;
-   MPI_Comm_rank(MPI_COMM_WORLD, &myRank);

    CContext* context=CContext::getCurrent() ;
…
    else serverDescription.computeServerGlobalIndexInRange(std::make_pair<size_t,size_t>(indexBegin, indexEnd), 1);

-   //printf("myRank = %d, check 7 OK\n", myRank);
-
    CClientServerMapping* clientServerMap = new CClientServerMappingDistributed(serverDescription.getGlobalIndexRange(),
                                                                                client->intraComm);
-   //printf("myRank = %d new OK\n", myRank);
-
    clientServerMap->computeServerIndexMapping(globalIndexDomain);
-   //printf("myRank = %d, clientServerMap->computeServerIndexMapping(globalIndexDomain) OK\n", myRank);

    const CClientServerMapping::GlobalIndexMap& globalIndexDomainOnServer = clientServerMap->getGlobalIndexOnServer();
-   //printf("myRank = %d, clientServerMap->getGlobalIndexOnServer OK\n", myRank);
-
-   //printf("myRank = %d, check 8 OK\n", myRank);
-
    CClientServerMapping::GlobalIndexMap::const_iterator it = globalIndexDomainOnServer.begin(),
                                                          ite = globalIndexDomainOnServer.end();
…
    }

-   //printf("myRank = %d, check 9 OK\n", myRank);
-
    connectedServerRank_.clear();
    for (it = globalIndexDomainOnServer.begin(); it != ite; ++it) {
…
    delete clientServerMap;
-   //printf("myRank = %d, check 10 OK\n", myRank);
  }
XIOS/dev/branch_yushan/src/node/field.cpp
r1037 → r1070:
  void CField::solveAllReferenceEnabledField(bool doSending2Server)
  {
-   int myRank;
-   MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
-
-   CContext* context = CContext::getCurrent(); //printf("my_Rank = %d, CContext* context = CContext::getCurrent OK\n", myRank);
-   solveOnlyReferenceEnabledField(doSending2Server); //printf("my_Rank = %d, solveOnlyReferenceEnabledField(doSending2Server) OK\n", myRank);
+
+   CContext* context = CContext::getCurrent();
+   solveOnlyReferenceEnabledField(doSending2Server);

    if (!areAllReferenceSolved)
…
      if (context->hasClient)
      {
-       solveRefInheritance(true); //printf("my_Rank = %d, solveRefInheritance(true) OK\n", myRank);
+       solveRefInheritance(true);
        if (hasDirectFieldReference())
        {
          getDirectFieldReference()->solveAllReferenceEnabledField(false);
-         //printf("my_Rank = %d, getDirectFieldReference()->solveAllReferenceEnabledField(false) OK\n", myRank);
        }
      }
…
      {
        solveServerOperation();
-       //printf("my_Rank = %d, solveServerOperation OK\n", myRank);
      }

-     solveGridReference(); //printf("my_Rank = %d, solveGridReference OK\n", myRank);
-   }
-
-   solveGridDomainAxisRef(doSending2Server); //printf("my_Rank = %d, solveGridDomainAxisRef(doSending2Server) OK\n", myRank);
+     solveGridReference();
+   }
+
+   solveGridDomainAxisRef(doSending2Server);

    if (context->hasClient)
    {
-     solveTransformedGrid(); //printf("my_Rank = %d, solveTransformedGrid OK\n", myRank);
-   }
-
-   solveCheckMaskIndex(doSending2Server); //printf("FIELD.CPP: my_Rank = %d, solveCheckMaskIndex(doSending2Server) OK\n", myRank);
+     solveTransformedGrid();
+   }
+
+   solveCheckMaskIndex(doSending2Server);
  }
XIOS/dev/branch_yushan/src/node/grid.cpp
r1037 → r1070:
  void CGrid::checkAttributesAfterTransformation()
  {
-   int myRank;
-   MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
-
-   setAxisList(); //printf("myRank = %d, setAxisList OK\n", myRank);
-   std::vector<CAxis*> axisListP = this->getAxis(); //printf("myRank = %d, this->getAxis OK\n", myRank);
+
+   setAxisList();
+   std::vector<CAxis*> axisListP = this->getAxis();
    if (!axisListP.empty())
    {
…
        axisPositionInGrid_.push_back(idx);
        ++idx;
-       //printf("myRank = %d, axisPositionInGrid_.push_back OK\n", myRank);
      }
      else if (2 == elementDimension) idx += 2;
…
      {
        axisListP[i]->checkAttributesOnClientAfterTransformation(globalDim_,axisPositionInGrid_[i]);
-       //printf("myRank = %d, axisListP[%d/%d]->checkAttributesOnClientAfterTransformation OK\n", myRank, i, axisListP.size());
      }
    }

-   setDomainList(); //printf("myRank = %d, setDomainList OK\n", myRank);
-   std::vector<CDomain*> domListP = this->getDomains(); //printf("myRank = %d, this->getDomains OK\n", myRank);
+   setDomainList();
+   std::vector<CDomain*> domListP = this->getDomains();
    if (!domListP.empty())
    {
      for (int i = 0; i < domListP.size(); ++i)
      {
-       //printf("myRank = %d, start domListP[%d]->checkAttributesOnClientAfterTransformation\n", myRank, i);
        domListP[i]->checkAttributesOnClientAfterTransformation();
-       //printf("myRank = %d, domListP[%d]->checkAttributesOnClientAfterTransformation OK\n", myRank, i);
      }
    }
XIOS/dev/branch_yushan/src/server.cpp
r1069 → r1070:
    {
      if (CXios::usingOasis) oasis_finalize();
-     //else {MPI_Finalize() ; printf("CServer::finalize called MPI_finalize\n");}
+     //else {MPI_Finalize() ;}
    }
XIOS/dev/branch_yushan/src/transformation/grid_transformation.cpp
r1053 → r1070:
    status.resize(requests.size());
-   //printf("(%d) src/transformation/grid_transformation 1st waitall\n", clientRank);
    MPI_Waitall(requests.size(), &requests[0], &status[0]);
-   //printf("(%d) src/transformation/grid_transformation 1st waitall OK\n", clientRank);

    // Okie, now use the mask to identify which index source we need to send, then also signal the destination which masked index we will return
…
    }
    status.resize(requests.size());
-   //printf("(%d) src/transformation/grid_transformation 2nd waitall\n", clientRank);
    MPI_Waitall(requests.size(), &requests[0], &status[0]);
-   //printf("(%d) src/transformation/grid_transformation 2nd waitall OK\n", clientRank);

    // Cool, now we can fill in local index of grid destination (counted for masked index)
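The common thread across all fourteen files is the removal of commented-out printf tracing. As a side note, not something introduced by r1070, the same diagnostics can be kept out of release builds without leaving dead code behind by routing them through a compile-time guarded macro; XIOS_DEBUG_TRACE and DEBUG_PRINTF below are hypothetical names used purely for illustration:

    #include <cstdio>

    // Hypothetical helper (not part of XIOS): tracing compiles away entirely
    // unless XIOS_DEBUG_TRACE is defined at build time.
    #ifdef XIOS_DEBUG_TRACE
      #define DEBUG_PRINTF(...) std::printf(__VA_ARGS__)
    #else
      #define DEBUG_PRINTF(...) ((void)0)
    #endif

    int main()
    {
      int rank = 0;
      DEBUG_PRINTF("rank %d: calling MPI_Waitall\n", rank);  // expands to a no-op by default
      return 0;
    }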