Ignore:
Timestamp:
01/22/19 16:15:03 (5 years ago)
Author:
yushan
Message:

dev on ADA

File:
1 edited

Legend:

Unmodified
Added
Removed
  • XIOS/trunk/extern/remap/src/parallel_tree.cpp

    r923 → r1638
    115  115  
    116  116  //CParallelTree::CParallelTree(MPI_Comm comm) : communicator(comm), cascade(MIN_NODE_SZ*MIN_NODE_SZ, comm)
    117       -  CParallelTree::CParallelTree(MPI_Comm comm) : communicator(comm), cascade(MAX_NODE_SZ*MAX_NODE_SZ*2, comm)
         117  +  CParallelTree::CParallelTree(ep_lib::MPI_Comm comm) : communicator(comm), cascade(MAX_NODE_SZ*MAX_NODE_SZ*2, comm)
    118  118  {
    119  119          treeCascade.reserve(cascade.num_levels);
     
    151  151  
    152  152          int nrecv; // global number of samples  THIS WILL BE THE NUMBER OF LEAFS IN THE SAMPLE TREE
    153       -        MPI_Allreduce(&n, &nrecv, 1, MPI_INT, MPI_SUM, comm.comm); // => size of sample tree does not depend on keepNodes!
         153  +        ep_lib::MPI_Allreduce(&n, &nrecv, 1, EP_INT, EP_SUM, comm.comm); // => size of sample tree does not depend on keepNodes!
    154  154          double ratio = blocSize / (1.0 * nrecv);
    155  155          int nsend = ratio * n + 1; // nsend = n_local_samples / n_global_samples * blocksize + 1 = blocksize/comm.size
     
    157  157  
    158  158          int *counts = new int[comm.size];
    159       -        MPI_Allgather(&nsend, 1, MPI_INT, counts, 1, MPI_INT, comm.comm);
         159  +        ep_lib::MPI_Allgather(&nsend, 1, EP_INT, counts, 1, EP_INT, comm.comm);
    160  160  
    161  161          nrecv = 0;
     
    183  183          /* each process needs the sample elements from all processes */
    184  184          double *recvBuffer = new double[nrecv*4];
    185       -        MPI_Allgatherv(sendBuffer, 4 * nsend, MPI_DOUBLE, recvBuffer, counts, displs, MPI_DOUBLE, comm.comm);
         185  +        ep_lib::MPI_Allgatherv(sendBuffer, 4 * nsend, EP_DOUBLE, recvBuffer, counts, displs, EP_DOUBLE, comm.comm);
    186  186          delete[] sendBuffer;
    187  187          delete[] counts;
     
    241  241           << "   node size : "<<node.size()<<"   bloc size : "<<blocSize<<"  total number of leaf : "<<tree.leafs.size()<<endl ;
    242  242  /*
    243       -        MPI_Allreduce(&ok, &allok, 1, MPI_INT, MPI_PROD, communicator);
         243  +        MPI_Allreduce(&ok, &allok, 1, EP_INT, MPI_PROD, communicator);
    244  244          if (!allok) {
    245  245                  MPI_Finalize();
     
    247  247          }
    248  248  */
    249       -      MPI_Abort(MPI_COMM_WORLD,-1) ;
         249  +     ep_lib::MPI_Abort(EP_COMM_WORLD,-1) ;
    250  250    }
    251  251  /*
     
    265  265  {
    266  266          CMPIRouting MPIRoute(communicator);
    267       -        MPI_Barrier(communicator);
         267  +        ep_lib::MPI_Barrier(communicator);
    268  268          CTimer::get("buildLocalTree(initRoute)").resume();
    269  269          MPIRoute.init(route);
     
    290  290  
    291  291          int mpiRank;
    292       -        MPI_Comm_rank(communicator, &mpiRank);
         292  +        ep_lib::MPI_Comm_rank(communicator, &mpiRank);
    293  293          localTree.leafs.reserve(nbLocalElements);
    294  294          for (int i = 0; i < nbLocalElements; i++)
     
    316  316    nb1=node.size() ; nb2=node2.size() ;
    317  317    nb=nb1+nb2 ;
    318       -    MPI_Allreduce(&nb, &nbTot, 1, MPI_LONG, MPI_SUM, communicator) ;
         318  +    ep_lib::MPI_Allreduce(&nb, &nbTot, 1, EP_LONG, EP_SUM, communicator) ;
    319  319    int commSize ;
    320       -    MPI_Comm_size(communicator,&commSize) ;
         320  +    ep_lib::MPI_Comm_size(communicator,&commSize) ;
    321  321  
    322  322          // make multiple of two
     
    501  501          // gather circles on this level of the cascade
    502  502          int pg_size;
    503       -        MPI_Comm_size(cascade.level[level].pg_comm, &pg_size);
         503  +        ep_lib::MPI_Comm_size(cascade.level[level].pg_comm, &pg_size);
    504  504          vector<Coord> allRootCentres(pg_size);
    505  505          vector<double> allRootRadia(pg_size);
    506       -        MPI_Allgather(&rootCentre, 3, MPI_DOUBLE, &allRootCentres[0], 3, MPI_DOUBLE, cascade.level[level].pg_comm);
    507       -        MPI_Allgather(&rootRadius, 1, MPI_DOUBLE, &allRootRadia[0],   1, MPI_DOUBLE, cascade.level[level].pg_comm);
         506  +        ep_lib::MPI_Allgather(&rootCentre, 3, EP_DOUBLE, &allRootCentres[0], 3, EP_DOUBLE, cascade.level[level].pg_comm);
         507  +        ep_lib::MPI_Allgather(&rootRadius, 1, EP_DOUBLE, &allRootRadia[0],   1, EP_DOUBLE, cascade.level[level].pg_comm);
    508  508  
    509  509          // now allRootsRadia and allRootCentres must be inserted into second levels of us and propagated to root
Note: See TracChangeset for help on using the changeset viewer.