AMROC Main     Block-structured Adaptive Mesh Refinement in object-oriented C++


Main Page   Class Hierarchy   Compound List   File List  

GridHierarchyComm.C

Go to the documentation of this file.
00001 
00006 #include "GridHierarchy.h"
00007 
/* Execute the statements between OnlyProc(n) ... EndOnlyProc only on
 * MPI rank (n).
 * BUGFIX: the original had a space before the parenthesis
 * ("#define OnlyProc (n)"), which defines an OBJECT-like macro whose
 * expansion begins with the literal token "(n)" -- every use of
 * OnlyProc(k) would mis-expand.  A function-like macro requires the
 * '(' to immediately follow the macro name. */
#ifndef OnlyProc
#define OnlyProc(n)    if (comm_service::proc_me() == (n)) {
#endif

/* Closes the conditional region opened by OnlyProc(n). */
#ifndef EndOnlyProc
#define EndOnlyProc     }
#endif
00015 
00016 int GridHierarchy::DAGH_CommInit(MPI_Comm c)
00017   {
00018     //if ( 0 == c ) c = MPI_COMM_WORLD ;
00019     int R = comm_service::init(c);
00020     return R ;
00021   }
00022 
00023 void GridHierarchy::DAGH_CommKill(void)
00024   {
00025 #ifdef DAGH_NO_MPI
00026 #else 
00027    /*$if (comm_service::dec() && comm_service::proc_world() > 1) 
00028      MPI_Barrier(comm_service::comm_world());$*/
00029 #endif
00030    comm_service::kill(); 
00031   }
00032 
00033 void GridHierarchy::DAGH_IOEnd(void)
00034   {
00035 #ifdef DAGH_NO_MPI
00036 #else
00037    if (comm_service::io_enabled() &&
00038        comm_service::proc_world() > 1 && 
00039        comm_service::proc_me() != comm_service::proc_io()) {
00040      // comm_service::barrier(comm_service_comp);
00041  
00042      const int dest = comm_service::proc_io();
00043      const int me = comm_service::proc_me();
00044      const MPI_Comm &comm = comm_service::comm_io();
00045      MPI_Request req ;
00046 
00047      /* End IO Recvs... */
00048      int size = DAGHNull;
00049      int R = MPI_Isend(&size, 1 , MPI_INT , dest,
00050                        (DAGHIOTag|DAGHIOWriteReqTag), 
00051                        comm , &req );
00052      if ( MPI_SUCCESS != R )
00053        comm_service::error_die("GridHierarchy::DAGH_IOEnd","MPI_Isend",R);
00054      
00055      R = comm_service::serve( req );
00056      if ( MPI_SUCCESS != R )
00057        comm_service::error_die( "GridHirarchy::DAGH_IOEnd" , 
00058                                   "comm_service::serve" , R );
00059 
00060      /* End IO Sends... */
00061      struct gdhdr reqhdr;
00062      reqhdr.type = DAGHNull; reqhdr.owner = me;
00063 
00064      R = MPI_Isend((void *)&reqhdr, sizeof(struct gdhdr), MPI_BYTE, dest,
00065                        (DAGHIOTag|DAGHIOReadReqTag), comm, &req);
00066      if ( MPI_SUCCESS != R )
00067        comm_service::error_die( "GridHierarchy::DAGH_IOEnd" , "MPI_Isend" , R );
00068 
00069      R = comm_service::serve( req );
00070      if ( MPI_SUCCESS != R )
00071        comm_service::error_die( "GridHirarchy::DAGH_IOEnd" , 
00072                                   "comm_service::serve" , R );
00073 
00074      /* Close down IO Close Ping Server */
00075      DAGH_PingIONode(DAGHIOTag|DAGHIOCloseReqTag,DAGHFalse);
00076 
00077      /* End IO */
00078      DAGH_PingIONode(DAGHIOTag|DAGHIOEndReqTag,DAGHFalse);
00079      comm_service::reset_io_enable();
00080 
00081      /* And a barrier.. */
00082      //comm_service::barrier(comm_service_world);
00083    }
00084 #endif
00085   }
00086 
00087 void GridHierarchy::DAGH_PingIONode(const int tag, int flg)
00088   {
00089 #ifdef DAGH_NO_MPI
00090 #else
00091    if (!comm_service::dce() || 
00092        !comm_service::io_enabled() ||
00093        comm_service::proc_me() == comm_service::proc_io()) return;
00094 
00095    int ionode = comm_service::proc_io();
00096    MPI_Request req ;
00097 
00098 #ifdef DEBUG_PRINT_COMM_IO
00099    ( comm_service::log() << "GridHierarchy::DAGH_PingIONode "
00100                          << comm_service::proc_me() << " "
00101                          << " MPI_Isend: "
00102                          << "Tag: " << tag << " "
00103                          << "Size: " << 1 << " "
00104                          << "Flag: " << flg << " "
00105                          << endl ).flush();
00106 #endif
00107 
00108    //int flg = DAGHTrue;
00109    int R = MPI_Isend(&flg, 1, MPI_INT, ionode, tag, 
00110                      comm_service::comm_io() , &req );
00111    if ( MPI_SUCCESS != R ) 
00112      comm_service::error_die("GridHierarchy::DAGH_PingIONode","MPI_Isend",R);
00113 
00114    R = comm_service::serve( req );
00115    if ( MPI_SUCCESS != R ) 
00116      comm_service::error_die("GridHierarchy::DAGH_PingIONode",
00117                              "comm_service::serve",R);
00118 #endif
00119   }
00120 
/// Gather every process' byte buffer into one concatenated buffer on
/// all processes.
///
/// @param snddata  local contribution (sndsize bytes)
/// @param sndsize  byte count of the local contribution
/// @param rcvdata  in/out: replaced by a new char[] of
///                 rcvsize * proc_world() bytes; slot i holds process
///                 i's data padded to the global maximum size.  Any
///                 non-null previous value is deleted with
///                 delete[](char*) -- presumably callers always pass
///                 null or a prior heap buffer from this routine;
///                 verify at call sites.
/// @param rcvsize  out: per-slot size, the MPI_MAX of sndsize over
///                 all processes
/// @param Cin      communicator; 0 selects comm_service::comm_world()
///
/// Serial or non-DCE runs just alias rcvdata to snddata (no copy, no
/// allocation).
void GridHierarchy::DAGH_GlbConcat(void *snddata, int sndsize,
                                   void *&rcvdata, int &rcvsize,
                                   MPI_Comm Cin)
  {
    if (!comm_service::dce() || comm_service::proc_world() == 1) 
      { rcvsize = sndsize; rcvdata = snddata; return; }

#ifdef DAGH_NO_MPI
#else
    MPI_Comm C;
    if (Cin == 0) C = comm_service::comm_world();
    else C = Cin;

    int num = comm_service::proc_world();

    int R;
    /* get the size of the biggest bkt */
    R = MPI_Allreduce(&sndsize, &rcvsize, 1, MPI_INT, MPI_MAX, C);
    if ( MPI_SUCCESS != R ) 
      comm_service::error_die("DAGH::DAGH_GlbConcat","MPI_Allreduce",R);

#ifdef DEBUG_PRINT_COMM
   ( comm_service::log() << "DAGH::DAGH_GlbConcat " 
                         << comm_service::proc_me() << " "
                         << "MPI_Allreduce (MPI_MAX) { "
                         << rcvsize
                         << " }"
                         << endl ).flush();
#endif

    /* alloc rcv buffer */
    if (rcvdata) delete [] (char*)rcvdata;
    rcvdata = (void *) new char[rcvsize*num]; 

    void* tmpsnddata = 0;
    int tmpsndsize = 0;

    /* MPI_Allgather needs equal send counts on every rank, so buffers
       shorter than the maximum are copied into a padded temporary
       (padding bytes are left uninitialized). */
#ifndef MPICH
    if (sndsize < rcvsize) {
      tmpsnddata = (void *) new char[rcvsize]; 
      memcpy (tmpsnddata,snddata,sndsize);
    }
    else { 
      tmpsnddata = snddata;
    }
    tmpsndsize = rcvsize;
#else
    /* NOTE(review): this MPICH-specific branch sends the raw sndsize,
       which can be smaller than the rcvsize expected per slot below --
       a sendcount/recvcount mismatch under the MPI standard.  It looks
       like a deliberate workaround for an old MPICH; confirm before
       relying on it with a modern MPI. */
    tmpsnddata = snddata;
    tmpsndsize = sndsize;
#endif

    R = MPI_Allgather(tmpsnddata, tmpsndsize, MPI_BYTE, rcvdata, rcvsize, 
                      MPI_BYTE,C); 
    if ( MPI_SUCCESS != R ) 
        comm_service::error_die( "DAGH::DAGH_GlbConcat", "MPI_Allgather", R );
        
    /* Release the padded temporary if one was created above. */
#ifndef MPICH
    if (sndsize < rcvsize) {
      delete [] (char*) tmpsnddata;
    }
#endif

#ifdef DEBUG_PRINT_COMM
   ( comm_service::log() << "DAGH::DAGH_GlbConcat " 
                         << comm_service::proc_me() << " "
                         << "MPI_Allgather { "
                         << rcvsize
                         << " }"
                         << endl ).flush();
#endif
#endif
  }
00193 
/// Intentionally empty: a no-op end-of-I/O ping hook.  Presumably used
/// as the default callback when no user action is needed at I/O
/// shutdown -- verify against the registration/call site.
void DAGHIO_EndIOPingFunction(GridHierarchy &GH) {}
00195 
00196 
00197 


Quickstart     Users Guide     Programmers Reference     Installation      Examples     Download



AMROC Main      Home      Contact
last update: 06/01/04