Geant4  10.03.p03
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
G4VUserMPIrunMerger Class Reference [abstract]

#include <G4VUserMPIrunMerger.hh>

Inheritance diagram for G4VUserMPIrunMerger:

Public Member Functions

 G4VUserMPIrunMerger ()
 
 G4VUserMPIrunMerger (const G4Run *aRun, G4int destination=G4MPImanager::kRANK_MASTER, G4int verbosity=0)
 
virtual ~G4VUserMPIrunMerger ()
 
void SetRun (G4Run *r)
 
void SetDestinationRank (G4int i)
 
void SetVerbosity (G4int ver)
 
virtual void Merge ()
 

Protected Member Functions

virtual void Pack ()=0
 
virtual G4Run * UnPack ()=0
 
void InputUserData (void *input_data, const MPI::Datatype &dt, int count)
 
void OutputUserData (void *input_data, const MPI::Datatype &dt, int count)
 
void SetupOutputBuffer (char *buff, G4int size, G4int position)
 
void DestroyBuffer ()
 
G4int GetPosition () const
 
char * GetBuffer () const
 
G4int GetBufferSize () const
 
void Send (const unsigned int destination)
 
void Receive (const unsigned int source)
 

Detailed Description

Definition at line 32 of file G4VUserMPIrunMerger.hh.

Constructor & Destructor Documentation

G4VUserMPIrunMerger::G4VUserMPIrunMerger ( )
G4VUserMPIrunMerger::G4VUserMPIrunMerger ( const G4Run *  aRun,
G4int  destination = G4MPImanager::kRANK_MASTER,
G4int  verbosity = 0 
)

Definition at line 33 of file G4VUserMPIrunMerger.cc.

35  :
36  outputBuffer(nullptr),outputBufferSize(0),outputBufferPosition(0),
37  ownsBuffer(false),
38  destinationRank(destination),
39  run(const_cast<G4Run*>(aRun)),
40  commSize(0),
41  verbose(ver),
42  bytesSent(0) {}
virtual G4VUserMPIrunMerger::~G4VUserMPIrunMerger ( )
inline, virtual

Definition at line 38 of file G4VUserMPIrunMerger.hh.

38 { if ( ownsBuffer) DestroyBuffer(); }

Here is the call graph for this function:

Member Function Documentation

void G4VUserMPIrunMerger::DestroyBuffer ( )
inline, protected

Definition at line 63 of file G4VUserMPIrunMerger.hh.

63  {
64  delete[] outputBuffer;
65  outputBuffer = nullptr;
66  outputBufferSize=0;
67  outputBufferPosition=0;
68  ownsBuffer = false;
69  }

Here is the caller graph for this function:

char* G4VUserMPIrunMerger::GetBuffer ( ) const
inline, protected

Definition at line 72 of file G4VUserMPIrunMerger.hh.

72 { return outputBuffer; }
G4int G4VUserMPIrunMerger::GetBufferSize ( ) const
inline, protected

Definition at line 73 of file G4VUserMPIrunMerger.hh.

73 { return outputBufferSize; }
G4int G4VUserMPIrunMerger::GetPosition ( ) const
inline, protected

Definition at line 71 of file G4VUserMPIrunMerger.hh.

71 { return outputBufferPosition; }
void G4VUserMPIrunMerger::InputUserData ( void *  input_data,
const MPI::Datatype &  dt,
int  count 
)
inline, protected

Definition at line 49 of file G4VUserMPIrunMerger.hh.

49  {
50  input_userdata.push_back( const_registered_data{input_data,dt,count} );
51  }

Here is the caller graph for this function:

void G4VUserMPIrunMerger::Merge ( )
virtual

Definition at line 137 of file G4VUserMPIrunMerger.cc.

138 {
139  DMSG(0, "G4VUserMPIrunMerger::Merge called");
140  const unsigned int myrank = MPI::COMM_WORLD.Get_rank();
141  commSize = MPI::COMM_WORLD.Get_size();
142  if ( commSize == 1 ) {
143  DMSG(1,"Comm world size is 1, nothing to do");
144  return;
145  }
146  COMM_G4COMMAND_ = MPI::COMM_WORLD.Dup();
147  bytesSent = 0;
148  const G4double sttime = MPI::Wtime();
149 
150  //Use G4MPIutils to optimize communications between ranks
151  typedef std::function<void(unsigned int)> handler_t;
152  using std::placeholders::_1;
153  handler_t sender = std::bind(&G4VUserMPIrunMerger::Send , this , _1);
154  handler_t receiver = std::bind(&G4VUserMPIrunMerger::Receive, this, _1);
155  std::function<void(void)> barrier =
156  std::bind(&MPI::Intracomm::Barrier,&COMM_G4COMMAND_);
157  G4mpi::Merge( sender , receiver , barrier , commSize , myrank );
158 
159  //OLD Style p2p communications
160 /*
161  if ( myrank != destinationRank ) {
162  DMSG(0,"Comm world size: "<<commSize<<" this rank is: "
163  <<myrank<<" sending to rank "<<destinationRank);
164  Send(destinationRank);
165  } else {
166  DMSG(1,"Comm world size: "<<commSize<<" this rank is: "
167  <<myrank<<" receiving. ");
168  for ( unsigned int i = 0 ; i<commSize ; ++i) {
169  if ( i != myrank ) Receive(i);
170  }
171  }
172 */
173 
174  const G4double elapsed = MPI::Wtime() - sttime;
175  long total=0;
176  COMM_G4COMMAND_.Reduce(&bytesSent,&total,1,MPI::LONG,MPI::SUM,
177  destinationRank);
178  if ( verbose > 0 && myrank == destinationRank ) {
179  //Collect from ranks how much data was sent around
180  G4cout<<"G4VUserMPIrunMerger::Merge() - data transfer performances: "
181  <<double(total)/1000./elapsed<<" kB/s"
182  <<" (Total Data Transfer= "<<double(total)/1000<<" kB in "
183  <<elapsed<<" s)."<<G4endl;
184  }
185 
186 
187 
188  COMM_G4COMMAND_.Free();
189  DMSG(0,"G4VUserMPIrunMerger::Merge done");
190 }
void Merge(std::function< void(unsigned int)> senderF, std::function< void(unsigned int)> receiverF, std::function< void(void)> barrierF, unsigned int commSize, unsigned int myrank)
Definition: G4MPIutils.cc:161
G4GLOB_DLL std::ostream G4cout
void Send(const unsigned int destination)
G4double total(Particle const *const p1, Particle const *const p2)
void Receive(const unsigned int source)
#define G4endl
Definition: G4ios.hh:61
#define DMSG(LVL, MSG)
double G4double
Definition: G4Types.hh:76

Here is the call graph for this function:

Here is the caller graph for this function:

void G4VUserMPIrunMerger::OutputUserData ( void *  input_data,
const MPI::Datatype &  dt,
int  count 
)
inline, protected

Definition at line 52 of file G4VUserMPIrunMerger.hh.

52  {
53  output_userdata.push_back( registered_data{input_data,dt,count} );
54  }

Here is the caller graph for this function:

virtual void G4VUserMPIrunMerger::Pack ( )
protected, pure virtual

Implemented in G4MPIrunMerger, and RunMerger.

Here is the caller graph for this function:

void G4VUserMPIrunMerger::Receive ( const unsigned int  source)
protected

Definition at line 87 of file G4VUserMPIrunMerger.cc.

88 {
89  DMSG( 1 , "G4VUserMPIrunMerger::Receive(...) , this rank : "
90  <<MPI::COMM_WORLD.Get_rank()<<" and receiving from : "<<source);
91  //DestroyBuffer();
92  //Receive from all but one
93  //for (G4int rank = 0; rank < commSize-1; ++rank)
94  //{
95  MPI::Status status;
96  COMM_G4COMMAND_.Probe(source, G4MPImanager::kTAG_RUN, status);
97  //const G4int source = status.Get_source();
98  const G4int newbuffsize = status.Get_count(MPI::PACKED);
99  DMSG(2,"Preparing to receive buffer of size: "<<newbuffsize);
100  char* buffer = outputBuffer;
101  if ( newbuffsize > outputBufferSize ) {
102  DMSG(3,"New larger buffer expected, resize");
103  //New larger buffer incoming, recreate buffer
104  delete[] outputBuffer;
105  buffer = new char[newbuffsize];
106  //Avoid complains from valgrind (i'm not really sure why this is needed, but, beside the
107  //small cpu penalty, we can live with that).)
108  std::fill(buffer,buffer+newbuffsize,0);
109  ownsBuffer = true;
110  }
111  SetupOutputBuffer(buffer,newbuffsize,0);
112  COMM_G4COMMAND_.Recv(buffer, newbuffsize, MPI::PACKED,source,
113  G4MPImanager::kTAG_RUN, status);
114  DMSG(3,"Buffer Size: "<<outputBufferSize<< " bytes at: "<<(void*)outputBuffer);
115  output_userdata.clear();
116  //User code, if implemented will return the concrete G4Run class
117  G4Run* aNewRun = UnPack();
118  if ( aNewRun == nullptr ) aNewRun = new G4Run;
119  //Add number of events counter
120  G4int nevets = 0;
121  OutputUserData(&nevets,MPI::INT,1);
122  //now userdata contains all data references, do the real unpacking
123  for ( const registered_data& el : output_userdata ) {
124  MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
125  el.p_data,el.count,el.dt,COMM_G4COMMAND_);
126  }
127  for ( G4int i = 0 ; i<nevets ; ++i ) aNewRun->RecordEvent(nullptr);
128 
129  //Now merge received MPI run with global one
130  DMSG(2,"Before G4Run::Merge : "<<run->GetNumberOfEvent());
131  run->Merge( aNewRun );
132  DMSG(2,"After G4Run::Merge : "<<run->GetNumberOfEvent());
133  delete aNewRun;
134  //}
135 }
int MPI_Unpack(const void *, int, int *, void *, int, MPI_Datatype, MPI_Comm)
Definition: dummy_mpi.h:35
virtual void Merge(const G4Run *)
Definition: G4Run.cc:54
#define buffer
Definition: xmlparse.cc:628
int G4int
Definition: G4Types.hh:78
G4int GetNumberOfEvent() const
Definition: G4Run.hh:79
virtual G4Run * UnPack()=0
Definition: G4Run.hh:46
void OutputUserData(void *input_data, const MPI::Datatype &dt, int count)
virtual void RecordEvent(const G4Event *)
Definition: G4Run.cc:51
static G4String Status(G4StepStatus stps)
void SetupOutputBuffer(char *buff, G4int size, G4int position)
#define DMSG(LVL, MSG)

Here is the call graph for this function:

Here is the caller graph for this function:

void G4VUserMPIrunMerger::Send ( const unsigned int  destination)
protected

Definition at line 46 of file G4VUserMPIrunMerger.cc.

47 {
48  assert(run!=nullptr);
49  G4int nevts = run->GetNumberOfEvent();
50  DMSG( 1 , "G4VUserMPIrunMerger::Send() : Sending a G4run ("
51  <<run<<") with "<<nevts<<" events to: "<<destination);
52  input_userdata.clear();
53  Pack();//User code
54  InputUserData(&nevts,MPI::INT,1);
55 
56  DestroyBuffer();
57  G4int newbuffsize = 0;
58  for ( const const_registered_data& el : input_userdata ) {
59  newbuffsize += (el.dt.Get_size()*el.count);
60  }
61  char* buffer = new char[newbuffsize];
62  //Avoid complains from valgrind (i'm not really sure why this is needed, but, beside the
63  //small cpu penalty, we can live with that).)
64  std::fill(buffer,buffer+newbuffsize,0);
65  ownsBuffer=true;
66  SetupOutputBuffer(buffer,newbuffsize,0);
67  DMSG(3,"Buffer size: "<<newbuffsize<<" bytes at: "<<(void*)outputBuffer);
68 
69  //Now userdata contains all data to be send, do the real packing
70  for ( const const_registered_data& el : input_userdata ) {
71 #ifdef G4MPI_USE_MPI_PACK_NOT_CONST
72  MPI_Pack(const_cast<void*>(el.p_data),el.count,el.dt,
73 #else
74  MPI_Pack(el.p_data,el.count,el.dt,
75 #endif
76  outputBuffer,outputBufferSize,
77  &outputBufferPosition,COMM_G4COMMAND_);
78  }
79  assert(outputBufferSize==outputBufferPosition);
80  COMM_G4COMMAND_.Send(outputBuffer , outputBufferSize , MPI::PACKED ,
81  destination , G4MPImanager::kTAG_RUN);
82  bytesSent+=outputBufferSize;
83  DMSG(2 , "G4VUserMPIrunMerger::Send() : Done ");
84 }
#define buffer
Definition: xmlparse.cc:628
int G4int
Definition: G4Types.hh:78
G4int GetNumberOfEvent() const
Definition: G4Run.hh:79
int MPI_Pack(const void *, int, MPI_Datatype, void *, int, int *, MPI_Comm)
Definition: dummy_mpi.h:34
virtual void Pack()=0
void SetupOutputBuffer(char *buff, G4int size, G4int position)
#define DMSG(LVL, MSG)
void InputUserData(void *input_data, const MPI::Datatype &dt, int count)

Here is the call graph for this function:

Here is the caller graph for this function:

void G4VUserMPIrunMerger::SetDestinationRank ( G4int  i)
inline

Definition at line 40 of file G4VUserMPIrunMerger.hh.

40 { destinationRank = i; }
void G4VUserMPIrunMerger::SetRun ( G4Run *  r)
inline

Definition at line 39 of file G4VUserMPIrunMerger.hh.

39 { run = r; }
void G4VUserMPIrunMerger::SetupOutputBuffer ( char *  buff,
G4int  size,
G4int  position 
)
inline, protected

Definition at line 58 of file G4VUserMPIrunMerger.hh.

58  {
59  outputBuffer = buff;
60  outputBufferSize=size;
61  outputBufferPosition=position;
62  }
#define position
Definition: xmlparse.cc:622

Here is the caller graph for this function:

void G4VUserMPIrunMerger::SetVerbosity ( G4int  ver)
inline

Definition at line 41 of file G4VUserMPIrunMerger.hh.

41 { verbose = ver; }

Here is the caller graph for this function:

virtual G4Run* G4VUserMPIrunMerger::UnPack ( )
protected, pure virtual

Implemented in G4MPIrunMerger, and RunMerger.

Here is the caller graph for this function:


The documentation for this class was generated from the following files: