Geant4  10.02.p01
G4MPIscorerMerger.cc
//
// ********************************************************************
// * License and Disclaimer                                           *
// *                                                                  *
// * The  Geant4 software  is  copyright of the Copyright Holders  of *
// * the Geant4 Collaboration.  It is provided  under  the terms  and *
// * conditions of the Geant4 Software License,  included in the file *
// * LICENSE and available at  http://cern.ch/geant4/license .  These *
// * include a list of copyright holders.                             *
// *                                                                  *
// * Neither the authors of this software system, nor their employing *
// * institutes,nor the agencies providing financial support for this *
// * work  make  any representation or  warranty, express or implied, *
// * regarding  this  software system or assume any liability for its *
// * use.  Please see the license in the file  LICENSE  and URL above *
// * for the full disclaimer and the limitation of liability.         *
// *                                                                  *
// * This  code  implementation is the result of  the  scientific and *
// * technical work of the GEANT4 collaboration.                      *
// * By using,  copying,  modifying or  distributing the software (or *
// * any work based  on the software)  you  agree  to acknowledge its *
// * use  in  resulting  scientific  publications,  and indicate your *
// * acceptance of all terms of the Geant4 Software license.          *
// ********************************************************************
//
#include "G4MPIscorerMerger.hh"
#include <map>
#include <ostream>
#include <algorithm>
#include <assert.h>
#include <functional>
#include <vector>
#include "G4MPIutils.hh"

G4MPIscorerMerger::G4MPIscorerMerger() :
  outputBuffer(nullptr),outputBufferSize(0),outputBufferPosition(0),bytesSent(0),
  ownsBuffer(false),scoringManager(nullptr),commSize(0),
  destinationRank(G4MPImanager::kRANK_MASTER),verbose(0)
{}

G4MPIscorerMerger::G4MPIscorerMerger(G4ScoringManager* mgr,
                                     G4int destination,
                                     G4int verbosity) :
  outputBuffer(nullptr),outputBufferSize(0),outputBufferPosition(0),bytesSent(0),
  ownsBuffer(false),
  scoringManager(mgr), commSize(0), destinationRank(destination),
  verbose(verbosity)
{
}

G4MPIscorerMerger::~G4MPIscorerMerger() {
  if ( ownsBuffer ) delete[] outputBuffer;
}

#define DMSG( LVL , MSG ) { if ( verbose > LVL ) { G4cout << MSG << G4endl; } }

/* Format of the message.
 *
 * Input:
 * A vector of G4VScoringMesh, each of which is a
 * std::map<name:G4String,G4THitsMap<G4double>*> where
 * G4THitsMap<T> = std::map<int,T*>
 *
 * Output:
 * A buffer:
 * [0]   : numMesh : int (**Begin Message**)
 * [1]   : meshID : int (**Begin Mesh**)
 * [2]   : numMaps : int
 * [3]   : sizeName : int (**Begin Map**)
 * [4]   : name[0] : char
 * ...
 * [...] : name[sizeName-1] : char
 * [...] : mapSize : int
 * [...] : THitsMap.keys()[0] : int
 * ...
 * [...] : THitsMap.keys()[mapSize-1] : int
 * [...] : THitsMap.values()[0] : double
 * ...
 * [...] : THitsMap.values()[mapSize-1] : double (**End Map**)
 * [...] : Next Map : repeat from (**Begin Map**)
 * ...
 * [...] : Next Mesh : repeat from (**Begin Mesh**)
 */

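/* Example (illustrative only, names and values hypothetical): a scoring
 * manager holding one mesh with a single scorer named "dose" and two filled
 * cells would serialize, conceptually, as
 *
 *   1, 0, 1, 4, 'd','o','s','e', 2, 10, 42, 1.5, 2.5
 *
 * i.e. numMesh=1, meshID=0, numMaps=1, sizeName=4, the name characters,
 * mapSize=2, the two cell IDs (10,42) and the two scores (1.5,2.5).
 * The actual byte layout is whatever MPI_Pack produces for these types.
 */
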
void G4MPIscorerMerger::Merge() {
  DMSG(0, "G4MPIscorerMerger::Merge called");
  const unsigned int myrank = MPI::COMM_WORLD.Get_rank();
  commSize = MPI::COMM_WORLD.Get_size();
  if ( commSize == 1 ) {
    DMSG(1,"Comm world size is 1, nothing to do");
    return;
  }
  comm = MPI::COMM_WORLD.Dup();
  DestroyBuffer();

  //ANDREA:->
//  G4cout<<"Before sending: "<<G4endl;
//  scoringManager->GetMesh(0)->Dump();
//  for ( int i = 0 ; i < scoringManager->GetNumberOfMesh() ; ++i ) {
//    for ( auto e : scoringManager->GetMesh(i)->GetScoreMap() )
//    {
//      G4cout<<e.first<<" : "<<e.second<<G4endl;
//      for ( auto c : *(e.second->GetMap()) ) {
//        G4cout<<c.first<<"="<<*c.second<<G4endl;
//      }
//    }
//  }
  //ANDREA:<-

  bytesSent=0;
  const G4double sttime = MPI::Wtime();

  //Use G4MPIutils to optimize communications between ranks
  typedef std::function<void(unsigned int)> handler_t;
  using std::placeholders::_1;
  handler_t sender = std::bind(&G4MPIscorerMerger::Send , this , _1);
  handler_t receiver = std::bind(&G4MPIscorerMerger::Receive, this, _1);
  std::function<void(void)> barrier = std::bind(&MPI::Intracomm::Barrier,&comm);
  G4mpi::Merge( sender , receiver , barrier , commSize , myrank );

  //OLD Style p2p communications
/*
  if ( myrank != destinationRank ) {
    DMSG(1,"Comm world size: "<<commSize<<" this rank is: "
         <<myrank<<" sending to rank "<<destinationRank
         <<" Number of mesh: "<< scoringManager->GetNumberOfMesh() );
    Send(destinationRank);
  } else {
    DMSG(1,"Comm world size: "<<commSize<<" this rank is: "
         <<myrank<<" receiving "
         <<" Number of mesh: "<< scoringManager->GetNumberOfMesh() );
    for ( unsigned int i = 0 ; i < commSize ; ++i ) {
      if ( i != myrank ) Receive(i);
    }
  }
*/
  const G4double elapsed = MPI::Wtime() - sttime;
  long total=0;
  comm.Reduce(&bytesSent,&total,1,MPI::LONG,MPI::SUM,destinationRank);
  if ( verbose > 0 && myrank == destinationRank ) {
    //Collect from ranks how much data was sent around
    G4cout<<"G4MPIscorerMerger::Merge() - data transfer performance: "
          <<double(total)/1000./elapsed<<" kB/s"
          <<" (Total Data Transfer = "<<double(total)/1000.<<" kB in "
          <<elapsed<<" s)."<<G4endl;
  }
  //ANDREA:->
//  G4cout<<"After Receiving: "<<G4endl;
//  scoringManager->GetMesh(0)->Dump();
//  for ( int i = 0 ; i < scoringManager->GetNumberOfMesh() ; ++i ) {
//    for ( auto e : scoringManager->GetMesh(i)->GetScoreMap() )
//    {
//      G4cout<<e.first<<" : "<<e.second<<G4endl;
//      for ( auto c : *(e.second->GetMap()) ) {
//        G4cout<<c.first<<"="<<*c.second<<" (=2x"<<.5*(*c.second)<<")"<<G4endl;
//      }
//    }
//  }
  //ANDREA:<-
  comm.Free();
  DMSG(0,"G4MPIscorerMerger::Merge done.");
}

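/* Usage sketch (illustrative; assumes MPI is initialized and every rank has
 * filled its command-line scorers, e.g. at the end of a run). All names are
 * from this file or the g4mpi library except the surrounding scaffolding:
 *
 *   G4ScoringManager* sm = G4ScoringManager::GetScoringManagerIfExist();
 *   if ( sm ) {
 *     G4MPIscorerMerger merger(sm,
 *                              G4MPImanager::kRANK_MASTER, // destination rank
 *                              1);                         // verbosity
 *     merger.Merge();  // collective: all ranks must call this
 *   }
 *
 * After Merge() returns, the meshes on the destination rank hold the
 * accumulated scores of all ranks.
 */
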
void G4MPIscorerMerger::Receive(const unsigned int source) {
  DMSG(1,"Receiving scorers");
  // DestroyBuffer();
  DMSG(2,"Receiving from: "<<source);
  MPI::Status status;
  comm.Probe(source, G4MPImanager::kTAG_CMDSCR, status);
  const G4int newbuffsize = status.Get_count(MPI::PACKED);
  DMSG(2,"Preparing to receive buffer of size: "<<newbuffsize);
  char* buffer = outputBuffer;
  if ( newbuffsize > outputBufferSize ) {
    DMSG(3,"New larger buffer expected, resize");
    //New larger buffer incoming, recreate buffer
    //TODO: use realloc?
    delete[] outputBuffer;
    buffer = new char[newbuffsize];
    //Zero the buffer to avoid complaints from Valgrind (I'm not really sure
    //why this is needed, but besides the small CPU penalty we can live with it)
    std::fill( buffer , buffer + newbuffsize , 0 );
    ownsBuffer = true;
  }
  SetupOutputBuffer(buffer,newbuffsize,0);
  comm.Recv(buffer, newbuffsize, MPI::PACKED, source,
            G4MPImanager::kTAG_CMDSCR, status);
  DMSG(3,"Buffer Size: "<<outputBufferSize<<" bytes at: "<<(void*)outputBuffer);
  UnPackAndMerge(scoringManager);
  DMSG(1,"Receiving of command line scorers done");
}

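/* The receive side uses the standard MPI probe-then-receive pattern for
 * variable-length messages. The same idea in a minimal standalone form
 * (C bindings, hypothetical src/tag values):
 *
 *   MPI_Status st;
 *   MPI_Probe(src, tag, MPI_COMM_WORLD, &st);   // wait for a pending message
 *   int count = 0;
 *   MPI_Get_count(&st, MPI_PACKED, &count);     // its size in MPI_PACKED units
 *   std::vector<char> buf(count);
 *   MPI_Recv(buf.data(), count, MPI_PACKED, src, tag,
 *            MPI_COMM_WORLD, MPI_STATUS_IGNORE);
 */
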
void G4MPIscorerMerger::Send(const unsigned int destination) {
  DMSG(1,"Sending scorers "<<this);
  //Step 1: Setup buffer to pack/unpack data
  const G4int newbuffsize = CalculatePackSize(scoringManager);
  //DestroyBuffer();
  char* buffer = outputBuffer;
  if ( newbuffsize > outputBufferSize ) {
    delete[] outputBuffer;
    buffer = new char[newbuffsize];
    //Zero the buffer to avoid complaints from Valgrind (I'm not really sure
    //why this is needed, but besides the small CPU penalty we can live with it)
    std::fill( buffer , buffer+newbuffsize , 0 );
    ownsBuffer = true;
  }
  SetupOutputBuffer(buffer,newbuffsize,0);
  DMSG(3,"Buffer Size: "<<newbuffsize<<" bytes at: "<<(void*)outputBuffer);
  //Step 2: Pack the meshes into the buffer
  Pack(scoringManager);

  //Version 1: p2p communication
  comm.Send( outputBuffer , outputBufferSize , MPI::PACKED ,
             destination , G4MPImanager::kTAG_CMDSCR);
  bytesSent += newbuffsize;
  //Receiver should use probe to get size of the package being sent
  DMSG(1,"Sending done");
}

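/* Send() and Receive() grow the scratch buffer monotonically and reuse it
 * across calls; it is freed only in the destructor. On the realloc TODO
 * above: a sketch of that variant (hypothetical, and it would require the
 * buffer to be allocated with std::malloc rather than new[]):
 *
 *   if ( newbuffsize > outputBufferSize ) {
 *     buffer = static_cast<char*>(std::realloc(buffer, newbuffsize));
 *     // realloc preserves old contents, so only the new tail needs zeroing
 *     std::fill(buffer + outputBufferSize, buffer + newbuffsize, 0);
 *   }
 *
 * Since the buffer is fully overwritten before use anyway, the copying that
 * realloc performs buys little here; new[] is just as good.
 */
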
void G4MPIscorerMerger::Pack(const G4ScoringManager* sm) {
  assert(sm!=nullptr);
  if ( outputBuffer == nullptr || outputBufferPosition>=outputBufferSize) {
    G4Exception("G4MPIscorerMerger::Pack(const G4ScoringManager*)",
                "MPI001",FatalException,
                "Call SetupOutputBuffer before trying to pack");
    return;
  }
  DMSG(2,"Starting packing of meshes, # meshes: "<<sm->GetNumberOfMesh());
  /*const*/ size_t numMeshes=sm->GetNumberOfMesh();//TODO: OLD MPI interface
  MPI_Pack(&numMeshes,1,MPI::UNSIGNED,
           outputBuffer,outputBufferSize,
           &outputBufferPosition,comm);
  for (size_t i = 0; i <numMeshes; ++i)
  {
    MPI_Pack(&i,1,MPI::UNSIGNED,
             outputBuffer,outputBufferSize,
             &outputBufferPosition,comm);
    Pack(sm->GetMesh(i));
  }
}

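/* MPI_Pack appends a value to a caller-provided buffer and advances the
 * position cursor; MPI_Unpack is its exact mirror. A minimal round trip
 * (C bindings, all names local to this sketch):
 *
 *   char buf[128];
 *   int pos = 0;
 *   unsigned int in = 42;
 *   MPI_Pack(&in, 1, MPI_UNSIGNED, buf, sizeof(buf), &pos, MPI_COMM_WORLD);
 *   // pos now marks the end of the packed data
 *   int rpos = 0;
 *   unsigned int out = 0;
 *   MPI_Unpack(buf, sizeof(buf), &rpos, &out, 1, MPI_UNSIGNED, MPI_COMM_WORLD);
 *   assert(out == 42);
 */
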
void G4MPIscorerMerger::UnPackAndMerge(const G4ScoringManager* sm) {
  assert(sm!=nullptr);
  if ( outputBuffer == nullptr || outputBufferPosition>=outputBufferSize) {
    G4Exception("G4MPIscorerMerger::UnPackAndMerge(const G4ScoringManager*)",
                "MPI001",FatalException,
                "Call SetupOutputBuffer before trying to un-pack");
    return;
  }
  size_t numMeshes=0;
  MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
             &numMeshes,1,MPI::UNSIGNED,comm);
  if ( numMeshes != sm->GetNumberOfMesh() ) {
    G4ExceptionDescription msg;
    msg << "Number of meshes to unpack ("<<numMeshes;
    msg << ") does not correspond to expected number ("<<sm->GetNumberOfMesh();
    msg << ")";
    G4Exception("G4MPIscorerMerger::UnPackAndMerge(const G4ScoringManager*)",
                "MPI001",FatalException,msg);
    return;
  }

  size_t meshid=0;
  for ( size_t i = 0 ; i < numMeshes ; ++i ) {
    MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
               &meshid,1,MPI::UNSIGNED,comm);
    if ( meshid != i ) {
      G4ExceptionDescription msg;
      msg<<"Cannot unpack: expecting mesh "<<i<<" and found "<<meshid;
      msg<<" during unpack.";
      G4Exception("G4MPIscorerMerger::UnPackAndMerge(const G4ScoringManager*)",
                  "MPI001",FatalException,msg);
      return;
    }
    G4VScoringMesh* original = sm->GetMesh(i);
    UnPackAndMerge(original);
  }
}

void G4MPIscorerMerger::Pack(const G4VScoringMesh* mesh) {
  assert(mesh!=nullptr);
  assert(outputBuffer!=nullptr);
  assert(outputBufferPosition<=outputBufferSize);
  DMSG(3,"Packing mesh: "<<mesh);

  const MeshScoreMap& map = mesh->GetScoreMap();
  /*const*/ size_t nummaps = map.size();//TODO: old MPI interface
  MPI_Pack(&nummaps,1,MPI::UNSIGNED,
           outputBuffer,outputBufferSize,
           &outputBufferPosition,comm);
  for ( const auto& ele : map ) {
    const G4String& name = ele.first;
    /*const*/ size_t ss = name.size();
    MPI_Pack(&ss,1,MPI::UNSIGNED,
             outputBuffer,outputBufferSize,
             &outputBufferPosition,comm);
#ifdef G4MPI_USE_MPI_PACK_NOT_CONST
    //Some MPI implementations declare MPI_Pack input as non-const: make a copy
    char* nn = new char[name.length()];
    std::copy(name.begin(),name.end(),nn);
#else
    const char* nn = name.c_str();
#endif
    MPI_Pack(nn,ss,MPI::CHAR,
             outputBuffer,outputBufferSize,
             &outputBufferPosition,comm);
    Pack(ele.second);
#ifdef G4MPI_USE_MPI_PACK_NOT_CONST
    delete[] nn;
#endif
  }
}

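/* Older MPI implementations declare MPI_Pack's input pointer as non-const
 * void*, so name.c_str() cannot be passed there; the
 * G4MPI_USE_MPI_PACK_NOT_CONST branch above works around that with a mutable
 * copy. The string encoding itself is length-prefixed, sketched in isolation
 * below (hypothetical buf/bufsize/pos variables):
 *
 *   std::string s = "dose";
 *   unsigned int len = s.size();
 *   MPI_Pack(&len, 1, MPI_UNSIGNED, buf, bufsize, &pos, MPI_COMM_WORLD);
 *   MPI_Pack(const_cast<char*>(s.data()), len, MPI_CHAR,
 *            buf, bufsize, &pos, MPI_COMM_WORLD);
 *
 * No terminating NUL is transmitted: the receiver first unpacks len, then
 * exactly len characters, as UnPackAndMerge below does.
 */
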
void G4MPIscorerMerger::UnPackAndMerge(G4VScoringMesh* inmesh) {
  assert(outputBuffer!=nullptr);
  assert(outputBufferPosition<=outputBufferSize);
  assert(inmesh!=nullptr);
  DMSG(3,"Preparing to unpack a mesh and merge into: "<<inmesh);
  const G4String& detName = inmesh->GetWorldName();
  size_t nummaps = 0;
  MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
             &nummaps,1,MPI::UNSIGNED,comm);
  for ( size_t i = 0 ; i < nummaps ; ++i ) {
    size_t nameSize = 0;
    MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
               &nameSize,1,MPI::UNSIGNED,comm);
    //Create a null-terminated C-string: needed later when converting this to
    //a G4String (not sure why, but Valgrind reports an issue with MPI_Unpack otherwise)
    char* name = new char[nameSize+1];
    std::fill(name,name+nameSize+1,0);
    MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
               name,nameSize,MPI::CHAR,comm);
    const G4String colname(name,nameSize);
    delete[] name;
    //This memory churn is very inefficient, but we cannot reuse the HitMap
    //because we cannot change its name
    //TODO: Evaluate change in HitMap class to allow for change of names
    HitMap* hm = UnPackHitMap(detName,colname);
    inmesh->Accumulate(hm);
    delete hm;
  }
}

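/* Accumulate() folds the received hits map into the mesh's own map with the
 * same collection name; for G4THitsMap<G4double> the merge is an entry-wise
 * sum. The semantics sketched with plain std::map (illustrative only):
 *
 *   std::map<G4int,G4double> local    { {10, 1.5}, {42, 2.5} };
 *   std::map<G4int,G4double> received { {10, 0.5}, {77, 3.0} };
 *   for ( const auto& kv : received )
 *       local[kv.first] += kv.second;  // -> {10:2.0, 42:2.5, 77:3.0}
 */
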
void G4MPIscorerMerger::Pack(const HitMap* sm) {
  assert(sm!=nullptr);
  assert(outputBuffer!=nullptr);
  assert(outputBufferPosition<=outputBufferSize);
  DMSG(3,"Packing hitmap: "<<sm<<" with: "<<sm->GetSize()<<" elements.");
  /*const*/ size_t numEl = sm->GetSize();//TODO: old MPI implementation
  MPI_Pack(&numEl,1,MPI::UNSIGNED,
           outputBuffer,outputBufferSize,
           &outputBufferPosition,comm);
  const auto& theMap = *sm->GetMap();
  std::vector<G4int> ids;
  std::vector<G4double> vals;
  std::transform(theMap.begin(),theMap.end(),std::back_inserter(ids),
                 [](decltype(*theMap.begin())& e){ return e.first; });
  std::transform(theMap.begin(),theMap.end(),std::back_inserter(vals),
                 [](decltype(*theMap.begin())& e){ return *e.second; });
  assert(ids.size()==vals.size()&&ids.size()==numEl);
  MPI_Pack(ids.data(),ids.size(),MPI::INT,
           outputBuffer,outputBufferSize,
           &outputBufferPosition,comm);
  MPI_Pack(vals.data(),vals.size(),MPI::DOUBLE,
           outputBuffer,outputBufferSize,
           &outputBufferPosition,comm);
}

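/* The two std::transform calls above split the map of (id -> value*) entries
 * into parallel key and value arrays so that each can be packed with a single
 * MPI_Pack call. Equivalent in effect to the plain loop:
 *
 *   std::vector<G4int> ids;
 *   std::vector<G4double> vals;
 *   for ( const auto& e : theMap ) {
 *       ids.push_back(e.first);
 *       vals.push_back(*e.second);  // the map stores pointers to G4double
 *   }
 */
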
HitMap* G4MPIscorerMerger::UnPackHitMap(const G4String& detName,
                                        const G4String& colName) {
  assert(outputBuffer!=nullptr);
  assert(outputBufferPosition<=outputBufferSize);
  DMSG(3,"Preparing to unpack a hit map for: "<<detName<<","<<colName);
  size_t numEl = 0;
  MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
             &numEl,1,MPI::UNSIGNED,comm);
  G4int* ids = new G4int[numEl];
  MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
             ids,numEl,MPI::INT,comm);
  G4double* vals = new G4double[numEl];
  MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
             vals,numEl,MPI::DOUBLE,comm);
  HitMap* result = new HitMap(detName,colName);
  for ( unsigned int i = 0 ; i < numEl ; ++i ) result->set(ids[i],vals[i]);
  delete[] ids;
  delete[] vals;
  return result;
}

G4int G4MPIscorerMerger::CalculatePackSize(const G4ScoringManager* sm) const
{
  DMSG(3,"Calculating dimension of data to send");
  if ( sm == nullptr ) return 0;
  //Calculate how much data each call to Pack* appends to the buffer,
  //e.g. sizeof(data).
  //The number of sizeof here should match the number of calls to MPI_Pack

  //Pack(ScoringMgr)
  G4int size = sizeof(unsigned int);
  DMSG(3,"There are "<<sm->GetNumberOfMesh()<<" meshes.");
  //Loop on mesh
  for ( size_t i = 0 ; i<sm->GetNumberOfMesh() ; ++i ) {
    size += sizeof(unsigned int);//ID
    size += CalculatePackSize(sm->GetMesh(i));
  }
  return size;
}

G4int G4MPIscorerMerger::CalculatePackSize(const G4VScoringMesh* mesh) const
{
  DMSG(3,"Calculating size for mesh: "<<mesh);
  //PackSingleMesh(Mesh)
  G4int size = sizeof(unsigned int);//num maps
  const MeshScoreMap& map = mesh->GetScoreMap();
  for (const auto& ele : map ) {
    //PackHitsMap
    size += sizeof(unsigned int);//name size
    const G4String& name = ele.first;
    size += sizeof(char)*name.size();//name
    size += CalculatePackSize(ele.second);
  }
  DMSG(3,"mesh "<<mesh<<" size: "<<size);
  return size;
}

G4int G4MPIscorerMerger::CalculatePackSize(const HitMap* map) const {
  const G4int numEls = map->GetSize();
  G4int size = sizeof(unsigned int);
  size += sizeof(G4int)*numEls;
  size += sizeof(G4double)*numEls;
  DMSG(3,"HitMap "<<map<<" size: "<<size<<" in "<<numEls<<" elements.");
  return size;
}

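/* Worked example for the three CalculatePackSize overloads (illustrative;
 * assumes 4-byte unsigned int/G4int and 8-byte G4double): one mesh with one
 * scorer named "dose" holding two filled cells gives
 *
 *   scoring manager : 4             (numMesh)
 *   per mesh        : 4             (meshID)
 *   mesh            : 4 + 4 + 4     (numMaps, sizeName, "dose")
 *   hit map         : 4 + 2*4 + 2*8 (mapSize, keys, values)
 *   total           : 48 bytes
 *
 * This counts raw sizeof bytes, which matches what MPI_Pack consumes for
 * basic types on typical homogeneous systems; MPI_Pack_size would be the
 * portable way to obtain an upper bound.
 */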