Geant4  10.03.p02
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
G4MPIscorerMerger.cc
Go to the documentation of this file.
1 //
2 // ********************************************************************
3 // * License and Disclaimer *
4 // * *
5 // * The Geant4 software is copyright of the Copyright Holders of *
6 // * the Geant4 Collaboration. It is provided under the terms and *
7 // * conditions of the Geant4 Software License, included in the file *
8 // * LICENSE and available at http://cern.ch/geant4/license . These *
9 // * include a list of copyright holders. *
10 // * *
11 // * Neither the authors of this software system, nor their employing *
12 // * institutes,nor the agencies providing financial support for this *
13 // * work make any representation or warranty, express or implied, *
14 // * regarding this software system or assume any liability for its *
15 // * use. Please see the license in the file LICENSE and URL above *
16 // * for the full disclaimer and the limitation of liability. *
17 // * *
18 // * This code implementation is the result of the scientific and *
19 // * technical work of the GEANT4 collaboration. *
20 // * By using, copying, modifying or distributing the software (or *
21 // * any work based on the software) you agree to acknowledge its *
22 // * use in resulting scientific publications, and indicate your *
23 // * acceptance of all terms of the Geant4 Software license. *
24 // ********************************************************************
25 //
26 #include "G4MPIscorerMerger.hh"
27 #include <map>
28 #include <ostream>
29 #include <algorithm>
30 #include <assert.h>
31 #include <functional>
32 #include "G4MPIutils.hh"
33 
34 #define DMSG( LVL , MSG ) { if ( verbose > LVL ) { G4cout << MSG << G4endl; } }
35 
36 namespace {
 37  //This class extends G4StatDouble to get access
 38  //to all data-members of the base class.
 39  //It provides two functions: pack and unpack
 40  //into a buffer for sending/receiving via MPI
 41  struct MPIStatDouble : public G4StatDouble {
 42  G4int verbose;
 //Serialize the statistics into the MPI pack buffer at *position:
 //first m_n as one INT, then the five doubles
 //(m_scale,m_sum_w,m_sum_w2,m_sum_wx,m_sum_wx2). *position is advanced
 //by MPI_Pack.
 43  inline void Pack(void* buffer,int bufferSize,int* position , MPI::Intracomm& comm ) const
 44  {
 45  DMSG(4,"Packing G4StatDouble(n,scale,sum_w,sum_w2,sum_wx,sum_wx2): "
 46  <<m_n<<" "<<m_scale<<" "<<m_sum_w<<" "<<m_sum_w2
 47  <<" "<<m_sum_wx<<" "<<m_sum_wx2);
 48  MPI_Pack(&m_n,1,MPI::INT,buffer,bufferSize,position,comm);
 49  const G4double data[]{m_scale,m_sum_w,m_sum_w2,m_sum_wx,m_sum_wx2};
 50  MPI_Pack(&data,5,MPI::DOUBLE,buffer,bufferSize,position,comm);
 51  }
 //Inverse of Pack(): read one INT and five DOUBLEs from the buffer and
 //restore the base-class data members. Field order must match Pack()
 //exactly or the statistics are silently corrupted.
 52  inline void UnPack(void* buffer,int bufferSize,int* position , MPI::Intracomm& comm ) {
 53  MPI_Unpack(buffer,bufferSize,position,&m_n,1,MPI::INT,comm);
 54  G4double data[5];
 55  MPI_Unpack(buffer,bufferSize,position,data,5,MPI::DOUBLE,comm);
 56  m_scale = data[0];
 57  m_sum_w = data[1];
 58  m_sum_w2= data[2];
 59  m_sum_wx= data[3];
 60  m_sum_wx2=data[4];
 61  DMSG(4,"UnPacking G4StatDouble(n,scale,sum_w,sum_w2,sum_wx,sum_wx2): "
 62  <<m_n<<" "<<m_scale<<" "<<m_sum_w<<" "<<m_sum_w2
 63  <<" "<<m_sum_wx<<" "<<m_sum_wx2);
 64  }
 65  MPIStatDouble(G4int ver = 0) : verbose(ver) {}
 //Construct from a plain G4StatDouble with the given verbosity.
 //NOTE(review): the constructor body (one line) is not visible in this
 //extract — presumably it copies rhs via the base-class assignment;
 //confirm against the original file.
 66  MPIStatDouble(const G4StatDouble& rhs , G4int ver) : verbose(ver)
 67  {
 69  }
 70  };
 71 }
72 
 //Default constructor (the signature line is not visible in this
 //extract): no buffer allocated, no scoring manager attached, the
 //destination of the merge is the MPI master rank, verbosity off.
 74  outputBuffer(nullptr),outputBufferSize(0),outputBufferPosition(0),bytesSent(0),
 75  ownsBuffer(false),scoringManager(nullptr),commSize(0),
 76  destinationRank(G4MPImanager::kRANK_MASTER),verbose(0)
 77 {}
78 
 //Constructor taking the scoring manager whose meshes will be merged,
 //the rank that receives the merged result, and the verbosity level.
 //(The first parameter line of the signature is not visible in this
 //extract.) No buffer is allocated until Send()/Receive() need one.
 80  G4int destination,
 81  G4int verbosity) :
 82  outputBuffer(nullptr),outputBufferSize(0),outputBufferPosition(0),bytesSent(0),
 83  ownsBuffer(false),
 84  scoringManager(mgr), commSize(0), destinationRank(destination),
 85  verbose(verbosity)
 86 {
 87 }
88 
 //Destructor tail: release the pack/unpack buffer only when this
 //object allocated it (ownsBuffer); externally installed buffers are
 //not freed here.
 90  if ( ownsBuffer ) delete[] outputBuffer;
 91 }
92 
93 /* Format of the message.
94  *
95  * Input:
96  * A vector of G4VScoringMesh, each of that is a
97  * std::map<name:G4String,G4THitsMap<G4double>*> where
98  * G4THitsMap<T> = std::map<int,T*>
99  *
100  * Output:
101  * A buffer:
102  * [0] : numMesh : int (**Begin Message**)
103  * [1] : meshID : int (** Begin Mesh**)
104  * [2] : numMaps : int
105  * [3] : sizeName : int (** Begin Map **)
106  * [4] : name[0] : char
107  * ...
 108  * [...] : name[sizeName-1] : char
109  * [...] : mapSize : int
110  * [...] : THitsMap.keys()[0] : int
111  * ...
112  * [...] : THitsMap.keys()[mapSize-1] : int
113  * [...] : THitsMap.values()[0] : double
114  * ...
115  * [...] : THitsMap.values()[mapSize-1] : double (**End Map**)
116  * [...] : Next Map : repeat from (**Begin Map**)
117  * ...
118  * [...] : Next Mesh : repeat from (**Begin Mesh**)
119  *
120  *
121  */
122 
 124  DMSG(0, "G4MPIscorerMerger::Merge called");
 125  const unsigned int myrank = MPI::COMM_WORLD.Get_rank();
 126  commSize = MPI::COMM_WORLD.Get_size();
 127  if ( commSize == 1 ) {
 128  DMSG(1,"Comm world size is 1, nothing to do");
 129  return;
 130  }
 //Work on a private duplicate of COMM_WORLD so this merge cannot
 //interfere with other traffic on the world communicator; freed at the
 //end of this function.
 131  comm = MPI::COMM_WORLD.Dup();
 132  DestroyBuffer();
 133 
 134  //ANDREA:->
 135 // G4cout<<"Before sending: "<<G4endl;
 136 // scoringManager->GetMesh(0)->Dump();
 137 // for ( int i = 0 ; i < scoringManager->GetNumberOfMesh() ; ++i ) {
 138 // for ( auto e : scoringManager->GetMesh(i)->GetScoreMap() )
 139 // {
 140 // G4cout<<e.first<<" : "<<e.second<<G4endl;
 141 // for ( auto c: *(e.second->GetMap()) ) {
 142 // G4cout<<c.first<<"="<<*c.second<<G4endl;
 143 //
 144 // }
 145 // }
 146 // }
 147  //ANDREA:<-
 148 
 149  bytesSent=0;
 150  const G4double sttime = MPI::Wtime();
 151 
 152  //Use G4MPIutils to optimize communications between ranks
 153  typedef std::function<void(unsigned int)> handler_t;
 154  using std::placeholders::_1;
 155  handler_t sender = std::bind(&G4MPIscorerMerger::Send , this , _1);
 156  handler_t receiver = std::bind(&G4MPIscorerMerger::Receive, this, _1);
 157  std::function<void(void)> barrier = std::bind(&MPI::Intracomm::Barrier,&comm);
 //G4mpi::Merge decides, per rank, whether it should call the sender or
 //the receiver callback so that the data converges on one rank.
 158  G4mpi::Merge( sender , receiver , barrier , commSize , myrank );
 159 
 160  //OLD Style p2p communications
 161  /*
 162  if ( myrank != destinationRank ) {
 163  DMSG(1,"Comm world size: "<<commSize<<" this rank is: "
 164  <<myrank<<" sending to rank "<<destinationRank
 165  <<" Number of mesh: "<< scoringManager->GetNumberOfMesh() );
 166  Send(destinationRank);
 167  } else {
 168  DMSG(1,"Comm world size: "<<commSize<<" this rank is: "
 169  <<myrank<<" receiving "
 170  <<" Number of mesh: "<< scoringManager->GetNumberOfMesh() );
 171  for ( unsigned int i = 0 ; i < commSize ; ++i ) {
 172  if ( i != myrank ) Receive(i);
 173  }
 174  }
 175 */
 176  const G4double elapsed = MPI::Wtime() - sttime;
 177  long total=0;
 //Sum the per-rank byte counters onto the destination rank for the
 //optional throughput printout below.
 178  comm.Reduce(&bytesSent,&total,1,MPI::LONG,MPI::SUM,destinationRank);
 179  if ( verbose > 0 && myrank == destinationRank ) {
 180  //Collect from ranks how much data was sent around
 181  G4cout<<"G4MPIscorerMerger::Merge() -data transfer performances: "
 182  <<double(total)/1000./elapsed<<" kB/s"
 183  <<" (Total Data Transfer= "<<double(total)/1000.<<" kB in "
 184  <<elapsed<<" s)."<<G4endl;
 185  }
 186  //ANDREA:->
 187 // G4cout<<"After Receiving: "<<G4endl;
 188 // scoringManager->GetMesh(0)->Dump();
 189 // for ( int i = 0 ; i < scoringManager->GetNumberOfMesh() ; ++i ) {
 190 // for ( auto e : scoringManager->GetMesh(i)->GetScoreMap() )
 191 // {
 192 // G4cout<<e.first<<" : "<<e.second<<G4endl;
 193 // for ( auto c: *(e.second->GetMap()) ) {
 194 // G4cout<<c.first<<"="<<*c.second<<" (=2x"<<.5*(*c.second)<<")"<<G4endl;
 195 //
 196 // }
 197 // }
 198 // }
 199  //ANDREA:<-
 200  comm.Free();
 201  DMSG(0,"G4MPIscorerMerger::Merge done.");
 202 }
203 
204 void G4MPIscorerMerger::Receive(const unsigned int source) {
205  DMSG(1,"Receiving scorers");
206  // DestroyBuffer();
207  DMSG(2,"Receiving from: "<<source);
208  MPI::Status status;
209  comm.Probe(source, G4MPImanager::kTAG_CMDSCR, status);
210  const G4int newbuffsize = status.Get_count(MPI::PACKED);
211  DMSG(2,"Preparing to receive buffer of size: "<<newbuffsize);
212  char* buffer = outputBuffer;
213  if ( newbuffsize > outputBufferSize ) {
214  DMSG(3,"New larger buffer expected, resize");
215  //New larger buffer incoming, recreate buffer
216  //TODO: use realloc?
217  delete[] outputBuffer;
218  buffer = new char[newbuffsize];
219  //Avoid complains from valgrind (i'm not really sure why this is needed, but, beside the
220  //small cpu penalty, we can live with that).)
221  std::fill( buffer , buffer + newbuffsize , 0 );
222  ownsBuffer = true;
223  }
224  SetupOutputBuffer(buffer,newbuffsize,0);
225  comm.Recv(buffer, newbuffsize, MPI::PACKED, source,
226  G4MPImanager::kTAG_CMDSCR, status);
227  DMSG(3,"Buffer Size: "<<outputBufferSize<< " bytes at: "<<(void*)outputBuffer);
228  UnPackAndMerge(scoringManager);
229  DMSG(1,"Receiving of comamnd line scorers done");
230 }
231 
232 void G4MPIscorerMerger::Send(const unsigned int destination) {
233  DMSG(1,"Sending scorers "<<this);
234  //Step 1: Setup buffer to pack/unpack data
235  const G4int newbuffsize = CalculatePackSize(scoringManager);
236  //DestroyBuffer();
237  char* buffer = outputBuffer;
238  if ( newbuffsize > outputBufferSize ) {
239  delete[] outputBuffer;
240  buffer = new char[newbuffsize];
241  //Avoid complains from valgrind (i'm not really sure why this is needed, but, beside the
242  //small cpu penalty, we can live with that).)
243  std::fill( buffer , buffer+newbuffsize,0);
244  ownsBuffer = true;
245  }
246  SetupOutputBuffer(buffer,newbuffsize,0);
247  DMSG(3,"Buffer Size: "<<newbuffsize<< " bytes at: "<<(void*)outputBuffer);
248  Pack(scoringManager);
249  assert(outputBufferSize==outputBufferPosition);
250 
251  //Version 1: p2p communication
252  comm.Send(outputBuffer, outputBufferSize, MPI::PACKED, destination, G4MPImanager::kTAG_CMDSCR);
253  bytesSent += newbuffsize;
254  //Receiver should use probe to get size of the package being sent
255  DMSG(1,"Sending done");
256 }
257 
 //Pack(const G4ScoringManager*) body (signature line not visible in
 //this extract): writes the number of meshes, then for each mesh its
 //index followed by the mesh content (see Pack(mesh)). Requires the
 //output buffer to have been set up beforehand.
 259  assert(sm!=nullptr);
 260  if ( outputBuffer == nullptr || outputBufferPosition>=outputBufferSize) {
 261  G4Exception("G4MPIscorerMerger::Pack(const G4ScoringManager*)",
 262  "MPI001",FatalException,
 263  "Call SetOututBuffer before trying to pack");
 264  return;
 265  }
 266  DMSG(2,"Starting packing of meshes, # meshes: "<<sm->GetNumberOfMesh());
 267  /*const*/ size_t numMeshes=sm->GetNumberOfMesh();//TODO: OLD MPI interface
 268  MPI_Pack(&numMeshes,1,MPI::UNSIGNED,
 269  outputBuffer,outputBufferSize,
 270  &outputBufferPosition,
 271  comm);
 272  for (size_t i = 0; i <numMeshes; ++i)
 273  {
 //Mesh index first, so the receiver can verify ordering on unpack.
 274  MPI_Pack(&i,1,MPI::UNSIGNED,
 275  outputBuffer,outputBufferSize,
 276  &outputBufferPosition,comm);
 277  Pack(sm->GetMesh(i));
 278  }
 279 }
280 
 //UnPackAndMerge(const G4ScoringManager*) body (signature line not
 //visible in this extract): mirror of Pack(sm). Reads the number of
 //meshes, checks it matches the local scoring manager, then unpacks
 //each mesh (verifying the index ordering) and merges it in place.
 282  assert(sm!=nullptr);
 283  if ( outputBuffer == nullptr || outputBufferPosition>=outputBufferSize) {
 284  G4Exception("G4MPIscorerMerger::UnPack(const G4ScroingManager*)",
 285  "MPI001",FatalException,
 286  "Call SetOututBuffer before trying to un-pack");
 287  return;
 288  }
 289  size_t numMeshes=0;
 290  MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
 291  &numMeshes,1,MPI::UNSIGNED,comm);
 //Sanity check: sender and receiver must score the same set of meshes.
 //NOTE(review): 'msg' below is a G4ExceptionDescription declared on a
 //line not visible in this extract.
 292  if ( numMeshes != sm->GetNumberOfMesh() ) {
 294  msg << "Number of meshes to unpack ("<<numMeshes;
 295  msg <<") does not correspond to expected number ("<<sm->GetNumberOfMesh();
 296  msg<<")";
 297  G4Exception("G4MPIscorerMerger::UnPack(const G4ScroingManager*)",
 298  "MPI001",FatalException,msg);
 299  return;
 300  }
 301 
 302  size_t meshid=0;
 303  for ( size_t i = 0 ; i < numMeshes ; ++i ) {
 304  MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
 305  &meshid,1,MPI::UNSIGNED,comm);
 //Meshes must arrive in the exact order in which they were packed.
 306  if ( meshid != i ) {
 308  msg<<"Cannot unpack: expecting mesh "<<i<<" and found "<<meshid;
 309  msg<<" during unpack.";
 310  G4Exception("G4MPIscorerMerger::UnPack(const G4ScroingManager*)",
 311  "MPI001",FatalException,msg);
 312  return;
 313  }
 314  G4VScoringMesh* original = sm->GetMesh(i);
 315  UnPackAndMerge(original);
 316  }
 317 }
318 
 //Pack(mesh) body (signature line not visible in this extract): writes
 //the number of score maps in the mesh, then for each map the length
 //of its name, the name characters, and the map content (see
 //Pack(HitStatDoubleMap)).
 320  assert(mesh!=nullptr);
 321  assert(outputBuffer!=nullptr);
 322  assert(outputBufferPosition<=outputBufferSize);
 323  DMSG(3,"Packing mesh: "<<mesh);
 324 
 325  const MeshScoreMap& map = mesh->GetScoreMap();
 326  /*const*/ size_t nummaps = map.size();//TODO: old MPI interface
 327  MPI_Pack(&nummaps,1,MPI::UNSIGNED,
 328  outputBuffer,outputBufferSize,
 329  &outputBufferPosition,comm);
 330  for ( const auto& ele: map ) {
 331  const G4String& name = ele.first;
 332  /*const*/ size_t ss = name.size();
 333  MPI_Pack(&ss,1,MPI::UNSIGNED,
 334  outputBuffer,outputBufferSize,
 335  &outputBufferPosition,comm);
 //Some MPI implementations declare MPI_Pack without const on the input
 //buffer; in that case a mutable copy of the name is needed.
 336 #ifdef G4MPI_USE_MPI_PACK_NOT_CONST
 337  char* nn = new char[name.length()];
 338  std::copy(name.begin(),name.end(),nn);
 339 #else
 340  const char* nn = name.c_str();
 341 #endif
 342  MPI_Pack(nn,ss,MPI::CHAR,outputBuffer,outputBufferSize,&outputBufferPosition,comm);
 343  Pack(ele.second);
 344 #ifdef G4MPI_USE_MPI_PACK_NOT_CONST
 345  delete[] nn;
 346 #endif
 347  }
 348 }
349 
 //UnPackAndMerge(mesh) body (signature line not visible in this
 //extract): mirror of Pack(mesh). Reads the number of maps, then for
 //each map its name and content, and accumulates the unpacked map into
 //the local mesh.
 351  assert(outputBuffer!=nullptr);
 352  assert(outputBufferPosition<=outputBufferSize);
 353  assert(inmesh!=nullptr);
 354  DMSG(3,"Preparing to unpack a mesh and merge into: "<<inmesh);
 355  const G4String& detName = inmesh->GetWorldName();
 356  size_t nummaps = 0;
 357  MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
 358  &nummaps,1,MPI::UNSIGNED,comm);
 359  for ( size_t i = 0 ; i < nummaps ; ++i ) {
 360  size_t nameSize = 0;
 361  MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
 362  &nameSize,1,MPI::UNSIGNED,comm);
 363  //Create a null-terminated c-string: needed later when converting this to a G4String
 364  //(Not sure: but issue reported by valgrind with the use of MPI_Unpack)
 365  char* name = new char[nameSize+1];
 366  std::fill(name,name+nameSize+1,0);
 367  MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
 368  name,nameSize,MPI::CHAR,comm);
 369  const G4String colname(name,nameSize);
 370  delete[] name;
 371  //This memory churn is very inefficient, but we cannot reuse the HitMap
 372  //because we cannot change the names
 373  //TODO: Evaluate change in HitMap class to allow for change of names
 374  //HitMap* hm = UnPackHitMap(detName,colname);
 //The freshly unpacked map is owned here: accumulate, then free.
 375  HitStatDoubleMap* hm = UnPackHitStatDoubleMap(detName,colname);
 376  inmesh->Accumulate(hm);
 377  delete hm;
 378  }
 379 }
380 
381 
382 //void G4MPIscorerMerger::Pack(const HitMap* sm) {
383 // assert(sm!=nullptr);
384 // assert(outputBuffer!=nullptr);
385 // assert(outputBufferPosition<=outputBufferSize);
386 // DMSG(3,"Packing hitmap: "<<sm<<" with: "<<sm->GetSize()<<" elements.");
387 // /*const*/ size_t numEl = sm->GetSize();//TODO: old MPI implementation
388 // MPI_Pack(&numEl,1,MPI::UNSIGNED,
389 // outputBuffer,outputBufferSize,
390 // &outputBufferPosition,comm);
391 // const auto& theMap = *sm->GetMap();
392 // std::vector<G4int> ids;
393 // std::vector<G4double> vals;
394 // std::transform(theMap.begin(),theMap.end(),std::back_inserter(ids),
395 // [](decltype(*theMap.begin())& e){ return e.first;});
396 // std::transform(theMap.begin(),theMap.end(),std::back_inserter(vals),
397 // [](decltype(*theMap.begin())& e){ return *e.second;});
398 // assert(ids.size()==vals.size()&&ids.size()==numEl);
399 // MPI_Pack(ids.data(),ids.size(),MPI::INT,
400 // outputBuffer,outputBufferSize,
401 // &outputBufferPosition,comm);
402 // MPI_Pack(vals.data(),vals.size(),MPI::DOUBLE,
403 // outputBuffer,outputBufferSize,
404 // &outputBufferPosition,comm);
405 //}
406 
 //Pack(HitStatDoubleMap) body (signature line not visible in this
 //extract): writes the number of entries, then all keys as one INT
 //array, then each G4StatDouble payload via MPIStatDouble::Pack, in
 //the same (map) iteration order as the keys.
 408  assert(sm!=nullptr);
 409  assert(outputBuffer!=nullptr);
 410  assert(outputBufferPosition<=outputBufferSize);
 411  DMSG(3,"Packing hitmap: "<<sm<<" with: "<<sm->GetSize()<<" elements.");
 412  /*const*/ size_t numEl = sm->GetSize();//TODO: old MPI implementation
 413  MPI_Pack(&numEl,1,MPI::UNSIGNED,
 414  outputBuffer,outputBufferSize,
 415  &outputBufferPosition,comm);
 416  const auto& theMap = *sm->GetMap();
 417  std::vector<G4int> ids;
 418  std::transform(theMap.begin(),theMap.end(),std::back_inserter(ids),
 419  [](decltype(*theMap.begin())& e){ return e.first;});
 420  assert(/*ids.size()==vals.size()&&*/ids.size()==numEl);
 421  MPI_Pack(ids.data(),ids.size(),MPI::INT,outputBuffer,outputBufferSize,
 422  &outputBufferPosition,comm);
 423  for( const auto& e : theMap) {
 424  const MPIStatDouble sd(*e.second,verbose);
 425  sd.Pack(outputBuffer,outputBufferSize,&outputBufferPosition,comm);
 426  }
 427 }
428 
429 //HitMap* G4MPIscorerMerger::UnPackHitMap(const G4String& detName,
430 // const G4String& colName) {
431 // assert(outputBuffer!=nullptr);
432 // assert(outputBufferPosition<=outputBufferSize);
433 // DMSG(3,"Preparing to unpack a hit map for: "<<detName<<","<<colName);
434 // size_t numEl =0 ;
435 // MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
436 // &numEl,1,MPI::UNSIGNED,comm);
437 // G4int* ids = new G4int[numEl];
438 // MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
439 // ids,numEl,MPI::INT,comm);
440 // G4double* vals = new G4double[numEl];
441 // MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
442 // vals,numEl,MPI::DOUBLE,comm);
443 // HitMap* result = new HitMap(detName,colName);
444 // for ( unsigned int i = 0; i<numEl;++i) result->set(ids[i],vals[i]);
445 // delete[] ids;
446 // delete[] vals;
447 // return result;
448 //}
449 
 //UnPackHitStatDoubleMap body (the signature line is not visible in
 //this extract): mirror of Pack(HitStatDoubleMap). Reads the entry
 //count, the key array, then one G4StatDouble per key. Returns a
 //newly allocated map; ownership passes to the caller (who deletes it
 //after Accumulate, see UnPackAndMerge(mesh)).
 451  const G4String& detName, const G4String& colName)
 452 {
 453  assert(outputBuffer!=nullptr);
 454  assert(outputBufferPosition<=outputBufferSize);
 455  DMSG(3,"Preparing to unpack a hit map for: "<<detName<<","<<colName);
 456  size_t numEl =0 ;
 457  MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
 458  &numEl,1,MPI::UNSIGNED,comm);
 459  DMSG(3,"Will receive "<<numEl<<" values");
 460  G4int* ids = new G4int[numEl];
 461  MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
 462  ids,numEl,MPI::INT,comm);
 463  HitStatDoubleMap* result = new HitStatDoubleMap(detName,colName);
 464  for ( unsigned int i = 0; i<numEl;++i) {
 //Values arrive in the same order as the keys (see Pack).
 465  MPIStatDouble sd(verbose);
 466  sd.UnPack(outputBuffer,outputBufferSize,&outputBufferPosition,comm);
 467  result->set(ids[i],sd);
 468  }
 469  delete[] ids;
 470  return result;
 471 }
472 
473 
 //CalculatePackSize(const G4ScoringManager*) body (signature line not
 //visible in this extract): returns the exact number of bytes that
 //Pack(sm) will write; Send() asserts the two agree.
 475 {
 476  DMSG(3,"Calculating dimension of data to send");
 477  if ( sm == nullptr ) return 0;
 478  //Calculate how much data each call to Pack* appends to the buffer
 479  //e.g. sizeof(data)
 480  //The number of sizeof here should match the number of calls to MPI_Pack
 481 
 482  //Pack(ScoringMgr)
 483  G4int size = sizeof(unsigned int);
 484  DMSG(3,"There are "<<sm->GetNumberOfMesh()<<" meshes.");
 485  //Loop on mesh
 486  for ( size_t i = 0 ; i<sm->GetNumberOfMesh() ; ++i ) {
 487  size += sizeof(unsigned int);//ID
 488  size += CalculatePackSize(sm->GetMesh(i));
 489  }
 490  return size;
 491 }
492 
 //CalculatePackSize(mesh) body (signature line not visible in this
 //extract): byte count matching Pack(mesh) — one count, then per map:
 //name length, name characters, and the map payload.
 494 {
 495  DMSG(3,"Calculating size for mesh: "<<mesh);
 496  //PackSingleMesh(Mesh)
 497  G4int size = sizeof(unsigned int);//num maps
 498  const MeshScoreMap& map = mesh->GetScoreMap();
 499  for (const auto& ele : map ) {
 500  //PackHitsMap
 501  size += sizeof(unsigned int);//name size
 502  const G4String& name = ele.first;
 503  size += sizeof(char)*name.size();//name
 504  size += CalculatePackSize(ele.second);
 505  }
 506  DMSG(3,"mesh "<<mesh<<" size: "<<size);
 507  return size;
 508 }
509 
510 //G4int G4MPIscorerMerger::CalculatePackSize(const HitMap* map) const {
511 // const G4int numEls = map->GetSize();
512 // G4int size = sizeof(unsigned int);
513 // size += sizeof(G4int)*numEls;
514 // size += sizeof(G4double)*numEls;
515 // DMSG(3,"HitMap "<<map<<" size: "<<size<<" in "<<numEls<<" elements.");
516 // return size;
517 //}
518 
 //CalculatePackSize(HitStatDoubleMap) body (signature line not visible
 //in this extract): byte count matching Pack(HitStatDoubleMap) — one
 //count, the key array, and per entry the serialized G4StatDouble.
 520  const G4int numEls = map->GetSize();
 521  G4int size = sizeof(unsigned int);
 522  size += sizeof(G4int)*numEls;
 //G4StatDouble: 5 doubles and 1 int, matching MPIStatDouble::Pack.
 //Can I use sizeof(G4StatDouble)? NO sizeof(G4StatDouble)==56
 //(padding/vtable make the in-memory size differ from the wire size).
 525  size += numEls*(sizeof(G4double)*5+sizeof(G4int));
 526  DMSG(3,"HitStatDoubleMap "<<map<<" size: "<<size<<" in "<<numEls<<" elements.");
 527  return size;
 528 }
529 
G4double G4ParticleHPJENDLHEData::G4double result
const XML_Char * name
Definition: expat.h:151
int MPI_Unpack(const void *, int, int *, void *, int, MPI_Datatype, MPI_Comm)
Definition: dummy_mpi.h:35
const G4String & GetWorldName() const
G4StatDouble & operator=(const G4double &rhs)
Definition: G4StatDouble.hh:54
G4int set(const G4int &key, T *&aHit) const
Definition: G4THitsMap.hh:156
std::ostringstream G4ExceptionDescription
Definition: globals.hh:76
void Pack(const G4ScoringManager *)
Pack all meshes into buffer.
void UnPackAndMerge(const G4ScoringManager *)
void Accumulate(G4THitsMap< G4double > *map)
#define DMSG(LVL, MSG)
G4THitsMap< G4StatDouble > HitStatDoubleMap
void Merge(std::function< void(unsigned int)> senderF, std::function< void(unsigned int)> receiverF, std::function< void(void)> barrierF, unsigned int commSize, unsigned int myrank)
Definition: G4MPIutils.cc:161
#define buffer
Definition: xmlparse.cc:628
int G4int
Definition: G4Types.hh:78
const XML_Char const XML_Char * data
Definition: expat.h:268
void Send(const unsigned int destination)
HitStatDoubleMap * UnPackHitStatDoubleMap(const G4String &detName, const G4String &colName)
void Receive(const unsigned int source)
G4GLOB_DLL std::ostream G4cout
int MPI_Pack(const void *, int, MPI_Datatype, void *, int, int *, MPI_Comm)
Definition: dummy_mpi.h:34
typedef int(XMLCALL *XML_NotStandaloneHandler)(void *userData)
G4int CalculatePackSize(const G4ScoringManager *) const
virtual size_t GetSize() const
Definition: G4THitsMap.hh:208
virtual ~G4MPIscorerMerger()
void G4Exception(const char *originOfException, const char *exceptionCode, G4ExceptionSeverity severity, const char *comments)
Definition: G4Exception.cc:41
G4double total(Particle const *const p1, Particle const *const p2)
static const G4double ele
MeshScoreMap GetScoreMap() const
std::map< G4int, T * > * GetMap() const
Definition: G4THitsMap.hh:99
static G4String Status(G4StepStatus stps)
#define G4endl
Definition: G4ios.hh:61
size_t GetNumberOfMesh() const
double G4double
Definition: G4Types.hh:76
void SetupOutputBuffer(char *buff, G4int size, G4int position)
G4VScoringMesh * GetMesh(G4int i) const
std::map< G4String, RunScore * > MeshScoreMap