Geant4 9.6.p02
G4MPImanager.cc
//
// ********************************************************************
// * License and Disclaimer                                           *
// *                                                                  *
// * The  Geant4 software  is  copyright of the Copyright Holders  of *
// * the Geant4 Collaboration.  It is provided  under  the terms  and *
// * conditions of the Geant4 Software License,  included in the file *
// * LICENSE and available at  http://cern.ch/geant4/license .  These *
// * include a list of copyright holders.                             *
// *                                                                  *
// * Neither the authors of this software system, nor their employing *
// * institutes,nor the agencies providing financial support for this *
// * work  make  any representation or  warranty, express or implied, *
// * regarding  this  software system or assume any liability for its *
// * use.  Please see the license in the file  LICENSE  and URL above *
// * for the full disclaimer and the limitation of liability.         *
// *                                                                  *
// * This  code  implementation is the result of  the  scientific and *
// * technical work of the GEANT4 collaboration.                      *
// * By using,  copying,  modifying or  distributing the software (or *
// * any work based  on the software)  you  agree  to acknowledge its *
// * use  in  resulting  scientific  publications,  and indicate your *
// * acceptance of all terms of the Geant4 Software license.          *
// ********************************************************************

#include "G4MPImanager.hh"
#include "G4MPImessenger.hh"
#include "G4MPIsession.hh"
#include "G4MPIbatch.hh"
#include "G4MPIstatus.hh"
#include "G4MPIrandomSeedGenerator.hh"
#include "G4UImanager.hh"
#include "G4RunManager.hh"
#include "G4StateManager.hh"
#include "G4Run.hh"
#include <time.h>
#include <stdio.h>
#include <getopt.h>
#include <assert.h>

G4MPImanager* G4MPImanager::theManager = 0;

// --------------------------------------------------------------------------
// wrappers for thread functions
static void thread_ExecuteThreadCommand(const G4String* command)
{
  G4MPImanager::GetManager()-> ExecuteThreadCommand(*command);
}

// --------------------------------------------------------------------------
G4MPImanager::G4MPImanager()
  : verbose(0), qfcout(false), qinitmacro(false), qbatchmode(false),
    threadID(0), masterWeight(1.)
{
  //MPI::Init();
  MPI::Init_thread(MPI::THREAD_SERIALIZED);
  Initialize();
}
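
// [editor's note] MPI::THREAD_SERIALIZED requests an MPI threading level
// in which multiple threads may make MPI calls, but never concurrently.
// This matches the design below, where a separate beamOn thread and the
// main thread both issue MPI traffic, one at a time.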

// --------------------------------------------------------------------------
G4MPImanager::G4MPImanager(int argc, char** argv)
  : verbose(0), qfcout(false), qinitmacro(false), qbatchmode(false),
    threadID(0), masterWeight(1.)
{
  //MPI::Init(argc, argv);
  MPI::Init_thread(argc, argv, MPI::THREAD_SERIALIZED);
  Initialize();
  ParseArguments(argc, argv);
}

// --------------------------------------------------------------------------
G4MPImanager::~G4MPImanager()
{
  if(isSlave && qfcout) fscout.close();

  delete status;
  delete messenger;
  delete session;

  COMM_G4COMMAND.Free();

  MPI::Finalize();

  theManager = 0;
}

// --------------------------------------------------------------------------
G4MPImanager* G4MPImanager::GetManager()
{
  assert( theManager != 0 );
  return theManager;
}

// --------------------------------------------------------------------------
void G4MPImanager::Initialize()
{
  assert( theManager == 0 );

  theManager = this;

  // get rank information
  size = MPI::COMM_WORLD.Get_size();
  rank = MPI::COMM_WORLD.Get_rank();
  isMaster = (rank == RANK_MASTER);
  isSlave = (rank != RANK_MASTER);

  // initialize MPI communicator
  COMM_G4COMMAND = MPI::COMM_WORLD.Dup();

  // new G4MPI components
  messenger = new G4MPImessenger(this);
  session = new G4MPIsession;
  status = new G4MPIstatus;

  // the default seed generator is the random seed generator
  seedGenerator = new G4MPIrandomSeedGenerator;
  DistributeSeeds();
}
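
// [editor's note] Illustrative wiring sketch, not part of this file: a
// typical main() for an MPI-enabled Geant4 application is assumed to look
// like the following (GetMPIsession() per the G4MPImanager header;
// detector/physics setup elided):
//
//   int main(int argc, char** argv) {
//     G4MPImanager* g4MPI = new G4MPImanager(argc, argv);
//     G4MPIsession* session = g4MPI-> GetMPIsession();
//     // ... create G4RunManager, geometry, physics, user actions ...
//     session-> SessionStart();  // interactive or batch, per arguments
//     delete g4MPI;              // destructor calls MPI::Finalize()
//     return 0;
//   }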

// --------------------------------------------------------------------------
void G4MPImanager::ParseArguments(int argc, char** argv)
{
  G4int qhelp = 0;
  G4String ofprefix = "mpi";

  G4int c;
  while (1) {
    G4int option_index = 0;
    static struct option long_options[] = {
      {"help", 0, 0, 0},
      {"verbose", 0, 0, 0},
      {"init", 1, 0, 0},
      {"ofile", 2, 0, 0},
      {0, 0, 0, 0}
    };

    opterr = 0; // suppress error messages from getopt
    c = getopt_long(argc, argv, "hvi:o", long_options, &option_index);
    opterr = 1;

    if(c == -1) break;

    switch (c) {
    case 0:
      switch(option_index) {
      case 0: // --help
        qhelp = 1;
        break;
      case 1: // --verbose
        verbose = 1;
        break;
      case 2: // --init
        qinitmacro = true;
        initFileName = optarg;
        break;
      case 3: // --ofile
        qfcout = true;
        if(optarg) ofprefix = optarg;
        break;
      }
      break;
    case 'h':
      qhelp = 1;
      break;
    case 'v':
      verbose = 1;
      break;
    case 'i':
      qinitmacro = true;
      initFileName = optarg;
      break;
    case 'o':
      qfcout = true;
      break;
    default:
      break;
    }
  }

  // show help
  if(qhelp) {
    if(isMaster) ShowHelp();
    MPI::Finalize();
    exit(0);
  }

  // file output: each slave writes to "<prefix>.NNN.cout"
  if(isSlave && qfcout) {
    G4String prefix = ofprefix + ".%03d" + ".cout";
    char str[1024];
    sprintf(str, prefix.c_str(), rank);
    G4String fname(str);
    fscout.open(fname.c_str(), std::ios::out);
  }

  // non-option ARGV-elements: treat the first one as a batch macro file
  if (optind < argc) {
    qbatchmode = true;
    macroFileName = argv[optind];
  }
}
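
// [editor's note] Example invocations implied by the options above
// (binary and macro names are placeholders):
//   mpiexec -n 4 ./app run.mac        -> batch mode on 4 ranks
//   mpiexec -n 4 ./app -i init.mac -o -> init macro + slave output files
//                                        mpi.001.cout, mpi.002.cout, ...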

// --------------------------------------------------------------------------
void G4MPImanager::Wait(G4int ausec) const
{
  struct timespec treq, trem;
  treq.tv_sec = 0;
  treq.tv_nsec = ausec*1000;

  nanosleep(&treq, &trem);
}
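
// [editor's note] Wait() sleeps for "ausec" microseconds (hence the *1000
// conversion to nanoseconds for nanosleep). The polling loops below use it
// to test non-blocking receives without spinning the CPU.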

// ====================================================================
void G4MPImanager::UpdateStatus()
{
  G4RunManager* runManager = G4RunManager::GetRunManager();
  const G4Run* run = runManager-> GetCurrentRun();

  G4int runid, eventid, neventTBP;

  G4StateManager* stateManager = G4StateManager::GetStateManager();
  G4ApplicationState g4state = stateManager-> GetCurrentState();

  if (run) {
    runid = run-> GetRunID();
    neventTBP = run-> GetNumberOfEventToBeProcessed();
    eventid = run-> GetNumberOfEvent();
    if(g4state == G4State_GeomClosed || g4state == G4State_EventProc) {
      status-> StopTimer();
    }
  } else {
    runid = 0;
    eventid = 0;
    neventTBP = 0;
  }

  status-> SetStatus(rank, runid, neventTBP, eventid, g4state);
}

// --------------------------------------------------------------------------
void G4MPImanager::ShowStatus()
{
  G4int buff[G4MPIstatus::NSIZE];

  UpdateStatus();
  G4bool gstatus = CheckThreadStatus();

  if(isMaster) {
    status-> Print(); // for master itself

    G4int nev = status-> GetEventID();
    G4int nevtp = status-> GetNEventToBeProcessed();
    G4double cputime = status-> GetCPUTime();

    // receive from each slave
    for (G4int islave = 1; islave < size; islave++) {
      COMM_G4COMMAND.Recv(buff, G4MPIstatus::NSIZE, MPI::INT,
                          islave, TAG_G4STATUS);
      status-> UnPack(buff);
      status-> Print();

      // aggregation
      nev += status-> GetEventID();
      nevtp += status-> GetNEventToBeProcessed();
      cputime += status-> GetCPUTime();
    }

    G4String strStatus;
    if(gstatus) {
      strStatus = "Run";
    } else {
      strStatus = "Idle";
    }

    G4cout << "-------------------------------------------------------"
           << G4endl
           << "* #ranks= " << size
           << " event= " << nev << "/" << nevtp
           << " state= " << strStatus
           << " time= " << cputime << "s"
           << G4endl;
  } else {
    status-> Pack(buff);
    COMM_G4COMMAND.Send(buff, G4MPIstatus::NSIZE, MPI::INT,
                        RANK_MASTER, TAG_G4STATUS);
  }
}
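
// [editor's note] In the method above, the master prints its own status
// record, then collects one packed G4MPIstatus record per slave,
// accumulating event counts and CPU time into the cluster-wide totals
// printed on the last line.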

// ====================================================================
void G4MPImanager::DistributeSeeds()
{
  std::vector<G4long> seedList = seedGenerator-> GetSeedList();
  CLHEP::HepRandom::setTheSeed(seedList[rank]);
}
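
// [editor's note] Every rank obtains the same seed list but keeps only the
// element matching its own rank, so the random streams on different ranks
// are decorrelated while remaining reproducible from a common list.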

// --------------------------------------------------------------------------
void G4MPImanager::ShowSeeds()
{
  G4long buff;

  if(isMaster) {
    // print master
    G4cout << "* rank= " << rank
           << " seed= " << CLHEP::HepRandom::getTheSeed()
           << G4endl;
    // receive from each slave
    for (G4int islave = 1; islave < size; islave++) {
      COMM_G4COMMAND.Recv(&buff, 1, MPI::LONG, islave, TAG_G4SEED);
      G4cout << "* rank= " << islave
             << " seed= " << buff
             << G4endl;
    }
  } else { // slaves
    buff = CLHEP::HepRandom::getTheSeed();
    COMM_G4COMMAND.Send(&buff, 1, MPI::LONG, RANK_MASTER, TAG_G4SEED);
  }
}

// --------------------------------------------------------------------------
void G4MPImanager::SetSeed(G4int inode, G4long seed)
{
  if(rank == inode) {
    CLHEP::HepRandom::setTheSeed(seed);
  }
}

// ====================================================================
G4bool G4MPImanager::CheckThreadStatus()
{
  unsigned buff;
  G4bool qstatus = false;

  if(isMaster) {
    qstatus = (threadID != 0);
    // get slave status
    for (G4int islave = 1; islave < size; islave++) {
      MPI::Request request = COMM_G4COMMAND.Irecv(&buff, 1, MPI::UNSIGNED,
                                                  islave, TAG_G4STATUS);
      MPI::Status status;
      while(! request.Test(status)) {
        Wait(1000);
      }
      qstatus |= buff;
    }
  } else {
    buff = unsigned(threadID);
    COMM_G4COMMAND.Send(&buff, 1, MPI::UNSIGNED, RANK_MASTER, TAG_G4STATUS);
  }

  // broadcast
  buff = qstatus; // for master
  COMM_G4COMMAND.Bcast(&buff, 1, MPI::UNSIGNED, RANK_MASTER);
  qstatus = buff; // for slave

  return qstatus;
}
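
// [editor's note] The exchange above is effectively a logical-OR reduction
// of the per-rank "beamOn thread alive" flags followed by a broadcast of
// the result. A minimal sketch of the same idea using a collective (an
// assumed equivalent, not the author's code):
//
//   unsigned local = unsigned(threadID != 0);
//   unsigned global = 0;
//   COMM_G4COMMAND.Allreduce(&local, &global, 1, MPI::UNSIGNED, MPI::LOR);
//   return G4bool(global);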

// --------------------------------------------------------------------------
void G4MPImanager::ExecuteThreadCommand(const G4String& command)
{
  // this method is a thread function.
  G4UImanager* UI = G4UImanager::GetUIpointer();
  G4int rc = UI-> ApplyCommand(command);

  G4int commandStatus = rc - (rc%100);

  switch(commandStatus) {
  case fCommandSucceeded:
    break;
  case fIllegalApplicationState:
    G4cerr << "illegal application state -- command refused" << G4endl;
    break;
  default:
    G4cerr << "command refused (" << commandStatus << ")" << G4endl;
    break;
  }

  // thread is joined
  if(threadID) {
    pthread_join(threadID, 0);
    threadID = 0;
  }

  return;
}
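
// [editor's note] G4UImanager::ApplyCommand() returns a code whose hundreds
// digit selects the G4UIcommandStatus category (fCommandSucceeded,
// fCommandNotFound, fIllegalApplicationState, ...) and whose low digits
// identify the offending parameter; rc - (rc%100) above strips the
// parameter index to recover the category.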

// --------------------------------------------------------------------------
void G4MPImanager::ExecuteBeamOnThread(const G4String& command)
{
  G4bool threadStatus = CheckThreadStatus();

  if (threadStatus) {
    if(isMaster) {
      G4cout << "G4MPIsession:: beamOn is still running." << G4endl;
    }
  } else { // ok
    static G4String cmdstr;
    cmdstr = command;
    G4int rc = pthread_create(&threadID, 0,
                              (Func_t)thread_ExecuteThreadCommand,
                              (void*)&cmdstr);
    if (rc != 0)
      G4Exception("G4MPImanager::ExecuteBeamOnThread()",
                  "MPI001", JustWarning,
                  "Failed to create a beamOn thread.");
  }
}

// --------------------------------------------------------------------------
void G4MPImanager::JoinBeamOnThread()
{
  if(threadID) {
    pthread_join(threadID, 0);
    threadID = 0;
  }
}

// ====================================================================
G4String G4MPImanager::BcastCommand(const G4String& command)
{
  enum { BUFF_SIZE = 512 };
  static char sbuff[BUFF_SIZE];
  command.copy(sbuff, BUFF_SIZE);
  G4int len = command.size();
  sbuff[len] = '\0'; // no boundary check

  // "command" is not yet fixed in slaves at this time.

  // waiting for a message exhausts CPU in LAM!
  //COMM_G4COMMAND.Bcast(sbuff, ssize, MPI::CHAR, RANK_MASTER);

  // another implementation
  if( isMaster ) {
    for (G4int islave = 1; islave < size; islave++) {
      COMM_G4COMMAND.Send(sbuff, BUFF_SIZE, MPI::CHAR, islave, TAG_G4COMMAND);
    }
  } else {
    // try non-blocking receive
    MPI::Request request = COMM_G4COMMAND.Irecv(sbuff, BUFF_SIZE, MPI::CHAR,
                                                RANK_MASTER, TAG_G4COMMAND);
    // polling...
    MPI::Status status;
    while(! request.Test(status)) {
      Wait(1000);
    }
  }

  return G4String(sbuff);
}
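
// [editor's note] As the commented-out Bcast above hints, some MPI
// implementations (e.g. LAM) busy-wait inside blocking collectives.
// Replacing the broadcast with size-1 point-to-point sends lets each slave
// poll its non-blocking receive and sleep between tests via Wait().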

// ====================================================================
void G4MPImanager::ExecuteMacroFile(const G4String& fname, G4bool qbatch)
{
  G4bool currentmode = qbatchmode;
  qbatchmode = true;
  G4MPIbatch* batchSession = new G4MPIbatch(fname, qbatch);
  batchSession-> SessionStart();
  delete batchSession;
  qbatchmode = currentmode;
}

// --------------------------------------------------------------------------
void G4MPImanager::BeamOn(G4int nevent, G4bool qdivide)
{
  G4RunManager* runManager = G4RunManager::GetRunManager();

  if(qdivide) { // events are divided among the ranks
    G4double ntot = masterWeight + size - 1.;
    G4int nproc = G4int(nevent/ntot);
    G4int nproc0 = nevent - nproc*(size-1);

    if(verbose > 0 && isMaster) {
      G4cout << "#events in master=" << nproc0 << " / "
             << "#events in slave=" << nproc << G4endl;
    }

    status-> StartTimer(); // start timer
    if(isMaster) runManager-> BeamOn(nproc0);
    else runManager-> BeamOn(nproc);
    status-> StopTimer(); // stop timer

  } else { // the same events are generated in each node (for test use)
    if(verbose > 0 && isMaster) {
      G4cout << "#events in master=" << nevent << " / "
             << "#events in slave=" << nevent << G4endl;
    }
    status-> StartTimer(); // start timer
    runManager-> BeamOn(nevent);
    status-> StopTimer(); // stop timer
  }
}
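
// [editor's note] Worked example of the event division above, assuming
// masterWeight = 1: with size = 4 ranks and nevent = 1000, ntot = 4,
// nproc = G4int(1000/4) = 250 events per slave, and
// nproc0 = 1000 - 250*3 = 250 events on the master. Any remainder from
// the integer division stays with the master (e.g. nevent = 1001 gives
// 250 per slave and 251 on the master).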

// --------------------------------------------------------------------------
void G4MPImanager::WaitBeamOn()
{
  G4int buff = 0;
  if (qbatchmode) { // valid only in batch mode
    if(isMaster) {
      // receive a "done" token from each slave
      for (G4int islave = 1; islave < size; islave++) {
        MPI::Request request = COMM_G4COMMAND.Irecv(&buff, 1, MPI::INT,
                                                    islave, TAG_G4STATUS);
        MPI::Status status;
        while(! request.Test(status)) {
          Wait(1000);
        }
      }
    } else {
      buff = 1;
      COMM_G4COMMAND.Send(&buff, 1, MPI::INT, RANK_MASTER, TAG_G4STATUS);
    }
  }
}
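
// [editor's note] WaitBeamOn() acts as a lightweight barrier in batch
// mode: each slave posts a token and the master polls until every token
// has arrived, so the master does not proceed while a slave is still
// processing events.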

// --------------------------------------------------------------------------
void G4MPImanager::Print(const G4String& message)
{
  if(isMaster){
    std::cout << message << std::flush;
  } else {
    if(qfcout) { // output to a file
      fscout << message << std::flush;
    } else { // output to stdout
      std::cout << rank << ":" << message << std::flush;
    }
  }
}

// --------------------------------------------------------------------------
void G4MPImanager::ShowHelp()
{
  if(isSlave) return;

  G4cout << "Geant4 MPI interface" << G4endl;
  G4cout << "usage:" << G4endl;
  G4cout << "<app> [options] [macro file]"
         << G4endl << G4endl;
  G4cout << " -h, --help          show this message."
         << G4endl;
  G4cout << " -v, --verbose       show verbose messages"
         << G4endl;
  G4cout << " -i, --init=FNAME    set an init macro file"
         << G4endl;
  G4cout << " -o, --ofile[=FNAME] set slave output to a file"
         << G4endl;
  G4cout << G4endl;
}