/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | Copyright (C) 1991-2008 OpenCFD Ltd.
     \\/     M anipulation  |
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software; you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by the
    Free Software Foundation; either version 2 of the License, or (at your
    option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM; if not, write to the Free Software Foundation,
    Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

\*---------------------------------------------------------------------------*/

#include "Pstream.H"
#include "PstreamReduceOps.H"
#include "IOstreams.H"

#include <iostream>
#include <cstring>
#include <cstdlib>

#include <pvm3.h>

// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

namespace Foam
{

// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

void Pstream::addValidParOptions(HashTable<string>& validParOptions)
{
    validParOptions.insert("np", "");
    validParOptions.insert("p4pg", "PI file");
    validParOptions.insert("p4wd", "directory");
    validParOptions.insert("p4amslave", "");
    validParOptions.insert("p4yourname", "hostname");
}
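
// The options registered above follow the MPICH ch_p4-style launcher flags
// ("np" processor count, "p4pg" process-group file, "p4wd" working
// directory, ...), so that the argument parser (argList) can recognise them
// on the command line and pass them through to the underlying run-time.
// The set accepted by a particular installation may differ; treat this list
// as illustrative rather than exhaustive.
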
bool Pstream::init(int& argc, char**& argv)
{
    // Set the communications options
    pvm_setopt(PvmRoute, PvmRouteDirect);

    // Get the ID of this processor
    int mytid = pvm_mytid();

    // Get the size of the NULL group
    procIDs_.setSize(pvm_gsize(NULL));

    // For each processor of the NULL group get its ID
    for (int proci=0; proci<procIDs_.size(); proci++)
    {
        procIDs_[proci] = pvm_gettid(NULL, proci);
    }

    // Initialisation message type (the tag value is an assumption; any
    // non-negative tag used consistently by master and slaves will do)
    const int initMsgType = 0;

    // If this is not a slave then it must be the master.
    // Master spawns the rest of the child processes in the same manner as MPI
    if (string(argv[argc-1]) != "-slave")
    {
        // Last argument is number of processors in parallel run
        int nProcs = atoi(argv[argc-1]);

        // If it is less than 2 this is not a parallel run!
        if (nProcs < 2)
        {
            FatalErrorIn("Pstream::init(int& argc, char**& argv)")
                << "Attempt to run parallel on < 2 processors ... stopping."
                << abort(FatalError);
        }

        Info<< "Starting parallel run on " << nProcs << " processors ... "
            << endl;

        // set size of ID list
        procIDs_.setSize(nProcs);

        // Put my ID in the list (the master is processor 1, i.e. slot 0)
        procIDs_[0] = mytid;

        // Setup arguments of children: copy all arguments except the
        // executable name and append "-slave"
        typedef char* charPtr;
        char** Argv = new charPtr[argc + 1];

        for (int i=0; i<argc-1; i++)
        {
            Argv[i] = new char[strlen(argv[i+1]) + 1];
            strcpy(Argv[i], argv[i+1]);
        }

        Argv[argc-1] = new char[7];
        strcpy(Argv[argc-1], "-slave");

        // pvm_spawn expects a NULL-terminated argument list
        Argv[argc] = NULL;

        // Spawn children as copies of me, passing them the modified
        // argument list (the spawn arguments below are assumptions;
        // adjust to the local PVM configuration)
        if
        (
            pvm_spawn
            (
                argv[0],
                Argv,
                PvmTaskDefault,
                (char*)0,
                nProcs - 1,
                &procIDs_[1]
            ) != nProcs - 1
        )
        {
            FatalErrorIn("Pstream::init(int& argc, char**& argv)")
                << "Unable to spawn processes ... stopping."
                << abort(FatalError);
        }

        // Broadcast task IDs to all children
        pvm_setopt(PvmRoute, PvmRouteDirect);
        pvm_initsend(PvmDataDefault);
        pvm_pkint((int*)(&nProcs), 1, 1);
        pvm_pkint(procIDs_.begin(), nProcs, 1);
        pvm_mcast(procIDs_.begin(), nProcs, initMsgType);

        Info<< "nProcs : " << nProcs << endl;

        for (int proci=0; proci<procIDs_.size(); proci++)
        {
            std::cout<< std::hex << procIDs_[proci] << ' ';
        }
        std::cout<< std::dec << nl << std::endl;
    }
    else
    {
        // Receive processor data from master
        pvm_recv(-1, initMsgType);

        // Should have received the number of processors in the run ...
        int nProcs;
        pvm_upkint(&nProcs, 1, 1);

        // ... set size of ID list
        procIDs_.setSize(nProcs);

        // ... and unpack the processor IDs
        pvm_upkint(procIDs_.begin(), nProcs, 1);
    }

    // Find which processor number this is
    for (int proci=0; proci<procIDs_.size(); proci++)
    {
        if (procIDs_[proci] == mytid)
        {
            // Set the processor numbers to start from 1
            myProcNo_ = proci + 1;
            break;
        }
    }

    if (pvm_joingroup("foam") < 0)
    {
        FatalErrorIn("Pstream::init(int& argc, char**& argv)")
            << "Pstream::init(int*, char **[]) : "
            << "could not join group ... stopping."
            << abort(FatalError);
    }

    // Wait for all processors to join the group
    pvm_barrier("foam", nProcs());

    // Setup signal handler to catch an interrupt (^C) and abort the run.
    // This doesn't work, it causes
    //     libpvm [t40003]: pvm_sendsig(): Not implemented
    //     libpvm [t40003]: pvm_kill(): Not implemented
    //signal(SIGINT, stop);

209 Sout<< "Master started successfully." << nl << endl;
213 Sout<< "Child " << myProcNo_ << " started successfully." << nl << endl;
void Pstream::exit(int errnum)
{
    //pvm_lvgroup("foam");

    // Kill all other processes before exiting
    for (int proci=1; proci<=procIDs_.size(); proci++)
    {
        if (proci != myProcNo())
        {
            pvm_kill(procID(proci));
        }
    }

    ::exit(errnum);
}

void Pstream::abort()
{
    // Kill all other processes, then abort this one
    for (int proci=1; proci<=procIDs_.size(); proci++)
    {
        if (proci != myProcNo())
        {
            pvm_kill(procID(proci));
        }
    }

    ::abort();
}

void reduce(scalar& Value, const sumOp<scalar>& bop)
{
    if (Pstream::parRun())
    {
        // A collective pvm_reduce could be used here; the call is left
        // disabled (its arguments are assumptions) and the explicit
        // gather/broadcast below is used instead.
        /*
        if
        (
            pvm_reduce
            (
                PvmSum, &Value, 1, PVM_DOUBLE,
                Pstream::msgType(), "foam", Pstream::masterNo()
            ) != PvmOk
        )
        {
            FatalErrorIn
            (
                "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
            )   << "pvm_reduce failed"
                << abort(FatalError);
        }
        */

        if (Pstream::master())
        {
            // Gather a value from each slave and accumulate it with bop
            for
            (
                int slave=Pstream::firstSlave();
                slave<=Pstream::lastSlave();
                slave++
            )
            {
                scalar value;
                int atid, atag, alen;

                if
                (
                    pvm_precv
                    (
                        Pstream::procID(slave),
                        Pstream::msgType(),
                        &value, 1, PVM_DOUBLE,
                        &atid, &atag, &alen
                    ) != PvmOk
                )
                {
                    FatalErrorIn
                    (
                        "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                    )   << "pvm_precv failed"
                        << abort(FatalError);
                }

                Value = bop(Value, value);
            }
        }
        else
        {
            // Send this processor's value to the master
            if
            (
                pvm_psend
                (
                    Pstream::procID(Pstream::masterNo()),
                    Pstream::msgType(),
                    &Value, 1, PVM_DOUBLE
                ) != PvmOk
            )
            {
                FatalErrorIn
                (
                    "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                )   << "pvm_psend failed"
                    << abort(FatalError);
            }
        }

        // Distribute the reduced value back to all processors
        if (Pstream::master())
        {
            pvm_initsend(PvmDataDefault);
            pvm_pkdouble(&Value, 1, 1);

            if
            (
                pvm_mcast
                (
                    (int*)Pstream::procIDs().begin(),
                    Pstream::nProcs(),
                    Pstream::msgType()
                ) < 0
            )
            {
                FatalErrorIn
                (
                    "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                )   << "pvm_mcast failed"
                    << abort(FatalError);
            }
        }
        else
        {
            // Receive the reduced value from the master and unpack it
            if
            (
                pvm_recv
                (
                    Pstream::procID(Pstream::masterNo()),
                    Pstream::msgType()
                ) < 0
            )
            {
                FatalErrorIn
                (
                    "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                )   << "pvm_recv failed"
                    << abort(FatalError);
            }

            pvm_upkdouble(&Value, 1, 1);
        }
    }
}
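
// Example of how this global reduction is typically invoked from solver code
// (the variable name is hypothetical):
//
//     scalar localResidual = ...;              // per-processor contribution
//     reduce(localResidual, sumOp<scalar>());  // now holds the global sum
//
// Other reduction operators (e.g. minOp, maxOp) would need corresponding
// overloads; only the scalar sum is provided in this file.
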
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

} // End namespace Foam

// ************************************************************************* //