/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | Copyright (C) 1991-2008 OpenCFD Ltd.
     \\/     M anipulation  |
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software; you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by the
    Free Software Foundation; either version 2 of the License, or (at your
    option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM; if not, write to the Free Software Foundation,
    Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

\*---------------------------------------------------------------------------*/
#include "mpi.h"

#include "Pstream.H"
#include "PstreamReduceOps.H"
#include "OSspecific.H"

#include <cstdlib>

#if defined(WM_SP)
#   define MPI_SCALAR MPI_FLOAT
#elif defined(WM_DP)
#   define MPI_SCALAR MPI_DOUBLE
#endif
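
// MPI_SCALAR is the MPI datatype matching Foam::scalar: MPI_FLOAT for a
// single-precision build (WM_SP), MPI_DOUBLE for a double-precision build
// (WM_DP).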
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
// Valid parallel options vary between implementations, but flag common ones.
// If they are not removed by MPI_Init(), the subsequent argument processing
// will notice that they are wrong.
void Foam::Pstream::addValidParOptions(HashTable<string>& validParOptions)
{
    validParOptions.insert("np", "");
    validParOptions.insert("p4pg", "PI file");
    validParOptions.insert("p4wd", "directory");
    validParOptions.insert("p4amslave", "");
    validParOptions.insert("p4yourname", "hostname");
    validParOptions.insert("GAMMANP", "number of instances");
    validParOptions.insert("machinefile", "machine file");
}
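
// Example (a sketch, not prescriptive): with an MPICH/p4-style launcher the
// slave processes can be handed extra arguments such as
//     <application> -p4pg <PI file> -p4wd <directory> -p4amslave
// Registering the option names above lets the argument processing accept
// them if MPI_Init() leaves them in argv.
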
bool Foam::Pstream::init(int& argc, char**& argv)
{
    MPI_Init(&argc, &argv);

    int numprocs;
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myProcNo_);

    if (numprocs <= 1)
    {
        FatalErrorIn("Pstream::init(int& argc, char**& argv)")
            << "bool Pstream::init(int& argc, char**& argv) : "
               "attempt to run parallel on 1 processor"
            << Foam::abort(FatalError);
    }

    procIDs_.setSize(numprocs);

    forAll(procIDs_, procNo)
    {
        procIDs_[procNo] = procNo;
    }

    // Attach a buffer for buffered (MPI_Bsend) communication; the size is
    // taken from the MPI_BUFFER_SIZE environment variable.
    string bufferSizeName = getEnv("MPI_BUFFER_SIZE");

    if (bufferSizeName.size())
    {
        int bufferSize = atoi(bufferSizeName.c_str());

        if (bufferSize)
        {
            MPI_Buffer_attach(new char[bufferSize], bufferSize);
        }
    }
    else
    {
        FatalErrorIn("Pstream::init(int& argc, char**& argv)")
            << "Pstream::init(int& argc, char**& argv) : "
            << "environment variable MPI_BUFFER_SIZE not defined"
            << Foam::abort(FatalError);
    }

    int processorNameLen;
    char processorName[MPI_MAX_PROCESSOR_NAME];

    MPI_Get_processor_name(processorName, &processorNameLen);

    //signal(SIGABRT, stop);

    // Now that nprocs is known construct communication tables.
    initCommunicationSchedule();

    return true;
}
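
// Typical invocation (a sketch, not taken from this file): the run scripts
// export MPI_BUFFER_SIZE before starting the job, e.g.
//     export MPI_BUFFER_SIZE=20000000
//     mpirun -np 4 <application> -parallel
// init() deliberately aborts if the variable is unset, because the buffered
// sends used elsewhere in Pstream rely on the buffer attached above.
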
void Foam::Pstream::exit(int errnum)
{
    int size;
    char* buff;
    MPI_Buffer_detach(&buff, &size);
    delete[] buff;

    if (errnum == 0)
    {
        MPI_Finalize();
        ::exit(errnum);
    }
    else
    {
        MPI_Abort(MPI_COMM_WORLD, errnum);
    }
}
void Foam::Pstream::abort()
{
    MPI_Abort(MPI_COMM_WORLD, 1);
}
void Foam::reduce(scalar& Value, const sumOp<scalar>& bop)
{
    if (!Pstream::parRun())
    {
        return;
    }

    if (Pstream::nProcs() <= Pstream::nProcsSimpleSum)
    {
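        // Linear reduce: the master gathers a value from every slave and
        // accumulates it with bop, then sends the result back to each slave
        // so all processes return the same reduced value.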
        if (Pstream::master())
        {
            for
            (
                int slave=Pstream::firstSlave();
                slave<=Pstream::lastSlave();
                slave++
            )
            {
                scalar value;

                if
                (
                    MPI_Recv
                    (
                        &value, 1, MPI_SCALAR,
                        Pstream::procID(slave),
                        Pstream::msgType(), MPI_COMM_WORLD, MPI_STATUS_IGNORE
                    )
                )
                {
                    FatalErrorIn
                    (
                        "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                    )   << "MPI_Recv failed"
                        << Foam::abort(FatalError);
                }

                Value = bop(Value, value);
            }
        }
        else
        {
            if
            (
                MPI_Send
                (
                    &Value, 1, MPI_SCALAR,
                    Pstream::procID(Pstream::masterNo()),
                    Pstream::msgType(), MPI_COMM_WORLD
                )
            )
            {
                FatalErrorIn
                (
                    "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                )   << "MPI_Send failed"
                    << Foam::abort(FatalError);
            }
        }
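
        // At this point only the master holds the fully reduced value;
        // scatter it back so every process leaves with the same result.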
        if (Pstream::master())
        {
            for
            (
                int slave=Pstream::firstSlave();
                slave<=Pstream::lastSlave();
                slave++
            )
            {
                if
                (
                    MPI_Send
                    (
                        &Value, 1, MPI_SCALAR,
                        Pstream::procID(slave),
                        Pstream::msgType(), MPI_COMM_WORLD
                    )
                )
                {
                    FatalErrorIn
                    (
                        "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                    )   << "MPI_Send failed"
                        << Foam::abort(FatalError);
                }
            }
        }
        else
        {
            if
            (
                MPI_Recv
                (
                    &Value, 1, MPI_SCALAR,
                    Pstream::procID(Pstream::masterNo()),
                    Pstream::msgType(), MPI_COMM_WORLD, MPI_STATUS_IGNORE
                )
            )
            {
                FatalErrorIn
                (
                    "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                )   << "MPI_Recv failed"
                    << Foam::abort(FatalError);
            }
        }
    }
    else
    {
        scalar sum;
        MPI_Allreduce(&Value, &sum, 1, MPI_SCALAR, MPI_SUM, MPI_COMM_WORLD);
        Value = sum;
    }

    /*
    // Manual binary-tree reduce, retained for reference only: each process
    // receives partial values from its children, passes the accumulated
    // value to its parent, and the master's total is then distributed back
    // down the same tree.

    int myProcNo = Pstream::myProcNo();
    int nProcs = Pstream::nProcs();

    //
    // receive from children
    //
    int thisLevelOffset = 2;
    int childLevelOffset = thisLevelOffset/2;
    int childProcId = 0;

    while
    (
        (childLevelOffset < nProcs)
     && (myProcNo % thisLevelOffset) == 0
    )
    {
        childProcId = myProcNo + childLevelOffset;

        scalar value;

        if (childProcId < nProcs)
        {
            if
            (
                MPI_Recv(&value, 1, MPI_SCALAR, Pstream::procID(childProcId),
                         Pstream::msgType(), MPI_COMM_WORLD, MPI_STATUS_IGNORE)
            )
            {
                FatalErrorIn("reduce(scalar& Value, const sumOp<scalar>& sumOp)")
                    << "MPI_Recv failed" << Foam::abort(FatalError);
            }

            Value = bop(Value, value);
        }

        thisLevelOffset <<= 1;
        childLevelOffset = thisLevelOffset/2;
    }

    //
    // send to and receive from parent
    //
    if (!Pstream::master())
    {
        int parentId = myProcNo - (myProcNo % thisLevelOffset);

        if
        (
            MPI_Send(&Value, 1, MPI_SCALAR, Pstream::procID(parentId),
                     Pstream::msgType(), MPI_COMM_WORLD)
        )
        {
            FatalErrorIn("reduce(scalar& Value, const sumOp<scalar>& sumOp)")
                << "MPI_Send failed" << Foam::abort(FatalError);
        }

        if
        (
            MPI_Recv(&Value, 1, MPI_SCALAR, Pstream::procID(parentId),
                     Pstream::msgType(), MPI_COMM_WORLD, MPI_STATUS_IGNORE)
        )
        {
            FatalErrorIn("reduce(scalar& Value, const sumOp<scalar>& sumOp)")
                << "MPI_Recv failed" << Foam::abort(FatalError);
        }
    }

    //
    // distribute to my children
    //
    thisLevelOffset >>= 1;
    childLevelOffset = thisLevelOffset/2;

    while (thisLevelOffset >= 2)
    {
        childProcId = myProcNo + childLevelOffset;

        if (childProcId < nProcs)
        {
            if
            (
                MPI_Send(&Value, 1, MPI_SCALAR, Pstream::procID(childProcId),
                         Pstream::msgType(), MPI_COMM_WORLD)
            )
            {
                FatalErrorIn("reduce(scalar& Value, const sumOp<scalar>& sumOp)")
                    << "MPI_Send failed" << Foam::abort(FatalError);
            }
        }

        thisLevelOffset >>= 1;
        childLevelOffset = thisLevelOffset/2;
    }
    */
}
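
// Example use (a sketch of typical calling code elsewhere in OpenFOAM, not
// part of this file):
//
//     scalar localValue = ...;                  // contribution on this CPU
//     reduce(localValue, sumOp<scalar>());      // global sum on all CPUs
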
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

// ************************************************************************* //