OpenFOAM-1.5.x.git: src/Pstream/mpi/Pstream.C

/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | Copyright (C) 1991-2008 OpenCFD Ltd.
     \\/     M anipulation  |
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software; you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by the
    Free Software Foundation; either version 2 of the License, or (at your
    option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM; if not, write to the Free Software Foundation,
    Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

\*---------------------------------------------------------------------------*/

#include "mpi.h"

#include "Pstream.H"
#include "PstreamReduceOps.H"
#include "OSspecific.H"

#include <cstring>
#include <cstdlib>
#include <csignal>
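
// Select the MPI datatype matching OpenFOAM's scalar: single-precision (SP)
// builds use MPI_FLOAT, double-precision (DP) builds use MPI_DOUBLE.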
#if defined(SP)
#   define MPI_SCALAR MPI_FLOAT
#elif defined(DP)
#   define MPI_SCALAR MPI_DOUBLE
#endif

// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

// NOTE:
// Valid parallel options vary between implementations, but we flag the common
// ones. If they are not removed by MPI_Init(), the subsequent argument
// processing will notice that they are wrong.
void Foam::Pstream::addValidParOptions(HashTable<string>& validParOptions)
{
    validParOptions.insert("np", "");
    validParOptions.insert("p4pg", "PI file");
    validParOptions.insert("p4wd", "directory");
    validParOptions.insert("p4amslave", "");
    validParOptions.insert("p4yourname", "hostname");
    validParOptions.insert("GAMMANP", "number of instances");
    validParOptions.insert("machinefile", "machine file");
}
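
// Illustrative example (not part of the original file): with a typical
// MPICH-style launcher, e.g.
//     mpirun -np 4 -machinefile hosts <solver> -parallel
// the launcher-specific flags are either stripped by MPI_Init() or matched
// against the table above during argument parsing.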

bool Foam::Pstream::init(int& argc, char**& argv)
{
    MPI_Init(&argc, &argv);

    int numprocs;
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myProcNo_);

    if (numprocs <= 1)
    {
        FatalErrorIn("Pstream::init(int& argc, char**& argv)")
            << "bool Pstream::init(int& argc, char**& argv) : "
               "attempt to run parallel on 1 processor"
            << Foam::abort(FatalError);
    }

    procIDs_.setSize(numprocs);

    forAll(procIDs_, procNo)
    {
        procIDs_[procNo] = procNo;
    }

    setParRun();
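
    // Attach a send buffer for buffered (MPI_Bsend) communication, sized by
    // the MPI_BUFFER_SIZE environment variable; skipped for SGI MPI, which
    // appears to manage its own buffering.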
#   ifndef SGIMPI
    string bufferSizeName = getEnv("MPI_BUFFER_SIZE");

    if (bufferSizeName.size())
    {
        int bufferSize = atoi(bufferSizeName.c_str());

        if (bufferSize)
        {
            MPI_Buffer_attach(new char[bufferSize], bufferSize);
        }
    }
    else
    {
        FatalErrorIn("Pstream::init(int& argc, char**& argv)")
            << "Pstream::init(int& argc, char**& argv) : "
            << "environment variable MPI_BUFFER_SIZE not defined"
            << Foam::abort(FatalError);
    }
#   endif

    int processorNameLen;
    char processorName[MPI_MAX_PROCESSOR_NAME];

    MPI_Get_processor_name(processorName, &processorNameLen);

    //signal(SIGABRT, stop);

    // Now that nprocs is known construct communication tables.
    initCommunicationSchedule();

    return true;
}


void Foam::Pstream::exit(int errnum)
{
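    // Release the send buffer attached in init() before shutting MPI down.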
#   ifndef SGIMPI
    int size;
    char* buff;
    MPI_Buffer_detach(&buff, &size);
    delete[] buff;
#   endif

    if (errnum == 0)
    {
        MPI_Finalize();
        ::exit(errnum);
    }
    else
    {
        MPI_Abort(MPI_COMM_WORLD, errnum);
    }
}


void Foam::Pstream::abort()
{
    MPI_Abort(MPI_COMM_WORLD, 1);
}


void Foam::reduce(scalar& Value, const sumOp<scalar>& bop)
{
    if (!Pstream::parRun())
    {
        return;
    }
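
    // For small numbers of processors do the reduction manually: gather all
    // values onto the master, combine them with bop, and send the result back.
    // Larger runs fall through to a single MPI_Allreduce below.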
    if (Pstream::nProcs() <= Pstream::nProcsSimpleSum)
    {
        MPI_Status status;
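
        // Gather: the master receives one value from every slave and
        // accumulates it into Value.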
        if (Pstream::master())
        {
            for
            (
                int slave=Pstream::firstSlave();
                slave<=Pstream::lastSlave();
                slave++
            )
            {
                scalar value;

                if
                (
                    MPI_Recv
                    (
                        &value,
                        1,
                        MPI_SCALAR,
                        Pstream::procID(slave),
                        Pstream::msgType(),
                        MPI_COMM_WORLD,
                        &status
                    )
                )
                {
                    FatalErrorIn
                    (
                        "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                    )   << "MPI_Recv failed"
                        << Foam::abort(FatalError);
                }

                Value = bop(Value, value);
            }
        }
        else
        {
            if
            (
                MPI_Send
                (
                    &Value,
                    1,
                    MPI_SCALAR,
                    Pstream::procID(Pstream::masterNo()),
                    Pstream::msgType(),
                    MPI_COMM_WORLD
                )
            )
            {
                FatalErrorIn
                (
                    "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                )   << "MPI_Send failed"
                    << Foam::abort(FatalError);
            }
        }
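
        // Scatter: the master sends the reduced value back to every slave.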
        if (Pstream::master())
        {
            for
            (
                int slave=Pstream::firstSlave();
                slave<=Pstream::lastSlave();
                slave++
            )
            {
                if
                (
                    MPI_Send
                    (
                        &Value,
                        1,
                        MPI_SCALAR,
                        Pstream::procID(slave),
                        Pstream::msgType(),
                        MPI_COMM_WORLD
                    )
                )
                {
                    FatalErrorIn
                    (
                        "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                    )   << "MPI_Send failed"
                        << Foam::abort(FatalError);
                }
            }
        }
        else
        {
            if
            (
                MPI_Recv
                (
                    &Value,
                    1,
                    MPI_SCALAR,
                    Pstream::procID(Pstream::masterNo()),
                    Pstream::msgType(),
                    MPI_COMM_WORLD,
                    &status
                )
            )
            {
                FatalErrorIn
                (
                    "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                )   << "MPI_Recv failed"
                    << Foam::abort(FatalError);
            }
        }
    }
    else
    {
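        // For larger processor counts let MPI do the work with a single
        // collective sum.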
        scalar sum;
        MPI_Allreduce(&Value, &sum, 1, MPI_SCALAR, MPI_SUM, MPI_COMM_WORLD);
        Value = sum;
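
        // The block below is a hand-rolled binary-tree reduce/broadcast,
        // commented out in favour of MPI_Allreduce and kept for reference.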

        /*
        MPI_Status status;

        int myProcNo = Pstream::myProcNo();
        int nProcs = Pstream::nProcs();

        //
        // receive from children
        //
        int level = 1;
        int thisLevelOffset = 2;
        int childLevelOffset = thisLevelOffset/2;
        int childProcId = 0;

        while
        (
            (childLevelOffset < nProcs)
         && (myProcNo % thisLevelOffset) == 0
        )
        {
            childProcId = myProcNo + childLevelOffset;

            scalar value;

            if (childProcId < nProcs)
            {
                if
                (
                    MPI_Recv
                    (
                        &value,
                        1,
                        MPI_SCALAR,
                        Pstream::procID(childProcId),
                        Pstream::msgType(),
                        MPI_COMM_WORLD,
                        &status
                    )
                )
                {
                    FatalErrorIn
                    (
                        "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                    )   << "MPI_Recv failed"
                        << Foam::abort(FatalError);
                }

                Value = bop(Value, value);
            }

            level++;
            thisLevelOffset <<= 1;
            childLevelOffset = thisLevelOffset/2;
        }

        //
        // send and receive from parent
        //
        if (!Pstream::master())
        {
            int parentId = myProcNo - (myProcNo % thisLevelOffset);

            if
            (
                MPI_Send
                (
                    &Value,
                    1,
                    MPI_SCALAR,
                    Pstream::procID(parentId),
                    Pstream::msgType(),
                    MPI_COMM_WORLD
                )
            )
            {
                FatalErrorIn
                (
                    "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                )   << "MPI_Send failed"
                    << Foam::abort(FatalError);
            }

            if
            (
                MPI_Recv
                (
                    &Value,
                    1,
                    MPI_SCALAR,
                    Pstream::procID(parentId),
                    Pstream::msgType(),
                    MPI_COMM_WORLD,
                    &status
                )
            )
            {
                FatalErrorIn
                (
                    "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                )   << "MPI_Recv failed"
                    << Foam::abort(FatalError);
            }
        }


        //
        // distribute to my children
        //
        level--;
        thisLevelOffset >>= 1;
        childLevelOffset = thisLevelOffset/2;

        while (level > 0)
        {
            childProcId = myProcNo + childLevelOffset;

            if (childProcId < nProcs)
            {
                if
                (
                    MPI_Send
                    (
                        &Value,
                        1,
                        MPI_SCALAR,
                        Pstream::procID(childProcId),
                        Pstream::msgType(),
                        MPI_COMM_WORLD
                    )
                )
                {
                    FatalErrorIn
                    (
                        "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                    )   << "MPI_Send failed"
                        << Foam::abort(FatalError);
                }
            }

            level--;
            thisLevelOffset >>= 1;
            childLevelOffset = thisLevelOffset/2;
        }
        */
    }
}
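

// Usage sketch (illustrative, not part of the original file): callers sum a
// locally computed scalar over all processors like this:
//
//     scalar localValue = 1.0;                // hypothetical per-processor value
//     reduce(localValue, sumOp<scalar>());    // localValue becomes the global sum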

// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

// ************************************************************************* //