// Source file: src/Pstream/mpi/mpiOPstreamImpl.C (freefoam,
// merged from upstream OpenFOAM).
/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | Copyright (C) 1991-2009 OpenCFD Ltd.
     \\/     M anipulation  |
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software; you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by the
    Free Software Foundation; either version 2 of the License, or (at your
    option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM; if not, write to the Free Software Foundation,
    Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

Description
    Write primitive and binary block from mpiOPstreamImpl

\*---------------------------------------------------------------------------*/
30 #include "mpi.h"
32 #include "mpiOPstreamImpl.H"
33 #include <OpenFOAM/DynamicList.H>
34 #include <OpenFOAM/Pstream.H>
35 #include <OpenFOAM/addToRunTimeSelectionTable.H>
37 // * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
39 namespace Foam
42 defineTypeNameAndDebug(mpiOPstreamImpl, 0);
43 addToRunTimeSelectionTable(OPstreamImpl, mpiOPstreamImpl, dictionary);
47 // Outstanding non-blocking operations.
48 //! @cond fileScope
49 Foam::DynamicList<MPI_Request> OPstream_outstandingRequests_;
50 //! @endcond fileScope
52 // * * * * * * * * * * * * * * * * Destructor  * * * * * * * * * * * * * * * //
54 void Foam::mpiOPstreamImpl::flush
56     const PstreamImpl::commsTypes commsType,
57     const int toProcNo,
58     const char* buf,
59     const int bufPosition
62     if
63     (
64        !write
65         (
66             commsType,
67             toProcNo,
68             buf,
69             bufPosition
70         )
71     )
72     {
73         FatalErrorIn("mpiOPstreamImpl::flush(const PstreamImpl::commsTypes, "
74                      "const int, const char*, const int)")
75             << "MPI_Bsend cannot send outgoing message"
76             << Foam::abort(FatalError);
77     }
80 // * * * * * * * * * * * * * * * Member Functions  * * * * * * * * * * * * * //
82 bool Foam::mpiOPstreamImpl::write
84     const PstreamImpl::commsTypes commsType,
85     const int toProcNo,
86     const char* buf,
87     const std::streamsize bufSize
90     bool transferFailed = true;
92     if (commsType == PstreamImpl::blocking)
93     {
94         transferFailed = MPI_Bsend
95         (
96             const_cast<char*>(buf),
97             bufSize,
98             MPI_PACKED,
99             Pstream::procID(toProcNo),
100             Pstream::msgType(),
101             MPI_COMM_WORLD
102         );
103     }
104     else if (commsType == PstreamImpl::scheduled)
105     {
106         transferFailed = MPI_Send
107         (
108             const_cast<char*>(buf),
109             bufSize,
110             MPI_PACKED,
111             Pstream::procID(toProcNo),
112             Pstream::msgType(),
113             MPI_COMM_WORLD
114         );
115     }
116     else if (commsType == PstreamImpl::nonBlocking)
117     {
118         MPI_Request request;
120         transferFailed = MPI_Isend
121         (
122             const_cast<char*>(buf),
123             bufSize,
124             MPI_PACKED,
125             Pstream::procID(toProcNo),
126             Pstream::msgType(),
127             MPI_COMM_WORLD,
128             &request
129         );
131         OPstream_outstandingRequests_.append(request);
132     }
133     else
134     {
135         FatalErrorIn
136         (
137             "mpiOPstreamImpl::write"
138             "(const int fromProcNo, char* buf, std::streamsize bufSize)"
139         )   << "Unsupported communications type " << commsType
140             << Foam::abort(FatalError);
141     }
143     return !transferFailed;
147 void Foam::mpiOPstreamImpl::waitRequests()
149     if (OPstream_outstandingRequests_.size())
150     {
151         if
152         (
153             MPI_Waitall
154             (
155                 OPstream_outstandingRequests_.size(),
156                 OPstream_outstandingRequests_.begin(),
157                 MPI_STATUSES_IGNORE
158             )
159         )
160         {
161             FatalErrorIn
162             (
163                 "mpiOPstreamImpl::waitRequests()"
164             )   << "MPI_Waitall returned with error" << Foam::endl;
165         }
167         OPstream_outstandingRequests_.clear();
168     }
172 bool Foam::mpiOPstreamImpl::finishedRequest(const label i)
174     if (i >= OPstream_outstandingRequests_.size())
175     {
176         FatalErrorIn
177         (
178             "mpiOPstreamImpl::finishedRequest(const label)"
179         )   << "There are " << OPstream_outstandingRequests_.size()
180             << " outstanding send requests and you are asking for i=" << i
181             << nl
182             << "Maybe you are mixing blocking/non-blocking comms?"
183             << Foam::abort(FatalError);
184     }
186     int flag;
187     MPI_Test(&OPstream_outstandingRequests_[i], &flag, MPI_STATUS_IGNORE);
189     return flag != 0;
193 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
195 // ************************ vim: set sw=4 sts=4 et: ************************ //