/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | Copyright (C) 1991-2009 OpenCFD Ltd.
     \\/     M anipulation  |
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software; you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by the
    Free Software Foundation; either version 2 of the License, or (at your
    option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM; if not, write to the Free Software Foundation,
    Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

\*---------------------------------------------------------------------------*/

#include "ProcessorTopology.H"
#include "ListOps.H"
#include "Pstream.H"
#include "commSchedule.H"

// * * * * * * * * * * * * * Private Member Functions  * * * * * * * * * * * //

template<class Patch, class ProcPatch>
Foam::labelList Foam::ProcessorTopology<Patch, ProcPatch>::procNeighbours
(
    const PtrList<Patch>& patches
)
{
    // Determine number of processor neighbours and max neighbour id.

    label nNeighbours = 0;

    label maxNb = 0;

    forAll(patches, patchi)
    {
        const Patch& patch = patches[patchi];

        if (isType<ProcPatch>(patch))
        {
            const ProcPatch& procPatch =
                refCast<const ProcPatch>(patch);

            nNeighbours++;

            maxNb = max(maxNb, procPatch.neighbProcNo());
        }
    }

    labelList neighbours(nNeighbours);
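
    // The reverse map procPatchMap_ translates a neighbour processor number
    // into the index of the local patch shared with that processor; entries
    // left at -1 correspond to processors without a shared patch.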
    procPatchMap_.setSize(maxNb + 1);
    procPatchMap_ = -1;

    nNeighbours = 0;

    forAll(patches, patchi)
    {
        const Patch& patch = patches[patchi];

        if (isType<ProcPatch>(patch))
        {
            const ProcPatch& procPatch =
                refCast<const ProcPatch>(patch);

            neighbours[nNeighbours++] = procPatch.neighbProcNo();

            // Construct reverse map
            procPatchMap_[procPatch.neighbProcNo()] = patchi;
        }
    }

    return neighbours;
}


// * * * * * * * * * * * * * * * * Constructors  * * * * * * * * * * * * * * //

// Construct from components
template<class Patch, class ProcPatch>
Foam::ProcessorTopology<Patch, ProcPatch>::ProcessorTopology
(
    const PtrList<Patch>& patches
)
:
    labelListList(Pstream::nProcs()),
    patchSchedule_(2*patches.size())
{
    if (Pstream::parRun())
    {
        // Fill my 'slot' with my neighbours
        operator[](Pstream::myProcNo()) = procNeighbours(patches);

        // Distribute to all processors
        Pstream::gatherList(*this);
        Pstream::scatterList(*this);
    }
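
    // After the gather/scatter every processor holds the complete topology:
    // element procI of this labelListList contains the processor numbers
    // neighbouring processor procI.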

    if (Pstream::parRun() && Pstream::defaultCommsType == Pstream::scheduled)
    {
        label patchEvali = 0;

        // 1. All non-processor patches
        // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        forAll(patches, patchi)
        {
            if (!isType<ProcPatch>(patches[patchi]))
            {
                patchSchedule_[patchEvali].patch = patchi;
                patchSchedule_[patchEvali++].init = true;
                patchSchedule_[patchEvali].patch = patchi;
                patchSchedule_[patchEvali++].init = false;
            }
        }

        // 2. All processor patches
        // ~~~~~~~~~~~~~~~~~~~~~~~~

        // Determine the schedule for all. Insert processor pair once
        // to determine the schedule. Each processor pair stands for both
        // send and receive.
        label nComms = 0;
        forAll(*this, procI)
        {
            nComms += operator[](procI).size();
        }
        DynamicList<labelPair> comms(nComms);
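
        // nComms counts every processor link twice (once from either side),
        // so it is only an upper bound used to size the comms list; each
        // unique pair is appended once below.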
        forAll(*this, procI)
        {
            const labelList& nbrs = operator[](procI);

            forAll(nbrs, i)
            {
                if (procI < nbrs[i])
                {
                    comms.append(labelPair(procI, nbrs[i]));
                }
            }
        }
        comms.shrink();

        // Determine a schedule.
        labelList mySchedule
        (
            commSchedule
            (
                Pstream::nProcs(),
                comms
            ).procSchedule()[Pstream::myProcNo()]
        );
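
        // mySchedule lists, in execution order, the indices into comms of
        // the pairwise exchanges this processor takes part in.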
        forAll(mySchedule, iter)
        {
            label commI = mySchedule[iter];

            // Get the other processor
            label nb = comms[commI][0];
            if (nb == Pstream::myProcNo())
            {
                nb = comms[commI][1];
            }
            label patchi = procPatchMap_[nb];
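
            // Order the init/evaluate calls so that the two sides of the
            // pair are complementary: the higher-numbered processor does
            // initEvaluate first, the lower-numbered one evaluate first.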
            if (Pstream::myProcNo() > nb)
            {
                patchSchedule_[patchEvali].patch = patchi;
                patchSchedule_[patchEvali++].init = true;
                patchSchedule_[patchEvali].patch = patchi;
                patchSchedule_[patchEvali++].init = false;
            }
            else
            {
                patchSchedule_[patchEvali].patch = patchi;
                patchSchedule_[patchEvali++].init = false;
                patchSchedule_[patchEvali].patch = patchi;
                patchSchedule_[patchEvali++].init = true;
            }
        }
    }
    else
    {
        label patchEvali = 0;

        // 1. All non-processor patches
        // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        // Have evaluate directly after initEvaluate. Could have them separated
        // as long as they're not intermingled with processor patches since
        // then e.g. any reduce parallel traffic would interfere with the
        // processor swaps.

        forAll(patches, patchi)
        {
            if (!isType<ProcPatch>(patches[patchi]))
            {
                patchSchedule_[patchEvali].patch = patchi;
                patchSchedule_[patchEvali++].init = true;
                patchSchedule_[patchEvali].patch = patchi;
                patchSchedule_[patchEvali++].init = false;
            }
        }

        // 2. All processor patches
        // ~~~~~~~~~~~~~~~~~~~~~~~~

        // 2a. initEvaluate
        forAll(patches, patchi)
        {
            if (isType<ProcPatch>(patches[patchi]))
            {
                patchSchedule_[patchEvali].patch = patchi;
                patchSchedule_[patchEvali++].init = true;
            }
        }

        // 2b. evaluate
        forAll(patches, patchi)
        {
            if (isType<ProcPatch>(patches[patchi]))
            {
                patchSchedule_[patchEvali].patch = patchi;
                patchSchedule_[patchEvali++].init = false;
            }
        }
    }
}


// ************************************************************************* //
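
// Example usage (a minimal sketch, assuming polyPatch and processorPolyPatch
// as the template arguments and a polyBoundaryMesh 'boundary', which is a
// PtrList<polyPatch>):
//
//     ProcessorTopology<polyPatch, processorPolyPatch> topo(boundary);
//
// After construction, slot Pstream::myProcNo() of 'topo' holds this
// processor's neighbour list, and the patch schedule built above fixes the
// per-patch order of initEvaluate/evaluate calls.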