
/Src/Dependencies/Boost/boost/graph/distributed/adjlist/redistribute.hpp

http://hadesmem.googlecode.com/
// Copyright (C) 2005-2006 The Trustees of Indiana University.

// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

//  Authors: Douglas Gregor
//           Andrew Lumsdaine

//
// Implements redistribution of vertices for a distributed adjacency
// list. This file should not be included by users. It will be
// included by the distributed adjacency list header.
//

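// A rough sketch of how the redistribute() member defined below is typically
// driven from user code (illustrative only; the graph typedef and the
// partitioning step that fills in to_processor are hypothetical placeholders):
//
//   typedef adjacency_list<vecS, distributedS<mpi_process_group, vecS>,
//                          bidirectionalS> Graph;
//   Graph g(...);
//   // to_processor: a property map from vertex descriptors to process ids,
//   // e.g. computed by a graph partitioner.
//   g.redistribute(to_processor);
//
// redistribute() ships each vertex and edge to its new owner, rebuilds the
// local adjacency structure, and synchronizes the process group before
// returning.
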
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif

#include <boost/pending/container_traits.hpp>

namespace boost { namespace detail { namespace parallel {

/* This structure contains a (vertex or edge) descriptor that is being
   moved from one processor to another. It contains the properties for
   that descriptor (if any).
 */
template<typename Descriptor, typename DescriptorProperty>
struct redistributed_descriptor : maybe_store_property<DescriptorProperty>
{
  typedef maybe_store_property<DescriptorProperty> inherited;

  redistributed_descriptor() { }

  redistributed_descriptor(const Descriptor& v, const DescriptorProperty& p)
    : inherited(p), descriptor(v) { }

  Descriptor descriptor;

private:
  friend class boost::serialization::access;

  template<typename Archiver>
  void serialize(Archiver& ar, unsigned int /*version*/)
  {
    ar & boost::serialization::base_object<inherited>(*this)
       & unsafe_serialize(descriptor);
  }
};
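
// The descriptor itself is sent with unsafe_serialize, presumably because
// vertex and edge descriptors are process-local values (pointers or local
// indices) without a portable representation of their own; on the receiving
// side they are used only as keys that redistribute() remaps onto freshly
// created local descriptors.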

/* Predicate that returns true if the target has migrated. */
template<typename VertexProcessorMap, typename Graph>
struct target_migrated_t
{
  typedef typename graph_traits<Graph>::vertex_descriptor Vertex;
  typedef typename graph_traits<Graph>::edge_descriptor Edge;

  target_migrated_t(VertexProcessorMap vertex_to_processor, const Graph& g)
    : vertex_to_processor(vertex_to_processor), g(g) { }

  bool operator()(Edge e) const
  {
    typedef global_descriptor<Vertex> DVertex;
    processor_id_type owner = get(edge_target_processor_id, g, e);
    return get(vertex_to_processor, DVertex(owner, target(e, g))) != owner;
  }

private:
  VertexProcessorMap vertex_to_processor;
  const Graph& g;
};

template<typename VertexProcessorMap, typename Graph>
inline target_migrated_t<VertexProcessorMap, Graph>
target_migrated(VertexProcessorMap vertex_to_processor, const Graph& g)
{ return target_migrated_t<VertexProcessorMap, Graph>(vertex_to_processor, g); }

/* Predicate that returns true if the source of an in-edge has migrated. */
template<typename VertexProcessorMap, typename Graph>
struct source_migrated_t
{
  typedef typename graph_traits<Graph>::vertex_descriptor Vertex;
  typedef typename graph_traits<Graph>::edge_descriptor Edge;

  source_migrated_t(VertexProcessorMap vertex_to_processor, const Graph& g)
    : vertex_to_processor(vertex_to_processor), g(g) { }

  bool operator()(stored_in_edge<Edge> e) const
  {
    typedef global_descriptor<Vertex> DVertex;
    return get(vertex_to_processor, DVertex(e.source_processor, source(e.e, g)))
      != e.source_processor;
  }

private:
  VertexProcessorMap vertex_to_processor;
  const Graph& g;
};

template<typename VertexProcessorMap, typename Graph>
inline source_migrated_t<VertexProcessorMap, Graph>
source_migrated(VertexProcessorMap vertex_to_processor, const Graph& g)
{ return source_migrated_t<VertexProcessorMap, Graph>(vertex_to_processor, g); }

/* Predicate that returns true if the source or target has migrated. */
template<typename VertexProcessorMap, typename Graph>
struct source_or_target_migrated_t
{
  typedef typename graph_traits<Graph>::edge_descriptor Edge;

  source_or_target_migrated_t(VertexProcessorMap vertex_to_processor,
                              const Graph& g)
    : vertex_to_processor(vertex_to_processor), g(g) { }

  bool operator()(Edge e) const
  {
    return get(vertex_to_processor, source(e, g)) != source(e, g).owner
      || get(vertex_to_processor, target(e, g)) != target(e, g).owner;
  }

private:
  VertexProcessorMap vertex_to_processor;
  const Graph& g;
};

template<typename VertexProcessorMap, typename Graph>
inline source_or_target_migrated_t<VertexProcessorMap, Graph>
source_or_target_migrated(VertexProcessorMap vertex_to_processor,
                          const Graph& g)
{
  typedef source_or_target_migrated_t<VertexProcessorMap, Graph> result_type;
  return result_type(vertex_to_processor, g);
}

} } // end of namespace detail::parallel

template<PBGL_DISTRIB_ADJLIST_TEMPLATE_PARMS>
template<typename VertexProcessorMap>
void
PBGL_DISTRIB_ADJLIST_TYPE
::request_in_neighbors(vertex_descriptor v,
                       VertexProcessorMap vertex_to_processor,
                       bidirectionalS)
{
  BGL_FORALL_INEDGES_T(v, e, *this, graph_type)
    request(vertex_to_processor, source(e, *this));
}

template<PBGL_DISTRIB_ADJLIST_TEMPLATE_PARMS>
template<typename VertexProcessorMap>
void
PBGL_DISTRIB_ADJLIST_TYPE
::remove_migrated_in_edges(vertex_descriptor v,
                           VertexProcessorMap vertex_to_processor,
                           bidirectionalS)
{
  graph_detail::erase_if(get(vertex_in_edges, base())[v.local],
                         source_migrated(vertex_to_processor, base()));
}

template<PBGL_DISTRIB_ADJLIST_TEMPLATE_PARMS>
template<typename VertexProcessorMap>
void
PBGL_DISTRIB_ADJLIST_TYPE
::redistribute(VertexProcessorMap vertex_to_processor)
{
  using boost::parallel::inplace_all_to_all;

  // When we have stable descriptors, we only move those descriptors
  // that actually need to be moved. Otherwise, we essentially have to
  // regenerate the entire graph.
  const bool has_stable_descriptors =
    is_same<typename config_type::vertex_list_selector, listS>::value
    || is_same<typename config_type::vertex_list_selector, setS>::value
    || is_same<typename config_type::vertex_list_selector, multisetS>::value;
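  // For example, with vecS vertex storage removing a vertex shifts the
  // indices of all later vertices, so existing descriptors cannot be kept
  // and the whole local graph is cleared and rebuilt below. With listS,
  // setS, or multisetS storage, descriptors of unmoved vertices remain
  // valid, so only the migrating vertices and edges need to be shipped.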

  typedef detail::parallel::redistributed_descriptor<vertex_descriptor,
                                                     vertex_property_type>
    redistributed_vertex;
  typedef detail::parallel::redistributed_descriptor<edge_descriptor,
                                                     edge_property_type>
    redistributed_edge;
  typedef std::pair<vertices_size_type, edges_size_type> num_relocated_pair;

  vertex_iterator vi, vi_end;
  edge_iterator ei, ei_end;

  process_group_type pg = process_group();

  // Initial synchronization makes sure that we have all of our ducks
  // in a row. We don't want any outstanding add/remove messages
  // coming in mid-redistribution!
  synchronize(process_group_);

  // We cannot cope with eviction of ghost cells
  vertex_to_processor.set_max_ghost_cells(0);

  process_id_type p = num_processes(pg);

  // Send vertices and edges to the processor where they will
  // actually reside.  This requires O(|V| + |E|) communication
  std::vector<std::vector<redistributed_vertex> > redistributed_vertices(p);
  std::vector<std::vector<redistributed_edge> > redistributed_edges(p);

  // Build the sets of relocated vertices for each process and then do
  // an all-to-all transfer.
  for (boost::tie(vi, vi_end) = vertices(*this); vi != vi_end; ++vi) {
    if (!has_stable_descriptors
        || get(vertex_to_processor, *vi) != vi->owner) {
      redistributed_vertices[get(vertex_to_processor, *vi)]
        .push_back(redistributed_vertex(*vi, get(vertex_all_t(), base(),
                                                 vi->local)));
    }

    // When our descriptors are stable, we need to know where adjacent
    // vertices are moving so that we can determine which edges will be
    // removed.
    if (has_stable_descriptors) {
      BGL_FORALL_OUTEDGES_T(*vi, e, *this, graph_type)
        request(vertex_to_processor, target(e, *this));
      request_in_neighbors(*vi, vertex_to_processor, directed_selector());
    }
  }

  inplace_all_to_all(pg, redistributed_vertices);

  // If we have stable descriptors, we need to know where our neighbor
  // vertices are moving.
  if (has_stable_descriptors)
    synchronize(vertex_to_processor);

  // Build the sets of relocated edges for each process and then do
  // an all-to-all transfer.
  for (boost::tie(ei, ei_end) = edges(*this); ei != ei_end; ++ei) {
    vertex_descriptor src = source(*ei, *this);
    vertex_descriptor tgt = target(*ei, *this);
    if (!has_stable_descriptors
        || get(vertex_to_processor, src) != src.owner
        || get(vertex_to_processor, tgt) != tgt.owner)
      redistributed_edges[get(vertex_to_processor, source(*ei, *this))]
        .push_back(redistributed_edge(*ei, get(edge_all_t(), base(),
                                               ei->local)));
  }
  inplace_all_to_all(pg, redistributed_edges);

  // A mapping from old vertex descriptors to new vertex
  // descriptors. This is an STL map partly because I'm too lazy to
  // build a real property map (which is hard in the general case) but
  // also because it won't try to look in the graph itself, because
  // the keys are all vertex descriptors that have been invalidated.
  std::map<vertex_descriptor, vertex_descriptor> old_to_new_vertex_map;
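  // Entries map an old (pre-redistribution) descriptor to the corresponding
  // new descriptor. Vertices that stay on this process receive an identity
  // entry below; everything else is filled in from the mappings exchanged
  // with the other processes.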

  if (has_stable_descriptors) {
    // Clear out all vertices and edges that will have moved. There
    // are several stages to this.

    // First, eliminate all outgoing edges from the (local) vertices
    // that have been moved or whose targets have been moved.
    BGL_FORALL_VERTICES_T(v, *this, graph_type) {
      if (get(vertex_to_processor, v) != v.owner) {
        clear_out_edges(v.local, base());
        clear_in_edges_local(v, directed_selector());
      } else {
        remove_out_edge_if(v.local,
                           target_migrated(vertex_to_processor, base()),
                           base());
        remove_migrated_in_edges(v, vertex_to_processor, directed_selector());
      }
    }

    // Next, eliminate locally-stored edges that have migrated (for
    // undirected graphs).
    graph_detail::erase_if(local_edges_,
                           source_or_target_migrated(vertex_to_processor, *this));

    // Eliminate vertices that have migrated
    for (boost::tie(vi, vi_end) = vertices(*this); vi != vi_end; /* in loop */) {
      if (get(vertex_to_processor, *vi) != vi->owner)
        remove_vertex((*vi++).local, base());
      else {
        // Add the identity relation for vertices that have not migrated
        old_to_new_vertex_map[*vi] = *vi;
        ++vi;
      }
    }
  } else {
    // Clear out the local graph: the entire graph is in transit
    clear();
  }

  // Add the new vertices to the graph. When we do so, update the old
  // -> new vertex mapping both locally and for the owner of the "old"
  // vertex.
  {
    typedef std::pair<vertex_descriptor, vertex_descriptor> mapping_pair;
    std::vector<std::vector<mapping_pair> > mappings(p);

    for (process_id_type src = 0; src < p; ++src) {
      for (typename std::vector<redistributed_vertex>::iterator vi =
             redistributed_vertices[src].begin();
           vi != redistributed_vertices[src].end(); ++vi) {
        vertex_descriptor new_vertex =
            add_vertex(vi->get_property(), *this);
        old_to_new_vertex_map[vi->descriptor] = new_vertex;
        mappings[vi->descriptor.owner].push_back(mapping_pair(vi->descriptor,
                                                              new_vertex));
      }

      redistributed_vertices[src].clear();
    }

    inplace_all_to_all(pg, mappings);

    // Add the mappings we were sent into the old->new map.
    for (process_id_type src = 0; src < p; ++src)
      old_to_new_vertex_map.insert(mappings[src].begin(), mappings[src].end());
  }

  // Get old->new vertex mappings for all of the vertices we need to
  // know about.

  // TBD: An optimization here might involve sending the
  // request-response pairs without an explicit request step (for
  // bidirectional and undirected graphs). However, it may not matter
  // all that much given the cost of redistribution.
  {
    std::vector<std::vector<vertex_descriptor> > vertex_map_requests(p);
    std::vector<std::vector<vertex_descriptor> > vertex_map_responses(p);

    // We need to know about all of the vertices incident on edges
    // that have been relocated to this processor. Tell each processor
    // what each other processor needs to know.
    for (process_id_type src = 0; src < p; ++src)
      for (typename std::vector<redistributed_edge>::iterator ei =
             redistributed_edges[src].begin();
           ei != redistributed_edges[src].end(); ++ei) {
        vertex_descriptor need_vertex = target(ei->descriptor, *this);
        if (old_to_new_vertex_map.find(need_vertex)
            == old_to_new_vertex_map.end())
          {
            old_to_new_vertex_map[need_vertex] = need_vertex;
            vertex_map_requests[need_vertex.owner].push_back(need_vertex);
          }
      }
    inplace_all_to_all(pg,
                       vertex_map_requests,
                       vertex_map_responses);

    // Process the requests made for vertices we own. Then perform yet
    // another all-to-all swap. This one matches the requests we've
    // made to the responses we were given.
    for (process_id_type src = 0; src < p; ++src)
      for (typename std::vector<vertex_descriptor>::iterator vi =
             vertex_map_responses[src].begin();
           vi != vertex_map_responses[src].end(); ++vi)
        *vi = old_to_new_vertex_map[*vi];
    inplace_all_to_all(pg, vertex_map_responses);

    // Matching the requests to the responses, update the old->new
    // vertex map for all of the vertices we will need to know.
    for (process_id_type src = 0; src < p; ++src) {
      typedef typename std::vector<vertex_descriptor>::size_type size_type;
      for (size_type i = 0; i < vertex_map_requests[src].size(); ++i) {
        old_to_new_vertex_map[vertex_map_requests[src][i]] =
          vertex_map_responses[src][i];
      }
    }
  }

  // Add edges to the graph by mapping the source and target.
  for (process_id_type src = 0; src < p; ++src) {
    for (typename std::vector<redistributed_edge>::iterator ei =
           redistributed_edges[src].begin();
         ei != redistributed_edges[src].end(); ++ei) {
      add_edge(old_to_new_vertex_map[source(ei->descriptor, *this)],
               old_to_new_vertex_map[target(ei->descriptor, *this)],
               ei->get_property(),
               *this);
    }

    redistributed_edges[src].clear();
  }

  // Be sure that edge-addition messages are received now, completing
  // the graph.
  synchronize(process_group_);

  this->distribution().clear();

  detail::parallel::maybe_initialize_vertex_indices(vertices(base()),
                                                    get(vertex_index, base()));
}

} // end namespace boost