1 // -*- C++ -*-
3 // Copyright (C) 2007 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the terms
7 // of the GNU General Public License as published by the Free Software
8 // Foundation; either version 2, or (at your option) any later
9 // version.
11 // This library is distributed in the hope that it will be useful, but
12 // WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 // General Public License for more details.
16 // You should have received a copy of the GNU General Public License
17 // along with this library; see the file COPYING. If not, write to
18 // the Free Software Foundation, 59 Temple Place - Suite 330, Boston,
19 // MA 02111-1307, USA.
21 // As a special exception, you may use this file as part of a free
22 // software library without restriction. Specifically, if other files
23 // instantiate templates or use macros or inline functions from this
24 // file, or you compile this file and link it with other files to
25 // produce an executable, this file does not by itself cause the
26 // resulting executable to be covered by the GNU General Public
27 // License. This exception does not however invalidate any other
28 // reasons why the executable file might be covered by the GNU General
29 // Public License.
31 /** @file parallel/tree.h
32 * @brief Parallel red-black tree operations.
34 * This implementation is described in
36 * Leonor Frias, Johannes Singler.
37 * Parallelization of Bulk Operations for STL Dictionaries.
38 * Workshop on Highly Parallel Processing on a Chip (HPPC) 2007.
40 * This file is a GNU parallel extension to the Standard C++ Library.
43 // Written by Leonor Frias Moya, Johannes Singler.
45 #ifndef _GLIBCXX_PARALLEL_TREE_H
46 #define _GLIBCXX_PARALLEL_TREE_H 1
48 #include <parallel/parallel.h>
49 #include <functional>
50 #include <cmath>
51 #include <algorithm>
52 #include <iterator>
53 #include <functional>
54 #include <iostream>
55 //#include <ext/malloc_allocator.h>
56 #include <bits/stl_tree.h>
58 #include <parallel/list_partition.h>
60 namespace std
62 // XXX Declaration should go to stl_tree.h.
63 void
64 _Rb_tree_rotate_left(_Rb_tree_node_base* const __x,
65 _Rb_tree_node_base*& __root);
67 void
68 _Rb_tree_rotate_right(_Rb_tree_node_base* const __x,
69 _Rb_tree_node_base*& __root);
73 namespace __gnu_parallel
75 // XXX move into parallel/type_traits.h if <type_traits> doesn't work.
76 /** @brief Helper class: remove the const modifier from the first
77 component, if present. Set kind component.
78 * @param T Simple type, nothing to unconst */
79 template<typename T>
80 struct unconst_first_component
82 /** @brief New type after removing the const */
83 typedef T type;
86 /** @brief Helper class: remove the const modifier from the first
87 component, if present. Map kind component
88 * @param Key First component, from which to remove the const modifier
89 * @param Load Second component
90 * @sa unconst_first_component */
91 template<typename Key, typename Load>
92 struct unconst_first_component<std::pair<const Key, Load> >
94 /** @brief New type after removing the const */
95 typedef std::pair<Key, Load> type;
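  // Illustrative example (not part of the original header): for a map-like
  // container the stored value is std::pair<const Key, Load>, so
  //   unconst_first_component<std::pair<const int, double> >::type
  // is std::pair<int, double>; for a set-like container the primary template
  // above leaves the element type untouched.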
98 /** @brief Helper class: set the appropriate comparator to deal with
99 * repetitions. Comparator for unique dictionaries.
101 * StrictlyLess and LessEqual are part of a mechanism to deal with
102 * repetitions transparently whatever the actual policy is.
103 * @param _Key Keys to compare
104 * @param _Compare Comparator equal to conceptual < */
105 template<typename _Key, typename _Compare>
106 struct StrictlyLess : public std::binary_function<_Key, _Key, bool>
108 /** @brief Comparator equal to conceptual < */
109 _Compare c;
111 /** @brief Constructor given a Comparator */
112 StrictlyLess(const _Compare& _c) : c(_c) { }
114 /** @brief Copy constructor */
115 StrictlyLess(const StrictlyLess<_Key, _Compare>& strictly_less)
116 : c(strictly_less.c) { }
118 /** @brief Operator() */
119 bool operator()(const _Key& k1, const _Key& k2) const
121 return c(k1, k2);
125 /** @brief Helper class: set the appropriate comparator to deal with
126 * repetitions. Comparator for non-unique dictionaries.
128 * StrictlyLess and LessEqual are part of a mechanism to deal with
129 * repetitions transparently whatever the actual policy is.
130 * @param _Key Keys to compare
 131      * @param _Compare Comparator equal to conceptual <; the wrapper exposes it as conceptual <= */
132 template<typename _Key, typename _Compare>
133 struct LessEqual : public std::binary_function<_Key, _Key, bool>
135 /** @brief Comparator equal to conceptual < */
136 _Compare c;
138 /** @brief Constructor given a Comparator */
139 LessEqual(const _Compare& _c) : c(_c) { }
141 /** @brief Copy constructor */
142 LessEqual(const LessEqual<_Key, _Compare>& less_equal)
143 : c(less_equal.c) { }
145 /** @brief Operator() */
146 bool operator()(const _Key& k1, const _Key& k2) const
147 { return !c(k2, k1); }
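  // A minimal usage sketch (illustrative, assuming _Compare is std::less<int>):
  //   __gnu_parallel::LessEqual<int, std::less<int> > le((std::less<int>()));
  //   le(3, 3);   // true:  !(3 < 3)
  //   le(3, 4);   // true:  !(4 < 3)
  //   le(4, 3);   // false: 3 < 4 holds
  // i.e. wrapping a conceptual < yields a conceptual <=, which is what lets
  // the non-unique (multiset/multimap style) bulk operations accept equal keys.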
151 /** @brief Parallel red-black tree.
153 * Extension of the sequential red-black tree. Specifically,
154 * parallel bulk insertion operations are provided.
155 * @param _Key Keys to compare
156 * @param _Val Elements to store in the tree
 157      * @param _KeyOfValue Obtains the key from an element
158 * @param _Compare Comparator equal to conceptual <
159 * @param _Alloc Allocator for the elements */
160 template<typename _Key, typename _Val, typename _KeyOfValue,
161 typename _Compare, typename _Alloc = std::allocator<_Val> >
162 class _Rb_tree
163 : public std::_Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>
165 private:
166 /** @brief Sequential tree */
167 typedef std::_Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc> base_type;
169 /** @brief Renaming of base node type */
170 typedef typename std::_Rb_tree_node<_Val> _Rb_tree_node;
172 /** @brief Renaming of libstdc++ node type */
173 typedef typename std::_Rb_tree_node_base _Rb_tree_node_base;
175 /** @brief Renaming of base key_type */
176 typedef typename base_type::key_type key_type;
178 /** @brief Renaming of base value_type */
179 typedef typename base_type::value_type value_type;
181 /** @brief Helper class to unconst the first component of
182 * value_type if exists.
184 * This helper class is needed for map, but may discard qualifiers
185 * for set; however, a set with a const element type is not useful
186 * and should fail in some other place anyway.
188 typedef typename unconst_first_component<value_type>::type nc_value_type;
190 /** @brief Pointer to a node */
191 typedef _Rb_tree_node* _Rb_tree_node_ptr;
193 /** @brief Wrapper comparator class to deal with repetitions
194 transparently according to dictionary type with key _Key and
195 comparator _Compare. Unique dictionaries object
197 StrictlyLess<_Key, _Compare> strictly_less;
199 /** @brief Wrapper comparator class to deal with repetitions
200 transparently according to dictionary type with key _Key and
201 comparator _Compare. Non-unique dictionaries object
203 LessEqual<_Key, _Compare> less_equal;
205 public:
206 /** @brief Renaming of base size_type */
207 typedef typename base_type::size_type size_type;
209 /** @brief Constructor with a given comparator and allocator.
211 * Delegates the basic initialization to the sequential class and
212 * initializes the helper comparators of the parallel class
213 * @param c Comparator object with which to initialize the class
214 * comparator and the helper comparators
215 * @param a Allocator object with which to initialize the class comparator
217 _Rb_tree(const _Compare& c, const _Alloc& a)
218 : base_type(c, a), strictly_less(base_type::_M_impl._M_key_compare),
219 less_equal(base_type::_M_impl._M_key_compare)
222 /** @brief Copy constructor.
224 * Delegates the basic initialization to the sequential class and
225 * initializes the helper comparators of the parallel class
226 * @param __x Parallel red-black instance to copy
228 _Rb_tree(const _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>& __x)
229 : base_type(__x), strictly_less(base_type::_M_impl._M_key_compare),
230 less_equal(base_type::_M_impl._M_key_compare)
233 /** @brief Parallel replacement of the sequential
234 * std::_Rb_tree::_M_insert_unique()
236 * Parallel bulk insertion and construction. If the container is
237 * empty, bulk construction is performed. Otherwise, bulk
238 * insertion is performed
239 * @param __first First element of the input
240 * @param __last Last element of the input
242 template<typename _InputIterator>
243 void
244 _M_insert_unique(_InputIterator __first, _InputIterator __last)
246 if (__first==__last) return;
247 if (_GLIBCXX_PARALLEL_CONDITION(true))
248 if (base_type::_M_impl._M_node_count == 0)
250 _M_bulk_insertion_construction(__first, __last, true,
251 strictly_less);
252 _GLIBCXX_PARALLEL_ASSERT(rb_verify());
254 else
256 _M_bulk_insertion_construction(__first, __last, false,
257 strictly_less);
258 _GLIBCXX_PARALLEL_ASSERT(rb_verify());
260 else
262 base_type::_M_insert_unique(__first, __last);
266 /** @brief Parallel replacement of the sequential
267 * std::_Rb_tree::_M_insert_equal()
269 * Parallel bulk insertion and construction. If the container is
270 * empty, bulk construction is performed. Otherwise, bulk
271 * insertion is performed
272 * @param __first First element of the input
273 * @param __last Last element of the input */
274 template<typename _InputIterator>
275 void
276 _M_insert_equal(_InputIterator __first, _InputIterator __last)
278 if (__first==__last) return;
279 if (_GLIBCXX_PARALLEL_CONDITION(true))
280 if (base_type::_M_impl._M_node_count == 0)
281 _M_bulk_insertion_construction(__first, __last, true, less_equal);
282 else
283 _M_bulk_insertion_construction(__first, __last, false, less_equal);
284 else
285 base_type::_M_insert_equal(__first, __last);
286 _GLIBCXX_PARALLEL_ASSERT(rb_verify());
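  // Dispatch summary (illustrative recap of the two entry points above):
  //   if (_GLIBCXX_PARALLEL_CONDITION(true))
  //     empty tree     -> _M_bulk_insertion_construction(..., true,  cmp)
  //     non-empty tree -> _M_bulk_insertion_construction(..., false, cmp)
  //   else
  //     sequential std::_Rb_tree range insertion
  // where cmp is strictly_less for unique containers and less_equal for
  // non-unique ones; the repetition policy is selected here and simply
  // threaded through the helper routines below.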
289 private:
291 /** @brief Helper class of _Rb_tree: node linking.
 293      * Links the nodes to form an almost complete tree. The last level
294 * is coloured red, the rest are black
295 * @param ranker Calculates the position of a node in an array of nodes
297 template<typename ranker>
298 class nodes_initializer
300 /** @brief Renaming of tree size_type */
302 typedef _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc> tree_type;
303 typedef typename tree_type::size_type size_type;
304 public:
306 /** @brief mask[%i]= 0..01..1, where the number of 1s is %i+1 */
307 size_type mask[sizeof(size_type)*8];
309 /** @brief Array of nodes (initial address) */
310 const _Rb_tree_node_ptr* r_init;
312 /** @brief Total number of (used) nodes */
313 size_type n;
315 /** @brief Rank of the last tree node that can be calculated
316 taking into account a complete tree
318 size_type splitting_point;
320 /** @brief Rank of the tree root */
321 size_type rank_root;
323 /** @brief Height of the tree */
324 int height;
 326       /** @brief Number of threads into which to divide the work */
327 const thread_index_t num_threads;
329 /** @brief Helper object to mind potential gaps in r_init */
330 const ranker& rank;
332 /** @brief Constructor
333 * @param r Array of nodes
334 * @param _n Total number of (used) nodes
 335        * @param _num_threads Number of threads into which to divide the work
336 * @param _rank Helper object to mind potential gaps in @c r_init */
337 nodes_initializer(const _Rb_tree_node_ptr* r, const size_type _n,
338 const thread_index_t _num_threads, const ranker& _rank):
339 r_init(r),
340 n(_n),
341 num_threads(_num_threads),
342 rank(_rank)
344 height = log2(n);
345 splitting_point = 2 * (n - ((1 << height) - 1)) -1;
347 // Rank root.
348 size_type max = 1 << (height + 1);
349 rank_root= (max-2) >> 1;
350 if (rank_root > splitting_point)
351 rank_root = complete_to_original(rank_root);
353 mask[0] = 0x1;
354 for (unsigned int i = 1; i < sizeof(size_type)*8; ++i)
356 mask[i] = (mask[i-1] << 1) + 1;
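  // Worked example (illustrative, not from the original sources): for n == 10
  // nodes the constructor computes
  //   height          = log2(10)                    = 3
  //   splitting_point = 2 * (10 - ((1<<3) - 1)) - 1 = 5
  //   rank_root       = ((1 << 4) - 2) >> 1         = 7  (rank in a complete tree)
  // and, since 7 > splitting_point, the root rank is remapped with
  // complete_to_original(7) == (7 + 5) >> 1 == 6, i.e. array slot 6.
  // mask[] simply holds 0x1, 0x3, 0x7, ... for the trailing-bit searches below.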
360 /** @brief Query for tree height
361 * @return Tree height */
362 int
363 get_height() const
364 { return height; }
366 /** @brief Query for the splitting point
367 * @return Splitting point */
368 size_type
369 get_shifted_splitting_point() const
370 { return rank.get_shifted_rank(splitting_point, 0); }
372 /** @brief Query for the tree root node
373 * @return Tree root node */
374 _Rb_tree_node_ptr
375 get_root() const
376 { return r_init[rank.get_shifted_rank(rank_root,num_threads/2)]; }
378 /** @brief Calculation of the parent position in the array of nodes
379 * @hideinitializer */
380 #define CALCULATE_PARENT \
381 if (p_s> splitting_point) \
382 p_s = complete_to_original(p_s); \
383 int s_r = rank.get_shifted_rank(p_s,iam); \
384 r->_M_parent = r_init[s_r]; \
386 /** @brief Link a node with its parent and children taking into
387 account that its rank (without gaps) is different to that in
388 a complete tree
389 * @param r Pointer to the node
390 * @param iam Partition of the array in which the node is, where
391 * iam is in [0..num_threads)
392 * @sa link_complete */
393 void
394 link_incomplete(const _Rb_tree_node_ptr& r, const int iam) const
396 size_type real_pos = rank.get_real_rank(&r-r_init, iam);
397 size_type l_s, r_s, p_s;
398 int mod_pos= original_to_complete(real_pos);
399 int zero= first_0_right(mod_pos);
401 // 1. Convert n to n', where n' will be its rank if the tree
402 // was complete
403 // 2. Calculate neighbours for n'
404 // 3. Convert the neighbors n1', n2' and n3' to their
405 // appropriate values n1, n2, n3. Note that it must be
406 // checked that these neighbors actually exist.
407 calculate_shifts_pos_level(mod_pos, zero, l_s, r_s, p_s);
408 if (l_s > splitting_point)
410 _GLIBCXX_PARALLEL_ASSERT(r_s > splitting_point);
411 if (zero == 1)
413 r->_M_left = 0;
414 r->_M_right = 0;
416 else
418 r->_M_left= r_init[rank.get_shifted_rank(complete_to_original(l_s),iam)];
419 r->_M_right= r_init[rank.get_shifted_rank(complete_to_original(r_s),iam)];
423 else{
424 r->_M_left= r_init[rank.get_shifted_rank(l_s,iam)];
425 if (zero != 1)
427 r->_M_right= r_init[rank.get_shifted_rank(complete_to_original(r_s),iam)];
429 else
431 r->_M_right = 0;
434 r->_M_color = std::_S_black;
435 CALCULATE_PARENT;
438 /** @brief Link a node with its parent and children taking into
439 account that its rank (without gaps) is the same as that in
440 a complete tree
441 * @param r Pointer to the node
442 * @param iam Partition of the array in which the node is, where
443 * iam is in [0..@c num_threads)
444 * @sa link_incomplete
446 void
447 link_complete(const _Rb_tree_node_ptr& r, const int iam) const
449 size_type real_pos = rank.get_real_rank(&r-r_init, iam);
450 size_type p_s;
452 // Test if it is a leaf on the last not necessarily full level
453 if ((real_pos & mask[0]) == 0)
455 if ((real_pos & 0x2) == 0)
456 p_s = real_pos + 1;
457 else
458 p_s = real_pos - 1;
459 r->_M_color = std::_S_red;
460 r->_M_left = 0;
461 r->_M_right = 0;
463 else
465 size_type l_s, r_s;
466 int zero = first_0_right(real_pos);
467 calculate_shifts_pos_level(real_pos, zero, l_s, r_s, p_s);
468 r->_M_color = std::_S_black;
470 r->_M_left = r_init[rank.get_shifted_rank(l_s,iam)];
471 if (r_s > splitting_point)
472 r_s = complete_to_original(r_s);
473 r->_M_right = r_init[rank.get_shifted_rank(r_s,iam)];
475 CALCULATE_PARENT;
478 #undef CALCULATE_PARENT
480 private:
481 /** @brief Change of "base": Convert the rank in the actual tree
482 into the corresponding rank if the tree was complete
483 * @param pos Rank in the actual incomplete tree
484 * @return Rank in the corresponding complete tree
485 * @sa complete_to_original */
486 int
487 original_to_complete(const int pos) const
488 { return (pos << 1) - splitting_point; }
490 /** @brief Change of "base": Convert the rank if the tree was
491 complete into the corresponding rank in the actual tree
492 * @param pos Rank in the complete tree
493 * @return Rank in the actual incomplete tree
494 * @sa original_to_complete */
495 int
496 complete_to_original(const int pos) const
497 { return (pos + splitting_point) >> 1; }
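  // Continuing the illustrative n == 10 example (splitting_point == 5): real
  // ranks 0..5 coincide with the complete-tree ranks, real rank 6 maps to
  // complete rank 2*6 - 5 == 7, and complete rank 13 maps back to real rank
  // (13 + 5) >> 1 == 9; beyond the splitting point the two conversions are
  // exact inverses of each other.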
500 /** @brief Calculate the rank in the complete tree of the parent
501 and children of a node
502 * @param pos Rank in the complete tree of the node whose parent
503 * and children rank must be calculated
504 * @param level Tree level in which the node at pos is in
505 * (starting to count at leaves). @pre @c level > 1
506 * @param left_shift Rank in the complete tree of the left child
507 * of pos (out parameter)
508 * @param right_shift Rank in the complete tree of the right
509 * child of pos (out parameter)
510 * @param parent_shift Rank in the complete tree of the parent
511 * of pos (out parameter)
513 void
514 calculate_shifts_pos_level(const size_type pos, const int level,
515 size_type& left_shift, size_type& right_shift,
516 size_type& parent_shift) const
518 int stride = 1 << (level -1);
519 left_shift = pos - stride;
520 right_shift = pos + stride;
521 if (((pos >> (level + 1)) & 0x1) == 0)
522 parent_shift = pos + 2*stride;
523 else
524 parent_shift = pos - 2*stride;
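  // Worked example (illustrative): in a complete tree of 15 nodes stored at
  // in-order positions 0..14, the node at pos == 11 is at level 2, so
  //   stride == 2, left_shift == 9, right_shift == 13,
  // and because ((11 >> 3) & 0x1) == 1 the parent is 11 - 4 == 7 (the root);
  // for pos == 3 (also level 2) the bit is 0, giving parent 3 + 4 == 7 as well.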
527 /** @brief Search for the first 0 bit (growing the weight)
528 * @param x Binary number (corresponding to a rank in the tree)
529 * whose first 0 bit must be calculated
530 * @return Position of the first 0 bit in @c x (starting to
531 * count with 1)
533 int
534 first_0_right(const size_type x) const
536 if ((x & 0x2) == 0)
537 return 1;
538 else
539 return first_0_right_bs(x);
542 /** @brief Search for the first 0 bit (growing the weight) using
543 * binary search
545 * Binary search can be used instead of a naive loop using the
546 * masks in mask array
547 * @param x Binary number (corresponding to a rank in the tree)
548 * whose first 0 bit must be calculated
549 * @param k_beg Position in which to start searching. By default is 2.
550 * @return Position of the first 0 bit in x (starting to count with 1) */
551 int
552 first_0_right_bs(const size_type x, int k_beg=2) const
554 int k_end = sizeof(size_type)*8;
555 size_type not_x = x ^ mask[k_end-1];
556 while ((k_end-k_beg) > 1)
558 int k = k_beg + (k_end-k_beg)/2;
559 if ((not_x & mask[k-1]) != 0)
560 k_end = k;
561 else
562 k_beg = k;
564 return k_beg;
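  // Example (illustrative): for x == 0xB (binary 1011) the masks pick out the
  // low-order bits and the search settles on 2, the run of 1-bits before the
  // lowest 0; for x == 0x7 (binary 0111) it settles on 3.  This is the value
  // that the linking code passes to calculate_shifts_pos_level() as the level.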
568 /***** Dealing with repetitions (EFFICIENCY ISSUE) *****/
569 /** @brief Helper class of nodes_initializer: mind the gaps of an
570 array of nodes.
572 * Get absolute positions in an array of nodes taking into account
573 * the gaps in it @sa ranker_no_gaps
575 class ranker_gaps
577 /** @brief Renaming of tree's size_type */
578 typedef _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc> tree_type;
579 typedef typename tree_type::size_type size_type;
581 /** @brief Array containing the beginning ranks of all the
582 num_threads partitions just considering the valid nodes, not
583 the gaps */
584 size_type* beg_partition;
586 /** @brief Array containing the beginning ranks of all the
587 num_threads partitions considering the valid nodes and the
588 gaps */
589 const size_type* beg_shift_partition;
591 /** @brief Array containing the number of accumulated gaps at
592 the beginning of each partition */
593 const size_type* rank_shift;
595 /** @brief Number of partitions (and threads that work on it) */
596 const thread_index_t num_threads;
598 public:
599 /** @brief Constructor
600 * @param size_p Pointer to the array containing the beginning
601 * ranks of all the @c _num_threads partitions considering the
602 * valid nodes and the gaps
603 * @param shift_r Array containing the number of accumulated
604 * gaps at the beginning of each partition
605 * @param _num_threads Number of partitions (and threads that
606 * work on it) */
607 ranker_gaps(const size_type* size_p, const size_type* shift_r,
608 const thread_index_t _num_threads) :
609 beg_shift_partition(size_p),
610 rank_shift(shift_r),
611 num_threads(_num_threads)
613 beg_partition = new size_type[num_threads+1];
614 beg_partition[0] = 0;
615 for (int i = 1; i <= num_threads; ++i)
617 beg_partition[i] = beg_partition[i-1] + (beg_shift_partition[i] - beg_shift_partition[i-1]) - (rank_shift[i] - rank_shift[i-1]);
621 // Ghost element, strictly larger than any index requested.
622 ++beg_partition[num_threads];
625 /** @brief Destructor
626 * Needs to be defined to deallocate the dynamic memory that has
627 * been allocated for beg_partition array
629 ~ranker_gaps()
630 { delete[] beg_partition; }
632 /** @brief Convert a rank in the array of nodes considering
633 valid nodes and gaps, to the corresponding considering only
634 the valid nodes
635 * @param pos Rank in the array of nodes considering valid nodes and gaps
636 * @param index Partition which the rank belongs to
637 * @return Rank in the array of nodes considering only the valid nodes
638 * @sa get_shifted_rank
640 size_type
641 get_real_rank(const size_type pos, const int index) const
642 { return pos - rank_shift[index]; }
644 /** @brief Inverse of get_real_rank: Convert a rank in the array
645 of nodes considering only valid nodes, to the corresponding
646 considering valid nodes and gaps
647 * @param pos Rank in the array of nodes considering only valid nodes
648 * @param index Partition which the rank is most likely to
 649        * belong to (i.e. the corresponding one if there were no gaps)
650 * @pre 0 <= @c pos <= number_of_distinct_elements
651 * @return Rank in the array of nodes considering valid nodes and gaps
652 * @post 0 <= @c return <= number_of_elements
653 * @sa get_real_rank()
655 size_type
656 get_shifted_rank(const size_type pos, const int index) const
658 // Heuristic.
659 if (beg_partition[index] <= pos and pos < beg_partition[index+1])
660 return pos + rank_shift[index];
661 else
662 // Called rarely, do not hinder inlining.
663 return get_shifted_rank_loop(pos,index);
666 /** @brief Helper method of get_shifted_rank: in case the given
667 index in get_shifted_rank is not correct, look for it and
668 then calculate the rank
669 * @param pos Rank in the array of nodes considering only valid nodes
 670        * @param index Partition which the rank should have belonged to
671 * if there were no gaps
672 * @return Rank in the array of nodes considering valid nodes and gaps
674 size_type
675 get_shifted_rank_loop(const size_type pos, int index) const
677 while (pos >= beg_partition[index+1])
678 ++index;
679 while (pos < beg_partition[index])
680 --index;
681 _GLIBCXX_PARALLEL_ASSERT(0 <= index && index < num_threads);
682 return pos + rank_shift[index];
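  // Worked example (illustrative): with two partitions of 4 array slots each
  // and one duplicate dropped in partition 0, rank_shift == {0, 1, 1} and the
  // constructor yields beg_partition == {0, 3, 8} (after the ghost increment).
  // The first node of partition 1 has valid rank 3 and lives in array slot
  // get_shifted_rank(3, 1) == 3 + 1 == 4; get_real_rank(4, 1) == 3 undoes it.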
686 /** @brief Helper class of nodes_initializer: access an array of
687 * nodes with no gaps
689 * Get absolute positions in an array of nodes taking into account
690 * that there are no gaps in it. @sa ranker_gaps */
691 class ranker_no_gaps
693 /** @brief Renaming of tree's size_type */
694 typedef _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc> tree_type;
695 typedef typename tree_type::size_type size_type;
697 public:
698 /** @brief Convert a rank in the array of nodes considering
699 * valid nodes and gaps, to the corresponding considering only
700 * the valid nodes
702 * As there are no gaps in this case, get_shifted_rank() and
 703        * get_real_rank() are synonyms and leave pos unchanged
704 * @param pos Rank in the array of nodes considering valid nodes and gaps
705 * @param index Partition which the rank belongs to, unused here
706 * @return Rank in the array of nodes considering only the valid nodes */
707 size_type
708 get_real_rank(const size_type pos, const int index) const
709 { return pos; }
711 /** @brief Inverse of get_real_rank: Convert a rank in the array
712 * of nodes considering only valid nodes, to the corresponding
713 * considering valid nodes and gaps
715 * As there are no gaps in this case, get_shifted_rank() and
 716        * get_real_rank() are synonyms and leave pos unchanged
717 * @param pos Rank in the array of nodes considering only valid nodes
718 * @param index Partition which the rank belongs to, unused here
719 * @return Rank in the array of nodes considering valid nodes and gaps
721 size_type
722 get_shifted_rank(const size_type pos, const int index) const
723 { return pos; }
727 /** @brief Helper comparator class: Invert a binary comparator
728 * @param _Comp Comparator to invert
729 * @param _Iterator Iterator to the elements to compare */
730 template<typename _Comp, typename _Iterator>
731 class gr_or_eq
733 /** @brief Renaming value_type of _Iterator */
734 typedef typename std::iterator_traits<_Iterator>::value_type value_type;
736 /** @brief Comparator to be inverted */
737 const _Comp comp;
739 public:
740 /** @brief Constructor
741 * @param c Comparator */
742 gr_or_eq(const _Comp& c) : comp(c) { }
744 /** @brief Operator()
745 * @param a First value to compare
746 * @param b Second value to compare */
747 bool operator()(const value_type& a, const value_type& b) const
749 if (not (comp(_KeyOfValue()(a), _KeyOfValue()(b))))
750 return true;
751 return false;
755 /** @brief Helper comparator class: Passed as a parameter of
756 list_partition to check that a sequence is sorted
757 * @param _InputIterator Iterator to the elements to compare
 758      * @param _CompIsSorted Comparator to check for sortedness */
759 template<typename _InputIterator, typename _CompIsSorted>
760 class is_sorted_functor
762 /** @brief Element to compare with (first parameter of comp) */
763 _InputIterator prev;
 765       /** @brief Comparator to check for sortedness */
766 const _CompIsSorted comp;
768 /** @brief Sum up the history of the operator() of this
 769        * comparator class. Its value is true if all calls to comp from
770 * this class have returned true. It is false otherwise */
771 bool sorted;
773 public:
774 /** @brief Constructor
776 * Sorted is set to true
777 * @param first Element to compare with the first time the
778 * operator() is called
779 * @param c Comparator to check for sortedness */
780 is_sorted_functor(const _InputIterator first, const _CompIsSorted c)
781 : prev(first), comp(c), sorted(true) { }
783 /** @brief Operator() with only one explicit parameter. Updates
784 the class member @c prev and sorted.
785 * @param it Iterator to the element which must be compared to
 786        * the element pointed to by the class member @c prev */
787 void operator()(const _InputIterator it)
789 if (sorted and it != prev and comp(_KeyOfValue()(*it),
790 _KeyOfValue()(*prev)))
791 sorted = false;
792 prev = it;
795 /** @brief Query method for sorted
796 * @return Current value of sorted */
797 bool is_sorted() const
799 return sorted;
803 /** @brief Helper functor: sort the input based upon elements
804 instead of keys
805 * @param KeyComparator Comparator for the key of values */
806 template<typename KeyComparator>
807 class ValueCompare
808 : public std::binary_function<value_type, value_type, bool>
810 /** @brief Comparator for the key of values */
811 const KeyComparator comp;
813 public:
814 /** @brief Constructor
815 * @param c Comparator for the key of values */
816 ValueCompare(const KeyComparator& c): comp(c) { }
818 /** @brief Operator(): Analogous to comp but for values and not keys
819 * @param v1 First value to compare
820 * @param v2 Second value to compare
821 * @return Result of the comparison */
822 bool operator()(const value_type& v1, const value_type& v2) const
823 { return comp(_KeyOfValue()(v1),_KeyOfValue()(v2)); }
826 /** @brief Helper comparator: compare a key with the key in a node
827 * @param _Comparator Comparator for keys */
828 template<typename _Comparator>
829 struct compare_node_key
831 /** @brief Comparator for keys */
832 const _Comparator& c;
834 /** @brief Constructor
835 * @param _c Comparator for keys */
836 compare_node_key(const _Comparator& _c) : c(_c) { }
838 /** @brief Operator() with the first parameter being a node
839 * @param r Node whose key is to be compared
840 * @param k Key to be compared
841 * @return Result of the comparison */
842 bool operator()(const _Rb_tree_node_ptr r, const key_type& k) const
843 { return c(base_type::_S_key(r),k); }
845 /** @brief Operator() with the second parameter being a node
846 * @param k Key to be compared
847 * @param r Node whose key is to be compared
848 * @return Result of the comparison */
849 bool operator()(const key_type& k, const _Rb_tree_node_ptr r) const
850 { return c(k, base_type::_S_key(r)); }
853 /** @brief Helper comparator: compare a key with the key of a
854 value pointed by an iterator
855 * @param _Comparator Comparator for keys
857 template<typename _Iterator, typename _Comparator>
858 struct compare_value_key
860 /** @brief Comparator for keys */
861 const _Comparator& c;
863 /** @brief Constructor
864 * @param _c Comparator for keys */
865 compare_value_key(const _Comparator& _c) : c(_c){ }
867 /** @brief Operator() with the first parameter being an iterator
868 * @param v Iterator to the value whose key is to be compared
869 * @param k Key to be compared
870 * @return Result of the comparison */
871 bool operator()(const _Iterator& v, const key_type& k) const
872 { return c(_KeyOfValue()(*v),k); }
874 /** @brief Operator() with the second parameter being an iterator
875 * @param k Key to be compared
876 * @param v Iterator to the value whose key is to be compared
877 * @return Result of the comparison */
878 bool operator()(const key_type& k, const _Iterator& v) const
879 { return c(k, _KeyOfValue()(*v)); }
882 /** @brief Helper class of _Rb_tree to avoid some symmetric code
883 in tree operations */
884 struct LeftRight
886 /** @brief Obtain the conceptual left child of a node
887 * @param parent Node whose child must be obtained
888 * @return Reference to the child node */
889 static _Rb_tree_node_base*& left(_Rb_tree_node_base* parent)
890 { return parent->_M_left; }
892 /** @brief Obtain the conceptual right child of a node
893 * @param parent Node whose child must be obtained
894 * @return Reference to the child node */
895 static _Rb_tree_node_base*& right(_Rb_tree_node_base* parent)
896 { return parent->_M_right; }
899 /** @brief Helper class of _Rb_tree to avoid some symmetric code
 900        in tree operations: invert the symmetry
 901      * @param S Symmetry to invert
902 * @sa LeftRight */
903 template<typename S>
904 struct Opposite
906 /** @brief Obtain the conceptual left child of a node, inverting
907 the symmetry
908 * @param parent Node whose child must be obtained
909 * @return Reference to the child node */
910 static _Rb_tree_node_base*& left(_Rb_tree_node_base* parent)
911 { return S::right(parent);}
913 /** @brief Obtain the conceptual right child of a node,
914 inverting the symmetry
915 * @param parent Node whose child must be obtained
916 * @return Reference to the child node */
917 static _Rb_tree_node_base*& right(_Rb_tree_node_base* parent)
918 { return S::left(parent);}
921 /** @brief Inverse symmetry of LeftRight */
922 typedef Opposite<LeftRight> RightLeft;
924 /** @brief Helper comparator to compare value pointers, so that
 925        the pointed-to value is compared
926 * @param Comparator Comparator for values
927 * @param _ValuePtr Pointer to values */
928 template<typename Comparator, typename _ValuePtr>
929 class PtrComparator
930 : public std::binary_function<_ValuePtr, _ValuePtr, bool>
932 /** @brief Comparator for values */
933 Comparator comp;
935 public:
936 /** @brief Constructor
937 * @param comp Comparator for values */
938 PtrComparator(Comparator comp) : comp(comp) { }
940 /** @brief Operator(): compare the values instead of the pointers
941 * @param v1 Pointer to the first element to compare
942 * @param v2 Pointer to the second element to compare */
943 bool operator()(const _ValuePtr& v1, const _ValuePtr& v2) const
944 { return comp(*v1,*v2); }
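  // Conceptual sketch (illustrative): with Comparator == std::less<int> and
  // two pointers p1 -> 2, p2 -> 1, PtrComparator returns comp(*p1, *p2), i.e.
  // false here, so std::stable_sort over the pointer array (as used in
  // _M_not_sorted_bulk_insertion_construction below) permutes only the
  // pointers while the pointed-to values stay in place.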
947 /** @brief Iterator whose elements are pointers
948 * @param value_type Type pointed by the pointers */
949 template<typename _ValueTp>
950 class PtrIterator
952 public:
953 /** @brief The iterator category is random access iterator */
954 typedef typename std::random_access_iterator_tag iterator_category;
955 typedef _ValueTp value_type;
956 typedef size_t difference_type;
957 typedef value_type* ValuePtr;
958 typedef ValuePtr& reference;
959 typedef value_type** pointer;
961 /** @brief Element accessed by the iterator */
962 value_type** ptr;
964 /** @brief Trivial constructor */
965 PtrIterator() { }
967 /** @brief Constructor from an element */
968 PtrIterator(const ValuePtr& __i) : ptr(&__i) { }
970 /** @brief Constructor from a pointer */
971 PtrIterator(const pointer& __i) : ptr(__i) { }
973 /** @brief Copy constructor */
974 PtrIterator(const PtrIterator<value_type>& __i) : ptr(__i.ptr) { }
976 reference
977 operator*() const
978 { return **ptr; }
980 ValuePtr
981 operator->() const
982 { return *ptr; }
984 /** @brief Bidirectional iterator requirement */
985 PtrIterator&
986 operator++()
988 ++ptr;
989 return *this;
992 /** @brief Bidirectional iterator requirement */
993 PtrIterator
994 operator++(int)
995 { return PtrIterator(ptr++); }
997 /** @brief Bidirectional iterator requirement */
998 PtrIterator&
999 operator--()
1001 --ptr;
1002 return *this;
1005 /** @brief Bidirectional iterator requirement */
1006 PtrIterator
1007 operator--(int)
1008 { return PtrIterator(ptr--); }
1010 /** @brief Random access iterator requirement */
1011 reference
1012 operator[](const difference_type& __n) const
1013 { return *ptr[__n]; }
1015 /** @brief Random access iterator requirement */
1016 PtrIterator&
1017 operator+=(const difference_type& __n)
1019 ptr += __n;
1020 return *this;
1023 /** @brief Random access iterator requirement */
1024 PtrIterator
1025 operator+(const difference_type& __n) const
1026 { return PtrIterator(ptr + __n); }
1028 /** @brief Random access iterator requirement */
1029 PtrIterator&
1030 operator-=(const difference_type& __n)
1032 ptr -= __n;
1033 return *this;
1036 /** @brief Random access iterator requirement */
1037 PtrIterator
1038 operator-(const difference_type& __n) const
1039 { return PtrIterator(ptr - __n); }
1041 /** @brief Random access iterator requirement */
1042 difference_type
1043 operator-(const PtrIterator<value_type>& iter) const
1044 { return ptr - iter.ptr; }
1046 /** @brief Random access iterator requirement */
1047 difference_type
1048 operator+(const PtrIterator<value_type>& iter) const
1049 { return ptr + iter.ptr; }
1051 /** @brief Allow assignment of an element ValuePtr to the iterator */
1052 PtrIterator<value_type>& operator=(const ValuePtr sptr)
1054 ptr = &sptr;
1055 return *this;
1058 PtrIterator<value_type>& operator=(const PtrIterator<value_type>& piter)
1060 ptr = piter.ptr;
1061 return *this;
1064 bool operator==(const PtrIterator<value_type>& piter)
1065 { return ptr == piter.ptr; }
1067 bool operator!=(const PtrIterator<value_type>& piter)
1068 { return ptr != piter.ptr; }
1073 /** @brief Bulk insertion helper: synchronization and construction
1074 of the tree bottom up */
1075 struct concat_problem
1077 /** @brief Root of a tree.
1079 * Input: Middle node to concatenate two subtrees. Out: Root of
1080 * the resulting concatenated tree. */
1081 _Rb_tree_node_ptr t;
1083 /** @brief Black height of @c t */
1084 int black_h;
1086 /** @brief Synchronization variable.
1088 * \li READY_YES: the root of the tree can be concatenated with
1089 * the result of the children concatenation problems (both of
1090 * them have finished).
1091        * \li READY_NO: at least one of the child
1092        * concatenation problems has not finished */
1093 int is_ready;
1095 /** @brief Parent concatenation problem to solve when @c
1096 is_ready = READY_YES */
1097 concat_problem* par_problem;
1099 /** @brief Left concatenation problem */
1100 concat_problem* left_problem;
1102 /** @brief Right concatenation problem */
1103 concat_problem* right_problem;
1105 /** @brief Value NO for the synchronization variable. */
1106 static const int READY_NO = 0;
1108 /** @brief Value YES for the synchronization variable. */
1109 static const int READY_YES = 1;
1111 /** @brief Trivial constructor.
1113 * Initialize the synchronization variable to not ready. */
1114 concat_problem(): is_ready(READY_NO) { }
1116 /** @brief Constructor.
1118 * Initialize the synchronization variable to not ready.
1119 * @param _t Root of a tree.
1120 * @param _black_h Black height of @c _t
1121 * @param _par_problem Parent concatenation problem to solve
1122 * when @c is_ready = READY_YES
1124 concat_problem(const _Rb_tree_node_ptr _t, const int _black_h,
1125 concat_problem* _par_problem)
1126 : t(_t), black_h(_black_h), is_ready(READY_NO), par_problem(_par_problem)
1128 // The root of an insertion problem must be black.
1129 if (t != NULL and t->_M_color == std::_S_red)
1131 t->_M_color = std::_S_black;
1132 ++black_h;
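  // Illustrative example: a single-node subtree handed in as a red node with
  // black_h == 0 is recoloured black and reported with black_h == 1, keeping
  // the invariant stated above that the root of every (sub)problem is black.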
1138 /** @brief Bulk insertion helper: insertion of a sequence of
1139 elements in a subtree
1140 @invariant t, pos_beg and pos_end will not change after initialization
1142 struct insertion_problem
1144 /** @brief Renaming of _Rb_tree @c size_type */
1145 typedef _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc> tree_type;
1146 typedef typename tree_type::size_type size_type;
1148 /** @brief Root of the tree where the elements are to be inserted */
1149 _Rb_tree_node_ptr t;
1151 /** @brief Position of the first node in the array of nodes to
1152 be inserted into @c t */
1153 size_type pos_beg;
1155 /** @brief Position of the first node in the array of nodes
1156 that won't be inserted into @c t */
1157 size_type pos_end;
1159 /** @brief Partition in the array of nodes of @c pos_beg and @c
1160 pos_end (must be the same for both, and so gaps are
1161 avoided) */
1162 int array_partition;
1164 /** @brief Concatenation problem to solve once the insertion
1165 problem is finished */
1166 concat_problem* conc;
1168 /** @brief Trivial constructor. */
1169 insertion_problem()
1172 /** @brief Constructor.
1173 * @param b Position of the first node in the array of nodes to
1174 * be inserted into @c _conc->t
1175 * @param e Position of the first node in the array of nodes
1176 * that won't be inserted into @c _conc->t
1177 * @param array_p Partition in the array of nodes of @c b and @c e
1178 * @param _conc Concatenation problem to solve once the
1179 * insertion problem is finished
1181 insertion_problem(const size_type b, const size_type e,
1182 const int array_p, concat_problem* _conc)
1183 : t(_conc->t), pos_beg(b), pos_end(e), array_partition(array_p),
1184 conc(_conc)
1186 _GLIBCXX_PARALLEL_ASSERT(pos_beg <= pos_end);
1188 //The root of an insertion problem must be black!!
1189 _GLIBCXX_PARALLEL_ASSERT(t == NULL or t->_M_color != std::_S_red);
1194 /** @brief Main bulk construction and insertion helper method
1195 * @param __first First element in a sequence to be added into the tree
1196 * @param __last End of the sequence of elements to be added into the tree
1197 * @param is_construction If true, the tree was empty and so, this
1198 * is constructed. Otherwise, the elements are added to an
1199 * existing tree.
1200 * @param strictly_less_or_less_equal Comparator to deal
1201 * transparently with repetitions with respect to the uniqueness
1202 * of the wrapping container
1203 * The input sequence is preprocessed so that the bulk
1204 * construction or insertion can be performed
1205 * efficiently. Essentially, the sequence is checked for
1206      * sortedness and iterators to the middle of the structure are
1207 * saved so that afterwards the sequence can be processed
1208 * effectively in parallel. */
1209 template<typename _InputIterator, typename StrictlyLessOrLessEqual>
1210 void
1211 _M_bulk_insertion_construction(const _InputIterator __first, const _InputIterator __last, const bool is_construction, StrictlyLessOrLessEqual strictly_less_or_less_equal)
1213 thread_index_t num_threads = get_max_threads();
1214 size_type n;
1215 size_type beg_partition[num_threads+1];
1216 _InputIterator access[num_threads+1];
1217 beg_partition[0] = 0;
1218 bool is_sorted= is_sorted_distance_accessors(__first, __last, access, beg_partition,n, num_threads, std::__iterator_category(__first));
1220 if (not is_sorted)
1222 _M_not_sorted_bulk_insertion_construction(access, beg_partition, n, num_threads, is_construction, strictly_less_or_less_equal);
1224 else
1226 // The vector must be moved... all ranges must have at least
1227 // one element, or make just sequential???
1228 if (static_cast<size_type>(num_threads) > n)
1230 int j = 1;
1231 for (int i = 1; i <= num_threads; ++i)
1233 if (beg_partition[j-1] != beg_partition[i])
1235 beg_partition[j] = beg_partition[i];
1236 access[j] = access[i];
1237 ++j;
1240 num_threads = static_cast<thread_index_t>(n);
1243 if (is_construction)
1244 _M_sorted_bulk_construction(access, beg_partition, n, num_threads,
1245 strictly_less_or_less_equal);
1246 else
1247 _M_sorted_bulk_insertion(access, beg_partition, n, num_threads,
1248 strictly_less_or_less_equal);
1252 /** @brief Bulk construction and insertion helper method on an
1253 * input sequence which is not sorted
1255 * The elements are copied, according to the copy policy, in order
1256 * to be sorted. Then the
1257 * _M_not_sorted_bulk_insertion_construction() method is called
1258 * appropriately
1259 * @param access Array of iterators of size @c num_threads +
1260 * 1. Each position contains the first element in each subsequence
1261 * to be added into the tree.
1262 * @param beg_partition Array of positions of size @c num_threads
1263 * + 1. Each position contains the rank of the first element in
1264 * each subsequence to be added into the tree.
1265 * @param n Size of the sequence to be inserted
1266 * @param num_threads Number of threads and corresponding
1267 * subsequences in which the insertion work is going to be shared
1268 * @param is_construction If true, the tree was empty and so, this
1269 * is constructed. Otherwise, the elements are added to an
1270 * existing tree.
1271 * @param strictly_less_or_less_equal Comparator to deal
1272 * transparently with repetitions with respect to the uniqueness
1273 * of the wrapping container
1275 template<typename _InputIterator, typename StrictlyLessOrLessEqual>
1276 void
1277 _M_not_sorted_bulk_insertion_construction(_InputIterator* access,
1278 size_type* beg_partition,
1279 const size_type n,
1280 const thread_index_t num_threads,
1281 const bool is_construction,
1282 StrictlyLessOrLessEqual strictly_less_or_less_equal)
1284 // Copy entire elements. In the case of a map, we would be
1285 // copying the pair. Therefore, the copy should be reconsidered
1286 // when objects are big. Essentially two cases:
1287        // - The key is small: make the pair hold a pointer to the data
1288        //   instead of a copy of it
1289 // - The key is big: we simply have a pointer to the iterator
1290 #if _GLIBCXX_TREE_FULL_COPY
1291 nc_value_type* v = static_cast<nc_value_type*> (::operator new(sizeof(nc_value_type)*(n+1)));
1293 uninitialized_copy_from_accessors(access, beg_partition, v, num_threads);
1295 _M_not_sorted_bulk_insertion_construction<nc_value_type, nc_value_type*, ValueCompare<_Compare> >
1296 (beg_partition, v, ValueCompare<_Compare>(base_type::_M_impl._M_key_compare), n, num_threads, is_construction, strictly_less_or_less_equal);
1297 #else
1298 // For sorting, we cannot use the new PtrIterator because we
1299 // want the pointers to be exchanged and not the elements.
1300 typedef PtrComparator<ValueCompare<_Compare>, nc_value_type*> this_ptr_comparator;
1301 nc_value_type** v = static_cast<nc_value_type**> (::operator new(sizeof(nc_value_type*)*(n+1)));
1303 uninitialized_ptr_copy_from_accessors(access, beg_partition, v, num_threads);
1305 _M_not_sorted_bulk_insertion_construction<nc_value_type*, PtrIterator<nc_value_type>, this_ptr_comparator>
1306 (beg_partition, v, this_ptr_comparator(ValueCompare<_Compare>(base_type::_M_impl._M_key_compare)), n, num_threads, is_construction, strictly_less_or_less_equal);
1307 #endif
1310 /** @brief Bulk construction and insertion helper method on an
1311 * input sequence which is not sorted
1313      * The elements are sorted and their accessors calculated. Then,
1314 * _M_sorted_bulk_construction() or _M_sorted_bulk_insertion() is
1315 * called.
1316 * @param beg_partition Array of positions of size @c num_threads
1317 * + 1. Each position contains the rank of the first element in
1318 * each subsequence to be added into the tree.
1319 * @param v Array of elements to be sorted (copy of the original sequence).
1320 * @param comp Comparator to be used for sorting the elements
1321 * @param n Size of the sequence to be inserted
1322 * @param num_threads Number of threads and corresponding
1323 * subsequences in which the insertion work is going to be shared
1324 * @param is_construction If true, _M_sorted_bulk_construction()
1325 * is called. Otherwise, _M_sorted_bulk_insertion() is called.
1326 * @param strictly_less_or_less_equal Comparator to deal
1327 * transparently with repetitions with respect to the uniqueness
1328 * of the wrapping container
1330 template<typename ElementsToSort, typename IteratorSortedElements, typename Comparator, typename StrictlyLessOrLessEqual>
1331 void
1332 _M_not_sorted_bulk_insertion_construction(size_type* beg_partition, ElementsToSort* v, Comparator comp, const size_type n, thread_index_t num_threads, const bool is_construction, StrictlyLessOrLessEqual strictly_less_or_less_equal)
1334        // The accessors have been calculated for the unsorted sequence.
1335 num_threads = static_cast<thread_index_t>(std::min<size_type>(num_threads, n));
1337 std::stable_sort(v, v+n, comp);
1339 IteratorSortedElements sorted_access[num_threads+1];
1340 range_accessors(IteratorSortedElements(v), IteratorSortedElements(v+n), sorted_access, beg_partition, n, num_threads, std::__iterator_category(v));
1342 // Partial template specialization not available.
1343 if (is_construction)
1344 _M_sorted_bulk_construction(sorted_access, beg_partition, n, num_threads, strictly_less_or_less_equal);
1345 else
1346 _M_sorted_bulk_insertion(sorted_access, beg_partition, n, num_threads, strictly_less_or_less_equal);
1347        ::operator delete(v);
1350 /** @brief Construct a tree sequentially using the parallel routine
1351 * @param r_array Array of nodes from which to take the nodes to
1352 * build the tree
1353 * @param pos_beg Position of the first node in the array of nodes
1354 * to be part of the tree
1355 * @param pos_end Position of the first node in the array of nodes
1356 * that will not be part of the tree
1357 * @param black_h Black height of the resulting tree (out)
1359 static _Rb_tree_node_ptr
1360 simple_tree_construct(_Rb_tree_node_ptr* r_array, const size_type pos_beg,
1361 const size_type pos_end, int& black_h)
1363 if (pos_beg == pos_end)
1365 black_h = 0;
1366 return NULL;
1368 if (pos_beg+1 == pos_end)
1370 // It is needed, not only for efficiency but because the
1371 // last level in our tree construction is red.
1372 make_leaf(r_array[pos_beg], black_h);
1373 return r_array[pos_beg];
1376 // Dummy b_p
1377 size_type b_p[2];
1378 b_p[0] = 0;
1379 b_p[1] = pos_end - pos_beg;
1380 _Rb_tree_node_ptr* r= r_array + pos_beg;
1381 size_type length = pos_end - pos_beg;
1383 ranker_no_gaps rank;
1384 nodes_initializer<ranker_no_gaps> nodes_init(r, length, 1, rank);
1386 black_h = nodes_init.get_height();
1388 size_type split = nodes_init.get_shifted_splitting_point();
1389 for (size_type i = 0; i < split; ++i)
1390 nodes_init.link_complete(r[i],0);
1392 for (size_type i = split; i < length; ++i)
1393 nodes_init.link_incomplete(r[i],0);
1395 _Rb_tree_node_ptr t = nodes_init.get_root();
1396 _GLIBCXX_PARALLEL_ASSERT(rb_verify_tree(t));
1397 _GLIBCXX_PARALLEL_ASSERT(t->_M_color == std::_S_black);
1398 return t;
1402 /** @brief Allocation of an array of nodes and initialization of
1403 their value fields from an input sequence. Done in parallel.
1404 * @param access Array of iterators of size @c num_threads +
1405 * 1. Each position contains the first value in the subsequence to
1406 * be copied into the corresponding tree node.
1407 * @param beg_partition Array of positions of size @c num_threads
1408 * + 1. Each position contains the rank of the first element in
1409 * the subsequence from which to copy the data to initialize the
1410 * nodes.
1411 * @param n Size of the sequence and the array of nodes to be allocated.
1412 * @param num_threads Number of threads and corresponding
1413 * subsequences in which the allocation and initialization work is
1414 * going to be shared
1416 template<typename _Iterator>
1417 _Rb_tree_node_ptr*
1418 _M_unsorted_bulk_allocation_and_initialization(const _Iterator* access, const size_type* beg_partition, const size_type n, const thread_index_t num_threads)
1420 _Rb_tree_node_ptr* r = static_cast<_Rb_tree_node_ptr*> (::operator new (sizeof(_Rb_tree_node_ptr)*(n+1)));
1422 // Allocate and initialize the nodes (don't check for uniqueness
1423        // because the sequence is not necessarily sorted).
1424 #pragma omp parallel num_threads(num_threads)
1426 #if USE_PAPI
1427 PAPI_register_thread();
1428 #endif
1430 int iam = omp_get_thread_num();
1431 _Iterator it = access[iam];
1432 size_type i = beg_partition[iam];
1433 while (it!= access[iam+1])
1435 r[i] = base_type::_M_create_node(*it);
1436 ++i;
1437 ++it;
1440 return r;
1444 /** @brief Allocation of an array of nodes and initialization of
1445 * their value fields from an input sequence. Done in
1446 * parallel. Besides, the sequence is checked for uniqueness while
1447 * copying the elements, and if there are repetitions, gaps within
1448 * the partitions are created.
1450 * An extra ghost node pointer is reserved in the array to ease
1451 * comparisons later while linking the nodes
1452 * @pre The sequence is sorted.
1453 * @param access Array of iterators of size @c num_threads +
1454 * 1. Each position contains the first value in the subsequence to
1455 * be copied into the corresponding tree node.
1456 * @param beg_partition Array of positions of size @c num_threads
1457 * + 1. Each position contains the rank of the first element in
1458 * the subsequence from which to copy the data to initialize the
1459 * nodes.
1460 * @param rank_shift Array of size @c num_threads + 1 containing
1461 * the number of accumulated gaps at the beginning of each
1462 * partition
1463 * @param n Size of the sequence and the array of nodes (-1) to be
1464 * allocated.
1465 * @param num_threads Number of threads and corresponding
1466 * subsequences in which the allocation and initialization work is
1467 * going to be shared
1468 * @param strictly_less_or_less_equal Comparator to deal
1469 * transparently with repetitions with respect to the uniqueness
1470 * of the wrapping container
1472 template<typename _Iterator, typename StrictlyLessOrLessEqual>
1473 _Rb_tree_node_ptr*
1474 _M_sorted_bulk_allocation_and_initialization(_Iterator* access, size_type* beg_partition, size_type* rank_shift, const size_type n, thread_index_t& num_threads, StrictlyLessOrLessEqual strictly_less_or_less_equal)
1476 // Ghost node at the end to avoid extra comparisons in nodes_initializer.
1477 _Rb_tree_node_ptr* r = static_cast<_Rb_tree_node_ptr*> (::operator new (sizeof(_Rb_tree_node_ptr)*(n+1)));
1478 r[n] = NULL;
1480 // Dealing with repetitions (EFFICIENCY ISSUE).
1481 _Iterator access_copy[num_threads+1];
1482 for (int i = 0; i <= num_threads; ++i)
1483 access_copy[i] = access[i];
1484 // Allocate and initialize the nodes
1485 #pragma omp parallel num_threads(num_threads)
1487 #if USE_PAPI
1488 PAPI_register_thread();
1489 #endif
1490 thread_index_t iam = omp_get_thread_num();
1491 _Iterator prev = access[iam];
1492 size_type i = beg_partition[iam];
1493 _Iterator it = prev;
1494 if (iam != 0)
1496 --prev;
1497 // Dealing with repetitions (CORRECTNESS ISSUE).
1498 while (it!= access_copy[iam+1] and not strictly_less_or_less_equal(_KeyOfValue()(*prev), _KeyOfValue()(*it)))
1500 _GLIBCXX_PARALLEL_ASSERT(not base_type::_M_impl._M_key_compare(_KeyOfValue()(*it),_KeyOfValue()(*prev)));
1501 ++it;
1503 access[iam] = it;
1504 if (it != access_copy[iam+1]){
1505 r[i] = base_type::_M_create_node(*it);
1506 ++i;
1507 prev=it;
1508 ++it;
1512 else
1514 r[i] = base_type::_M_create_node(*prev);
1515 ++i;
1516 ++it;
1518 while (it!= access_copy[iam+1])
1520 /***** Dealing with repetitions (CORRECTNESS ISSUE) *****/
1521 if (strictly_less_or_less_equal(_KeyOfValue()(*prev),_KeyOfValue()(*it)))
1523 r[i] = base_type::_M_create_node(*it);
1524 ++i;
1525 prev=it;
1527 else{
1528 _GLIBCXX_PARALLEL_ASSERT(not base_type::_M_impl._M_key_compare(_KeyOfValue()(*it),_KeyOfValue()(*prev)));
1530 ++it;
1532 /***** Dealing with repetitions (EFFICIENCY ISSUE) *****/
1533 rank_shift[iam+1] = beg_partition[iam+1] - i;
1535 /***** Dealing with repetitions (EFFICIENCY ISSUE) *****/
1536 rank_shift[0] = 0;
1537 /* Guarantee that there are no empty intervals.
1538          - If an empty interval is found, it is joined with the previous one
1539 (the rank_shift of the previous is augmented with all the new
1540 repetitions)
1542 thread_index_t i = 1;
1543 while (i <= num_threads and rank_shift[i] != (beg_partition[i] - beg_partition[i-1]))
1545 rank_shift[i] += rank_shift[i-1];
1546 ++i;
1548 if (i <= num_threads)
1550 thread_index_t j = i - 1;
1551 while (true)
1555 rank_shift[j] += rank_shift[i];
1556 ++i;
1557 } while (i <= num_threads and rank_shift[i] == (beg_partition[i] - beg_partition[i-1]));
1559 beg_partition[j] = beg_partition[i-1];
1560 access[j] = access[i-1];
1561 if (i > num_threads) break;
1562 ++j;
1564 // Initialize with the previous.
1565 rank_shift[j] = rank_shift[j-1];
1567 num_threads = j;
1569 return r;
1573 /** @brief Allocation of an array of nodes and initialization of
1574 * their value fields from an input sequence.
1576 * The allocation and initialization is done in parallel. Besides,
1577 * the sequence is checked for uniqueness while copying the
1578 * elements. However, in contrast to
1579 * _M_sorted_bulk_allocation_and_initialization(), if there are
1580 * repetitions, no gaps within the partitions are created. To do
1581 * so efficiently, some extra memory is needed to compute a prefix
1582 * sum.
1583 * @pre The sequence is sorted.
1584 * @param access Array of iterators of size @c num_threads +
1585 * 1. Each position contains the first value in the subsequence to
1586 * be copied into the corresponding tree node.
1587 * @param beg_partition Array of positions of size @c num_threads
1588 * + 1. Each position contains the rank of the first element in
1589 * the subsequence from which to copy the data to initialize the
1590 * nodes.
1591 * @param n Size of the sequence and the array of nodes (-1) to be
1592 * allocated.
1593 * @param num_threads Number of threads and corresponding
1594 * subsequences in which the allocation and initialization work is
1595 * going to be shared
1596 * @param strictly_less_or_less_equal Comparator to deal
1597 * transparently with repetitions with respect to the uniqueness
1598 * of the wrapping container
1600 template<typename _Iterator, typename StrictlyLessOrLessEqual>
1601 _Rb_tree_node_ptr*
1602 _M_sorted_no_gapped_bulk_allocation_and_initialization(_Iterator* access, size_type* beg_partition, size_type& n, const thread_index_t num_threads, StrictlyLessOrLessEqual strictly_less_or_less_equal)
1604 size_type* sums = static_cast<size_type*> (::operator new (sizeof(size_type)*n));
1605 // Allocate and initialize the nodes
1606 /* try
1608 #pragma omp parallel num_threads(num_threads)
1610 #if USE_PAPI
1611 PAPI_register_thread();
1612 #endif
1613 int iam = omp_get_thread_num();
1614 _Iterator prev = access[iam];
1615 size_type i = beg_partition[iam];
1616 _Iterator it = prev;
1617 if (iam !=0)
1619 --prev;
1621          // First iteration here, to update the accessor in case it was
1622 // equal to the last element of the previous range
1624 // Dealing with repetitions (CORRECTNESS ISSUE).
1625 if (strictly_less_or_less_equal(_KeyOfValue()(*prev),_KeyOfValue()(*it)))
1627 sums[i] = 0;
1628 prev=it;
1630 else
1632 sums[i] = 1;
1634 ++i;
1635 ++it;
1637 else
1639 sums[i] = 0;
1640 ++i;
1641 ++it;
1643 while (it!= access[iam+1])
1645 // Dealing with repetitions (CORRECTNESS ISSUE).
1646 if (strictly_less_or_less_equal(_KeyOfValue()(*prev),_KeyOfValue()(*it)))
1648 sums[i] = 0;
1649 prev=it;
1651 else
1652 sums[i] = 1;
1653 ++i;
1654 ++it;
1657 // Should be done in parallel.
1658 partial_sum(sums,sums + n, sums);
1660 n -= sums[n-1];
1661 _Rb_tree_node_ptr* r = static_cast<_Rb_tree_node_ptr*> (::operator new (sizeof(_Rb_tree_node_ptr)*(n+1)));
1662 r[n]=0;
1664 #pragma omp parallel num_threads(num_threads)
1666 #if USE_PAPI
1667 PAPI_register_thread();
1668 #endif
1669 int iam = omp_get_thread_num();
1670 _Iterator it = access[iam];
1671 size_type i = beg_partition[iam];
1672 size_type j = i;
1673 size_type before = 0;
1674 if (iam > 0)
1676 before = sums[i-1];
1677 j -= sums[i-1];
1679 beg_partition[iam] = j;
1680 while (it!= access[iam+1])
1682 while (it!= access[iam+1] and sums[i]!=before)
1684 before = sums[i];
1685 ++i;
1686 ++it;
1688 if (it!= access[iam+1])
1690 r[j] = base_type::_M_create_node(*it);
1691 ++j;
1692 ++i;
1693 ++it;
1698 beg_partition[num_threads] = n;
1700 // Update beginning of partitions.
1701 ::operator delete(sums);
1702 return r;
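// Editorial worked example (not part of the implementation): for the
// sorted input 1 2 2 3 3 3 4 in a unique container, the duplicate
// flags and their prefix sums computed above are
//
//   value   : 1 2 2 3 3 3 4
//   sums[i] : 0 0 1 0 1 1 0   (1 marks a repetition, before partial_sum)
//   prefix  : 0 0 1 1 2 3 3   (after partial_sum)
//
// so n shrinks from 7 to 7 - 3 = 4 and each kept element lands at the
// gap-free position i - prefix[i]: source indices 0, 1, 3 and 6 become
// nodes r[0], r[1], r[2] and r[3].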
1705 /** @brief Main bulk construction method: perform the actual
1706 initialization, allocation and finally node linking once the
1707 input sequence has already been preprocessed.
1708 * @param access Array of iterators of size @c num_threads +
1709 * 1. Each position contains the first value in the subsequence to
1710 * be copied into the corresponding tree node.
1711 * @param beg_partition Array of positions of size @c num_threads
1712 * + 1. Each position contains the rank of the first element in
1713 * the subsequence from which to copy the data to initialize the
1714 * nodes.
1715 * @param n Size of the sequence and the array of nodes (-1) to be
1716 * allocated.
1717 * @param num_threads Number of threads and corresponding
1718 * subsequences in which the work is going to be shared
1719 * @param strictly_less_or_less_equal Comparator to deal
1720 * transparently with repetitions with respect to the uniqueness
1721 * of the wrapping container
1723 template<typename _Iterator, typename StrictlyLessOrLessEqual>
1724 void
1725 _M_sorted_bulk_construction(_Iterator* access, size_type* beg_partition, const size_type n, thread_index_t num_threads, StrictlyLessOrLessEqual strictly_less_or_less_equal)
1727 // Dealing with repetitions (EFFICIENCY ISSUE).
1728 size_type rank_shift[num_threads+1];
1730 _Rb_tree_node_ptr* r = _M_sorted_bulk_allocation_and_initialization(access, beg_partition, rank_shift, n, num_threads, strictly_less_or_less_equal);
1732 // Link the tree appropriately.
1733 // Dealing with repetitions (EFFICIENCY ISSUE).
1734 ranker_gaps rank(beg_partition, rank_shift, num_threads);
1735 nodes_initializer<ranker_gaps> nodes_init(r, n - rank_shift[num_threads], num_threads, rank);
1736 size_type split = nodes_init.get_shifted_splitting_point();
1738 #pragma omp parallel num_threads(num_threads)
1740 #if USE_PAPI
1741 PAPI_register_thread();
1742 #endif
1743 int iam = omp_get_thread_num();
1744 size_type beg = beg_partition[iam];
1745 // Dealing with repetitions (EFFICIENCY ISSUE).
1746 size_type end = beg_partition[iam+1] - (rank_shift[iam+1] - rank_shift[iam]);
1747 if (split >= end)
1749 for (size_type i = beg; i < end; ++i)
1751 nodes_init.link_complete(r[i],iam);
1754 else
1756 if (split <= beg)
1758 for (size_type i = beg; i < end; ++i)
1759 nodes_init.link_incomplete(r[i],iam);
1761 else
1763 for (size_type i = beg; i < split; ++i)
1764 nodes_init.link_complete(r[i],iam);
1765 for (size_type i = split; i < end; ++i)
1766 nodes_init.link_incomplete(r[i],iam);
1770 // If the execution reaches this point, there has been no
1771 // exception, and so the structure can be initialized.
1773 // Join the tree laid on the array of ptrs with the header node.
1774 // Dealing with repetitions (EFFICIENCY ISSUE).
1775 base_type::_M_impl._M_node_count = n - rank_shift[num_threads];
1776 base_type::_M_impl._M_header._M_left = r[0];
1777 thread_index_t with_element = num_threads;
1778 while ((beg_partition[with_element] - beg_partition[with_element-1]) == (rank_shift[with_element] - rank_shift[with_element-1]))
1780 --with_element;
1782 base_type::_M_impl._M_header._M_right = r[beg_partition[with_element] - (rank_shift[with_element] - rank_shift[with_element-1]) - 1];
1783 base_type::_M_impl._M_header._M_parent = nodes_init.get_root();
1784 nodes_init.get_root()->_M_parent= &base_type::_M_impl._M_header;
1786 ::operator delete(r);
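// Editorial worked example (not part of the implementation) for the
// header wiring above: with num_threads == 3, n == 12,
// beg_partition == {0, 4, 8, 12} and rank_shift == {0, 1, 1, 3}, the
// partitions hold 3, 4 and 2 real nodes (gaps trail at the end of each
// partition), so the node count becomes 12 - rank_shift[3] == 9, the
// last partition with an element is 3, and the rightmost node is
// r[12 - (3 - 1) - 1] == r[9].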
1790 /** @brief Main bulk insertion method: perform the actual
1791 initialization, allocation and finally insertion once the
1792 input sequence has already been preprocessed.
1793 * @param access Array of iterators of size @c num_threads +
1794 * 1. Each position contains the first value in the subsequence to
1795 * be copied into the corresponding tree node.
1796 * @param beg_partition Array of positions of size @c num_threads
1797 * + 1. Each position contains the rank of the first element in
1798 * the subsequence from which to copy the data to initialize the
1799 * nodes.
1800 * @param k Size of the sequence to be inserted (including the
1801 * possible repeated elements among the sequence itself and
1802 * against those elements already in the tree)
1803 * @param num_threads Number of threads and corresponding
1804 * subsequences in which the work is going to be shared
1805 * @param strictly_less_or_less_equal Comparator to deal
1806 * transparently with repetitions with respect to the uniqueness
1807 * of the wrapping container
1809 template<typename _Iterator, typename StrictlyLessOrLessEqual>
1810 void
1811 _M_sorted_bulk_insertion(_Iterator* access, size_type* beg_partition, size_type k, thread_index_t num_threads, StrictlyLessOrLessEqual strictly_less_or_less_equal)
1813 _GLIBCXX_PARALLEL_ASSERT((size_type)num_threads <= k);
1814 // num_thr-1 problems in the upper part of the tree
1815 // num_thr problems to further parallelize
1816 std::vector<size_type> existing(num_threads,0);
1817 #if _GLIBCXX_TREE_INITIAL_SPLITTING
1818 /***** Dealing with repetitions (EFFICIENCY ISSUE) *****/
1819 size_type rank_shift[num_threads+1];
1821 // Need to create them dynamically because they are deleted later.
1822 concat_problem* conc[2*num_threads-1];
1823 #endif
1824 _Rb_tree_node_ptr* r;
1825 /***** Dealing with repetitions (EFFICIENCY ISSUE) *****/
1826 if (not strictly_less_or_less_equal(base_type::_S_key(base_type::_M_root()),base_type::_S_key(base_type::_M_root()) ))
1828 // Unique container
1829 // Set 1 and 2 could be done in parallel ...
1830 // 1. Construct the nodes with their corresponding data
1831 #if _GLIBCXX_TREE_INITIAL_SPLITTING
1832 r = _M_sorted_bulk_allocation_and_initialization(access, beg_partition, rank_shift, k, num_threads, strictly_less_or_less_equal);
1833 #else
1834 r = _M_sorted_no_gapped_bulk_allocation_and_initialization(access, beg_partition, k, num_threads, strictly_less_or_less_equal);
1835 #endif
1837 else
1839 // Not unique container.
1840 r = _M_unsorted_bulk_allocation_and_initialization(access, beg_partition, k, num_threads);
1841 #if _GLIBCXX_TREE_INITIAL_SPLITTING
1842 // Trivial initialization of rank_shift.
1843 for (int i=0; i <= num_threads; ++i)
1844 rank_shift[i] = 0;
1845 #endif
1847 #if _GLIBCXX_TREE_INITIAL_SPLITTING
1848 // Calculate position of last element to be inserted: must be
1849 // done now, otherwise it becomes messy.
1851 /***** Dealing with repetitions (EFFICIENCY ISSUE) *****/
1853 size_type last = beg_partition[num_threads] - (rank_shift[num_threads] - rank_shift[num_threads - 1]);
1855 // 2. Split the tree according to access into num_threads parts.
1856 // Initialize the upper concat_problems.
1857 // Allocate them dynamically because they are deleted afterwards.
1858 for (int i=0; i < (2*num_threads-1); ++i)
1860 conc[i] = new concat_problem ();
1862 concat_problem* root_problem = _M_bulk_insertion_initialize_upper_problems(conc, 0, num_threads, NULL);
1864 // The first position of access and the last are ignored, so we
1865 // have exactly num_threads subtrees.
1866 bool before = omp_get_nested();
1867 omp_set_nested(true);
1868 _M_bulk_insertion_split_tree_by_pivot(static_cast<_Rb_tree_node_ptr>(base_type::_M_root()), r, access, beg_partition, rank_shift, 0, num_threads-1, conc, num_threads, strictly_less_or_less_equal);
1869 omp_set_nested(before);
1871 // Construct the upper tree with the first elements of the ranges if
1872 // they are NULL. We cannot do this by default because they could
1873 // be repeated and would not be checked.
1874 size_type r_s = 0;
1875 for (int pos = 1; pos < num_threads; ++pos)
1877 _GLIBCXX_PARALLEL_ASSERT(conc[(pos-1)*2]->t == NULL or conc[pos*2-1]->t == NULL or strictly_less_or_less_equal(base_type::_S_key(base_type::_S_maximum(conc[(pos-1)*2]->t)), base_type::_S_key(conc[pos*2-1]->t)));
1878 _GLIBCXX_PARALLEL_ASSERT(conc[pos*2]->t == NULL or conc[pos*2-1]->t == NULL or strictly_less_or_less_equal( base_type::_S_key(conc[pos*2-1]->t), base_type::_S_key(base_type::_S_minimum(conc[pos*2]->t))));
1879 /***** Dealing with repetitions (CORRECTNESS ISSUE) *****/
1881 // The first element of the range is the root.
1882 if (conc[pos*2-1]->t == NULL or (not(strictly_less_or_less_equal(base_type::_S_key(static_cast<_Rb_tree_node_ptr>(conc[pos*2-1]->t)), _KeyOfValue()(*access[pos])))))
1884 // There was no candidate element,
1885 // or
1886 // there exists an initialized position in the array which
1887 // corresponds to conc[pos*2-1]->t.
1888 if (conc[pos*2-1]->t == NULL)
1890 size_t np = beg_partition[pos];
1891 _GLIBCXX_PARALLEL_ASSERT(conc[(pos-1)*2]->t == NULL or strictly_less_or_less_equal(base_type::_S_key(base_type::_S_maximum(conc[(pos-1)*2]->t)), base_type::_S_key(r[np])));
1892 _GLIBCXX_PARALLEL_ASSERT(conc[pos*2]->t == NULL or strictly_less_or_less_equal( base_type::_S_key(r[np]), base_type::_S_key(base_type::_S_minimum(conc[pos*2]->t))));
1893 conc[pos*2-1]->t = r[np];
1894 r[np]->_M_color = std::_S_black;
1895 ++base_type::_M_impl._M_node_count;
1897 else
1899 base_type::_M_destroy_node(r[beg_partition[pos]]);
1901 ++(access[pos]);
1902 ++(beg_partition[pos]);
1903 ++r_s;
1905 _GLIBCXX_PARALLEL_ASSERT(conc[(pos-1)*2]->t == NULL or conc[(pos-1)*2]->t->_M_color == std::_S_black);
1906 /***** Dealing with repetitions (EFFICIENCY ISSUE) *****/
1907 rank_shift[pos] += r_s;
1909 /***** Dealing with repetitions (EFFICIENCY ISSUE) *****/
1910 rank_shift[num_threads] += r_s;
1911 #else
1912 concat_problem root_problem_on_stack(static_cast<_Rb_tree_node_ptr>(base_type::_M_root()), black_height(static_cast<_Rb_tree_node_ptr>(base_type::_M_root())), NULL);
1913 concat_problem * root_problem = &root_problem_on_stack;
1914 size_type last = k;
1915 #endif
1917 // 3. Split the range according to the tree and create
1918 // insertion/concatenation problems to be solved in parallel.
1919 #if _GLIBCXX_TREE_DYNAMIC_BALANCING
1920 size_type min_problem = (k/num_threads) / (log2(k/num_threads + 1)+1);
1921 #else
1922 size_type min_problem = base_type::size() + k;
1923 #endif
1925 RestrictedBoundedConcurrentQueue<insertion_problem>* ins_problems[num_threads];
1927 #pragma omp parallel num_threads(num_threads)
1929 int num_thread = omp_get_thread_num();
1930 ins_problems[num_thread] = new RestrictedBoundedConcurrentQueue<insertion_problem>(2*(log2(base_type::size())+1));
1931 #if _GLIBCXX_TREE_INITIAL_SPLITTING
1932 /***** Dealing with repetitions (EFFICIENCY ISSUE) *****/
1933 size_type end_k_thread = beg_partition[num_thread+1] - (rank_shift[num_thread+1] - rank_shift[num_thread]);
1934 ins_problems[num_thread]->push_front(insertion_problem(beg_partition[num_thread], end_k_thread, num_thread, conc[num_thread*2]));
1935 #else
1936 // size_type end_k_thread = beg_partition[num_thread+1];
1937 #endif
1938 insertion_problem ip_to_solve;
1939 bool change;
1941 #if _GLIBCXX_TREE_INITIAL_SPLITTING
1942 #pragma omp barrier
1943 #else
1944 #pragma omp single
1945 ins_problems[num_thread]->push_front(insertion_problem(0, k, num_thread, root_problem));
1946 #endif
1950 // First do own work.
1951 while (ins_problems[num_thread]->pop_front(ip_to_solve))
1953 _GLIBCXX_PARALLEL_ASSERT(ip_to_solve.pos_beg <= ip_to_solve.pos_end);
1954 _M_bulk_insertion_split_sequence(r, ins_problems[num_thread], ip_to_solve, existing[num_thread], min_problem, strictly_less_or_less_equal);
1957 yield();
1958 change = false;
1960 // Then, try to steal from others (and make the stolen problem our own).
1961 for (int i=1; i<num_threads; ++i)
1963 if (ins_problems[(num_thread+i)%num_threads]->pop_back(ip_to_solve))
1965 change = true;
1966 _M_bulk_insertion_split_sequence(r, ins_problems[num_thread], ip_to_solve, existing[num_thread], min_problem, strictly_less_or_less_equal);
1967 break;
1970 } while (change);
1973 // Update root and sizes.
1974 base_type::_M_root() = root_problem->t;
1975 root_problem->t->_M_parent = &(base_type::_M_impl._M_header);
1976 /***** Dealing with repetitions (EFFICIENCY ISSUE) *****/
1978 // Add the k elements that wanted to be inserted, minus the ones
1979 // that were repeated.
1980 #if _GLIBCXX_TREE_INITIAL_SPLITTING
1981 base_type::_M_impl._M_node_count += (k - (rank_shift[num_threads]));
1982 #else
1983 base_type::_M_impl._M_node_count += k;
1984 #endif
1985 // Then, take out the ones that already existed in the tree.
1986 for (int i = 0; i< num_threads; ++i)
1988 base_type::_M_impl._M_node_count -= existing[i];
1990 // Update leftmost and rightmost.
1991 /***** Dealing with repetitions (EFFICIENCY ISSUE) *****/
1992 if (not strictly_less_or_less_equal(base_type::_S_key(base_type::_M_root()), base_type::_S_key(base_type::_M_root()))){
1993 // Unique container.
1994 if (base_type::_M_impl._M_key_compare(_KeyOfValue()(*(access[0])), base_type::_S_key(base_type::_M_leftmost())))
1995 base_type::_M_leftmost() = r[0];
1996 if (base_type::_M_impl._M_key_compare(base_type::_S_key(base_type::_M_rightmost()), _KeyOfValue()(*(--access[num_threads]))))
1997 base_type::_M_rightmost() = r[last - 1];
1999 else{
2000 if (strictly_less_or_less_equal(_KeyOfValue()(*(access[0])), base_type::_S_key(base_type::_M_leftmost())))
2001 base_type::_M_leftmost() = base_type::_S_minimum(base_type::_M_root());
2002 if (strictly_less_or_less_equal(base_type::_S_key(base_type::_M_rightmost()), _KeyOfValue()(*(--access[num_threads]))))
2003 base_type::_M_rightmost() = base_type::_S_maximum(base_type::_M_root());
2009 #if _GLIBCXX_TREE_INITIAL_SPLITTING
2010 // Delete root problem
2011 delete root_problem;
2012 #endif
2014 // Delete queues
2015 for (int pos = 0; pos < num_threads; ++pos)
2017 delete ins_problems[pos];
2020 // Delete array of pointers
2021 ::operator delete(r);
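// Editorial sketch (not part of the implementation) of the
// work-stealing loop above, with shortened names: each thread first
// drains its own queue from the front, then tries to steal from the
// back of the other threads' queues.
//
//   do
//     {
//       while (queues[me]->pop_front(p))
//         solve_or_split(p);                 // own work first
//       yield();
//       change = false;
//       for (int i = 1; i < num_threads; ++i)
//         if (queues[(me + i) % num_threads]->pop_back(p))
//           {
//             change = true;                 // stolen problem
//             solve_or_split(p);
//             break;
//           }
//     }
//   while (change);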
2025 /** @brief Divide a tree according to the splitter elements of a
2026 * given sequence.
2028 * The tree of the initial recursive call is divided into exactly
2029 * num_threads partitions, some of which may be empty. In addition,
2030 * some nodes may be extracted from it so that the subtrees that
2031 * result from inserting the elements into it can be concatenated
2032 * afterwards. This is done sequentially; it could be done in
2033 * parallel, but the performance is much worse.
2034 * @param t Root of the tree to be split
2035 * @param r Array of nodes to be inserted into the tree (here only
2036 * used to look up its elements)
2037 * @param access Array of iterators of size @c num_threads +
2038 * 1. Each position contains the first value in the subsequence
2039 * that has been copied into the corresponding tree node.
2040 * @param beg_partition Array of positions of size @c num_threads
2041 * + 1. Each position contains the rank of the first element in
2042 * the array of nodes to be inserted.
2043 * @param rank_shift Array of size @c num_threads + 1 containing
2044 * the number of accumulated gaps at the beginning of each
2045 * partition
2046 * @param pos_beg First position in the access array to be
2047 * considered to split @c t
2048 * @param pos_end Last position (included) in the access array to
2049 * be considered to split @c t
2050 * @param conc Array of concatenation problems to be initialized
2051 * @param num_threads Number of threads and corresponding
2052 * subsequences in which the original sequence has been
2053 * partitioned
2054 * @param strictly_less_or_less_equal Comparator to deal
2055 * transparently with repetitions with respect to the uniqueness
2056 * of the wrapping container
2058 template<typename _Iterator, typename StrictlyLessOrLessEqual>
2059 void
2060 _M_bulk_insertion_split_tree_by_pivot(_Rb_tree_node_ptr t, _Rb_tree_node_ptr* r, _Iterator* access, size_type* beg_partition, size_type* rank_shift, const size_type pos_beg, const size_type pos_end, concat_problem** conc, const thread_index_t num_threads, StrictlyLessOrLessEqual strictly_less_or_less_equal)
2062 if (pos_beg == pos_end)
2064 //Elements are in [pos_beg, pos_end]
2065 conc[pos_beg*2]->t = t;
2066 conc[pos_beg*2]->black_h = black_height(t);
2067 force_black_root (conc[pos_beg*2]->t, conc[pos_beg*2]->black_h);
2068 return;
2070 if (t == 0)
2072 for (size_type i = pos_beg; i < pos_end; ++i)
2074 conc[i*2]->t = NULL;
2075 conc[i*2]->black_h = 0;
2076 conc[i*2+1]->t = NULL;
2078 conc[pos_end*2]->t = NULL;
2079 conc[pos_end*2]->black_h = 0;
2080 return;
2083 // Find the last pos in [pos_beg, pos_end] such that the first key of
2084 // partition pos is not greater than key(t) (pos_beg if none is).
2085 size_type pos = std::upper_bound(access + pos_beg, access + pos_end + 1, base_type::_S_key(t), compare_value_key<_Iterator, _Compare>(base_type::_M_impl._M_key_compare)) - access;
2086 if (pos != pos_beg)
2088 --pos;
2090 _GLIBCXX_PARALLEL_ASSERT(pos == 0 or not base_type::_M_impl._M_key_compare(base_type::_S_key(t), _KeyOfValue()(*access[pos])));
2093 _Rb_tree_node_ptr ll, lr;
2094 int black_h_ll, black_h_lr;
2095 _Rb_tree_node_ptr rl, rr;
2096 int black_h_rl, black_h_rr;
2098 if (pos != pos_beg)
2100 _Rb_tree_node_ptr prev = r[beg_partition[pos] - 1 - (rank_shift[pos] - rank_shift[pos - 1])];
2102 _GLIBCXX_PARALLEL_ASSERT(strictly_less_or_less_equal(base_type::_S_key(prev), _KeyOfValue()(*access[pos])));
2104 split(static_cast<_Rb_tree_node_ptr>(t->_M_left),
2105 static_cast<const key_type&>(_KeyOfValue()(*access[pos])),
2106 static_cast<const key_type&>(base_type::_S_key(prev)),
2107 conc[pos*2-1]->t, ll, lr, black_h_ll, black_h_lr,
2108 strictly_less_or_less_equal);
2110 _M_bulk_insertion_split_tree_by_pivot(ll, r, access, beg_partition, rank_shift, pos_beg, pos-1, conc,num_threads, strictly_less_or_less_equal);
2112 else
2114 lr = static_cast<_Rb_tree_node_ptr>(t->_M_left);
2115 black_h_lr = black_height (lr);
2116 force_black_root (lr, black_h_lr);
2119 if (pos != pos_end)
2121 _Rb_tree_node_ptr prev = r[beg_partition[pos+1] - 1 - (rank_shift[pos+1] - rank_shift[pos])];
2123 _GLIBCXX_PARALLEL_ASSERT(not base_type::_M_impl._M_key_compare(_KeyOfValue()(*access[pos+1]), base_type::_S_key(prev)));
2124 _GLIBCXX_PARALLEL_ASSERT(strictly_less_or_less_equal(base_type::_S_key(prev), _KeyOfValue()(*access[pos+1])));
2126 split(static_cast<_Rb_tree_node_ptr>(t->_M_right),
2127 static_cast<const key_type&>(_KeyOfValue()(*access[pos+1])),
2128 static_cast<const key_type&>(base_type::_S_key(prev)),
2129 conc[pos*2+1]->t, rl, rr, black_h_rl, black_h_rr,
2130 strictly_less_or_less_equal);
2132 _M_bulk_insertion_split_tree_by_pivot(rr, r, access, beg_partition, rank_shift, pos+1, pos_end, conc,num_threads, strictly_less_or_less_equal);
2134 else
2136 rl = static_cast<_Rb_tree_node_ptr>(t->_M_right);
2137 black_h_rl = black_height (rl);
2138 force_black_root (rl, black_h_rl);
2141 // When key(t) is equal to key(access[pos]) and no other key in
2142 // the left tree satisfies the criteria to be conc[pos*2-1]->t,
2143 // key(t) must be assigned to it to avoid repetitions.
2144 // Therefore, we do not have a root parameter for the
2145 // concatenate function and a new concatenate function must be
2146 // provided.
2147 if (pos != pos_beg and conc[pos*2-1]->t == NULL and not strictly_less_or_less_equal(_KeyOfValue()(*access[pos]), base_type::_S_key(t)))
2149 conc[pos*2-1]->t = t;
2150 t = NULL;
2152 concatenate(t, lr, rl, black_h_lr, black_h_rl, conc[pos*2]->t, conc[pos*2]->black_h);
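// Editorial worked example (not part of the implementation) for the
// pivot search above: if the first keys of the partitions in
// access[pos_beg..pos_end] are 10 20 30 40 and key(t) == 25,
// std::upper_bound returns the partition starting with 30 and the
// decrement selects the one starting with 20, i.e. the last partition
// whose first key does not exceed key(t).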
2155 /** @brief Divide the insertion problem until a leaf is reached or
2156 * the problem is small.
2158 * During the recursion, the right subproblem is queued, so that
2159 * it can be handled by any thread. The left subproblem is
2160 * divided recursively, and finally, solved right away
2161 * sequentially.
2162 * @param r Array of nodes containing the nodes to be added into the tree
2163 * @param ins_problems Pointer to a queue of insertion
2164 * problems. The calling thread owns this queue, i. e. it is the
2165 * only one to push elements, but other threads could pop elements
2166 * from it in other methods.
2167 * @param ip Current insertion problem to be solved
2168 * @param existing Number of existing elements found when solving
2169 * the insertion problem (out)
2170 * @param min_problem Threshold size on the size of the insertion
2171 * problem in which to stop recursion
2172 * @param strictly_less_or_less_equal Comparator to deal
2173 * transparently with repetitions with respect to the uniqueness
2174 * of the wrapping container
2176 template<typename StrictlyLessOrLessEqual>
2177 void
2178 _M_bulk_insertion_split_sequence(_Rb_tree_node_ptr* r, RestrictedBoundedConcurrentQueue<insertion_problem>* ins_problems, insertion_problem& ip, size_type& existing, const size_type min_problem, StrictlyLessOrLessEqual strictly_less_or_less_equal)
2180 _GLIBCXX_PARALLEL_ASSERT(ip.t == ip.conc->t);
2181 if (ip.t == NULL or (ip.pos_end- ip.pos_beg) <= min_problem)
2183 // SOLVE PROBLEM SEQUENTIALLY
2184 // Start solving the problem.
2185 _GLIBCXX_PARALLEL_ASSERT(ip.pos_beg <= ip.pos_end);
2186 _M_bulk_insertion_merge_concatenate(r, ip, existing, strictly_less_or_less_equal);
2187 return;
2190 size_type pos_beg_right;
2191 size_type pos_end_left = divide(r, ip.pos_beg, ip.pos_end, base_type::_S_key(ip.t), pos_beg_right, existing, strictly_less_or_less_equal);
2193 int black_h_l, black_h_r;
2194 if (ip.t->_M_color == std::_S_black)
2196 black_h_l = black_h_r = ip.conc->black_h - 1;
2198 else
2200 black_h_l = black_h_r = ip.conc->black_h;
2203 // Right problem into the queue.
2204 ip.conc->right_problem = new concat_problem(static_cast<_Rb_tree_node_ptr>(ip.t->_M_right), black_h_r, ip.conc);
2205 ip.conc->left_problem = new concat_problem(static_cast<_Rb_tree_node_ptr>(ip.t->_M_left), black_h_l, ip.conc);
2207 ins_problems->push_front(insertion_problem(pos_beg_right, ip.pos_end, ip.array_partition, ip.conc->right_problem));
2209 // Solve left problem.
2210 insertion_problem ip_left(ip.pos_beg, pos_end_left, ip.array_partition, ip.conc->left_problem);
2211 _M_bulk_insertion_split_sequence(r, ins_problems, ip_left, existing, min_problem, strictly_less_or_less_equal);
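// Editorial sketch (not part of the implementation) of the recursion
// pattern above: the right half of every split is published on the
// owner's queue so that idle threads may steal it, while the calling
// thread keeps narrowing the left half until it is small enough to be
// merged sequentially.
//
//   solve(ip):
//     if ip.t is a leaf or ip is small
//       _M_bulk_insertion_merge_concatenate(ip)   // sequential
//     else
//       split ip at key(ip.t) into ip_left and ip_right
//       own_queue->push_front(ip_right)           // may be stolen
//       solve(ip_left)                            // continue locally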
2215 /** @brief Insert a sequence of elements into a tree using a
2216 * divide-and-conquer scheme.
2218 * The problem is solved recursively and sequentially dividing the
2219 * sequence to be inserted according to the root of the tree. This
2220 * is done until a leaf is reached or the proportion of elements
2221 * to be inserted is small. Finally, the two resulting trees are
2222 * concatenated.
2223 * @param r_array Array of nodes containing the nodes to be added
2224 * into the tree (among others)
2225 * @param t Root of the tree
2226 * @param pos_beg Position of the first node in the array of
2227 * nodes to be inserted into the tree
2228 * @param pos_end Position of the first node in the array of
2229 * nodes that will not be inserted into the tree
2230 * @param existing Number of existing elements found while
2231 * inserting the range [@c pos_beg, @c pos_end) (out)
2232 * @param black_h Height of the tree @c t and of the resulting
2233 * tree after the recursive calls (in and out)
2234 * @param strictly_less_or_less_equal Comparator to deal
2235 * transparently with repetitions with respect to the uniqueness
2236 * of the wrapping container
2237 * @return Resulting tree after the elements have been inserted
2239 template<typename StrictlyLessOrLessEqual>
2240 _Rb_tree_node_ptr
2241 _M_bulk_insertion_merge(_Rb_tree_node_ptr* r_array, _Rb_tree_node_ptr t, const size_type pos_beg, const size_type pos_end, size_type& existing, int& black_h, StrictlyLessOrLessEqual strictly_less_or_less_equal)
2243 #ifndef NDEBUG
2244 int count;
2245 #endif
2246 _GLIBCXX_PARALLEL_ASSERT(pos_beg<=pos_end);
2248 // Leaf: a tree with the range must be constructed. Returns its
2249 // height in black nodes and its root (in ip.t). If there is
2250 // nothing to insert, we still need the height for balancing.
2251 if (t == NULL)
2253 if (pos_end == pos_beg) return NULL;
2254 t = simple_tree_construct(r_array,pos_beg, pos_end, black_h);
2255 _GLIBCXX_PARALLEL_ASSERT(rb_verify_tree(t,count));
2256 return t;
2258 if (pos_end == pos_beg)
2259 return t;
2260 if ((pos_end - pos_beg) <= (size_type)(black_h))
2262 // The tree is of size exponential in the number of elements
2263 // to be inserted, so insert them one by one.
2264 for (size_type p = pos_beg; p < pos_end; ++p)
2266 t = _M_insert_local(t, r_array[p], existing, black_h, strictly_less_or_less_equal);
2268 _GLIBCXX_PARALLEL_ASSERT(rb_verify_tree(t,count));
2269 return t;
2272 size_type pos_beg_right;
2273 size_type pos_end_left = divide(r_array, pos_beg, pos_end, base_type::_S_key(t), pos_beg_right, existing, strictly_less_or_less_equal);
2276 int black_h_l, black_h_r;
2277 if (t->_M_color == std::_S_black)
2279 black_h_l = black_h_r = black_h - 1;
2281 else
2283 black_h_l = black_h_r = black_h;
2285 force_black_root(t->_M_left, black_h_l);
2286 _Rb_tree_node_ptr l = _M_bulk_insertion_merge(r_array, static_cast<_Rb_tree_node_ptr>(t->_M_left), pos_beg, pos_end_left, existing, black_h_l, strictly_less_or_less_equal);
2287 force_black_root(t->_M_right, black_h_r);
2288 _Rb_tree_node_ptr r = _M_bulk_insertion_merge(r_array, static_cast<_Rb_tree_node_ptr>(t->_M_right), pos_beg_right, pos_end, existing, black_h_r, strictly_less_or_less_equal);
2290 concatenate(t, l, r, black_h_l, black_h_r, t, black_h);
2292 return t;
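// Editorial note on the threshold above (not part of the
// implementation): a red-black subtree of black height black_h has at
// least 2^black_h - 1 nodes, so black_h <= log2(n + 1). For example, a
// subtree with 1023 nodes has black height at most 10, so batches of
// at most black_h (here <= 10) elements take the element-by-element
// _M_insert_local path instead of being split further.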
2295 /** @brief Solve a given insertion problem and all the parent
2296 * concatenation problems that are ready to be solved.
2298 * First, solve an insertion problem.
2300 * Then, check if it is possible to solve the parent
2301 * concatenation problem. If this is the case, solve it and go
2302 * up recursively, as far as possible. Quit otherwise.
2304 * @param r Array of nodes containing the nodes to be added into
2305 * the tree (among others)
2306 * @param ip Insertion problem to solve initially.
2307 * @param existing Number of existing elements found while
2308 * inserting the range defined by the insertion problem (out)
2309 * @param strictly_less_or_less_equal Comparator to deal
2310 * transparently with repetitions with respect to the uniqueness
2311 * of the wrapping container
2313 template<typename StrictlyLessOrLessEqual>
2314 void
2315 _M_bulk_insertion_merge_concatenate(_Rb_tree_node_ptr* r, insertion_problem& ip, size_type& existing, StrictlyLessOrLessEqual strictly_less_or_less_equal)
2317 concat_problem* conc = ip.conc;
2318 _GLIBCXX_PARALLEL_ASSERT(ip.pos_beg <= ip.pos_end);
2320 conc->t = _M_bulk_insertion_merge(r, ip.t, ip.pos_beg, ip.pos_end, existing, conc->black_h, strictly_less_or_less_equal);
2321 _GLIBCXX_PARALLEL_ASSERT(conc->t == NULL or conc->t->_M_color == std::_S_black);
2323 bool is_ready = true;
2324 while (conc->par_problem != NULL and is_ready)
2326 // Pre: both the left and the right problem exist, so there is no deadlock.
2327 if (compare_and_swap(&conc->par_problem->is_ready, concat_problem::READY_NO, concat_problem::READY_YES))
2328 is_ready = false;
2330 if (is_ready)
2332 conc = conc->par_problem;
2333 _GLIBCXX_PARALLEL_ASSERT(conc->left_problem!=NULL and conc->right_problem!=NULL);
2334 _GLIBCXX_PARALLEL_ASSERT (conc->left_problem->black_h >=0 and conc->right_problem->black_h>=0);
2335 // Finished working with the problems.
2336 concatenate(conc->t, conc->left_problem->t, conc->right_problem->t, conc->left_problem->black_h, conc->right_problem->black_h, conc->t, conc->black_h);
2338 delete conc->left_problem;
2339 delete conc->right_problem;
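// Editorial sketch (not part of the implementation, names shortened)
// of the readiness handshake above: the two children of a
// concat_problem race on the parent's is_ready flag.  The first
// finisher flips it and stops; the second finisher's compare_and_swap
// fails, which proves both halves are done, so it performs the parent
// concatenation and repeats one level up.
//
//   if (compare_and_swap(&parent->is_ready,
//                        concat_problem::READY_NO,
//                        concat_problem::READY_YES))
//     return;   // sibling still working; it will concatenate later
//   // both subproblems solved: concatenate left and right into parent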
2344 // Beginning of sorting, searching and related comparison-based helper methods.
2346 /** @brief Check whether a random-access sequence is sorted, and
2347 * calculate its size.
2349 * @param __first Begin iterator of sequence.
2350 * @param __last End iterator of sequence.
2351 * @param dist Size of the sequence (out)
2352 * @return Whether the sequence is sorted. */
2353 template<typename _RandomAccessIterator>
2354 bool
2355 is_sorted_distance(const _RandomAccessIterator __first, const _RandomAccessIterator __last, size_type& dist, std::random_access_iterator_tag) const
2357 gr_or_eq<_Compare, _RandomAccessIterator> geq(base_type::_M_impl._M_key_compare);
2358 dist = __last - __first;
2360 // In parallel.
2361 return equal(__first + 1, __last, __first, geq);
2364 /** @brief Check whether an input sequence is sorted, and
2365 * calculate its size.
2367 * The list partitioning tool is used so that all the work is
2368 * done in only one traversal.
2369 * @param __first Begin iterator of sequence.
2370 * @param __last End iterator of sequence.
2371 * @param dist Size of the sequence (out)
2372 * @return Whether the sequence is sorted. */
2373 template<typename _InputIterator>
2374 bool
2375 is_sorted_distance(const _InputIterator __first, const _InputIterator __last, size_type& dist, std::input_iterator_tag) const
2377 dist = 1;
2378 bool is_sorted = true;
2379 _InputIterator it = __first;
2380 _InputIterator prev = it++;
2381 while (it != __last)
2383 ++dist;
2384 if (base_type::_M_impl._M_key_compare(_KeyOfValue()(*it),_KeyOfValue()(*prev)))
2386 is_sorted = false;
2387 ++it;
2388 break;
2390 prev = it;
2391 ++it;
2393 while (it != __last)
2395 ++dist;
2396 ++it;
2398 return is_sorted;
2401 /** @brief Check whether a random-access sequence is sorted,
2402 * calculate its size, and obtain intermediate accessors to the
2403 * sequence to ease parallelization.
2405 * @param __first Begin iterator of sequence.
2406 * @param __last End iterator of sequence.
2407 * @param access Array of size @c num_pieces + 1 that defines @c
2408 * num_pieces subsequences of the original sequence (out). Each
2409 * position @c i will contain an iterator to the first element in
2410 * the subsequence @c i.
2411 * @param beg_partition Array of size @c num_pieces + 1 that
2412 * defines @c num_pieces subsequences of the original sequence
2413 * (out). Each position @c i will contain the rank of the first
2414 * element in the subsequence @c i.
2415 * @param dist Size of the sequence (out)
2416 * @param num_pieces Number of pieces to generate.
2417 * @return Whether the sequence is sorted. */
2418 template<typename _RandomAccessIterator>
2419 bool
2420 is_sorted_distance_accessors(const _RandomAccessIterator __first, const _RandomAccessIterator __last, _RandomAccessIterator* access, size_type* beg_partition, size_type& dist, thread_index_t& num_pieces, std::random_access_iterator_tag) const
2422 bool is_sorted = is_sorted_distance(__first, __last, dist,std::__iterator_category(__first));
2423 if (dist < (unsigned int) num_pieces)
2424 num_pieces = dist;
2426 // Could this be done the other way around, using the accessors in the equal function?
2427 range_accessors(__first,__last, access, beg_partition, dist, num_pieces, std::__iterator_category(__first));
2428 return is_sorted;
2431 /** @brief Check whether an input sequence is sorted, calculate
2432 * its size, and obtain intermediate accessors to the sequence to
2433 * ease parallelization.
2435 * The list partitioning tool is used so that all the work is
2436 * done in only one traversal.
2437 * @param __first Begin iterator of sequence.
2438 * @param __last End iterator of sequence.
2439 * @param access Array of size @c num_pieces + 1 that defines @c
2440 * num_pieces subsequences of the original sequence (out). Each
2441 * position @c i will contain an iterator to the first element in
2442 * the subsequence @c i.
2443 * @param beg_partition Array of size @c num_pieces + 1 that
2444 * defines @c num_pieces subsequences of the original sequence
2445 * (out). Each position @c i will contain the rank of the first
2446 * element in the subsequence @c i.
2447 * @param dist Size of the sequence (out)
2448 * @param num_pieces Number of pieces to generate.
2449 * @return Whether the sequence is sorted. */
2450 template<typename _InputIterator>
2451 bool
2452 is_sorted_distance_accessors(const _InputIterator __first, const _InputIterator __last, _InputIterator* access, size_type* beg_partition, size_type& dist, thread_index_t& num_pieces, std::input_iterator_tag) const
2454 is_sorted_functor<_InputIterator, _Compare> sorted(__first, base_type::_M_impl._M_key_compare);
2455 dist = list_partition(__first, __last, access, (beg_partition+1), num_pieces, sorted, 0);
2457 // Calculate the rank of the beginning each partition from the
2458 // sequence sizes (what is stored at this point in beg_partition
2459 // array).
2460 beg_partition[0] = 0;
2461 for (int i = 0; i < num_pieces; ++i)
2463 beg_partition[i+1] += beg_partition[i];
2466 return sorted.is_sorted();
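// Editorial worked example (not part of the implementation): if
// list_partition reports the piece sizes 4, 3 and 5, the loop above
// turns beg_partition from {0, 4, 3, 5} into the starting ranks
// {0, 4, 7, 12}.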
2469 /** @brief Make a full copy of the elements of a sequence.
2471 * The uninitialized_copy method from the STL is called in parallel,
2472 * using the access array to point to the beginning of each
2473 * partition.
2474 * @param access Array of size @c num_threads + 1 that defines @c
2475 * num_threads subsequences. Each position @c i contains an
2476 * iterator to the first element in the subsequence @c i.
2477 * @param beg_partition Array of size @c num_threads + 1 that
2478 * defines @c num_threads subsequences. Each position @c i
2479 * contains the rank of the first element in the subsequence @c
2480 * i.
2481 * @param out Begin iterator of output sequence.
2482 * @param num_threads Number of threads to use. */
2483 template<typename _InputIterator, typename _OutputIterator>
2484 static void
2485 uninitialized_copy_from_accessors(_InputIterator* access, size_type* beg_partition, _OutputIterator out, const thread_index_t num_threads)
2487 #pragma omp parallel num_threads(num_threads)
2489 int iam = omp_get_thread_num();
2490 uninitialized_copy(access[iam], access[iam+1], out+beg_partition[iam]);
2494 /** @brief Make a copy of the pointers of the elements of a sequence
2495 * @param access Array of size @c num_threads + 1 that defines @c
2496 * num_threads subsequences. Each position @c i contains an
2497 * iterator to the first element in the subsequence @c i.
2498 * @param beg_partition Array of size @c num_threads + 1 that
2499 * defines @c num_threads subsequences. Each position @c i
2500 * contains the rank of the first element in the subsequence @c
2501 * i.
2502 * @param out Begin iterator of output sequence.
2503 * @param num_threads Number of threads to use. */
2504 template<typename _InputIterator, typename _OutputIterator>
2505 static void
2506 uninitialized_ptr_copy_from_accessors(_InputIterator* access, size_type* beg_partition, _OutputIterator out, const thread_index_t num_threads)
2508 #pragma omp parallel num_threads(num_threads)
2510 int iam = omp_get_thread_num();
2511 _OutputIterator itout = out + beg_partition[iam];
2512 for (_InputIterator it = access[iam]; it != access[iam+1]; ++it)
2514 *itout = &(*it);
2515 ++itout;
2520 /** @brief Split a sorted node array in two parts according to a key.
2522 * For unique containers, if the splitting key is in the array of
2523 * nodes, the corresponding node is erased.
2524 * @param r Array of nodes containing the nodes to split (among others)
2525 * @param pos_beg Position of the first node in the array of
2526 * nodes to be considered
2527 * @param pos_end Position of the first node in the array of
2528 * nodes to be not considered
2529 * @param key Splitting key
2530 * @param pos_beg_right Position of the first node in the
2531 * resulting right partition (out)
2532 * @param existing Number of existing elements before dividing
2533 * (in) and after (out). Specifically, the counter is
2534 * incremented by one for unique containers if the splitting key
2535 * was already in the array of nodes.
2536 * @param strictly_less_or_less_equal Comparator to deal
2537 * transparently with repetitions with respect to the uniqueness
2538 * of the wrapping container
2539 * @return Position of the last node (not included) in the
2540 * resulting left partition (out)
2542 template<typename StrictlyLessOrLessEqual>
2543 size_type
2544 divide(_Rb_tree_node_ptr* r, const size_type pos_beg, const size_type pos_end, const key_type& key, size_type& pos_beg_right, size_type& existing, StrictlyLessOrLessEqual strictly_less_or_less_equal)
2546 pos_beg_right = std::lower_bound(r + pos_beg, r + pos_end, key, compare_node_key<_Compare>(base_type::_M_impl._M_key_compare)) - r;
2548 //Check if the element exists.
2549 size_type pos_end_left = pos_beg_right;
2551 // If r[pos_beg_right] is equal to key, must be erased
2552 /***** Dealing with repetitions (CORRECTNESS ISSUE) *****/
2553 _GLIBCXX_PARALLEL_ASSERT((pos_beg_right == pos_end) or not base_type::_M_impl._M_key_compare(base_type::_S_key(r[pos_beg_right]),key));
2554 _GLIBCXX_PARALLEL_ASSERT((pos_beg_right + 1 >= pos_end) or strictly_less_or_less_equal(key, base_type::_S_key(r[pos_beg_right + 1])));
2555 if (pos_beg_right != pos_end and not strictly_less_or_less_equal(key, base_type::_S_key(r[pos_beg_right])))
2557 _M_destroy_node(r[pos_beg_right]);
2558 r[pos_beg_right] = NULL;
2559 ++pos_beg_right;
2560 ++existing;
2562 _GLIBCXX_PARALLEL_ASSERT(pos_end_left <= pos_beg_right and pos_beg_right <= pos_end and pos_end_left >= pos_beg);
2563 return pos_end_left;
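// Editorial worked example (not part of the implementation): for node
// keys 10 20 30 40 in r[pos_beg, pos_end) and a splitting key of 30 in
// a unique container, std::lower_bound places pos_end_left at the node
// holding 30; that node equals the key, so it is destroyed,
// pos_beg_right advances to the node holding 40 and existing is
// incremented by one.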
2567 /** @brief Parallelization helper method: Given a random-access
2568 sequence of known size, divide it into pieces of almost the
2569 same size.
2570 * @param __first Begin iterator of sequence.
2571 * @param __last End iterator of sequence.
2572 * @param access Array of size @c num_pieces + 1 that defines @c
2573 * num_pieces subsequences. Each position @c i contains an
2574 * iterator to the first element in the subsequence @c i.
2575 * @param beg_partition Array of size @c num_pieces + 1 that
2576 * defines @c num_pieces subsequences. Each position @c i
2577 * contains the rank of the first element in the subsequence @c
2578 * i.
2579 * @param n Sequence size
2580 * @param num_pieces Number of pieces. */
2581 template<typename _RandomAccessIterator>
2582 static void
2583 range_accessors(const _RandomAccessIterator __first, const _RandomAccessIterator __last, _RandomAccessIterator* access, size_type* beg_partition, const size_type n, const thread_index_t num_pieces, std::random_access_iterator_tag)
2585 access[0] = __first;
2586 for (int i=1; i< num_pieces; ++i)
2588 access[i] = access[i-1] + (__last-__first)/num_pieces;
2589 beg_partition[i]= beg_partition[i-1]+ (__last-__first)/num_pieces;
2591 beg_partition[num_pieces] = __last - access[num_pieces-1] + beg_partition[num_pieces-1];
2592 access[num_pieces]= __last;
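// Editorial worked example (not part of the implementation): for a
// random-access sequence of n == 10 elements and num_pieces == 3, the
// chunk is 10 / 3 == 3, so the access iterators end up at offsets
// 0, 3, 6 and 10 and, assuming the caller left beg_partition[0] == 0,
// beg_partition becomes {0, 3, 6, 10}; the last piece absorbs the
// remainder.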
2595 /** @brief Parallelization helper method: Given an input-access
2596 sequence of known size, divide it into pieces of almost the
2597 same size.
2598 * @param __first Begin iterator of sequence.
2599 * @param __last End iterator of sequence.
2600 * @param access Array of size @c num_pieces + 1 that defines @c
2601 * num_pieces subsequences. Each position @c i contains an
2602 * iterator to the first element in the subsequence @c i.
2603 * @param beg_partition Array of size @c num_pieces + 1 that
2604 * defines @c num_pieces subsequences. Each position @c i
2605 * contains the rank of the first element in the subsequence @c
2606 * i.
2607 * @param n Sequence size
2608 * @param num_pieces Number of pieces. */
2609 template<typename _InputIterator>
2610 static void
2611 range_accessors(const _InputIterator __first, const _InputIterator __last, _InputIterator* access, size_type* beg_partition, const size_type n, const thread_index_t num_pieces, std::input_iterator_tag)
2613 access[0] = __first;
2614 _InputIterator it= __first;
2615 for (int i=1; i< num_pieces; ++i)
2617 for (int j=0; j< n/num_pieces; ++j)
2618 ++it;
2619 access[i] = it;
2620 beg_partition[i]= n/num_pieces + beg_partition[i-1];
2622 access[num_pieces] = __last;
2623 beg_partition[num_pieces] = n - (num_pieces-1)*(n/num_pieces) + beg_partition[num_pieces-1];
2626 /** @brief Initialize an array of concatenation problems for bulk
2627 insertion. They are linked as a tree with (end - beg) leaves.
2628 * @param conc Array of concatenation problems pointers to initialize.
2629 * @param beg Rank of the first leaf to initialize
2630 * @param end Rank of the last (not included) leaf to initialize
2631 * @param parent Pointer to the parent concatenation problem.
2633 static concat_problem*
2634 _M_bulk_insertion_initialize_upper_problems(concat_problem** conc, const int beg, const int end, concat_problem* parent)
2636 if (beg + 1 == end)
2638 conc[2*beg]->par_problem = parent;
2639 return conc[2*beg];
2642 int size = end - beg;
2643 int mid = beg + size/2;
2644 conc[2*mid-1]->par_problem = parent;
2645 conc[2*mid-1]->left_problem = _M_bulk_insertion_initialize_upper_problems(conc, beg, mid, conc[2*mid-1]);
2646 conc[2*mid-1]->right_problem = _M_bulk_insertion_initialize_upper_problems(conc, mid, end, conc[2*mid-1]);
2647 return conc[2*mid-1];
2651 /** @brief Determine black height of a node recursively.
2652 * @param t Node.
2653 * @return Black height of the node. */
2654 static int
2655 black_height(const _Rb_tree_node_ptr t)
2657 if (t == NULL)
2658 return 0;
2659 int bh = black_height (static_cast<const _Rb_tree_node_ptr> (t->_M_left));
2660 if (t->_M_color == std::_S_black)
2661 ++bh;
2662 return bh;
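// Editorial sketch (not part of the library): since the recursion
// above only descends to the left, it is equivalent to walking the
// left spine iteratively and counting black nodes:
//
//   int bh = 0;
//   for (const _Rb_tree_node_base* p = t; p != NULL; p = p->_M_left)
//     if (p->_M_color == std::_S_black)
//       ++bh;
//   return bh;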
2665 /** @brief Color a leaf black
2666 * @param t Leaf pointer.
2667 * @param black_h Black height of @c t (out) */
2668 static void
2669 make_black_leaf(const _Rb_tree_node_ptr t, int& black_h)
2671 black_h = 0;
2672 if (t != NULL)
2674 _GLIBCXX_PARALLEL_ASSERT(t->_M_left == NULL and t->_M_right == NULL);
2675 black_h = 1;
2676 t->_M_color = std::_S_black;
2680 /** @brief Turn a node into a black leaf (color it black, clear its children).
2681 * @param t Node to color black.
2682 * @param black_h Black height of @c t (out) */
2683 static void
2684 make_leaf(const _Rb_tree_node_ptr t, int& black_h)
2686 _GLIBCXX_PARALLEL_ASSERT(t != NULL);
2687 black_h = 1;
2688 t->_M_color = std::_S_black;
2689 t->_M_left = NULL;
2690 t->_M_right = NULL;
2693 /** @brief Construct a tree from a root, a left subtree and a
2694 right subtree.
2695 * @param root Root of constructed tree.
2696 * @param l Root of left subtree.
2697 * @param r Root of right subtree.
2698 * @pre @c l, @c r are black.
2700 template<typename S>
2701 static _Rb_tree_node_ptr
2702 plant(const _Rb_tree_node_ptr root, const _Rb_tree_node_ptr l,
2703 const _Rb_tree_node_ptr r)
2705 S::left(root) = l;
2706 S::right(root) = r;
2707 if (l != NULL)
2708 l->_M_parent = root;
2709 if (r != NULL)
2710 r->_M_parent = root;
2711 root->_M_color = std::_S_red;
2712 return root;
2715 /** @brief Concatenate two red-black subtrees using an
2716 intermediate node, which might be NULL.
2717 * @param root Intermediate node.
2718 * @param l Left subtree.
2719 * @param r Right subtree.
2720 * @param black_h_l Black height of left subtree.
2721 * @param black_h_r Black height of right subtree.
2722 * @param t Tree resulting from the concatenation
2723 * @param black_h Black height of the resulting tree
2724 * @pre @c l and @c r are black-rooted (or empty) subtrees with black heights @c black_h_l and @c black_h_r.
2725 * @post @c t is a correct red-black tree with black height @c black_h.
2727 void
2728 concatenate(_Rb_tree_node_ptr root, _Rb_tree_node_ptr l,
2729 _Rb_tree_node_ptr r, int black_h_l, int black_h_r,
2730 _Rb_tree_node_ptr& t, int& black_h) const
2732 #ifndef NDEBUG
2733 int count = 0, count1 = 0, count2 = 0;
2734 #endif
2735 _GLIBCXX_PARALLEL_ASSERT(rb_verify_tree(l, count1));
2736 _GLIBCXX_PARALLEL_ASSERT(rb_verify_tree(r, count2));
2738 _GLIBCXX_PARALLEL_ASSERT(l != NULL ? l->_M_color != std::_S_red and black_h_l > 0 : black_h_l == 0);
2739 _GLIBCXX_PARALLEL_ASSERT(r != NULL ? r->_M_color != std::_S_red and black_h_r > 0 : black_h_r == 0);
2741 if (black_h_l > black_h_r)
2742 if (root != NULL)
2743 concatenate<LeftRight>(root, l, r, black_h_l, black_h_r, t, black_h);
2744 else
2746 if (r == NULL)
2748 t = l;
2749 black_h = black_h_l;
2751 else
2753 // XXX SHOULD BE the same as extract_min but slower.
2755 root = static_cast<_Rb_tree_node_ptr>(_Rb_tree_node_base::_S_minimum(r));
2756 split(r, _S_key(_Rb_tree_increment(root)), _S_key(root), root, t, r, black_h, black_h_r);
2758 extract_min(r, root, r, black_h_r);
2759 _GLIBCXX_PARALLEL_ASSERT(root != NULL);
2760 concatenate<LeftRight>(root, l, r, black_h_l, black_h_r, t, black_h);
2763 else
2764 if (root != NULL)
2765 concatenate<RightLeft>(root, r, l, black_h_r, black_h_l, t, black_h);
2766 else
2768 if (l == NULL)
2770 t = r;
2771 black_h = black_h_r;
2773 else
2775 // XXX SHOULD BE the same as extract_max but slower
2777 root = static_cast<_Rb_tree_node_ptr>(_Rb_tree_node_base::_S_maximum(l));
2778 split(l, _S_key(root), _S_key(_Rb_tree_decrement(root)), root, l, t, black_h_l, black_h);
2780 extract_max(l, root, l, black_h_l);
2781 _GLIBCXX_PARALLEL_ASSERT(root != NULL);
2782 concatenate<RightLeft>(root, r, l, black_h_r, black_h_l, t, black_h);
2785 #ifndef NDEBUG
2786 if (root!=NULL) ++count1;
2787 _GLIBCXX_PARALLEL_ASSERT(t == NULL or t->_M_color == std::_S_black);
2788 bool b = rb_verify_tree(t, count);
2789 if (not b){
2790 _GLIBCXX_PARALLEL_ASSERT(false);
2792 _GLIBCXX_PARALLEL_ASSERT(count1+count2 == count);
2793 #endif
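// Editorial usage sketch (not part of the implementation; all names
// below are placeholders): given two black-rooted subtrees left_t and
// right_t with black heights bh_l and bh_r, and a pivot node mid whose
// key orders after every key in left_t and before every key in
// right_t,
//
//   _Rb_tree_node_ptr joined;
//   int joined_bh;
//   concatenate(mid, left_t, right_t, bh_l, bh_r, joined, joined_bh);
//
// yields a valid red-black tree in joined with black height joined_bh.
// Passing mid == NULL makes the function pull an adjacent pivot out of
// one of the two subtrees itself.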
2796 /** @brief Concatenate two red-black subtrees using a non-NULL
2797 * intermediate node.
2799 * @c S is the symmetry parameter.
2800 * @param rt Intermediate node.
2801 * @param l Left subtree.
2802 * @param r Right subtree.
2803 * @param black_h_l Black height of left subtree.
2804 * @param black_h_r Black height of right subtree.
2805 * @param t Tree resulting from the concatenation
2806 * @param black_h Black height of the resulting tree
2807 * @pre The left tree is at least as high as the right tree. @c rt != NULL
2808 * @post @c t is a correct red-black tree with black height @c black_h.
2810 template<typename S>
2811 static void
2812 concatenate(const _Rb_tree_node_ptr rt, _Rb_tree_node_ptr l,
2813 _Rb_tree_node_ptr r, int black_h_l, int black_h_r,
2814 _Rb_tree_node_ptr& t, int& black_h)
2816 _Rb_tree_node_base* root = l;
2817 _Rb_tree_node_ptr parent = NULL;
2818 black_h = black_h_l;
2819 _GLIBCXX_PARALLEL_ASSERT(black_h_l >= black_h_r);
2820 while (black_h_l != black_h_r)
2822 if (l->_M_color == std::_S_black)
2823 --black_h_l;
2824 parent = l;
2825 l = static_cast<_Rb_tree_node_ptr>(S::right(l));
2826 _GLIBCXX_PARALLEL_ASSERT((black_h_l == 0 and (l == NULL or l->_M_color == std::_S_red)) or (black_h_l != 0 and l != NULL));
2827 _GLIBCXX_PARALLEL_ASSERT((black_h_r == 0 and (r == NULL or r->_M_color == std::_S_red)) or (black_h_r != 0 and r != NULL));
2829 if (l != NULL and l->_M_color == std::_S_red)
2831 //the root needs to be black
2832 parent = l;
2833 l = static_cast<_Rb_tree_node_ptr>(S::right(l));
2835 _GLIBCXX_PARALLEL_ASSERT(l != NULL ? l->_M_color == std::_S_black : true);
2836 _GLIBCXX_PARALLEL_ASSERT(r != NULL ? r->_M_color == std::_S_black : true);
2837 t = plant<S>(rt, l, r);
2838 t->_M_parent = parent;
2839 if (parent != NULL)
2841 S::right(parent) = t;
2842 black_h += _Rb_tree_rebalance(t, root);
2843 t = static_cast<_Rb_tree_node_ptr> (root);
2845 else
2847 ++black_h;
2848 t->_M_color = std::_S_black;
2850 _GLIBCXX_PARALLEL_ASSERT(t->_M_color == std::_S_black);
2853 /** @brief Split a tree according to key in three parts: a left
2854 * child, a right child and an intermediate node.
2856 * Trees are concatenated once the recursive call returns. That
2857 * is, from bottom to top (i. e. smaller to larger), so the cost
2858 * bounds for split hold.
2859 * @param t Root of the tree to split.
2860 * @param key Key to split according to.
2861 * @param prev_k Key to split the intermediate node
2862 * @param root Out parameter. If a node exists whose key is
2863 * less than or equal to @c key, but strictly greater than @c
2864 * prev_k, this is returned. Otherwise, it is null.
2865 * @param l Root of left subtree returned, nodes less than @c key.
2866 * @param r Root of right subtree returned, nodes greater than
2867 * or equal to @c key.
2868 * @param black_h_l Black height of the left subtree.
2869 * @param black_h_r Black height of the right subtree.
2870 * @param strictly_less_or_less_equal Comparator to deal
2871 * transparently with repetitions with respect to the uniqueness
2872 * of the wrapping container
2873 * @return Black height of t */
2874 template<typename StrictlyLessOrEqual>
2876 split(_Rb_tree_node_ptr t, const key_type& key, const key_type& prev_k,
2877 _Rb_tree_node_ptr& root, _Rb_tree_node_ptr& l, _Rb_tree_node_ptr& r,
2878 int& black_h_l, int& black_h_r,
2879 StrictlyLessOrEqual strictly_less_or_less_equal) const
2881 if (t != NULL)
2883 // Must be initialized, in case we never go left!!!
2884 root = NULL;
2885 int h = split_not_null(t, key, prev_k, root, l, r, black_h_l, black_h_r, strictly_less_or_less_equal);
2886 #ifndef NDEBUG
2887 _GLIBCXX_PARALLEL_ASSERT(l == NULL or base_type::_M_impl._M_key_compare(base_type::_S_key(base_type::_S_maximum(l)),key));
2888 _GLIBCXX_PARALLEL_ASSERT(r == NULL or not base_type::_M_impl._M_key_compare(base_type::_S_key(base_type::_S_minimum(r)),key));
2889 int count1, count2;
2890 _GLIBCXX_PARALLEL_ASSERT(rb_verify_tree(l, count1));
2891 _GLIBCXX_PARALLEL_ASSERT(rb_verify_tree(r, count2));
2892 _GLIBCXX_PARALLEL_ASSERT(root == NULL or base_type::_M_impl._M_key_compare(prev_k, base_type::_S_key(root)) and not base_type::_M_impl._M_key_compare(key, base_type::_S_key(root)));
2893 _GLIBCXX_PARALLEL_ASSERT(root != NULL or l==NULL or not base_type::_M_impl._M_key_compare(prev_k, base_type::_S_key(base_type::_S_maximum(l))));
2894 #endif
2895 return h;
2898 r = NULL;
2899 root = NULL;
2900 l = NULL;
2901 black_h_r = 0;
2902 black_h_l = 0;
2903 return 0;
2906 /** @brief Split a tree according to key in three parts: a left
2907 * child, a right child and an intermediate node.
2909 * @param t Root of the tree to split.
2910 * @param key Key to split according to.
2911 * @param prev_k Key to split the intermediate node
2912 * @param root Out parameter. If a node exists whose key is
2913 * less than or equal to @c key, but strictly greater than @c
2914 * prev_k, this is returned. Otherwise, it is null.
2915 * @param l Root of left subtree returned, nodes less than @c key.
2916 * @param r Root of right subtree returned, nodes greater than
2917 * or equal to @c key.
2918 * @param black_h_l Black height of the left subtree.
2919 * @param black_h_r Black height of the right subtree.
2920 * @param strictly_less_or_equal Comparator to deal transparently
2921 * with repetitions with respect to the uniqueness of the
2922 * wrapping container
2923 * @pre t != NULL
2924 * @return Black height of t */
2925 template<typename StrictlyLessOrEqual>
2927 split_not_null(const _Rb_tree_node_ptr t, const key_type& key,
2928 const key_type& prev_k, _Rb_tree_node_ptr& root,
2929 _Rb_tree_node_ptr& l, _Rb_tree_node_ptr& r, int& black_h_l,
2930 int& black_h_r,
2931 StrictlyLessOrEqual strictly_less_or_equal) const
2933 _GLIBCXX_PARALLEL_ASSERT (t != NULL);
2934 int black_h, b_h;
2935 int black_node = 0;
2936 if (t->_M_color == std::_S_black)
2937 ++black_node;
2938 if (strictly_less_or_equal(key, base_type::_S_key(t)))
2940 if (t->_M_left != NULL )
2942 // t->M_right is at most one node
2943 // go to the left
2944 b_h = black_h = split_not_null( static_cast<_Rb_tree_node_ptr>(t->_M_left), key, prev_k, root, l, r, black_h_l, black_h_r, strictly_less_or_equal);
2945 // Join root and right subtree to the already existing right
2946 // half, leave the left subtree.
2947 force_black_root(t->_M_right, b_h);
2948 concatenate(t, r, static_cast<_Rb_tree_node_ptr>(t->_M_right), black_h_r, b_h, r, black_h_r);
2950 else
2952 // t->M_right is at most one node
2953 r = t;
2954 black_h_r = black_node;
2955 force_black_root(r, black_h_r);
2957 black_h = 0;
2958 l = NULL;
2959 black_h_l = 0;
2961 _GLIBCXX_PARALLEL_ASSERT(l == NULL or base_type::_M_impl._M_key_compare(base_type::_S_key(base_type::_S_maximum(l)),key));
2962 _GLIBCXX_PARALLEL_ASSERT(r == NULL or not base_type::_M_impl._M_key_compare(base_type::_S_key(base_type::_S_minimum(r)),key));
2964 else
2966 if (t->_M_right != NULL )
2968 // Go to the right.
2969 if (strictly_less_or_equal(prev_k, base_type::_S_key(t)))
2970 root = t;
2971 b_h = black_h = split_not_null(static_cast<_Rb_tree_node_ptr>(t->_M_right), key, prev_k, root, l, r, black_h_l, black_h_r, strictly_less_or_equal);
2972 // Join root and left subtree to already existing left
2973 // half, leave right subtree.
2974 force_black_root(t->_M_left, b_h);
2975 if (root != t)
2977 // There was another point where we went right.
2978 concatenate(t, static_cast<_Rb_tree_node_ptr>(t->_M_left), l, b_h, black_h_l, l, black_h_l);
2980 else
2982 l = static_cast<_Rb_tree_node_ptr>(t->_M_left);
2983 black_h_l = b_h;
2985 _GLIBCXX_PARALLEL_ASSERT(l == NULL or base_type::_M_impl._M_key_compare(base_type::_S_key(base_type::_S_maximum(l)),key));
2986 _GLIBCXX_PARALLEL_ASSERT(r == NULL or not base_type::_M_impl._M_key_compare(base_type::_S_key(base_type::_S_minimum(r)),key));
2988 else
2990 if (strictly_less_or_equal(prev_k, base_type::_S_key(t)))
2992 root = t;
2993 l= static_cast<_Rb_tree_node_ptr>(t->_M_left);
2994 make_black_leaf(l, black_h_l);
2995 _GLIBCXX_PARALLEL_ASSERT(l == NULL or base_type::_M_impl._M_key_compare(base_type::_S_key(base_type::_S_maximum(l)),key));
2997 else
2999 l= t;
3000 black_h_l = black_node;
3001 force_black_root(l, black_h_l);
3002 _GLIBCXX_PARALLEL_ASSERT(l == NULL or base_type::_M_impl._M_key_compare(base_type::_S_key(base_type::_S_maximum(l)),key));
3005 r = NULL;
3006 black_h = 0;
3007 black_h_r = 0;
3010 return black_h + black_node;
3013 /** @brief Color the root black and update the black height accordingly.
3015 * @param t Root of the tree.
3016 * @param black_h Black height of the tree @c t (out) */
3017 static void force_black_root(_Rb_tree_node_base* t, int& black_h)
3019 if (t != NULL and t->_M_color == std::_S_red)
3021 t->_M_color = std::_S_black;
3022 ++ black_h;
3026 /** @brief Split the tree in two parts: the minimum element from a
3027 tree (i. e. leftmost) and the rest (right subtree)
3028 * @param t Root of the tree
3029 * @param root Minimum element (out)
3030 * @param r Right subtree: @c t - {@c root}
3031 * @param black_h_r Black height of the right subtree.
3032 * @return Black height of the original tree */
3034 extract_min(const _Rb_tree_node_ptr t, _Rb_tree_node_ptr& root,
3035 _Rb_tree_node_ptr& r, int& black_h_r) const
3037 _GLIBCXX_PARALLEL_ASSERT (t != NULL);
3038 int black_h, b_h;
3039 int black_node = 0;
3040 if (t->_M_color == std::_S_black)
3041 ++black_node;
3043 if (t->_M_left != NULL )
3045 // t->M_right is at most one node
3046 // go to the left
3047 b_h = black_h = extract_min( static_cast<_Rb_tree_node_ptr>(t->_M_left), root, r, black_h_r);
3049 // Join root and right subtree to already existing right
3050 // half, leave left subtree
3051 force_black_root(t->_M_right, b_h);
3052 concatenate(t, r, static_cast<_Rb_tree_node_ptr>(t->_M_right), black_h_r, b_h, r, black_h_r);
3054 else
3056 // t->M_right is at most one node
3057 root = t;
3058 if (t->_M_right == NULL)
3060 r = NULL;
3061 black_h_r = 0;
3063 else
3065 r = static_cast<_Rb_tree_node_ptr>(t->_M_right);
3066 black_h_r = 1;
3067 r->_M_color = std::_S_black;
3069 black_h = 0;
3071 return black_h + black_node;
3075 /** @brief Split the tree in two parts: the greatest element from
3076 a tree (i. e. rightmost) and the rest (left subtree)
3077 * @param t Root of the tree
3078 * @param root Maximum element (out)
3079 * @param l Left subtree: @c t - {@c root}
3080 * @param black_h_l Black height of the left subtree.
3081 * @return Black height of the original tree */
3083 extract_max(const _Rb_tree_node_ptr t, _Rb_tree_node_ptr& root,
3084 _Rb_tree_node_ptr& l, int& black_h_l) const
3086 _GLIBCXX_PARALLEL_ASSERT (t != NULL);
3087 int black_h, b_h;
3088 int black_node = 0;
3089 if (t->_M_color == std::_S_black)
3090 ++black_node;
3092 if (t->_M_right != NULL )
3094 b_h = black_h = extract_max(static_cast<_Rb_tree_node_ptr>(t->_M_right), root, l, black_h_l);
3096 // Join root and left subtree to already existing left half,
3097 // leave right subtree.
3098 force_black_root(t->_M_left, b_h);
3100 concatenate(t, static_cast<_Rb_tree_node_ptr>(t->_M_left), l, b_h, black_h_l, l, black_h_l);
3102 else
3104 root = t;
3105 if (t->_M_left == NULL)
3107 l = NULL;
3108 black_h_l = 0;
3110 else
3112 l = static_cast<_Rb_tree_node_ptr>(t->_M_left);
3113 black_h_l = 1;
3114 l->_M_color = std::_S_black;
3116 black_h = 0;
3118 return black_h + black_node;
3121 /** @brief Split tree according to key in two parts: a left tree
3122 * and a right subtree
3124 * Trees are concatenated once the recursive call returns. That
3125 * is, from bottom to top (i. e. smaller to larger), so the cost
3126 * bounds for split hold.
3127 * @param t Root of the tree to split.
3128 * @param key Key to split according to.
3129 * @param l Root of left subtree returned, nodes less than @c key.
3130 * @param r Root of right subtree returned, nodes greater than or equal to @c key.
3131 * @param black_h_l Black height of the left subtree.
3132 * @param black_h_r Black height of the right subtree.
3133 * @return Black height of the original tree */
3134 int
3135 split(const _Rb_tree_node_ptr t, const key_type& key,
3136 _Rb_tree_node_ptr& l, _Rb_tree_node_ptr& r, int& black_h_l,
3137 int& black_h_r) const
3139 if (t != NULL)
3141 int black_h, b_h;
3142 int black_node = 0;
3143 if (t->_M_color == std::_S_black)
3144 ++black_node;
3145 if (not (base_type::_M_impl._M_key_compare(base_type::_S_key(t), key)))
3147 // Go to the left.
3148 b_h = black_h = split( static_cast<_Rb_tree_node_ptr>(t->_M_left), key, l, r, black_h_l, black_h_r);
3150 // Join root and right subtree to already existing right
3151 // half, leave left subtree.
3152 force_black_root(t->_M_right, b_h);
3153 concatenate(t, r, static_cast<_Rb_tree_node_ptr>(t->_M_right), black_h_r, b_h, r, black_h_r);
3155 else
3157 // Go to the right.
3158 b_h = black_h = split(static_cast<_Rb_tree_node_ptr>(t->_M_right), key, l, r, black_h_l, black_h_r);
3160 // Join root and left subtree to already existing left
3161 // half, leave right subtree.
3162 force_black_root(t->_M_left, b_h);
3163 concatenate(t, static_cast<_Rb_tree_node_ptr>(t->_M_left), l, b_h, black_h_l, l, black_h_l);
3165 return black_h + black_node;
3167 else
3169 r = NULL;
3170 l = NULL;
3171 black_h_r = 0;
3172 black_h_l = 0;
3173 return 0;
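// Illustrative use of split (hypothetical caller, names made up for this
// sketch), given a tree t and a key: partition the tree around the key,
// keeping both halves as valid red-black trees.
//
//   _Rb_tree_node_ptr left_part, right_part;
//   int bh_left, bh_right;
//   split(t, key, left_part, right_part, bh_left, bh_right);
//   // left_part:  nodes whose keys compare less than key
//   // right_part: nodes whose keys do not compare less than key
//   // bh_left / bh_right: black heights of the two parts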
3177 /** @brief Insert an existing node in tree and rebalance it, if
3178 * appropriate.
3180 * The keyword "local" is used because no attributes of the
3181 * red-black tree are changed, so this insertion is not yet seen
3182 * by the global data structure.
3183 * @param t Root of tree to insert into.
3184 * @param new_t Existing node to insert.
3185 * @param existing Number of existing elements before insertion
3186 * (in) and after (out). Specifically, for unique containers the
3187 * counter is incremented by one if the key of @c new_t was already
3188 * in the tree.
3189 * @param black_h Black height of @c t on entry, of the resulting tree on exit (in/out).
3190 * @param strictly_less_or_less_equal Comparator to deal
3191 * transparently with repetitions with respect to the uniqueness
3192 * of the wrapping container
3193 * @return Resulting tree after insertion */
3194 template<typename StrictlyLessOrLessEqual>
3195 _Rb_tree_node_ptr
3196 _M_insert_local(_Rb_tree_node_base* t, const _Rb_tree_node_ptr new_t,
3197 size_type& existing, int& black_h,
3198 StrictlyLessOrLessEqual strictly_less_or_less_equal)
3200 _GLIBCXX_PARALLEL_ASSERT(t != NULL);
3201 if (_M_insert_local_top_down(t, new_t, NULL, NULL, true, strictly_less_or_less_equal))
3203 t->_M_parent = NULL;
3204 black_h += _Rb_tree_rebalance(new_t, t);
3205 _GLIBCXX_PARALLEL_ASSERT(t->_M_color == std::_S_black);
3206 return static_cast<_Rb_tree_node_ptr>(t);
3208 else
3210 base_type::_M_destroy_node(new_t);
3211 ++existing;
3212 force_black_root(t, black_h);
3213 return static_cast<_Rb_tree_node_ptr>(t);
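// Illustrative use of _M_insert_local (hypothetical caller; 'less' stands
// for the strictly-less comparator of a unique container):
//
//   size_type existing = 0;
//   int black_h = /* black height of t on entry */;
//   t = _M_insert_local(t, new_node, existing, black_h, less);
//
// If the key of new_node was already present, new_node has been destroyed
// and existing was incremented; otherwise the node was inserted and the
// tree rebalanced.  In both cases black_h ends up holding the black
// height of the returned tree, whose root is black.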
3217 /***** Dealing with repetitions (CORRECTNESS ISSUE) *****/
3218 /** @brief Insert an existing node into the tree without rebalancing.
3219 * @param t Root of tree to insert into.
3220 * @param new_t Existing node to insert.
3221 * @param eq_t Candidate node that may compare equal to @c new_t;
3222 * only relevant for unique containers
3223 * @param parent Parent node of @c t
3224 * @param is_left True if @c t is a left child of @c
3225 * parent. False otherwise.
3226 * @param strictly_less_or_less_equal Comparator to deal
3227 * transparently with repetitions with respect to the uniqueness
3228 * of the wrapping container
3230 * @return Success of the insertion
3232 template<typename StrictlyLessOrLessEqual>
3233 bool
3234 _M_insert_local_top_down(_Rb_tree_node_base* t,
3235 const _Rb_tree_node_ptr new_t,
3236 _Rb_tree_node_base* eq_t,
3237 _Rb_tree_node_base* parent, const bool is_left,
3238 StrictlyLessOrLessEqual strictly_less_or_less_equal) const
3240 if (t != NULL)
3242 if (strictly_less_or_less_equal(base_type::_S_key(new_t), base_type::_S_key(static_cast<_Rb_tree_node_ptr>(t))))
3244 return _M_insert_local_top_down(t->_M_left, new_t, eq_t, t, true, strictly_less_or_less_equal);
3246 else
3248 return _M_insert_local_top_down(t->_M_right, new_t, t, t, false, strictly_less_or_less_equal);
3252 _GLIBCXX_PARALLEL_ASSERT(parent != NULL);
3254 // Base case.
3255 if (eq_t == NULL or strictly_less_or_less_equal(base_type::_S_key(static_cast<_Rb_tree_node_ptr>(eq_t)), base_type::_S_key(new_t)))
3257 // The element to be inserted did not exist before.
3258 if (is_left)
3260 parent->_M_left = new_t;
3262 else
3264 parent->_M_right = new_t;
3267 new_t->_M_parent = parent;
3268 new_t->_M_left = NULL;
3269 new_t->_M_right = NULL;
3270 new_t->_M_color = std::_S_red;
3272 return true;
3274 else
3275 return false;
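// _M_insert_local_top_down descends from t without rebalancing, threading
// the last node that might compare equal to new_t through eq_t.  At the
// bottom, a unique container rejects the insertion (returns false) when
// eq_t is equivalent to new_t; otherwise new_t is linked below parent as
// a red leaf and true is returned.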
3278 /** @brief Rebalance a tree locally.
3280 * Essentially the same rebalancing loop as the one used for
3281 * insertion in the base class, but without the insertion itself
3282 * and without using any tree attributes.
3283 * @param __x Root of the current subtree to rebalance.
3284 * @param __root Root of tree where @c __x is in (rebalancing
3285 * stops when root is reached)
3286 * @return Increment in the black height after rebalancing
3288 static int
3289 _Rb_tree_rebalance(_Rb_tree_node_base* __x, _Rb_tree_node_base*& __root)
3291 _GLIBCXX_PARALLEL_ASSERT(__root->_M_color == std::_S_black);
3292 // Rebalance.
3293 while (__x != __root and __x->_M_parent != __root and
3294 __x->_M_parent->_M_color == std::_S_red)
3296 _Rb_tree_node_base* const __xpp = __x->_M_parent->_M_parent;
3298 if (__x->_M_parent == __xpp->_M_left)
3300 _Rb_tree_node_base* const __y = __xpp->_M_right;
3301 if (__y && __y->_M_color == std::_S_red)
3303 __x->_M_parent->_M_color = std::_S_black;
3304 __y->_M_color = std::_S_black;
3305 __xpp->_M_color = std::_S_red;
3306 __x = __xpp;
3308 else
3310 if (__x == __x->_M_parent->_M_right)
3312 __x = __x->_M_parent;
3313 std::_Rb_tree_rotate_left(__x, __root);
3315 __x->_M_parent->_M_color = std::_S_black;
3316 __xpp->_M_color = std::_S_red;
3317 std::_Rb_tree_rotate_right(__xpp, __root);
3320 else
3322 _Rb_tree_node_base* const __y = __xpp->_M_left;
3323 if (__y && __y->_M_color == std::_S_red)
3325 __x->_M_parent->_M_color = std::_S_black;
3326 __y->_M_color = std::_S_black;
3327 __xpp->_M_color = std::_S_red;
3328 __x = __xpp;
3330 else
3332 if (__x == __x->_M_parent->_M_left)
3334 __x = __x->_M_parent;
3335 std::_Rb_tree_rotate_right(__x, __root);
3337 __x->_M_parent->_M_color = std::_S_black;
3338 __xpp->_M_color = std::_S_red;
3339 std::_Rb_tree_rotate_left(__xpp, __root);
3343 if (__root->_M_color == std::_S_red)
3345 __root->_M_color = std::_S_black;
3346 _GLIBCXX_PARALLEL_ASSERT(rb_verify_tree(static_cast<typename base_type::_Const_Link_type>(__root)));
3347 return 1;
3349 _GLIBCXX_PARALLEL_ASSERT(rb_verify_tree(static_cast<typename base_type::_Const_Link_type>(__root)));
3350 return 0;
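// The loop above is the classical red-black insertion fixup: recolor
// while parent and uncle are red, otherwise rotate once or twice and
// stop.  The return value reports whether the root had to be recolored
// black, i.e. whether the black height of the tree grew by one.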
3353 /** @brief Analogous to class method rb_verify() but only for a subtree.
3354 * @param __x Pointer to root of subtree to check.
3355 * @param count Returned number of nodes.
3356 * @return Tree correct.
3358 bool
3359 rb_verify_tree(const typename base_type::_Const_Link_type __x, int& count) const
3361 int bh;
3362 return rb_verify_tree_node(__x) and rb_verify_tree(__x, count, bh);
3365 /** @brief Verify that a subtree is a binary search tree (checks
3366 the key ordering)
3367 * @param __x Pointer to root of subtree to check.
3368 * @return Tree correct.
3370 bool
3371 rb_verify_tree_node(const typename base_type::_Const_Link_type __x) const
3373 if (__x == NULL)
3374 return true;
3375 else
3377 return rb_verify_node(__x) and
3378 rb_verify_tree_node(base_type::_S_left(__x)) and
3379 rb_verify_tree_node( base_type::_S_right(__x));
3383 /** @brief Verify all the properties of a red-black tree except
3384 for the key ordering
3385 * @param __x Pointer to (subtree) root node.
3386 * @return Tree correct.
3388 static bool
3389 rb_verify_tree(const typename base_type::_Const_Link_type __x)
3391 int bh, count;
3392 return rb_verify_tree(__x, count, bh);
3395 /** @brief Verify all the properties of a red-black tree except
3396 for the key ordering
3397 * @param __x Pointer to (subtree) root node.
3398 * @param count Number of nodes of @c __x (out).
3399 * @param black_h Black height of @c __x (out).
3400 * @return Tree correct.
3402 static bool
3403 rb_verify_tree(const typename base_type::_Const_Link_type __x, int& count,
3404 int& black_h)
3406 if (__x == NULL)
3408 count = 0;
3409 black_h = 0;
3410 return true;
3412 typename base_type::_Const_Link_type __L = base_type::_S_left(__x);
3413 typename base_type::_Const_Link_type __R = base_type::_S_right(__x);
3414 int countL, countR = 0, bhL, bhR;
3415 bool ret = rb_verify_tree(__L, countL, bhL);
3416 ret = ret and rb_verify_tree(__R, countR, bhR);
3417 count = 1 + countL + countR;
3418 ret = ret and bhL == bhR;
3419 black_h = bhL + ((__x->_M_color == std::_S_red)? 0 : 1);
3420 return ret;
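// The recursion above checks that the black heights of the two children
// of every node agree, computing for each subtree
//   black_h(x) = black_h(left(x)) + (x black ? 1 : 0),  black_h(NULL) = 0
// while count accumulates the number of nodes per subtree.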
3423 /** @brief Verify red-black properties (including key based) for a node
3424 * @param __x Pointer to node.
3425 * @return Node correct.
3427 bool
3428 rb_verify_node(const typename base_type::_Const_Link_type __x) const
3430 typename base_type::_Const_Link_type __L = base_type::_S_left(__x);
3431 typename base_type::_Const_Link_type __R = base_type::_S_right(__x);
3432 if (__x->_M_color == std::_S_red)
3433 if ((__L && __L->_M_color == std::_S_red)
3434 || (__R && __R->_M_color == std::_S_red))
3436 return false;
3438 if (__L != NULL)
3440 __L = static_cast<typename base_type::_Const_Link_type>(base_type::_S_maximum(__L));
3441 if (base_type::_M_impl._M_key_compare(base_type::_S_key(__x), base_type::_S_key(__L)))
3443 return false;
3447 if (__R != NULL)
3449 __R = static_cast<typename base_type::_Const_Link_type>(base_type::_S_minimum(__R));
3450 if (base_type::_M_impl._M_key_compare(base_type::_S_key(__R), base_type::_S_key(__x)))
3452 return false;
3456 return true;
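// rb_verify_node checks the local invariants of a single node: a red node
// must not have a red child, the maximum key in the left subtree must not
// compare greater than the node's key, and the node's key must not
// compare greater than the minimum key in the right subtree.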
3459 /** @brief Print the key of the root node, or "NULL" for an empty tree.
3460 * @param t Root of the tree.
3462 static void
3463 print_root(_Rb_tree_node_base* t)
3466 if (t != NULL)
3467 std::cout << base_type::_S_key(t) << std::endl;
3468 else
3469 std::cout << "NULL" << std::endl;
3473 /** @brief Print the keys of the tree in order (in-order traversal).
3474 * @param t Root of the tree.
3476 static void
3477 print_tree(_Rb_tree_node_base* t)
3480 if (t != NULL)
3482 print_tree(t->_M_left);
3483 std::cout << base_type::_S_key(t) << std::endl;
3484 print_tree(t->_M_right);
3489 /** @brief Produce a string of blanks.
3490 * @param b Number of blanks.
3491 * @return A string of @c b blank characters */
3492 inline static std::string
3493 blanks(int b)
3496 std::string s = "";
3497 for (int i=0; i < b; ++i)
3498 s += " ";
3499 return s;
3503 /** @brief Draw the tree sideways, one key per line with its color.
3504 * @param t Root of the tree.
3505 * @param c Indentation (number of blanks) for the key of @c t.
3507 template<typename Pointer>
3508 static void
3509 draw_tree(Pointer t, const int c)
3512 if (t == NULL)
3514 std::cout << blanks(c) << "NULL" << std::endl;
3515 return;
3517 draw_tree(static_cast<Pointer>(t->_M_right), c + 8);
3518 std::cout << blanks(c) << "" << base_type::_S_key(t) << " ";
3519 if (t->_M_color == std::_S_black)
3520 std::cout << "B" << std::endl;
3521 else
3522 std::cout << "R" << std::endl;
3523 draw_tree(static_cast<Pointer>(t->_M_left), c + 8);
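// Example output of draw_tree (schematic, for a hypothetical tree with
// black root 2 and red children 1 and 3, called as draw_tree(root, 0);
// the right subtree is printed above the root, the left one below):
//
//                 NULL
//         3 R
//                 NULL
// 2 B
//                 NULL
//         1 R
//                 NULL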
3527 public:
3528 /** @brief Verify that all the red-black tree properties hold for
3529 the stored tree, as well as the additional properties that the
3530 STL implementation imposes.
3532 bool
3533 rb_verify()
3535 if (base_type::_M_impl._M_node_count == 0 || base_type::begin() == base_type::end())
3537 bool res = base_type::_M_impl._M_node_count == 0 && base_type::begin() == base_type::end()
3538 && base_type::_M_impl._M_header._M_left == base_type::_M_end()
3539 && base_type::_M_impl._M_header._M_right == base_type::_M_end();
3540 _GLIBCXX_PARALLEL_ASSERT(res);
3541 return res;
3543 size_type i=0;
3544 unsigned int __len = _Rb_tree_black_count(base_type::_M_leftmost(), base_type::_M_root());
3545 for (typename base_type::const_iterator __it = base_type::begin(); __it != base_type::end(); ++__it)
3547 typename base_type::_Const_Link_type __x = static_cast<typename base_type::_Const_Link_type>(__it._M_node);
3548 if (not rb_verify_node(__x)) return false;
3549 if (!base_type::_S_left(__x) && !base_type::_S_right(__x) && _Rb_tree_black_count(__x, base_type::_M_root()) != __len)
3551 _GLIBCXX_PARALLEL_ASSERT(false);
3552 return false;
3554 ++i;
3557 if (i != base_type::_M_impl._M_node_count)
3558 printf("%lu != %lu\n", (unsigned long)i, (unsigned long)base_type::_M_impl._M_node_count);
3560 if (base_type::_M_leftmost() != std::_Rb_tree_node_base::_S_minimum(base_type::_M_root()))
3562 _GLIBCXX_PARALLEL_ASSERT(false);
3563 return false;
3565 if (base_type::_M_rightmost() != std::_Rb_tree_node_base::_S_maximum(base_type::_M_root()))
3567 _GLIBCXX_PARALLEL_ASSERT(false);
3568 return false;
3570 _GLIBCXX_PARALLEL_ASSERT(i == base_type::_M_impl._M_node_count);
3571 return true;
3577 #endif