[msysgit.git] mingw/lib/gcc/mingw32/4.3.3/include/c++/tr1_impl/hashtable_policy.h
// Internal policy header for TR1 unordered_set and unordered_map -*- C++ -*-

// Copyright (C) 2007 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

/** @file tr1_impl/hashtable_policy.h
 *  This is an internal header file, included by other library headers.
 *  You should not attempt to use it directly.
 */
namespace std
{
_GLIBCXX_BEGIN_NAMESPACE_TR1

namespace __detail
{
  // Helper function: return distance(first, last) for forward
  // iterators, or 0 for input iterators.
  template<class _Iterator>
    inline typename std::iterator_traits<_Iterator>::difference_type
    __distance_fw(_Iterator __first, _Iterator __last,
                  std::input_iterator_tag)
    { return 0; }

  template<class _Iterator>
    inline typename std::iterator_traits<_Iterator>::difference_type
    __distance_fw(_Iterator __first, _Iterator __last,
                  std::forward_iterator_tag)
    { return std::distance(__first, __last); }

  template<class _Iterator>
    inline typename std::iterator_traits<_Iterator>::difference_type
    __distance_fw(_Iterator __first, _Iterator __last)
    {
      typedef typename std::iterator_traits<_Iterator>::iterator_category _Tag;
      return __distance_fw(__first, __last, _Tag());
    }
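
  // Illustration: a std::vector<int>::iterator range selects the forward
  // overload (random access refines forward), so __distance_fw returns the
  // true distance and the table can be sized up front; a
  // std::istream_iterator<int> range selects the input overload, returns 0,
  // and the table simply grows as elements arrive.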
  template<typename _RAIter, typename _Tp>
    _RAIter
    __lower_bound(_RAIter __first, _RAIter __last, const _Tp& __val)
    {
      typedef typename std::iterator_traits<_RAIter>::difference_type _DType;

      _DType __len = __last - __first;
      while (__len > 0)
        {
          _DType __half = __len >> 1;
          _RAIter __middle = __first + __half;
          if (*__middle < __val)
            {
              __first = __middle;
              ++__first;
              __len = __len - __half - 1;
            }
          else
            __len = __half;
        }
      return __first;
    }
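
  // This is a plain binary search with the same semantics as
  // std::lower_bound: it returns an iterator to the first element not less
  // than __val.  E.g., on the array {2, 3, 5, 7, 11},
  // __lower_bound(__a, __a + 5, 6) lands on 7.  A local copy is defined
  // here presumably so this internal header need not depend on <algorithm>.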
  // Auxiliary types used for all instantiations of _Hashtable: nodes
  // and iterators.

  // Nodes, used to wrap elements stored in the hash table.  A policy
  // template parameter of class template _Hashtable controls whether
  // nodes also store a hash code.  In some cases (e.g. strings) this
  // may be a performance win.
  template<typename _Value, bool __cache_hash_code>
    struct _Hash_node;

  template<typename _Value>
    struct _Hash_node<_Value, true>
    {
      _Value       _M_v;
      std::size_t  _M_hash_code;
      _Hash_node*  _M_next;
    };

  template<typename _Value>
    struct _Hash_node<_Value, false>
    {
      _Value       _M_v;
      _Hash_node*  _M_next;
    };
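
  // The cached variant trades one extra std::size_t per node for cheaper
  // lookups and rehashes: a stored code lets _M_compare (below) reject
  // most non-matching nodes with a single integer comparison before ever
  // running key equality, and a rehash can reuse the stored code instead
  // of hashing (e.g. a long string) again.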
  // Local iterators, used to iterate within a bucket but not between
  // buckets.
  template<typename _Value, bool __cache>
    struct _Node_iterator_base
    {
      _Node_iterator_base(_Hash_node<_Value, __cache>* __p)
      : _M_cur(__p) { }

      void
      _M_incr()
      { _M_cur = _M_cur->_M_next; }

      _Hash_node<_Value, __cache>* _M_cur;
    };

  template<typename _Value, bool __cache>
    inline bool
    operator==(const _Node_iterator_base<_Value, __cache>& __x,
               const _Node_iterator_base<_Value, __cache>& __y)
    { return __x._M_cur == __y._M_cur; }

  template<typename _Value, bool __cache>
    inline bool
    operator!=(const _Node_iterator_base<_Value, __cache>& __x,
               const _Node_iterator_base<_Value, __cache>& __y)
    { return __x._M_cur != __y._M_cur; }
  template<typename _Value, bool __constant_iterators, bool __cache>
    struct _Node_iterator
    : public _Node_iterator_base<_Value, __cache>
    {
      typedef _Value value_type;
      typedef typename
        __gnu_cxx::__conditional_type<__constant_iterators,
                                      const _Value*, _Value*>::__type
        pointer;
      typedef typename
        __gnu_cxx::__conditional_type<__constant_iterators,
                                      const _Value&, _Value&>::__type
        reference;
      typedef std::ptrdiff_t difference_type;
      typedef std::forward_iterator_tag iterator_category;

      _Node_iterator()
      : _Node_iterator_base<_Value, __cache>(0) { }

      explicit
      _Node_iterator(_Hash_node<_Value, __cache>* __p)
      : _Node_iterator_base<_Value, __cache>(__p) { }

      reference
      operator*() const
      { return this->_M_cur->_M_v; }

      pointer
      operator->() const
      { return &this->_M_cur->_M_v; }

      _Node_iterator&
      operator++()
      {
        this->_M_incr();
        return *this;
      }

      _Node_iterator
      operator++(int)
      {
        _Node_iterator __tmp(*this);
        this->_M_incr();
        return __tmp;
      }
    };
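
  // __gnu_cxx::__conditional_type is a compile-time selector: for set-like
  // tables __constant_iterators is true and even _Node_iterator hands out
  // const _Value& / const _Value*, so stored keys can never be modified in
  // place; for map-like tables it yields mutable references (const-ness of
  // the key is enforced by the pair<const _Key, _Tp> value type instead).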
  template<typename _Value, bool __constant_iterators, bool __cache>
    struct _Node_const_iterator
    : public _Node_iterator_base<_Value, __cache>
    {
      typedef _Value value_type;
      typedef const _Value* pointer;
      typedef const _Value& reference;
      typedef std::ptrdiff_t difference_type;
      typedef std::forward_iterator_tag iterator_category;

      _Node_const_iterator()
      : _Node_iterator_base<_Value, __cache>(0) { }

      explicit
      _Node_const_iterator(_Hash_node<_Value, __cache>* __p)
      : _Node_iterator_base<_Value, __cache>(__p) { }

      _Node_const_iterator(const _Node_iterator<_Value, __constant_iterators,
                           __cache>& __x)
      : _Node_iterator_base<_Value, __cache>(__x._M_cur) { }

      reference
      operator*() const
      { return this->_M_cur->_M_v; }

      pointer
      operator->() const
      { return &this->_M_cur->_M_v; }

      _Node_const_iterator&
      operator++()
      {
        this->_M_incr();
        return *this;
      }

      _Node_const_iterator
      operator++(int)
      {
        _Node_const_iterator __tmp(*this);
        this->_M_incr();
        return __tmp;
      }
    };
  template<typename _Value, bool __cache>
    struct _Hashtable_iterator_base
    {
      _Hashtable_iterator_base(_Hash_node<_Value, __cache>* __node,
                               _Hash_node<_Value, __cache>** __bucket)
      : _M_cur_node(__node), _M_cur_bucket(__bucket) { }

      void
      _M_incr()
      {
        _M_cur_node = _M_cur_node->_M_next;
        if (!_M_cur_node)
          _M_incr_bucket();
      }

      void
      _M_incr_bucket();

      _Hash_node<_Value, __cache>*  _M_cur_node;
      _Hash_node<_Value, __cache>** _M_cur_bucket;
    };
  // Global iterators, used for arbitrary iteration within a hash
  // table.  Larger and more expensive than local iterators.
  template<typename _Value, bool __cache>
    void
    _Hashtable_iterator_base<_Value, __cache>::
    _M_incr_bucket()
    {
      ++_M_cur_bucket;

      // This loop requires the bucket array to have a non-null sentinel.
      while (!*_M_cur_bucket)
        ++_M_cur_bucket;
      _M_cur_node = *_M_cur_bucket;
    }
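
  // The sentinel mentioned above is why the scan needs no bounds check:
  // _Hashtable allocates one bucket past the last real one and stores an
  // arbitrary non-null pointer in it, so the loop always terminates, and
  // an iterator that steps onto the sentinel bucket compares equal to
  // end().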
  template<typename _Value, bool __cache>
    inline bool
    operator==(const _Hashtable_iterator_base<_Value, __cache>& __x,
               const _Hashtable_iterator_base<_Value, __cache>& __y)
    { return __x._M_cur_node == __y._M_cur_node; }

  template<typename _Value, bool __cache>
    inline bool
    operator!=(const _Hashtable_iterator_base<_Value, __cache>& __x,
               const _Hashtable_iterator_base<_Value, __cache>& __y)
    { return __x._M_cur_node != __y._M_cur_node; }
  template<typename _Value, bool __constant_iterators, bool __cache>
    struct _Hashtable_iterator
    : public _Hashtable_iterator_base<_Value, __cache>
    {
      typedef _Value value_type;
      typedef typename
        __gnu_cxx::__conditional_type<__constant_iterators,
                                      const _Value*, _Value*>::__type
        pointer;
      typedef typename
        __gnu_cxx::__conditional_type<__constant_iterators,
                                      const _Value&, _Value&>::__type
        reference;
      typedef std::ptrdiff_t difference_type;
      typedef std::forward_iterator_tag iterator_category;

      _Hashtable_iterator()
      : _Hashtable_iterator_base<_Value, __cache>(0, 0) { }

      _Hashtable_iterator(_Hash_node<_Value, __cache>* __p,
                          _Hash_node<_Value, __cache>** __b)
      : _Hashtable_iterator_base<_Value, __cache>(__p, __b) { }

      explicit
      _Hashtable_iterator(_Hash_node<_Value, __cache>** __b)
      : _Hashtable_iterator_base<_Value, __cache>(*__b, __b) { }

      reference
      operator*() const
      { return this->_M_cur_node->_M_v; }

      pointer
      operator->() const
      { return &this->_M_cur_node->_M_v; }

      _Hashtable_iterator&
      operator++()
      {
        this->_M_incr();
        return *this;
      }

      _Hashtable_iterator
      operator++(int)
      {
        _Hashtable_iterator __tmp(*this);
        this->_M_incr();
        return __tmp;
      }
    };
  template<typename _Value, bool __constant_iterators, bool __cache>
    struct _Hashtable_const_iterator
    : public _Hashtable_iterator_base<_Value, __cache>
    {
      typedef _Value value_type;
      typedef const _Value* pointer;
      typedef const _Value& reference;
      typedef std::ptrdiff_t difference_type;
      typedef std::forward_iterator_tag iterator_category;

      _Hashtable_const_iterator()
      : _Hashtable_iterator_base<_Value, __cache>(0, 0) { }

      _Hashtable_const_iterator(_Hash_node<_Value, __cache>* __p,
                                _Hash_node<_Value, __cache>** __b)
      : _Hashtable_iterator_base<_Value, __cache>(__p, __b) { }

      explicit
      _Hashtable_const_iterator(_Hash_node<_Value, __cache>** __b)
      : _Hashtable_iterator_base<_Value, __cache>(*__b, __b) { }

      _Hashtable_const_iterator(const _Hashtable_iterator<_Value,
                                __constant_iterators, __cache>& __x)
      : _Hashtable_iterator_base<_Value, __cache>(__x._M_cur_node,
                                                  __x._M_cur_bucket) { }

      reference
      operator*() const
      { return this->_M_cur_node->_M_v; }

      pointer
      operator->() const
      { return &this->_M_cur_node->_M_v; }

      _Hashtable_const_iterator&
      operator++()
      {
        this->_M_incr();
        return *this;
      }

      _Hashtable_const_iterator
      operator++(int)
      {
        _Hashtable_const_iterator __tmp(*this);
        this->_M_incr();
        return __tmp;
      }
    };
  // Many of class template _Hashtable's template parameters are policy
  // classes.  These are the defaults for the policies.

  // Default range hashing function: use division to fold a large number
  // into the range [0, N).
  struct _Mod_range_hashing
  {
    typedef std::size_t first_argument_type;
    typedef std::size_t second_argument_type;
    typedef std::size_t result_type;

    result_type
    operator()(first_argument_type __num, second_argument_type __den) const
    { return __num % __den; }
  };
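
  // Example: with 97 buckets, a raw hash code of 1000 is placed by
  // _Mod_range_hashing()(1000, 97), i.e. 1000 % 97 == 30, so the element
  // goes in bucket 30.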
  // Default ranged hash function H.  In principle it should be a
  // function object composed from objects of type H1 and H2 such that
  // h(k, N) = h2(h1(k), N), but that would mean making extra copies of
  // h1 and h2.  So instead we'll just use a tag to tell class template
  // hashtable to do that composition.
  struct _Default_ranged_hash { };
  // Default value for rehash policy.  Bucket size is (usually) the
  // smallest prime that keeps the load factor small enough.
  struct _Prime_rehash_policy
  {
    _Prime_rehash_policy(float __z = 1.0)
    : _M_max_load_factor(__z), _M_growth_factor(2.f), _M_next_resize(0) { }

    float
    max_load_factor() const
    { return _M_max_load_factor; }

    // Return a bucket size no smaller than n.
    std::size_t
    _M_next_bkt(std::size_t __n) const;

    // Return a bucket count appropriate for n elements.
    std::size_t
    _M_bkt_for_elements(std::size_t __n) const;

    // __n_bkt is current bucket count, __n_elt is current element count,
    // and __n_ins is number of elements to be inserted.  Do we need to
    // increase bucket count?  If so, return make_pair(true, n), where n
    // is the new bucket count.  If not, return make_pair(false, 0).
    std::pair<bool, std::size_t>
    _M_need_rehash(std::size_t __n_bkt, std::size_t __n_elt,
                   std::size_t __n_ins) const;

    enum { _S_n_primes = sizeof(unsigned long) != 8 ? 256 : 256 + 48 };

    float                _M_max_load_factor;
    float                _M_growth_factor;
    mutable std::size_t  _M_next_resize;
  };
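
  // So with the default maximum load factor of 1.0, a table asked to make
  // room for 100 elements searches __prime_list for the first prime not
  // smaller than 100 / 1.0; _M_next_resize then caches the element count
  // at which that bucket count will next be outgrown.  The 48 extra
  // primes on 64-bit targets cover the larger range of std::size_t.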
  extern const unsigned long __prime_list[];

  // XXX This is a hack.  There's no good reason for any of
  // _Prime_rehash_policy's member functions to be inline.

  // Return a prime no smaller than n.
  inline std::size_t
  _Prime_rehash_policy::
  _M_next_bkt(std::size_t __n) const
  {
    const unsigned long* __p = __lower_bound(__prime_list, __prime_list
                                             + _S_n_primes, __n);
    _M_next_resize =
      static_cast<std::size_t>(__builtin_ceil(*__p * _M_max_load_factor));
    return *__p;
  }
  // Return the smallest prime p such that alpha p >= n, where alpha
  // is the load factor.
  inline std::size_t
  _Prime_rehash_policy::
  _M_bkt_for_elements(std::size_t __n) const
  {
    const float __min_bkts = __n / _M_max_load_factor;
    const unsigned long* __p = __lower_bound(__prime_list, __prime_list
                                             + _S_n_primes, __min_bkts);
    _M_next_resize =
      static_cast<std::size_t>(__builtin_ceil(*__p * _M_max_load_factor));
    return *__p;
  }
  // Finds the smallest prime p such that alpha p > __n_elt + __n_ins.
  // If p > __n_bkt, return make_pair(true, p); otherwise return
  // make_pair(false, 0).  In principle this isn't very different from
  // _M_bkt_for_elements.

  // The only tricky part is that we're caching the element count at
  // which we need to rehash, so we don't have to do a floating-point
  // multiply for every insertion.

  inline std::pair<bool, std::size_t>
  _Prime_rehash_policy::
  _M_need_rehash(std::size_t __n_bkt, std::size_t __n_elt,
                 std::size_t __n_ins) const
  {
    if (__n_elt + __n_ins > _M_next_resize)
      {
        float __min_bkts = ((float(__n_ins) + float(__n_elt))
                            / _M_max_load_factor);
        if (__min_bkts > __n_bkt)
          {
            __min_bkts = std::max(__min_bkts, _M_growth_factor * __n_bkt);
            const unsigned long* __p =
              __lower_bound(__prime_list, __prime_list + _S_n_primes,
                            __min_bkts);
            _M_next_resize = static_cast<std::size_t>
              (__builtin_ceil(*__p * _M_max_load_factor));
            return std::make_pair(true, *__p);
          }
        else
          {
            _M_next_resize = static_cast<std::size_t>
              (__builtin_ceil(__n_bkt * _M_max_load_factor));
            return std::make_pair(false, 0);
          }
      }
    else
      return std::make_pair(false, 0);
  }
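
  // Worked example with the defaults (max load factor 1.0, growth factor
  // 2.0): suppose the last resize left 97 buckets and _M_next_resize == 97.
  // Inserting 1 element into a table of 97 gives 98 > 97, so we recompute:
  // __min_bkts = 98 / 1.0 = 98 > 97, hence a rehash is due, and
  // std::max(98.f, 2.f * 97) == 194 sends __lower_bound after the first
  // entry of __prime_list that is at least 194.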
  // Base classes for std::tr1::_Hashtable.  We define these base
  // classes because in some cases we want to do different things
  // depending on the value of a policy class.  In some cases the
  // policy class affects which member functions and nested typedefs
  // are defined; we handle that by specializing base class templates.
  // Several of the base class templates need to access other members
  // of class template _Hashtable, so we use the "curiously recurring
  // template pattern" for them.

  // class template _Map_base.  If the hashtable has a value type of the
  // form pair<T1, T2> and a key extraction policy that returns the
  // first part of the pair, the hashtable gets a mapped_type typedef.
  // If it satisfies those criteria and also has unique keys, then it
  // also gets an operator[].
  template<typename _Key, typename _Value, typename _Ex, bool __unique,
           typename _Hashtable>
    struct _Map_base { };

  template<typename _Key, typename _Pair, typename _Hashtable>
    struct _Map_base<_Key, _Pair, std::_Select1st<_Pair>, false, _Hashtable>
    {
      typedef typename _Pair::second_type mapped_type;
    };

  template<typename _Key, typename _Pair, typename _Hashtable>
    struct _Map_base<_Key, _Pair, std::_Select1st<_Pair>, true, _Hashtable>
    {
      typedef typename _Pair::second_type mapped_type;

      mapped_type&
      operator[](const _Key& __k);
    };

  template<typename _Key, typename _Pair, typename _Hashtable>
    typename _Map_base<_Key, _Pair, std::_Select1st<_Pair>,
                       true, _Hashtable>::mapped_type&
    _Map_base<_Key, _Pair, std::_Select1st<_Pair>, true, _Hashtable>::
    operator[](const _Key& __k)
    {
      _Hashtable* __h = static_cast<_Hashtable*>(this);
      typename _Hashtable::_Hash_code_type __code = __h->_M_hash_code(__k);
      std::size_t __n = __h->_M_bucket_index(__k, __code,
                                             __h->_M_bucket_count);

      typename _Hashtable::_Node* __p =
        __h->_M_find_node(__h->_M_buckets[__n], __k, __code);
      if (!__p)
        return __h->_M_insert_bucket(std::make_pair(__k, mapped_type()),
                                     __n, __code)->second;
      return (__p->_M_v).second;
    }
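
  // The static_cast above is the CRTP downcast: _Hashtable derives from
  // _Map_base<..., _Hashtable>, so "this" really points at the complete
  // table and may use its _M_hash_code, _M_find_node and _M_insert_bucket
  // members.  The net effect is the familiar find-or-insert semantics of
  // unordered_map: __m[__k] default-constructs and inserts the mapped
  // value when __k is absent, then returns a reference to it either way.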
  // class template _Rehash_base.  Give hashtable the max_load_factor
  // functions iff the rehash policy is _Prime_rehash_policy.
  template<typename _RehashPolicy, typename _Hashtable>
    struct _Rehash_base { };

  template<typename _Hashtable>
    struct _Rehash_base<_Prime_rehash_policy, _Hashtable>
    {
      float
      max_load_factor() const
      {
        const _Hashtable* __this = static_cast<const _Hashtable*>(this);
        return __this->__rehash_policy().max_load_factor();
      }

      void
      max_load_factor(float __z)
      {
        _Hashtable* __this = static_cast<_Hashtable*>(this);
        __this->__rehash_policy(_Prime_rehash_policy(__z));
      }
    };
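
  // Note that the setter installs a whole new _Prime_rehash_policy rather
  // than mutating the old one; _Hashtable's __rehash_policy member may
  // then rehash immediately if the current element count already exceeds
  // what the new maximum load factor allows.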
  // Class template _Hash_code_base.  Encapsulates two policy issues that
  // aren't quite orthogonal.
  //   (1) the difference between using a ranged hash function and using
  //       the combination of a hash function and a range-hashing function.
  //       In the former case we don't have such things as hash codes, so
  //       we have a dummy type as placeholder.
  //   (2) Whether or not we cache hash codes.  Caching hash codes is
  //       meaningless if we have a ranged hash function.
  // We also put the key extraction and equality comparison function
  // objects here, for convenience.

  // Primary template: unused except as a hook for specializations.
  template<typename _Key, typename _Value,
           typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash,
           bool __cache_hash_code>
    struct _Hash_code_base;
  // Specialization: ranged hash function, no caching hash codes.  H1
  // and H2 are provided but ignored.  We define a dummy hash code type.
  template<typename _Key, typename _Value,
           typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash>
    struct _Hash_code_base<_Key, _Value, _ExtractKey, _Equal, _H1, _H2,
                           _Hash, false>
    {
    protected:
      _Hash_code_base(const _ExtractKey& __ex, const _Equal& __eq,
                      const _H1&, const _H2&, const _Hash& __h)
      : _M_extract(__ex), _M_eq(__eq), _M_ranged_hash(__h) { }

      typedef void* _Hash_code_type;

      _Hash_code_type
      _M_hash_code(const _Key& __key) const
      { return 0; }

      std::size_t
      _M_bucket_index(const _Key& __k, _Hash_code_type,
                      std::size_t __n) const
      { return _M_ranged_hash(__k, __n); }

      std::size_t
      _M_bucket_index(const _Hash_node<_Value, false>* __p,
                      std::size_t __n) const
      { return _M_ranged_hash(_M_extract(__p->_M_v), __n); }

      bool
      _M_compare(const _Key& __k, _Hash_code_type,
                 _Hash_node<_Value, false>* __n) const
      { return _M_eq(__k, _M_extract(__n->_M_v)); }

      void
      _M_store_code(_Hash_node<_Value, false>*, _Hash_code_type) const
      { }

      void
      _M_copy_code(_Hash_node<_Value, false>*,
                   const _Hash_node<_Value, false>*) const
      { }

      void
      _M_swap(_Hash_code_base& __x)
      {
        std::swap(_M_extract, __x._M_extract);
        std::swap(_M_eq, __x._M_eq);
        std::swap(_M_ranged_hash, __x._M_ranged_hash);
      }

    protected:
      _ExtractKey  _M_extract;
      _Equal       _M_eq;
      _Hash        _M_ranged_hash;
    };
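
  // In this configuration a user-supplied ranged hash is called as
  // _M_ranged_hash(__k, __n) and must itself map the key into [0, __n).
  // No intermediate hash code ever exists, so _M_hash_code returns a
  // dummy null value, _M_store_code and _M_copy_code are no-ops, and
  // _M_compare falls back to plain key equality.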
  // No specialization for ranged hash function while caching hash codes.
  // That combination is meaningless, and trying to do it is an error.

  // Specialization: ranged hash function, cache hash codes.  This
  // combination is meaningless, so we provide only a declaration
  // and no definition.
  template<typename _Key, typename _Value,
           typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash>
    struct _Hash_code_base<_Key, _Value, _ExtractKey, _Equal, _H1, _H2,
                           _Hash, true>;
  // Specialization: hash function and range-hashing function, no
  // caching of hash codes.  H is provided but ignored.  Provides
  // typedef and accessor required by TR1.
  template<typename _Key, typename _Value,
           typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2>
    struct _Hash_code_base<_Key, _Value, _ExtractKey, _Equal, _H1, _H2,
                           _Default_ranged_hash, false>
    {
      typedef _H1 hasher;

      hasher
      hash_function() const
      { return _M_h1; }

    protected:
      _Hash_code_base(const _ExtractKey& __ex, const _Equal& __eq,
                      const _H1& __h1, const _H2& __h2,
                      const _Default_ranged_hash&)
      : _M_extract(__ex), _M_eq(__eq), _M_h1(__h1), _M_h2(__h2) { }

      typedef std::size_t _Hash_code_type;

      _Hash_code_type
      _M_hash_code(const _Key& __k) const
      { return _M_h1(__k); }

      std::size_t
      _M_bucket_index(const _Key&, _Hash_code_type __c,
                      std::size_t __n) const
      { return _M_h2(__c, __n); }

      std::size_t
      _M_bucket_index(const _Hash_node<_Value, false>* __p,
                      std::size_t __n) const
      { return _M_h2(_M_h1(_M_extract(__p->_M_v)), __n); }

      bool
      _M_compare(const _Key& __k, _Hash_code_type,
                 _Hash_node<_Value, false>* __n) const
      { return _M_eq(__k, _M_extract(__n->_M_v)); }

      void
      _M_store_code(_Hash_node<_Value, false>*, _Hash_code_type) const
      { }

      void
      _M_copy_code(_Hash_node<_Value, false>*,
                   const _Hash_node<_Value, false>*) const
      { }

      void
      _M_swap(_Hash_code_base& __x)
      {
        std::swap(_M_extract, __x._M_extract);
        std::swap(_M_eq, __x._M_eq);
        std::swap(_M_h1, __x._M_h1);
        std::swap(_M_h2, __x._M_h2);
      }

    protected:
      _ExtractKey  _M_extract;
      _Equal       _M_eq;
      _H1          _M_h1;
      _H2          _M_h2;
    };
  // Specialization: hash function and range-hashing function,
  // caching hash codes.  H is provided but ignored.  Provides
  // typedef and accessor required by TR1.
  template<typename _Key, typename _Value,
           typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2>
    struct _Hash_code_base<_Key, _Value, _ExtractKey, _Equal, _H1, _H2,
                           _Default_ranged_hash, true>
    {
      typedef _H1 hasher;

      hasher
      hash_function() const
      { return _M_h1; }

    protected:
      _Hash_code_base(const _ExtractKey& __ex, const _Equal& __eq,
                      const _H1& __h1, const _H2& __h2,
                      const _Default_ranged_hash&)
      : _M_extract(__ex), _M_eq(__eq), _M_h1(__h1), _M_h2(__h2) { }

      typedef std::size_t _Hash_code_type;

      _Hash_code_type
      _M_hash_code(const _Key& __k) const
      { return _M_h1(__k); }

      std::size_t
      _M_bucket_index(const _Key&, _Hash_code_type __c,
                      std::size_t __n) const
      { return _M_h2(__c, __n); }

      std::size_t
      _M_bucket_index(const _Hash_node<_Value, true>* __p,
                      std::size_t __n) const
      { return _M_h2(__p->_M_hash_code, __n); }

      bool
      _M_compare(const _Key& __k, _Hash_code_type __c,
                 _Hash_node<_Value, true>* __n) const
      { return __c == __n->_M_hash_code && _M_eq(__k, _M_extract(__n->_M_v)); }

      void
      _M_store_code(_Hash_node<_Value, true>* __n, _Hash_code_type __c) const
      { __n->_M_hash_code = __c; }

      void
      _M_copy_code(_Hash_node<_Value, true>* __to,
                   const _Hash_node<_Value, true>* __from) const
      { __to->_M_hash_code = __from->_M_hash_code; }

      void
      _M_swap(_Hash_code_base& __x)
      {
        std::swap(_M_extract, __x._M_extract);
        std::swap(_M_eq, __x._M_eq);
        std::swap(_M_h1, __x._M_h1);
        std::swap(_M_h2, __x._M_h2);
      }

    protected:
      _ExtractKey  _M_extract;
      _Equal       _M_eq;
      _H1          _M_h1;
      _H2          _M_h2;
    };
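
  // Putting the pieces together for the common case (_H1 = tr1::hash<_Key>,
  // _H2 = _Mod_range_hashing, caching enabled): the bucket for a key is
  // _M_h2(_M_h1(__k), __n), i.e. hash(__k) % __n, and during lookup the
  // full key equality in _M_compare runs only for nodes whose cached codes
  // already match __c.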
} // namespace __detail

_GLIBCXX_END_NAMESPACE_TR1
}