1 // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 //! Unordered containers, implemented as hash-tables (`HashSet` and `HashMap` types)
12
13 use std::container::{Container, Mutable, Map, MutableMap, Set, MutableSet};
14 use std::clone::Clone;
15 use std::cmp::{Eq, TotalEq, Equiv, max};
16 use std::default::Default;
17 use std::fmt;
18 use std::fmt::Show;
19 use std::hash::{Hash, Hasher, sip};
20 use std::iter;
21 use std::iter::{Iterator, FromIterator, Extendable};
22 use std::iter::{FilterMap, Chain, Repeat, Zip};
23 use std::iter::{range, range_inclusive};
24 use std::mem::replace;
25 use std::num;
26 use std::option::{Option, Some, None};
27 use rand;
28 use rand::Rng;
29 use std::result::{Ok, Err};
30 use std::slice::ImmutableVector;
31
32 mod table {
33 extern crate libc;
34
35 use std::clone::Clone;
36 use std::cmp;
37 use std::cmp::Eq;
38 use std::hash::{Hash, Hasher};
39 use std::kinds::marker;
40 use std::num::{CheckedMul, is_power_of_two};
41 use std::option::{Option, Some, None};
42 use std::prelude::Drop;
43 use std::ptr;
44 use std::ptr::RawPtr;
45 use std::rt::global_heap;
46 use std::intrinsics::{size_of, min_align_of, transmute};
47 use std::intrinsics::{move_val_init, set_memory};
48 use std::iter::{Iterator, range_step_inclusive};
49
50 static EMPTY_BUCKET: u64 = 0u64;
51
52 /// The raw hashtable, providing safe-ish access to the unzipped and highly
53 /// optimized arrays of hashes, keys, and values.
54 ///
55 /// This design uses less memory and is a lot faster than the naive
56 /// `Vec<Option<(u64, K, V)>>`, because we don't pay for the overhead of an
57 /// option on every element, and we get a generally more cache-aware design.
58 ///
59 /// Key invariants of this structure:
60 ///
61 /// - if hashes[i] == EMPTY_BUCKET, then keys[i] and vals[i] have
62 /// 'undefined' contents. Don't read from them. This invariant is
63 /// enforced outside this module with the `EmptyIndex`, `FullIndex`,
64 /// and `SafeHash` types.
65 ///
66 /// - An `EmptyIndex` is only constructed for a bucket at an index with
67 /// a hash of EMPTY_BUCKET.
68 ///
69 /// - A `FullIndex` is only constructed for a bucket at an index with a
70 /// non-EMPTY_BUCKET hash.
71 ///
72 /// - A `SafeHash` is only constructed for a non-`EMPTY_BUCKET` hash. We get
73 /// around hashes of zero by changing them to 0x8000_0000_0000_0000,
74 /// which will likely map to the same bucket, while not being confused
75 /// with "empty".
76 ///
77 /// - All three "arrays represented by pointers" are the same length:
78 /// `capacity`. This is set at creation and never changes. The arrays
79 /// are unzipped to save space (we don't have to pay for the padding
80 /// between odd-sized elements, such as in a map from u64 to u8), and
81 /// to be more cache-aware (scanning through 8 hashes brings in 2 cache
82 /// lines, since they're all right beside each other).
83 ///
84 /// You can kind of think of this module/data structure as a safe wrapper
85 /// around just the "table" part of the hashtable. It enforces some
86 /// invariants at the type level and employs some performance trickery,
87 /// but in general is just a tricked out `Vec<Option<(u64, K, V)>>`.
88 ///
89 /// FIXME(cgaebel):
90 ///
91 /// Feb 11, 2014: This hashtable was just implemented, and, hard as I tried,
92 /// isn't yet totally safe. There's a "known exploit" that you can create
93 /// multiple FullIndexes for a bucket, `take` one, and then still `take`
94 /// the other, causing undefined behavior. Currently, there's no story
95 /// for how to protect against this statically. Therefore, there are asserts
96 /// on `take`, `get`, `get_mut`, and `put` which check the bucket state.
97 /// With time, and when we're confident this works correctly, they should
98 /// be removed. Also, the bounds check in `peek` is especially painful,
99 /// as that's called in the innermost loops of the hashtable and has the
100 /// potential to be a major performance drain. Remove this too.
101 ///
102 /// Or, better than remove, only enable these checks for debug builds.
103 /// There are currently no "debug-only" asserts in Rust, so if you're reading
104 /// this and going "what? of course there are debug-only asserts!", then
105 /// please make this use them!
106 pub struct RawTable<K, V> {
107 capacity: uint,
108 size: uint,
109 hashes: *mut u64,
110 keys: *mut K,
111 vals: *mut V,
112 }
113
114 /// Represents an index into a `RawTable` with no key or value in it.
115 pub struct EmptyIndex {
116 idx: int,
117 nocopy: marker::NoCopy,
118 }
119
120 /// Represents an index into a `RawTable` with a key, value, and hash
121 /// in it.
122 pub struct FullIndex {
123 idx: int,
124 hash: SafeHash,
125 nocopy: marker::NoCopy,
126 }
127
128 impl FullIndex {
129 /// Since we get the hash for free whenever we check the bucket state,
130 /// this function is provided for fast access, letting us avoid
131 /// redundant trips back to the hashtable.
132 #[inline(always)]
133 pub fn hash(&self) -> SafeHash { self.hash }
134
135 /// Same comment as with `hash`.
136 #[inline(always)]
137 pub fn raw_index(&self) -> uint { self.idx as uint }
138 }
139
140 /// Represents the state of a bucket: it can either have a key/value
141 /// pair (be full) or not (be empty). You cannot `take` empty buckets,
142 /// and you cannot `put` into full buckets.
143 pub enum BucketState {
144 Empty(EmptyIndex),
145 Full(FullIndex),
146 }
147
148 /// A hash that is not zero, since we use a hash of zero to represent empty
149 /// buckets.
150 #[deriving(Eq)]
151 pub struct SafeHash {
152 hash: u64,
153 }
154
155 impl SafeHash {
156 /// Peek at the hash value, which is guaranteed to be non-zero.
157 #[inline(always)]
158 pub fn inspect(&self) -> u64 { self.hash }
159 }
160
161 /// We need to remove hashes of 0. That's reserved for empty buckets.
162 /// This function wraps up the hasher's `hash` call to be the only way
163 /// outside this module to generate a SafeHash.
164 pub fn make_hash<T: Hash<S>, S, H: Hasher<S>>(hasher: &H, t: &T) -> SafeHash {
165 match hasher.hash(t) {
166 // This constant is exceedingly likely to hash to the same
167 // bucket, but it won't be counted as empty!
168 EMPTY_BUCKET => SafeHash { hash: 0x8000_0000_0000_0000 },
169 h => SafeHash { hash: h },
170 }
171 }
172
173 fn round_up_to_next(unrounded: uint, target_alignment: uint) -> uint {
174 assert!(is_power_of_two(target_alignment));
175 (unrounded + target_alignment - 1) & !(target_alignment - 1)
176 }
177
178 #[test]
179 fn test_rounding() {
180 assert_eq!(round_up_to_next(0, 4), 0);
181 assert_eq!(round_up_to_next(1, 4), 4);
182 assert_eq!(round_up_to_next(2, 4), 4);
183 assert_eq!(round_up_to_next(3, 4), 4);
184 assert_eq!(round_up_to_next(4, 4), 4);
185 assert_eq!(round_up_to_next(5, 4), 8);
186 }
187
188 fn has_alignment(n: uint, alignment: uint) -> bool {
189 round_up_to_next(n, alignment) == n
190 }
191
192 // Returns a tuple of (minimum required malloc alignment, hash_offset,
193 // key_offset, val_offset, array_size), from the start of a mallocated array.
194 fn calculate_offsets(
195 hash_size: uint, hash_align: uint,
196 keys_size: uint, keys_align: uint,
197 vals_size: uint, vals_align: uint) -> (uint, uint, uint, uint, uint) {
198
199 let hash_offset = 0;
200 let end_of_hashes = hash_offset + hash_size;
201
202 let keys_offset = round_up_to_next(end_of_hashes, keys_align);
203 let end_of_keys = keys_offset + keys_size;
204
205 let vals_offset = round_up_to_next(end_of_keys, vals_align);
206 let end_of_vals = vals_offset + vals_size;
207
208 let min_align = cmp::max(hash_align, cmp::max(keys_align, vals_align));
209
210 (min_align, hash_offset, keys_offset, vals_offset, end_of_vals)
211 }
212
213 #[test]
214 fn test_offset_calculation() {
215 assert_eq!(calculate_offsets(128, 8, 15, 1, 4, 4 ), (8, 0, 128, 144, 148));
216 assert_eq!(calculate_offsets(3, 1, 2, 1, 1, 1 ), (1, 0, 3, 5, 6));
217 assert_eq!(calculate_offsets(6, 2, 12, 4, 24, 8), (8, 0, 8, 24, 48));
218 }
219
220 impl<K, V> RawTable<K, V> {
221
222 /// Does not initialize the buckets. The caller should ensure that,
223 /// at the very least, every hash is set to EMPTY_BUCKET.
224 unsafe fn new_uninitialized(capacity: uint) -> RawTable<K, V> {
225 let hashes_size =
226 capacity.checked_mul(&size_of::<u64>()).expect("capacity overflow");
227 let keys_size =
228 capacity.checked_mul(&size_of::< K >()).expect("capacity overflow");
229 let vals_size =
230 capacity.checked_mul(&size_of::< V >()).expect("capacity overflow");
231
232 // Allocating hashmaps is a little tricky. We need to allocate three
233 // arrays, but since we know their sizes and alignments up front,
234 // we just allocate a single array, and then have the subarrays
235 // point into it.
236 //
237 // This is great in theory, but in practice getting the alignment
238 // right is a little subtle. Therefore, calculating offsets has been
239 // factored out into a different function.
240 let (malloc_alignment, hash_offset, keys_offset, vals_offset, size) =
241 calculate_offsets(
242 hashes_size, min_align_of::<u64>(),
243 keys_size, min_align_of::< K >(),
244 vals_size, min_align_of::< V >());
245
246 let buffer = global_heap::malloc_raw(size) as *mut u8;
247
248 // FIXME #13094: If malloc was not as aligned as we expected,
249 // our offset calculations are just plain wrong. We could support
250 // any alignment if we switched from `malloc` to `posix_memalign`.
251 assert!(has_alignment(buffer as uint, malloc_alignment));
252
253 let hashes = buffer.offset(hash_offset as int) as *mut u64;
254 let keys = buffer.offset(keys_offset as int) as *mut K;
255 let vals = buffer.offset(vals_offset as int) as *mut V;
256
257 RawTable {
258 capacity: capacity,
259 size: 0,
260 hashes: hashes,
261 keys: keys,
262 vals: vals,
263 }
264 }
265
266 /// Creates a new raw table from a given capacity. All buckets are
267 /// initially empty.
268 pub fn new(capacity: uint) -> RawTable<K, V> {
269 unsafe {
270 let ret = RawTable::new_uninitialized(capacity);
271 set_memory(ret.hashes, 0u8, capacity);
272 ret
273 }
274 }
275
276 /// Reads a bucket at a given index, returning an enum indicating whether
277 /// there's anything there or not. You need to match on this enum to get
278 /// the appropriate types to pass on to most of the other functions in
279 /// this module.
280 pub fn peek(&self, index: uint) -> BucketState {
281 debug_assert!(index < self.capacity);
282
283 let idx = index as int;
284 let hash = unsafe { *self.hashes.offset(idx) };
285
286 let nocopy = marker::NoCopy;
287
288 match hash {
289 EMPTY_BUCKET =>
290 Empty(EmptyIndex {
291 idx: idx,
292 nocopy: nocopy
293 }),
294 full_hash =>
295 Full(FullIndex {
296 idx: idx,
297 hash: SafeHash { hash: full_hash },
298 nocopy: nocopy,
299 })
300 }
301 }
302
303 /// Gets references to the key and value at a given index.
304 pub fn read<'a>(&'a self, index: &FullIndex) -> (&'a K, &'a V) {
305 let idx = index.idx;
306
307 unsafe {
308 debug_assert!(*self.hashes.offset(idx) != EMPTY_BUCKET);
309 (&'a *self.keys.offset(idx),
310 &'a *self.vals.offset(idx))
311 }
312 }
313
314 /// Gets references to the key and value at a given index, with the
315 /// value's reference being mutable.
316 pub fn read_mut<'a>(&'a mut self, index: &FullIndex) -> (&'a K, &'a mut V) {
317 let idx = index.idx;
318
319 unsafe {
320 debug_assert!(*self.hashes.offset(idx) != EMPTY_BUCKET);
321 (&'a *self.keys.offset(idx),
322 &'a mut *self.vals.offset(idx))
323 }
324 }
325
326 /// Read everything, mutably.
327 pub fn read_all_mut<'a>(&'a mut self, index: &FullIndex)
328 -> (&'a mut SafeHash, &'a mut K, &'a mut V) {
329 let idx = index.idx;
330
331 unsafe {
332 debug_assert!(*self.hashes.offset(idx) != EMPTY_BUCKET);
333 (transmute(self.hashes.offset(idx)),
334 &'a mut *self.keys.offset(idx),
335 &'a mut *self.vals.offset(idx))
336 }
337 }
338
339 /// Puts a key and value pair, along with the key's hash, into a given
340 /// index in the hashtable. Note how the `EmptyIndex` is 'moved' into this
341 /// function, because that slot will no longer be empty when we return!
342 /// A FullIndex is returned for later use, pointing to the newly-filled
343 /// slot in the hashtable.
344 ///
345 /// Use `make_hash` to construct a `SafeHash` to pass to this function.
346 pub fn put(&mut self, index: EmptyIndex, hash: SafeHash, k: K, v: V) -> FullIndex {
347 let idx = index.idx;
348
349 unsafe {
350 debug_assert_eq!(*self.hashes.offset(idx), EMPTY_BUCKET);
351 *self.hashes.offset(idx) = hash.inspect();
352 move_val_init(&mut *self.keys.offset(idx), k);
353 move_val_init(&mut *self.vals.offset(idx), v);
354 }
355
356 self.size += 1;
357
358 FullIndex { idx: idx, hash: hash, nocopy: marker::NoCopy }
359 }
360
361 /// Removes a key and value from the hashtable.
362 ///
363 /// This works similarly to `put`, building an `EmptyIndex` out of the
364 /// taken FullIndex.
365 pub fn take(&mut self, index: FullIndex) -> (EmptyIndex, K, V) {
366 let idx = index.idx;
367
368 unsafe {
369 debug_assert!(*self.hashes.offset(idx) != EMPTY_BUCKET);
370
371 *self.hashes.offset(idx) = EMPTY_BUCKET;
372
373 // Drop the mutable constraint.
374 let keys = self.keys as *K;
375 let vals = self.vals as *V;
376
377 let k = ptr::read(keys.offset(idx));
378 let v = ptr::read(vals.offset(idx));
379
380 self.size -= 1;
381
382 (EmptyIndex { idx: idx, nocopy: marker::NoCopy }, k, v)
383 }
384 }
385
386 /// The hashtable's capacity, similar to a vector's.
387 pub fn capacity(&self) -> uint {
388 self.capacity
389 }
390
391 /// The number of elements ever `put` in the hashtable, minus the number
392 /// of elements ever `take`n.
393 pub fn size(&self) -> uint {
394 self.size
395 }
396
397 pub fn iter<'a>(&'a self) -> Entries<'a, K, V> {
398 Entries { table: self, idx: 0, elems_seen: 0 }
399 }
400
401 pub fn mut_iter<'a>(&'a mut self) -> MutEntries<'a, K, V> {
402 MutEntries { table: self, idx: 0, elems_seen: 0 }
403 }
404
405 pub fn move_iter(self) -> MoveEntries<K, V> {
406 MoveEntries { table: self, idx: 0, elems_seen: 0 }
407 }
408 }
409
410 // `read_all_mut` casts a `*u64` to a `*SafeHash`. Since we statically
411 // ensure that a `FullIndex` points to an index with a non-zero hash,
412 // and a `SafeHash` is just a `u64` with a different name, this is
413 // safe.
414 //
415 // This test ensures that a `SafeHash` really IS the same size as a
416 // `u64`. If you need to change the size of `SafeHash` (and
417 // consequently make this test fail), `read_all_mut` needs to be
418 // modified to no longer assume this.
419 #[test]
420 fn can_alias_safehash_as_u64() {
421 unsafe { assert_eq!(size_of::<SafeHash>(), size_of::<u64>()) };
422 }
423
424 pub struct Entries<'a, K, V> {
425 table: &'a RawTable<K, V>,
426 idx: uint,
427 elems_seen: uint,
428 }
429
430 pub struct MutEntries<'a, K, V> {
431 table: &'a mut RawTable<K, V>,
432 idx: uint,
433 elems_seen: uint,
434 }
435
436 pub struct MoveEntries<K, V> {
437 table: RawTable<K, V>,
438 idx: uint,
439 elems_seen: uint,
440 }
441
442 impl<'a, K, V> Iterator<(&'a K, &'a V)> for Entries<'a, K, V> {
443 fn next(&mut self) -> Option<(&'a K, &'a V)> {
444 while self.idx < self.table.capacity() {
445 let i = self.idx;
446 self.idx += 1;
447
448 match self.table.peek(i) {
449 Empty(_) => {},
450 Full(idx) => {
451 self.elems_seen += 1;
452 return Some(self.table.read(&idx));
453 }
454 }
455 }
456
457 None
458 }
459
460 fn size_hint(&self) -> (uint, Option<uint>) {
461 let size = self.table.size() - self.elems_seen;
462 (size, Some(size))
463 }
464 }
465
466 impl<'a, K, V> Iterator<(&'a K, &'a mut V)> for MutEntries<'a, K, V> {
467 fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
468 while self.idx < self.table.capacity() {
469 let i = self.idx;
470 self.idx += 1;
471
472 match self.table.peek(i) {
473 Empty(_) => {},
474 // the transmute here fixes:
475 // error: lifetime of `self` is too short to guarantee its contents
476 // can be safely reborrowed
477 Full(idx) => unsafe {
478 self.elems_seen += 1;
479 return Some(transmute(self.table.read_mut(&idx)));
480 }
481 }
482 }
483
484 None
485 }
486
487 fn size_hint(&self) -> (uint, Option<uint>) {
488 let size = self.table.size() - self.elems_seen;
489 (size, Some(size))
490 }
491 }
492
493 impl<K, V> Iterator<(SafeHash, K, V)> for MoveEntries<K, V> {
494 fn next(&mut self) -> Option<(SafeHash, K, V)> {
495 while self.idx < self.table.capacity() {
496 let i = self.idx;
497 self.idx += 1;
498
499 match self.table.peek(i) {
500 Empty(_) => {},
501 Full(idx) => {
502 let h = idx.hash();
503 let (_, k, v) = self.table.take(idx);
504 return Some((h, k, v));
505 }
506 }
507 }
508
509 None
510 }
511
512 fn size_hint(&self) -> (uint, Option<uint>) {
513 let size = self.table.size();
514 (size, Some(size))
515 }
516 }
517
518 impl<K: Clone, V: Clone> Clone for RawTable<K, V> {
519 fn clone(&self) -> RawTable<K, V> {
520 unsafe {
521 let mut new_ht = RawTable::new_uninitialized(self.capacity());
522
523 for i in range(0, self.capacity()) {
524 match self.peek(i) {
525 Empty(_) => {
526 *new_ht.hashes.offset(i as int) = EMPTY_BUCKET;
527 },
528 Full(idx) => {
529 let hash = idx.hash().inspect();
530 let (k, v) = self.read(&idx);
531 *new_ht.hashes.offset(i as int) = hash;
532 move_val_init(&mut *new_ht.keys.offset(i as int), (*k).clone());
533 move_val_init(&mut *new_ht.vals.offset(i as int), (*v).clone());
534 }
535 }
536 }
537
538 new_ht.size = self.size();
539
540 new_ht
541 }
542 }
543 }
544
545 #[unsafe_destructor]
546 impl<K, V> Drop for RawTable<K, V> {
547 fn drop(&mut self) {
548 // This is in reverse because we're likely to have partially taken
549 // some elements out with `.move_iter()` from the front.
550 for i in range_step_inclusive(self.capacity as int - 1, 0, -1) {
551 // Check if the size is 0, so we don't do a useless scan when
552 // dropping empty tables such as on resize.
553 if self.size == 0 { break }
554
555 match self.peek(i as uint) {
556 Empty(_) => {},
557 Full(idx) => { self.take(idx); }
558 }
559 }
560
561 assert_eq!(self.size, 0);
562
563 unsafe {
564 libc::free(self.hashes as *mut libc::c_void);
565 // Remember how everything was allocated out of one buffer
566 // during initialization? We only need one call to free here.
567 }
568 }
569 }
570 }
571
572 // We use this type for the load factor, to avoid floating point operations
573 // which might not be supported efficiently on some hardware.
574 //
575 // We use small u16s here to save space in the hashtable. They get upcasted
576 // to u64s when we actually use them.
577 type Fraction = (u16, u16); // (numerator, denominator)
578
579 // Multiplication by a fraction, in a way that won't generally overflow
580 // unless the array size is within a factor of 10 of U64_MAX.
581 fn fraction_mul(lhs: uint, (num, den): Fraction) -> uint {
582 (((lhs as u64) * (num as u64)) / (den as u64)) as uint
583 }
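// As a concrete illustration (not part of the implementation): with the
// default load factor of (9, 10) defined below, a table of capacity 32
// grows once its size reaches fraction_mul(32, (9, 10)) == 32 * 9 / 10 == 28.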
584
585 static INITIAL_LOG2_CAP: uint = 5;
586 static INITIAL_CAPACITY: uint = 1 << INITIAL_LOG2_CAP; // 2^5
587 static INITIAL_LOAD_FACTOR: Fraction = (9, 10);
588
589 // The main performance trick in this hashmap is called Robin Hood Hashing.
590 // It gains its excellent performance from one key invariant:
591 //
592 // If an insertion collides with an existing element whose "probe distance"
593 // (how far that element is from its ideal location) is lower than how far
594 // we've already probed, swap the elements and re-insert the displaced one.
595 //
596 // This massively lowers variance in probe distance, and allows us to get very
597 // high load factors with good performance. The 90% load factor I use is rather
598 // conservative.
599 //
600 // > Why a load factor of 90%?
601 //
602 // In general, all the distances to initial buckets will converge on the mean.
603 // At a load factor of α, the odds of finding the target bucket after k
604 // probes are approximately 1-α^k. If we set this equal to 50% (since we converge
605 // on the mean) and set k=8 (64-byte cache line / 8-byte hash), α=0.92. I round
606 // this down to 0.90 to make the math easier on the CPU and avoid its FPU.
607 // Since on average we start the probing in the middle of a cache line, this
608 // strategy pulls in two cache lines of hashes on every lookup. I think that's
609 // pretty good, but if you want to trade off some space, it could go down to one
610 // cache line on average with an α of 0.84.
611 //
612 // > Wait, what? Where did you get 1-α^k from?
613 //
614 // On the first probe, your odds of a collision with an existing element are α.
615 // The odds of doing this twice in a row are approximately α^2. For three times,
616 // α^3, etc. Therefore, the odds of colliding k times are α^k. The odds of NOT
617 // colliding after k tries are 1-α^k.
618 //
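// As a quick check of that arithmetic (illustrative only): solving
// 1-α^k = 0.5 for k = 8 gives α = 0.5^(1/8) ≈ 0.917, which rounds down to
// the 0.90 load factor used here; for k = 4 (one cache line on average),
// α = 0.5^(1/4) ≈ 0.84, matching the figure quoted above.
//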
619 // Future Improvements (FIXME!)
620 // ============================
621 //
622 // Allow the load factor to be changed dynamically and/or at initialization.
623 // I'm having trouble figuring out a sane API for this without exporting my
624 // hackish fraction type, while still avoiding floating point.
625 //
626 // Also, would it be possible for us to reuse storage when growing the
627 // underlying table? This is exactly the use case for 'realloc', and may
628 // be worth exploring.
629 //
630 // Future Optimizations (FIXME!)
631 // =============================
632 //
633 // The paper cited below mentions an implementation which keeps track of the
634 // distance-to-initial-bucket histogram. I'm suspicious of this approach because
635 // it requires maintaining an internal map. If this map were replaced with a
636 // hashmap, it would be faster, but now our data structure is self-referential
637 // and blows up. Also, this allows very good first guesses, but array accesses
638 // are no longer linear and in one direction, as we have now. There is also
639 // memory and cache pressure that this map would entail that would be very
640 // difficult to properly see in a microbenchmark.
641 //
642 // Another possible design choice that I made without any real reason is
643 // parameterizing the raw table over keys and values. Technically, all we need
644 // is the size and alignment of keys and values, and the code should be just as
645 // efficient (well, we might need one for power-of-two size and one for not...).
646 // This has the potential to reduce code bloat in rust executables, without
647 // really losing anything except 4 words (key size, key alignment, val size,
648 // val alignment) which can be passed in to every call of a `RawTable` function.
649 // This would definitely be an avenue worth exploring if people start complaining
650 // about the size of rust executables.
651 //
652 // There's also an "optimization" that has been omitted regarding how the
653 // hashtable allocates. The vector type has set the expectation that a hashtable
654 // which never has an element inserted should not allocate. I'm suspicious of
655 // implementing this for hashtables, because supporting it has no performance
656 // benefit over using an `Option<HashMap<K, V>>`, and is significantly more
657 // complicated.
658
659 /// A hash map implementation which uses linear probing with Robin
660 /// Hood bucket stealing.
661 ///
662 /// The hashes are all keyed by the task-local random number generator
663 /// on creation by default. This means that the ordering of the keys is
664 /// randomized, but it makes the tables more resistant to
665 /// denial-of-service attacks (Hash DoS). This behaviour can be
666 /// overridden with one of the constructors.
667 ///
668 /// It is required that the keys implement the `Eq` and `Hash` traits, although
669 /// this can frequently be achieved by using `#[deriving(Eq, Hash)]`.
670 ///
671 /// Relevant papers/articles:
672 ///
673 /// 1. Pedro Celis. ["Robin Hood Hashing"](https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf)
674 /// 2. Emmanuel Goossaert. ["Robin Hood
675 /// hashing"](http://codecapsule.com/2013/11/11/robin-hood-hashing/)
676 /// 3. Emmanuel Goossaert. ["Robin Hood hashing: backward shift
677 /// deletion"](http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/)
678 ///
679 /// # Example
680 ///
681 /// ```rust
682 /// use collections::HashMap;
683 ///
684 /// // type inference lets us omit an explicit type signature (which
685 /// // would be `HashMap<&str, &str>` in this example).
686 /// let mut book_reviews = HashMap::new();
687 ///
688 /// // review some books.
689 /// book_reviews.insert("Adventures of Huckleberry Finn", "My favorite book.");
690 /// book_reviews.insert("Grimms' Fairy Tales", "Masterpiece.");
691 /// book_reviews.insert("Pride and Prejudice", "Very enjoyable.");
692 /// book_reviews.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.");
693 ///
694 /// // check for a specific one.
695 /// if !book_reviews.contains_key(&("Les Misérables")) {
696 /// println!("We've got {} reviews, but Les Misérables ain't one.",
697 /// book_reviews.len());
698 /// }
699 ///
700 /// // oops, this review has a lot of spelling mistakes, let's delete it.
701 /// book_reviews.remove(&("The Adventures of Sherlock Holmes"));
702 ///
703 /// // look up the values associated with some keys.
704 /// let to_find = ["Pride and Prejudice", "Alice's Adventure in Wonderland"];
705 /// for book in to_find.iter() {
706 /// match book_reviews.find(book) {
707 /// Some(review) => println!("{}: {}", *book, *review),
708 /// None => println!("{} is unreviewed.", *book)
709 /// }
710 /// }
711 ///
712 /// // iterate over everything.
713 /// for (book, review) in book_reviews.iter() {
714 /// println!("{}: \"{}\"", *book, *review);
715 /// }
716 /// ```
717 #[deriving(Clone)]
718 pub struct HashMap<K, V, H = sip::SipHasher> {
719 // All hashes are keyed on these values, to prevent hash collision attacks.
720 hasher: H,
721
722 // When size == grow_at, we double the capacity.
723 grow_at: uint,
724
725 // The capacity must never drop below this.
726 minimum_capacity: uint,
727
728 table: table::RawTable<K, V>,
729
730 // We keep this at the end since it's 4 bytes, unlike everything else
731 // in this struct. Might as well save a word of padding!
732 load_factor: Fraction,
733 }
734
735 /// Get the number of elements which will force the capacity to grow.
736 fn grow_at(capacity: uint, load_factor: Fraction) -> uint {
737 fraction_mul(capacity, load_factor)
738 }
739
740 impl<K: TotalEq + Hash<S>, V, S, H: Hasher<S>> HashMap<K, V, H> {
741 /// Get the number of elements which will force the capacity to shrink.
742 /// When size == self.shrink_at(), we halve the capacity.
743 fn shrink_at(&self) -> uint {
744 self.table.capacity() >> 2
745 }
746
747 // Probe the `idx`th bucket for a given hash, returning the index of the
748 // target bucket.
749 //
750 // This exploits the power-of-two size of the hashtable. As long as this
751 // is always true, we can use a bitmask of cap-1 to do modular arithmetic.
752 //
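// For example (illustrative): with capacity() == 32 the mask is 31
// (0b11111), so `(hash + idx) & 31` is the same as `(hash + idx) % 32`.
//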
753 // Prefer using this with increasing values of `idx` rather than repeatedly
754 // calling `probe_next`. This reduces data-dependencies between loops, which
755 // can help the optimizer, and certainly won't hurt it. `probe_next` is
756 // simply for convenience, and is no more efficient than `probe`.
757 fn probe(&self, hash: &table::SafeHash, idx: uint) -> uint {
758 let hash_mask = self.table.capacity() - 1;
759
760 // So I heard a rumor that unsigned overflow is safe in rust..
761 ((hash.inspect() as uint) + idx) & hash_mask
762 }
763
764 // Generate the next probe in a sequence. Prefer using 'probe' by itself,
765 // but this can sometimes be useful.
766 fn probe_next(&self, probe: uint) -> uint {
767 let hash_mask = self.table.capacity() - 1;
768 (probe + 1) & hash_mask
769 }
770
771 fn make_hash<X: Hash<S>>(&self, x: &X) -> table::SafeHash {
772 table::make_hash(&self.hasher, x)
773 }
774
775 /// Get the distance of the bucket at the given index from its
776 /// 'ideal' location.
777 ///
778 /// In the cited blog posts above, this is called the "distance to
779 /// initial bucket", or DIB.
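///
/// For example (an illustrative sketch): in a 32-bucket table, an element
/// whose hash maps to bucket 30 but which resides at bucket 1 has wrapped
/// around, so its DIB is 1 + (32 - 30) = 3.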
780 fn bucket_distance(&self, index_of_elem: &table::FullIndex) -> uint {
781 // where the hash of the element that happens to reside at
782 // `index_of_elem` tried to place itself first.
783 let first_probe_index = self.probe(&index_of_elem.hash(), 0);
784
785 let raw_index = index_of_elem.raw_index();
786
787 if first_probe_index <= raw_index {
788 // probe just went forward
789 raw_index - first_probe_index
790 } else {
791 // probe wrapped around the hashtable
792 raw_index + (self.table.capacity() - first_probe_index)
793 }
794 }
795
796 /// Search for a pre-hashed key.
797 fn search_hashed_generic(&self, hash: &table::SafeHash, is_match: |&K| -> bool)
798 -> Option<table::FullIndex> {
799 for num_probes in range(0u, self.table.size()) {
800 let probe = self.probe(hash, num_probes);
801
802 let idx = match self.table.peek(probe) {
803 table::Empty(_) => return None, // hit an empty bucket
804 table::Full(idx) => idx
805 };
806
807 // We can finish the search early if we hit any bucket
808 // with a lower distance to initial bucket than we've probed.
809 if self.bucket_distance(&idx) < num_probes { return None }
810
811 // If the hash doesn't match, it can't be this one.
812 if *hash != idx.hash() { continue }
813
814 let (k, _) = self.table.read(&idx);
815
816 // If the key doesn't match, it can't be this one.
817 if !is_match(k) { continue }
818
819 return Some(idx);
820 }
821
822 return None
823 }
824
825 fn search_hashed(&self, hash: &table::SafeHash, k: &K) -> Option<table::FullIndex> {
826 self.search_hashed_generic(hash, |k_| *k == *k_)
827 }
828
829 fn search_equiv<Q: Hash<S> + Equiv<K>>(&self, q: &Q) -> Option<table::FullIndex> {
830 self.search_hashed_generic(&self.make_hash(q), |k| q.equiv(k))
831 }
832
833 /// Search for a key, yielding the index if it's found in the hashtable.
834 /// If you already have the hash for the key lying around, use
835 /// search_hashed.
836 fn search(&self, k: &K) -> Option<table::FullIndex> {
837 self.search_hashed(&self.make_hash(k), k)
838 }
839
840 fn pop_internal(&mut self, starting_index: table::FullIndex) -> Option<V> {
841 let starting_probe = starting_index.raw_index();
842
843 let ending_probe = {
844 let mut probe = self.probe_next(starting_probe);
845 for _ in range(0u, self.table.size()) {
846 match self.table.peek(probe) {
847 table::Empty(_) => {}, // empty bucket. this is the end of our shifting.
848 table::Full(idx) => {
849 // Bucket that isn't us, which has a non-zero probe distance.
850 // This isn't the ending index, so keep searching.
851 if self.bucket_distance(&idx) != 0 {
852 probe = self.probe_next(probe);
853 continue;
854 }
855
856 // if we do have a bucket_distance of zero, we're at the end
857 // of what we need to shift.
858 }
859 }
860 break;
861 }
862
863 probe
864 };
865
866 let (_, _, retval) = self.table.take(starting_index);
867
868 let mut probe = starting_probe;
869 let mut next_probe = self.probe_next(probe);
870
871 // backwards-shift all the elements after our newly-deleted one.
872 while next_probe != ending_probe {
873 match self.table.peek(next_probe) {
874 table::Empty(_) => {
875 // nothing to shift in. just empty it out.
876 match self.table.peek(probe) {
877 table::Empty(_) => {},
878 table::Full(idx) => { self.table.take(idx); }
879 }
880 },
881 table::Full(next_idx) => {
882 // something to shift. move it over!
883 let next_hash = next_idx.hash();
884 let (_, next_key, next_val) = self.table.take(next_idx);
885 match self.table.peek(probe) {
886 table::Empty(idx) => {
887 self.table.put(idx, next_hash, next_key, next_val);
888 },
889 table::Full(idx) => {
890 let (emptyidx, _, _) = self.table.take(idx);
891 self.table.put(emptyidx, next_hash, next_key, next_val);
892 }
893 }
894 }
895 }
896
897 probe = next_probe;
898 next_probe = self.probe_next(next_probe);
899 }
900
901 // The backwards shift is done, but there's still an element left!
902 // Empty it out.
903 match self.table.peek(probe) {
904 table::Empty(_) => {},
905 table::Full(idx) => { self.table.take(idx); }
906 }
907
908 // Now we're done with all our shifting. Return the value we grabbed
909 // earlier.
910 return Some(retval);
911 }
912
913 /// Like `pop`, but can operate on any type that is equivalent to a key.
914 #[experimental]
915 pub fn pop_equiv<Q:Hash<S> + Equiv<K>>(&mut self, k: &Q) -> Option<V> {
916 if self.table.size() == 0 {
917 return None
918 }
919
920 let potential_new_size = self.table.size() - 1;
921 self.make_some_room(potential_new_size);
922
923 let starting_index = match self.search_equiv(k) {
924 Some(idx) => idx,
925 None => return None,
926 };
927
928 self.pop_internal(starting_index)
929 }
930 }
931
932 impl<K: TotalEq + Hash<S>, V, S, H: Hasher<S>> Container for HashMap<K, V, H> {
933 /// Return the number of elements in the map
934 fn len(&self) -> uint { self.table.size() }
935 }
936
937 impl<K: TotalEq + Hash<S>, V, S, H: Hasher<S>> Mutable for HashMap<K, V, H> {
938 /// Clear the map, removing all key-value pairs.
939 fn clear(&mut self) {
940 self.minimum_capacity = self.table.size();
941
942 for i in range(0, self.table.capacity()) {
943 match self.table.peek(i) {
944 table::Empty(_) => {},
945 table::Full(idx) => { self.table.take(idx); }
946 }
947 }
948 }
949 }
950
951
952 impl<K: TotalEq + Hash<S>, V, S, H: Hasher<S>> Map<K, V> for HashMap<K, V, H> {
953 fn find<'a>(&'a self, k: &K) -> Option<&'a V> {
954 self.search(k).map(|idx| {
955 let (_, v) = self.table.read(&idx);
956 v
957 })
958 }
959
960 fn contains_key(&self, k: &K) -> bool {
961 self.search(k).is_some()
962 }
963 }
964
965 impl<K: TotalEq + Hash<S>, V, S, H: Hasher<S>> MutableMap<K, V> for HashMap<K, V, H> {
966 fn find_mut<'a>(&'a mut self, k: &K) -> Option<&'a mut V> {
967 match self.search(k) {
968 None => None,
969 Some(idx) => {
970 let (_, v) = self.table.read_mut(&idx);
971 Some(v)
972 }
973 }
974 }
975
976 fn swap(&mut self, k: K, v: V) -> Option<V> {
977 let hash = self.make_hash(&k);
978 let potential_new_size = self.table.size() + 1;
979 self.make_some_room(potential_new_size);
980
981 for dib in range_inclusive(0u, self.table.size()) {
982 let probe = self.probe(&hash, dib);
983
984 let idx = match self.table.peek(probe) {
985 table::Empty(idx) => {
986 // Found a hole!
987 self.table.put(idx, hash, k, v);
988 return None;
989 },
990 table::Full(idx) => idx
991 };
992
993 if idx.hash() == hash {
994 let (bucket_k, bucket_v) = self.table.read_mut(&idx);
995 if k == *bucket_k {
996 // Found an existing value.
997 return Some(replace(bucket_v, v));
998 }
999 }
1000
1001 let probe_dib = self.bucket_distance(&idx);
1002
1003 if probe_dib < dib {
1004 // Found a luckier bucket. This implies that the key does not
1005 // already exist in the hashtable. Just do a robin hood
1006 // insertion, then.
1007 self.robin_hood(idx, probe_dib, hash, k, v);
1008 return None;
1009 }
1010 }
1011
1012 // We really shouldn't be here.
1013 fail!("Internal HashMap error: Out of space.");
1014 }
1015
1016 fn pop(&mut self, k: &K) -> Option<V> {
1017 if self.table.size() == 0 {
1018 return None
1019 }
1020
1021 let potential_new_size = self.table.size() - 1;
1022 self.make_some_room(potential_new_size);
1023
1024 let starting_index = match self.search(k) {
1025 Some(idx) => idx,
1026 None => return None,
1027 };
1028
1029 self.pop_internal(starting_index)
1030 }
1031
1032 }
1033
1034 impl<K: Hash + TotalEq, V> HashMap<K, V, sip::SipHasher> {
1035 /// Create an empty HashMap.
1036 pub fn new() -> HashMap<K, V, sip::SipHasher> {
1037 HashMap::with_capacity(INITIAL_CAPACITY)
1038 }
1039
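/// Create an empty HashMap with space for at least `capacity`
/// elements in the hash table.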
1040 pub fn with_capacity(capacity: uint) -> HashMap<K, V, sip::SipHasher> {
1041 let mut r = rand::task_rng();
1042 let r0 = r.gen();
1043 let r1 = r.gen();
1044 let hasher = sip::SipHasher::new_with_keys(r0, r1);
1045 HashMap::with_capacity_and_hasher(capacity, hasher)
1046 }
1047 }
1048
1049 impl<K: TotalEq + Hash<S>, V, S, H: Hasher<S>> HashMap<K, V, H> {
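/// Create an empty HashMap which will use the given hasher to hash keys.
///
/// See `with_capacity_and_hasher` for a warning about supplying the
/// hasher manually.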
1050 pub fn with_hasher(hasher: H) -> HashMap<K, V, H> {
1051 HashMap::with_capacity_and_hasher(INITIAL_CAPACITY, hasher)
1052 }
1053
1054 /// Create an empty HashMap with space for at least `capacity`
1055 /// elements, using `hasher` to hash the keys.
1056 ///
1057 /// Warning: `hasher` is normally randomly generated, and
1058 /// is designed to allow HashMaps to be resistant to attacks that
1059 /// cause many collisions and very poor performance. Setting it
1060 /// manually using this function can expose a DoS attack vector.
1061 pub fn with_capacity_and_hasher(capacity: uint, hasher: H) -> HashMap<K, V, H> {
1062 let cap = num::next_power_of_two(max(INITIAL_CAPACITY, capacity));
1063 HashMap {
1064 hasher: hasher,
1065 load_factor: INITIAL_LOAD_FACTOR,
1066 grow_at: grow_at(cap, INITIAL_LOAD_FACTOR),
1067 minimum_capacity: cap,
1068 table: table::RawTable::new(cap),
1069 }
1070 }
1071
1072 /// The hashtable will never try to shrink below this size. You can use
1073 /// this function to reduce reallocations if your hashtable frequently
1074 /// grows and shrinks by large amounts.
1075 ///
1076 /// This function has no effect on the operational semantics of the
1077 /// hashtable, only on performance.
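///
/// # Example
///
/// An illustrative sketch; reserving room up front reduces reallocations
/// when many insertions are expected:
///
/// ```rust
/// use collections::HashMap;
///
/// let mut map = HashMap::new();
/// map.insert("a", 1);
/// map.reserve(100);
/// ```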
1078 pub fn reserve(&mut self, new_minimum_capacity: uint) {
1079 let cap = num::next_power_of_two(
1080 max(INITIAL_CAPACITY, new_minimum_capacity));
1081
1082 self.minimum_capacity = cap;
1083
1084 if self.table.capacity() < cap {
1085 self.resize(cap);
1086 }
1087 }
1088
1089 /// Resizes the internal vectors to a new capacity. It's your responsibility to:
1090 /// 1) Make sure the new capacity is enough for all the elements, accounting
1091 /// for the load factor.
1092 /// 2) Ensure new_capacity is a power of two.
1093 fn resize(&mut self, new_capacity: uint) {
1094 assert!(self.table.size() <= new_capacity);
1095 assert!(num::is_power_of_two(new_capacity));
1096
1097 self.grow_at = grow_at(new_capacity, self.load_factor);
1098
1099 let old_table = replace(&mut self.table, table::RawTable::new(new_capacity));
1100 let old_size = old_table.size();
1101
1102 for (h, k, v) in old_table.move_iter() {
1103 self.insert_hashed_nocheck(h, k, v);
1104 }
1105
1106 assert_eq!(self.table.size(), old_size);
1107 }
1108
1109 /// Performs any necessary resize operations, such that there's space for
1110 /// new_size elements.
1111 fn make_some_room(&mut self, new_size: uint) {
1112 let should_shrink = new_size <= self.shrink_at();
1113 let should_grow = self.grow_at <= new_size;
1114
1115 if should_grow {
1116 let new_capacity = self.table.capacity() << 1;
1117 self.resize(new_capacity);
1118 } else if should_shrink {
1119 let new_capacity = self.table.capacity() >> 1;
1120
1121 // Never shrink below the minimum capacity
1122 if self.minimum_capacity <= new_capacity {
1123 self.resize(new_capacity);
1124 }
1125 }
1126 }
1127
1128 /// Perform robin hood bucket stealing at the given 'index'. You must
1129 /// also pass that probe's "distance to initial bucket" so we don't have
1130 /// to recalculate it, as well as the total number of probes already done
1131 /// so we have some sort of upper bound on the number of probes to do.
1132 ///
1133 /// 'hash', 'k', and 'v' are the elements to robin hood into the hashtable.
1134 fn robin_hood(&mut self, mut index: table::FullIndex, mut dib_param: uint,
1135 mut hash: table::SafeHash, mut k: K, mut v: V) {
1136 'outer: loop {
1137 let (old_hash, old_key, old_val) = {
1138 let (old_hash_ref, old_key_ref, old_val_ref) =
1139 self.table.read_all_mut(&index);
1140
1141 let old_hash = replace(old_hash_ref, hash);
1142 let old_key = replace(old_key_ref, k);
1143 let old_val = replace(old_val_ref, v);
1144
1145 (old_hash, old_key, old_val)
1146 };
1147
1148 let mut probe = self.probe_next(index.raw_index());
1149
1150 for dib in range(dib_param + 1, self.table.size()) {
1151 let full_index = match self.table.peek(probe) {
1152 table::Empty(idx) => {
1153 // Finally. A hole!
1154 self.table.put(idx, old_hash, old_key, old_val);
1155 return;
1156 },
1157 table::Full(idx) => idx
1158 };
1159
1160 let probe_dib = self.bucket_distance(&full_index);
1161
1162 // Robin hood! Steal the spot.
1163 if probe_dib < dib {
1164 index = full_index;
1165 dib_param = probe_dib;
1166 hash = old_hash;
1167 k = old_key;
1168 v = old_val;
1169 continue 'outer;
1170 }
1171
1172 probe = self.probe_next(probe);
1173 }
1174
1175 fail!("HashMap fatal error: 100% load factor?");
1176 }
1177 }
1178
1179 /// Insert a pre-hashed key-value pair, without first checking
1180 /// that there's enough room in the buckets. Returns a reference to the
1181 /// newly inserted value.
1182 ///
1183 /// If the key already exists, the hashtable will be returned untouched
1184 /// and a reference to the existing element will be returned.
1185 fn insert_hashed_nocheck<'a>(
1186 &'a mut self, hash: table::SafeHash, k: K, v: V) -> &'a mut V {
1187
1188 for dib in range_inclusive(0u, self.table.size()) {
1189 let probe = self.probe(&hash, dib);
1190
1191 let idx = match self.table.peek(probe) {
1192 table::Empty(idx) => {
1193 // Found a hole!
1194 let fullidx = self.table.put(idx, hash, k, v);
1195 let (_, val) = self.table.read_mut(&fullidx);
1196 return val;
1197 },
1198 table::Full(idx) => idx
1199 };
1200
1201 if idx.hash() == hash {
1202 let (bucket_k, bucket_v) = self.table.read_mut(&idx);
1203 // FIXME #12147 the conditional return confuses
1204 // borrowck if we return bucket_v directly
1205 let bv: *mut V = bucket_v;
1206 if k == *bucket_k {
1207 // Key already exists. Get its reference.
1208 return unsafe {&mut *bv};
1209 }
1210 }
1211
1212 let probe_dib = self.bucket_distance(&idx);
1213
1214 if probe_dib < dib {
1215 // Found a luckier bucket than me. Better steal his spot.
1216 self.robin_hood(idx, probe_dib, hash, k, v);
1217
1218 // Now that it's stolen, just read the value's pointer
1219 // right out of the table!
1220 match self.table.peek(probe) {
1221 table::Empty(_) => fail!("Just stole a spot, but now that spot's empty."),
1222 table::Full(idx) => {
1223 let (_, v) = self.table.read_mut(&idx);
1224 return v;
1225 }
1226 }
1227 }
1228 }
1229
1230 // We really shouldn't be here.
1231 fail!("Internal HashMap error: Out of space.");
1232 }
1233
1234 /// Inserts an element which has already been hashed, returning a reference
1235 /// to that element inside the hashtable. This is more efficient than using
1236 /// `insert`, since the key will not be rehashed.
1237 fn insert_hashed<'a>(&'a mut self, hash: table::SafeHash, k: K, v: V) -> &'a mut V {
1238 let potential_new_size = self.table.size() + 1;
1239 self.make_some_room(potential_new_size);
1240 self.insert_hashed_nocheck(hash, k, v)
1241 }
1242
1243 /// Return the value corresponding to the key in the map, or insert
1244 /// and return the value if it doesn't exist.
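///
/// # Example
///
/// An illustrative sketch:
///
/// ```rust
/// use collections::HashMap;
///
/// let mut map = HashMap::new();
/// map.insert("a", 1);
///
/// // The key exists, so the stored value is returned untouched.
/// assert_eq!(*map.find_or_insert("a", 10), 1);
///
/// // The key is missing, so `2` is inserted and returned.
/// assert_eq!(*map.find_or_insert("b", 2), 2);
/// ```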
1245 pub fn find_or_insert<'a>(&'a mut self, k: K, v: V) -> &'a mut V {
1246 let hash = self.make_hash(&k);
1247 match self.search_hashed(&hash, &k) {
1248 Some(idx) => {
1249 let (_, v_ref) = self.table.read_mut(&idx);
1250 v_ref
1251 },
1252 None => self.insert_hashed(hash, k, v)
1253 }
1254 }
1255
1256 /// Return the value corresponding to the key in the map, or create,
1257 /// insert, and return a new value if it doesn't exist.
1258 pub fn find_or_insert_with<'a>(&'a mut self, k: K, f: |&K| -> V)
1259 -> &'a mut V {
1260 let hash = self.make_hash(&k);
1261 match self.search_hashed(&hash, &k) {
1262 Some(idx) => {
1263 let (_, v_ref) = self.table.read_mut(&idx);
1264 v_ref
1265 },
1266 None => {
1267 let v = f(&k);
1268 self.insert_hashed(hash, k, v)
1269 }
1270 }
1271 }
1272
1273 /// Insert a key-value pair into the map if the key is not already present.
1274 /// Otherwise, modify the existing value for the key.
1275 /// Returns the new or modified value for the key.
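///
/// # Example
///
/// An illustrative sketch that counts occurrences of each word:
///
/// ```rust
/// use collections::HashMap;
///
/// let mut counts = HashMap::new();
/// let words = ["a", "b", "a"];
///
/// for word in words.iter() {
///     counts.insert_or_update_with(*word, 1, |_key, count| *count += 1);
/// }
///
/// assert_eq!(*counts.get(&"a"), 2);
/// assert_eq!(*counts.get(&"b"), 1);
/// ```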
1276 pub fn insert_or_update_with<'a>(
1277 &'a mut self,
1278 k: K,
1279 v: V,
1280 f: |&K, &mut V|)
1281 -> &'a mut V {
1282 let hash = self.make_hash(&k);
1283 match self.search_hashed(&hash, &k) {
1284 None => self.insert_hashed(hash, k, v),
1285 Some(idx) => {
1286 let (_, v_ref) = self.table.read_mut(&idx);
1287 f(&k, v_ref);
1288 v_ref
1289 }
1290 }
1291 }
1292
1293 /// Retrieves a value for the given key, failing if the key is not present.
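///
/// # Example
///
/// An illustrative sketch:
///
/// ```rust
/// use collections::HashMap;
///
/// let mut map = HashMap::new();
/// map.insert("a", 1);
///
/// assert_eq!(*map.get(&"a"), 1);
/// ```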
1294 pub fn get<'a>(&'a self, k: &K) -> &'a V {
1295 match self.find(k) {
1296 Some(v) => v,
1297 None => fail!("No entry found for key: {:?}", k)
1298 }
1299 }
1300
1301 /// Retrieves a (mutable) value for the given key, failing if the key is not present.
1302 pub fn get_mut<'a>(&'a mut self, k: &K) -> &'a mut V {
1303 match self.find_mut(k) {
1304 Some(v) => v,
1305 None => fail!("No entry found for key: {:?}", k)
1306 }
1307 }
1308
1309 /// Return true if the map contains a value for the specified key,
1310 /// using equivalence.
1311 pub fn contains_key_equiv<Q: Hash<S> + Equiv<K>>(&self, key: &Q) -> bool {
1312 self.search_equiv(key).is_some()
1313 }
1314
1315 /// Return the value corresponding to the key in the map, using
1316 /// equivalence.
1317 pub fn find_equiv<'a, Q: Hash<S> + Equiv<K>>(&'a self, k: &Q) -> Option<&'a V> {
1318 match self.search_equiv(k) {
1319 None => None,
1320 Some(idx) => {
1321 let (_, v_ref) = self.table.read(&idx);
1322 Some(v_ref)
1323 }
1324 }
1325 }
1326
1327 /// An iterator visiting all keys in arbitrary order.
1328 /// Iterator element type is &'a K.
1329 pub fn keys<'a>(&'a self) -> Keys<'a, K, V> {
1330 self.iter().map(|(k, _v)| k)
1331 }
1332
1333 /// An iterator visiting all values in arbitrary order.
1334 /// Iterator element type is &'a V.
1335 pub fn values<'a>(&'a self) -> Values<'a, K, V> {
1336 self.iter().map(|(_k, v)| v)
1337 }
1338
1339 /// An iterator visiting all key-value pairs in arbitrary order.
1340 /// Iterator element type is (&'a K, &'a V).
1341 pub fn iter<'a>(&'a self) -> Entries<'a, K, V> {
1342 self.table.iter()
1343 }
1344
1345 /// An iterator visiting all key-value pairs in arbitrary order,
1346 /// with mutable references to the values.
1347 /// Iterator element type is (&'a K, &'a mut V).
1348 pub fn mut_iter<'a>(&'a mut self) -> MutEntries<'a, K, V> {
1349 self.table.mut_iter()
1350 }
1351
1352 /// Creates a consuming iterator, that is, one that moves each key-value
1353 /// pair out of the map in arbitrary order. The map cannot be used after
1354 /// calling this.
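///
/// # Example
///
/// An illustrative sketch:
///
/// ```rust
/// use collections::HashMap;
///
/// let mut map = HashMap::new();
/// map.insert("a", 1);
/// map.insert("b", 2);
///
/// // `map` is consumed here and cannot be used afterwards.
/// for (key, value) in map.move_iter() {
///     println!("{}: {}", key, value);
/// }
/// ```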
1355 pub fn move_iter(self) -> MoveEntries<K, V> {
1356 self.table.move_iter().map(|(_, k, v)| (k, v))
1357 }
1358 }
1359
1360 impl<K: TotalEq + Hash<S>, V: Clone, S, H: Hasher<S>> HashMap<K, V, H> {
1361 /// Like `find`, but returns a copy of the value.
1362 pub fn find_copy(&self, k: &K) -> Option<V> {
1363 self.find(k).map(|v| (*v).clone())
1364 }
1365
1366 /// Like `get`, but returns a copy of the value.
1367 pub fn get_copy(&self, k: &K) -> V {
1368 (*self.get(k)).clone()
1369 }
1370 }
1371
1372 impl<K: TotalEq + Hash<S>, V: Eq, S, H: Hasher<S>> Eq for HashMap<K, V, H> {
1373 fn eq(&self, other: &HashMap<K, V, H>) -> bool {
1374 if self.len() != other.len() { return false; }
1375
1376 self.iter()
1377 .all(|(key, value)| {
1378 match other.find(key) {
1379 None => false,
1380 Some(v) => *value == *v
1381 }
1382 })
1383 }
1384 }
1385
1386 impl<K: TotalEq + Hash<S> + Show, V: Show, S, H: Hasher<S>> Show for HashMap<K, V, H> {
1387 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1388 try!(write!(f.buf, r"\{"));
1389
1390 for (i, (k, v)) in self.iter().enumerate() {
1391 if i != 0 { try!(write!(f.buf, ", ")); }
1392 try!(write!(f.buf, "{}: {}", *k, *v));
1393 }
1394
1395 write!(f.buf, r"\}")
1396 }
1397 }
1398
1399 impl<K: TotalEq + Hash<S>, V, S, H: Hasher<S> + Default> Default for HashMap<K, V, H> {
1400 fn default() -> HashMap<K, V, H> {
1401 HashMap::with_hasher(Default::default())
1402 }
1403 }
1404
1405 /// HashMap iterator
1406 pub type Entries<'a, K, V> = table::Entries<'a, K, V>;
1407
1408 /// HashMap mutable values iterator
1409 pub type MutEntries<'a, K, V> = table::MutEntries<'a, K, V>;
1410
1411 /// HashMap move iterator
1412 pub type MoveEntries<K, V> =
1413 iter::Map<'static, (table::SafeHash, K, V), (K, V), table::MoveEntries<K, V>>;
1414
1415 /// HashMap keys iterator
1416 pub type Keys<'a, K, V> =
1417 iter::Map<'static, (&'a K, &'a V), &'a K, Entries<'a, K, V>>;
1418
1419 /// HashMap values iterator
1420 pub type Values<'a, K, V> =
1421 iter::Map<'static, (&'a K, &'a V), &'a V, Entries<'a, K, V>>;
1422
1423 impl<K: TotalEq + Hash<S>, V, S, H: Hasher<S> + Default> FromIterator<(K, V)> for HashMap<K, V, H> {
1424 fn from_iter<T: Iterator<(K, V)>>(iter: T) -> HashMap<K, V, H> {
1425 let (lower, _) = iter.size_hint();
1426 let mut map = HashMap::with_capacity_and_hasher(lower, Default::default());
1427 map.extend(iter);
1428 map
1429 }
1430 }
1431
1432 impl<K: TotalEq + Hash<S>, V, S, H: Hasher<S> + Default> Extendable<(K, V)> for HashMap<K, V, H> {
1433 fn extend<T: Iterator<(K, V)>>(&mut self, mut iter: T) {
1434 for (k, v) in iter {
1435 self.insert(k, v);
1436 }
1437 }
1438 }
1439
1440 /// HashSet iterator
1441 pub type SetItems<'a, K> =
1442 iter::Map<'static, (&'a K, &'a ()), &'a K, Entries<'a, K, ()>>;
1443
1444 /// HashSet move iterator
1445 pub type SetMoveItems<K> =
1446 iter::Map<'static, (K, ()), K, MoveEntries<K, ()>>;
1447
1448 /// An implementation of a hash set using the underlying representation of a
1449 /// HashMap where the value is (). As with the `HashMap` type, a `HashSet`
1450 /// requires that the elements implement the `Eq` and `Hash` traits.
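///
/// # Example
///
/// An illustrative sketch:
///
/// ```rust
/// use collections::HashSet;
///
/// let mut set = HashSet::new();
/// set.insert(1);
/// set.insert(2);
///
/// assert!(set.contains(&1));
/// assert!(!set.contains(&3));
/// assert_eq!(set.len(), 2);
/// ```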
1451 #[deriving(Clone)]
1452 pub struct HashSet<T, H = sip::SipHasher> {
1453 map: HashMap<T, (), H>
1454 }
1455
1456 impl<T: TotalEq + Hash<S>, S, H: Hasher<S>> Eq for HashSet<T, H> {
1457 fn eq(&self, other: &HashSet<T, H>) -> bool {
1458 if self.len() != other.len() { return false; }
1459
1460 self.iter().all(|key| other.contains(key))
1461 }
1462 }
1463
1464 impl<T: TotalEq + Hash<S>, S, H: Hasher<S>> Container for HashSet<T, H> {
1465 fn len(&self) -> uint { self.map.len() }
1466 }
1467
1468 impl<T: TotalEq + Hash<S>, S, H: Hasher<S>> Mutable for HashSet<T, H> {
1469 fn clear(&mut self) { self.map.clear() }
1470 }
1471
1472 impl<T: TotalEq + Hash<S>, S, H: Hasher<S>> Set<T> for HashSet<T, H> {
1473 fn contains(&self, value: &T) -> bool { self.map.contains_key(value) }
1474
1475 fn is_disjoint(&self, other: &HashSet<T, H>) -> bool {
1476 self.iter().all(|v| !other.contains(v))
1477 }
1478
1479 fn is_subset(&self, other: &HashSet<T, H>) -> bool {
1480 self.iter().all(|v| other.contains(v))
1481 }
1482 }
1483
1484 impl<T: TotalEq + Hash<S>, S, H: Hasher<S>> MutableSet<T> for HashSet<T, H> {
1485 fn insert(&mut self, value: T) -> bool { self.map.insert(value, ()) }
1486
1487 fn remove(&mut self, value: &T) -> bool { self.map.remove(value) }
1488 }
1489
1490 impl<T: Hash + TotalEq> HashSet<T, sip::SipHasher> {
1491 /// Create an empty HashSet
1492 pub fn new() -> HashSet<T, sip::SipHasher> {
1493 HashSet::with_capacity(INITIAL_CAPACITY)
1494 }
1495
1496 /// Create an empty HashSet with space for at least `n` elements in
1497 /// the hash table.
1498 pub fn with_capacity(capacity: uint) -> HashSet<T, sip::SipHasher> {
1499 HashSet { map: HashMap::with_capacity(capacity) }
1500 }
1501 }
1502
1503 impl<T: TotalEq + Hash<S>, S, H: Hasher<S>> HashSet<T, H> {
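/// Create an empty HashSet which will use the given hasher to hash keys.
///
/// See `with_capacity_and_hasher` for a warning about supplying the
/// hasher manually.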
1504 pub fn with_hasher(hasher: H) -> HashSet<T, H> {
1505 HashSet::with_capacity_and_hasher(INITIAL_CAPACITY, hasher)
1506 }
1507
1508 /// Create an empty HashSet with space for at least `capacity`
1509 /// elements in the hash table, using `hasher` to hash the keys.
1510 ///
1511 /// Warning: `hasher` is normally randomly generated, and
1512 /// is designed to allow `HashSet`s to be resistant to attacks that
1513 /// cause many collisions and very poor performance. Setting it
1514 /// manually using this function can expose a DoS attack vector.
1515 pub fn with_capacity_and_hasher(capacity: uint, hasher: H) -> HashSet<T, H> {
1516 HashSet { map: HashMap::with_capacity_and_hasher(capacity, hasher) }
1517 }
1518
1519 /// Reserve space for at least `n` elements in the hash table.
1520 pub fn reserve(&mut self, n: uint) {
1521 self.map.reserve(n)
1522 }
1523
1524 /// Returns true if the hash set contains a value equivalent to the
1525 /// given query value.
1526 pub fn contains_equiv<Q: Hash<S> + Equiv<T>>(&self, value: &Q) -> bool {
1527 self.map.contains_key_equiv(value)
1528 }
1529
1530 /// An iterator visiting all elements in arbitrary order.
1531 /// Iterator element type is &'a T.
1532 pub fn iter<'a>(&'a self) -> SetItems<'a, T> {
1533 self.map.keys()
1534 }
1535
1536 /// Creates a consuming iterator, that is, one that moves each value out
1537 /// of the set in arbitrary order. The set cannot be used after calling
1538 /// this.
1539 pub fn move_iter(self) -> SetMoveItems<T> {
1540 self.map.move_iter().map(|(k, _)| k)
1541 }
1542
1543 /// Visit the values representing the difference
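///
/// # Example
///
/// An illustrative sketch:
///
/// ```rust
/// use collections::HashSet;
///
/// let mut a = HashSet::new();
/// let mut b = HashSet::new();
/// a.insert(1);
/// a.insert(2);
/// b.insert(2);
/// b.insert(3);
///
/// // Prints 1, the only element of `a` that is not in `b`.
/// for x in a.difference(&b) {
///     println!("{}", *x);
/// }
/// ```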
1544 pub fn difference<'a>(&'a self, other: &'a HashSet<T, H>) -> SetAlgebraItems<'a, T, H> {
1545 Repeat::new(other).zip(self.iter())
1546 .filter_map(|(other, elt)| {
1547 if !other.contains(elt) { Some(elt) } else { None }
1548 })
1549 }
1550
1551 /// Visit the values representing the symmetric difference
1552 pub fn symmetric_difference<'a>(&'a self, other: &'a HashSet<T, H>)
1553 -> Chain<SetAlgebraItems<'a, T, H>, SetAlgebraItems<'a, T, H>> {
1554 self.difference(other).chain(other.difference(self))
1555 }
1556
1557 /// Visit the values representing the intersection
1558 pub fn intersection<'a>(&'a self, other: &'a HashSet<T, H>)
1559 -> SetAlgebraItems<'a, T, H> {
1560 Repeat::new(other).zip(self.iter())
1561 .filter_map(|(other, elt)| {
1562 if other.contains(elt) { Some(elt) } else { None }
1563 })
1564 }
1565
1566 /// Visit the values representing the union
1567 pub fn union<'a>(&'a self, other: &'a HashSet<T, H>)
1568 -> Chain<SetItems<'a, T>, SetAlgebraItems<'a, T, H>> {
1569 self.iter().chain(other.difference(self))
1570 }
1571 }
1572
1573 impl<T: TotalEq + Hash<S> + fmt::Show, S, H: Hasher<S>> fmt::Show for HashSet<T, H> {
1574 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1575 try!(write!(f.buf, r"\{"));
1576
1577 for (i, x) in self.iter().enumerate() {
1578 if i != 0 { try!(write!(f.buf, ", ")); }
1579 try!(write!(f.buf, "{}", *x));
1580 }
1581
1582 write!(f.buf, r"\}")
1583 }
1584 }
1585
1586 impl<T: TotalEq + Hash<S>, S, H: Hasher<S> + Default> FromIterator<T> for HashSet<T, H> {
1587 fn from_iter<I: Iterator<T>>(iter: I) -> HashSet<T, H> {
1588 let (lower, _) = iter.size_hint();
1589 let mut set = HashSet::with_capacity_and_hasher(lower, Default::default());
1590 set.extend(iter);
1591 set
1592 }
1593 }
1594
1595 impl<T: TotalEq + Hash<S>, S, H: Hasher<S> + Default> Extendable<T> for HashSet<T, H> {
1596 fn extend<I: Iterator<T>>(&mut self, mut iter: I) {
1597 for k in iter {
1598 self.insert(k);
1599 }
1600 }
1601 }
1602
1603 impl<T: TotalEq + Hash> Default for HashSet<T, sip::SipHasher> {
1604 fn default() -> HashSet<T> { HashSet::new() }
1605 }
1606
1607 // `Repeat` is used to feed the filter closure an explicit capture
1608 // of a reference to the other set
1609 /// Set operations iterator
1610 pub type SetAlgebraItems<'a, T, H> =
1611 FilterMap<'static, (&'a HashSet<T, H>, &'a T), &'a T,
1612 Zip<Repeat<&'a HashSet<T, H>>, SetItems<'a, T>>>;
1613
1614 #[cfg(test)]
1615 mod test_map {
1616 use super::HashMap;
1617 use std::cmp::Equiv;
1618 use std::hash::Hash;
1619 use std::iter::{Iterator,range_inclusive,range_step_inclusive};
1620 use std::cell::RefCell;
1621
1622 struct KindaIntLike(int);
1623
1624 impl Equiv<int> for KindaIntLike {
1625 fn equiv(&self, other: &int) -> bool {
1626 let KindaIntLike(this) = *self;
1627 this == *other
1628 }
1629 }
1630 impl<S: Writer> Hash<S> for KindaIntLike {
1631 fn hash(&self, state: &mut S) {
1632 let KindaIntLike(this) = *self;
1633 this.hash(state)
1634 }
1635 }
1636
1637 #[test]
1638 fn test_create_capacity_zero() {
1639 let mut m = HashMap::with_capacity(0);
1640
1641 assert!(m.insert(1, 1));
1642
1643 assert!(m.contains_key(&1));
1644 assert!(!m.contains_key(&0));
1645 }
1646
1647 #[test]
1648 fn test_insert() {
1649 let mut m = HashMap::new();
1650 assert_eq!(m.len(), 0);
1651 assert!(m.insert(1, 2));
1652 assert_eq!(m.len(), 1);
1653 assert!(m.insert(2, 4));
1654 assert_eq!(m.len(), 2);
1655 assert_eq!(*m.find(&1).unwrap(), 2);
1656 assert_eq!(*m.find(&2).unwrap(), 4);
1657 }
1658
1659 local_data_key!(drop_vector: RefCell<Vec<int>>)
1660
1661 #[deriving(Hash, Eq, TotalEq)]
1662 struct Dropable {
1663 k: uint
1664 }
1665
1666
1667 impl Dropable {
1668 fn new(k: uint) -> Dropable {
1669 let v = drop_vector.get().unwrap();
1670 v.borrow_mut().as_mut_slice()[k] += 1;
1671
1672 Dropable { k: k }
1673 }
1674 }
1675
1676 impl Drop for Dropable {
1677 fn drop(&mut self) {
1678 let v = drop_vector.get().unwrap();
1679 v.borrow_mut().as_mut_slice()[self.k] -= 1;
1680 }
1681 }
1682
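// `drop_vector` tracks the number of live `Dropable` values per slot:
// `Dropable::new(k)` increments slot `k` and `drop` decrements it, so a slot
// reads 0 exactly when every value created with that `k` has been dropped.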
1683 #[test]
1684 fn test_drops() {
1685 drop_vector.replace(Some(RefCell::new(Vec::from_elem(200, 0))));
1686
1687 {
1688 let mut m = HashMap::new();
1689
1690 let v = drop_vector.get().unwrap();
1691 for i in range(0u, 200) {
1692 assert_eq!(v.borrow().as_slice()[i], 0);
1693 }
1694 drop(v);
1695
1696 for i in range(0u, 100) {
1697 let d1 = Dropable::new(i);
1698 let d2 = Dropable::new(i+100);
1699 m.insert(d1, d2);
1700 }
1701
1702 let v = drop_vector.get().unwrap();
1703 for i in range(0u, 200) {
1704 assert_eq!(v.borrow().as_slice()[i], 1);
1705 }
1706 drop(v);
1707
1708 for i in range(0u, 50) {
1709 let k = Dropable::new(i);
1710 let v = m.pop(&k);
1711
1712 assert!(v.is_some());
1713
1714 let v = drop_vector.get().unwrap();
1715 assert_eq!(v.borrow().as_slice()[i], 1);
1716 assert_eq!(v.borrow().as_slice()[i+100], 1);
1717 }
1718
1719 let v = drop_vector.get().unwrap();
1720 for i in range(0u, 50) {
1721 assert_eq!(v.borrow().as_slice()[i], 0);
1722 assert_eq!(v.borrow().as_slice()[i+100], 0);
1723 }
1724
1725 for i in range(50u, 100) {
1726 assert_eq!(v.borrow().as_slice()[i], 1);
1727 assert_eq!(v.borrow().as_slice()[i+100], 1);
1728 }
1729 }
1730
1731 let v = drop_vector.get().unwrap();
1732 for i in range(0u, 200) {
1733 assert_eq!(v.borrow().as_slice()[i], 0);
1734 }
1735 }
1736
1737 #[test]
1738 fn test_empty_pop() {
1739 let mut m: HashMap<int, bool> = HashMap::new();
1740 assert_eq!(m.pop(&0), None);
1741 }
1742
1743 #[test]
1744 fn test_lots_of_insertions() {
1745 let mut m = HashMap::new();
1746
1747 // Try this a few times to make sure we never screw up the hashmap's
1748 // internal state.
1749 for _ in range(0, 10) {
1750 assert!(m.is_empty());
1751
1752 for i in range_inclusive(1, 1000) {
1753 assert!(m.insert(i, i));
1754
1755 for j in range_inclusive(1, i) {
1756 let r = m.find(&j);
1757 assert_eq!(r, Some(&j));
1758 }
1759
1760 for j in range_inclusive(i+1, 1000) {
1761 let r = m.find(&j);
1762 assert_eq!(r, None);
1763 }
1764 }
1765
1766 for i in range_inclusive(1001, 2000) {
1767 assert!(!m.contains_key(&i));
1768 }
1769
1770 // remove forwards
1771 for i in range_inclusive(1, 1000) {
1772 assert!(m.remove(&i));
1773
1774 for j in range_inclusive(1, i) {
1775 assert!(!m.contains_key(&j));
1776 }
1777
1778 for j in range_inclusive(i+1, 1000) {
1779 assert!(m.contains_key(&j));
1780 }
1781 }
1782
1783 for i in range_inclusive(1, 1000) {
1784 assert!(!m.contains_key(&i));
1785 }
1786
1787 for i in range_inclusive(1, 1000) {
1788 assert!(m.insert(i, i));
1789 }
1790
1791 // remove backwards
1792 for i in range_step_inclusive(1000, 1, -1) {
1793 assert!(m.remove(&i));
1794
1795 for j in range_inclusive(i, 1000) {
1796 assert!(!m.contains_key(&j));
1797 }
1798
1799 for j in range_inclusive(1, i-1) {
1800 assert!(m.contains_key(&j));
1801 }
1802 }
1803 }
1804 }
1805
1806 #[test]
1807 fn test_find_mut() {
1808 let mut m = HashMap::new();
1809 assert!(m.insert(1, 12));
1810 assert!(m.insert(2, 8));
1811 assert!(m.insert(5, 14));
1812 let new = 100;
1813 match m.find_mut(&5) {
1814 None => fail!(), Some(x) => *x = new
1815 }
1816 assert_eq!(m.find(&5), Some(&new));
1817 }
1818
1819 #[test]
1820 fn test_insert_overwrite() {
1821 let mut m = HashMap::new();
1822 assert!(m.insert(1, 2));
1823 assert_eq!(*m.find(&1).unwrap(), 2);
1824 assert!(!m.insert(1, 3));
1825 assert_eq!(*m.find(&1).unwrap(), 3);
1826 }
1827
1828 #[test]
1829 fn test_insert_conflicts() {
1830 let mut m = HashMap::with_capacity(4);
1831 assert!(m.insert(1, 2));
1832 assert!(m.insert(5, 3));
1833 assert!(m.insert(9, 4));
1834 assert_eq!(*m.find(&9).unwrap(), 4);
1835 assert_eq!(*m.find(&5).unwrap(), 3);
1836 assert_eq!(*m.find(&1).unwrap(), 2);
1837 }
1838
1839 #[test]
1840 fn test_conflict_remove() {
1841 let mut m = HashMap::with_capacity(4);
1842 assert!(m.insert(1, 2));
1843 assert_eq!(*m.find(&1).unwrap(), 2);
1844 assert!(m.insert(5, 3));
1845 assert_eq!(*m.find(&1).unwrap(), 2);
1846 assert_eq!(*m.find(&5).unwrap(), 3);
1847 assert!(m.insert(9, 4));
1848 assert_eq!(*m.find(&1).unwrap(), 2);
1849 assert_eq!(*m.find(&5).unwrap(), 3);
1850 assert_eq!(*m.find(&9).unwrap(), 4);
1851 assert!(m.remove(&1));
1852 assert_eq!(*m.find(&9).unwrap(), 4);
1853 assert_eq!(*m.find(&5).unwrap(), 3);
1854 }
1855
1856 #[test]
1857 fn test_is_empty() {
1858 let mut m = HashMap::with_capacity(4);
1859 assert!(m.insert(1, 2));
1860 assert!(!m.is_empty());
1861 assert!(m.remove(&1));
1862 assert!(m.is_empty());
1863 }
1864
1865 #[test]
1866 fn test_pop() {
1867 let mut m = HashMap::new();
1868 m.insert(1, 2);
1869 assert_eq!(m.pop(&1), Some(2));
1870 assert_eq!(m.pop(&1), None);
1871 }
1872
1873 #[test]
1874 #[allow(experimental)]
1875 fn test_pop_equiv() {
1876 let mut m = HashMap::new();
1877 m.insert(1, 2);
1878 assert_eq!(m.pop_equiv(&KindaIntLike(1)), Some(2));
1879 assert_eq!(m.pop_equiv(&KindaIntLike(1)), None);
1880 }
1881
1882 #[test]
1883 fn test_swap() {
1884 let mut m = HashMap::new();
1885 assert_eq!(m.swap(1, 2), None);
1886 assert_eq!(m.swap(1, 3), Some(2));
1887 assert_eq!(m.swap(1, 4), Some(3));
1888 }
1889
1890 #[test]
1891 fn test_move_iter() {
1892 let hm = {
1893 let mut hm = HashMap::new();
1894
1895 hm.insert('a', 1);
1896 hm.insert('b', 2);
1897
1898 hm
1899 };
1900
1901 let v = hm.move_iter().collect::<Vec<(char, int)>>();
1902 assert!([('a', 1), ('b', 2)] == v.as_slice() || [('b', 2), ('a', 1)] == v.as_slice());
1903 }
1904
1905 #[test]
1906 fn test_iterate() {
1907 let mut m = HashMap::with_capacity(4);
1908 for i in range(0u, 32) {
1909 assert!(m.insert(i, i*2));
1910 }
1911 assert_eq!(m.len(), 32);
1912
1913 let mut observed = 0;
1914
1915 for (k, v) in m.iter() {
1916 assert_eq!(*v, *k * 2);
1917 observed |= 1 << *k;
1918 }
1919 assert_eq!(observed, 0xFFFF_FFFF);
1920 }
1921
1922 #[test]
1923 fn test_keys() {
1924 let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
1925 let map = vec.move_iter().collect::<HashMap<int, char>>();
1926 let keys = map.keys().map(|&k| k).collect::<Vec<int>>();
1927 assert_eq!(keys.len(), 3);
1928 assert!(keys.contains(&1));
1929 assert!(keys.contains(&2));
1930 assert!(keys.contains(&3));
1931 }
1932
1933 #[test]
1934 fn test_values() {
1935 let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
1936 let map = vec.move_iter().collect::<HashMap<int, char>>();
1937 let values = map.values().map(|&v| v).collect::<Vec<char>>();
1938 assert_eq!(values.len(), 3);
1939 assert!(values.contains(&'a'));
1940 assert!(values.contains(&'b'));
1941 assert!(values.contains(&'c'));
1942 }
1943
1944 #[test]
1945 fn test_find() {
1946 let mut m = HashMap::new();
1947 assert!(m.find(&1).is_none());
1948 m.insert(1, 2);
1949 match m.find(&1) {
1950 None => fail!(),
1951 Some(v) => assert_eq!(*v, 2)
1952 }
1953 }
1954
1955 #[test]
1956 fn test_eq() {
1957 let mut m1 = HashMap::new();
1958 m1.insert(1, 2);
1959 m1.insert(2, 3);
1960 m1.insert(3, 4);
1961
1962 let mut m2 = HashMap::new();
1963 m2.insert(1, 2);
1964 m2.insert(2, 3);
1965
1966 assert!(m1 != m2);
1967
1968 m2.insert(3, 4);
1969
1970 assert_eq!(m1, m2);
1971 }
1972
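// `grow_at` is the load-factor threshold at which the table grows; inserting
// until it changes forces exactly one resize, after which the length should
// still equal the number of insertions.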
1973 #[test]
1974 fn test_expand() {
1975 let mut m = HashMap::new();
1976
1977 assert_eq!(m.len(), 0);
1978 assert!(m.is_empty());
1979
1980 let mut i = 0u;
1981 let old_resize_at = m.grow_at;
1982 while old_resize_at == m.grow_at {
1983 m.insert(i, i);
1984 i += 1;
1985 }
1986
1987 assert_eq!(m.len(), i);
1988 assert!(!m.is_empty());
1989 }
1990
1991 #[test]
1992 fn test_find_equiv() {
1993 let mut m = HashMap::new();
1994
1995 let (foo, bar, baz) = (1,2,3);
1996 m.insert("foo".to_owned(), foo);
1997 m.insert("bar".to_owned(), bar);
1998 m.insert("baz".to_owned(), baz);
1999
2000
2001 assert_eq!(m.find_equiv(&("foo")), Some(&foo));
2002 assert_eq!(m.find_equiv(&("bar")), Some(&bar));
2003 assert_eq!(m.find_equiv(&("baz")), Some(&baz));
2004
2005 assert_eq!(m.find_equiv(&("qux")), None);
2006 }
2007
2008 #[test]
2009 fn test_from_iter() {
2010 let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
2011
2012 let map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
2013
2014 for &(k, v) in xs.iter() {
2015 assert_eq!(map.find(&k), Some(&v));
2016 }
2017 }
2018
2019 #[test]
2020 fn test_size_hint() {
2021 let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
2022
2023 let map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
2024
2025 let mut iter = map.iter();
2026
2027 for _ in iter.by_ref().take(3) {}
2028
2029 assert_eq!(iter.size_hint(), (3, Some(3)));
2030 }
2031
2032 #[test]
2033 fn test_mut_size_hint() {
2034 let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
2035
2036 let mut map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
2037
2038 let mut iter = map.mut_iter();
2039
2040 for _ in iter.by_ref().take(3) {}
2041
2042 assert_eq!(iter.size_hint(), (3, Some(3)));
2043 }
2044 }
2045
2046 #[cfg(test)]
2047 mod test_set {
2048 use super::HashSet;
2049 use std::container::Container;
2050 use std::slice::ImmutableEqVector;
2051
2052 #[test]
2053 fn test_disjoint() {
2054 let mut xs = HashSet::new();
2055 let mut ys = HashSet::new();
2056 assert!(xs.is_disjoint(&ys));
2057 assert!(ys.is_disjoint(&xs));
2058 assert!(xs.insert(5));
2059 assert!(ys.insert(11));
2060 assert!(xs.is_disjoint(&ys));
2061 assert!(ys.is_disjoint(&xs));
2062 assert!(xs.insert(7));
2063 assert!(xs.insert(19));
2064 assert!(xs.insert(4));
2065 assert!(ys.insert(2));
2066 assert!(ys.insert(-11));
2067 assert!(xs.is_disjoint(&ys));
2068 assert!(ys.is_disjoint(&xs));
2069 assert!(ys.insert(7));
2070 assert!(!xs.is_disjoint(&ys));
2071 assert!(!ys.is_disjoint(&xs));
2072 }
2073
2074 #[test]
2075 fn test_subset_and_superset() {
2076 let mut a = HashSet::new();
2077 assert!(a.insert(0));
2078 assert!(a.insert(5));
2079 assert!(a.insert(11));
2080 assert!(a.insert(7));
2081
2082 let mut b = HashSet::new();
2083 assert!(b.insert(0));
2084 assert!(b.insert(7));
2085 assert!(b.insert(19));
2086 assert!(b.insert(250));
2087 assert!(b.insert(11));
2088 assert!(b.insert(200));
2089
2090 assert!(!a.is_subset(&b));
2091 assert!(!a.is_superset(&b));
2092 assert!(!b.is_subset(&a));
2093 assert!(!b.is_superset(&a));
2094
2095 assert!(b.insert(5));
2096
2097 assert!(a.is_subset(&b));
2098 assert!(!a.is_superset(&b));
2099 assert!(!b.is_subset(&a));
2100 assert!(b.is_superset(&a));
2101 }
2102
2103 #[test]
2104 fn test_iterate() {
2105 let mut a = HashSet::new();
2106 for i in range(0u, 32) {
2107 assert!(a.insert(i));
2108 }
2109 let mut observed = 0;
2110 for k in a.iter() {
2111 observed |= 1 << *k;
2112 }
2113 assert_eq!(observed, 0xFFFF_FFFF);
2114 }
2115
2116 #[test]
2117 fn test_intersection() {
2118 let mut a = HashSet::new();
2119 let mut b = HashSet::new();
2120
2121 assert!(a.insert(11));
2122 assert!(a.insert(1));
2123 assert!(a.insert(3));
2124 assert!(a.insert(77));
2125 assert!(a.insert(103));
2126 assert!(a.insert(5));
2127 assert!(a.insert(-5));
2128
2129 assert!(b.insert(2));
2130 assert!(b.insert(11));
2131 assert!(b.insert(77));
2132 assert!(b.insert(-9));
2133 assert!(b.insert(-42));
2134 assert!(b.insert(5));
2135 assert!(b.insert(3));
2136
2137 let mut i = 0;
2138 let expected = [3, 5, 11, 77];
2139 for x in a.intersection(&b) {
2140 assert!(expected.contains(x));
2141 i += 1
2142 }
2143 assert_eq!(i, expected.len());
2144 }
2145
2146 #[test]
2147 fn test_difference() {
2148 let mut a = HashSet::new();
2149 let mut b = HashSet::new();
2150
2151 assert!(a.insert(1));
2152 assert!(a.insert(3));
2153 assert!(a.insert(5));
2154 assert!(a.insert(9));
2155 assert!(a.insert(11));
2156
2157 assert!(b.insert(3));
2158 assert!(b.insert(9));
2159
2160 let mut i = 0;
2161 let expected = [1, 5, 11];
2162 for x in a.difference(&b) {
2163 assert!(expected.contains(x));
2164 i += 1
2165 }
2166 assert_eq!(i, expected.len());
2167 }
2168
2169 #[test]
2170 fn test_symmetric_difference() {
2171 let mut a = HashSet::new();
2172 let mut b = HashSet::new();
2173
2174 assert!(a.insert(1));
2175 assert!(a.insert(3));
2176 assert!(a.insert(5));
2177 assert!(a.insert(9));
2178 assert!(a.insert(11));
2179
2180 assert!(b.insert(-2));
2181 assert!(b.insert(3));
2182 assert!(b.insert(9));
2183 assert!(b.insert(14));
2184 assert!(b.insert(22));
2185
2186 let mut i = 0;
2187 let expected = [-2, 1, 5, 11, 14, 22];
2188 for x in a.symmetric_difference(&b) {
2189 assert!(expected.contains(x));
2190 i += 1
2191 }
2192 assert_eq!(i, expected.len());
2193 }
2194
2195 #[test]
2196 fn test_union() {
2197 let mut a = HashSet::new();
2198 let mut b = HashSet::new();
2199
2200 assert!(a.insert(1));
2201 assert!(a.insert(3));
2202 assert!(a.insert(5));
2203 assert!(a.insert(9));
2204 assert!(a.insert(11));
2205 assert!(a.insert(16));
2206 assert!(a.insert(19));
2207 assert!(a.insert(24));
2208
2209 assert!(b.insert(-2));
2210 assert!(b.insert(1));
2211 assert!(b.insert(5));
2212 assert!(b.insert(9));
2213 assert!(b.insert(13));
2214 assert!(b.insert(19));
2215
2216 let mut i = 0;
2217 let expected = [-2, 1, 3, 5, 9, 11, 13, 16, 19, 24];
2218 for x in a.union(&b) {
2219 assert!(expected.contains(x));
2220 i += 1
2221 }
2222 assert_eq!(i, expected.len());
2223 }
2224
2225 #[test]
2226 fn test_from_iter() {
2227 let xs = [1, 2, 3, 4, 5, 6, 7, 8, 9];
2228
2229 let set: HashSet<int> = xs.iter().map(|&x| x).collect();
2230
2231 for x in xs.iter() {
2232 assert!(set.contains(x));
2233 }
2234 }
2235
2236 #[test]
2237 fn test_move_iter() {
2238 let hs = {
2239 let mut hs = HashSet::new();
2240
2241 hs.insert('a');
2242 hs.insert('b');
2243
2244 hs
2245 };
2246
2247 let v = hs.move_iter().collect::<Vec<char>>();
2248 assert!(['a', 'b'] == v.as_slice() || ['b', 'a'] == v.as_slice());
2249 }
2250
2251 #[test]
2252 fn test_eq() {
2253 // These constants once happened to expose a bug in insert().
2254 // I'm keeping them around to prevent a regression.
2255 let mut s1 = HashSet::new();
2256
2257 s1.insert(1);
2258 s1.insert(2);
2259 s1.insert(3);
2260
2261 let mut s2 = HashSet::new();
2262
2263 s2.insert(1);
2264 s2.insert(2);
2265
2266 assert!(s1 != s2);
2267
2268 s2.insert(3);
2269
2270 assert_eq!(s1, s2);
2271 }
2272
2273 #[test]
2274 fn test_show() {
2275 let mut set: HashSet<int> = HashSet::new();
2276 let empty: HashSet<int> = HashSet::new();
2277
2278 set.insert(1);
2279 set.insert(2);
2280
2281 let set_str = format!("{}", set);
2282
2283 assert!(set_str == "{1, 2}".to_owned() || set_str == "{2, 1}".to_owned());
2284 assert_eq!(format!("{}", empty), "{}".to_owned());
2285 }
2286 }
2287
2288 #[cfg(test)]
2289 mod bench {
2290 extern crate test;
2291 use self::test::Bencher;
2292 use std::iter::{range_inclusive};
2293
2294 #[bench]
2295 fn new_drop(b: &mut Bencher) {
2296 use super::HashMap;
2297
2298 b.iter(|| {
2299 let m : HashMap<int, int> = HashMap::new();
2300 assert_eq!(m.len(), 0);
2301 })
2302 }
2303
2304 #[bench]
2305 fn new_insert_drop(b: &mut Bencher) {
2306 use super::HashMap;
2307
2308 b.iter(|| {
2309 let mut m = HashMap::new();
2310 m.insert(0, 0);
2311 assert_eq!(m.len(), 1);
2312 })
2313 }
2314
2315 #[bench]
2316 fn insert(b: &mut Bencher) {
2317 use super::HashMap;
2318
2319 let mut m = HashMap::new();
2320
2321 for i in range_inclusive(1, 1000) {
2322 m.insert(i, i);
2323 }
2324
2325 let mut k = 1001;
2326
2327 b.iter(|| {
2328 m.insert(k, k);
2329 k += 1;
2330 });
2331 }
2332
2333 #[bench]
2334 fn find_existing(b: &mut Bencher) {
2335 use super::HashMap;
2336
2337 let mut m = HashMap::new();
2338
2339 for i in range_inclusive(1, 1000) {
2340 m.insert(i, i);
2341 }
2342
2343 b.iter(|| {
2344 for i in range_inclusive(1, 1000) {
2345 m.contains_key(&i);
2346 }
2347 });
2348 }
2349
2350 #[bench]
2351 fn find_nonexisting(b: &mut Bencher) {
2352 use super::HashMap;
2353
2354 let mut m = HashMap::new();
2355
2356 for i in range_inclusive(1, 1000) {
2357 m.insert(i, i);
2358 }
2359
2360 b.iter(|| {
2361 for i in range_inclusive(1001, 2000) {
2362 m.contains_key(&i);
2363 }
2364 });
2365 }
2366
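// A sliding-window workload: every iteration removes the oldest key and
// inserts a fresh one, keeping the map at a steady ~1000 entries.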
2367 #[bench]
2368 fn hashmap_as_queue(b: &mut Bencher) {
2369 use super::HashMap;
2370
2371 let mut m = HashMap::new();
2372
2373 for i in range_inclusive(1, 1000) {
2374 m.insert(i, i);
2375 }
2376
2377 let mut k = 1;
2378
2379 b.iter(|| {
2380 m.pop(&k);
2381 m.insert(k + 1000, k + 1000);
2382 k += 1;
2383 });
2384 }
2385
2386 #[bench]
2387 fn find_pop_insert(b: &mut Bencher) {
2388 use super::HashMap;
2389
2390 let mut m = HashMap::new();
2391
2392 for i in range_inclusive(1, 1000) {
2393 m.insert(i, i);
2394 }
2395
2396 let mut k = 1;
2397
2398 b.iter(|| {
2399 m.find(&(k + 400));
2400 m.find(&(k + 2000));
2401 m.pop(&k);
2402 m.insert(k + 1000, k + 1000);
2403 k += 1;
2404 })
2405 }
2406 }