// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
// ignore-lexer-test FIXME #15883
//! Unordered containers, implemented as hash-tables (`HashSet` and `HashMap` types)
use clone::Clone;
use cmp::{max, Eq, Equiv, PartialEq};
use collections::{Collection, Mutable, Set, MutableSet, Map, MutableMap};
use default::Default;
use fmt::Show;
use fmt;
use hash::{Hash, Hasher, RandomSipHasher};
use iter::{Iterator, FilterMap, Chain, Repeat, Zip, Extendable};
use iter::{range, range_inclusive, FromIterator};
use iter;
use mem::replace;
use num;
use option::{Some, None, Option};
use result::{Ok, Err};
mod table {
use clone::Clone;
use cmp;
use hash::{Hash, Hasher};
use iter::range_step_inclusive;
use iter::{Iterator, range};
use kinds::marker;
use mem::{min_align_of, size_of};
use mem::{overwrite, transmute};
use num::{CheckedMul, is_power_of_two};
use ops::Drop;
use option::{Some, None, Option};
use ptr::RawPtr;
use ptr::set_memory;
use ptr;
use rt::heap::{allocate, deallocate};
static EMPTY_BUCKET: u64 = 0u64;
/// The raw hashtable, providing safe-ish access to the unzipped and highly
/// optimized arrays of hashes, keys, and values.
///
/// This design uses less memory and is a lot faster than the naive
/// `Vec<Option<(u64, K, V)>>`, because we don't pay for the overhead of an
/// option on every element, and we get a generally more cache-aware design.
///
/// Key invariants of this structure:
///
/// - if hashes[i] == EMPTY_BUCKET, then keys[i] and vals[i] have
/// 'undefined' contents. Don't read from them. This invariant is
/// enforced outside this module with the `EmptyIndex`, `FullIndex`,
/// and `SafeHash` types.
///
/// - An `EmptyIndex` is only constructed for a bucket at an index with
/// a hash of EMPTY_BUCKET.
///
/// - A `FullIndex` is only constructed for a bucket at an index with a
/// non-EMPTY_BUCKET hash.
///
/// - A `SafeHash` is only constructed for a non-`EMPTY_BUCKET` hash. We get
/// around hashes of zero by changing them to 0x8000_0000_0000_0000,
/// which will likely map to the same bucket, while not being confused
/// with "empty".
///
/// - All three "arrays represented by pointers" are the same length:
/// `capacity`. This is set at creation and never changes. The arrays
/// are unzipped to save space (we don't have to pay for the padding
/// between odd-sized elements, such as in a map from u64 to u8), and to
/// be more cache-aware (scanning through 8 hashes brings in 2 cache
/// lines, since they're all right beside each other).
///
/// You can kind of think of this module/data structure as a safe wrapper
/// around just the "table" part of the hashtable. It enforces some
/// invariants at the type level and employs some performance trickery,
/// but in general is just a tricked-out `Vec<Option<(u64, K, V)>>`.
///
/// FIXME(cgaebel):
///
/// Feb 11, 2014: This hashtable was just implemented, and, hard as I tried,
/// isn't yet totally safe. There's a "known exploit" that you can create
/// multiple FullIndexes for a bucket, `take` one, and then still `take`
/// the other causing undefined behavior. Currently, there's no story
/// for how to protect against this statically. Therefore, there are asserts
/// on `take`, `get`, `get_mut`, and `put` which check the bucket state.
/// With time, and when we're confident this works correctly, they should
/// be removed. Also, the bounds check in `peek` is especially painful,
/// as that's called in the innermost loops of the hashtable and has the
/// potential to be a major performance drain. Remove this too.
///
/// Or, better than remove, only enable these checks for debug builds.
/// There are currently no "debug-only" asserts in rust, so if you're reading
/// this and going "what? of course there are debug-only asserts!", then
/// please make this use them!
#[unsafe_no_drop_flag]
pub struct RawTable<K, V> {
capacity: uint,
size: uint,
hashes: *mut u64,
keys: *mut K,
vals: *mut V,
}
/// Represents an index into a `RawTable` with no key or value in it.
pub struct EmptyIndex {
idx: int,
nocopy: marker::NoCopy,
}
/// Represents an index into a `RawTable` with a key, value, and hash
/// in it.
pub struct FullIndex {
idx: int,
hash: SafeHash,
nocopy: marker::NoCopy,
}
impl FullIndex {
/// Since we get the hash for free whenever we check the bucket state,
/// this function is provided for fast access, letting us avoid
/// redundant trips back to the hashtable.
#[inline(always)]
pub fn hash(&self) -> SafeHash { self.hash }
/// Same comment as with `hash`.
#[inline(always)]
pub fn raw_index(&self) -> uint { self.idx as uint }
}
/// Represents the state of a bucket: it can either have a key/value
/// pair (be full) or not (be empty). You cannot `take` empty buckets,
/// and you cannot `put` into full buckets.
pub enum BucketState {
Empty(EmptyIndex),
Full(FullIndex),
}
/// A hash that is not zero, since we use a hash of zero to represent empty
/// buckets.
#[deriving(PartialEq)]
pub struct SafeHash {
hash: u64,
}
impl SafeHash {
/// Peek at the hash value, which is guaranteed to be non-zero.
#[inline(always)]
pub fn inspect(&self) -> u64 { self.hash }
}
/// We need to remove hashes of 0. That's reserved for empty buckets.
/// This function wraps up `hash_keyed` to be the only way outside this
/// module to generate a SafeHash.
pub fn make_hash<T: Hash<S>, S, H: Hasher<S>>(hasher: &H, t: &T) -> SafeHash {
match hasher.hash(t) {
// This constant is exceedingly likely to hash to the same
// bucket, but it won't be counted as empty!
EMPTY_BUCKET => SafeHash { hash: 0x8000_0000_0000_0000 },
h => SafeHash { hash: h },
}
}
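// A minimal sanity check of the remapping rule in `make_hash` above. It
// restates the match arm on plain u64 values rather than driving a real
// `Hasher`, just to show that the reserved EMPTY_BUCKET value is always
// displaced to a non-zero constant.
#[test]
fn test_make_hash_never_empty() {
    fn remap(h: u64) -> u64 {
        if h == EMPTY_BUCKET { 0x8000_0000_0000_0000 } else { h }
    }
    assert_eq!(remap(EMPTY_BUCKET), 0x8000_0000_0000_0000);
    assert_eq!(remap(42), 42);
    assert!(remap(EMPTY_BUCKET) != EMPTY_BUCKET);
}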
fn round_up_to_next(unrounded: uint, target_alignment: uint) -> uint {
assert!(is_power_of_two(target_alignment));
(unrounded + target_alignment - 1) & !(target_alignment - 1)
}
#[test]
fn test_rounding() {
assert_eq!(round_up_to_next(0, 4), 0);
assert_eq!(round_up_to_next(1, 4), 4);
assert_eq!(round_up_to_next(2, 4), 4);
assert_eq!(round_up_to_next(3, 4), 4);
assert_eq!(round_up_to_next(4, 4), 4);
assert_eq!(round_up_to_next(5, 4), 8);
}
// Returns a tuple of (minimum required malloc alignment, hash_offset,
// key_offset, val_offset, array_size), from the start of a mallocated array.
fn calculate_offsets(
hash_size: uint, hash_align: uint,
keys_size: uint, keys_align: uint,
vals_size: uint, vals_align: uint) -> (uint, uint, uint, uint, uint) {
let hash_offset = 0;
let end_of_hashes = hash_offset + hash_size;
let keys_offset = round_up_to_next(end_of_hashes, keys_align);
let end_of_keys = keys_offset + keys_size;
let vals_offset = round_up_to_next(end_of_keys, vals_align);
let end_of_vals = vals_offset + vals_size;
let min_align = cmp::max(hash_align, cmp::max(keys_align, vals_align));
(min_align, hash_offset, keys_offset, vals_offset, end_of_vals)
}
#[test]
fn test_offset_calculation() {
assert_eq!(calculate_offsets(128, 8, 15, 1, 4, 4 ), (8, 0, 128, 144, 148));
assert_eq!(calculate_offsets(3, 1, 2, 1, 1, 1 ), (1, 0, 3, 5, 6));
assert_eq!(calculate_offsets(6, 2, 12, 4, 24, 8), (8, 0, 8, 24, 48));
}
impl<K, V> RawTable<K, V> {
/// Does not initialize the buckets. The caller should ensure that,
/// at the very least, every hash is set to EMPTY_BUCKET.
unsafe fn new_uninitialized(capacity: uint) -> RawTable<K, V> {
let hashes_size = capacity.checked_mul(&size_of::<u64>())
.expect("capacity overflow");
let keys_size = capacity.checked_mul(&size_of::< K >())
.expect("capacity overflow");
let vals_size = capacity.checked_mul(&size_of::< V >())
.expect("capacity overflow");
// Allocating hashmaps is a little tricky. We need to allocate three
// arrays, but since we know their sizes and alignments up front,
// we just allocate a single array, and then have the subarrays
// point into it.
//
// This is great in theory, but in practice getting the alignment
// right is a little subtle. Therefore, calculating offsets has been
// factored out into a different function.
let (malloc_alignment, hash_offset, keys_offset, vals_offset, size) =
calculate_offsets(
hashes_size, min_align_of::<u64>(),
keys_size, min_align_of::< K >(),
vals_size, min_align_of::< V >());
let buffer = allocate(size, malloc_alignment);
let hashes = buffer.offset(hash_offset as int) as *mut u64;
let keys = buffer.offset(keys_offset as int) as *mut K;
let vals = buffer.offset(vals_offset as int) as *mut V;
RawTable {
capacity: capacity,
size: 0,
hashes: hashes,
keys: keys,
vals: vals,
}
}
/// Creates a new raw table from a given capacity. All buckets are
/// initially empty.
#[allow(experimental)]
pub fn new(capacity: uint) -> RawTable<K, V> {
unsafe {
let ret = RawTable::new_uninitialized(capacity);
set_memory(ret.hashes, 0u8, capacity);
ret
}
}
/// Reads a bucket at a given index, returning an enum indicating whether
/// there's anything there or not. You need to match on this enum to get
/// the appropriate types to pass on to most of the other functions in
/// this module.
pub fn peek(&self, index: uint) -> BucketState {
debug_assert!(index < self.capacity);
let idx = index as int;
let hash = unsafe { *self.hashes.offset(idx) };
let nocopy = marker::NoCopy;
match hash {
EMPTY_BUCKET =>
Empty(EmptyIndex {
idx: idx,
nocopy: nocopy
}),
full_hash =>
Full(FullIndex {
idx: idx,
hash: SafeHash { hash: full_hash },
nocopy: nocopy,
})
}
}
/// Gets references to the key and value at a given index.
pub fn read<'a>(&'a self, index: &FullIndex) -> (&'a K, &'a V) {
let idx = index.idx;
unsafe {
debug_assert!(*self.hashes.offset(idx) != EMPTY_BUCKET);
(&*self.keys.offset(idx), &*self.vals.offset(idx))
}
}
/// Gets references to the key and value at a given index, with the
/// value's reference being mutable.
pub fn read_mut<'a>(&'a mut self, index: &FullIndex) -> (&'a K, &'a mut V) {
let idx = index.idx;
unsafe {
debug_assert!(*self.hashes.offset(idx) != EMPTY_BUCKET);
(&*self.keys.offset(idx), &mut *self.vals.offset(idx))
}
}
/// Read everything, mutably.
pub fn read_all_mut<'a>(&'a mut self, index: &FullIndex)
-> (&'a mut SafeHash, &'a mut K, &'a mut V) {
let idx = index.idx;
unsafe {
debug_assert!(*self.hashes.offset(idx) != EMPTY_BUCKET);
(transmute(self.hashes.offset(idx)),
&mut *self.keys.offset(idx), &mut *self.vals.offset(idx))
}
}
/// Puts a key and value pair, along with the key's hash, into a given
/// index in the hashtable. Note how the `EmptyIndex` is 'moved' into this
/// function, because that slot will no longer be empty when we return!
/// A FullIndex is returned for later use, pointing to the newly-filled
/// slot in the hashtable.
///
/// Use `make_hash` to construct a `SafeHash` to pass to this function.
pub fn put(&mut self, index: EmptyIndex, hash: SafeHash, k: K, v: V) -> FullIndex {
let idx = index.idx;
unsafe {
debug_assert_eq!(*self.hashes.offset(idx), EMPTY_BUCKET);
*self.hashes.offset(idx) = hash.inspect();
overwrite(&mut *self.keys.offset(idx), k);
overwrite(&mut *self.vals.offset(idx), v);
}
self.size += 1;
FullIndex { idx: idx, hash: hash, nocopy: marker::NoCopy }
}
/// Removes a key and value from the hashtable.
///
/// This works similarly to `put`, building an `EmptyIndex` out of the
/// taken FullIndex.
pub fn take(&mut self, index: FullIndex) -> (EmptyIndex, K, V) {
let idx = index.idx;
unsafe {
debug_assert!(*self.hashes.offset(idx) != EMPTY_BUCKET);
*self.hashes.offset(idx) = EMPTY_BUCKET;
// Drop the mutable constraint.
let keys = self.keys as *const K;
let vals = self.vals as *const V;
let k = ptr::read(keys.offset(idx));
let v = ptr::read(vals.offset(idx));
self.size -= 1;
(EmptyIndex { idx: idx, nocopy: marker::NoCopy }, k, v)
}
}
/// The hashtable's capacity, similar to a vector's.
pub fn capacity(&self) -> uint {
self.capacity
}
/// The number of elements ever `put` in the hashtable, minus the number
/// of elements ever `take`n.
pub fn size(&self) -> uint {
self.size
}
pub fn iter<'a>(&'a self) -> Entries<'a, K, V> {
Entries { table: self, idx: 0, elems_seen: 0 }
}
pub fn mut_iter<'a>(&'a mut self) -> MutEntries<'a, K, V> {
MutEntries { table: self, idx: 0, elems_seen: 0 }
}
pub fn move_iter(self) -> MoveEntries<K, V> {
MoveEntries { table: self, idx: 0 }
}
}
// `read_all_mut` casts a `*u64` to a `*SafeHash`. Since we statically
// ensure that a `FullIndex` points to an index with a non-zero hash,
// and a `SafeHash` is just a `u64` with a different name, this is
// safe.
//
// This test ensures that a `SafeHash` really IS the same size as a
// `u64`. If you need to change the size of `SafeHash` (and
// consequently made this test fail), `read_all_mut` needs to be
// modified to no longer assume this.
#[test]
fn can_alias_safehash_as_u64() {
assert_eq!(size_of::<SafeHash>(), size_of::<u64>())
}
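// An illustrative walk of a single bucket through the empty -> full ->
// empty lifecycle enforced by the `EmptyIndex`/`FullIndex` types, as
// described in the module docs above. The capacity, hash, key, and
// value are arbitrary.
#[test]
fn test_put_peek_take_roundtrip() {
    let mut table: RawTable<uint, uint> = RawTable::new(8);
    let empty = match table.peek(0) {
        Empty(e) => e,
        Full(_) => fail!("a fresh table should contain only empty buckets")
    };
    // `put` consumes the EmptyIndex and hands back a FullIndex...
    let full = table.put(empty, SafeHash { hash: 42 }, 1u, 2u);
    assert_eq!(table.size(), 1);
    // ...and `take` consumes the FullIndex, returning the pair.
    let (_, k, v) = table.take(full);
    assert_eq!((k, v), (1u, 2u));
    assert_eq!(table.size(), 0);
}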
/// Iterator over shared references to entries in a table.
pub struct Entries<'a, K, V> {
table: &'a RawTable<K, V>,
idx: uint,
elems_seen: uint,
}
/// Iterator over mutable references to entries in a table.
pub struct MutEntries<'a, K, V> {
table: &'a mut RawTable<K, V>,
idx: uint,
elems_seen: uint,
}
/// Iterator over the entries in a table, consuming the table.
pub struct MoveEntries<K, V> {
table: RawTable<K, V>,
idx: uint
}
impl<'a, K, V> Iterator<(&'a K, &'a V)> for Entries<'a, K, V> {
fn next(&mut self) -> Option<(&'a K, &'a V)> {
while self.idx < self.table.capacity() {
let i = self.idx;
self.idx += 1;
match self.table.peek(i) {
Empty(_) => {},
Full(idx) => {
self.elems_seen += 1;
return Some(self.table.read(&idx));
}
}
}
None
}
fn size_hint(&self) -> (uint, Option<uint>) {
let size = self.table.size() - self.elems_seen;
(size, Some(size))
}
}
impl<'a, K, V> Iterator<(&'a K, &'a mut V)> for MutEntries<'a, K, V> {
fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
while self.idx < self.table.capacity() {
let i = self.idx;
self.idx += 1;
match self.table.peek(i) {
Empty(_) => {},
// the transmute here fixes:
// error: lifetime of `self` is too short to guarantee its contents
// can be safely reborrowed
Full(idx) => unsafe {
self.elems_seen += 1;
return Some(transmute(self.table.read_mut(&idx)));
}
}
}
None
}
fn size_hint(&self) -> (uint, Option<uint>) {
let size = self.table.size() - self.elems_seen;
(size, Some(size))
}
}
impl<K, V> Iterator<(SafeHash, K, V)> for MoveEntries<K, V> {
fn next(&mut self) -> Option<(SafeHash, K, V)> {
while self.idx < self.table.capacity() {
let i = self.idx;
self.idx += 1;
match self.table.peek(i) {
Empty(_) => {},
Full(idx) => {
let h = idx.hash();
let (_, k, v) = self.table.take(idx);
return Some((h, k, v));
}
}
}
None
}
fn size_hint(&self) -> (uint, Option<uint>) {
let size = self.table.size();
(size, Some(size))
}
}
impl<K: Clone, V: Clone> Clone for RawTable<K, V> {
fn clone(&self) -> RawTable<K, V> {
unsafe {
let mut new_ht = RawTable::new_uninitialized(self.capacity());
for i in range(0, self.capacity()) {
match self.peek(i) {
Empty(_) => {
*new_ht.hashes.offset(i as int) = EMPTY_BUCKET;
},
Full(idx) => {
let hash = idx.hash().inspect();
let (k, v) = self.read(&idx);
*new_ht.hashes.offset(i as int) = hash;
overwrite(&mut *new_ht.keys.offset(i as int), (*k).clone());
overwrite(&mut *new_ht.vals.offset(i as int), (*v).clone());
}
}
}
new_ht.size = self.size();
new_ht
}
}
}
#[unsafe_destructor]
impl<K, V> Drop for RawTable<K, V> {
fn drop(&mut self) {
// This is in reverse because we're likely to have partially taken
// some elements out with `.move_iter()` from the front.
for i in range_step_inclusive(self.capacity as int - 1, 0, -1) {
// Check if the size is 0, so we don't do a useless scan when
// dropping empty tables such as on resize.
if self.size == 0 { break }
match self.peek(i as uint) {
Empty(_) => {},
Full(idx) => { self.take(idx); }
}
}
assert_eq!(self.size, 0);
if self.hashes.is_not_null() {
let hashes_size = self.capacity * size_of::<u64>();
let keys_size = self.capacity * size_of::<K>();
let vals_size = self.capacity * size_of::<V>();
let (align, _, _, _, size) = calculate_offsets(hashes_size, min_align_of::<u64>(),
keys_size, min_align_of::<K>(),
vals_size, min_align_of::<V>());
unsafe {
deallocate(self.hashes as *mut u8, size, align);
// Remember how everything was allocated out of one buffer
// during initialization? We only need one call to free here.
}
self.hashes = RawPtr::null();
}
}
}
}
static INITIAL_LOG2_CAP: uint = 5;
static INITIAL_CAPACITY: uint = 1 << INITIAL_LOG2_CAP; // 2^5
/// The default behavior of HashMap implements a load factor of 90.9%.
/// This behavior is characterized by the following conditions:
///
/// - if `size * 1.1 < cap < size * 4` then shouldn't resize
/// - if `cap < minimum_capacity * 2` then shouldn't shrink
#[deriving(Clone)]
struct DefaultResizePolicy {
/// Doubled minimal capacity. The capacity must never drop below
/// the minimum capacity. (The check happens before the capacity
/// is potentially halved.)
minimum_capacity2: uint
}
impl DefaultResizePolicy {
fn new(new_capacity: uint) -> DefaultResizePolicy {
DefaultResizePolicy {
minimum_capacity2: new_capacity << 1
}
}
#[inline]
fn capacity_range(&self, new_size: uint) -> (uint, uint) {
((new_size * 11) / 10, max(new_size << 3, self.minimum_capacity2))
}
#[inline]
fn reserve(&mut self, new_capacity: uint) {
self.minimum_capacity2 = new_capacity << 1;
}
}
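// A small numeric check of what `capacity_range` above computes. The
// inputs are arbitrary: with a reserved capacity of 16 and 100 elements,
// the allowed capacity window is ((100 * 11) / 10, max(100 << 3, 16 << 1)).
#[test]
fn test_default_resize_policy_range() {
    let policy = DefaultResizePolicy::new(16);
    let (lo, hi) = policy.capacity_range(100);
    assert_eq!(lo, 110); // below this, the map is more than 90.9% full
    assert_eq!(hi, 800); // above this, the map is a candidate for shrinking
}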
// The main performance trick in this hashmap is called Robin Hood Hashing.
// It gains its excellent performance from one key invariant:
//
// If an insertion collides with an existing element, and that element's
// "probe distance" (how far away the element is from its ideal location)
// is higher than how far we've already probed, swap the elements.
//
// This massively lowers variance in probe distance, and allows us to get very
// high load factors with good performance. The 90% load factor I use is rather
// conservative.
//
// > Why a load factor of approximately 90%?
//
// In general, all the distances to initial buckets will converge on the mean.
// At a load factor of α, the odds of finding the target bucket after k
// probes is approximately 1-α^k. If we set this equal to 50% (since we converge
// on the mean) and set k=8 (64-byte cache line / 8-byte hash), α=0.92. I round
// this down to make the math easier on the CPU and avoid its FPU.
// Since on average we start the probing in the middle of a cache line, this
// strategy pulls in two cache lines of hashes on every lookup. I think that's
// pretty good, but if you want to trade off some space, it could go down to one
// cache line on average with an α of 0.84.
//
// > Wait, what? Where did you get 1-α^k from?
//
// On the first probe, your odds of a collision with an existing element is α.
// The odds of doing this twice in a row is approximately α^2. For three times,
// α^3, etc. Therefore, the odds of colliding k times is α^k. The odds of NOT
// colliding after k tries is 1-α^k. (A small sanity-check test after this
// comment block works through this arithmetic.)
//
// Future Improvements (FIXME!)
// ============================
//
// Allow the load factor to be changed dynamically and/or at initialization.
//
// Also, would it be possible for us to reuse storage when growing the
// underlying table? This is exactly the use case for 'realloc', and may
// be worth exploring.
//
// Future Optimizations (FIXME!)
// =============================
//
// The paper cited below mentions an implementation which keeps track of the
// distance-to-initial-bucket histogram. I'm suspicious of this approach because
// it requires maintaining an internal map. If this map were replaced with a
// hashmap, it would be faster, but now our data structure is self-referential
// and blows up. Also, this allows very good first guesses, but array accesses
// are no longer linear and in one direction, as we have now. There is also
// memory and cache pressure that this map would entail that would be very
// difficult to properly see in a microbenchmark.
//
// Another possible design choice that I made without any real reason is
// parameterizing the raw table over keys and values. Technically, all we need
// is the size and alignment of keys and values, and the code should be just as
// efficient (well, we might need one for power-of-two size and one for not...).
// This has the potential to reduce code bloat in rust executables, without
// really losing anything except 4 words (key size, key alignment, val size,
// val alignment) which can be passed in to every call of a `RawTable` function.
// This would definitely be an avenue worth exploring if people start complaining
// about the size of rust executables.
//
// There's also an "optimization" that has been omitted regarding how the
// hashtable allocates. The vector type has set the expectation that a hashtable
// which never has an element inserted should not allocate. I'm suspicious of
// implementing this for hashtables, because supporting it has no performance
// benefit over using an `Option<HashMap<K, V>>`, and is significantly more
// complicated.
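// A sanity check of the 1-α^k arithmetic from the comment above: with
// α = 0.92 and k = 8 probes (one cache line of hashes), the probability
// of having found the target bucket is roughly 50%.
#[test]
fn test_load_factor_probe_arithmetic() {
    let alpha = 0.92f64;
    let mut all_collide = 1.0f64;
    for _ in range(0u, 8u) {
        all_collide *= alpha; // α^8 after the loop
    }
    let p_found = 1.0 - all_collide; // ≈ 0.49
    assert!(p_found > 0.45 && p_found < 0.55);
}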
/// A hash map implementation which uses linear probing with Robin
/// Hood bucket stealing.
///
/// The hashes are all keyed by the task-local random number generator
/// on creation by default. This means that the ordering of the keys is
/// randomized, but it also makes the tables more resistant to
/// denial-of-service attacks (Hash DoS). This behaviour can be
/// overridden with one of the constructors.
///
/// It is required that the keys implement the `Eq` and `Hash` traits, although
/// this can frequently be achieved by using `#[deriving(Eq, Hash)]`.
///
/// Relevant papers/articles:
///
/// 1. Pedro Celis. ["Robin Hood Hashing"](https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf)
/// 2. Emmanuel Goossaert. ["Robin Hood
/// hashing"](http://codecapsule.com/2013/11/11/robin-hood-hashing/)
/// 3. Emmanuel Goossaert. ["Robin Hood hashing: backward shift
/// deletion"](http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/)
///
/// # Example
///
/// ```
/// use std::collections::HashMap;
///
/// // type inference lets us omit an explicit type signature (which
/// // would be `HashMap<&str, &str>` in this example).
/// let mut book_reviews = HashMap::new();
///
/// // review some books.
/// book_reviews.insert("Adventures of Huckleberry Finn", "My favorite book.");
/// book_reviews.insert("Grimms' Fairy Tales", "Masterpiece.");
/// book_reviews.insert("Pride and Prejudice", "Very enjoyable.");
/// book_reviews.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.");
///
/// // check for a specific one.
/// if !book_reviews.contains_key(&("Les Misérables")) {
/// println!("We've got {} reviews, but Les Misérables ain't one.",
/// book_reviews.len());
/// }
///
/// // oops, this review has a lot of spelling mistakes, let's delete it.
/// book_reviews.remove(&("The Adventures of Sherlock Holmes"));
///
/// // look up the values associated with some keys.
/// let to_find = ["Pride and Prejudice", "Alice's Adventure in Wonderland"];
/// for book in to_find.iter() {
/// match book_reviews.find(book) {
/// Some(review) => println!("{}: {}", *book, *review),
/// None => println!("{} is unreviewed.", *book)
/// }
/// }
///
/// // iterate over everything.
/// for (book, review) in book_reviews.iter() {
/// println!("{}: \"{}\"", *book, *review);
/// }
/// ```
///
/// The easiest way to use `HashMap` with a custom type is to derive `Eq` and `Hash`.
/// We must also derive `PartialEq`.
///
/// ```
/// use std::collections::HashMap;
///
/// #[deriving(Hash, Eq, PartialEq, Show)]
/// struct Viking<'a> {
/// name: &'a str,
/// power: uint,
/// }
///
/// let mut vikings = HashMap::new();
///
/// vikings.insert("Norway", Viking { name: "Einar", power: 9u });
/// vikings.insert("Denmark", Viking { name: "Olaf", power: 4u });
/// vikings.insert("Iceland", Viking { name: "Harald", power: 8u });
///
/// // Use derived implementation to print the vikings.
/// for (land, viking) in vikings.iter() {
/// println!("{} at {}", viking, land);
/// }
/// ```
#[deriving(Clone)]
pub struct HashMap<K, V, H = RandomSipHasher> {
// All hashes are keyed on these values, to prevent hash collision attacks.
hasher: H,
table: table::RawTable<K, V>,
// We keep this at the end since it might as well have tail padding.
resize_policy: DefaultResizePolicy,
}
impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> HashMap<K, V, H> {
// Probe the `idx`th bucket for a given hash, returning the index of the
// target bucket.
//
// This exploits the power-of-two size of the hashtable. As long as this
// is always true, we can use a bitmask of cap-1 to do modular arithmetic
// (a small test after this impl block sketches the wrap-around).
//
// Prefer using this with increasing values of `idx` rather than repeatedly
// calling `probe_next`. This reduces data-dependencies between loops, which
// can help the optimizer, and certainly won't hurt it. `probe_next` is
// simply for convenience, and is no more efficient than `probe`.
fn probe(&self, hash: &table::SafeHash, idx: uint) -> uint {
let hash_mask = self.table.capacity() - 1;
// So I heard a rumor that unsigned overflow is safe in rust..
((hash.inspect() as uint) + idx) & hash_mask
}
// Generate the next probe in a sequence. Prefer using 'probe' by itself,
// but this can sometimes be useful.
fn probe_next(&self, probe: uint) -> uint {
let hash_mask = self.table.capacity() - 1;
(probe + 1) & hash_mask
}
fn make_hash<X: Hash<S>>(&self, x: &X) -> table::SafeHash {
table::make_hash(&self.hasher, x)
}
/// Get the distance of the bucket at the given index that it lies
/// from its 'ideal' location.
///
/// In the cited blog posts above, this is called the "distance to
/// initial bucket", or DIB.
fn bucket_distance(&self, index_of_elem: &table::FullIndex) -> uint {
// where the hash of the element that happens to reside at
// `index_of_elem` tried to place itself first.
let first_probe_index = self.probe(&index_of_elem.hash(), 0);
let raw_index = index_of_elem.raw_index();
if first_probe_index <= raw_index {
// probe just went forward
raw_index - first_probe_index
} else {
// probe wrapped around the hashtable
raw_index + (self.table.capacity() - first_probe_index)
}
}
/// Search for a pre-hashed key.
fn search_hashed_generic(&self, hash: &table::SafeHash, is_match: |&K| -> bool)
-> Option<table::FullIndex> {
for num_probes in range(0u, self.table.size()) {
let probe = self.probe(hash, num_probes);
let idx = match self.table.peek(probe) {
table::Empty(_) => return None, // hit an empty bucket
table::Full(idx) => idx
};
// We can finish the search early if we hit any bucket
// with a lower distance to initial bucket than we've probed.
if self.bucket_distance(&idx) < num_probes { return None }
// If the hash doesn't match, it can't be this one..
if *hash != idx.hash() { continue }
let (k, _) = self.table.read(&idx);
// If the key doesn't match, it can't be this one..
if !is_match(k) { continue }
return Some(idx);
}
return None
}
fn search_hashed(&self, hash: &table::SafeHash, k: &K) -> Option<table::FullIndex> {
self.search_hashed_generic(hash, |k_| *k == *k_)
}
fn search_equiv<Q: Hash<S> + Equiv<K>>(&self, q: &Q) -> Option<table::FullIndex> {
self.search_hashed_generic(&self.make_hash(q), |k| q.equiv(k))
}
/// Search for a key, yielding the index if it's found in the hashtable.
/// If you already have the hash for the key lying around, use
/// search_hashed.
fn search(&self, k: &K) -> Option<table::FullIndex> {
self.search_hashed(&self.make_hash(k), k)
}
fn pop_internal(&mut self, starting_index: table::FullIndex) -> Option<V> {
let starting_probe = starting_index.raw_index();
let ending_probe = {
let mut probe = self.probe_next(starting_probe);
for _ in range(0u, self.table.size()) {
match self.table.peek(probe) {
table::Empty(_) => {}, // empty bucket. this is the end of our shifting.
table::Full(idx) => {
// Bucket that isn't us, which has a non-zero probe distance.
// This isn't the ending index, so keep searching.
if self.bucket_distance(&idx) != 0 {
probe = self.probe_next(probe);
continue;
}
// if we do have a bucket_distance of zero, we're at the end
// of what we need to shift.
}
}
break;
}
probe
};
let (_, _, retval) = self.table.take(starting_index);
let mut probe = starting_probe;
let mut next_probe = self.probe_next(probe);
// backwards-shift all the elements after our newly-deleted one.
while next_probe != ending_probe {
match self.table.peek(next_probe) {
table::Empty(_) => {
// nothing to shift in. just empty it out.
match self.table.peek(probe) {
table::Empty(_) => {},
table::Full(idx) => { self.table.take(idx); }
}
},
table::Full(next_idx) => {
// something to shift. move it over!
let next_hash = next_idx.hash();
let (_, next_key, next_val) = self.table.take(next_idx);
match self.table.peek(probe) {
table::Empty(idx) => {
self.table.put(idx, next_hash, next_key, next_val);
},
table::Full(idx) => {
let (emptyidx, _, _) = self.table.take(idx);
self.table.put(emptyidx, next_hash, next_key, next_val);
}
}
}
}
probe = next_probe;
next_probe = self.probe_next(next_probe);
}
// Done the backwards shift, but there's still an element left!
// Empty it out.
match self.table.peek(probe) {
table::Empty(_) => {},
table::Full(idx) => { self.table.take(idx); }
}
// Now we're done all our shifting. Return the value we grabbed
// earlier.
return Some(retval);
}
}
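// A bare-uint sketch of the power-of-two masking used by `probe` and
// `probe_next`, and of the wrap-around case in `bucket_distance`, with
// an arbitrary capacity of 8 (mask 7).
#[test]
fn test_probe_mask_and_distance_wrap() {
    let capacity = 8u;
    let mask = capacity - 1;
    assert_eq!((6u + 3) & mask, 1); // probing wraps past the last bucket
    assert_eq!((2u + 3) & mask, 5); // probing stays in bounds
    // An element at raw index 1 whose first probe was index 6 wrapped
    // around, so its distance to initial bucket is 1 + (8 - 6) = 3.
    let (first_probe, raw_index) = (6u, 1u);
    let dib = if first_probe <= raw_index {
        raw_index - first_probe
    } else {
        raw_index + (capacity - first_probe)
    };
    assert_eq!(dib, 3);
}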
impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Collection for HashMap<K, V, H> {
/// Return the number of elements in the map.
fn len(&self) -> uint { self.table.size() }
}
impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Mutable for HashMap<K, V, H> {
/// Clear the map, removing all key-value pairs. Keeps the allocated memory
/// for reuse.
fn clear(&mut self) {
// Prevent reallocations from happening from now on. Makes it possible
// for the map to be reused but has a downside: reserves permanently.
self.resize_policy.reserve(self.table.size());
for i in range(0, self.table.capacity()) {
match self.table.peek(i) {
table::Empty(_) => {},
table::Full(idx) => { self.table.take(idx); }
}
}
}
}
impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Map<K, V> for HashMap<K, V, H> {
fn find<'a>(&'a self, k: &K) -> Option<&'a V> {
self.search(k).map(|idx| {
let (_, v) = self.table.read(&idx);
v
})
}
fn contains_key(&self, k: &K) -> bool {
self.search(k).is_some()
}
}
impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> MutableMap<K, V> for HashMap<K, V, H> {
fn find_mut<'a>(&'a mut self, k: &K) -> Option<&'a mut V> {
match self.search(k) {
None => None,
Some(idx) => {
let (_, v) = self.table.read_mut(&idx);
Some(v)
}
}
}
fn swap(&mut self, k: K, v: V) -> Option<V> {
let hash = self.make_hash(&k);
let potential_new_size = self.table.size() + 1;
self.make_some_room(potential_new_size);
for dib in range_inclusive(0u, self.table.size()) {
let probe = self.probe(&hash, dib);
let idx = match self.table.peek(probe) {
table::Empty(idx) => {
// Found a hole!
self.table.put(idx, hash, k, v);
return None;
},
table::Full(idx) => idx
};
if idx.hash() == hash {
let (bucket_k, bucket_v) = self.table.read_mut(&idx);
if k == *bucket_k {