@@ -9,7 +9,7 @@ use std::{
 };
 
 use dashmap::{DashMap, SharedValue};
-use hashbrown::HashMap;
+use hashbrown::{hash_map::RawEntryMut, HashMap};
 use once_cell::sync::OnceCell;
 use rustc_hash::FxHasher;
 use triomphe::Arc;
@@ -26,56 +26,58 @@ pub struct Interned<T: Internable + ?Sized> {
 
 impl<T: Internable> Interned<T> {
     pub fn new(obj: T) -> Self {
-        match Interned::lookup(&obj) {
-            Ok(this) => this,
-            Err(shard) => {
-                let arc = Arc::new(obj);
-                Self::alloc(arc, shard)
-            }
+        let (mut shard, hash) = Self::select(&obj);
+        // Atomically,
+        // - check if `obj` is already in the map
+        //   - if so, clone its `Arc` and return it
+        //   - if not, box it up, insert it, and return a clone
+        // This needs to be atomic (locking the shard) to avoid races with other threads, which could
+        // insert the same object between us looking it up and inserting it.
+        match shard.raw_entry_mut().from_key_hashed_nocheck(hash as u64, &obj) {
+            RawEntryMut::Occupied(occ) => Self { arc: occ.key().clone() },
+            RawEntryMut::Vacant(vac) => Self {
+                arc: vac
+                    .insert_hashed_nocheck(hash as u64, Arc::new(obj), SharedValue::new(()))
+                    .0
+                    .clone(),
+            },
         }
     }
 }
 
-impl<T: Internable + ?Sized> Interned<T> {
-    fn lookup(obj: &T) -> Result<Self, Guard<T>> {
-        let storage = T::storage().get();
-        let shard_idx = storage.determine_map(obj);
-        let shard = &storage.shards()[shard_idx];
-        let shard = shard.write();
-
+impl Interned<str> {
+    pub fn new_str(s: &str) -> Self {
+        let (mut shard, hash) = Self::select(s);
         // Atomically,
         // - check if `obj` is already in the map
         //   - if so, clone its `Arc` and return it
         //   - if not, box it up, insert it, and return a clone
         // This needs to be atomic (locking the shard) to avoid races with other threads, which could
         // insert the same object between us looking it up and inserting it.
-
-        // FIXME: avoid double lookup/hashing by using raw entry API (once stable, or when
-        // hashbrown can be plugged into dashmap)
-        match shard.get_key_value(obj) {
-            Some((arc, _)) => Ok(Self { arc: arc.clone() }),
-            None => Err(shard),
+        match shard.raw_entry_mut().from_key_hashed_nocheck(hash as u64, s) {
+            RawEntryMut::Occupied(occ) => Self { arc: occ.key().clone() },
+            RawEntryMut::Vacant(vac) => Self {
+                arc: vac
+                    .insert_hashed_nocheck(hash as u64, Arc::from(s), SharedValue::new(()))
+                    .0
+                    .clone(),
+            },
         }
     }
-
-    fn alloc(arc: Arc<T>, mut shard: Guard<T>) -> Self {
-        let arc2 = arc.clone();
-
-        shard.insert(arc2, SharedValue::new(()));
-
-        Self { arc }
-    }
 }
 
-impl Interned<str> {
-    pub fn new_str(s: &str) -> Self {
-        match Interned::lookup(s) {
-            Ok(this) => this,
-            Err(shard) => {
-                let arc = Arc::<str>::from(s);
-                Self::alloc(arc, shard)
-            }
-        }
+impl<T: Internable + ?Sized> Interned<T> {
+    #[inline]
+    fn select(obj: &T) -> (Guard<T>, u64) {
+        let storage = T::storage().get();
+        let hash = {
+            let mut hasher = std::hash::BuildHasher::build_hasher(storage.hasher());
+            obj.hash(&mut hasher);
+            hasher.finish()
+        };
+        let shard_idx = storage.determine_shard(hash as usize);
+        let shard = &storage.shards()[shard_idx];
+        (shard.write(), hash)
    }
 }
 
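The heart of this change is hashbrown's raw entry API: the key is hashed exactly once in `select`, and that same hash drives both the lookup and, on a miss, the insertion, where the old `lookup`/`alloc` pair hashed and probed the table twice. Below is a minimal standalone sketch of the same check-or-insert pattern over a plain `hashbrown::HashMap`. The names are illustrative rather than the crate's own, it uses `std::sync::Arc` instead of `triomphe::Arc`, and it assumes a hashbrown version that still ships `raw_entry_mut` (as the version this commit targets does):

```rust
use std::hash::{BuildHasher, Hash, Hasher};
use std::sync::Arc;

use hashbrown::{hash_map::RawEntryMut, HashMap};

/// Intern `s`: return the existing `Arc` if present, otherwise insert one.
/// The key is hashed exactly once; lookup and insert both reuse `hash`.
fn intern(map: &mut HashMap<Arc<str>, ()>, s: &str) -> Arc<str> {
    let hash = {
        let mut hasher = map.hasher().build_hasher();
        s.hash(&mut hasher);
        hasher.finish()
    };
    match map.raw_entry_mut().from_key_hashed_nocheck(hash, s) {
        // Already interned: hand out another Arc to the same allocation.
        RawEntryMut::Occupied(occ) => occ.key().clone(),
        // Not present: allocate once and insert under the precomputed hash.
        RawEntryMut::Vacant(vac) => {
            vac.insert_hashed_nocheck(hash, Arc::from(s), ()).0.clone()
        }
    }
}

fn main() {
    let mut map: HashMap<Arc<str>, ()> = HashMap::default();
    let a = intern(&mut map, "hello");
    let b = intern(&mut map, "hello");
    assert!(Arc::ptr_eq(&a, &b)); // same allocation both times
}
```

In the commit itself the map is one write-locked shard of a `DashMap`, so holding the shard guard across the whole match is what makes the check-or-insert atomic with respect to other threads.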
@@ -94,20 +96,17 @@ impl<T: Internable + ?Sized> Drop for Interned<T> {
 impl<T: Internable + ?Sized> Interned<T> {
     #[cold]
     fn drop_slow(&mut self) {
-        let storage = T::storage().get();
-        let shard_idx = storage.determine_map(&self.arc);
-        let shard = &storage.shards()[shard_idx];
-        let mut shard = shard.write();
-
-        // FIXME: avoid double lookup
-        let (arc, _) = shard.get_key_value(&self.arc).expect("interned value removed prematurely");
+        let (mut shard, hash) = Self::select(&self.arc);
 
-        if Arc::count(arc) != 2 {
+        if Arc::count(&self.arc) != 2 {
             // Another thread has interned another copy
             return;
         }
 
-        shard.remove(&self.arc);
+        match shard.raw_entry_mut().from_key_hashed_nocheck(hash, &self.arc) {
+            RawEntryMut::Occupied(occ) => occ.remove(),
+            RawEntryMut::Vacant(_) => unreachable!(),
+        };
 
         // Shrink the backing storage if the shard is less than 50% occupied.
         if shard.len() * 2 < shard.capacity() {
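The `Arc::count(&self.arc) != 2` check rests on an invariant: while the shard lock is held, the map owns one strong reference and the handle being dropped owns another, so a count of exactly 2 means no other handle is alive and the entry can be removed. Any higher count means another thread holds (or just re-interned) the value, and the entry must stay. A toy illustration of that counting argument, using `std::sync::Arc` and `strong_count` in place of the `triomphe::Arc::count` the crate uses:

```rust
use std::sync::Arc;

fn main() {
    // One reference owned by the "map", one by the live handle.
    let in_map: Arc<str> = Arc::from("interned");
    let handle = in_map.clone();
    assert_eq!(Arc::strong_count(&handle), 2); // count == 2: removal is safe

    // A second live handle (e.g. another thread re-interning the value)
    // pushes the count past 2, so drop_slow must leave the entry in place.
    let other = handle.clone();
    assert_eq!(Arc::strong_count(&other), 3);
}
```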