
Commit d65cdf7

Avoid another HashMap
1 parent a3d63db commit d65cdf7

File tree

  • compiler/rustc_query_system/src/query

1 file changed: +31, -21 lines


compiler/rustc_query_system/src/query/caches.rs

Lines changed: 31 additions & 21 deletions
@@ -1,7 +1,6 @@
 use std::fmt::Debug;
 use std::hash::Hash;
-use std::sync::OnceLock;
-use std::sync::atomic::AtomicU32;
+use std::sync::{Mutex, OnceLock};
 
 use rustc_data_structures::sharded::ShardedHashMap;
 pub use rustc_data_structures::vec_cache::VecCache;
@@ -48,8 +47,7 @@ pub trait QueryCache: Sized {
 /// more specialized kinds of cache. Backed by a sharded hashmap.
 pub struct DefaultCache<K, V> {
     cache: ShardedHashMap<K, (V, DepNodeIndex)>,
-    // FIXME: if perf is bad, try pushing this into `cache` with Option<V> or so.
-    active: ShardedHashMap<K, u32>,
+    active: Mutex<Vec<Option<K>>>,
 }
 
 impl<K, V> Default for DefaultCache<K, V> {
@@ -72,25 +70,30 @@ where
     }
 
     fn to_unique_index(&self, key: &Self::Key) -> usize {
-        static ACTIVE_INDEX: AtomicU32 = AtomicU32::new(0);
-        self.active
-            .get_or_insert_with(*key, || {
-                ACTIVE_INDEX.fetch_add(1, std::sync::atomic::Ordering::Relaxed)
-            })
-            .try_into()
-            .unwrap()
-    }
-
-    fn to_key(&self, idx: usize) -> Self::Key {
-        for shard in self.active.lock_shards() {
-            for (k, v) in shard.iter() {
-                if (*v as usize) == idx {
-                    return *k;
-                }
+        let mut guard = self.active.lock().unwrap();
+
+        for (idx, slot) in guard.iter_mut().enumerate() {
+            if let Some(k) = slot
+                && k == key
+            {
+                // Return idx if we found the slot containing this key.
+                return idx;
+            } else if slot.is_none() {
+                // If slot is empty, reuse it.
+                *slot = Some(*key);
+                return idx;
             }
         }
 
-        unreachable!("index not currently mapped")
+        // If no slot currently contains our key, add a new slot.
+        let idx = guard.len();
+        guard.push(Some(*key));
+        return idx;
+    }
+
+    fn to_key(&self, idx: usize) -> Self::Key {
+        let guard = self.active.lock().unwrap();
+        guard[idx].expect("still present")
     }
 
     #[inline]
@@ -101,7 +104,14 @@ where
 
         // Make sure to do this second -- this ensures lookups return success prior to active
         // getting removed, helping avoiding assignment of multiple indices per logical key.
-        self.active.remove(key);
+        let mut guard = self.active.lock().unwrap();
+        for slot in guard.iter_mut() {
+            if let Some(k) = slot
+                && *k == key
+            {
+                *slot = None;
+            }
+        }
     }
 
     fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
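The change replaces the second sharded hashmap (and the global AtomicU32 counter) with a mutex-guarded Vec<Option<K>>: to_unique_index scans for the slot already holding the key or claims the first free slot, to_key indexes straight back into the vector, and removal just clears the slot so its index can be reused. Below is a minimal standalone sketch of that slot-reuse pattern, assuming K: Copy + Eq; the SlotIndexAllocator name and the main driver are illustrative, not part of rustc.

use std::sync::Mutex;

// Hypothetical standalone version of the pattern the commit adopts for
// `DefaultCache::active`; the names here are illustrative, not rustc's.
struct SlotIndexAllocator<K> {
    slots: Mutex<Vec<Option<K>>>,
}

impl<K: Copy + Eq> SlotIndexAllocator<K> {
    fn new() -> Self {
        Self { slots: Mutex::new(Vec::new()) }
    }

    // Return a stable index for `key`: reuse the slot that already holds it,
    // otherwise claim the first empty slot, otherwise append a new one.
    fn to_unique_index(&self, key: &K) -> usize {
        let mut slots = self.slots.lock().unwrap();
        for (idx, slot) in slots.iter_mut().enumerate() {
            if slot.as_ref() == Some(key) {
                return idx; // key is already mapped to this slot
            }
            if slot.is_none() {
                *slot = Some(*key); // reuse a freed slot
                return idx;
            }
        }
        let idx = slots.len();
        slots.push(Some(*key));
        idx
    }

    // Map an index back to its key; panics if the slot was already cleared.
    fn to_key(&self, idx: usize) -> K {
        self.slots.lock().unwrap()[idx].expect("still present")
    }

    // Clear every slot holding `key`, freeing its index for reuse.
    fn remove(&self, key: K) {
        for slot in self.slots.lock().unwrap().iter_mut() {
            if *slot == Some(key) {
                *slot = None;
            }
        }
    }
}

fn main() {
    let alloc = SlotIndexAllocator::new();
    let a = alloc.to_unique_index(&10u32);
    let b = alloc.to_unique_index(&20u32);
    assert_ne!(a, b);
    assert_eq!(alloc.to_key(a), 10);
    alloc.remove(10);
    // The freed slot is handed out again for the next new key.
    assert_eq!(alloc.to_unique_index(&30u32), a);
}

The lookup becomes a linear scan rather than a hashed probe, which presumably is acceptable because the set of active keys at any one time stays small, and clearing slots on removal keeps the vector from growing without bound.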
