@@ -14,15 +14,15 @@ use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
 use crate::HandleCycleError;
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::sharded::Sharded;
+use rustc_data_structures::sharded::{self, Sharded};
 use rustc_data_structures::stack::ensure_sufficient_stack;
 use rustc_data_structures::sync::Lock;
 #[cfg(parallel_compiler)]
 use rustc_data_structures::{cold_path, sync};
 use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
 use rustc_span::{Span, DUMMY_SP};
 use std::cell::Cell;
-use std::collections::hash_map::Entry;
+use std::collections::hash_map::RawEntryMut;
 use std::fmt::Debug;
 use std::hash::Hash;
 use std::mem;
@@ -142,7 +142,7 @@ where
 {
     /// Completes the query by updating the query cache with the `result`,
     /// signals the waiter and forgets the JobOwner, so it won't poison the query
-    fn complete<C>(self, cache: &C, result: C::Value, dep_node_index: DepNodeIndex)
+    fn complete<C>(self, cache: &C, key_hash: u64, result: C::Value, dep_node_index: DepNodeIndex)
     where
         C: QueryCache<Key = K>,
     {
@@ -154,13 +154,17 @@ where
 
         // Mark as complete before we remove the job from the active state
         // so no other thread can re-execute this query.
-        cache.complete(key, result, dep_node_index);
+        cache.complete(key, key_hash, result, dep_node_index);
 
         let job = {
-            let mut lock = state.active.lock_shard_by_value(&key);
-            match lock.remove(&key).unwrap() {
-                QueryResult::Started(job) => job,
-                QueryResult::Poisoned => panic!(),
+            let mut lock = state.active.lock_shard_by_hash(key_hash);
+
+            match lock.raw_entry_mut().from_key_hashed_nocheck(key_hash, &key) {
+                RawEntryMut::Vacant(_) => panic!(),
+                RawEntryMut::Occupied(occupied) => match occupied.remove() {
+                    QueryResult::Started(job) => job,
+                    QueryResult::Poisoned => panic!(),
+                },
             }
         };
 
@@ -209,7 +213,8 @@ where
     C: QueryCache,
     Tcx: DepContext,
 {
-    match cache.lookup(&key) {
+    let key_hash = sharded::make_hash(key);
+    match cache.lookup(&key, key_hash) {
         Some((value, index)) => {
             tcx.profiler().query_cache_hit(index.into());
             tcx.dep_graph().read_index(index);
@@ -246,6 +251,7 @@ fn wait_for_query<Q, Qcx>(
     qcx: Qcx,
     span: Span,
     key: Q::Key,
+    key_hash: u64,
     latch: QueryLatch,
     current: Option<QueryJobId>,
 ) -> (Q::Value, Option<DepNodeIndex>)
@@ -264,7 +270,7 @@ where
 
     match result {
         Ok(()) => {
-            let Some((v, index)) = query.query_cache(qcx).lookup(&key) else {
+            let Some((v, index)) = query.query_cache(qcx).lookup(&key, key_hash) else {
                 cold_path(|| {
                     // We didn't find the query result in the query cache. Check if it was
                     // poisoned due to a panic instead.
@@ -301,7 +307,8 @@ where
     Qcx: QueryContext,
 {
     let state = query.query_state(qcx);
-    let mut state_lock = state.active.lock_shard_by_value(&key);
+    let key_hash = sharded::make_hash(&key);
+    let mut state_lock = state.active.lock_shard_by_hash(key_hash);
 
     // For the parallel compiler we need to check both the query cache and query state structures
     // while holding the state lock to ensure that 1) the query has not yet completed and 2) the
@@ -310,28 +317,28 @@ where
     // executing, but another thread may have already completed the query and stored its result
     // in the query cache.
     if cfg!(parallel_compiler) && qcx.dep_context().sess().threads() > 1 {
-        if let Some((value, index)) = query.query_cache(qcx).lookup(&key) {
+        if let Some((value, index)) = query.query_cache(qcx).lookup(&key, key_hash) {
             qcx.dep_context().profiler().query_cache_hit(index.into());
             return (value, Some(index));
         }
     }
 
     let current_job_id = qcx.current_query_job();
 
-    match state_lock.entry(key) {
-        Entry::Vacant(entry) => {
+    match state_lock.raw_entry_mut().from_key_hashed_nocheck(key_hash, &key) {
+        RawEntryMut::Vacant(entry) => {
             // Nothing has computed or is computing the query, so we start a new job and insert it in the
             // state map.
             let id = qcx.next_job_id();
             let job = QueryJob::new(id, span, current_job_id);
-            entry.insert(QueryResult::Started(job));
+            entry.insert_hashed_nocheck(key_hash, key, QueryResult::Started(job));
 
             // Drop the lock before we start executing the query
             drop(state_lock);
 
-            execute_job::<_, _, INCR>(query, qcx, state, key, id, dep_node)
+            execute_job::<_, _, INCR>(query, qcx, state, key, key_hash, id, dep_node)
         }
-        Entry::Occupied(mut entry) => {
+        RawEntryMut::Occupied(mut entry) => {
             match entry.get_mut() {
                 QueryResult::Started(job) => {
                     #[cfg(parallel_compiler)]
@@ -342,7 +349,15 @@ where
 
                         // Only call `wait_for_query` if we're using a Rayon thread pool
                         // as it will attempt to mark the worker thread as blocked.
-                        return wait_for_query(query, qcx, span, key, latch, current_job_id);
+                        return wait_for_query(
+                            query,
+                            qcx,
+                            span,
+                            key,
+                            key_hash,
+                            latch,
+                            current_job_id,
+                        );
                     }
 
                     let id = job.id;
@@ -364,6 +379,7 @@ fn execute_job<Q, Qcx, const INCR: bool>(
     qcx: Qcx,
     state: &QueryState<Q::Key>,
     key: Q::Key,
+    key_hash: u64,
     id: QueryJobId,
     dep_node: Option<DepNode>,
 ) -> (Q::Value, Option<DepNodeIndex>)
@@ -395,7 +411,7 @@ where
         // This can't happen, as query feeding adds the very dependencies to the fed query
         // as its feeding query had. So if the fed query is red, so is its feeder, which will
         // get evaluated first, and re-feed the query.
-        if let Some((cached_result, _)) = cache.lookup(&key) {
+        if let Some((cached_result, _)) = cache.lookup(&key, key_hash) {
             let Some(hasher) = query.hash_result() else {
                 panic!(
                     "no_hash fed query later has its value computed.\n\
@@ -427,7 +443,7 @@ where
             }
         }
     }
-    job_owner.complete(cache, result, dep_node_index);
+    job_owner.complete(cache, key_hash, result, dep_node_index);
 
     (result, Some(dep_node_index))
 }
@@ -826,7 +842,7 @@ where
 {
     // We may be concurrently trying both execute and force a query.
     // Ensure that only one of them runs the query.
-    if let Some((_, index)) = query.query_cache(qcx).lookup(&key, sharded::make_hash(&key)) {
+    if let Some((_, index)) = query.query_cache(qcx).lookup(&key, sharded::make_hash(&key)) {
         qcx.dep_context().profiler().query_cache_hit(index.into());
         return;
     }
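
Taken together, the hunks above make one change: each query key is hashed once with `sharded::make_hash`, and that `u64` is threaded through `complete`, `wait_for_query`, and `execute_job` so that both shard selection (`lock_shard_by_hash`) and map access (`from_key_hashed_nocheck` / `insert_hashed_nocheck`) reuse it instead of rehashing the key at every site. Below is a minimal, self-contained sketch of the same pattern, not rustc's actual implementation. It assumes hashbrown 0.14, whose raw-entry API mirrors the unstable std `hash_raw_entry` feature used in the diff (hashbrown removed it in 0.15); `ShardedMap`, `make_hash`, and `get_or_insert_with` are illustrative stand-ins for rustc's `Sharded` type.

use std::collections::hash_map::DefaultHasher;
use std::hash::{BuildHasher, BuildHasherDefault, Hash};
use std::sync::{Mutex, MutexGuard};

use hashbrown::hash_map::RawEntryMut;
use hashbrown::HashMap;

// A fixed, deterministic hasher shared by `make_hash` and every shard map,
// standing in for rustc's FxHasher. The precomputed hash must come from the
// same BuildHasher the maps use, or the `*_hashed_nocheck` calls would leave
// entries in the wrong buckets after a resize.
type BuildHash = BuildHasherDefault<DefaultHasher>;

const SHARD_COUNT: usize = 4;

/// Hash the key exactly once; the result is reused for both shard selection
/// and map probing, like `sharded::make_hash` in the diff.
fn make_hash<K: Hash + ?Sized>(key: &K) -> u64 {
    BuildHash::default().hash_one(key)
}

struct ShardedMap<K, V> {
    shards: [Mutex<HashMap<K, V, BuildHash>>; SHARD_COUNT],
}

impl<K: Hash + Eq, V: Clone> ShardedMap<K, V> {
    fn new() -> Self {
        ShardedMap { shards: std::array::from_fn(|_| Mutex::new(HashMap::default())) }
    }

    /// Pick a shard from the precomputed hash instead of rehashing the key,
    /// as `lock_shard_by_hash` does in place of `lock_shard_by_value`.
    fn lock_shard_by_hash(&self, hash: u64) -> MutexGuard<'_, HashMap<K, V, BuildHash>> {
        self.shards[(hash as usize) % SHARD_COUNT].lock().unwrap()
    }

    fn get_or_insert_with(&self, key: K, make: impl FnOnce() -> V) -> V {
        let key_hash = make_hash(&key);
        let mut shard = self.lock_shard_by_hash(key_hash);
        // `from_key_hashed_nocheck` probes with the caller-supplied hash, so
        // the key is hashed once per operation rather than once per map call.
        match shard.raw_entry_mut().from_key_hashed_nocheck(key_hash, &key) {
            RawEntryMut::Occupied(occupied) => occupied.get().clone(),
            RawEntryMut::Vacant(vacant) => {
                let value = make();
                vacant.insert_hashed_nocheck(key_hash, key, value.clone());
                value
            }
        }
    }
}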
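A usage example for the sketch (the key string and closure are made up for illustration):

fn main() {
    let cache: ShardedMap<String, usize> = ShardedMap::new();
    // First call computes; the key is hashed once, covering both the shard
    // choice and the vacant-entry insert.
    let len = cache.get_or_insert_with("borrowck".to_string(), || "borrowck".len());
    assert_eq!(len, 8);
    // Second call hits the occupied raw entry without rehashing inside the map.
    let again = cache.get_or_insert_with("borrowck".to_string(), || unreachable!());
    assert_eq!(again, 8);
}

The design choice is presumably the diff's motivation: query keys can be large compound values, and hashing them once to pick a shard, again for the cache lookup, and again for the state-map insert is redundant work; caching the `u64` lets every later site reuse one hash.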