@@ -13,24 +13,22 @@ use crate::query::SerializedDepNodeIndex;
 use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
 use crate::values::Value;
 use crate::HandleCycleError;
+#[cfg(parallel_compiler)]
+use rustc_data_structures::cold_path;
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::stack::ensure_sufficient_stack;
-use rustc_data_structures::sync::Lock;
-#[cfg(parallel_compiler)]
-use rustc_data_structures::{cold_path, sharded::Sharded};
-use rustc_data_structures::profiling::TimingGuard;
 use rustc_data_structures::sharded::Sharded;
 use rustc_data_structures::stack::ensure_sufficient_stack;
-
 use rustc_data_structures::sync::Lock;
+
 use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
-use rustc_session::Session; use rustc_span::{Span, DUMMY_SP};
+use rustc_span::{Span, DUMMY_SP};
 use std::cell::Cell;
 use std::collections::hash_map::Entry;
 use std::fmt::Debug;
 use std::hash::Hash;
 use std::mem;
+use std::ops::DerefMut;
 use thin_vec::ThinVec;
 
 use super::QueryConfig;
@@ -226,7 +224,6 @@ where
 
 #[cold]
 #[inline(never)]
-#[cfg(not(parallel_compiler))]
 fn cycle_error<Q, Qcx>(
     query: Q,
     qcx: Qcx,
@@ -296,7 +293,10 @@ where
     Qcx: QueryContext,
 {
     let state = query.query_state(qcx);
-    /*// For the parallel compiler we need to check both the query cache and query state structures
+    let mut state_lock = state.active.get_shard_by_value(&key).lock();
+    let lock = state_lock.deref_mut();
+
+    // For the parallel compiler we need to check both the query cache and query state structures
     // while holding the state lock to ensure that 1) the query has not yet completed and 2) the
     // query is not still executing. Without checking the query cache here, we can end up
     // re-executing the query since `try_start` only checks that the query is not currently
@@ -307,35 +307,44 @@ where
             qcx.dep_context().profiler().query_cache_hit(index.into());
             return (value, Some(index));
         }
-    }*/
-
-    let job = state.active.with_get_shard_by_value(&key, |state_lock| {
-        JobOwner::<'_, Q::Key, Qcx::DepKind>::try_start(&qcx, state, state_lock, span, key)
-    });
-
-    match job {
-        TryGetJob::NotYetStarted(job) => {
-            let (result, dep_node_index) = execute_job(query, qcx, key.clone(), dep_node, job.id);
-            let cache = query.query_cache(qcx);
-            if query.feedable() {
-                // We should not compute queries that also got a value via feeding.
-                // This can't happen, as query feeding adds the very dependencies to the fed query
-                // as its feeding query had. So if the fed query is red, so is its feeder, which will
-                // get evaluated first, and re-feed the query.
-                if let Some((cached_result, _)) = cache.lookup(&key) {
-                    panic!(
-                        "fed query later has its value computed. The already cached value: {cached_result:?}"
-                    );
-
-
+    }
 
+    let current_job_id = qcx.current_query_job();
 
+    match lock.entry(key) {
+        Entry::Vacant(entry) => {
+            // Nothing has computed or is computing the query, so we start a new job and insert it in the
+            // state map.
+            let id = qcx.next_job_id();
+            let job = QueryJob::new(id, span, current_job_id);
+            entry.insert(QueryResult::Started(job));
 
+            // Drop the lock before we start executing the query
+            drop(state_lock);
 
+            execute_job(query, qcx, state, key, id, dep_node)
+        }
+        Entry::Occupied(mut entry) => {
+            match entry.get_mut() {
+                #[cfg(not(parallel_compiler))]
+                QueryResult::Started(job) => {
+                    let id = job.id;
+                    drop(state_lock);
 
+                    // If we are single-threaded we know that we have cycle error,
+                    // so we just return the error.
+                    cycle_error(query, qcx, id, span)
                 }
                 #[cfg(parallel_compiler)]
                 QueryResult::Started(job) => {
+                    if std::intrinsics::likely(!rustc_data_structures::sync::active()) {
+                        let id = job.id;
+                        drop(state_lock);
+
+                        // If we are single-threaded we know that we have cycle error,
+                        // so we just return the error.
+                        return cycle_error(query, qcx, id, span);
+                    }
                     // Get the latch out
                     let latch = job.latch();
                     drop(state_lock);
@@ -345,30 +354,41 @@ where
                 QueryResult::Poisoned => FatalError.raise(),
             }
         }
-        TryGetJob::Cycle(error) => {
-            let result = mk_cycle(qcx, error, query.handle_cycle_error());
-            (result, None)
+    }
+}
+
+#[inline(always)]
+fn execute_job<Q, Qcx>(
+    query: Q,
+    qcx: Qcx,
+    state: &QueryState<Q::Key, Qcx::DepKind>,
+    key: Q::Key,
+    id: QueryJobId,
+    dep_node: Option<DepNode<Qcx::DepKind>>,
+) -> (Q::Value, Option<DepNodeIndex>)
+where
+    Q: QueryConfig<Qcx>,
+    Qcx: QueryContext,
+{
+    // Use `JobOwner` so the query will be poisoned if executing it panics.
+    let job_owner = JobOwner { state, key };
+
+    let (result, dep_node_index) = match qcx.dep_context().dep_graph().data() {
+        None => execute_job_non_incr(query, qcx, key, id),
+        Some(data) => execute_job_incr(query, qcx, data, key, dep_node, id),
+    };
+
+    let cache = query.query_cache(qcx);
+    if query.feedable() {
+        // We should not compute queries that also got a value via feeding.
+        // This can't happen, as query feeding adds the very dependencies to the fed query
+        // as its feeding query had. So if the fed query is red, so is its feeder, which will
+        // get evaluated first, and re-feed the query.
+        if let Some((cached_result, _)) = cache.lookup(&key) {
+            panic!(
+                "fed query later has its value computed. The already cached value: {cached_result:?}"
+            );
         }
-        #[cfg(parallel_compiler)]
-        TryGetJob::JobWait(current_job_id, query_blocked_prof_timer, latch) => {
-            // With parallel queries we might just have to wait on some other
-            // thread.
-            let result = latch.wait_on(current_job_id, span);
-            match result {
-                Ok(()) => {
-                    let Some((v, index)) = query.query_cache(qcx).lookup(&key) else {
-                        panic!("value must be in cache after waiting")
-                    };
-                    qcx.dep_context().profiler().query_cache_hit(index.into());
-                    query_blocked_prof_timer.finish_with_query_invocation_id(index.into());
-
-                    (v, Some(index))
-                }
-                Err(error) => {
-                    let result = mk_cycle(qcx, error, query.handle_cycle_error());
-                    (result, None)
-                }
-            } }
     }
     job_owner.complete(cache, result, dep_node_index);
 
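For readers following the change, here is a minimal, self-contained sketch of the locking and `Entry` control flow that this commit moves into `try_execute_query`. It uses plain `std` types (`Mutex`, `HashMap`) and simplified stand-ins (`QueryState`, `QueryResult`, job ids as plain `u64`) rather than the real rustc-internal types, so it only illustrates the shape of the logic: take the state lock once, register a new job on a vacant entry, drop the lock before executing, and treat an already-started entry as a cycle in the single-threaded case.

use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::ops::DerefMut;
use std::sync::Mutex;

// Simplified stand-ins for the rustc-internal types; names are illustrative only.
#[allow(dead_code)] // `Poisoned` is only here to mirror the real enum.
enum QueryResult {
    Started(u64), // job id of the in-flight computation
    Poisoned,
}

struct QueryState {
    active: Mutex<HashMap<&'static str, QueryResult>>,
}

// Sketch of the flow the commit introduces: lock the state map once, branch on
// the entry for `key`, and release the lock before doing any real work.
fn try_execute_query(state: &QueryState, key: &'static str, next_job_id: u64) -> String {
    let mut state_lock = state.active.lock().unwrap();
    let lock = state_lock.deref_mut();

    match lock.entry(key) {
        Entry::Vacant(entry) => {
            // Nothing has computed or is computing the query: register a new job.
            entry.insert(QueryResult::Started(next_job_id));

            // Drop the lock before we start executing the query.
            drop(state_lock);
            format!("execute `{key}` as job {next_job_id}")
        }
        Entry::Occupied(mut entry) => match entry.get_mut() {
            QueryResult::Started(job) => {
                let id = *job;
                drop(state_lock);

                // Single-threaded case: an already-started query means a cycle.
                // (The parallel compiler instead waits on the job's latch.)
                format!("cycle error: `{key}` is already running as job {id}")
            }
            QueryResult::Poisoned => panic!("query `{key}` panicked earlier"),
        },
    }
}

fn main() {
    let state = QueryState { active: Mutex::new(HashMap::new()) };
    println!("{}", try_execute_query(&state, "typeck", 0));
    println!("{}", try_execute_query(&state, "typeck", 1));
}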