@@ -466,42 +466,8 @@ impl<K: DepKind> DepGraph<K> {
     pub fn read_index(&self, dep_node_index: DepNodeIndex) {
         if let Some(ref data) = self.data {
             K::read_deps(|task_deps| {
-                match task_deps {
-                    TaskDepsRef::Allow(deps) => deps.with_lock(|task_deps| {
-                        // As long as we only have a low number of reads we can avoid doing a hash
-                        // insert and potentially allocating/reallocating the hashmap
-                        let new_read = if task_deps.reads.len() < TASK_DEPS_READS_CAP {
-                            task_deps.reads.iter().all(|other| *other != dep_node_index)
-                        } else {
-                            task_deps.read_set.insert(dep_node_index)
-                        };
-                        if new_read {
-                            task_deps.reads.push(dep_node_index);
-                            if task_deps.reads.len() == TASK_DEPS_READS_CAP {
-                                // Fill `read_set` with what we have so far so we can use the hashset
-                                // next time
-                                task_deps.read_set.extend(task_deps.reads.iter().copied());
-                            }
-
-                            #[cfg(debug_assertions)]
-                            {
-                                if let Some(target) = task_deps.node {
-                                    if let Some(ref forbidden_edge) = data.current.forbidden_edge {
-                                        let src =
-                                            forbidden_edge.index_to_node.lock()[&dep_node_index];
-                                        if forbidden_edge.test(&src, &target) {
-                                            panic!(
-                                                "forbidden edge {:?} -> {:?} created",
-                                                src, target
-                                            )
-                                        }
-                                    }
-                                }
-                            }
-                        } else if cfg!(debug_assertions) {
-                            data.current.total_duplicate_read_count.fetch_add(1, Relaxed);
-                        }
-                    }),
+                let mut task_deps = match task_deps {
+                    TaskDepsRef::Allow(deps) => deps.lock(),
                     TaskDepsRef::EvalAlways => {
                         // We don't need to record dependencies of eval_always
                         // queries. They are re-evaluated unconditionally anyway.
@@ -512,6 +478,41 @@ impl<K: DepKind> DepGraph<K> {
                         panic!("Illegal read of: {dep_node_index:?}")
                     }
                 };
+                let task_deps = &mut *task_deps;
+
+                if cfg!(debug_assertions) {
+                    data.current.total_read_count.fetch_add(1, Relaxed);
+                }
+
+                // As long as we only have a low number of reads we can avoid doing a hash
+                // insert and potentially allocating/reallocating the hashmap
+                let new_read = if task_deps.reads.len() < TASK_DEPS_READS_CAP {
+                    task_deps.reads.iter().all(|other| *other != dep_node_index)
+                } else {
+                    task_deps.read_set.insert(dep_node_index)
+                };
+                if new_read {
+                    task_deps.reads.push(dep_node_index);
+                    if task_deps.reads.len() == TASK_DEPS_READS_CAP {
+                        // Fill `read_set` with what we have so far so we can use the hashset
+                        // next time
+                        task_deps.read_set.extend(task_deps.reads.iter().copied());
+                    }
+
+                    #[cfg(debug_assertions)]
+                    {
+                        if let Some(target) = task_deps.node {
+                            if let Some(ref forbidden_edge) = data.current.forbidden_edge {
+                                let src = forbidden_edge.index_to_node.lock()[&dep_node_index];
+                                if forbidden_edge.test(&src, &target) {
+                                    panic!("forbidden edge {:?} -> {:?} created", src, target)
+                                }
+                            }
+                        }
+                    }
+                } else if cfg!(debug_assertions) {
+                    data.current.total_duplicate_read_count.fetch_add(1, Relaxed);
+                }
             })
         }
     }
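
Two details of the new `read_index` body are worth calling out. First, `let task_deps = &mut *task_deps;` reborrows the lock guard as a plain `&mut TaskDeps`, so the later `read_set.extend(task_deps.reads.iter().copied())` can borrow two fields of the same struct at once; borrow splitting works through a plain reference but not through a `DerefMut` guard. Second, the dedup scheme itself is unchanged by this commit: `reads` is scanned linearly while it is short, and a hash set only takes over once the list reaches `TASK_DEPS_READS_CAP`. A minimal standalone sketch of that capped-scan pattern, with illustrative names (`Reads`, `READS_CAP`) and plain `Vec`/`HashSet`/`u32` standing in for the `SmallVec`, `FxHashSet`, and `DepNodeIndex` used here:

use std::collections::HashSet;

const READS_CAP: usize = 8; // illustrative stand-in for TASK_DEPS_READS_CAP

#[derive(Default)]
struct Reads {
    reads: Vec<u32>,        // ordered read list (a SmallVec of DepNodeIndex above)
    read_set: HashSet<u32>, // only populated once `reads` reaches the cap
}

impl Reads {
    /// Records a read, returning true if it was not seen before.
    fn insert(&mut self, idx: u32) -> bool {
        // While the list is short, a linear scan is cheaper than hashing
        // and avoids allocating the set at all.
        let new_read = if self.reads.len() < READS_CAP {
            self.reads.iter().all(|&other| other != idx)
        } else {
            self.read_set.insert(idx)
        };
        if new_read {
            self.reads.push(idx);
            if self.reads.len() == READS_CAP {
                // Seed the set with everything so far; later membership
                // tests go through the set instead of the scan.
                self.read_set.extend(self.reads.iter().copied());
            }
        }
        new_read
    }
}

fn main() {
    let mut reads = Reads::default();
    assert!(reads.insert(7));
    assert!(!reads.insert(7)); // duplicate, caught by the linear scan
    for i in 0..20 {
        reads.insert(i);
    }
    assert!(!reads.insert(5)); // duplicate, caught by the hash set
}

Below the cap no hashing or set allocation happens at all; once the cap is hit, the set is seeded with a single `extend` and every later membership test is a hash lookup.
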
@@ -573,9 +574,7 @@ impl<K: DepKind> DepGraph<K> {
 
         let mut edges = SmallVec::new();
         K::read_deps(|task_deps| match task_deps {
-            TaskDepsRef::Allow(deps) => {
-                deps.with_borrow(|deps| edges.extend(deps.reads.iter().copied()))
-            }
+            TaskDepsRef::Allow(deps) => edges.extend(deps.lock().reads.iter().copied()),
             TaskDepsRef::EvalAlways => {
                 edges.push(DepNodeIndex::FOREVER_RED_NODE);
             }
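
Most of this commit is the same mechanical change as above: closure-based accessors (`with_lock`, `with_borrow`) become guard-based ones (`lock()`, `borrow()`, `borrow_mut()`), removing a closure and a level of nesting at each call site. A sketch of the two shapes over a plain `std::sync::Mutex`; the real `Lock` type lives in `rustc_data_structures::sync`, and only its API shape matters for this comparison:

use std::sync::{Mutex, MutexGuard};

/// Minimal stand-in for the Lock type used in the diff.
struct Lock<T>(Mutex<T>);

impl<T> Lock<T> {
    fn new(value: T) -> Self {
        Lock(Mutex::new(value))
    }

    /// Closure shape (the old with_lock / with_borrow style):
    /// the critical section is exactly the closure body.
    fn with_lock<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
        let mut guard = self.0.lock().unwrap();
        f(&mut *guard)
    }

    /// Guard shape (what this commit switches to): the critical section
    /// is the guard's scope, so call sites can write straight-line code.
    fn lock(&self) -> MutexGuard<'_, T> {
        self.0.lock().unwrap()
    }
}

fn main() {
    let edges = Lock::new(vec![1u32, 2, 3]);

    // Old shape: the work happens inside a closure.
    let sum_a: u32 = edges.with_lock(|v| v.iter().sum());

    // New shape: take a guard, then use it like a plain reference.
    let sum_b: u32 = edges.lock().iter().sum();

    assert_eq!(sum_a, sum_b);
}

The guard shape also composes better with `match`, which is exactly what `read_index` above exploits: one match arm produces the guard while the other arms return early.
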
@@ -628,7 +627,7 @@ impl<K: DepKind> DepGraphData<K> {
     #[inline]
     pub fn dep_node_index_of_opt(&self, dep_node: &DepNode<K>) -> Option<DepNodeIndex> {
         if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
-            self.current.prev_index_to_index.with_borrow(|nodes| nodes[prev_index])
+            self.current.prev_index_to_index.lock()[prev_index]
         } else {
             self.current
                 .new_node_to_index
@@ -668,7 +667,7 @@ impl<K: DepKind> DepGraphData<K> {
668
667
}
669
668
670
669
pub fn mark_debug_loaded_from_disk ( & self , dep_node : DepNode < K > ) {
671
- self . debug_loaded_from_disk . with_lock ( |node| node . insert ( dep_node) ) ;
670
+ self . debug_loaded_from_disk . lock ( ) . insert ( dep_node) ;
672
671
}
673
672
}
674
673
@@ -691,29 +690,25 @@ impl<K: DepKind> DepGraph<K> {
     }
 
     pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode<K>) -> bool {
-        self.data
-            .as_ref()
-            .unwrap()
-            .debug_loaded_from_disk
-            .with_borrow(|node| node.contains(&dep_node))
+        self.data.as_ref().unwrap().debug_loaded_from_disk.lock().contains(&dep_node)
     }
 
     #[inline(always)]
     pub fn register_dep_node_debug_str<F>(&self, dep_node: DepNode<K>, debug_str_gen: F)
-    where
-        F: FnOnce() -> String,
+    where
+        F: FnOnce() -> String,
     {
         let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;
 
-        if dep_node_debug.with_borrow(|node| node.contains_key(&dep_node)) {
+        if dep_node_debug.borrow().contains_key(&dep_node) {
             return;
         }
         let debug_str = self.with_ignore(debug_str_gen);
-        dep_node_debug.with_lock(|node| node.insert(dep_node, debug_str));
+        dep_node_debug.borrow_mut().insert(dep_node, debug_str);
     }
 
     pub fn dep_node_debug_str(&self, dep_node: DepNode<K>) -> Option<String> {
-        self.data.as_ref()?.dep_node_debug.with_borrow(|node| node.get(&dep_node).cloned())
+        self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
     }
 
     fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> {
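
`register_dep_node_debug_str` keeps its check/compute/insert split: it looks up the key under the lock, releases the lock while `with_ignore(debug_str_gen)` runs, then re-locks to insert. Holding the lock across the generator would be risky, since the generator can run arbitrary code, and a racing duplicate computation only wastes a string. A sketch of that shape, with illustrative names (`register`, `compute`):

use std::collections::HashMap;
use std::sync::Mutex;

/// Check/compute/insert with the lock released during the computation.
fn register<K, V>(cache: &Mutex<HashMap<K, V>>, key: K, compute: impl FnOnce() -> V)
where
    K: std::hash::Hash + Eq,
{
    if cache.lock().unwrap().contains_key(&key) {
        return; // already registered; compute is never run
    }
    // The lock is NOT held here: compute() may be slow or may itself
    // need the cache.
    let value = compute();
    cache.lock().unwrap().insert(key, value);
}

fn main() {
    let cache = Mutex::new(HashMap::new());
    register(&cache, "node-a", || "debug string".to_string());
    register(&cache, "node-a", || unreachable!("already cached"));
    assert_eq!(cache.lock().unwrap()["node-a"], "debug string");
}

The worst case under contention is that two threads both compute the value and the second insert overwrites the first, which is harmless for a debug-string cache.
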
@@ -1301,26 +1296,25 @@ impl<K: DepKind> CurrentDepGraph<K> {
     ) -> DepNodeIndex {
         self.debug_assert_not_in_new_nodes(prev_graph, prev_index);
 
-        self.prev_index_to_index.with_lock(|prev_index_to_index| {
-            match prev_index_to_index[prev_index] {
-                Some(dep_node_index) => dep_node_index,
-                None => {
-                    let key = prev_graph.index_to_node(prev_index);
-                    let edges = prev_graph
-                        .edge_targets_from(prev_index)
-                        .iter()
-                        .map(|i| prev_index_to_index[*i].unwrap())
-                        .collect();
-                    let fingerprint = prev_graph.fingerprint_by_index(prev_index);
-                    let dep_node_index =
-                        self.encoder.borrow().send(profiler, key, fingerprint, edges);
-                    prev_index_to_index[prev_index] = Some(dep_node_index);
-                    #[cfg(debug_assertions)]
-                    self.record_edge(dep_node_index, key, fingerprint);
-                    dep_node_index
-                }
+        let mut prev_index_to_index = self.prev_index_to_index.lock();
+
+        match prev_index_to_index[prev_index] {
+            Some(dep_node_index) => dep_node_index,
+            None => {
+                let key = prev_graph.index_to_node(prev_index);
+                let edges = prev_graph
+                    .edge_targets_from(prev_index)
+                    .iter()
+                    .map(|i| prev_index_to_index[*i].unwrap())
+                    .collect();
+                let fingerprint = prev_graph.fingerprint_by_index(prev_index);
+                let dep_node_index = self.encoder.borrow().send(profiler, key, fingerprint, edges);
+                prev_index_to_index[prev_index] = Some(dep_node_index);
+                #[cfg(debug_assertions)]
+                self.record_edge(dep_node_index, key, fingerprint);
+                dep_node_index
             }
-        })
+        }
     }
 
     #[inline]
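
This function is a lazy memo table: `prev_index_to_index[prev_index]` stays `None` until a node from the previous graph is first promoted, and the `.unwrap()` on each edge encodes the invariant that a node's dependencies are promoted before the node itself. Hoisting the `lock()` out of the closure keeps the whole match as one critical section while dropping two levels of indentation. A simplified sketch of the memo shape, with a plain `Vec<Option<u32>>` for the table and a counter standing in for the index handed back by `encoder.send` (edge handling omitted):

/// Illustrative memo table for promoting nodes from a previous graph
/// into the current one; an index is allocated on first promotion only.
struct Promoter {
    prev_index_to_index: Vec<Option<u32>>, // previous index -> current index
    next_index: u32,                       // stand-in for the encoder allocating indices
}

impl Promoter {
    fn promote(&mut self, prev_index: usize) -> u32 {
        match self.prev_index_to_index[prev_index] {
            // Already promoted: reuse the memoized index.
            Some(index) => index,
            None => {
                let index = self.next_index; // "send" the node, get its new index
                self.next_index += 1;
                self.prev_index_to_index[prev_index] = Some(index);
                index
            }
        }
    }
}

fn main() {
    let mut p = Promoter { prev_index_to_index: vec![None; 4], next_index: 0 };
    assert_eq!(p.promote(2), 0); // first promotion allocates index 0
    assert_eq!(p.promote(0), 1); // next node gets index 1
    assert_eq!(p.promote(2), 0); // memoized: same index as before
}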