@@ -9,6 +9,7 @@ use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc, Ordering};
 use rustc_index::vec::IndexVec;
 use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
 use smallvec::{smallvec, SmallVec};
+use std::assert_matches::assert_matches;
 use std::collections::hash_map::Entry;
 use std::fmt::Debug;
 use std::hash::Hash;
@@ -165,7 +166,11 @@ impl<K: DepKind> DepGraph<K> {
     pub fn assert_ignored(&self) {
         if let Some(..) = self.data {
             K::read_deps(|task_deps| {
-                assert!(task_deps.is_none(), "expected no task dependency tracking");
+                assert_matches!(
+                    task_deps,
+                    TaskDepsRef::Ignore,
+                    "expected no task dependency tracking"
+                );
             })
         }
     }
@@ -174,7 +179,7 @@ impl<K: DepKind> DepGraph<K> {
     where
         OP: FnOnce() -> R,
     {
-        K::with_deps(None, op)
+        K::with_deps(TaskDepsRef::Ignore, op)
     }

     /// Used to wrap the deserialization of a query result from disk,
@@ -227,10 +232,7 @@ impl<K: DepKind> DepGraph<K> {
     where
         OP: FnOnce() -> R,
     {
-        let mut deps = TaskDeps::default();
-        deps.read_allowed = false;
-        let deps = Lock::new(deps);
-        K::with_deps(Some(&deps), op)
+        K::with_deps(TaskDepsRef::Forbid, op)
     }

     /// Starts a new dep-graph task. Dep-graph tasks are specified
@@ -313,10 +315,15 @@ impl<K: DepKind> DepGraph<K> {
                 reads: SmallVec::new(),
                 read_set: Default::default(),
                 phantom_data: PhantomData,
-                read_allowed: true,
             }))
         };
-        let result = K::with_deps(task_deps.as_ref(), || task(cx, arg));
+
+        let task_deps_ref = match &task_deps {
+            Some(deps) => TaskDepsRef::Allow(deps),
+            None => TaskDepsRef::Ignore,
+        };
+
+        let result = K::with_deps(task_deps_ref, || task(cx, arg));
         let edges = task_deps.map_or_else(|| smallvec![], |lock| lock.into_inner().reads);

         let dcx = cx.dep_context();
@@ -369,7 +376,7 @@ impl<K: DepKind> DepGraph<K> {

         if let Some(ref data) = self.data {
             let task_deps = Lock::new(TaskDeps::default());
-            let result = K::with_deps(Some(&task_deps), op);
+            let result = K::with_deps(TaskDepsRef::Allow(&task_deps), op);
             let task_deps = task_deps.into_inner();
             let task_deps = task_deps.reads;

@@ -422,47 +429,47 @@ impl<K: DepKind> DepGraph<K> {
     pub fn read_index(&self, dep_node_index: DepNodeIndex) {
         if let Some(ref data) = self.data {
             K::read_deps(|task_deps| {
-                if let Some(task_deps) = task_deps {
-                    let mut task_deps = task_deps.lock();
-                    let task_deps = &mut *task_deps;
-
-                    if !task_deps.read_allowed {
-                        panic!("Illegal read of: {:?}", dep_node_index);
+                let mut task_deps = match task_deps {
+                    TaskDepsRef::Allow(deps) => deps.lock(),
+                    TaskDepsRef::Ignore => return,
+                    TaskDepsRef::Forbid => {
+                        panic!("Illegal read of: {:?}", dep_node_index)
                     }
+                };
+                let task_deps = &mut *task_deps;

-                    if cfg!(debug_assertions) {
-                        data.current.total_read_count.fetch_add(1, Relaxed);
-                    }
+                if cfg!(debug_assertions) {
+                    data.current.total_read_count.fetch_add(1, Relaxed);
+                }

-                    // As long as we only have a low number of reads we can avoid doing a hash
-                    // insert and potentially allocating/reallocating the hashmap
-                    let new_read = if task_deps.reads.len() < TASK_DEPS_READS_CAP {
-                        task_deps.reads.iter().all(|other| *other != dep_node_index)
-                    } else {
-                        task_deps.read_set.insert(dep_node_index)
-                    };
-                    if new_read {
-                        task_deps.reads.push(dep_node_index);
-                        if task_deps.reads.len() == TASK_DEPS_READS_CAP {
-                            // Fill `read_set` with what we have so far so we can use the hashset
-                            // next time
-                            task_deps.read_set.extend(task_deps.reads.iter().copied());
-                        }
+                // As long as we only have a low number of reads we can avoid doing a hash
+                // insert and potentially allocating/reallocating the hashmap
+                let new_read = if task_deps.reads.len() < TASK_DEPS_READS_CAP {
+                    task_deps.reads.iter().all(|other| *other != dep_node_index)
+                } else {
+                    task_deps.read_set.insert(dep_node_index)
+                };
+                if new_read {
+                    task_deps.reads.push(dep_node_index);
+                    if task_deps.reads.len() == TASK_DEPS_READS_CAP {
+                        // Fill `read_set` with what we have so far so we can use the hashset
+                        // next time
+                        task_deps.read_set.extend(task_deps.reads.iter().copied());
+                    }

-                        #[cfg(debug_assertions)]
-                        {
-                            if let Some(target) = task_deps.node {
-                                if let Some(ref forbidden_edge) = data.current.forbidden_edge {
-                                    let src = forbidden_edge.index_to_node.lock()[&dep_node_index];
-                                    if forbidden_edge.test(&src, &target) {
-                                        panic!("forbidden edge {:?} -> {:?} created", src, target)
-                                    }
+                    #[cfg(debug_assertions)]
+                    {
+                        if let Some(target) = task_deps.node {
+                            if let Some(ref forbidden_edge) = data.current.forbidden_edge {
+                                let src = forbidden_edge.index_to_node.lock()[&dep_node_index];
+                                if forbidden_edge.test(&src, &target) {
+                                    panic!("forbidden edge {:?} -> {:?} created", src, target)
                                 }
                             }
                         }
-                    } else if cfg!(debug_assertions) {
-                        data.current.total_duplicate_read_count.fetch_add(1, Relaxed);
                     }
+                } else if cfg!(debug_assertions) {
+                    data.current.total_duplicate_read_count.fetch_add(1, Relaxed);
                 }
             })
         }
@@ -1185,29 +1192,41 @@ impl<K: DepKind> CurrentDepGraph<K> {
 const TASK_DEPS_READS_CAP: usize = 8;
 type EdgesVec = SmallVec<[DepNodeIndex; TASK_DEPS_READS_CAP]>;

-pub struct TaskDeps<K> {
+#[derive(Debug, Clone, Copy)]
+pub enum TaskDepsRef<'a, K: DepKind> {
+    /// New dependencies can be added to the
+    /// `TaskDeps`. This is used when executing a 'normal' query
+    /// (no `eval_always` modifier)
+    Allow(&'a Lock<TaskDeps<K>>),
+    /// New dependencies are ignored. This is used when
+    /// executing an `eval_always` query, since there's no
+    /// need to track dependencies for a query that's always
+    /// re-executed. This is also used for `dep_graph.with_ignore`
+    Ignore,
+    /// Any attempt to add new dependencies will cause a panic.
+    /// This is used when decoding a query result from disk,
+    /// to ensure that the decoding process doesn't itself
+    /// require the execution of any queries.
+    Forbid,
+}
+
+#[derive(Debug)]
+pub struct TaskDeps<K: DepKind> {
     #[cfg(debug_assertions)]
     node: Option<DepNode<K>>,
     reads: EdgesVec,
     read_set: FxHashSet<DepNodeIndex>,
     phantom_data: PhantomData<DepNode<K>>,
-    /// Whether or not we allow `DepGraph::read_index` to run.
-    /// This is normally true, except inside `with_query_deserialization`,
-    /// where it set to `false` to enforce that no new `DepNode` edges are
-    /// created. See the documentation of `with_query_deserialization` for
-    /// more details.
-    read_allowed: bool,
 }

-impl<K> Default for TaskDeps<K> {
+impl<K: DepKind> Default for TaskDeps<K> {
     fn default() -> Self {
         Self {
             #[cfg(debug_assertions)]
             node: None,
             reads: EdgesVec::new(),
             read_set: FxHashSet::default(),
             phantom_data: PhantomData,
-            read_allowed: true,
         }
     }
 }
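
For readers following the diff: the change replaces the old `Option<&Lock<TaskDeps>>` plus a per-task `read_allowed` flag with a single three-state `TaskDepsRef` (`Allow` / `Ignore` / `Forbid`) that `read_index` matches on exhaustively. The standalone sketch below mirrors that shape only; it is not the rustc code. It substitutes `std::sync::Mutex` for rustc's `Lock`, a plain `u32` for `DepNodeIndex`, and drops the `DepKind` generics and the capacity/forbidden-edge bookkeeping.

use std::sync::Mutex;

#[derive(Debug, Default)]
struct TaskDeps {
    reads: Vec<u32>,
}

#[derive(Clone, Copy)]
enum TaskDepsRef<'a> {
    // Record new reads into the referenced TaskDeps (a "normal" task).
    Allow(&'a Mutex<TaskDeps>),
    // Silently drop reads (an `eval_always`-style task, or `with_ignore`).
    Ignore,
    // Any read is a bug, e.g. while decoding a cached query result.
    Forbid,
}

fn read_index(task_deps: TaskDepsRef<'_>, dep_node_index: u32) {
    let mut deps = match task_deps {
        TaskDepsRef::Allow(deps) => deps.lock().unwrap(),
        TaskDepsRef::Ignore => return,
        TaskDepsRef::Forbid => panic!("Illegal read of: {:?}", dep_node_index),
    };
    deps.reads.push(dep_node_index);
}

fn main() {
    let deps = Mutex::new(TaskDeps::default());
    read_index(TaskDepsRef::Allow(&deps), 7); // recorded
    read_index(TaskDepsRef::Ignore, 8); // dropped
    assert_eq!(deps.lock().unwrap().reads, vec![7]);

    let _forbidden = TaskDepsRef::Forbid; // passing this to read_index would panic
}

Folding the "forbid reads" case into the enum removes the per-task boolean and puts the illegal-read check at the single match site, so the compiler enforces that every caller decides between allowing, ignoring, and forbidding dependency edges.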