@@ -83,9 +83,10 @@ use rustc::middle::region;
 use rustc::ty::Ty;
 use rustc::hir;
 use rustc::mir::*;
-use syntax_pos::{Span};
+use syntax_pos::{Span, DUMMY_SP};
 use rustc_data_structures::fx::FxHashMap;
 use std::collections::hash_map::Entry;
+use std::mem;
 
 #[derive(Debug)]
 pub struct Scope<'tcx> {
@@ -107,6 +108,8 @@ pub struct Scope<'tcx> {
     /// * polluting the cleanup MIR with StorageDead creates
     ///   landing pads even though there's no actual destructors
     /// * freeing up stack space has no effect during unwinding
+    /// Note that for generators we do emit StorageDeads, for use by
+    /// the optimizations in the MIR generator transform.
     needs_cleanup: bool,
 
     /// set of places to drop when exiting this scope. This starts
@@ -466,10 +469,6 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
     /// This path terminates in GeneratorDrop. Returns the start of the path.
     /// None indicates there’s no cleanup to do at this point.
     pub fn generator_drop_cleanup(&mut self) -> Option<BasicBlock> {
-        if !self.scopes.iter().any(|scope| scope.needs_cleanup) {
-            return None;
-        }
-
         // Fill in the cache for unwinds
         self.diverge_cleanup_gen(true);
 
@@ -480,7 +479,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         let result = block;
 
         while let Some(scope) = scopes.next() {
-            if !scope.needs_cleanup {
+            if !scope.needs_cleanup && !self.is_generator {
                 continue;
             }
 
@@ -802,7 +801,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
 
         for scope in self.scopes[first_uncached..].iter_mut() {
             target = build_diverge_scope(&mut self.cfg, scope.region_scope_span,
-                                         scope, target, generator_drop);
+                                         scope, target, generator_drop, self.is_generator);
         }
 
         target
@@ -900,12 +899,6 @@ fn build_scope_drops<'tcx>(
     // drops panic (panicking while unwinding will abort, so there's no need for
     // another set of arrows). The drops for the unwind path should have already
     // been generated by `diverge_cleanup_gen`.
-    //
-    // The code in this function reads from right to left.
-    // Storage dead drops have to be done left to right (since we can only push
-    // to the end of a Vec). So, we find the next drop and then call
-    // push_storage_deads which will iterate backwards through them so that
-    // they are added in the correct order.
 
     let mut unwind_blocks = scope.drops.iter().rev().filter_map(|drop_data| {
         if let DropKind::Value { cached_block } = drop_data.kind {
@@ -936,11 +929,6 @@ fn build_scope_drops<'tcx>(
                 block = next;
             }
             DropKind::Storage => {
-                // We do not need to emit StorageDead for generator drops
-                if generator_drop {
-                    continue
-                }
-
                 // Drop the storage for both value and storage drops.
                 // Only temps and vars need their storage dead.
                 match drop_data.location {
@@ -962,7 +950,8 @@ fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>,
                              span: Span,
                              scope: &mut Scope<'tcx>,
                              mut target: BasicBlock,
-                             generator_drop: bool)
+                             generator_drop: bool,
+                             is_generator: bool)
                              -> BasicBlock
 {
@@ -981,41 +970,90 @@ fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>,
         scope: source_scope
     };
 
-    // Next, build up the drops. Here we iterate the vector in
+    // We keep track of StorageDead statements to prepend to our current block
+    // and store them here, in reverse order.
+    let mut storage_deads = vec![];
+
+    let mut target_built_by_us = false;
+
+    // Build up the drops. Here we iterate the vector in
     // *forward* order, so that we generate drops[0] first (right to
     // left in diagram above).
     for (j, drop_data) in scope.drops.iter_mut().enumerate() {
         debug!("build_diverge_scope drop_data[{}]: {:?}", j, drop_data);
         // Only full value drops are emitted in the diverging path,
-        // not StorageDead.
+        // not StorageDead, except in the case of generators.
         //
         // Note: This may not actually be what we desire (are we
         // "freeing" stack storage as we unwind, or merely observing a
         // frozen stack)? In particular, the intent may have been to
         // match the behavior of clang, but on inspection eddyb says
         // this is not what clang does.
-        let cached_block = match drop_data.kind {
-            DropKind::Value { ref mut cached_block } => cached_block.ref_mut(generator_drop),
-            DropKind::Storage => continue
-        };
-        target = if let Some(cached_block) = *cached_block {
-            cached_block
-        } else {
-            let block = cfg.start_new_cleanup_block();
-            cfg.terminate(block, source_info(drop_data.span),
-                          TerminatorKind::Drop {
-                              location: drop_data.location.clone(),
-                              target,
-                              unwind: None
-                          });
-            *cached_block = Some(block);
-            block
+        match drop_data.kind {
+            DropKind::Storage if is_generator => {
+                // Only temps and vars need their storage dead.
+                match drop_data.location {
+                    Place::Base(PlaceBase::Local(index)) => {
+                        storage_deads.push(Statement {
+                            source_info: source_info(drop_data.span),
+                            kind: StatementKind::StorageDead(index)
+                        });
+                    }
+                    _ => unreachable!(),
+                };
+            }
+            DropKind::Storage => {}
+            DropKind::Value { ref mut cached_block } => {
+                let cached_block = cached_block.ref_mut(generator_drop);
+                target = if let Some(cached_block) = *cached_block {
+                    storage_deads.clear();
+                    target_built_by_us = false;
+                    cached_block
+                } else {
+                    push_storage_deads(
+                        cfg, &mut target, &mut storage_deads, target_built_by_us, source_scope);
+                    let block = cfg.start_new_cleanup_block();
+                    cfg.terminate(block, source_info(drop_data.span),
+                                  TerminatorKind::Drop {
+                                      location: drop_data.location.clone(),
+                                      target,
+                                      unwind: None
+                                  });
+                    *cached_block = Some(block);
+                    target_built_by_us = true;
+                    block
+                };
+            }
         };
     }
-
+    push_storage_deads(cfg, &mut target, &mut storage_deads, target_built_by_us, source_scope);
     *scope.cached_unwind.ref_mut(generator_drop) = Some(target);
 
+    assert!(storage_deads.is_empty());
     debug!("build_diverge_scope({:?}, {:?}) = {:?}", scope, span, target);
 
     target
 }
+
+fn push_storage_deads(cfg: &mut CFG<'tcx>,
+                      target: &mut BasicBlock,
+                      storage_deads: &mut Vec<Statement<'tcx>>,
+                      target_built_by_us: bool,
+                      source_scope: SourceScope) {
+    if storage_deads.is_empty() { return; }
+    if !target_built_by_us {
+        // We cannot add statements to an existing block, so we create a new
+        // block for our StorageDead statements.
+        let block = cfg.start_new_cleanup_block();
+        let source_info = SourceInfo { span: DUMMY_SP, scope: source_scope };
+        cfg.terminate(block, source_info, TerminatorKind::Goto { target: *target });
+        *target = block;
+    }
+    let statements = &mut cfg.block_data_mut(*target).statements;
+    storage_deads.reverse();
+    debug!("push_storage_deads({:?}), storage_deads={:?}, statements={:?}",
+           *target, storage_deads, statements);
+    storage_deads.append(statements);
+    mem::swap(statements, storage_deads);
+    assert!(storage_deads.is_empty());
+}
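
The prepend step at the end of `push_storage_deads` (reverse, then `append`, then `mem::swap`) is the subtle part of this change. Below is a minimal, standalone sketch of the same idiom on plain `Vec<String>` buffers; the helper name `prepend_pending` and the string stand-ins for MIR statements are illustrative only and are not part of this patch.

use std::mem;

// Illustrative stand-in for the tail of `push_storage_deads`: prepend
// `pending` (which was filled in reverse order) to the front of `statements`.
fn prepend_pending(statements: &mut Vec<String>, pending: &mut Vec<String>) {
    if pending.is_empty() { return; }
    // `pending` was pushed in reverse order, so restore the intended order.
    pending.reverse();
    // Move the block's existing statements onto the end of `pending`...
    pending.append(statements);
    // ...then swap buffers: `statements` now starts with the prepended items
    // and `pending` is left empty, matching the assert in the real code.
    mem::swap(statements, pending);
    assert!(pending.is_empty());
}

fn main() {
    let mut statements = vec!["existing stmt".to_string()];
    // Collected in reverse order, like `storage_deads` above.
    let mut pending = vec!["StorageDead(b)".to_string(), "StorageDead(a)".to_string()];

    prepend_pending(&mut statements, &mut pending);
    assert_eq!(statements, ["StorageDead(a)", "StorageDead(b)", "existing stmt"]);
}

The swap leaves the scratch buffer empty, which is what the `assert!(storage_deads.is_empty())` calls in the patch rely on.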