@@ -90,8 +90,8 @@ use rustc_index::{IndexSlice, IndexVec};
90
90
use rustc_middle:: middle:: region;
91
91
use rustc_middle:: mir:: * ;
92
92
use rustc_middle:: thir:: { Expr , LintLevel } ;
93
-
94
93
use rustc_middle:: ty:: Ty ;
94
+ use rustc_session:: lint:: Level ;
95
95
use rustc_span:: { Span , DUMMY_SP } ;
96
96
97
97
#[ derive( Debug ) ]
@@ -760,20 +760,25 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
760
760
) {
761
761
let ( current_root, parent_root) =
762
762
if self . tcx . sess . opts . unstable_opts . maximal_hir_to_mir_coverage {
763
- // Some consumers of rustc need to map MIR locations back to HIR nodes. Currently the
764
- // the only part of rustc that tracks MIR -> HIR is the `SourceScopeLocalData::lint_root`
765
- // field that tracks lint levels for MIR locations. Normally the number of source scopes
766
- // is limited to the set of nodes with lint annotations. The -Zmaximal-hir-to-mir-coverage
767
- // flag changes this behavior to maximize the number of source scopes, increasing the
768
- // granularity of the MIR->HIR mapping.
763
+ // Some consumers of rustc need to map MIR locations back to HIR nodes. Currently
764
+ // the only part of rustc that tracks MIR -> HIR is the
765
+ // `SourceScopeLocalData::lint_root` field that tracks lint levels for MIR
766
+ // locations. Normally the number of source scopes is limited to the set of nodes
767
+ // with lint annotations. The -Zmaximal-hir-to-mir-coverage flag changes this
768
+ // behavior to maximize the number of source scopes, increasing the granularity of
769
+ // the MIR->HIR mapping.
769
770
( current_id, parent_id)
770
771
} else {
771
- // Use `maybe_lint_level_root_bounded` with `self.hir_id` as a bound
772
- // to avoid adding Hir dependencies on our parents.
773
- // We estimate the true lint roots here to avoid creating a lot of source scopes.
772
+ // Use `maybe_lint_level_root_bounded` to avoid adding Hir dependencies on our
773
+ // parents. We estimate the true lint roots here to avoid creating a lot of source
774
+ // scopes.
774
775
(
775
- self . tcx . maybe_lint_level_root_bounded ( current_id, self . hir_id ) ,
776
- self . tcx . maybe_lint_level_root_bounded ( parent_id, self . hir_id ) ,
776
+ self . maybe_lint_level_root_bounded ( current_id) ,
777
+ if parent_id == self . hir_id {
778
+ parent_id // this is very common
779
+ } else {
780
+ self . maybe_lint_level_root_bounded ( parent_id)
781
+ } ,
777
782
)
778
783
} ;
779
784
@@ -783,6 +788,50 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
783
788
}
784
789
}
785
790
791
+ /// Walks upwards from `orig_id` to find a node which might change lint levels with attributes.
792
+ /// It stops at `self.hir_id` and just returns it if reached.
793
+ fn maybe_lint_level_root_bounded ( & mut self , orig_id : HirId ) -> HirId {
794
+ // This assertion lets us just store `ItemLocalId` in the cache, rather
795
+ // than the full `HirId`.
796
+ assert_eq ! ( orig_id. owner, self . hir_id. owner) ;
797
+
798
+ let mut id = orig_id;
799
+ let hir = self . tcx . hir ( ) ;
800
+ loop {
801
+ if id == self . hir_id {
802
+ // This is a moderately common case, mostly hit for previously unseen nodes.
803
+ break ;
804
+ }
805
+
806
+ if hir. attrs ( id) . iter ( ) . any ( |attr| Level :: from_attr ( attr) . is_some ( ) ) {
807
+ // This is a rare case. It's for a node path that doesn't reach the root due to an
808
+ // intervening lint level attribute. This result doesn't get cached.
809
+ return id;
810
+ }
811
+
812
+ let next = hir. parent_id ( id) ;
813
+ if next == id {
814
+ bug ! ( "lint traversal reached the root of the crate" ) ;
815
+ }
816
+ id = next;
817
+
818
+ // This lookup is just an optimization; it can be removed without affecting
819
+ // functionality. It might seem strange to see this at the end of this loop, but the
820
+ // `orig_id` passed in to this function is almost always previously unseen, for which a
821
+ // lookup will be a miss. So we only do lookups for nodes up the parent chain, where
822
+ // cache lookups have a very high hit rate.
823
+ if self . lint_level_roots_cache . contains ( id. local_id ) {
824
+ break ;
825
+ }
826
+ }
827
+
828
+ // `orig_id` traced to `self_id`; record this fact. If `orig_id` is a leaf node it will
829
+ // rarely (never?) subsequently be searched for, but it's hard to know if that is the case.
830
+ // The performance wins from the cache all come from caching non-leaf nodes.
831
+ self . lint_level_roots_cache . insert ( orig_id. local_id ) ;
832
+ self . hir_id
833
+ }
834
+
786
835
/// Creates a new source scope, nested in the current one.
787
836
pub ( crate ) fn new_source_scope (
788
837
& mut self ,
0 commit comments