
Commit 1b55244

Rollup merge of rust-lang#132389 - Zalathar:graph-tweaks, r=jieyouxu
coverage: Simplify parts of coverage graph creation

This is a combination of three semi-related simplifications to how coverage graphs are created, grouped into one PR to avoid conflicts. There are no observable changes to the output of any of the coverage tests.
2 parents 5043c57 + 4a70f4b commit 1b55244
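For orientation before the diff: the change builds the coverage graph by walking the MIR control-flow graph depth-first and merging runs of straight-line blocks into one coverage node, flushing the accumulated chain whenever the next block cannot extend it. The following is a minimal, self-contained sketch of that chain-and-flush idea on a toy graph; the `Cfg` struct, plain `usize` block indices, and `chainable` flag are invented for illustration and are not the compiler's types.

// Toy illustration of the chain-and-flush scheme (not the rustc implementation).
// Blocks are plain indices; `chainable[b]` stands in for "b's terminator has
// exactly one coverage-relevant successor and is out-summable".
struct Cfg {
    successors: Vec<Vec<usize>>,   // coverage-relevant successors per block
    predecessors: Vec<Vec<usize>>, // predecessors per block
    chainable: Vec<bool>,          // may this block be chained into its sole successor?
}

/// Visit blocks in depth-first preorder, accumulating a chain of blocks and
/// flushing it into a new group (a "BCB") whenever the next block can't extend it.
fn build_groups(cfg: &Cfg, start: usize) -> Vec<Vec<usize>> {
    let mut groups: Vec<Vec<usize>> = Vec::new();
    let mut current_chain: Vec<usize> = Vec::new();

    let mut visited = vec![false; cfg.successors.len()];
    let mut stack = vec![start];
    while let Some(bb) = stack.pop() {
        if std::mem::replace(&mut visited[bb], true) {
            continue;
        }
        stack.extend(cfg.successors[bb].iter().copied());

        if let Some(&prev) = current_chain.last() {
            // Chaining is allowed only if `prev` permits it and `bb` has
            // `prev` as its sole predecessor.
            let can_chain = cfg.chainable[prev] && cfg.predecessors[bb] == [prev];
            if !can_chain {
                groups.push(std::mem::take(&mut current_chain));
            }
        }
        current_chain.push(bb);
    }
    if !current_chain.is_empty() {
        groups.push(current_chain);
    }
    groups
}

fn main() {
    // 0 -> 1 -> 2, and 2 branches to 3 and 4 (so 2 is not chainable onward).
    let cfg = Cfg {
        successors: vec![vec![1], vec![2], vec![3, 4], vec![], vec![]],
        predecessors: vec![vec![], vec![0], vec![1], vec![2], vec![2]],
        chainable: vec![true, true, false, false, false],
    };
    // Expected grouping: [0, 1, 2] chained together; 3 and 4 each alone.
    println!("{:?}", build_groups(&cfg, 0));
}

In the actual code below, the chaining test is `is_out_chainable()` on the previous block's coverage-relevant successors plus a sole-predecessor check, and the traversal is driven by the generic `graph::depth_first_search` over a filtered view of the MIR graph.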

1 file changed: compiler/rustc_mir_transform/src/coverage/graph.rs (+79 −98)

@@ -1,7 +1,7 @@
 use std::cmp::Ordering;
 use std::collections::VecDeque;
-use std::iter;
 use std::ops::{Index, IndexMut};
+use std::{iter, mem, slice};
 
 use rustc_data_structures::captures::Captures;
 use rustc_data_structures::fx::FxHashSet;
@@ -127,10 +127,10 @@ impl CoverageGraph {
         let mut bcbs = IndexVec::<BasicCoverageBlock, _>::with_capacity(num_basic_blocks);
         let mut bb_to_bcb = IndexVec::from_elem_n(None, num_basic_blocks);
 
-        let mut add_basic_coverage_block = |basic_blocks: &mut Vec<BasicBlock>| {
+        let mut flush_chain_into_new_bcb = |current_chain: &mut Vec<BasicBlock>| {
             // Take the accumulated list of blocks, leaving the vector empty
             // to be used by subsequent BCBs.
-            let basic_blocks = std::mem::take(basic_blocks);
+            let basic_blocks = mem::take(current_chain);
 
             let bcb = bcbs.next_index();
             for &bb in basic_blocks.iter() {
@@ -141,48 +141,41 @@ impl CoverageGraph {
                 bcb_filtered_successors(mir_body[bb].terminator()).is_out_summable()
             });
             let bcb_data = BasicCoverageBlockData { basic_blocks, is_out_summable };
-            debug!("adding bcb{}: {:?}", bcb.index(), bcb_data);
+            debug!("adding {bcb:?}: {bcb_data:?}");
             bcbs.push(bcb_data);
         };
 
-        // Walk the MIR CFG using a Preorder traversal, which starts from `START_BLOCK` and follows
-        // each block terminator's `successors()`. Coverage spans must map to actual source code,
-        // so compiler generated blocks and paths can be ignored. To that end, the CFG traversal
-        // intentionally omits unwind paths.
-        // FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and
-        // `catch_unwind()` handlers.
+        // Traverse the MIR control-flow graph, accumulating chains of blocks
+        // that can be combined into a single node in the coverage graph.
+        // A depth-first search ensures that if two nodes can be chained
+        // together, they will be adjacent in the traversal order.
 
         // Accumulates a chain of blocks that will be combined into one BCB.
-        let mut basic_blocks = Vec::new();
+        let mut current_chain = vec![];
 
-        let filtered_successors = |bb| bcb_filtered_successors(mir_body[bb].terminator());
-        for bb in short_circuit_preorder(mir_body, filtered_successors)
+        let subgraph = CoverageRelevantSubgraph::new(&mir_body.basic_blocks);
+        for bb in graph::depth_first_search(subgraph, mir::START_BLOCK)
             .filter(|&bb| mir_body[bb].terminator().kind != TerminatorKind::Unreachable)
         {
-            // If the previous block can't be chained into `bb`, flush the accumulated
-            // blocks into a new BCB, then start building the next chain.
-            if let Some(&prev) = basic_blocks.last()
-                && (!filtered_successors(prev).is_chainable() || {
-                    // If `bb` has multiple predecessor blocks, or `prev` isn't
-                    // one of its predecessors, we can't chain and must flush.
-                    let predecessors = &mir_body.basic_blocks.predecessors()[bb];
-                    predecessors.len() > 1 || !predecessors.contains(&prev)
-                })
-            {
-                debug!(
-                    terminator_kind = ?mir_body[prev].terminator().kind,
-                    predecessors = ?&mir_body.basic_blocks.predecessors()[bb],
-                    "can't chain from {prev:?} to {bb:?}"
-                );
-                add_basic_coverage_block(&mut basic_blocks);
+            if let Some(&prev) = current_chain.last() {
+                // Adding a block to a non-empty chain is allowed if the
+                // previous block permits chaining, and the current block has
+                // `prev` as its sole predecessor.
+                let can_chain = subgraph.coverage_successors(prev).is_out_chainable()
+                    && mir_body.basic_blocks.predecessors()[bb].as_slice() == &[prev];
+                if !can_chain {
+                    // The current block can't be added to the existing chain, so
+                    // flush that chain into a new BCB, and start a new chain.
+                    flush_chain_into_new_bcb(&mut current_chain);
+                }
             }
 
-            basic_blocks.push(bb);
+            current_chain.push(bb);
         }
 
-        if !basic_blocks.is_empty() {
+        if !current_chain.is_empty() {
             debug!("flushing accumulated blocks into one last BCB");
-            add_basic_coverage_block(&mut basic_blocks);
+            flush_chain_into_new_bcb(&mut current_chain);
         }
 
         (bcbs, bb_to_bcb)
@@ -389,34 +382,28 @@ impl BasicCoverageBlockData {
 /// indicates whether that block can potentially be combined into the same BCB
 /// as its sole successor.
 #[derive(Clone, Copy, Debug)]
-enum CoverageSuccessors<'a> {
-    /// The terminator has exactly one straight-line successor, so its block can
-    /// potentially be combined into the same BCB as that successor.
-    Chainable(BasicBlock),
-    /// The block cannot be combined into the same BCB as its successor(s).
-    NotChainable(&'a [BasicBlock]),
-    /// Yield terminators are not chainable, and their execution count can also
-    /// differ from the execution count of their out-edge.
-    Yield(BasicBlock),
+struct CoverageSuccessors<'a> {
+    /// Coverage-relevant successors of the corresponding terminator.
+    /// There might be 0, 1, or multiple targets.
+    targets: &'a [BasicBlock],
+    /// `Yield` terminators are not chainable, because their sole out-edge is
+    /// only followed if/when the generator is resumed after the yield.
+    is_yield: bool,
 }
 
 impl CoverageSuccessors<'_> {
-    fn is_chainable(&self) -> bool {
-        match self {
-            Self::Chainable(_) => true,
-            Self::NotChainable(_) => false,
-            Self::Yield(_) => false,
-        }
+    /// If `false`, this terminator cannot be chained into another block when
+    /// building the coverage graph.
+    fn is_out_chainable(&self) -> bool {
+        // If a terminator is out-summable and has exactly one out-edge, then
+        // it is eligible to be chained into its successor block.
+        self.is_out_summable() && self.targets.len() == 1
     }
 
     /// Returns true if the terminator itself is assumed to have the same
     /// execution count as the sum of its out-edges (assuming no panics).
     fn is_out_summable(&self) -> bool {
-        match self {
-            Self::Chainable(_) => true,
-            Self::NotChainable(_) => true,
-            Self::Yield(_) => false,
-        }
+        !self.is_yield && !self.targets.is_empty()
     }
 }
 
@@ -425,12 +412,7 @@ impl IntoIterator for CoverageSuccessors<'_> {
     type IntoIter = impl DoubleEndedIterator<Item = Self::Item>;
 
     fn into_iter(self) -> Self::IntoIter {
-        match self {
-            Self::Chainable(bb) | Self::Yield(bb) => {
-                Some(bb).into_iter().chain((&[]).iter().copied())
-            }
-            Self::NotChainable(bbs) => None.into_iter().chain(bbs.iter().copied()),
-        }
+        self.targets.iter().copied()
     }
 }
 
@@ -440,48 +422,44 @@ impl IntoIterator for CoverageSuccessors<'_> {
 // `catch_unwind()` handlers.
 fn bcb_filtered_successors<'a, 'tcx>(terminator: &'a Terminator<'tcx>) -> CoverageSuccessors<'a> {
     use TerminatorKind::*;
-    match terminator.kind {
+    let mut is_yield = false;
+    let targets = match &terminator.kind {
         // A switch terminator can have many coverage-relevant successors.
-        // (If there is exactly one successor, we still treat it as not chainable.)
-        SwitchInt { ref targets, .. } => CoverageSuccessors::NotChainable(targets.all_targets()),
+        SwitchInt { targets, .. } => targets.all_targets(),
 
         // A yield terminator has exactly 1 successor, but should not be chained,
         // because its resume edge has a different execution count.
-        Yield { resume, .. } => CoverageSuccessors::Yield(resume),
+        Yield { resume, .. } => {
+            is_yield = true;
+            slice::from_ref(resume)
+        }
 
         // These terminators have exactly one coverage-relevant successor,
         // and can be chained into it.
         Assert { target, .. }
         | Drop { target, .. }
         | FalseEdge { real_target: target, .. }
         | FalseUnwind { real_target: target, .. }
-        | Goto { target } => CoverageSuccessors::Chainable(target),
+        | Goto { target } => slice::from_ref(target),
 
         // A call terminator can normally be chained, except when it has no
         // successor because it is known to diverge.
-        Call { target: maybe_target, .. } => match maybe_target {
-            Some(target) => CoverageSuccessors::Chainable(target),
-            None => CoverageSuccessors::NotChainable(&[]),
-        },
+        Call { target: maybe_target, .. } => maybe_target.as_slice(),
 
         // An inline asm terminator can normally be chained, except when it
         // diverges or uses asm goto.
-        InlineAsm { ref targets, .. } => {
-            if let [target] = targets[..] {
-                CoverageSuccessors::Chainable(target)
-            } else {
-                CoverageSuccessors::NotChainable(targets)
-            }
-        }
+        InlineAsm { targets, .. } => &targets,
 
         // These terminators have no coverage-relevant successors.
         CoroutineDrop
         | Return
         | TailCall { .. }
         | Unreachable
         | UnwindResume
-        | UnwindTerminate(_) => CoverageSuccessors::NotChainable(&[]),
-    }
+        | UnwindTerminate(_) => &[],
+    };
+
+    CoverageSuccessors { targets, is_yield }
 }
 
 /// Maintains separate worklists for each loop in the BasicCoverageBlock CFG, plus one for the
@@ -616,28 +594,31 @@ impl<'a> TraverseCoverageGraphWithLoops<'a> {
     }
 }
 
-fn short_circuit_preorder<'a, 'tcx, F, Iter>(
-    body: &'a mir::Body<'tcx>,
-    filtered_successors: F,
-) -> impl Iterator<Item = BasicBlock> + Captures<'a> + Captures<'tcx>
-where
-    F: Fn(BasicBlock) -> Iter,
-    Iter: IntoIterator<Item = BasicBlock>,
-{
-    let mut visited = BitSet::new_empty(body.basic_blocks.len());
-    let mut worklist = vec![mir::START_BLOCK];
-
-    std::iter::from_fn(move || {
-        while let Some(bb) = worklist.pop() {
-            if !visited.insert(bb) {
-                continue;
-            }
-
-            worklist.extend(filtered_successors(bb));
+/// Wrapper around a [`mir::BasicBlocks`] graph that restricts each node's
+/// successors to only the ones considered "relevant" when building a coverage
+/// graph.
+#[derive(Clone, Copy)]
+struct CoverageRelevantSubgraph<'a, 'tcx> {
+    basic_blocks: &'a mir::BasicBlocks<'tcx>,
+}
+impl<'a, 'tcx> CoverageRelevantSubgraph<'a, 'tcx> {
+    fn new(basic_blocks: &'a mir::BasicBlocks<'tcx>) -> Self {
+        Self { basic_blocks }
+    }
 
-            return Some(bb);
-        }
+    fn coverage_successors(&self, bb: BasicBlock) -> CoverageSuccessors<'_> {
+        bcb_filtered_successors(self.basic_blocks[bb].terminator())
+    }
+}
+impl<'a, 'tcx> graph::DirectedGraph for CoverageRelevantSubgraph<'a, 'tcx> {
+    type Node = BasicBlock;
 
-        None
-    })
+    fn num_nodes(&self) -> usize {
+        self.basic_blocks.num_nodes()
+    }
+}
+impl<'a, 'tcx> graph::Successors for CoverageRelevantSubgraph<'a, 'tcx> {
+    fn successors(&self, bb: Self::Node) -> impl Iterator<Item = Self::Node> {
+        self.coverage_successors(bb).into_iter()
+    }
 }
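
A note on the traversal plumbing above: `CoverageRelevantSubgraph` implements the compiler's generic graph traits over a filtered successor list, so a stock depth-first search never walks coverage-irrelevant edges. Here is a self-contained sketch of that wrapper pattern; the `DirectedGraph`/`Successors` traits and the `depth_first_search` function below are simplified stand-ins, not the real `rustc_data_structures::graph` API.

// Self-contained sketch of the "restricted-successors wrapper" pattern used by
// CoverageRelevantSubgraph. The traits and DFS here are toy stand-ins.
trait DirectedGraph {
    type Node: Copy + Into<usize>;
    fn num_nodes(&self) -> usize;
}

trait Successors: DirectedGraph {
    fn successors(&self, node: Self::Node) -> impl Iterator<Item = Self::Node>;
}

/// Generic preorder depth-first search over any `Successors` graph.
fn depth_first_search<G: Successors>(graph: &G, start: G::Node) -> Vec<G::Node> {
    let mut visited = vec![false; graph.num_nodes()];
    let mut stack = vec![start];
    let mut order = Vec::new();
    while let Some(node) = stack.pop() {
        let idx: usize = node.into();
        if std::mem::replace(&mut visited[idx], true) {
            continue;
        }
        order.push(node);
        // Only the successors exposed by the wrapper are ever pushed.
        stack.extend(graph.successors(node));
    }
    order
}

/// A toy CFG: each block lists its successor indices, with a flag marking
/// edges the traversal should ignore (standing in for unwind edges).
struct Cfg {
    successors: Vec<Vec<(usize, bool)>>, // (target, is_relevant)
}

/// Wrapper that exposes only the "relevant" successors of the underlying CFG,
/// so a generic DFS never walks the ignored edges.
struct RelevantSubgraph<'a> {
    cfg: &'a Cfg,
}

impl DirectedGraph for RelevantSubgraph<'_> {
    type Node = usize;
    fn num_nodes(&self) -> usize {
        self.cfg.successors.len()
    }
}

impl Successors for RelevantSubgraph<'_> {
    fn successors(&self, node: usize) -> impl Iterator<Item = usize> {
        self.cfg.successors[node]
            .iter()
            .filter(|&&(_, relevant)| relevant)
            .map(|&(target, _)| target)
    }
}

fn main() {
    // Block 0 goes to 1 (relevant) and to 2 (an "unwind" edge we ignore).
    let cfg = Cfg { successors: vec![vec![(1, true), (2, false)], vec![], vec![]] };
    let order = depth_first_search(&RelevantSubgraph { cfg: &cfg }, 0);
    println!("{order:?}"); // [0, 1] -- block 2 is never visited
}

Keeping the filtering inside the wrapper is what lets the hand-rolled `short_circuit_preorder` iterator be deleted: the generic DFS supplies the traversal, and the wrapper supplies the coverage-specific view of the graph.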
