
Commit 131ef97

Reduce the amount of unsafe code and mark handle_deadlock as unsafe
1 parent 3e83248 commit 131ef97
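
In outline (the full diff follows): query jobs that used to be identified by raw `*const QueryJob<'tcx>` pointers (the old `Ref<'tcx>` alias) are now passed around as reference-counted `Lrc<QueryJob<'tcx>>` handles, and only identity checks and `HashSet` keys still use the address, obtained through a new safe `as_ptr` helper. The remaining raw-pointer work is concentrated in `handle_deadlock`, which is now marked `unsafe` and documented accordingly. A minimal sketch of the identity-by-address pattern outside rustc, using `std::sync::Arc` in place of `Lrc` and an invented `Job` type:

    use std::collections::HashSet;
    use std::sync::Arc;

    // Stand-in for QueryJob; the real type also carries query info, a parent link and a latch.
    struct Job {
        name: &'static str,
    }

    impl Job {
        // Same idea as the new QueryJob::as_ptr: expose the address for identity
        // comparisons without handing callers a pointer they need to dereference.
        fn as_ptr(&self) -> *const Job {
            self as *const _
        }
    }

    fn main() {
        let a = Arc::new(Job { name: "typeck" });
        let b = Arc::new(Job { name: "borrowck" });

        // The visited set is keyed by address, so Job needs no Hash/Eq impls
        // and the raw pointers are never dereferenced.
        let mut visited: HashSet<*const Job> = HashSet::new();
        visited.insert(a.as_ptr());

        assert!(visited.contains(&a.as_ptr()));
        assert!(!visited.contains(&b.as_ptr()));
        println!("visited {}, not {}", a.name, b.name);
    }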

2 files changed: +53 -52 lines changed

src/librustc/ty/maps/job.rs (+49 -49)
@@ -151,6 +151,10 @@ impl<'tcx> QueryJob<'tcx> {
         #[cfg(parallel_queries)]
         self.latch.set();
     }
+
+    fn as_ptr(&self) -> *const QueryJob<'tcx> {
+        self as *const _
+    }
 }

 #[cfg(parallel_queries)]
@@ -233,13 +237,9 @@ impl<'tcx> QueryLatch<'tcx> {
     }
 }

-/// A pointer to an active query job. This is used to give query jobs an identity.
-#[cfg(parallel_queries)]
-type Ref<'tcx> = *const QueryJob<'tcx>;
-
 /// A resumable waiter of a query. The usize is the index into waiters in the query's latch
 #[cfg(parallel_queries)]
-type Waiter<'tcx> = (Ref<'tcx>, usize);
+type Waiter<'tcx> = (Lrc<QueryJob<'tcx>>, usize);

 /// Visits all the non-resumable and resumable waiters of a query.
 /// Only waiters in a query are visited.
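
(`Lrc` is rustc's reference-counted pointer alias from `rustc_data_structures::sync`, an `Arc` in parallel builds and an `Rc` otherwise at the time of this commit, so a `Waiter` now keeps its job alive for as long as the waiter entry exists, which the old raw `Ref` pointer could not guarantee.)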
@@ -251,25 +251,23 @@ type Waiter<'tcx> = (Ref<'tcx>, usize);
 /// required information to resume the waiter.
 /// If all `visit` calls returns None, this function also returns None.
 #[cfg(parallel_queries)]
-fn visit_waiters<'tcx, F>(query_ref: Ref<'tcx>, mut visit: F) -> Option<Option<Waiter<'tcx>>>
+fn visit_waiters<'tcx, F>(query: Lrc<QueryJob<'tcx>>, mut visit: F) -> Option<Option<Waiter<'tcx>>>
 where
-    F: FnMut(Span, Ref<'tcx>) -> Option<Option<Waiter<'tcx>>>
+    F: FnMut(Span, Lrc<QueryJob<'tcx>>) -> Option<Option<Waiter<'tcx>>>
 {
-    let query = unsafe { &*query_ref };
-
     // Visit the parent query which is a non-resumable waiter since it's on the same stack
     if let Some(ref parent) = query.parent {
-        if let Some(cycle) = visit(query.info.span, &**parent as Ref) {
+        if let Some(cycle) = visit(query.info.span, parent.clone()) {
             return Some(cycle);
         }
     }

     // Visit the explict waiters which use condvars and are resumable
     for (i, waiter) in query.latch.info.lock().waiters.iter().enumerate() {
         if let Some(ref waiter_query) = waiter.query {
-            if visit(waiter.span, &**waiter_query).is_some() {
+            if visit(waiter.span, waiter_query.clone()).is_some() {
                 // Return a value which indicates that this waiter can be resumed
-                return Some(Some((query_ref, i)));
+                return Some(Some((query.clone(), i)));
             }
         }
     }
@@ -281,12 +279,13 @@ where
 /// If a cycle is detected, this initial value is replaced with the span causing
 /// the cycle.
 #[cfg(parallel_queries)]
-fn cycle_check<'tcx>(query: Ref<'tcx>,
+fn cycle_check<'tcx>(query: Lrc<QueryJob<'tcx>>,
                      span: Span,
-                     stack: &mut Vec<(Span, Ref<'tcx>)>,
-                     visited: &mut HashSet<Ref<'tcx>>) -> Option<Option<Waiter<'tcx>>> {
-    if visited.contains(&query) {
-        return if let Some(p) = stack.iter().position(|q| q.1 == query) {
+                     stack: &mut Vec<(Span, Lrc<QueryJob<'tcx>>)>,
+                     visited: &mut HashSet<*const QueryJob<'tcx>>
+) -> Option<Option<Waiter<'tcx>>> {
+    if visited.contains(&query.as_ptr()) {
+        return if let Some(p) = stack.iter().position(|q| q.1.as_ptr() == query.as_ptr()) {
             // We detected a query cycle, fix up the initial span and return Some

             // Remove previous stack entries
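
Note the visited set's key type: it is now `HashSet<*const QueryJob<'tcx>>`, populated via the safe `as_ptr` helper, so cycle detection keeps cheap address-based identity without needing `Hash`/`Eq` impls on `QueryJob` and without ever dereferencing a raw pointer.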
@@ -300,8 +299,8 @@ fn cycle_check<'tcx>(query: Ref<'tcx>,
     }

     // Mark this query is visited and add it to the stack
-    visited.insert(query);
-    stack.push((span, query));
+    visited.insert(query.as_ptr());
+    stack.push((span, query.clone()));

     // Visit all the waiters
     let r = visit_waiters(query, |span, successor| {
@@ -320,18 +319,21 @@ fn cycle_check<'tcx>(query: Ref<'tcx>,
 /// from `query` without going through any of the queries in `visited`.
 /// This is achieved with a depth first search.
 #[cfg(parallel_queries)]
-fn connected_to_root<'tcx>(query: Ref<'tcx>, visited: &mut HashSet<Ref<'tcx>>) -> bool {
+fn connected_to_root<'tcx>(
+    query: Lrc<QueryJob<'tcx>>,
+    visited: &mut HashSet<*const QueryJob<'tcx>>
+) -> bool {
     // We already visited this or we're deliberately ignoring it
-    if visited.contains(&query) {
+    if visited.contains(&query.as_ptr()) {
         return false;
     }

     // This query is connected to the root (it has no query parent), return true
-    if unsafe { (*query).parent.is_none() } {
+    if query.parent.is_none() {
         return true;
     }

-    visited.insert(query);
+    visited.insert(query.as_ptr());

     let mut connected = false;

@@ -351,7 +353,7 @@ fn connected_to_root<'tcx>(query: Ref<'tcx>, visited: &mut HashSet<Ref<'tcx>>) -
 /// the function returns false.
 #[cfg(parallel_queries)]
 fn remove_cycle<'tcx>(
-    jobs: &mut Vec<Ref<'tcx>>,
+    jobs: &mut Vec<Lrc<QueryJob<'tcx>>>,
     wakelist: &mut Vec<Lrc<QueryWaiter<'tcx>>>,
     tcx: TyCtxt<'_, 'tcx, '_>
 ) -> bool {
@@ -367,7 +369,7 @@ fn remove_cycle<'tcx>(

     // Extract the spans and queries into separate arrays
     let mut spans: Vec<_> = stack.iter().map(|e| e.0).collect();
-    let queries = stack.iter().map(|e| e.1);
+    let queries = stack.into_iter().map(|e| e.1);

     // Shift the spans so that queries are matched with the span for their waitee
     let last = spans.pop().unwrap();
@@ -378,23 +380,25 @@ fn remove_cycle<'tcx>(

     // Remove the queries in our cycle from the list of jobs to look at
     for r in &stack {
-        jobs.remove_item(&r.1);
+        if let Some(pos) = jobs.iter().position(|j| j.as_ptr() == r.1.as_ptr()) {
+            jobs.remove(pos);
+        }
     }

     // Find the queries in the cycle which are
     // connected to queries outside the cycle
-    let entry_points: Vec<Ref<'_>> = stack.iter().filter_map(|query| {
+    let entry_points: Vec<Lrc<QueryJob<'tcx>>> = stack.iter().filter_map(|query| {
         // Mark all the other queries in the cycle as already visited
         let mut visited = HashSet::from_iter(stack.iter().filter_map(|q| {
-            if q.1 != query.1 {
-                Some(q.1)
+            if q.1.as_ptr() != query.1.as_ptr() {
+                Some(q.1.as_ptr())
             } else {
                 None
             }
         }));

-        if connected_to_root(query.1, &mut visited) {
-            Some(query.1)
+        if connected_to_root(query.1.clone(), &mut visited) {
+            Some(query.1.clone())
         } else {
             None
         }
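
`Vec::remove_item` had to go here because it relies on `PartialEq`, which the raw `Ref` pointers had for free but `Lrc<QueryJob>` does not (the method was also unstable), so the diff replaces it with a position-then-remove scan keyed on addresses. A stand-alone version of the same pattern, assuming `std::sync::Arc` and a made-up `Job` type:

    use std::sync::Arc;

    struct Job(&'static str);

    // Remove the first element of `jobs` that is the same allocation as `target`.
    // Mirrors the position()/remove() pair the diff uses instead of Vec::remove_item.
    fn remove_job(jobs: &mut Vec<Arc<Job>>, target: &Arc<Job>) {
        if let Some(pos) = jobs.iter().position(|j| Arc::ptr_eq(j, target)) {
            jobs.remove(pos);
        }
    }

    fn main() {
        let a = Arc::new(Job("typeck"));
        let b = Arc::new(Job("borrowck"));
        let mut jobs = vec![a.clone(), b.clone()];

        remove_job(&mut jobs, &a);
        assert_eq!(jobs.len(), 1);
        assert!(Arc::ptr_eq(&jobs[0], &b));
        println!("remaining job: {}", jobs[0].0);
    }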
@@ -403,39 +407,36 @@ fn remove_cycle<'tcx>(
     // Deterministically pick an entry point
     // FIXME: Sort this instead
     let mut hcx = tcx.create_stable_hashing_context();
-    let entry_point = *entry_points.iter().min_by_key(|&&q| {
+    let entry_point = entry_points.iter().min_by_key(|q| {
         let mut stable_hasher = StableHasher::<u64>::new();
-        unsafe { (*q).info.query.hash_stable(&mut hcx, &mut stable_hasher); }
+        q.info.query.hash_stable(&mut hcx, &mut stable_hasher);
         stable_hasher.finish()
-    }).unwrap();
+    }).unwrap().as_ptr();

     // Shift the stack until our entry point is first
-    while stack[0].1 != entry_point {
+    while stack[0].1.as_ptr() != entry_point {
         let last = stack.pop().unwrap();
         stack.insert(0, last);
     }

     // Create the cycle error
     let mut error = CycleError {
         usage: None,
-        cycle: stack.iter().map(|&(s, q)| QueryInfo {
+        cycle: stack.iter().map(|&(s, ref q)| QueryInfo {
             span: s,
-            query: unsafe { (*q).info.query.clone() },
+            query: q.info.query.clone(),
         } ).collect(),
     };

     // We unwrap `waiter` here since there must always be one
     // edge which is resumeable / waited using a query latch
     let (waitee_query, waiter_idx) = waiter.unwrap();
-    let waitee_query = unsafe { &*waitee_query };

     // Extract the waiter we want to resume
     let waiter = waitee_query.latch.extract_waiter(waiter_idx);

     // Set the cycle error so it will be picked up when resumed
-    unsafe {
-        *waiter.cycle.lock() = Some(error);
-    }
+    *waiter.cycle.lock() = Some(error);

     // Put the waiter on the list of things to resume
     wakelist.push(waiter);
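
The entry point is still chosen deterministically by hashing each candidate's query and taking the minimum, but the chosen handle is immediately reduced to an address with `.as_ptr()` so the `while stack[0].1.as_ptr() != entry_point` loop in the hunk above compares plain pointers. A rough illustration of the same pick-by-minimum-hash idea, using the standard library's hasher in place of rustc's `StableHasher` and invented names:

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};
    use std::sync::Arc;

    #[derive(Hash)]
    struct Job {
        name: &'static str,
    }

    // Stands in for hashing `info.query` with rustc's StableHasher.
    fn job_hash(job: &Job) -> u64 {
        let mut hasher = DefaultHasher::new();
        job.hash(&mut hasher);
        hasher.finish()
    }

    fn main() {
        let jobs = vec![
            Arc::new(Job { name: "typeck" }),
            Arc::new(Job { name: "borrowck" }),
            Arc::new(Job { name: "mir_built" }),
        ];

        // Pick the entry point by minimum hash, then keep only its address,
        // mirroring `.min_by_key(..).unwrap().as_ptr()` in the hunk above.
        let entry_point: *const Job = &**jobs.iter().min_by_key(|j| job_hash(j)).unwrap();

        // The address alone is enough for the identity checks that follow.
        let picked = jobs
            .iter()
            .find(|j| std::ptr::eq(&***j, entry_point))
            .unwrap();
        println!("entry point: {}", picked.name);
    }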
@@ -448,8 +449,9 @@ fn remove_cycle<'tcx>(

 /// Creates a new thread and forwards information in thread locals to it.
 /// The new thread runs the deadlock handler.
+/// Must only be called when a deadlock is about to happen.
 #[cfg(parallel_queries)]
-pub fn handle_deadlock() {
+pub unsafe fn handle_deadlock() {
     use syntax;
     use syntax_pos;

@@ -458,25 +460,23 @@ pub fn handle_deadlock() {
     let gcx_ptr = tls::GCX_PTR.with(|gcx_ptr| {
         gcx_ptr as *const _
     });
-    let gcx_ptr = unsafe { &*gcx_ptr };
+    let gcx_ptr = &*gcx_ptr;

     let syntax_globals = syntax::GLOBALS.with(|syntax_globals| {
         syntax_globals as *const _
     });
-    let syntax_globals = unsafe { &*syntax_globals };
+    let syntax_globals = &*syntax_globals;

     let syntax_pos_globals = syntax_pos::GLOBALS.with(|syntax_pos_globals| {
         syntax_pos_globals as *const _
     });
-    let syntax_pos_globals = unsafe { &*syntax_pos_globals };
+    let syntax_pos_globals = &*syntax_pos_globals;
     thread::spawn(move || {
         tls::GCX_PTR.set(gcx_ptr, || {
             syntax_pos::GLOBALS.set(syntax_pos_globals, || {
                 syntax_pos::GLOBALS.set(syntax_pos_globals, || {
                     tls::with_thread_locals(|| {
-                        unsafe {
-                            tls::with_global(|tcx| deadlock(tcx, &registry))
-                        }
+                        tls::with_global(|tcx| deadlock(tcx, &registry))
                     })
                 })
            })
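
With the whole function marked `unsafe`, the individual `unsafe { &* ... }` blocks around the thread-local pointers and around `tls::with_global` are no longer needed: the raw-pointer dereferences are allowed anywhere in the body, and the obligation that those pointers stay valid moves to the caller, which is what the new doc line about only calling this when a deadlock is about to happen records. Presumably the justification is that every worker thread is blocked at that point, so the thread locals the pointers refer to stay alive while the handler thread runs.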
@@ -497,7 +497,7 @@ fn deadlock(tcx: TyCtxt<'_, '_, '_>, registry: &rayon_core::Registry) {
     });

     let mut wakelist = Vec::new();
-    let mut jobs: Vec<_> = tcx.maps.collect_active_jobs().iter().map(|j| &**j as Ref).collect();
+    let mut jobs: Vec<_> = tcx.maps.collect_active_jobs();

     let mut found_cycle = false;

src/librustc_driver/driver.rs (+4 -3)
@@ -85,9 +85,10 @@ pub fn spawn_thread_pool<F: FnOnce(config::Options) -> R + sync::Send, R: sync::

     let gcx_ptr = &Lock::new(0);

-    let config = ThreadPoolBuilder::new().num_threads(Session::query_threads_from_opts(&opts))
-        .deadlock_handler(ty::maps::handle_deadlock)
-        .stack_size(16 * 1024 * 1024);
+    let config = ThreadPoolBuilder::new()
+        .num_threads(Session::query_threads_from_opts(&opts))
+        .deadlock_handler(|| unsafe { ty::maps::handle_deadlock() })
+        .stack_size(16 * 1024 * 1024);

     let with_pool = move |pool: &ThreadPool| {
         pool.install(move || f(opts))
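
Because `handle_deadlock` is now an `unsafe fn`, it no longer satisfies the plain callback bound that the `ThreadPoolBuilder::deadlock_handler` hook expects, so the call site wraps it in a safe closure containing an explicit `unsafe` block. The same pattern in miniature, with an invented `register_handler` API standing in for the builder method:

    // Stand-in for an API that only accepts a safe callback,
    // like the deadlock_handler hook used above.
    fn register_handler<F: Fn() + Send + 'static>(handler: F) {
        handler();
    }

    /// Must only be called when the caller has checked the relevant invariant.
    unsafe fn handle_event() {
        // Imagine raw-pointer work here that relies on that invariant.
        println!("handling event");
    }

    fn main() {
        // An `unsafe fn` does not implement Fn(), so wrap it: the closure itself is
        // safe to call, and the unsafe block marks where the caller takes responsibility.
        register_handler(|| unsafe { handle_event() });
    }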
