@@ -325,104 +325,129 @@ mod imp {
325
325
} )
326
326
}
327
327
328
+ #[ forbid( unsafe_op_in_unsafe_fn) ]
328
329
unsafe fn install_main_guard ( ) -> Option < Range < usize > > {
329
330
let page_size = PAGE_SIZE . load ( Ordering :: Relaxed ) ;
330
- if cfg ! ( all( target_os = "linux" , not( target_env = "musl" ) ) ) {
331
- // Linux doesn't allocate the whole stack right away, and
332
- // the kernel has its own stack-guard mechanism to fault
333
- // when growing too close to an existing mapping. If we map
334
- // our own guard, then the kernel starts enforcing a rather
335
- // large gap above that, rendering much of the possible
336
- // stack space useless. See #43052.
337
- //
338
- // Instead, we'll just note where we expect rlimit to start
339
- // faulting, so our handler can report "stack overflow", and
340
- // trust that the kernel's own stack guard will work.
341
- let stackptr = get_stack_start_aligned ( ) ?;
342
- let stackaddr = stackptr. addr ( ) ;
343
- Some ( stackaddr - page_size..stackaddr)
344
- } else if cfg ! ( all( target_os = "linux" , target_env = "musl" ) ) {
345
- // For the main thread, the musl's pthread_attr_getstack
346
- // returns the current stack size, rather than maximum size
347
- // it can eventually grow to. It cannot be used to determine
348
- // the position of kernel's stack guard.
349
- None
350
- } else if cfg ! ( target_os = "freebsd" ) {
351
- // FreeBSD's stack autogrows, and optionally includes a guard page
352
- // at the bottom. If we try to remap the bottom of the stack
353
- // ourselves, FreeBSD's guard page moves upwards. So we'll just use
354
- // the builtin guard page.
355
- let stackptr = get_stack_start_aligned ( ) ?;
356
- let guardaddr = stackptr. addr ( ) ;
357
- // Technically the number of guard pages is tunable and controlled
358
- // by the security.bsd.stack_guard_page sysctl.
359
- // By default it is 1, checking once is enough since it is
360
- // a boot time config value.
361
- static PAGES : crate :: sync:: OnceLock < usize > = crate :: sync:: OnceLock :: new ( ) ;
362
-
363
- let pages = PAGES . get_or_init ( || {
364
- use crate :: sys:: weak:: dlsym;
365
- dlsym ! ( fn sysctlbyname( * const libc:: c_char, * mut libc:: c_void, * mut libc:: size_t, * const libc:: c_void, libc:: size_t) -> libc:: c_int) ;
366
- let mut guard: usize = 0 ;
367
- let mut size = crate :: mem:: size_of_val ( & guard) ;
368
- let oid = crate :: ffi:: CStr :: from_bytes_with_nul (
369
- b"security.bsd.stack_guard_page\0 " ,
370
- )
371
- . unwrap ( ) ;
372
- match sysctlbyname. get ( ) {
373
- Some ( fcn) => {
374
- if fcn ( oid. as_ptr ( ) , core:: ptr:: addr_of_mut!( guard) as * mut _ , core:: ptr:: addr_of_mut!( size) as * mut _ , crate :: ptr:: null_mut ( ) , 0 ) == 0 {
375
- guard
376
- } else {
377
- 1
378
- }
379
- } ,
380
- _ => 1 ,
381
- }
382
- } ) ;
383
- Some ( guardaddr..guardaddr + pages * page_size)
384
- } else if cfg ! ( any( target_os = "openbsd" , target_os = "netbsd" ) ) {
385
- // OpenBSD stack already includes a guard page, and stack is
386
- // immutable.
387
- // NetBSD stack includes the guard page.
388
- //
389
- // We'll just note where we expect rlimit to start
390
- // faulting, so our handler can report "stack overflow", and
391
- // trust that the kernel's own stack guard will work.
392
- let stackptr = get_stack_start_aligned ( ) ?;
393
- let stackaddr = stackptr. addr ( ) ;
394
- Some ( stackaddr - page_size..stackaddr)
395
- } else {
396
- // Reallocate the last page of the stack.
397
- // This ensures SIGBUS will be raised on
398
- // stack overflow.
399
- // Systems which enforce strict PAX MPROTECT do not allow
400
- // to mprotect() a mapping with less restrictive permissions
401
- // than the initial mmap() used, so we mmap() here with
402
- // read/write permissions and only then mprotect() it to
403
- // no permissions at all. See issue #50313.
404
- let stackptr = get_stack_start_aligned ( ) ?;
405
- let result = mmap64 (
406
- stackptr,
407
- page_size,
408
- PROT_READ | PROT_WRITE ,
409
- MAP_PRIVATE | MAP_ANON | MAP_FIXED ,
410
- -1 ,
411
- 0 ,
412
- ) ;
413
- if result != stackptr || result == MAP_FAILED {
414
- panic ! ( "failed to allocate a guard page: {}" , io:: Error :: last_os_error( ) ) ;
331
+
332
+ unsafe {
333
+ // this way someone on any unix-y OS can check that all these compile
334
+ if cfg ! ( all( target_os = "linux" , not( target_env = "musl" ) ) ) {
335
+ install_main_guard_linux ( page_size)
336
+ } else if cfg ! ( all( target_os = "linux" , target_env = "musl" ) ) {
337
+ install_main_guard_linux_musl ( page_size)
338
+ } else if cfg ! ( target_os = "freebsd" ) {
339
+ install_main_guard_freebsd ( page_size)
340
+ } else if cfg ! ( any( target_os = "netbsd" , target_os = "openbsd" ) ) {
341
+ install_main_guard_bsds ( page_size)
342
+ } else {
343
+ install_main_guard_default ( page_size)
415
344
}
345
+ }
346
+ }
347
+
348
+ unsafe fn install_main_guard_linux ( page_size : usize ) -> Option < Range < usize > > {
349
+ // Linux doesn't allocate the whole stack right away, and
350
+ // the kernel has its own stack-guard mechanism to fault
351
+ // when growing too close to an existing mapping. If we map
352
+ // our own guard, then the kernel starts enforcing a rather
353
+ // large gap above that, rendering much of the possible
354
+ // stack space useless. See #43052.
355
+ //
356
+ // Instead, we'll just note where we expect rlimit to start
357
+ // faulting, so our handler can report "stack overflow", and
358
+ // trust that the kernel's own stack guard will work.
359
+ let stackptr = get_stack_start_aligned ( ) ?;
360
+ let stackaddr = stackptr. addr ( ) ;
361
+ Some ( stackaddr - page_size..stackaddr)
362
+ }
416
363
417
/// Main-thread guard for Linux with musl: no guard range can be reported.
///
/// # Safety
///
/// NOTE(review): same contract as `install_main_guard`; this variant
/// performs no unsafe operations itself.
unsafe fn install_main_guard_linux_musl(_page_size: usize) -> Option<Range<usize>> {
    // For the main thread, the musl's pthread_attr_getstack
    // returns the current stack size, rather than maximum size
    // it can eventually grow to. It cannot be used to determine
    // the position of kernel's stack guard.
    None
}
371
+
372
+ unsafe fn install_main_guard_freebsd ( page_size : usize ) -> Option < Range < usize > > {
373
+ // FreeBSD's stack autogrows, and optionally includes a guard page
374
+ // at the bottom. If we try to remap the bottom of the stack
375
+ // ourselves, FreeBSD's guard page moves upwards. So we'll just use
376
+ // the builtin guard page.
377
+ let stackptr = get_stack_start_aligned ( ) ?;
378
+ let guardaddr = stackptr. addr ( ) ;
379
+ // Technically the number of guard pages is tunable and controlled
380
+ // by the security.bsd.stack_guard_page sysctl.
381
+ // By default it is 1, checking once is enough since it is
382
+ // a boot time config value.
383
+ static PAGES : crate :: sync:: OnceLock < usize > = crate :: sync:: OnceLock :: new ( ) ;
384
+
385
+ let pages = PAGES . get_or_init ( || {
386
+ use crate :: sys:: weak:: dlsym;
387
+ dlsym ! ( fn sysctlbyname( * const libc:: c_char, * mut libc:: c_void, * mut libc:: size_t, * const libc:: c_void, libc:: size_t) -> libc:: c_int) ;
388
+ let mut guard: usize = 0 ;
389
+ let mut size = crate :: mem:: size_of_val ( & guard) ;
390
+ let oid = crate :: ffi:: CStr :: from_bytes_with_nul (
391
+ b"security.bsd.stack_guard_page\0 " ,
392
+ )
393
+ . unwrap ( ) ;
394
+ match sysctlbyname. get ( ) {
395
+ Some ( fcn) => {
396
+ if fcn ( oid. as_ptr ( ) , core:: ptr:: addr_of_mut!( guard) as * mut _ , core:: ptr:: addr_of_mut!( size) as * mut _ , crate :: ptr:: null_mut ( ) , 0 ) == 0 {
397
+ guard
398
+ } else {
399
+ 1
400
+ }
401
+ } ,
402
+ _ => 1 ,
420
403
}
404
+ } ) ;
405
+ Some ( guardaddr..guardaddr + pages * page_size)
406
+ }
421
407
422
- let guardaddr = stackptr. addr ( ) ;
408
+ unsafe fn install_main_guard_bsds ( page_size : usize ) -> Option < Range < usize > > {
409
+ // OpenBSD stack already includes a guard page, and stack is
410
+ // immutable.
411
+ // NetBSD stack includes the guard page.
412
+ //
413
+ // We'll just note where we expect rlimit to start
414
+ // faulting, so our handler can report "stack overflow", and
415
+ // trust that the kernel's own stack guard will work.
416
+ let stackptr = get_stack_start_aligned ( ) ?;
417
+ let stackaddr = stackptr. addr ( ) ;
418
+ Some ( stackaddr - page_size..stackaddr)
419
+ }
423
420
424
- Some ( guardaddr..guardaddr + page_size)
421
+ unsafe fn install_main_guard_default ( page_size : usize ) -> Option < Range < usize > > {
422
+ // Reallocate the last page of the stack.
423
+ // This ensures SIGBUS will be raised on
424
+ // stack overflow.
425
+ // Systems which enforce strict PAX MPROTECT do not allow
426
+ // to mprotect() a mapping with less restrictive permissions
427
+ // than the initial mmap() used, so we mmap() here with
428
+ // read/write permissions and only then mprotect() it to
429
+ // no permissions at all. See issue #50313.
430
+ let stackptr = get_stack_start_aligned ( ) ?;
431
+ let result = mmap64 (
432
+ stackptr,
433
+ page_size,
434
+ PROT_READ | PROT_WRITE ,
435
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED ,
436
+ -1 ,
437
+ 0 ,
438
+ ) ;
439
+ if result != stackptr || result == MAP_FAILED {
440
+ panic ! ( "failed to allocate a guard page: {}" , io:: Error :: last_os_error( ) ) ;
441
+ }
442
+
443
+ let result = mprotect ( stackptr, page_size, PROT_NONE ) ;
444
+ if result != 0 {
445
+ panic ! ( "failed to protect the guard page: {}" , io:: Error :: last_os_error( ) ) ;
425
446
}
447
+
448
+ let guardaddr = stackptr. addr ( ) ;
449
+
450
+ Some ( guardaddr..guardaddr + page_size)
426
451
}
427
452
428
453
#[ cfg( any( target_os = "macos" , target_os = "openbsd" , target_os = "solaris" ) ) ]
0 commit comments