
Commit dc97188

add aslr support
1 parent 06381af commit dc97188

File tree: 9 files changed (+257, -35 lines)

Cargo.lock

Lines changed: 45 additions & 2 deletions
Some generated files are not rendered by default.

Cargo.toml

Lines changed: 6 additions & 2 deletions
@@ -34,7 +34,7 @@ name = "uefi"
 required-features = ["uefi_bin"]
 
 [dependencies]
-xmas-elf = { version = "0.6.2", optional = true }
+xmas-elf = { version = "0.8.0", optional = true }
 x86_64 = { version = "0.14.7", optional = true, default-features = false, features = ["instructions", "inline_asm"] }
 usize_conversions = { version = "0.2.0", optional = true }
 bit_field = { version = "0.10.0", optional = true }
@@ -51,6 +51,9 @@ json = { version = "0.12.4", optional = true }
 rsdp = { version = "1.0.0", optional = true }
 fatfs = { version = "0.3.4", optional = true }
 gpt = { version = "2.0.0", optional = true }
+raw-cpuid = { version = "10.2.0", optional = true }
+rand = { version = "0.8.4", optional = true, default-features = false }
+rand_chacha = { version = "0.3.1", optional = true, default-features = false }
 
 [dependencies.noto-sans-mono-bitmap]
 version = "0.1.2"
@@ -72,7 +75,8 @@ bios_bin = ["binary", "rsdp"]
 uefi_bin = ["binary", "uefi"]
 binary = [
     "llvm-tools-build", "x86_64", "toml", "xmas-elf", "usize_conversions", "log", "conquer-once",
-    "spinning_top", "serde", "noto-sans-mono-bitmap", "quote", "proc-macro2",
+    "spinning_top", "serde", "noto-sans-mono-bitmap", "quote", "proc-macro2", "raw-cpuid", "rand",
+    "rand_chacha"
 ]
 
 [profile.dev]

build.rs

Lines changed: 4 additions & 0 deletions
@@ -356,6 +356,8 @@ mod binary {
         pub map_page_table_recursively: bool,
         #[serde(default = "val_true")]
         pub map_framebuffer: bool,
+        #[serde(default)]
+        pub aslr: bool,
         pub kernel_stack_size: Option<AlignedAddress>,
         pub physical_memory_offset: Option<AlignedAddress>,
         pub recursive_index: Option<u16>,
@@ -376,6 +378,7 @@ mod binary {
            let map_physical_memory = self.map_physical_memory;
            let map_page_table_recursively = self.map_page_table_recursively;
            let map_framebuffer = self.map_framebuffer;
+           let aslr = self.aslr;
            let kernel_stack_size = optional(self.kernel_stack_size);
            let physical_memory_offset = optional(self.physical_memory_offset);
            let recursive_index = optional(self.recursive_index);
@@ -389,6 +392,7 @@ mod binary {
                map_physical_memory: #map_physical_memory,
                map_page_table_recursively: #map_page_table_recursively,
                map_framebuffer: #map_framebuffer,
+               aslr: #aslr,
                kernel_stack_size: #kernel_stack_size,
                physical_memory_offset: #physical_memory_offset,
                recursive_index: #recursive_index,
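The new `aslr` key uses `#[serde(default)]`, so kernels that don't set it keep the old behavior; presumably it is set next to the other keys in the kernel manifest's `[package.metadata.bootloader]` table. A minimal standalone sketch (not part of this commit) of how that default behaves, assuming the `serde` (with derive) and `toml` crates that build.rs already depends on:

use serde::Deserialize;

#[derive(Deserialize)]
struct Config {
    // Mirrors the field added above: an absent key means `false`.
    #[serde(default)]
    aslr: bool,
}

fn main() {
    // Hypothetical config fragments; only the `aslr` key is modeled here.
    let off: Config = toml::from_str("").unwrap();
    let on: Config = toml::from_str("aslr = true").unwrap();
    assert!(!off.aslr);
    assert!(on.aslr);
}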

src/binary/entropy.rs

Lines changed: 87 additions & 0 deletions
@@ -0,0 +1,87 @@
+use rand_chacha::{rand_core::SeedableRng, ChaCha20Rng};
+use raw_cpuid::CpuId;
+use x86_64::instructions::{port::Port, random::RdRand};
+
+/// Gather entropy from various sources to seed a RNG.
+pub fn build_rng() -> ChaCha20Rng {
+    const ENTROPY_SOURCES: [fn() -> [u8; 32]; 3] = [rd_rand_entropy, tsc_entropy, pit_entropy];
+
+    // Collect entropy from different sources and xor them all together.
+    let mut seed = [0; 32];
+    for entropy_source in ENTROPY_SOURCES {
+        let entropy = entropy_source();
+
+        for (seed, entropy) in seed.iter_mut().zip(entropy) {
+            *seed ^= entropy;
+        }
+    }
+
+    // Construct the RNG.
+    ChaCha20Rng::from_seed(seed)
+}
+
+/// Gather entropy by requesting random numbers with the `rdrand` instruction if it's available.
+///
+/// This function provides excellent entropy (unless you don't trust the CPU vendors).
+fn rd_rand_entropy() -> [u8; 32] {
+    let mut entropy = [0; 32];
+
+    // Check if the CPU supports `RDRAND`.
+    if let Some(rd_rand) = RdRand::new() {
+        for i in 0..4 {
+            let value = loop {
+                if let Some(value) = rd_rand.get_u64() {
+                    break value;
+                }
+            };
+            entropy[i * 8..(i + 1) * 8].copy_from_slice(&value.to_ne_bytes());
+        }
+    }
+
+    entropy
+}
+
+/// Gather entropy by reading the current time with the `rdtsc` instruction if it's available.
+///
+/// This function doesn't provide particularly good entropy, but it's better than nothing.
+fn tsc_entropy() -> [u8; 32] {
+    let mut entropy = [0; 32];
+
+    // Check if the CPU supports `RDTSC`.
+    let cpu_id = CpuId::new();
+    if let Some(feature_info) = cpu_id.get_feature_info() {
+        if feature_info.has_tsc() {
+            for i in 0..4 {
+                let value = unsafe {
+                    // SAFETY: We checked that the cpu supports `RDTSC` and we run in ring 0.
+                    core::arch::x86_64::_rdtsc()
+                };
+                entropy[i * 8..(i + 1) * 8].copy_from_slice(&value.to_ne_bytes());
+            }
+        }
+    }
+
+    entropy
+}
+
+/// Gather entropy by reading the current count of PIT channels 0-2.
+///
+/// This function doesn't provide particularly good entropy, but it's always available.
+fn pit_entropy() -> [u8; 32] {
+    let mut entropy = [0; 32];
+
+    for (i, entropy_byte) in entropy.iter_mut().enumerate() {
+        // Cycle through channels 0-2.
+        let channel = i % 3;
+
+        let mut port = Port::<u8>::new(0x40 + channel as u16);
+        let value = unsafe {
+            // SAFETY: It's safe to read from ports 0x40-0x42.
+            port.read()
+        };
+
+        *entropy_byte = value;
+    }
+
+    entropy
+}
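XOR-combining the three 32-byte buffers means the seed is at least as hard to predict as the strongest individual source: a weak or all-zero source (e.g. `rd_rand_entropy` on CPUs without RDRAND) never cancels out a strong one. A small standalone sketch (not part of this commit) of how the seeded generator is then consumed; identical seeds yield identical streams, which is why the seed quality matters:

use rand::Rng;
use rand_chacha::{rand_core::SeedableRng, ChaCha20Rng};

fn main() {
    // Stand-in for the XOR of the three entropy sources above.
    let seed = [0x42u8; 32];

    let mut a = ChaCha20Rng::from_seed(seed);
    let mut b = ChaCha20Rng::from_seed(seed);

    // ChaCha20Rng is deterministic: the seed fully determines every value drawn.
    assert_eq!(a.gen::<u64>(), b.gen::<u64>());
}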

src/binary/level_4_entries.rs

Lines changed: 57 additions & 12 deletions
@@ -1,4 +1,6 @@
 use core::{alloc::Layout, convert::TryInto};
+use rand::distributions::{Distribution, Uniform};
+use rand_chacha::ChaCha20Rng;
 use usize_conversions::IntoUsize;
 use x86_64::{
     structures::paging::{Page, PageTableIndex, Size4KiB},
@@ -7,15 +9,19 @@ use x86_64::{
 use xmas_elf::program::ProgramHeader;
 
 use crate::{
-    binary::{MemoryRegion, CONFIG},
+    binary::{entropy, MemoryRegion, CONFIG},
     BootInfo,
 };
 
 /// Keeps track of used entries in a level 4 page table.
 ///
 /// Useful for determining a free virtual memory block, e.g. for mapping additional data.
 pub struct UsedLevel4Entries {
-    entry_state: [bool; 512], // whether an entry is in use by the kernel
+    /// Whether an entry is in use by the kernel.
+    entry_state: [bool; 512],
+    /// A random number generator that should be used to generate random addresses or
+    /// `None` if ASLR is disabled.
+    rng: Option<ChaCha20Rng>,
 }
 
 impl UsedLevel4Entries {
@@ -25,6 +31,7 @@ impl UsedLevel4Entries {
     pub fn new(max_phys_addr: PhysAddr, regions_len: usize, framebuffer_size: usize) -> Self {
         let mut used = UsedLevel4Entries {
             entry_state: [false; 512],
+            rng: CONFIG.aslr.then(entropy::build_rng),
         };
 
         used.entry_state[0] = true; // TODO: Can we do this dynamically?
@@ -104,23 +111,61 @@ impl UsedLevel4Entries {
     /// Since this method marks each returned index as used, it can be used multiple times
     /// to determine multiple unused virtual memory regions.
     pub fn get_free_entry(&mut self) -> PageTableIndex {
-        let (idx, entry) = self
+        // Create an iterator over all available p4 indices.
+        let mut free_entries = self
             .entry_state
-            .iter_mut()
+            .iter()
+            .copied()
             .enumerate()
-            .find(|(_, &mut entry)| entry == false)
-            .expect("no usable level 4 entries found");
+            .filter(|(_, used)| !used)
+            .map(|(idx, _)| idx);
+
+        // Choose the free entry index.
+        let idx = if let Some(rng) = self.rng.as_mut() {
+            // Count the entries and randomly choose an index in `[0..count)`.
+            let count = free_entries.clone().count();
+            if count == 0 {
+                panic!("no usable level 4 entries found")
+            }
+            let distribution = Uniform::from(0..count);
+            let idx = distribution.sample(rng);
+
+            // Get the index of the free entry.
+            free_entries.nth(idx).unwrap()
+        } else {
+            // Choose the first index.
+            free_entries
+                .next()
+                .expect("no usable level 4 entries found")
+        };
+
+        // Mark the entry as used.
+        self.entry_state[idx] = true;
 
-        *entry = true;
         PageTableIndex::new(idx.try_into().unwrap())
     }
 
-    /// Returns the virtual start address of an unused level 4 entry and marks it as used.
+    /// Returns a virtual address in an unused level 4 entry and marks it as used.
     ///
-    /// This is a convenience method around [`get_free_entry`], so all of its docs applies here
+    /// This function calls [`get_free_entry`] internally, so all of its docs apply here
     /// too.
-    pub fn get_free_address(&mut self) -> VirtAddr {
-        Page::from_page_table_indices_1gib(self.get_free_entry(), PageTableIndex::new(0))
-            .start_address()
+    pub fn get_free_address(&mut self, size: u64, alignment: u64) -> VirtAddr {
+        assert!(alignment.is_power_of_two());
+
+        let base =
+            Page::from_page_table_indices_1gib(self.get_free_entry(), PageTableIndex::new(0))
+                .start_address();
+
+        let offset = if let Some(rng) = self.rng.as_mut() {
+            // Choose a random offset.
+            const LEVEL_4_SIZE: u64 = 4096 * 512 * 512 * 512;
+            let end = LEVEL_4_SIZE - size;
+            let uniform_range = Uniform::from(0..end / alignment);
+            uniform_range.sample(rng) * alignment
+        } else {
+            0
+        };
+
+        base + offset
     }
 }
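One level 4 entry spans 4096 * 512 * 512 * 512 bytes = 512 GiB, so the random offset is drawn in units of `alignment` from `[0, (512 GiB - size) / alignment)`; the result always stays inside the chosen entry and keeps the requested alignment. A standalone sketch of that arithmetic (not part of this commit; the real code samples with `Uniform` instead of taking a modulus, and the RNG draw here is a fixed stand-in):

// Bytes spanned by a single level 4 page table entry (512 GiB).
const LEVEL_4_SIZE: u64 = 4096 * 512 * 512 * 512;

fn random_offset(size: u64, alignment: u64, draw: u64) -> u64 {
    assert!(alignment.is_power_of_two());
    // Number of aligned start positions that leave room for `size` bytes.
    let slots = (LEVEL_4_SIZE - size) / alignment;
    (draw % slots) * alignment
}

fn main() {
    // E.g. a 16 MiB kernel image with 2 MiB alignment: 262,136 possible slots.
    let off = random_offset(16 * 1024 * 1024, 2 * 1024 * 1024, 12345);
    assert_eq!(off % (2 * 1024 * 1024), 0);
    assert!(off + 16 * 1024 * 1024 <= LEVEL_4_SIZE);
}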

src/binary/load_kernel.rs

Lines changed: 17 additions & 5 deletions
@@ -52,12 +52,28 @@ where
         }
 
         let elf_file = ElfFile::new(bytes)?;
+        for program_header in elf_file.program_iter() {
+            program::sanity_check(program_header, &elf_file)?;
+        }
 
         let virtual_address_offset = match elf_file.header.pt2.type_().as_type() {
             header::Type::None => unimplemented!(),
             header::Type::Relocatable => unimplemented!(),
             header::Type::Executable => 0,
-            header::Type::SharedObject => used_entries.get_free_address().as_u64(),
+            header::Type::SharedObject => {
+                // Find the highest virtual memory address and the biggest alignment.
+                let load_program_headers = elf_file
+                    .program_iter()
+                    .filter(|h| matches!(h.get_type(), Ok(Type::Load)));
+                let size = load_program_headers
+                    .clone()
+                    .map(|h| h.virtual_addr() + h.mem_size())
+                    .max()
+                    .unwrap_or(0);
+                let align = load_program_headers.map(|h| h.align()).max().unwrap_or(1);
+
+                used_entries.get_free_address(size, align).as_u64()
+            }
             header::Type::Core => unimplemented!(),
             header::Type::ProcessorSpecific(_) => unimplemented!(),
         };
@@ -79,10 +95,6 @@ where
     }
 
     fn load_segments(&mut self) -> Result<Option<TlsTemplate>, &'static str> {
-        for program_header in self.elf_file.program_iter() {
-            program::sanity_check(program_header, &self.elf_file)?;
-        }
-
        // Load the segments into virtual memory.
        let mut tls_template = None;
        for program_header in self.elf_file.program_iter() {
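For a position-independent (`SharedObject`) kernel, the randomized base must leave room for the whole loaded image and respect the largest segment alignment, which is what the two new arguments to `get_free_address` encode. A standalone sketch of that reduction over the LOAD segments (not part of this commit; plain tuples stand in for `xmas-elf` program headers):

// (virtual_addr, mem_size, align) for each hypothetical LOAD segment.
fn image_requirements(load_segments: &[(u64, u64, u64)]) -> (u64, u64) {
    // Highest end address of any segment, i.e. the size the image needs from its base.
    let size = load_segments
        .iter()
        .map(|&(vaddr, memsz, _)| vaddr + memsz)
        .max()
        .unwrap_or(0);
    // Strictest alignment requested by any segment.
    let align = load_segments.iter().map(|&(_, _, a)| a).max().unwrap_or(1);
    (size, align)
}

fn main() {
    // Two made-up segments, both requesting 2 MiB alignment.
    let segments = [(0x0, 0x14_000, 0x20_0000), (0x20_0000, 0x8_000, 0x20_0000)];
    assert_eq!(image_requirements(&segments), (0x20_8000, 0x20_0000));
}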
