
Commit 0bff0aa

surenbaghdasaryan authored and akpm00 committed
x86/mm: try VMA lock-based page fault handling first
Attempt VMA lock-based page fault handling first, and fall back to the
existing mmap_lock-based handling if that fails.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Suren Baghdasaryan <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent 52f2386 commit 0bff0aa

File tree

2 files changed: 37 additions, 0 deletions
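As a condensed sketch (not part of the commit itself, and not standalone compilable code): the fast path added to do_user_addr_fault() in the arch/x86/mm/fault.c hunks below behaves roughly as follows, reusing the locals already declared in that function (mm, vma, address, flags, error_code, fault, regs) and omitting the signal quick-path for brevity.

#ifdef CONFIG_PER_VMA_LOCK
	/* Only user-mode faults are eligible for the VMA-lock fast path. */
	if (!(flags & FAULT_FLAG_USER))
		goto lock_mmap;

	/* Look up and read-lock the VMA under RCU, without taking mmap_lock. */
	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		goto lock_mmap;

	/* Permission check; on failure drop the VMA lock and fall back. */
	if (unlikely(access_error(error_code, vma))) {
		vma_end_read(vma);
		goto lock_mmap;
	}

	/* Handle the fault while holding only the per-VMA read lock. */
	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
	vma_end_read(vma);

	if (!(fault & VM_FAULT_RETRY)) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		goto done;	/* handled without ever taking mmap_lock */
	}
	/* Could not complete under the VMA lock; retry under mmap_lock. */
	count_vm_vma_lock_event(VMA_LOCK_RETRY);

	/* (Signal quick-path omitted here; see the full hunk below.) */
lock_mmap:
#endif /* CONFIG_PER_VMA_LOCK */
	/* ... existing mmap_lock-based handling, ending in mmap_read_unlock() ... */
#ifdef CONFIG_PER_VMA_LOCK
done:
#endif

The fallback is deliberately conservative: anything the per-VMA path cannot finish (VM_FAULT_RETRY) is simply retried through the unchanged mmap_lock path, and the VMA_LOCK_SUCCESS / VMA_LOCK_RETRY vm_events record how often each outcome occurs.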

arch/x86/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -27,6 +27,7 @@ config X86_64
         # Options that are inherently 64-bit kernel only:
         select ARCH_HAS_GIGANTIC_PAGE
         select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
+        select ARCH_SUPPORTS_PER_VMA_LOCK
         select ARCH_USE_CMPXCHG_LOCKREF
         select HAVE_ARCH_SOFT_DIRTY
         select MODULES_USE_ELF_RELA

arch/x86/mm/fault.c

Lines changed: 36 additions & 0 deletions
@@ -19,6 +19,7 @@
 #include <linux/uaccess.h>              /* faulthandler_disabled()      */
 #include <linux/efi.h>                  /* efi_crash_gracefully_on_page_fault()*/
 #include <linux/mm_types.h>
+#include <linux/mm.h>                   /* find_and_lock_vma() */

 #include <asm/cpufeature.h>             /* boot_cpu_has, ...            */
 #include <asm/traps.h>                  /* dotraplinkage, ...           */
@@ -1333,6 +1334,38 @@ void do_user_addr_fault(struct pt_regs *regs,
         }
 #endif

+#ifdef CONFIG_PER_VMA_LOCK
+        if (!(flags & FAULT_FLAG_USER))
+                goto lock_mmap;
+
+        vma = lock_vma_under_rcu(mm, address);
+        if (!vma)
+                goto lock_mmap;
+
+        if (unlikely(access_error(error_code, vma))) {
+                vma_end_read(vma);
+                goto lock_mmap;
+        }
+        fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
+        vma_end_read(vma);
+
+        if (!(fault & VM_FAULT_RETRY)) {
+                count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+                goto done;
+        }
+        count_vm_vma_lock_event(VMA_LOCK_RETRY);
+
+        /* Quick path to respond to signals */
+        if (fault_signal_pending(fault, regs)) {
+                if (!user_mode(regs))
+                        kernelmode_fixup_or_oops(regs, error_code, address,
+                                                 SIGBUS, BUS_ADRERR,
+                                                 ARCH_DEFAULT_PKEY);
+                return;
+        }
+lock_mmap:
+#endif /* CONFIG_PER_VMA_LOCK */
+
         /*
          * Kernel-mode access to the user address space should only occur
          * on well-defined single instructions listed in the exception
@@ -1433,6 +1466,9 @@ void do_user_addr_fault(struct pt_regs *regs,
         }

         mmap_read_unlock(mm);
+#ifdef CONFIG_PER_VMA_LOCK
+done:
+#endif
         if (likely(!(fault & VM_FAULT_ERROR)))
                 return;
