Skip to content
This repository was archived by the owner on Nov 8, 2023. It is now read-only.

Commit 8a511e7

Browse files
committed
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm fixes from Paolo Bonzini:

 "ARM:

   - Fix EL2 Stage-1 MMIO mappings where a random address was used

   - Fix SMCCC function number comparison when the SVE hint is set

  RISC-V:

   - Fix KVM_GET_REG_LIST API for ISA_EXT registers

   - Fix reading ISA_EXT register of a missing extension

   - Fix ISA_EXT register handling in get-reg-list test

   - Fix filtering of AIA registers in get-reg-list test

  x86:

   - Fixes for TSC_AUX virtualization

   - Stop zapping page tables asynchronously, since we don't zap them as
     often as before"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: SVM: Do not use user return MSR support for virtualized TSC_AUX
  KVM: SVM: Fix TSC_AUX virtualization setup
  KVM: SVM: INTERCEPT_RDTSCP is never intercepted anyway
  KVM: x86/mmu: Stop zapping invalidated TDP MMU roots asynchronously
  KVM: x86/mmu: Do not filter address spaces in for_each_tdp_mmu_root_yield_safe()
  KVM: x86/mmu: Open code leaf invalidation from mmu_notifier
  KVM: riscv: selftests: Selectively filter-out AIA registers
  KVM: riscv: selftests: Fix ISA_EXT register handling in get-reg-list
  RISC-V: KVM: Fix riscv_vcpu_get_isa_ext_single() for missing extensions
  RISC-V: KVM: Fix KVM_GET_REG_LIST API for ISA_EXT registers
  KVM: selftests: Assert that vasprintf() is successful
  KVM: arm64: nvhe: Ignore SVE hint in SMCCC function ID
  KVM: arm64: Properly return allocated EL2 VA from hyp_alloc_private_va_range()
2 parents 5edc6bb + 5804c19 commit 8a511e7

File tree

20 files changed

+209
-161
lines changed

20 files changed

+209
-161
lines changed

arch/arm64/include/asm/kvm_hyp.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -118,7 +118,7 @@ void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu);
118118

119119
u64 __guest_enter(struct kvm_vcpu *vcpu);
120120

121-
bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt);
121+
bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id);
122122

123123
#ifdef __KVM_NVHE_HYPERVISOR__
124124
void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,

arch/arm64/kvm/hyp/include/nvhe/ffa.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,6 @@
1212
#define FFA_MAX_FUNC_NUM 0x7F
1313

1414
int hyp_ffa_init(void *pages);
15-
bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt);
15+
bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id);
1616

1717
#endif /* __KVM_HYP_FFA_H */

arch/arm64/kvm/hyp/nvhe/ffa.c

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -634,9 +634,8 @@ static bool do_ffa_features(struct arm_smccc_res *res,
634634
return true;
635635
}
636636

637-
bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt)
637+
bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
638638
{
639-
DECLARE_REG(u64, func_id, host_ctxt, 0);
640639
struct arm_smccc_res res;
641640

642641
/*

arch/arm64/kvm/hyp/nvhe/hyp-init.S

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -57,6 +57,7 @@ __do_hyp_init:
5757
cmp x0, #HVC_STUB_HCALL_NR
5858
b.lo __kvm_handle_stub_hvc
5959

60+
bic x0, x0, #ARM_SMCCC_CALL_HINTS
6061
mov x3, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init)
6162
cmp x0, x3
6263
b.eq 1f

arch/arm64/kvm/hyp/nvhe/hyp-main.c

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -368,6 +368,7 @@ static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
368368
if (static_branch_unlikely(&kvm_protected_mode_initialized))
369369
hcall_min = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize;
370370

371+
id &= ~ARM_SMCCC_CALL_HINTS;
371372
id -= KVM_HOST_SMCCC_ID(0);
372373

373374
if (unlikely(id < hcall_min || id >= ARRAY_SIZE(host_hcall)))
@@ -392,11 +393,14 @@ static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
392393

393394
static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
394395
{
396+
DECLARE_REG(u64, func_id, host_ctxt, 0);
395397
bool handled;
396398

397-
handled = kvm_host_psci_handler(host_ctxt);
399+
func_id &= ~ARM_SMCCC_CALL_HINTS;
400+
401+
handled = kvm_host_psci_handler(host_ctxt, func_id);
398402
if (!handled)
399-
handled = kvm_host_ffa_handler(host_ctxt);
403+
handled = kvm_host_ffa_handler(host_ctxt, func_id);
400404
if (!handled)
401405
default_host_smc_handler(host_ctxt);
402406

arch/arm64/kvm/hyp/nvhe/psci-relay.c

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -273,9 +273,8 @@ static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_
273273
}
274274
}
275275

276-
bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt)
276+
bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
277277
{
278-
DECLARE_REG(u64, func_id, host_ctxt, 0);
279278
unsigned long ret;
280279

281280
switch (kvm_host_psci_config.version) {

arch/arm64/kvm/mmu.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -652,6 +652,9 @@ int hyp_alloc_private_va_range(size_t size, unsigned long *haddr)
652652

653653
mutex_unlock(&kvm_hyp_pgd_mutex);
654654

655+
if (!ret)
656+
*haddr = base;
657+
655658
return ret;
656659
}
657660

arch/riscv/kvm/vcpu_onereg.c

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -460,8 +460,11 @@ static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
460460
reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
461461
return -ENOENT;
462462

463-
*reg_val = 0;
464463
host_isa_ext = kvm_isa_ext_arr[reg_num];
464+
if (!__riscv_isa_extension_available(NULL, host_isa_ext))
465+
return -ENOENT;
466+
467+
*reg_val = 0;
465468
if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
466469
*reg_val = 1; /* Mark the given extension as available */
467470

@@ -842,7 +845,7 @@ static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
842845
u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;
843846

844847
isa_ext = kvm_isa_ext_arr[i];
845-
if (!__riscv_isa_extension_available(vcpu->arch.isa, isa_ext))
848+
if (!__riscv_isa_extension_available(NULL, isa_ext))
846849
continue;
847850

848851
if (uindices) {

arch/x86/include/asm/kvm_host.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1419,7 +1419,6 @@ struct kvm_arch {
14191419
* the thread holds the MMU lock in write mode.
14201420
*/
14211421
spinlock_t tdp_mmu_pages_lock;
1422-
struct workqueue_struct *tdp_mmu_zap_wq;
14231422
#endif /* CONFIG_X86_64 */
14241423

14251424
/*
@@ -1835,7 +1834,7 @@ void kvm_mmu_vendor_module_exit(void);
18351834

18361835
void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
18371836
int kvm_mmu_create(struct kvm_vcpu *vcpu);
1838-
int kvm_mmu_init_vm(struct kvm *kvm);
1837+
void kvm_mmu_init_vm(struct kvm *kvm);
18391838
void kvm_mmu_uninit_vm(struct kvm *kvm);
18401839

18411840
void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu);

arch/x86/kvm/mmu/mmu.c

Lines changed: 5 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -6167,20 +6167,15 @@ static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
61676167
return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
61686168
}
61696169

6170-
int kvm_mmu_init_vm(struct kvm *kvm)
6170+
void kvm_mmu_init_vm(struct kvm *kvm)
61716171
{
6172-
int r;
6173-
61746172
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
61756173
INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
61766174
INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
61776175
spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
61786176

6179-
if (tdp_mmu_enabled) {
6180-
r = kvm_mmu_init_tdp_mmu(kvm);
6181-
if (r < 0)
6182-
return r;
6183-
}
6177+
if (tdp_mmu_enabled)
6178+
kvm_mmu_init_tdp_mmu(kvm);
61846179

61856180
kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache;
61866181
kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO;
@@ -6189,8 +6184,6 @@ int kvm_mmu_init_vm(struct kvm *kvm)
61896184

61906185
kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache;
61916186
kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO;
6192-
6193-
return 0;
61946187
}
61956188

61966189
static void mmu_free_vm_memory_caches(struct kvm *kvm)
@@ -6246,7 +6239,6 @@ static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_e
62466239
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
62476240
{
62486241
bool flush;
6249-
int i;
62506242

62516243
if (WARN_ON_ONCE(gfn_end <= gfn_start))
62526244
return;
@@ -6257,11 +6249,8 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
62576249

62586250
flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end);
62596251

6260-
if (tdp_mmu_enabled) {
6261-
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
6262-
flush = kvm_tdp_mmu_zap_leafs(kvm, i, gfn_start,
6263-
gfn_end, true, flush);
6264-
}
6252+
if (tdp_mmu_enabled)
6253+
flush = kvm_tdp_mmu_zap_leafs(kvm, gfn_start, gfn_end, flush);
62656254

62666255
if (flush)
62676256
kvm_flush_remote_tlbs_range(kvm, gfn_start, gfn_end - gfn_start);

arch/x86/kvm/mmu/mmu_internal.h

Lines changed: 7 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,12 @@ struct kvm_mmu_page {
5858

5959
bool tdp_mmu_page;
6060
bool unsync;
61-
u8 mmu_valid_gen;
61+
union {
62+
u8 mmu_valid_gen;
63+
64+
/* Only accessed under slots_lock. */
65+
bool tdp_mmu_scheduled_root_to_zap;
66+
};
6267

6368
/*
6469
* The shadow page can't be replaced by an equivalent huge page
@@ -100,13 +105,7 @@ struct kvm_mmu_page {
100105
struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
101106
tdp_ptep_t ptep;
102107
};
103-
union {
104-
DECLARE_BITMAP(unsync_child_bitmap, 512);
105-
struct {
106-
struct work_struct tdp_mmu_async_work;
107-
void *tdp_mmu_async_data;
108-
};
109-
};
108+
DECLARE_BITMAP(unsync_child_bitmap, 512);
110109

111110
/*
112111
* Tracks shadow pages that, if zapped, would allow KVM to create an NX

0 commit comments

Comments (0)