Skip to content

Commit 97a71a5

Browse files
jsmattsonjr authored and bonzini
committed
KVM: selftests: test behavior of unmapped L2 APIC-access address
Add a regression test for commit 671ddc7 ("KVM: nVMX: Don't leak L1 MMIO regions to L2"). First, check to see that an L2 guest can be launched with a valid APIC-access address that is backed by a page of L1 physical memory. Next, set the APIC-access address to a (valid) L1 physical address that is not backed by memory. KVM can't handle this situation, so resuming L2 should result in a KVM exit for internal error (emulation). Signed-off-by: Jim Mattson <jmattson@google.com> Reviewed-by: Ricardo Koller <ricarkol@google.com> Reviewed-by: Peter Shier <pshier@google.com> Message-Id: <20201026180922.3120555-1-jmattson@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent d383b31 commit 97a71a5

5 files changed

Lines changed: 159 additions & 0 deletions

File tree

tools/testing/selftests/kvm/.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515
/x86_64/vmx_preemption_timer_test
1616
/x86_64/svm_vmcall_test
1717
/x86_64/sync_regs_test
18+
/x86_64/vmx_apic_access_test
1819
/x86_64/vmx_close_while_nested_test
1920
/x86_64/vmx_dirty_log_test
2021
/x86_64/vmx_set_nested_state_test

tools/testing/selftests/kvm/Makefile

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -49,6 +49,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/state_test
4949
TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test
5050
TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
5151
TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
52+
TEST_GEN_PROGS_x86_64 += x86_64/vmx_apic_access_test
5253
TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
5354
TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test
5455
TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test

tools/testing/selftests/kvm/include/x86_64/vmx.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -573,6 +573,10 @@ struct vmx_pages {
573573
void *eptp_hva;
574574
uint64_t eptp_gpa;
575575
void *eptp;
576+
577+
void *apic_access_hva;
578+
uint64_t apic_access_gpa;
579+
void *apic_access;
576580
};
577581

578582
union vmx_basic {
@@ -615,5 +619,7 @@ void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
615619
uint32_t memslot, uint32_t eptp_memslot);
616620
void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
617621
uint32_t eptp_memslot);
622+
void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm,
623+
uint32_t eptp_memslot);
618624

619625
#endif /* SELFTEST_KVM_VMX_H */

tools/testing/selftests/kvm/lib/x86_64/vmx.c

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -542,3 +542,12 @@ void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
542542
vmx->eptp_hva = addr_gva2hva(vm, (uintptr_t)vmx->eptp);
543543
vmx->eptp_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->eptp);
544544
}
545+
546+
void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm,
547+
uint32_t eptp_memslot)
548+
{
549+
vmx->apic_access = (void *)vm_vaddr_alloc(vm, getpagesize(),
550+
0x10000, 0, 0);
551+
vmx->apic_access_hva = addr_gva2hva(vm, (uintptr_t)vmx->apic_access);
552+
vmx->apic_access_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->apic_access);
553+
}
Lines changed: 142 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,142 @@
1+
// SPDX-License-Identifier: GPL-2.0-only
2+
/*
3+
* vmx_apic_access_test
4+
*
5+
* Copyright (C) 2020, Google LLC.
6+
*
7+
* This work is licensed under the terms of the GNU GPL, version 2.
8+
*
9+
* The first subtest simply checks to see that an L2 guest can be
10+
* launched with a valid APIC-access address that is backed by a
11+
* page of L1 physical memory.
12+
*
13+
* The second subtest sets the APIC-access address to a (valid) L1
14+
* physical address that is not backed by memory. KVM can't handle
15+
* this situation, so resuming L2 should result in a KVM exit for
16+
* internal error (emulation). This is not an architectural
17+
* requirement. It is just a shortcoming of KVM. The internal error
18+
* is unfortunate, but it's better than what used to happen!
19+
*/
20+
21+
#include "test_util.h"
22+
#include "kvm_util.h"
23+
#include "processor.h"
24+
#include "vmx.h"
25+
26+
#include <string.h>
27+
#include <sys/ioctl.h>
28+
29+
#include "kselftest.h"
30+
31+
#define VCPU_ID 0
32+
33+
/* The virtual machine object. */
34+
static struct kvm_vm *vm;
35+
36+
/* L2 guest: immediately exit back to L1 via VMCALL. */
static void l2_guest_code(void)
{
	/* Exit to L1 */
	__asm__ __volatile__("vmcall");
}
41+
42+
/*
 * L1 guest: launch L2 twice, first with a memory-backed APIC-access
 * address, then (after a VMCALL exit from L2) with @high_gpa, an L1
 * physical address that is not backed by memory.  Each GUEST_SYNC
 * reports the current APIC-access address to the host so it knows
 * which subtest is running.
 */
static void l1_guest_code(struct vmx_pages *vmx_pages, unsigned long high_gpa)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	uint32_t control;

	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_vmcs(vmx_pages));

	/* Prepare the VMCS for L2 execution. */
	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
	/*
	 * Enable "virtualize APIC accesses" (a secondary control, so the
	 * primary control activating secondary controls must be set too)
	 * and point APIC_ACCESS_ADDR at the backed page set up by
	 * prepare_virtualize_apic_accesses().
	 */
	control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
	control |= CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
	control = vmreadz(SECONDARY_VM_EXEC_CONTROL);
	control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
	vmwrite(SECONDARY_VM_EXEC_CONTROL, control);
	vmwrite(APIC_ACCESS_ADDR, vmx_pages->apic_access_gpa);

	/* Try to launch L2 with the memory-backed APIC-access address. */
	GUEST_SYNC(vmreadz(APIC_ACCESS_ADDR));
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	/* Repoint the APIC-access address at the unbacked page. */
	vmwrite(APIC_ACCESS_ADDR, high_gpa);

	/*
	 * Try to resume L2 with the unbacked APIC-access address.  The
	 * host is expected to stop the vCPU with an internal error, so
	 * the asserts below should never be reached; they are here to
	 * fail loudly if KVM unexpectedly lets L2 run.
	 */
	GUEST_SYNC(vmreadz(APIC_ACCESS_ADDR));
	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_DONE();
}
76+
77+
/*
 * Host side: run the L1 guest and track (via UCALL_SYNC) which
 * APIC-access address it is currently using.  While the address is the
 * backed page, expect normal KVM_EXIT_IO ucall traffic; once it equals
 * high_gpa (the unbacked page), expect KVM_EXIT_INTERNAL_ERROR with
 * suberror KVM_INTERNAL_ERROR_EMULATION.
 */
int main(int argc, char *argv[])
{
	/* Last APIC-access address reported by the guest; ~0ul = none yet. */
	unsigned long apic_access_addr = ~0ul;
	unsigned int paddr_width;
	unsigned int vaddr_width;
	vm_vaddr_t vmx_pages_gva;
	unsigned long high_gpa;
	struct vmx_pages *vmx;
	bool done = false;

	nested_vmx_check_supported();

	vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());

	/*
	 * Pick the highest page-aligned L1 physical address; skip the test
	 * if the VM's default memory already covers it (i.e. no unbacked
	 * page exists below the physical address limit).
	 */
	kvm_get_cpu_address_width(&paddr_width, &vaddr_width);
	high_gpa = (1ul << paddr_width) - getpagesize();
	if ((unsigned long)DEFAULT_GUEST_PHY_PAGES * getpagesize() > high_gpa) {
		print_skip("No unbacked physical page available");
		exit(KSFT_SKIP);
	}

	vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
	prepare_virtualize_apic_accesses(vmx, vm, 0);
	vcpu_args_set(vm, VCPU_ID, 2, vmx_pages_gva, high_gpa);

	while (!done) {
		volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
		struct ucall uc;

		vcpu_run(vm, VCPU_ID);
		/*
		 * Once the guest has switched to the unbacked address,
		 * the only acceptable outcome is an emulation internal
		 * error; anything else is a test failure.
		 */
		if (apic_access_addr == high_gpa) {
			TEST_ASSERT(run->exit_reason ==
				    KVM_EXIT_INTERNAL_ERROR,
				    "Got exit reason other than KVM_EXIT_INTERNAL_ERROR: %u (%s)\n",
				    run->exit_reason,
				    exit_reason_str(run->exit_reason));
			TEST_ASSERT(run->internal.suberror ==
				    KVM_INTERNAL_ERROR_EMULATION,
				    "Got internal suberror other than KVM_INTERNAL_ERROR_EMULATION: %u\n",
				    run->internal.suberror);
			break;
		}
		/* Otherwise, every exit should be ucall I/O traffic. */
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
			    run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_ABORT:
			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
				  __FILE__, uc.args[1]);
			/* NOT REACHED */
		case UCALL_SYNC:
			/* Guest reports its current APIC-access address. */
			apic_access_addr = uc.args[1];
			break;
		case UCALL_DONE:
			done = true;
			break;
		default:
			TEST_ASSERT(false, "Unknown ucall %lu", uc.cmd);
		}
	}
	kvm_vm_free(vm);
	return 0;
}

0 commit comments

Comments
 (0)