|
12 | 12 | #include "../kvm_util_internal.h" |
13 | 13 | #include "processor.h" |
14 | 14 |
|
| 15 | +#ifndef NUM_INTERRUPTS |
| 16 | +#define NUM_INTERRUPTS 256 |
| 17 | +#endif |
| 18 | + |
| 19 | +#define DEFAULT_CODE_SELECTOR 0x8 |
| 20 | +#define DEFAULT_DATA_SELECTOR 0x10 |
| 21 | + |
15 | 22 | /* Minimum physical address used for virtual translation tables. */ |
16 | 23 | #define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000 |
17 | 24 |
|
| 25 | +vm_vaddr_t exception_handlers; |
| 26 | + |
18 | 27 | /* Virtual translation table structure declarations */ |
19 | 28 | struct pageMapL4Entry { |
20 | 29 | uint64_t present:1; |
@@ -557,9 +566,9 @@ static void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_m |
557 | 566 | sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX); |
558 | 567 |
|
559 | 568 | kvm_seg_set_unusable(&sregs.ldt); |
560 | | - kvm_seg_set_kernel_code_64bit(vm, 0x8, &sregs.cs); |
561 | | - kvm_seg_set_kernel_data_64bit(vm, 0x10, &sregs.ds); |
562 | | - kvm_seg_set_kernel_data_64bit(vm, 0x10, &sregs.es); |
| 569 | + kvm_seg_set_kernel_code_64bit(vm, DEFAULT_CODE_SELECTOR, &sregs.cs); |
| 570 | + kvm_seg_set_kernel_data_64bit(vm, DEFAULT_DATA_SELECTOR, &sregs.ds); |
| 571 | + kvm_seg_set_kernel_data_64bit(vm, DEFAULT_DATA_SELECTOR, &sregs.es); |
563 | 572 | kvm_setup_tss_64bit(vm, &sregs.tr, 0x18, gdt_memslot, pgd_memslot); |
564 | 573 | break; |
565 | 574 |
|
@@ -1119,3 +1128,102 @@ void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits) |
1119 | 1128 | *va_bits = (entry->eax >> 8) & 0xff; |
1120 | 1129 | } |
1121 | 1130 | } |
| 1131 | + |
/*
 * 64-bit IDT gate descriptor, 16 bytes, in the exact layout the CPU
 * reads (see Intel SDM Vol. 3, "Interrupt Descriptor Table").  The
 * handler address is split across offset0/offset1/offset2; the fields
 * are populated by set_idt_entry() below.
 */
struct idt_entry {
	uint16_t offset0;	/* handler address bits 15:0 */
	uint16_t selector;	/* code segment selector */
	uint16_t ist : 3;	/* interrupt stack table index, 0 = none */
	uint16_t : 5;
	uint16_t type : 4;	/* gate type; 14 (0xE) = 64-bit interrupt gate */
	uint16_t : 1;
	uint16_t dpl : 2;	/* descriptor privilege level */
	uint16_t p : 1;		/* present bit */
	uint16_t offset1;	/* handler address bits 31:16 */
	uint32_t offset2;	/* handler address bits 63:32 */
	uint32_t reserved;
};
| 1144 | + |
| 1145 | +static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr, |
| 1146 | + int dpl, unsigned short selector) |
| 1147 | +{ |
| 1148 | + struct idt_entry *base = |
| 1149 | + (struct idt_entry *)addr_gva2hva(vm, vm->idt); |
| 1150 | + struct idt_entry *e = &base[vector]; |
| 1151 | + |
| 1152 | + memset(e, 0, sizeof(*e)); |
| 1153 | + e->offset0 = addr; |
| 1154 | + e->selector = selector; |
| 1155 | + e->ist = 0; |
| 1156 | + e->type = 14; |
| 1157 | + e->dpl = dpl; |
| 1158 | + e->p = 1; |
| 1159 | + e->offset1 = addr >> 16; |
| 1160 | + e->offset2 = addr >> 32; |
| 1161 | +} |
| 1162 | + |
/*
 * Guest-side helper: report an unhandled exception vector to the host
 * by writing @value to UNEXPECTED_VECTOR_PORT, which triggers a
 * KVM_EXIT_IO that assert_on_unhandled_exception() recognizes.
 */
void kvm_exit_unexpected_vector(uint32_t value)
{
	outl(UNEXPECTED_VECTOR_PORT, value);
}
| 1167 | + |
| 1168 | +void route_exception(struct ex_regs *regs) |
| 1169 | +{ |
| 1170 | + typedef void(*handler)(struct ex_regs *); |
| 1171 | + handler *handlers = (handler *)exception_handlers; |
| 1172 | + |
| 1173 | + if (handlers && handlers[regs->vector]) { |
| 1174 | + handlers[regs->vector](regs); |
| 1175 | + return; |
| 1176 | + } |
| 1177 | + |
| 1178 | + kvm_exit_unexpected_vector(regs->vector); |
| 1179 | +} |
| 1180 | + |
| 1181 | +void vm_init_descriptor_tables(struct kvm_vm *vm) |
| 1182 | +{ |
| 1183 | + extern void *idt_handlers; |
| 1184 | + int i; |
| 1185 | + |
| 1186 | + vm->idt = vm_vaddr_alloc(vm, getpagesize(), 0x2000, 0, 0); |
| 1187 | + vm->handlers = vm_vaddr_alloc(vm, 256 * sizeof(void *), 0x2000, 0, 0); |
| 1188 | + /* Handlers have the same address in both address spaces.*/ |
| 1189 | + for (i = 0; i < NUM_INTERRUPTS; i++) |
| 1190 | + set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0, |
| 1191 | + DEFAULT_CODE_SELECTOR); |
| 1192 | +} |
| 1193 | + |
| 1194 | +void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid) |
| 1195 | +{ |
| 1196 | + struct kvm_sregs sregs; |
| 1197 | + |
| 1198 | + vcpu_sregs_get(vm, vcpuid, &sregs); |
| 1199 | + sregs.idt.base = vm->idt; |
| 1200 | + sregs.idt.limit = NUM_INTERRUPTS * sizeof(struct idt_entry) - 1; |
| 1201 | + sregs.gdt.base = vm->gdt; |
| 1202 | + sregs.gdt.limit = getpagesize() - 1; |
| 1203 | + kvm_seg_set_kernel_data_64bit(NULL, DEFAULT_DATA_SELECTOR, &sregs.gs); |
| 1204 | + vcpu_sregs_set(vm, vcpuid, &sregs); |
| 1205 | + *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers; |
| 1206 | +} |
| 1207 | + |
| 1208 | +void vm_handle_exception(struct kvm_vm *vm, int vector, |
| 1209 | + void (*handler)(struct ex_regs *)) |
| 1210 | +{ |
| 1211 | + vm_vaddr_t *handlers = (vm_vaddr_t *)addr_gva2hva(vm, vm->handlers); |
| 1212 | + |
| 1213 | + handlers[vector] = (vm_vaddr_t)handler; |
| 1214 | +} |
| 1215 | + |
| 1216 | +void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid) |
| 1217 | +{ |
| 1218 | + if (vcpu_state(vm, vcpuid)->exit_reason == KVM_EXIT_IO |
| 1219 | + && vcpu_state(vm, vcpuid)->io.port == UNEXPECTED_VECTOR_PORT |
| 1220 | + && vcpu_state(vm, vcpuid)->io.size == 4) { |
| 1221 | + /* Grab pointer to io data */ |
| 1222 | + uint32_t *data = (void *)vcpu_state(vm, vcpuid) |
| 1223 | + + vcpu_state(vm, vcpuid)->io.data_offset; |
| 1224 | + |
| 1225 | + TEST_ASSERT(false, |
| 1226 | + "Unexpected vectored event in guest (vector:0x%x)", |
| 1227 | + *data); |
| 1228 | + } |
| 1229 | +} |
0 commit comments