removed all usages of "ExAllocatePool", fixed some more NMI stuff

merge-requests/3/head
_xeroxz 4 years ago
parent 427091ae2e
commit ea8b460a02
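
The hunks below all apply one pattern: every runtime ExAllocatePool / MmAllocateContiguousMemory allocation is replaced by a statically allocated, page-aligned global placed in its own PE section, so nothing the hypervisor touches at vmxroot depends on pool memory. A minimal sketch of that pattern, using illustrative type/section names rather than the project's actual definitions:

#pragma section(".demo_vcpu", read, write)
#define DEMO_MAX_CORES 64

struct demo_vcpu_t
{
    // page aligned, mirroring vmxon_region_ctx / vmcs_ctx in the vcpu_ctx hunk below
    __declspec(align(0x1000)) unsigned char vmxon_region[0x1000];
    __declspec(align(0x1000)) unsigned char vmcs_region[0x1000];
};

struct demo_vmx_ctx_t
{
    unsigned int vcpu_count;
    demo_vcpu_t vcpus[DEMO_MAX_CORES]; // fixed-size array instead of pool-allocated pointers
};

// before: g_vmx_ctx = reinterpret_cast<hv::pvmx_ctx>(ExAllocatePool(NonPagedPool, sizeof hv::vmx_ctx));
// after:  the whole context is baked into the driver image, in its own section
__declspec(allocate(".demo_vcpu")) inline demo_vmx_ctx_t g_demo_vmx_ctx;

The real definitions follow the same shape: vmx_ctx now holds a fixed vcpus[MAX_CORE_COUNT] array of page-aligned vcpu_ctx structures (hv_types.hpp), the host IST stacks and EPT tables get their own sections (idt.hpp, mm.hpp), and g_vmx_ctx itself lands in a ".vcpu" section (vmxon.hpp).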

@@ -66,6 +66,7 @@
<ClCompile>
<LanguageStandard>stdcpp17</LanguageStandard>
<TreatWarningAsError>false</TreatWarningAsError>
<Optimization>Disabled</Optimization>
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
@@ -77,6 +78,7 @@
<LanguageStandard>stdcpp17</LanguageStandard>
<TreatWarningAsError>false</TreatWarningAsError>
<PreprocessorDefinitions>DBG_PRINT_BOOL;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<Optimization>Disabled</Optimization>
</ClCompile>
</ItemDefinitionGroup>
<ItemGroup>

@@ -3,12 +3,10 @@
auto drv_entry(PDRIVER_OBJECT driver_object, PUNICODE_STRING registry_path) -> NTSTATUS
{
vmxon::g_vmx_ctx =
reinterpret_cast<hv::pvmx_ctx>(
ExAllocatePool(NonPagedPool, sizeof hv::vmx_ctx));
UNREFERENCED_PARAMETER(registry_path);
// setup vcpu structures (vmxon region and vmcs...)
vmxon::create_vcpus(vmxon::g_vmx_ctx);
vmxon::create_vcpus(&vmxon::g_vmx_ctx);
cr3 cr3_value;
cr3_value.flags = __readcr3();

@@ -46,6 +46,8 @@ auto exit_handler(hv::pguest_registers regs) -> void
regs->rdx = result[3];
break;
}
// shouldn't get an NMI-window exit while the LP is already executing an NMI...
// so it should be safe to inject an NMI here...
case VMX_EXIT_REASON_NMI_WINDOW:
{
vmentry_interrupt_information interrupt{};
@@ -56,6 +58,7 @@ auto exit_handler(hv::pguest_registers regs) -> void
__vmx_vmwrite(VMCS_CTRL_VMENTRY_INTERRUPTION_INFORMATION_FIELD, interrupt.flags);
__vmx_vmwrite(VMCS_VMEXIT_INTERRUPTION_ERROR_CODE, NULL);
// turn off NMI window exiting since we handled the NMI...
ia32_vmx_procbased_ctls_register procbased_ctls;
__vmx_vmread(VMCS_CTRL_PROCESSOR_BASED_VM_EXECUTION_CONTROLS, &procbased_ctls.flags);
@@ -105,7 +108,7 @@ auto exit_handler(hv::pguest_registers regs) -> void
interrupt.deliver_error_code = true;
__vmx_vmwrite(VMCS_CTRL_VMENTRY_INTERRUPTION_INFORMATION_FIELD, interrupt.flags);
__vmx_vmwrite(VMCS_VMEXIT_INTERRUPTION_ERROR_CODE, g_vcpu->error_code);
__vmx_vmwrite(VMCS_VMEXIT_INTERRUPTION_ERROR_CODE, g_vcpu.error_code);
}
return; // don't advance rip...
}
@@ -136,7 +139,7 @@ auto exit_handler(hv::pguest_registers regs) -> void
interrupt.deliver_error_code = true;
__vmx_vmwrite(VMCS_CTRL_VMENTRY_INTERRUPTION_INFORMATION_FIELD, interrupt.flags);
__vmx_vmwrite(VMCS_VMEXIT_INTERRUPTION_ERROR_CODE, g_vcpu->error_code);
__vmx_vmwrite(VMCS_VMEXIT_INTERRUPTION_ERROR_CODE, g_vcpu.error_code);
}
return; // don't advance rip...
}
@@ -167,7 +170,7 @@ auto exit_handler(hv::pguest_registers regs) -> void
interrupt.deliver_error_code = true;
__vmx_vmwrite(VMCS_CTRL_VMENTRY_INTERRUPTION_INFORMATION_FIELD, interrupt.flags);
__vmx_vmwrite(VMCS_VMEXIT_INTERRUPTION_ERROR_CODE, g_vcpu->error_code);
__vmx_vmwrite(VMCS_VMEXIT_INTERRUPTION_ERROR_CODE, g_vcpu.error_code);
}
return; // don't advance rip...
}

@@ -18,6 +18,7 @@ using s64 = long long;
// didn't find it in intrin.h...?
extern "C" void _sgdt(void*);
#pragma intrinsic(_sgdt);
#define MAX_CORE_COUNT 64
#ifdef DBG_PRINT_BOOL
#define DBG_PRINT(format, ...) \
@@ -345,10 +346,12 @@ namespace hv
typedef struct _tss64
{
u32 reserved;
u32 reserved0;
u64 privilege_stacks[3];
// IST index 0 is not used... it's reserved...
u64 interrupt_stack_table[8];
u64 reserved_1;
u64 interrupt_stack_table[7];
u16 reserved_2;
u16 iomap_base;
} tss64, *ptss64;
@@ -677,23 +680,22 @@ namespace hv
typedef struct _vcpu_ctx
{
pvmxon_region_ctx vmxon;
pvmcs_ctx vmcs;
__declspec(align(PAGE_SIZE)) vmxon_region_ctx vmxon;
__declspec(align(PAGE_SIZE)) vmcs_ctx vmcs;
__declspec(align(16)) u8 host_stack[HOST_STACK_PAGES];
segment_descriptor_64 gdt[8192];
tss64 tss;
u64 vmcs_phys;
u64 vmxon_phys;
u64 host_stack;
u64 error_code;
u64 nmi_code;
tss64 tss;
segment_descriptor_64* gdt;
} vcpu_ctx, * pvcpu_ctx;
typedef struct _vmx_ctx
{
u32 vcpu_count;
pvcpu_ctx* vcpus;
vcpu_ctx vcpus[MAX_CORE_COUNT];
} vmx_ctx, *pvmx_ctx;
typedef struct _segment_info_ctx

@@ -18137,6 +18137,7 @@ typedef struct
*/
typedef union
{
uint64_t flags;
struct
{
/**
@@ -18187,8 +18188,6 @@ typedef union
#define EPT_POINTER_PAGE_FRAME_NUMBER(_) (((_) >> 12) & 0xFFFFFFFFF)
uint64_t reserved2 : 16;
};
uint64_t flags;
} ept_pointer;
/**

@@ -3,7 +3,7 @@
auto seh_handler_ecode(hv::pidt_regs_ecode_t regs) -> void
{
g_vcpu->error_code = regs->error_code;
g_vcpu.error_code = regs->error_code;
const auto rva = regs->rip - reinterpret_cast<u64>(idt::image_base);
const auto nt_headers = reinterpret_cast<IMAGE_NT_HEADERS64*>(
reinterpret_cast<u64>(idt::image_base) +
@@ -93,7 +93,7 @@ auto seh_handler(hv::pidt_regs_t regs) -> void
}
}
auto nmi_handler(hv::pidt_regs_t regs) -> void
auto nmi_handler() -> void
{
ia32_vmx_procbased_ctls_register procbased_ctls;
__vmx_vmread(VMCS_CTRL_PROCESSOR_BASED_VM_EXECUTION_CONTROLS, &procbased_ctls.flags);
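
Taken together with the NMI_WINDOW case in exit_handler above, host NMI handling after this commit splits into two halves. A hedged sketch of the flow (bit and field names assume the ia32-doc style headers used elsewhere in the project; the real handler body is truncated above, so treat this as an outline):

auto nmi_handler() -> void
{
    // an NMI arrived while the LP was in vmxroot; it can't be delivered here,
    // so request an NMI-window exit and let the exit handler inject it...
    ia32_vmx_procbased_ctls_register procbased_ctls;
    __vmx_vmread(VMCS_CTRL_PROCESSOR_BASED_VM_EXECUTION_CONTROLS, &procbased_ctls.flags);
    procbased_ctls.nmi_window_exiting = true;
    __vmx_vmwrite(VMCS_CTRL_PROCESSOR_BASED_VM_EXECUTION_CONTROLS, procbased_ctls.flags);
}

// the VMX_EXIT_REASON_NMI_WINDOW case in exit_handler (shown above) then injects the
// NMI via VMCS_CTRL_VMENTRY_INTERRUPTION_INFORMATION_FIELD and clears
// nmi_window_exiting again, since the NMI has been delivered to the guest.

This is also why nmi_handler no longer takes a register frame, and why the idt.asm stub below drops its mov rcx, rsp before the call.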

@@ -5,19 +5,28 @@
#include "vmxon.hpp"
#pragma section(".idt", read, write)
#pragma section(".nmi_stk", read, write)
#pragma section(".pf_stk", read, write)
#pragma section(".de_stk", read, write)
#pragma section(".gp_stk", read, write)
extern "C" void __gp_handler(void);
extern "C" void __pf_handler(void);
extern "C" void __de_handler(void);
extern "C" void __nmi_handler(void);
extern "C" void nmi_handler(hv::pidt_regs_t regs);
extern "C" void nmi_handler(void);
extern "C" void seh_handler(hv::pidt_regs_t regs);
extern "C" void seh_handler_ecode(hv::pidt_regs_ecode_t regs);
namespace idt
{
__declspec(allocate(".nmi_stk")) inline u8 nmi_stk[HOST_STACK_SIZE];
__declspec(allocate(".pf_stk")) inline u8 pf_stk[HOST_STACK_SIZE];
__declspec(allocate(".de_stk")) inline u8 de_stk[HOST_STACK_SIZE];
__declspec(allocate(".gp_stk")) inline u8 gp_stk[HOST_STACK_SIZE];
__declspec(allocate(".idt")) inline hv::idt_entry_t table[256];
enum ist_idx : u8 { nmi = 3, de = 4, pf = 5, gp = 6 };
enum ist_idx : u8 { nmi = 4, de = 5, pf = 6, gp = 7 };
inline void* image_base = nullptr; // used for SEH...
auto create_entry(hv::idt_addr_t idt_handler, u8 ist_index) -> hv::idt_entry_t;

@@ -20,7 +20,6 @@ __nmi_handler proc
push r14
push r15
mov rcx, rsp
sub rsp, 20h
call nmi_handler
add rsp, 20h
@@ -125,7 +124,7 @@ __gp_handler proc
pop rcx
pop rbx
pop rax
add rsp, 8 ; remove exception code on the stack...
add rsp, 8 ; remove error code on the stack...
iretq
__gp_handler endp

@@ -4,8 +4,29 @@
#define PML4_SELF_REF 255
#pragma section(".pml4", read, write)
#pragma section(".epml4", read, write)
#pragma section(".epdpt", read, write)
#pragma section(".epd", read, write)
#pragma section(".ept", read, write)
namespace mm
{
using epml4_t = ept_pml4[512];
using ept_t = epte[512];
using epdpt_t = union
{
epdpte entry_4kb[512];
epdpte_1gb entry_1gb[512];
};
using epd_t = union
{
epde entry_4kb[512];
epde_2mb entry_2mb[512];
};
typedef union _virt_addr_t
{
u64 value;
@@ -127,10 +148,14 @@ namespace mm
enum class map_type{ dest, src };
inline const ppml4e vmxroot_pml4 = reinterpret_cast<ppml4e>(0x7fbfdfeff000);
// make sure this is 4kb aligned or you are going to be meeting allah...
__declspec(allocate(".pml4")) inline pml4e pml4[512];
__declspec(allocate(".epml4")) inline epml4_t epml4;
__declspec(allocate(".epdpt")) inline epdpt_t epdpt[64];
__declspec(allocate(".epd")) inline epd_t epd[128];
__declspec(allocate(".ept")) inline ept_t ept[256];
// translate vmxroot addresses...
auto translate(virt_addr_t virt_addr) -> u64;

@@ -164,6 +164,7 @@ namespace vmcs
procbased_ctls2.enable_rdtscp = true;
procbased_ctls2.enable_xsaves = true;
procbased_ctls2.conceal_vmx_from_pt = true;
//procbased_ctls2.enable_ept = true;
if (vmx_basic.vmx_controls)
{
@@ -210,6 +211,13 @@ namespace vmcs
__vmx_vmwrite(VMCS_CTRL_VMEXIT_CONTROLS, exit_ctls.flags);
}
/*ept_pointer eptp{};
eptp.memory_type = MEMORY_TYPE_WRITE_BACK;
eptp.enable_access_and_dirty_flags = true;
eptp.page_walk_length = EPT_PAGE_WALK_LENGTH_4;
eptp.page_frame_number = reinterpret_cast<u64>(&mm::epml4) >> 12;
__vmx_vmwrite(VMCS_CTRL_EPT_POINTER, eptp.flags);*/
msr_fix_value.flags = __readmsr(IA32_VMX_PROCBASED_CTLS2);
procbased_ctls2.flags &= msr_fix_value.allowed_1_settings;
procbased_ctls2.flags |= msr_fix_value.allowed_0_settings;

@@ -3,7 +3,7 @@
auto vmxlaunch::init_vmcs(cr3 cr3_value) -> void
{
const auto vcpu =
vmxon::g_vmx_ctx->vcpus[
&vmxon::g_vmx_ctx.vcpus[
KeGetCurrentProcessorNumber()];
__vmx_vmclear(&vcpu->vmcs_phys);
@@ -15,27 +15,21 @@ auto vmxlaunch::init_vmcs(cr3 cr3_value) -> void
const auto [tr_descriptor, tr_rights, tr_limit, tr_base] =
gdt::get_info(gdt_value, segment_selector{ readtr() });
// copy windows TSS and windows GDT...
// change TSS base to new TSS...
hv::segment_descriptor_addr_t tss{ &vcpu->tss };
memcpy(&vcpu->tss, (void*)tr_base, sizeof hv::tss64);
memcpy(vcpu->gdt, (void*)gdt_value.base_address, PAGE_SIZE);
memcpy(&vcpu->tss, reinterpret_cast<void*>(tr_base), sizeof hv::tss64);
memcpy(vcpu->gdt, reinterpret_cast<void*>(gdt_value.base_address), PAGE_SIZE);
vcpu->tss.interrupt_stack_table[idt::ist_idx::pf] =
reinterpret_cast<u64>(ExAllocatePool(NonPagedPool,
PAGE_SIZE * HOST_STACK_PAGES)) + (PAGE_SIZE * HOST_STACK_PAGES);
reinterpret_cast<u64>(idt::pf_stk) + sizeof idt::pf_stk;
vcpu->tss.interrupt_stack_table[idt::ist_idx::gp] =
reinterpret_cast<u64>(ExAllocatePool(NonPagedPool,
PAGE_SIZE * HOST_STACK_PAGES)) + (PAGE_SIZE * HOST_STACK_PAGES);
reinterpret_cast<u64>(idt::gp_stk) + sizeof idt::gp_stk;
vcpu->tss.interrupt_stack_table[idt::ist_idx::de] =
reinterpret_cast<u64>(ExAllocatePool(NonPagedPool,
PAGE_SIZE * HOST_STACK_PAGES)) + (PAGE_SIZE * HOST_STACK_PAGES);
reinterpret_cast<u64>(idt::de_stk) + sizeof idt::de_stk;
vcpu->tss.interrupt_stack_table[idt::ist_idx::nmi] =
reinterpret_cast<u64>(ExAllocatePool(NonPagedPool,
PAGE_SIZE * HOST_STACK_PAGES)) + (PAGE_SIZE * HOST_STACK_PAGES);
reinterpret_cast<u64>(idt::nmi_stk) + sizeof idt::nmi_stk;
const auto tr_idx = segment_selector{ readtr() }.idx;
vcpu->gdt[tr_idx].base_address_upper = tss.upper;
@@ -44,7 +38,8 @@ auto vmxlaunch::init_vmcs(cr3 cr3_value) -> void
vcpu->gdt[tr_idx].base_address_low = tss.low;
vmcs::setup_host(&vmxexit_handler,
vcpu->host_stack, cr3_value, (u64)vcpu->gdt);
reinterpret_cast<u64>(vcpu->host_stack),
cr3_value, reinterpret_cast<u64>(vcpu->gdt));
vmcs::setup_guest();
vmcs::setup_controls();

@@ -10,17 +10,11 @@ namespace vmxon
hv::vmx_basic_msr_t vmx_basic;
vmx_basic.control = __readmsr(IA32_VMX_BASIC);
vcpu_ctx->vmxon =
reinterpret_cast<hv::pvmxon_region_ctx>(
MmAllocateContiguousMemory(PAGE_SIZE, mem_range));
vcpu_ctx->vmxon_phys =
MmGetPhysicalAddress(vcpu_ctx->vmxon).QuadPart;
RtlSecureZeroMemory(
vcpu_ctx->vmxon, PAGE_SIZE);
MmGetPhysicalAddress(&vcpu_ctx->vmxon).QuadPart;
vcpu_ctx->vmxon->header
vcpu_ctx->vmxon
.header
.bits
.revision_identifier =
vmx_basic.bits
@@ -35,17 +29,11 @@ namespace vmxon
hv::vmx_basic_msr_t vmx_basic;
vmx_basic.control = __readmsr(IA32_VMX_BASIC);
vcpu_ctx->vmcs =
reinterpret_cast<hv::pvmcs_ctx>(
MmAllocateContiguousMemory(PAGE_SIZE, mem_range));
vcpu_ctx->vmcs_phys =
MmGetPhysicalAddress(vcpu_ctx->vmcs).QuadPart;
MmGetPhysicalAddress(&vcpu_ctx->vmcs).QuadPart;
RtlSecureZeroMemory(
vcpu_ctx->vmcs, PAGE_SIZE);
vcpu_ctx->vmcs->header
vcpu_ctx->vmcs
.header
.bits
.revision_identifier =
vmx_basic.bits
@@ -58,31 +46,10 @@ namespace vmxon
KeQueryActiveProcessorCountEx(
ALL_PROCESSOR_GROUPS);
vmx_ctx->vcpus =
reinterpret_cast<hv::pvcpu_ctx*>(
ExAllocatePool(NonPagedPool,
sizeof(hv::pvcpu_ctx) * vmx_ctx->vcpu_count));
for (auto idx = 0u; idx < g_vmx_ctx->vcpu_count; ++idx)
for (auto idx = 0u; idx < vmx_ctx->vcpu_count; ++idx)
{
vmx_ctx->vcpus[idx] =
reinterpret_cast<hv::pvcpu_ctx>(
ExAllocatePool(NonPagedPool, sizeof hv::vcpu_ctx));
vmx_ctx->vcpus[idx]->host_stack =
reinterpret_cast<u64>(
ExAllocatePool(NonPagedPool,
PAGE_SIZE * HOST_STACK_PAGES));
vmx_ctx->vcpus[idx]->gdt =
reinterpret_cast<segment_descriptor_64*>(
ExAllocatePool(NonPagedPool, PAGE_SIZE));
RtlZeroMemory(reinterpret_cast<void*>(
vmx_ctx->vcpus[idx]->host_stack), PAGE_SIZE * HOST_STACK_PAGES);
create_vmxon_region(vmx_ctx->vcpus[idx]);
create_vmcs(vmx_ctx->vcpus[idx]);
create_vmxon_region(&vmx_ctx->vcpus[idx]);
create_vmcs(&vmx_ctx->vcpus[idx]);
}
}
@@ -108,7 +75,7 @@ namespace vmxon
const auto vmxon_result =
__vmx_on((unsigned long long*)
&vmxon::g_vmx_ctx->vcpus[
KeGetCurrentProcessorNumber()]->vmxon_phys);
&vmxon::g_vmx_ctx.vcpus[
KeGetCurrentProcessorNumber()].vmxon_phys);
}
}

@@ -1,5 +1,6 @@
#pragma once
#include "hv_types.hpp"
#pragma section(".vcpu", read, write)
inline auto get_cpu_num() -> u32
{
@@ -12,7 +13,7 @@ inline auto get_cpu_num() -> u32
}
#define g_vcpu \
vmxon::g_vmx_ctx->vcpus[get_cpu_num()]
vmxon::g_vmx_ctx.vcpus[get_cpu_num()]
namespace vmxon
{
@@ -22,5 +23,5 @@ namespace vmxon
auto init_vmxon() -> void;
// vmxroot global object... contains all vcpu information...
inline hv::pvmx_ctx g_vmx_ctx;
__declspec(allocate(".vcpu")) inline hv::vmx_ctx g_vmx_ctx;
}