Merge branch 'dev' into 'master'

removed all usages of "ExAllocatePool", fixed some more NMI stuff

See merge request _xeroxz/bluepill!3
_xeroxz 4 years ago
commit 3880ada942

@@ -66,6 +66,7 @@
     <ClCompile>
       <LanguageStandard>stdcpp17</LanguageStandard>
       <TreatWarningAsError>false</TreatWarningAsError>
+      <Optimization>Disabled</Optimization>
     </ClCompile>
   </ItemDefinitionGroup>
   <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
@@ -77,6 +78,7 @@
       <LanguageStandard>stdcpp17</LanguageStandard>
       <TreatWarningAsError>false</TreatWarningAsError>
       <PreprocessorDefinitions>DBG_PRINT_BOOL;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <Optimization>Disabled</Optimization>
    </ClCompile>
  </ItemDefinitionGroup>
  <ItemGroup>

@@ -3,12 +3,10 @@
 auto drv_entry(PDRIVER_OBJECT driver_object, PUNICODE_STRING registry_path) -> NTSTATUS
 {
-    vmxon::g_vmx_ctx =
-        reinterpret_cast<hv::pvmx_ctx>(
-            ExAllocatePool(NonPagedPool, sizeof hv::vmx_ctx));
+    UNREFERENCED_PARAMETER(registry_path);
 
     // setup vcpu structures (vmx on region and vmcs...)
-    vmxon::create_vcpus(vmxon::g_vmx_ctx);
+    vmxon::create_vcpus(&vmxon::g_vmx_ctx);
 
     cr3 cr3_value;
     cr3_value.flags = __readcr3();
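
The entry point no longer pulls the VMX context out of pool memory; g_vmx_ctx becomes a statically allocated global placed in its own image section (see the vmxon.hpp hunk at the bottom of this diff). For reference, a minimal sketch of that MSVC pattern, with a hypothetical section name and type rather than the ones this commit uses:

    // minimal sketch of MSVC static section placement (hypothetical names,
    // not code from this repository)...
    // #pragma section declares a named, writable image section and
    // __declspec(allocate) places the object there at link time, so there is
    // no ExAllocatePool call and nothing to free; the object lives as long
    // as the driver image does.
    #pragma section(".hv_ctx", read, write)

    struct example_ctx { unsigned long value; };

    __declspec(allocate(".hv_ctx")) example_ctx g_example_ctx{};

    // callers take the address of the global instead of storing a returned
    // pointer, mirroring vmxon::create_vcpus(&vmxon::g_vmx_ctx) above.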

@@ -46,6 +46,8 @@ auto exit_handler(hv::pguest_registers regs) -> void
            regs->rdx = result[3];
            break;
        }
+       // shouldnt get an exit when the LP is already executing an NMI...
+       // so it should be safe to inject an NMI here...
        case VMX_EXIT_REASON_NMI_WINDOW:
        {
            vmentry_interrupt_information interrupt{};
@@ -56,6 +58,7 @@ auto exit_handler(hv::pguest_registers regs) -> void
            __vmx_vmwrite(VMCS_CTRL_VMENTRY_INTERRUPTION_INFORMATION_FIELD, interrupt.flags);
            __vmx_vmwrite(VMCS_VMEXIT_INTERRUPTION_ERROR_CODE, NULL);
 
+           // turn off NMI window exiting since we handled the NMI...
            ia32_vmx_procbased_ctls_register procbased_ctls;
            __vmx_vmread(VMCS_CTRL_PROCESSOR_BASED_VM_EXECUTION_CONTROLS, &procbased_ctls.flags);
@@ -105,7 +108,7 @@ auto exit_handler(hv::pguest_registers regs) -> void
                interrupt.deliver_error_code = true;
                __vmx_vmwrite(VMCS_CTRL_VMENTRY_INTERRUPTION_INFORMATION_FIELD, interrupt.flags);
-               __vmx_vmwrite(VMCS_VMEXIT_INTERRUPTION_ERROR_CODE, g_vcpu->error_code);
+               __vmx_vmwrite(VMCS_VMEXIT_INTERRUPTION_ERROR_CODE, g_vcpu.error_code);
            }
            return; // dont advance rip...
        }
@@ -136,7 +139,7 @@ auto exit_handler(hv::pguest_registers regs) -> void
                interrupt.deliver_error_code = true;
                __vmx_vmwrite(VMCS_CTRL_VMENTRY_INTERRUPTION_INFORMATION_FIELD, interrupt.flags);
-               __vmx_vmwrite(VMCS_VMEXIT_INTERRUPTION_ERROR_CODE, g_vcpu->error_code);
+               __vmx_vmwrite(VMCS_VMEXIT_INTERRUPTION_ERROR_CODE, g_vcpu.error_code);
            }
            return; // dont advance rip...
        }
@@ -167,7 +170,7 @@ auto exit_handler(hv::pguest_registers regs) -> void
                interrupt.deliver_error_code = true;
                __vmx_vmwrite(VMCS_CTRL_VMENTRY_INTERRUPTION_INFORMATION_FIELD, interrupt.flags);
-               __vmx_vmwrite(VMCS_VMEXIT_INTERRUPTION_ERROR_CODE, g_vcpu->error_code);
+               __vmx_vmwrite(VMCS_VMEXIT_INTERRUPTION_ERROR_CODE, g_vcpu.error_code);
            }
            return; // dont advance rip...
        }
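
The NMI changes in this handler follow the usual VT-x pattern: an NMI taken while the core is in root operation cannot simply be dropped, so the host requests NMI-window exiting, and on the resulting VMX_EXIT_REASON_NMI_WINDOW exit the NMI is injected into the guest through the VM-entry interruption-information field before the window-exiting control is cleared again. A condensed sketch of that path, using the ia32-style names visible above (the numeric encodings are Intel SDM values; the helper name and exact assignments are assumptions, not lines from this file):

    // sketch: what handling a VMX_EXIT_REASON_NMI_WINDOW exit amounts to,
    // assuming the ia32.hpp-style types used throughout this diff...
    auto example_handle_nmi_window() -> void
    {
        // inject an NMI on the next VM-entry: vector 2, interruption type 2 (NMI)...
        vmentry_interrupt_information interrupt{};
        interrupt.vector = 2;
        interrupt.interruption_type = 2;
        interrupt.valid = true;
        __vmx_vmwrite(VMCS_CTRL_VMENTRY_INTERRUPTION_INFORMATION_FIELD, interrupt.flags);

        // the NMI has now been delivered, so stop asking for NMI-window exits...
        ia32_vmx_procbased_ctls_register procbased_ctls;
        __vmx_vmread(VMCS_CTRL_PROCESSOR_BASED_VM_EXECUTION_CONTROLS, &procbased_ctls.flags);
        procbased_ctls.nmi_window_exiting = false;
        __vmx_vmwrite(VMCS_CTRL_PROCESSOR_BASED_VM_EXECUTION_CONTROLS, procbased_ctls.flags);
    }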

@@ -18,6 +18,7 @@ using s64 = long long;
 // didnt find it in intrin.h... ?
 extern "C" void _sgdt(void*);
 #pragma intrinsic(_sgdt);
+#define MAX_CORE_COUNT 64
 
 #ifdef DBG_PRINT_BOOL
 #define DBG_PRINT(format, ...) \
@@ -345,10 +346,12 @@ namespace hv
     typedef struct _tss64
     {
-        u32 reserved;
+        u32 reserved0;
         u64 privilege_stacks[3];
+        // 0 is not used... 0 is reserved...
+        u64 interrupt_stack_table[8];
         u64 reserved_1;
-        u64 interrupt_stack_table[7];
         u16 reserved_2;
         u16 iomap_base;
     } tss64, *ptss64;
@@ -677,23 +680,22 @@ namespace hv
     typedef struct _vcpu_ctx
     {
-        pvmxon_region_ctx vmxon;
-        pvmcs_ctx vmcs;
+        __declspec(align(PAGE_SIZE)) vmxon_region_ctx vmxon;
+        __declspec(align(PAGE_SIZE)) vmcs_ctx vmcs;
+        __declspec(align(16)) u8 host_stack[HOST_STACK_PAGES];
+        segment_descriptor_64 gdt[8192];
+        tss64 tss;
         u64 vmcs_phys;
         u64 vmxon_phys;
-        u64 host_stack;
         u64 error_code;
         u64 nmi_code;
-        tss64 tss;
-        segment_descriptor_64* gdt;
     } vcpu_ctx, * pvcpu_ctx;
 
     typedef struct _vmx_ctx
     {
         u32 vcpu_count;
-        pvcpu_ctx* vcpus;
+        vcpu_ctx vcpus[MAX_CORE_COUNT];
     } vmx_ctx, *pvmx_ctx;
 
     typedef struct _segment_info_ctx
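
Embedding the VMXON region and VMCS directly in vcpu_ctx only works because the Intel SDM requires both regions to be 4 KiB aligned; the __declspec(align(PAGE_SIZE)) annotations, together with the section-placed g_vmx_ctx further down, provide that. A hedged sketch of compile-time checks one could place next to these declarations (assumes the hv types exactly as shown in this hunk; not part of the commit):

    // sketch: compile-time checks for the layout assumptions behind the new vcpu_ctx
    #include <cstddef>

    static_assert(alignof(hv::vcpu_ctx) >= PAGE_SIZE,
        "vcpu_ctx must be page aligned so the embedded regions are too");
    static_assert(offsetof(hv::vcpu_ctx, vmxon) % PAGE_SIZE == 0,
        "VMXON region must start on a 4KiB boundary (Intel SDM requirement)");
    static_assert(offsetof(hv::vcpu_ctx, vmcs) % PAGE_SIZE == 0,
        "VMCS must start on a 4KiB boundary (Intel SDM requirement)");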

@@ -18137,6 +18137,7 @@
  */
 typedef union
 {
+  uint64_t flags;
   struct
   {
     /**
@@ -18187,8 +18188,6 @@
 #define EPT_POINTER_PAGE_FRAME_NUMBER(_) (((_) >> 12) & 0xFFFFFFFFF)
     uint64_t reserved2 : 16;
   };
-
-  uint64_t flags;
 } ept_pointer;
 
 /**

@@ -3,7 +3,7 @@
 auto seh_handler_ecode(hv::pidt_regs_ecode_t regs) -> void
 {
-    g_vcpu->error_code = regs->error_code;
+    g_vcpu.error_code = regs->error_code;
     const auto rva = regs->rip - reinterpret_cast<u64>(idt::image_base);
     const auto nt_headers = reinterpret_cast<IMAGE_NT_HEADERS64*>(
         reinterpret_cast<u64>(idt::image_base) +
@@ -93,7 +93,7 @@ auto seh_handler(hv::pidt_regs_t regs) -> void
     }
 }
 
-auto nmi_handler(hv::pidt_regs_t regs) -> void
+auto nmi_handler() -> void
 {
     ia32_vmx_procbased_ctls_register procbased_ctls;
     __vmx_vmread(VMCS_CTRL_PROCESSOR_BASED_VM_EXECUTION_CONTROLS, &procbased_ctls.flags);
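
nmi_handler no longer receives a register frame (the assembly stub below stops passing rsp in rcx accordingly). Only the declaration and the vmread of the primary processor-based controls are visible in this hunk; based on the NMI-window handling above, the likely shape of the body is to request an NMI-window exit so the NMI can be re-injected into the guest once it becomes deliverable. A sketch of that assumed body:

    // sketch: host-IDT NMI handler that defers the NMI to the guest; only the
    // first two statements are visible in the hunk, the rest is assumed...
    auto nmi_handler() -> void
    {
        ia32_vmx_procbased_ctls_register procbased_ctls;
        __vmx_vmread(VMCS_CTRL_PROCESSOR_BASED_VM_EXECUTION_CONTROLS, &procbased_ctls.flags);

        // ask for a VM-exit as soon as the guest can accept an NMI; the
        // VMX_EXIT_REASON_NMI_WINDOW handler then injects it and clears this bit...
        procbased_ctls.nmi_window_exiting = true;
        __vmx_vmwrite(VMCS_CTRL_PROCESSOR_BASED_VM_EXECUTION_CONTROLS, procbased_ctls.flags);
    }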

@@ -5,19 +5,28 @@
 #include "vmxon.hpp"
 
 #pragma section(".idt", read, write)
+#pragma section(".nmi_stk", read, write)
+#pragma section(".pf_stk", read, write)
+#pragma section(".de_stk", read, write)
+#pragma section(".gp_stk", read, write)
 
 extern "C" void __gp_handler(void);
 extern "C" void __pf_handler(void);
 extern "C" void __de_handler(void);
 extern "C" void __nmi_handler(void);
-extern "C" void nmi_handler(hv::pidt_regs_t regs);
+extern "C" void nmi_handler(void);
 extern "C" void seh_handler(hv::pidt_regs_t regs);
 extern "C" void seh_handler_ecode(hv::pidt_regs_ecode_t regs);
 
 namespace idt
 {
+    __declspec(allocate(".nmi_stk")) inline u8 nmi_stk[HOST_STACK_SIZE];
+    __declspec(allocate(".pf_stk")) inline u8 pf_stk[HOST_STACK_SIZE];
+    __declspec(allocate(".de_stk")) inline u8 de_stk[HOST_STACK_SIZE];
+    __declspec(allocate(".gp_stk")) inline u8 gp_stk[HOST_STACK_SIZE];
     __declspec(allocate(".idt")) inline hv::idt_entry_t table[256];
-    enum ist_idx : u8 { nmi = 3, de = 4, pf = 5, gp = 6 };
+    enum ist_idx : u8 { nmi = 4, de = 5, pf = 6, gp = 7 };
 
     inline void* image_base = nullptr; // used for SEH...
     auto create_entry(hv::idt_addr_t idt_handler, u8 ist_index) -> hv::idt_entry_t;
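
The per-exception host stacks are now reserved statically in their own sections, and the ist_idx values shift from 3..6 to 4..7 because the new tss64 layout exposes interrupt_stack_table[8], where index 0 overlays the reserved field and IST1 therefore sits at index 1. Pointing a TSS slot at one of these stacks then needs no allocation, as the vmxlaunch hunk further down shows; a short sketch of that step for the NMI stack (the helper itself is hypothetical):

    // sketch: wiring a statically allocated stack into a TSS IST slot
    // (idt::nmi_stk and idt::ist_idx come from the declarations above)...
    inline void example_wire_nmi_ist(hv::tss64& tss)
    {
        // the IST entry holds the value the CPU loads into RSP, i.e. the
        // highest address of the array, since the stack grows downward...
        tss.interrupt_stack_table[idt::ist_idx::nmi] =
            reinterpret_cast<u64>(idt::nmi_stk) + sizeof idt::nmi_stk;
    }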

@@ -20,7 +20,6 @@ __nmi_handler proc
     push r14
     push r15
 
-    mov rcx, rsp
     sub rsp, 20h
     call nmi_handler
     add rsp, 20h
@@ -125,7 +124,7 @@ __gp_handler proc
     pop rcx
     pop rbx
     pop rax
 
-    add rsp, 8 ; remove exception code on the stack...
+    add rsp, 8 ; remove error code on the stack...
     iretq
 __gp_handler endp

@@ -4,8 +4,29 @@
 #define PML4_SELF_REF 255
 
 #pragma section(".pml4", read, write)
+#pragma section(".epml4", read, write)
+#pragma section(".epdpt", read, write)
+#pragma section(".epd", read, write)
+#pragma section(".ept", read, write)
 
 namespace mm
 {
+    using epml4_t = ept_pml4[512];
+    using ept_t = epte[512];
+
+    using epdpt_t = union
+    {
+        epdpte entry_4kb[512];
+        epdpte_1gb entry_1gb[512];
+    };
+
+    using epd_t = union
+    {
+        epde entry_4kb[512];
+        epde_2mb entry_2mb[512];
+    };
+
     typedef union _virt_addr_t
     {
         u64 value;
@@ -127,10 +148,14 @@ namespace mm
     enum class map_type{ dest, src };
 
     inline const ppml4e vmxroot_pml4 = reinterpret_cast<ppml4e>(0x7fbfdfeff000);
+
+    // make sure this is 4kb aligned or you are going to be meeting allah...
     __declspec(allocate(".pml4")) inline pml4e pml4[512];
+    __declspec(allocate(".epml4")) inline epml4_t epml4;
+    __declspec(allocate(".epdpt")) inline epdpt_t epdpt[64];
+    __declspec(allocate(".epd")) inline epd_t epd[128];
+    __declspec(allocate(".ept")) inline ept_t ept[256];
 
     // translate vmxroot address's...
     auto translate(virt_addr_t virt_addr) -> u64;
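
The EPT paging structures get the same treatment: every table is reserved statically in the image, with epml4 being a single table while epdpt, epd and ept reserve 64, 128 and 256 tables respectively. The layout only works if each table is exactly one 4 KiB page of 512 eight-byte entries, which a few compile-time checks can document (illustrative, not part of this commit):

    // sketch: size assumptions behind the static EPT reservations above
    static_assert(sizeof(mm::epml4_t) == 0x1000, "EPT PML4 must be one page");
    static_assert(sizeof(mm::epdpt_t) == 0x1000, "EPT PDPT must be one page");
    static_assert(sizeof(mm::epd_t) == 0x1000, "EPT PD must be one page");
    static_assert(sizeof(mm::ept_t) == 0x1000, "EPT PT must be one page");
    // total static footprint: (1 + 64 + 128 + 256) * 4KiB, roughly 1.75 MiB of image space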

@@ -164,6 +164,7 @@ namespace vmcs
         procbased_ctls2.enable_rdtscp = true;
         procbased_ctls2.enable_xsaves = true;
         procbased_ctls2.conceal_vmx_from_pt = true;
+        //procbased_ctls2.enable_ept = true;
 
         if (vmx_basic.vmx_controls)
         {
@@ -210,6 +211,13 @@ namespace vmcs
             __vmx_vmwrite(VMCS_CTRL_VMEXIT_CONTROLS, exit_ctls.flags);
         }
 
+        /*ept_pointer eptp{};
+        eptp.memory_type = MEMORY_TYPE_WRITE_BACK;
+        eptp.enable_access_and_dirty_flags = true;
+        eptp.page_walk_length = EPT_PAGE_WALK_LENGTH_4;
+        eptp.page_frame_number = reinterpret_cast<u64>(&mm::epml4) >> 12;
+        __vmx_vmwrite(VMCS_CTRL_EPT_POINTER, eptp.flags);*/
+
         msr_fix_value.flags = __readmsr(IA32_VMX_PROCBASED_CTLS2);
         procbased_ctls2.flags &= msr_fix_value.allowed_1_settings;
         procbased_ctls2.flags |= msr_fix_value.allowed_0_settings;

@@ -3,7 +3,7 @@
 auto vmxlaunch::init_vmcs(cr3 cr3_value) -> void
 {
     const auto vcpu =
-        vmxon::g_vmx_ctx->vcpus[
+        &vmxon::g_vmx_ctx.vcpus[
             KeGetCurrentProcessorNumber()];
 
     __vmx_vmclear(&vcpu->vmcs_phys);
@@ -15,27 +15,21 @@ auto vmxlaunch::init_vmcs(cr3 cr3_value) -> void
     const auto [tr_descriptor, tr_rights, tr_limit, tr_base] =
         gdt::get_info(gdt_value, segment_selector{ readtr() });
 
+    // copy windows TSS and windows GDT...
+    // change TSS base to new TSS...
     hv::segment_descriptor_addr_t tss{ &vcpu->tss };
-    memcpy(&vcpu->tss, (void*)tr_base, sizeof hv::tss64);
-    memcpy(vcpu->gdt, (void*)gdt_value.base_address, PAGE_SIZE);
+    memcpy(&vcpu->tss, reinterpret_cast<void*>(tr_base), sizeof hv::tss64);
+    memcpy(vcpu->gdt, reinterpret_cast<void*>(gdt_value.base_address), PAGE_SIZE);
 
     vcpu->tss.interrupt_stack_table[idt::ist_idx::pf] =
-        reinterpret_cast<u64>(ExAllocatePool(NonPagedPool,
-            PAGE_SIZE * HOST_STACK_PAGES)) + (PAGE_SIZE * HOST_STACK_PAGES);
+        reinterpret_cast<u64>(idt::pf_stk) + sizeof idt::pf_stk;
 
     vcpu->tss.interrupt_stack_table[idt::ist_idx::gp] =
-        reinterpret_cast<u64>(ExAllocatePool(NonPagedPool,
-            PAGE_SIZE * HOST_STACK_PAGES)) + (PAGE_SIZE * HOST_STACK_PAGES);
+        reinterpret_cast<u64>(idt::gp_stk) + sizeof idt::gp_stk;
 
     vcpu->tss.interrupt_stack_table[idt::ist_idx::de] =
-        reinterpret_cast<u64>(ExAllocatePool(NonPagedPool,
-            PAGE_SIZE * HOST_STACK_PAGES)) + (PAGE_SIZE * HOST_STACK_PAGES);
+        reinterpret_cast<u64>(idt::de_stk) + sizeof idt::de_stk;
 
     vcpu->tss.interrupt_stack_table[idt::ist_idx::nmi] =
-        reinterpret_cast<u64>(ExAllocatePool(NonPagedPool,
-            PAGE_SIZE * HOST_STACK_PAGES)) + (PAGE_SIZE * HOST_STACK_PAGES);
+        reinterpret_cast<u64>(idt::nmi_stk) + sizeof idt::nmi_stk;
 
     const auto tr_idx = segment_selector{ readtr() }.idx;
 
     vcpu->gdt[tr_idx].base_address_upper = tss.upper;
@@ -44,7 +38,8 @@ auto vmxlaunch::init_vmcs(cr3 cr3_value) -> void
     vcpu->gdt[tr_idx].base_address_low = tss.low;
 
     vmcs::setup_host(&vmxexit_handler,
-        vcpu->host_stack, cr3_value, (u64)vcpu->gdt);
+        reinterpret_cast<u64>(vcpu->host_stack),
+        cr3_value, reinterpret_cast<u64>(vcpu->gdt));
 
     vmcs::setup_guest();
     vmcs::setup_controls();

@@ -10,17 +10,11 @@ namespace vmxon
         hv::vmx_basic_msr_t vmx_basic;
         vmx_basic.control = __readmsr(IA32_VMX_BASIC);
 
-        vcpu_ctx->vmxon =
-            reinterpret_cast<hv::pvmxon_region_ctx>(
-                MmAllocateContiguousMemory(PAGE_SIZE, mem_range));
 
         vcpu_ctx->vmxon_phys =
-            MmGetPhysicalAddress(vcpu_ctx->vmxon).QuadPart;
+            MmGetPhysicalAddress(&vcpu_ctx->vmxon).QuadPart;
 
-        RtlSecureZeroMemory(
-            vcpu_ctx->vmxon, PAGE_SIZE);
 
-        vcpu_ctx->vmxon->header
+        vcpu_ctx->vmxon
+            .header
             .bits
             .revision_identifier =
                 vmx_basic.bits
@@ -35,17 +29,11 @@ namespace vmxon
         hv::vmx_basic_msr_t vmx_basic;
         vmx_basic.control = __readmsr(IA32_VMX_BASIC);
 
-        vcpu_ctx->vmcs =
-            reinterpret_cast<hv::pvmcs_ctx>(
-                MmAllocateContiguousMemory(PAGE_SIZE, mem_range));
 
        vcpu_ctx->vmcs_phys =
-            MmGetPhysicalAddress(vcpu_ctx->vmcs).QuadPart;
+            MmGetPhysicalAddress(&vcpu_ctx->vmcs).QuadPart;
 
-        RtlSecureZeroMemory(
-            vcpu_ctx->vmcs, PAGE_SIZE);
 
-        vcpu_ctx->vmcs->header
+        vcpu_ctx->vmcs
+            .header
             .bits
             .revision_identifier =
                 vmx_basic.bits
@@ -58,31 +46,10 @@ namespace vmxon
            KeQueryActiveProcessorCountEx(
                ALL_PROCESSOR_GROUPS);
 
-        vmx_ctx->vcpus =
-            reinterpret_cast<hv::pvcpu_ctx*>(
-                ExAllocatePool(NonPagedPool,
-                    sizeof(hv::pvcpu_ctx) * vmx_ctx->vcpu_count));
 
-        for (auto idx = 0u; idx < g_vmx_ctx->vcpu_count; ++idx)
+        for (auto idx = 0u; idx < vmx_ctx->vcpu_count; ++idx)
        {
-            vmx_ctx->vcpus[idx] =
-                reinterpret_cast<hv::pvcpu_ctx>(
-                    ExAllocatePool(NonPagedPool, sizeof hv::vcpu_ctx));
 
-            vmx_ctx->vcpus[idx]->host_stack =
-                reinterpret_cast<u64>(
-                    ExAllocatePool(NonPagedPool,
-                        PAGE_SIZE * HOST_STACK_PAGES));
 
-            vmx_ctx->vcpus[idx]->gdt =
-                reinterpret_cast<segment_descriptor_64*>(
-                    ExAllocatePool(NonPagedPool, PAGE_SIZE));
 
-            RtlZeroMemory(reinterpret_cast<void*>(
-                vmx_ctx->vcpus[idx]->host_stack), PAGE_SIZE * HOST_STACK_PAGES);
 
-            create_vmxon_region(vmx_ctx->vcpus[idx]);
-            create_vmcs(vmx_ctx->vcpus[idx]);
+            create_vmxon_region(&vmx_ctx->vcpus[idx]);
+            create_vmcs(&vmx_ctx->vcpus[idx]);
        }
    }
@@ -108,7 +75,7 @@ namespace vmxon
        const auto vmxon_result =
            __vmx_on((unsigned long long*)
-                &vmxon::g_vmx_ctx->vcpus[
-                    KeGetCurrentProcessorNumber()]->vmxon_phys);
+                &vmxon::g_vmx_ctx.vcpus[
+                    KeGetCurrentProcessorNumber()].vmxon_phys);
    }
 }
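
With the VMXON region and VMCS embedded in the statically allocated vcpu_ctx, their physical addresses come straight from MmGetPhysicalAddress on the member, and the only initialization they still need is the revision identifier from IA32_VMX_BASIC, which both hunks write through the header bitfield. Stripped of the hv:: bitfield types, the requirement amounts to the following (hedged sketch; helper name hypothetical):

    // sketch: the low 31 bits of IA32_VMX_BASIC are the revision identifier
    // that must be written to the first 4 bytes of the VMXON region and the
    // VMCS before they are used (bit 31 stays clear for an ordinary VMCS)...
    void example_write_revision_id(void* vmxon_region, void* vmcs_region)
    {
        const auto revision_id =
            static_cast<u32>(__readmsr(IA32_VMX_BASIC) & 0x7FFFFFFF);

        *static_cast<u32*>(vmxon_region) = revision_id;
        *static_cast<u32*>(vmcs_region) = revision_id;
    }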

@@ -1,5 +1,6 @@
 #pragma once
 #include "hv_types.hpp"
+#pragma section(".vcpu", read, write)
 
 inline auto get_cpu_num() -> u32
 {
@@ -12,7 +13,7 @@ inline auto get_cpu_num() -> u32
 }
 
 #define g_vcpu \
-    vmxon::g_vmx_ctx->vcpus[get_cpu_num()]
+    vmxon::g_vmx_ctx.vcpus[get_cpu_num()]
 
 namespace vmxon
 {
@@ -22,5 +23,5 @@ namespace vmxon
     auto init_vmxon() -> void;
 
     // vmxroot global object... contains all vcpu information...
-    inline hv::pvmx_ctx g_vmx_ctx;
+    __declspec(allocate(".vcpu")) inline hv::vmx_ctx g_vmx_ctx;
 }