added paging table code and an example hypercall to initialize the paging tables.

xerox 4 years ago
parent d3f4b34dbe
commit ebe2e90af9

@ -1,7 +1,7 @@
_text segment
hyperv proc
hypercall proc
cpuid
ret
hyperv endp
hypercall endp
_text ends
end

@ -1,3 +1,9 @@
#pragma once
#define VMEXIT_KEY 0xDEADBEEFDEADBEEF
extern "C" size_t hyperv(size_t key);
enum class vmexit_command_t
{
init_paging_tables = 0x111
// add your commands here...
};
extern "C" size_t hypercall(size_t key, vmexit_command_t command);

@ -3,6 +3,7 @@
int main()
{
std::printf("[+] hyper-v (CPUID) result -> 0x%x\n", hyperv(VMEXIT_KEY));
auto result = hypercall(VMEXIT_KEY, vmexit_command_t::init_paging_tables);
std::printf("[+] hyper-v (CPUID) init page table result -> %d\n", result);
std::getchar();
}
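The usermode stubs above are intentionally thin: with the Microsoft x64 calling convention, hypercall(VMEXIT_KEY, vmexit_command_t::init_paging_tables) places the key in RCX and the command in RDX before the single CPUID in hypercall proc fires, which is exactly what the vmexit handlers further down check (RCX against VMEXIT_KEY, RDX cast to vmexit_command_t) before handing the result back through RAX.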

@ -315,10 +315,12 @@
<FilesToPackage Include="$(TargetPath)" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="ia32.hpp" />
<ClInclude Include="pg_table.h" />
<ClInclude Include="types.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="pg_table.cpp" />
<ClCompile Include="vmexit_handler.cpp" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />

@ -17,10 +17,16 @@
<ClInclude Include="pg_table.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="ia32.hpp">
<Filter>Header Files</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<ClCompile Include="vmexit_handler.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="pg_table.cpp">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
</Project>

File diff suppressed because it is too large.

@ -0,0 +1,138 @@
#include "pg_table.h"
namespace pg_table
{
void* translate(void* virtual_address, const ptable_entries entries)
{
virt_addr_t virt_addr{ virtual_address };
virt_addr_t cursor{ hyperv_pml4 };
if (entries) entries->pml4e = reinterpret_cast<ppml4e>(cursor.value)[virt_addr.pml4_index];
if (!reinterpret_cast<ppml4e>(cursor.value)[virt_addr.pml4_index].present)
return nullptr;
// set the cursor to self reference so that when we read
// the addresses pointed to by the cursor, it's going to be a pdpt...
cursor.pdpt_index = virt_addr_t{ hyperv_pml4 }.pml4_index;
cursor.pd_index = virt_addr_t{ hyperv_pml4 }.pml4_index;
cursor.pt_index = virt_addr.pml4_index;
if (entries) entries->pdpte = reinterpret_cast<ppdpte>(cursor.value)[virt_addr.pdpt_index];
if (!reinterpret_cast<ppdpte>(cursor.value)[virt_addr.pdpt_index].present)
return nullptr;
// set the cursor to self reference so that when we read
// the addresses pointed to by the cursor, it's going to be a pd...
cursor.pdpt_index = virt_addr_t{ hyperv_pml4 }.pml4_index;
cursor.pd_index = virt_addr.pml4_index;
cursor.pt_index = virt_addr.pdpt_index;
if (entries) entries->pde = reinterpret_cast<ppde>(cursor.value)[virt_addr.pd_index];
if (!reinterpret_cast<ppde>(cursor.value)[virt_addr.pd_index].present)
return nullptr;
// set the cursor to self reference so that when we read
// the addresses pointed to by the cursor, it's going to be a pt...
cursor.pdpt_index = virt_addr.pml4_index;
cursor.pd_index = virt_addr.pdpt_index;
cursor.pt_index = virt_addr.pd_index;
if (entries) entries->pte = reinterpret_cast<ppte>(cursor.value)[virt_addr.pt_index];
if (!reinterpret_cast<ppte>(cursor.value)[virt_addr.pt_index].present)
return nullptr;
return reinterpret_cast<void*>(
reinterpret_cast<ppte>(cursor.value)[virt_addr.pt_index].pfn << 12);
}
void* translate(void* virtual_address, u32 pml4_pfn, const ptable_entries entries)
{
virt_addr_t virt_addr{ virtual_address };
const auto cursor = get_cursor_page();
set_cursor_page(pml4_pfn);
if (!reinterpret_cast<ppml4e>(cursor)[virt_addr.pml4_index].present)
return nullptr;
if (entries) entries->pml4e = reinterpret_cast<ppml4e>(cursor)[virt_addr.pml4_index];
set_cursor_page(reinterpret_cast<ppml4e>(cursor)[virt_addr.pml4_index].pfn);
if (!reinterpret_cast<ppdpte>(cursor)[virt_addr.pdpt_index].present)
return nullptr;
if (entries) entries->pdpte = reinterpret_cast<ppdpte>(cursor)[virt_addr.pdpt_index];
set_cursor_page(reinterpret_cast<ppdpte>(cursor)[virt_addr.pdpt_index].pfn);
if (!reinterpret_cast<ppde>(cursor)[virt_addr.pd_index].present)
return nullptr;
if (entries) entries->pde = reinterpret_cast<ppde>(cursor)[virt_addr.pd_index];
set_cursor_page(reinterpret_cast<ppde>(cursor)[virt_addr.pd_index].pfn);
if (!reinterpret_cast<ppte>(cursor)[virt_addr.pt_index].present)
return nullptr;
if (entries) entries->pte = reinterpret_cast<ppte>(cursor)[virt_addr.pt_index];
return reinterpret_cast<void*>(
reinterpret_cast<ppte>(cursor)[virt_addr.pt_index].pfn << 12);
}
void set_cursor_page(u32 phys_pfn)
{
cpuid_eax_01 cpuid_value;
__cpuid((int*)&cpuid_value, 1);
pg_table::pt[cpuid_value
.cpuid_additional_information
.initial_apic_id].pfn = phys_pfn;
// flush tlb for this page and then ensure the instruction stream
// is serialized so as not to execute instructions out of order and access the page
// before the TLB is flushed...
__invlpg(get_cursor_page());
_mm_lfence();
}
void* get_cursor_page()
{
cpuid_eax_01 cpuid_value;
__cpuid((int*)&cpuid_value, 1);
constexpr auto cursor_page = 0x00007F7FFFE00000;
virt_addr_t virt_addr{ reinterpret_cast<void*>(cursor_page) };
virt_addr.pt_index = cpuid_value
.cpuid_additional_information
.initial_apic_id;
return virt_addr.value;
}
bool init_pg_tables()
{
auto pdpt_phys = reinterpret_cast<u64>(translate(pdpt));
auto pd_phys = reinterpret_cast<u64>(translate(pd));
auto pt_phys = reinterpret_cast<u64>(translate(pt));
if (!pdpt_phys || !pd_phys || !pt_phys)
return false;
hyperv_pml4[254].present = true;
hyperv_pml4[254].pfn = pdpt_phys >> 12;
hyperv_pml4[254].user_supervisor = false;
hyperv_pml4[254].rw = true;
pdpt[511].present = true;
pdpt[511].pfn = pd_phys >> 12;
pdpt[511].user_supervisor = false;
pdpt[511].rw = true;
pd[511].present = true;
pd[511].pfn = pt_phys >> 12;
pd[511].user_supervisor = false;
pd[511].rw = true;
for (auto idx = 0u; idx < 512; ++idx)
{
pt[idx].present = true;
pt[idx].user_supervisor = false;
pt[idx].rw = true;
}
return true;
}
}
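set_cursor_page/get_cursor_page give each core a private 4KB window into arbitrary physical memory: remap the window, then read or write through the returned virtual address. A minimal sketch of a consumer (read_phys_page is hypothetical and not part of this commit; it assumes pg_table.h and intrin.h are included):

// copy up to one page of physical memory through the per-core cursor window...
inline void read_phys_page(u32 phys_pfn, void* buffer, u64 size)
{
    pg_table::set_cursor_page(phys_pfn); // remap the cursor PTE, then invlpg + lfence...
    __movsb(reinterpret_cast<unsigned char*>(buffer),
        reinterpret_cast<const unsigned char*>(pg_table::get_cursor_page()),
        size > 0x1000 ? 0x1000 : size);  // the window only covers a single 4KB page...
}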

@ -1,12 +1,15 @@
#pragma once
#include "types.h"
#include "ia32.hpp"
#pragma section(".pdpt", read, write)
#pragma section(".pd", read, write)
#pragma section(".pt", read, write)
typedef union _virt_addr_t
namespace pg_table
{
typedef union _virt_addr_t
{
void* value;
struct
{
@ -17,15 +20,15 @@ typedef union _virt_addr_t
u64 pml4_index : 9;
u64 reserved : 16;
};
} virt_addr_t, * pvirt_addr_t;
} virt_addr_t, * pvirt_addr_t;
typedef union _pml4e
{
typedef union _pml4e
{
u64 value;
struct
{
u64 present : 1; // Must be 1, region invalid if 0.
u64 ReadWrite : 1; // If 0, writes not allowed.
u64 rw : 1; // If 0, writes not allowed.
u64 user_supervisor : 1; // If 0, user-mode accesses not allowed.
u64 PageWriteThrough : 1; // Determines the memory type used to access PDPT.
u64 page_cache : 1; // Determines the memory type used to access PDPT.
@ -38,10 +41,10 @@ typedef union _pml4e
u64 Ignored3 : 11;
u64 nx : 1; // If 1, instruction fetches not allowed.
};
} pml4e, * ppml4e;
} pml4e, * ppml4e;
typedef union _pdpte
{
typedef union _pdpte
{
u64 value;
struct
{
@ -59,10 +62,10 @@ typedef union _pdpte
u64 Ignored3 : 11;
u64 nx : 1; // If 1, instruction fetches not allowed.
};
} pdpte, * ppdpte;
} pdpte, * ppdpte;
typedef union _pde
{
typedef union _pde
{
u64 value;
struct
{
@ -80,10 +83,10 @@ typedef union _pde
u64 Ignored3 : 11;
u64 nx : 1; // If 1, instruction fetches not allowed.
};
} pde, * ppde;
} pde, * ppde;
typedef union _pte
{
typedef union _pte
{
u64 value;
struct
{
@ -103,11 +106,62 @@ typedef union _pte
u64 ProtectionKey : 4; // If the PKE bit of CR4 is set, determines the protection key.
u64 nx : 1; // If 1, instruction fetches not allowed.
};
} pte, * ppte;
} pte, * ppte;
namespace pg_table
{
typedef struct _table_entries
{
pg_table::pml4e pml4e;
pg_table::pdpte pdpte;
pg_table::pde pde;
pg_table::pte pte;
} table_entries, *ptable_entries;
/// <summary>
/// payload paging tables...
/// </summary>
__declspec(allocate(".pdpt")) inline pdpte pdpt[512];
__declspec(allocate(".pd")) inline pde pd[512];
__declspec(allocate(".pt")) inline pte pt[512];
/// <summary>
/// self referencing pml4e is at 255...
/// </summary>
inline const ppml4e hyperv_pml4{ reinterpret_cast<ppml4e>(0x00007FBFDFEFF000) };
/// <summary>
/// only does address translation for hyper-v's context
/// </summary>
/// <param name="virtual_address">virtual address to be translated...</param>
/// <param name="entries">optional </param>
/// <returns>returns a physical address...</returns>
void* translate(void* virt_addr, const ptable_entries entries = nullptr);
/// <summary>
/// translates linear virtual addresses to linear physical addresses...
/// </summary>
/// <param name="virtual_address">virtual address to translate...</param>
/// <param name="pml4_pfn">page map level four page frame number...</param>
/// <param name="entries">(optional) pointer to a table_entries structure...</param>
/// <returns>linear physical address...</returns>
void* translate(void* virtual_address, u32 pml4_pfn, const ptable_entries entries = nullptr);
/// <summary>
/// changes the cursor address to the specified physical address...
/// after doing so, the TLB entry for that address is going to be flushed...
/// a memory fence is applied to prevent out-of-order execution...
/// </summary>
/// <param name="phys_pfn">pfn of the physical page to change the cursor to...</param>
void set_cursor_page(u32 phys_pfn);
/// <summary>
/// get the cursor page... each core has its own cursor page...
/// </summary>
/// <returns>cursor page for the current core...</returns>
void* get_cursor_page();
/// <summary>
/// initializes paging tables (connects pdpt->pd->pt)
/// </summary>
/// <returns>was the setup successful?</returns>
bool init_pg_tables();
}
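The two magic addresses fall straight out of 4-level translation: with the self-referencing entry at PML4 index 255, setting every index to 255 yields the linear address of the PML4 itself, and the cursor pages sit under PML4 254 -> PDPT 511 -> PD 511, exactly the chain init_pg_tables() wires up. A compile-time sanity check (standalone sketch, not part of the commit):

// compose a canonical 4-level linear address from its table indices...
using u64 = unsigned long long;
constexpr u64 make_virt(u64 pml4, u64 pdpt, u64 pd, u64 pt, u64 offset = 0)
{
    const u64 addr = (pml4 << 39) | (pdpt << 30) | (pd << 21) | (pt << 12) | offset;
    // sign-extend bit 47 so the result is canonical...
    return (addr & (1ull << 47)) ? (addr | 0xFFFF000000000000ull) : addr;
}
// self-referencing PML4E at 255 -> the PML4 maps itself at this address...
static_assert(make_virt(255, 255, 255, 255) == 0x00007FBFDFEFF000, "hyperv_pml4");
// cursor pages live under PML4 254 -> PDPT 511 -> PD 511, one PTE per core (indexed by APIC id)...
static_assert(make_virt(254, 511, 511, 0) == 0x00007F7FFFE00000, "cursor page base");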

@ -52,12 +52,6 @@
#define offset_vmcb 0xBC0
#endif
using u8 = unsigned char;
using u16 = unsigned short;
using u32 = unsigned int;
using u64 = unsigned long long;
using u128 = __m128;
#define VMEXIT_CR0_READ 0x0000
#define VMEXIT_CR1_READ 0x0001
#define VMEXIT_CR2_READ 0x0002
@ -223,8 +217,20 @@ using u128 = __m128;
#define VMEXIT_VMGEXIT 0x0403
#define VMEXIT_INVALID -1
using u8 = unsigned char;
using u16 = unsigned short;
using u32 = unsigned int;
using u64 = unsigned long long;
using u128 = __m128;
namespace svm
{
enum class vmexit_command_t
{
init_paging_tables = 0x111
// add your commands here...
};
typedef struct __declspec(align(16)) _guest_context
{
u8 gap0[8];

@ -1,7 +1,13 @@
#include "types.h"
#include "pg_table.h"
svm::pgs_base_struct vmexit_handler(void* unknown, svm::pguest_context context)
{
// AMD does not have a vmread/vmwrite instruction... only a vmload
// and vmsave instruction... this means I had to hunt down the damn
// VMCB location... this is the pointer chain to the VMCB...
//
// TODO: could sig scan for this in Voyager...
const auto vmcb = *reinterpret_cast<svm::pvmcb*>(
*reinterpret_cast<u64*>(
*reinterpret_cast<u64*>(
@ -10,7 +16,17 @@ svm::pgs_base_struct vmexit_handler(void* unknown, svm::pguest_context context)
if (vmcb->exitcode == VMEXIT_CPUID && context->rcx == VMEXIT_KEY)
{
vmcb->rax = 0xC0FFEE;
switch ((svm::vmexit_command_t)context->rdx)
{
case svm::vmexit_command_t::init_paging_tables:
// AMD is a gorilla... why is RAX in the VMCB?
vmcb->rax = pg_table::init_pg_tables();
break;
default:
break;
}
// advance RIP and return back to VMLOAD/VMRUN...
vmcb->rip = vmcb->nrip;
return reinterpret_cast<svm::pgs_base_struct>(__readgsqword(0));
}
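Both command enums carry an "add your commands here..." marker, so a new hypercall is one enumerator on each side plus one more case in the dispatch above. A fragment-level sketch with a hypothetical ping command (value chosen arbitrarily, not part of this commit):

// added to vmexit_command_t in both the usermode header and the payload's types.h...
//     ping = 0x112,
// added to the switch inside vmexit_handler above...
case svm::vmexit_command_t::ping:
    vmcb->rax = 0x1337; // trivial liveness value handed back to the caller...
    break;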

@ -662,6 +662,7 @@
<FilesToPackage Include="$(TargetPath)" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="pg_table.cpp" />
<ClCompile Include="vmexit_handler.cpp" />
</ItemGroup>
<ItemGroup>

@ -14,6 +14,9 @@
<ClCompile Include="vmexit_handler.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="pg_table.cpp">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="types.h">

@ -53,6 +53,7 @@ using UINT64_t = unsigned long long;
*/
typedef union
{
UINT64_t flags;
struct
{
/**
@ -245,12 +246,11 @@ typedef union
#define CR0_PAGING_ENABLE(_) (((_) >> 31) & 0x01)
UINT64_t reserved4 : 32;
};
UINT64_t flags;
} cr0;
typedef union
{
UINT64_t flags;
struct
{
UINT64_t reserved1 : 3;
@ -293,15 +293,13 @@ typedef union
* @see Vol3A[4.3(32-BIT PAGING)]
* @see Vol3A[4.5(4-LEVEL PAGING)]
*/
UINT64_t address_of_page_directory : 36;
UINT64_t pml4_pfn : 36;
#define CR3_ADDRESS_OF_PAGE_DIRECTORY_BIT 12
#define CR3_ADDRESS_OF_PAGE_DIRECTORY_FLAG 0xFFFFFFFFF000
#define CR3_ADDRESS_OF_PAGE_DIRECTORY_MASK 0xFFFFFFFFF
#define CR3_ADDRESS_OF_PAGE_DIRECTORY(_) (((_) >> 12) & 0xFFFFFFFFF)
UINT64_t reserved3 : 16;
};
UINT64_t flags;
} cr3;
typedef union

@ -0,0 +1,138 @@
#include "pg_table.h"
namespace pg_table
{
void* translate(void* virtual_address, const ptable_entries entries)
{
virt_addr_t virt_addr{ virtual_address };
virt_addr_t cursor{ hyperv_pml4 };
if (entries) entries->pml4e = reinterpret_cast<ppml4e>(cursor.value)[virt_addr.pml4_index];
if (!reinterpret_cast<ppml4e>(cursor.value)[virt_addr.pml4_index].present)
return nullptr;
// set the cursor to self reference so that when we read
// the addresses pointed to by the cursor, it's going to be a pdpt...
cursor.pdpt_index = virt_addr_t{ hyperv_pml4 }.pml4_index;
cursor.pd_index = virt_addr_t{ hyperv_pml4 }.pml4_index;
cursor.pt_index = virt_addr.pml4_index;
if (entries) entries->pdpte = reinterpret_cast<ppdpte>(cursor.value)[virt_addr.pdpt_index];
if (!reinterpret_cast<ppdpte>(cursor.value)[virt_addr.pdpt_index].present)
return nullptr;
// set the cursor to self reference so that when we read
// the addresses pointed to by the cursor, it's going to be a pd...
cursor.pdpt_index = virt_addr_t{ hyperv_pml4 }.pml4_index;
cursor.pd_index = virt_addr.pml4_index;
cursor.pt_index = virt_addr.pdpt_index;
if (entries) entries->pde = reinterpret_cast<ppde>(cursor.value)[virt_addr.pd_index];
if (!reinterpret_cast<ppde>(cursor.value)[virt_addr.pd_index].present)
return nullptr;
// set the cursor to self reference so that when we read
// the addresses pointed to by the cursor, it's going to be a pt...
cursor.pdpt_index = virt_addr.pml4_index;
cursor.pd_index = virt_addr.pdpt_index;
cursor.pt_index = virt_addr.pd_index;
if (entries) entries->pte = reinterpret_cast<ppte>(cursor.value)[virt_addr.pt_index];
if (!reinterpret_cast<ppte>(cursor.value)[virt_addr.pt_index].present)
return nullptr;
return reinterpret_cast<void*>(
reinterpret_cast<ppte>(cursor.value)[virt_addr.pt_index].pfn << 12);
}
void* translate(void* virtual_address, u32 pml4_pfn, const ptable_entries entries)
{
virt_addr_t virt_addr{ virtual_address };
const auto cursor = get_cursor_page();
set_cursor_page(pml4_pfn);
if (!reinterpret_cast<ppml4e>(cursor)[virt_addr.pml4_index].present)
return nullptr;
if (entries) entries->pml4e = reinterpret_cast<ppml4e>(cursor)[virt_addr.pml4_index];
set_cursor_page(reinterpret_cast<ppml4e>(cursor)[virt_addr.pml4_index].pfn);
if (!reinterpret_cast<ppdpte>(cursor)[virt_addr.pdpt_index].present)
return nullptr;
if (entries) entries->pdpte = reinterpret_cast<ppdpte>(cursor)[virt_addr.pdpt_index];
set_cursor_page(reinterpret_cast<ppdpte>(cursor)[virt_addr.pdpt_index].pfn);
if (!reinterpret_cast<ppde>(cursor)[virt_addr.pd_index].present)
return nullptr;
if (entries) entries->pde = reinterpret_cast<ppde>(cursor)[virt_addr.pd_index];
set_cursor_page(reinterpret_cast<ppde>(cursor)[virt_addr.pd_index].pfn);
if (!reinterpret_cast<ppte>(cursor)[virt_addr.pt_index].present)
return nullptr;
if (entries) entries->pte = reinterpret_cast<ppte>(cursor)[virt_addr.pt_index];
return reinterpret_cast<void*>(
reinterpret_cast<ppte>(cursor)[virt_addr.pt_index].pfn << 12);
}
void set_cursor_page(u32 phys_pfn)
{
cpuid_eax_01 cpuid_value;
__cpuid((int*)&cpuid_value, 1);
pg_table::pt[cpuid_value
.cpuid_additional_information
.initial_apic_id].pfn = phys_pfn;
// flush tlb for this page and then ensure the instruction stream
// is serialized so as not to execute instructions out of order and access the page
// before the TLB is flushed...
__invlpg(get_cursor_page());
_mm_lfence();
}
void* get_cursor_page()
{
cpuid_eax_01 cpuid_value;
__cpuid((int*)&cpuid_value, 1);
constexpr auto cursor_page = 0x00007F7FFFE00000;
virt_addr_t virt_addr{ reinterpret_cast<void*>(cursor_page) };
virt_addr.pt_index = cpuid_value
.cpuid_additional_information
.initial_apic_id;
return virt_addr.value;
}
bool init_pg_tables()
{
auto pdpt_phys = reinterpret_cast<u64>(translate(pdpt));
auto pd_phys = reinterpret_cast<u64>(translate(pd));
auto pt_phys = reinterpret_cast<u64>(translate(pt));
if (!pdpt_phys || !pd_phys || !pt_phys)
return false;
hyperv_pml4[254].present = true;
hyperv_pml4[254].pfn = pdpt_phys >> 12;
hyperv_pml4[254].user_supervisor = false;
hyperv_pml4[254].rw = true;
pdpt[511].present = true;
pdpt[511].pfn = pd_phys >> 12;
pdpt[511].user_supervisor = false;
pdpt[511].rw = true;
pd[511].present = true;
pd[511].pfn = pt_phys >> 12;
pd[511].user_supervisor = false;
pd[511].rw = true;
for (auto idx = 0u; idx < 512; ++idx)
{
pt[idx].present = true;
pt[idx].user_supervisor = false;
pt[idx].rw = true;
}
return true;
}
}

@ -1,12 +1,15 @@
#pragma once
#include "types.h"
#include "ia32.hpp"
#pragma section(".pdpt", read, write)
#pragma section(".pd", read, write)
#pragma section(".pt", read, write)
typedef union _virt_addr_t
namespace pg_table
{
typedef union _virt_addr_t
{
void* value;
struct
{
@ -17,15 +20,15 @@ typedef union _virt_addr_t
u64 pml4_index : 9;
u64 reserved : 16;
};
} virt_addr_t, * pvirt_addr_t;
} virt_addr_t, * pvirt_addr_t;
typedef union _pml4e
{
typedef union _pml4e
{
u64 value;
struct
{
u64 present : 1; // Must be 1, region invalid if 0.
u64 ReadWrite : 1; // If 0, writes not allowed.
u64 rw : 1; // If 0, writes not allowed.
u64 user_supervisor : 1; // If 0, user-mode accesses not allowed.
u64 PageWriteThrough : 1; // Determines the memory type used to access PDPT.
u64 page_cache : 1; // Determines the memory type used to access PDPT.
@ -38,10 +41,10 @@ typedef union _pml4e
u64 Ignored3 : 11;
u64 nx : 1; // If 1, instruction fetches not allowed.
};
} pml4e, * ppml4e;
} pml4e, * ppml4e;
typedef union _pdpte
{
typedef union _pdpte
{
u64 value;
struct
{
@ -59,10 +62,10 @@ typedef union _pdpte
u64 Ignored3 : 11;
u64 nx : 1; // If 1, instruction fetches not allowed.
};
} pdpte, * ppdpte;
} pdpte, * ppdpte;
typedef union _pde
{
typedef union _pde
{
u64 value;
struct
{
@ -80,10 +83,10 @@ typedef union _pde
u64 Ignored3 : 11;
u64 nx : 1; // If 1, instruction fetches not allowed.
};
} pde, * ppde;
} pde, * ppde;
typedef union _pte
{
typedef union _pte
{
u64 value;
struct
{
@ -103,11 +106,62 @@ typedef union _pte
u64 ProtectionKey : 4; // If the PKE bit of CR4 is set, determines the protection key.
u64 nx : 1; // If 1, instruction fetches not allowed.
};
} pte, * ppte;
} pte, * ppte;
namespace pg_table
{
typedef struct _table_entries
{
pg_table::pml4e pml4e;
pg_table::pdpte pdpte;
pg_table::pde pde;
pg_table::pte pte;
} table_entries, *ptable_entries;
/// <summary>
/// payload paging tables...
/// </summary>
__declspec(allocate(".pdpt")) inline pdpte pdpt[512];
__declspec(allocate(".pd")) inline pde pd[512];
__declspec(allocate(".pt")) inline pte pt[512];
/// <summary>
/// self referencing pml4e is at 255...
/// </summary>
inline const ppml4e hyperv_pml4{ reinterpret_cast<ppml4e>(0x00007FBFDFEFF000) };
/// <summary>
/// only does address translation for hyper-v's context
/// </summary>
/// <param name="virtual_address">virtual address to be translated...</param>
/// <param name="entries">optional </param>
/// <returns>returns a physical address...</returns>
void* translate(void* virt_addr, const ptable_entries entries = nullptr);
/// <summary>
/// translates linear virtual addresses to linear physical addresses...
/// </summary>
/// <param name="virtual_address">virtual address to translate...</param>
/// <param name="pml4_pfn">page map level four page frame number...</param>
/// <param name="entries">(optional) pointer to a table_entries structure...</param>
/// <returns>linear physical address...</returns>
void* translate(void* virtual_address, u32 pml4_pfn, const ptable_entries entries = nullptr);
/// <summary>
/// changes the cursor address to the specified physical address...
/// after doing so, the TLB entry for that address is going to be flushed...
/// a memory fence is applied to prevent out-of-order execution...
/// </summary>
/// <param name="phys_pfn">pfn of the physical page to change the cursor to...</param>
void set_cursor_page(u32 phys_pfn);
/// <summary>
/// get the cursor page... each core has its own cursor page...
/// </summary>
/// <returns>cursor page for the current core...</returns>
void* get_cursor_page();
/// <summary>
/// initializes paging tables (connects pdpt->pd->pt)
/// </summary>
/// <returns>was the setup successful?</returns>
bool init_pg_tables();
}

@ -4,6 +4,7 @@
#include <cstddef>
#define PORT_NUM 0x2F8
#define VMEXIT_KEY 0xDEADBEEFDEADBEEF
#define DBG_PRINT(arg) \
__outbytestring(PORT_NUM, (unsigned char*)arg, sizeof arg);
@ -13,6 +14,12 @@ using u32 = unsigned int;
using u64 = unsigned long long;
using u128 = __m128;
enum class vmexit_command_t
{
init_paging_tables = 0x111
// add your commands here...
};
typedef struct _context_t
{
u64 rax;
@ -49,12 +56,11 @@ using vmexit_handler_t = void(__fastcall*)(pcontext_t context, void* unknown);
typedef struct _voyager_t
{
// RVA from golden record entry ---> back to original vmexit handler...
u64 vcpu_run_rva;
u64 vmexit_handler_rva;
u64 hyperv_module_base;
u64 hyperv_module_size;
u64 record_base;
u64 record_size;
} voyager_t, *pvoyager_t;
#pragma pack(pop)
__declspec(dllexport) inline voyager_t voyager_context;

@ -1,7 +1,4 @@
#include "types.h"
#include "ia32.hpp"
#define VMEXIT_KEY 0xDEADBEEFDEADBEEF
#include "pg_table.h"
#if WINVER > 1803
void vmexit_handler(pcontext_t* context, void* unknown)
@ -21,10 +18,15 @@ void vmexit_handler(pcontext_t context, void* unknown)
{
if (guest_registers->rcx == VMEXIT_KEY)
{
guest_registers->rax = 0xC0FFEE;
switch ((vmexit_command_t)(guest_registers->rdx))
{
case vmexit_command_t::init_paging_tables:
guest_registers->rax = pg_table::init_pg_tables();
break;
default:
break;
}
// advance rip, no one better execute cpuid instruction
// with 0xDEADBEEFDEADBEEF in RCX...
size_t rip, exec_len;
__vmx_vmread(VMCS_GUEST_RIP, &rip);
__vmx_vmread(VMCS_VMEXIT_INSTRUCTION_LENGTH, &exec_len);
@ -33,9 +35,9 @@ void vmexit_handler(pcontext_t context, void* unknown)
}
}
// when hyper-v gets remapped out of winload's context
// the linear virtual addresses change... thus an adjustment is required...
// since there are a lot of contexts being created and switched about,
// all hooks are done relative inside of hyper-v...
reinterpret_cast<vmexit_handler_t>(
reinterpret_cast<u64>(&vmexit_handler) -
voyager_context.vcpu_run_rva)(context, unknown);
voyager_context.vmexit_handler_rva)(context, unknown);
}

@ -232,13 +232,14 @@ EFI_STATUS EFIAPI ArchStartBootApplicationHook(VOID* AppEntry, VOID* ImageBase,
LOAD_PE_IMG_MASK
);
MmMapPhysicalMemory = RESOLVE_RVA(
MmMapPhysicalMemory =
RESOLVE_RVA(
FindPattern(
ImageBase,
ImageSize,
MAP_PHYSICAL_SIG,
MAP_PHYSICAL_MASK
), 5, 1);
MAP_PHYSICAL_MASK),
5, 1);
gST->ConOut->ClearScreen(gST->ConOut);
gST->ConOut->OutputString(gST->ConOut, AsciiArt);

@ -5,9 +5,9 @@ SHITHOOK HvLoadImageBufferHook;
SHITHOOK HvLoadAllocImageHook;
SHITHOOK TransferControlShitHook;
MAP_PHYSICAL MmMapPhysicalMemory;
BOOLEAN HvExtendedAllocation = FALSE;
BOOLEAN HvHookedHyperV = FALSE;
MAP_PHYSICAL MmMapPhysicalMemory;
EFI_STATUS EFIAPI HvBlImgLoadPEImageFromSourceBuffer
(
@ -72,12 +72,18 @@ EFI_STATUS EFIAPI HvBlImgLoadPEImageFromSourceBuffer
if (!AsciiStrCmp(&pSection->Name, ".reloc"))
{
VOYAGER_T VoyagerData;
//
// the payload's base address needs to be page aligned in
// order for the paging table sections to be page aligned...
//
UINT32 PageRemainder = (0x1000 - (((*ImageBase + pSection->VirtualAddress + pSection->Misc.VirtualSize) << 52) >> 52));
MakeVoyagerData
(
&VoyagerData,
*ImageBase,
*ImageSize,
*ImageBase + pSection->VirtualAddress + pSection->Misc.VirtualSize,
*ImageBase + pSection->VirtualAddress + pSection->Misc.VirtualSize + PageRemainder,
PayLoadSize()
);
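The shift pair << 52 >> 52 just isolates the low 12 bits of the end-of-.reloc address, so PageRemainder is the padding needed to push the payload base up to the next 4KB boundary (an already-aligned end still gets a full 0x1000 of padding). An equivalent formulation, shown only for clarity:

// end of the .reloc section inside the loaded hyper-v image...
UINT64 End = *ImageBase + pSection->VirtualAddress + pSection->Misc.VirtualSize;
// same value as (0x1000 - ((End << 52) >> 52))...
UINT32 PageRemainder = 0x1000 - (End & 0xFFF);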
@ -161,12 +167,18 @@ EFI_STATUS EFIAPI HvBlImgLoadPEImageEx
if (!AsciiStrCmp(&pSection->Name, ".reloc"))
{
VOYAGER_T VoyagerData;
//
// the payload's base address needs to be page aligned in
// order for the paging table sections to be page aligned...
//
UINT32 PageRemainder = (0x1000 - (((*ImageBase + pSection->VirtualAddress + pSection->Misc.VirtualSize) << 52) >> 52));
MakeVoyagerData
(
&VoyagerData,
*ImageBase,
*ImageSize,
*ImageBase + pSection->VirtualAddress + pSection->Misc.VirtualSize,
*ImageBase + pSection->VirtualAddress + pSection->Misc.VirtualSize + PageRemainder,
PayLoadSize()
);
@ -198,7 +210,7 @@ UINT64 EFIAPI HvBlImgAllocateImageBuffer
UINT32 memoryType,
UINT32 attributes,
VOID* unused,
UINT32 flags
UINT32 Value
)
{
if (imageSize >= HV_ALLOC_SIZE && !HvExtendedAllocation)
@ -219,7 +231,7 @@ UINT64 EFIAPI HvBlImgAllocateImageBuffer
memoryType,
attributes,
unused,
flags
Value
);
// continue shithooking this function until we have extended the allocation of hyper-v...
@ -229,13 +241,18 @@ UINT64 EFIAPI HvBlImgAllocateImageBuffer
return Result;
}
VOID TransferToHyperV(VOID* Pml4PhysicalAddress, VOID* Unknown, VOID* AssemblyStub, VOID* Unknown2)
VOID TransferToHyperV(UINT64 Pml4PhysicalAddress, VOID* Unknown, VOID* AssemblyStub, VOID* Unknown2)
{
// TODO setup paging tables for the payload...
VOID* Pml4VirtualAddress = NULL;
MmMapPhysicalMemory(&Pml4VirtualAddress, Pml4PhysicalAddress, 0x1000, NULL, NULL);
DBG_PRINT("Hyper-V Pml4PhysicalAddress -> 0x%p\n", Pml4PhysicalAddress);
DBG_PRINT("Hyper-V Pml4VirtualAddress -> 0x%p\n", Pml4VirtualAddress);
PML4E_T SelfRefEntry;
PPML4E_T Pml4 = NULL;
MmMapPhysicalMemory(&Pml4, Pml4PhysicalAddress, 0x1000, NULL, NULL);
// setup self referencing paging table entry...
Pml4[255].Value = NULL;
Pml4[255].Present = TRUE;
Pml4[255].Pfn = Pml4PhysicalAddress >> 12;
Pml4[255].UserSuperVisor = FALSE;
Pml4[255].ReadWrite = TRUE;
DisableShitHook(&TransferControlShitHook);
((VOID(__fastcall*)(VOID*, VOID*, VOID*, VOID*))TransferControlShitHook.Address)
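Writing Pml4[255] as a self-referencing entry here, in winload's context right before control is handed to hyper-v, is what makes the hard-coded hyperv_pml4 address in the payload (0x00007FBFDFEFF000, every index = 255) resolve to hyper-v's own PML4 once that CR3 is live.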

@ -2,6 +2,7 @@
#include "PayLoad.h"
#include "Hv.h"
#include "ShitHook.h"
#include "PagingTables.h"
extern SHITHOOK HvLoadImageHook;
extern SHITHOOK HvLoadAllocImageHook;
@ -20,10 +21,6 @@ static_assert(sizeof(HV_LOAD_PE_IMG_FROM_BUFFER_SIG) == sizeof(HV_LOAD_PE_IMG_FR
#define HV_LOAD_PE_IMG_MASK "xxxx?x????xxxxxxx????xxx"
static_assert(sizeof(HV_LOAD_PE_IMG_SIG) == sizeof(HV_LOAD_PE_IMG_MASK), "signature and mask do not match size...");
#define MAP_PHYSICAL_SIG "\xE8\x00\x00\x00\x00\x85\xC0\x0F\x88\x00\x00\x00\x00\x48\x8B\xBC\x24"
#define MAP_PHYSICAL_MASK "x????xxxx????xxxx"
static_assert(sizeof(MAP_PHYSICAL_SIG) == sizeof(MAP_PHYSICAL_MASK), "signature and mask do not patch sizes...\n");
// 1703-1511
//
// winload.HvlpTransferToHypervisor is used to transfer control to hyper-v...
@ -32,13 +29,8 @@ static_assert(sizeof(MAP_PHYSICAL_SIG) == sizeof(MAP_PHYSICAL_MASK), "signature
#define TRANS_TO_HV_MASK "xxxxxxxxx"
static_assert(sizeof(TRANS_TO_HV_SIG) == sizeof(TRANS_TO_HV_MASK), "signature and mask do not match size...");
typedef EFI_STATUS(EFIAPI* MAP_PHYSICAL)(VOID** VirtualAddress, VOID* PhysicalAddress, UINTN Size,
VOID* Unknown1, VOID* Unknown2);
extern MAP_PHYSICAL MmMapPhysicalMemory;
typedef EFI_STATUS(EFIAPI* ALLOCATE_IMAGE_BUFFER)(VOID** imageBuffer, UINTN imageSize, UINT32 memoryType,
UINT32 attributes, VOID* unused, UINT32 flags);
UINT32 attributes, VOID* unused, UINT32 Value);
typedef EFI_STATUS(EFIAPI* HV_LDR_LOAD_IMAGE_BUFFER)(VOID* a1, VOID* a2, VOID* a3, VOID* a4, UINT64* ImageBase,
UINT32* ImageSize, VOID* a7, VOID* a8, VOID* a9, VOID* a10, VOID* a11, VOID* a12, VOID* a13, VOID* a14, VOID* a15);
@ -64,7 +56,7 @@ UINT64 EFIAPI HvBlImgAllocateImageBuffer
UINT32 memoryType,
UINT32 attributes,
VOID* unused,
UINT32 flags
UINT32 Value
);
/// <summary>
@ -149,7 +141,7 @@ EFI_STATUS EFIAPI HvBlImgLoadPEImageFromSourceBuffer
/// <param name="AssemblyStub">assembly stub to set CR3...</param>
VOID TransferToHyperV
(
VOID* Pml4PhysicalAddress,
UINT64 Pml4PhysicalAddress,
VOID* Unknown,
VOID* AssemblyStub,
VOID* Unknown2

@ -0,0 +1,139 @@
#pragma once
#include "Utils.h"
#define MAP_PHYSICAL_SIG "\xE8\x00\x00\x00\x00\x85\xC0\x0F\x88\x00\x00\x00\x00\x48\x8B\xBC\x24"
#define MAP_PHYSICAL_MASK "x????xxxx????xxxx"
static_assert(sizeof(MAP_PHYSICAL_SIG) == sizeof(MAP_PHYSICAL_MASK), "signature and mask do not match size...\n");
typedef EFI_STATUS(EFIAPI* MAP_PHYSICAL)
(
VOID** VirtualAddress,
VOID* PhysicalAddress,
UINTN Size,
VOID* Unknown,
VOID* Unknown2
);
extern MAP_PHYSICAL MmMapPhysicalMemory;
typedef union _CR3
{
UINTN Value;
struct
{
UINTN reserved1 : 3;
UINTN PageLevelWriteThrough : 1;
UINTN PageLevelCacheDisable : 1;
UINTN reserved2 : 7;
UINTN Pml4Pfn : 36;
UINTN reserved3 : 16;
};
} CR3;
typedef union _VIRT_ADDR_T
{
void* Value;
struct
{
UINT64 offset : 12;
UINT64 PtIdx : 9;
UINT64 PdIdx : 9;
UINT64 PdptIdx : 9;
UINT64 Pml4Idx : 9;
UINT64 reserved : 16;
};
} VIRT_ADDR_T, *PVIRT_ADDR_T;
typedef union _PML4E_T
{
UINT64 Value;
struct
{
UINT64 Present : 1; // Must be 1, region invalid if 0.
UINT64 ReadWrite : 1; // If 0, writes not allowed.
UINT64 UserSuperVisor : 1; // If 0, user-mode accesses not allowed.
UINT64 PageWriteThrough : 1; // Determines the memory type used to access PDPT.
UINT64 page_cache : 1; // Determines the memory type used to access PDPT.
UINT64 accessed : 1; // If 0, this entry has not been used for translation.
UINT64 Ignored1 : 1;
UINT64 LargePage : 1; // Must be 0 for PML4E.
UINT64 Ignored2 : 4;
UINT64 Pfn : 36; // The page frame number of the PDPT of this PML4E.
UINT64 Reserved : 4;
UINT64 Ignored3 : 11;
UINT64 nx : 1; // If 1, instruction fetches not allowed.
};
} PML4E_T, * PPML4E_T;
typedef union _PDPTE
{
UINT64 Value;
struct
{
UINT64 Present : 1; // Must be 1, region invalid if 0.
UINT64 ReadWrite : 1; // If 0, writes not allowed.
UINT64 UserSuperVisor : 1; // If 0, user-mode accesses not allowed.
UINT64 PageWriteThrough : 1; // Determines the memory type used to access PD.
UINT64 page_cache : 1; // Determines the memory type used to access PD.
UINT64 accessed : 1; // If 0, this entry has not been used for translation.
UINT64 Ignored1 : 1;
UINT64 LargePage : 1; // If 1, this entry maps a 1GB page.
UINT64 Ignored2 : 4;
UINT64 Pfn : 36; // The page frame number of the PD of this PDPTE.
UINT64 Reserved : 4;
UINT64 Ignored3 : 11;
UINT64 nx : 1; // If 1, instruction fetches not allowed.
};
} PDPTE_T, * PPDPTE_T;
typedef union _PDE
{
UINT64 Value;
struct
{
UINT64 Present : 1; // Must be 1, region invalid if 0.
UINT64 ReadWrite : 1; // If 0, writes not allowed.
UINT64 UserSuperVisor : 1; // If 0, user-mode accesses not allowed.
UINT64 PageWriteThrough : 1; // Determines the memory type used to access PT.
UINT64 page_cache : 1; // Determines the memory type used to access PT.
UINT64 accessed : 1; // If 0, this entry has not been used for translation.
UINT64 Ignored1 : 1;
UINT64 LargePage : 1; // If 1, this entry maps a 2MB page.
UINT64 Ignored2 : 4;
UINT64 Pfn : 36; // The page frame number of the PT of this PDE.
UINT64 Reserved : 4;
UINT64 Ignored3 : 11;
UINT64 nx : 1; // If 1, instruction fetches not allowed.
};
} PDE_T, * PPDE_T;
typedef union _PTE
{
UINT64 Value;
struct
{
UINT64 Present : 1; // Must be 1, region invalid if 0.
UINT64 ReadWrite : 1; // If 0, writes not allowed.
UINT64 UserSuperVisor : 1; // If 0, user-mode accesses not allowed.
UINT64 PageWriteThrough : 1; // Determines the memory type used to access the memory.
UINT64 page_cache : 1; // Determines the memory type used to access the memory.
UINT64 accessed : 1; // If 0, this entry has not been used for translation.
UINT64 Dirty : 1; // If 0, the memory backing this page has not been written to.
UINT64 PageAccessType : 1; // Determines the memory type used to access the memory.
UINT64 Global : 1; // If 1 and the PGE bit of CR4 is set, translations are global.
UINT64 Ignored2 : 3;
UINT64 Pfn : 36; // The page frame number of the backing physical page.
UINT64 reserved : 4;
UINT64 Ignored3 : 7;
UINT64 ProtectionKey : 4; // If the PKE bit of CR4 is set, determines the protection key.
UINT64 nx : 1; // If 1, instruction fetches not allowed.
};
} PTE_T, *PPTE_T;
typedef struct _TABLE_ENTRIES
{
PML4E_T Pml4Entry;
PDPTE_T PdptEntry;
PDE_T PdEntry;
PTE_T PtEntry;
} TABLE_ENTRIES, *PTABLE_ENTRIES;

@ -4,6 +4,14 @@
// to read the payload from disk and delete it after...
VOID* PayLoad = NULL;
PPTE_T PayLoadPt = NULL;
PPDE_T PayLoadPd = NULL;
PPDPTE_T PayLoadPdPt = NULL;
UINT64 PayLoadPtPhysAddr = NULL;
UINT64 PayLoadPdPhysAddr = NULL;
UINT64 PayLoadPdPtPhysAddr = NULL;
UINT32 PayLoadSize(VOID)
{
EFI_IMAGE_DOS_HEADER* RecordDosImageHeader = PayLoad;
@ -14,7 +22,7 @@ UINT32 PayLoadSize(VOID)
if (RecordNtHeaders->Signature != EFI_IMAGE_NT_SIGNATURE)
return NULL;
return RecordNtHeaders->OptionalHeader.SizeOfImage;
return RecordNtHeaders->OptionalHeader.SizeOfImage + 0x1000;
}
VOID* PayLoadEntry(VOID* ModuleBase)

@ -1,7 +1,15 @@
#pragma once
#include "Utils.h"
#include "PagingTables.h"
#include <Library/ShellLib.h>
extern VOID* PayLoad;
extern PPTE_T PayLoadPt;
extern PPDE_T PayLoadPd;
extern PPDPTE_T PayLoadPdPt;
extern UINT64 PayLoadPtPhysAddr;
extern UINT64 PayLoadPdPhysAddr;
extern UINT64 PayLoadPdPtPhysAddr;
#pragma pack(push, 1)
typedef struct _VOYAGER_T

@ -552,6 +552,7 @@
<ClInclude Include="BootMgfw.h" />
<ClInclude Include="Hv.h" />
<ClInclude Include="HvLoader.h" />
<ClInclude Include="PagingTables.h" />
<ClInclude Include="ShitHook.h" />
<ClInclude Include="PayLoad.h" />
<ClInclude Include="SplashScreen.h" />

@ -64,5 +64,8 @@
<ClInclude Include="Hv.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="PagingTables.h">
<Filter>Header Files</Filter>
</ClInclude>
</ItemGroup>
</Project>

@ -100,12 +100,17 @@ EFI_STATUS EFIAPI BlLdrLoadImage
if (!AsciiStrCmp(&pSection->Name, ".reloc"))
{
VOYAGER_T VoyagerData;
//
// the payload's base address needs to be page aligned in
// order for the paging table sections to be page aligned...
//
UINT32 PageRemainder = (0x1000 - (((TableEntry->ModuleBase + pSection->VirtualAddress + pSection->Misc.VirtualSize) << 52) >> 52));
MakeVoyagerData
(
&VoyagerData,
TableEntry->ModuleBase,
TableEntry->SizeOfImage,
TableEntry->ModuleBase + pSection->VirtualAddress + pSection->Misc.VirtualSize,
TableEntry->ModuleBase + pSection->VirtualAddress + pSection->Misc.VirtualSize + PageRemainder,
PayLoadSize()
);
@ -115,6 +120,7 @@ EFI_STATUS EFIAPI BlLdrLoadImage
VoyagerData.HypervModuleSize,
MapModule(&VoyagerData, PayLoad)
);
// make the .reloc section RWX and increase the section's size...
pSection->Characteristics = SECTION_RWX;
pSection->Misc.VirtualSize += PayLoadSize();
@ -204,8 +210,7 @@ EFI_STATUS EFIAPI BlImgLoadPEImageEx
MakeShitHook(&HvLoadImageBufferHook, RESOLVE_RVA(LoadImage, 5, 1), &HvBlImgLoadPEImageFromSourceBuffer, TRUE);
#elif WINVER <= 1607
MakeShitHook(&HvLoadImageHook, RESOLVE_RVA(LoadImage, 10, 6), &HvBlImgLoadPEImageEx, TRUE);
#endif
#endif
MakeShitHook(&HvLoadAllocImageHook, RESOLVE_RVA(AllocImage, 5, 1), &HvBlImgAllocateImageBuffer, TRUE);
InstalledHvLoaderHook = TRUE;
}
@ -219,7 +224,7 @@ UINT64 EFIAPI BlImgAllocateImageBuffer
UINT32 memoryType,
UINT32 attributes,
VOID* unused,
UINT32 flags
UINT32 Value
)
{
//
@ -251,7 +256,7 @@ UINT64 EFIAPI BlImgAllocateImageBuffer
memoryType,
attributes,
unused,
flags
Value
);
// keep hooking until we extend an allocation...

@ -25,7 +25,7 @@ extern SHITHOOK WinLoadAllocateImageHook;
static_assert(sizeof(ALLOCATE_IMAGE_BUFFER_SIG) == sizeof(ALLOCATE_IMAGE_BUFFER_MASK), "signature and mask do not match size!");
typedef UINT64 (EFIAPI* ALLOCATE_IMAGE_BUFFER)(VOID** imageBuffer, UINTN imageSize, UINT32 memoryType,
UINT32 attributes, VOID* unused, UINT32 flags);
UINT32 attributes, VOID* unused, UINT32 Value);
typedef EFI_STATUS(EFIAPI* LDR_LOAD_IMAGE)(VOID* a1, VOID* a2, CHAR16* ImagePath, UINT64* ImageBasePtr, UINT32* ImageSize,
VOID* a6, VOID* a7, VOID* a8, VOID* a9, VOID* a10, VOID* a11, VOID* a12, VOID* a13, VOID* a14);
@ -93,7 +93,7 @@ EFI_STATUS EFIAPI BlImgAllocateImageBuffer
UINT32 memoryType,
UINT32 attributes,
VOID* unused,
UINT32 flags
UINT32 Value
);
/// <summary>
